mirror of
https://git.yoctoproject.org/poky
synced 2026-02-16 05:33:03 +01:00
Compare commits
94 Commits
hardknott-
...
yocto-3.2.
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
943ef2fad8 | ||
|
|
76dac9d657 | ||
|
|
333f24caec | ||
|
|
e5bd9b93b4 | ||
|
|
a4ff9dd2dc | ||
|
|
2d3224bf20 | ||
|
|
e6f6420d98 | ||
|
|
f0b8b3a960 | ||
|
|
fef73fcd3a | ||
|
|
d12e2d67c9 | ||
|
|
eeb98ec6ae | ||
|
|
3f2bc0a2e1 | ||
|
|
cbd023e0db | ||
|
|
307146220b | ||
|
|
d754cd3a49 | ||
|
|
3d5309b736 | ||
|
|
369b6e0192 | ||
|
|
e03e489758 | ||
|
|
321e17803e | ||
|
|
086ed4af2a | ||
|
|
67ff1d9ffb | ||
|
|
8de9b33e14 | ||
|
|
afe59c8e1d | ||
|
|
f6434fde67 | ||
|
|
e46465c718 | ||
|
|
e4156f232b | ||
|
|
bfa254bd1a | ||
|
|
4315a12330 | ||
|
|
9b58e1d1a8 | ||
|
|
f4ff33fd11 | ||
|
|
f9f50c5638 | ||
|
|
23eef02eff | ||
|
|
bef1f4761e | ||
|
|
8b9bdf1d1e | ||
|
|
1a4b81a392 | ||
|
|
c111b692cc | ||
|
|
701e43727a | ||
|
|
dedca9ecb7 | ||
|
|
d890775c90 | ||
|
|
fd3e68b355 | ||
|
|
678eafa74d | ||
|
|
c2014927f2 | ||
|
|
c5b7872dab | ||
|
|
2691a54e91 | ||
|
|
e2de476001 | ||
|
|
45c8a7e583 | ||
|
|
4d2fd8ddd3 | ||
|
|
ea0af53e2a | ||
|
|
2d342da2a3 | ||
|
|
f1b304df93 | ||
|
|
b569f2a414 | ||
|
|
411f541288 | ||
|
|
83477f0280 | ||
|
|
7e7893983f | ||
|
|
e3a67d60cc | ||
|
|
23a0428069 | ||
|
|
b74901b816 | ||
|
|
010625f35a | ||
|
|
0647439a0a | ||
|
|
87a05c7316 | ||
|
|
5c33ee311c | ||
|
|
3ad92d4d09 | ||
|
|
5e5a7fd73d | ||
|
|
3269613984 | ||
|
|
b955cbdcfb | ||
|
|
58e47e1b70 | ||
|
|
bb0524e189 | ||
|
|
7d58c8bed6 | ||
|
|
5232b03e22 | ||
|
|
e2312cd887 | ||
|
|
f552970178 | ||
|
|
d59e28ea73 | ||
|
|
61642ef429 | ||
|
|
7f6f1519b9 | ||
|
|
528de6bc4f | ||
|
|
0ccf16fab3 | ||
|
|
4e513e2b86 | ||
|
|
1272d1b8fc | ||
|
|
686396e3dc | ||
|
|
2fa7fde32f | ||
|
|
72050b72e2 | ||
|
|
2fa97151cd | ||
|
|
e67a7af07c | ||
|
|
2306702899 | ||
|
|
f652c4d1b8 | ||
|
|
ca1ed50ab3 | ||
|
|
46db037b1f | ||
|
|
70761072f5 | ||
|
|
efa68c6490 | ||
|
|
3daa976efb | ||
|
|
4d35e4b168 | ||
|
|
dff89518bd | ||
|
|
cdae385f7d | ||
|
|
b7a7dde44a |
3
.gitignore
vendored
3
.gitignore
vendored
@@ -30,5 +30,4 @@ hob-image-*.bb
|
||||
pull-*/
|
||||
bitbake/lib/toaster/contrib/tts/backlog.txt
|
||||
bitbake/lib/toaster/contrib/tts/log/*
|
||||
bitbake/lib/toaster/contrib/tts/.cache/*
|
||||
bitbake/lib/bb/tests/runqueue-tests/bitbake-cookerdaemon.log
|
||||
bitbake/lib/toaster/contrib/tts/.cache/*
|
||||
@@ -6,24 +6,24 @@ of OpenEmbedded. It is distro-less (can build a functional image with
|
||||
DISTRO = "nodistro") and contains only emulated machine support.
|
||||
|
||||
For information about OpenEmbedded, see the OpenEmbedded website:
|
||||
https://www.openembedded.org/
|
||||
http://www.openembedded.org/
|
||||
|
||||
The Yocto Project has extensive documentation about OE including a reference manual
|
||||
which can be found at:
|
||||
https://docs.yoctoproject.org/
|
||||
http://yoctoproject.org/documentation
|
||||
|
||||
|
||||
Contributing
|
||||
------------
|
||||
|
||||
Please refer to
|
||||
https://www.openembedded.org/wiki/How_to_submit_a_patch_to_OpenEmbedded
|
||||
http://www.openembedded.org/wiki/How_to_submit_a_patch_to_OpenEmbedded
|
||||
for guidelines on how to submit patches.
|
||||
|
||||
Mailing list:
|
||||
|
||||
https://lists.openembedded.org/g/openembedded-core
|
||||
http://lists.openembedded.org/mailman/listinfo/openembedded-core
|
||||
|
||||
Source code:
|
||||
|
||||
https://git.openembedded.org/openembedded-core/
|
||||
http://git.openembedded.org/openembedded-core/
|
||||
|
||||
@@ -11,7 +11,7 @@ For information about Bitbake, see the OpenEmbedded website:
|
||||
|
||||
Bitbake plain documentation can be found under the doc directory or its integrated
|
||||
html version at the Yocto Project website:
|
||||
https://docs.yoctoproject.org
|
||||
http://yoctoproject.org/documentation
|
||||
|
||||
Contributing
|
||||
------------
|
||||
|
||||
@@ -26,7 +26,7 @@ from bb.main import bitbake_main, BitBakeConfigParameters, BBMainException
|
||||
if sys.getfilesystemencoding() != "utf-8":
|
||||
sys.exit("Please use a locale setting which supports UTF-8 (such as LANG=en_US.UTF-8).\nPython can't change the filesystem locale after loading so we need a UTF-8 when Python starts or things won't work.")
|
||||
|
||||
__version__ = "1.50.0"
|
||||
__version__ = "1.48.0"
|
||||
|
||||
if __name__ == "__main__":
|
||||
if __version__ != bb.__version__:
|
||||
|
||||
@@ -151,6 +151,9 @@ def main():
|
||||
func = getattr(args, 'func', None)
|
||||
if func:
|
||||
client = hashserv.create_client(args.address)
|
||||
# Try to establish a connection to the server now to detect failures
|
||||
# early
|
||||
client.connect()
|
||||
|
||||
return func(args, client)
|
||||
|
||||
|
||||
@@ -30,11 +30,9 @@ def main():
|
||||
"--bind [::1]:8686"'''
|
||||
)
|
||||
|
||||
parser.add_argument('-b', '--bind', default=DEFAULT_BIND, help='Bind address (default "%(default)s")')
|
||||
parser.add_argument('-d', '--database', default='./hashserv.db', help='Database file (default "%(default)s")')
|
||||
parser.add_argument('-l', '--log', default='WARNING', help='Set logging level')
|
||||
parser.add_argument('-u', '--upstream', help='Upstream hashserv to pull hashes from')
|
||||
parser.add_argument('-r', '--read-only', action='store_true', help='Disallow write operations from clients')
|
||||
parser.add_argument('--bind', default=DEFAULT_BIND, help='Bind address (default "%(default)s")')
|
||||
parser.add_argument('--database', default='./hashserv.db', help='Database file (default "%(default)s")')
|
||||
parser.add_argument('--log', default='WARNING', help='Set logging level')
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
@@ -49,7 +47,7 @@ def main():
|
||||
console.setLevel(level)
|
||||
logger.addHandler(console)
|
||||
|
||||
server = hashserv.create_server(args.bind, args.database, upstream=args.upstream, read_only=args.read_only)
|
||||
server = hashserv.create_server(args.bind, args.database)
|
||||
server.serve_forever()
|
||||
return 0
|
||||
|
||||
|
||||
@@ -26,7 +26,7 @@ readypipeinfd = int(sys.argv[3])
|
||||
logfile = sys.argv[4]
|
||||
lockname = sys.argv[5]
|
||||
sockname = sys.argv[6]
|
||||
timeout = float(sys.argv[7])
|
||||
timeout = sys.argv[7]
|
||||
xmlrpcinterface = (sys.argv[8], int(sys.argv[9]))
|
||||
if xmlrpcinterface[0] == "None":
|
||||
xmlrpcinterface = (None, xmlrpcinterface[1])
|
||||
|
||||
@@ -16,8 +16,6 @@ import signal
|
||||
import pickle
|
||||
import traceback
|
||||
import queue
|
||||
import shlex
|
||||
import subprocess
|
||||
from multiprocessing import Lock
|
||||
from threading import Thread
|
||||
|
||||
@@ -120,9 +118,7 @@ def worker_child_fire(event, d):
|
||||
data = b"<event>" + pickle.dumps(event) + b"</event>"
|
||||
try:
|
||||
worker_pipe_lock.acquire()
|
||||
while(len(data)):
|
||||
written = worker_pipe.write(data)
|
||||
data = data[written:]
|
||||
worker_pipe.write(data)
|
||||
worker_pipe_lock.release()
|
||||
except IOError:
|
||||
sigterm_handler(None, None)
|
||||
@@ -147,27 +143,21 @@ def fork_off_task(cfg, data, databuilder, workerdata, fn, task, taskname, taskha
|
||||
# a fork() or exec*() activates PSEUDO...
|
||||
|
||||
envbackup = {}
|
||||
fakeroot = False
|
||||
fakeenv = {}
|
||||
umask = None
|
||||
|
||||
taskdep = workerdata["taskdeps"][fn]
|
||||
if 'umask' in taskdep and taskname in taskdep['umask']:
|
||||
umask = taskdep['umask'][taskname]
|
||||
elif workerdata["umask"]:
|
||||
umask = workerdata["umask"]
|
||||
if umask:
|
||||
# umask might come in as a number or text string..
|
||||
try:
|
||||
umask = int(umask, 8)
|
||||
umask = int(taskdep['umask'][taskname],8)
|
||||
except TypeError:
|
||||
pass
|
||||
umask = taskdep['umask'][taskname]
|
||||
|
||||
dry_run = cfg.dry_run or dry_run_exec
|
||||
|
||||
# We can't use the fakeroot environment in a dry run as it possibly hasn't been built
|
||||
if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not dry_run:
|
||||
fakeroot = True
|
||||
envvars = (workerdata["fakerootenv"][fn] or "").split()
|
||||
for key, value in (var.split('=') for var in envvars):
|
||||
envbackup[key] = os.environ.get(key)
|
||||
@@ -177,7 +167,7 @@ def fork_off_task(cfg, data, databuilder, workerdata, fn, task, taskname, taskha
|
||||
fakedirs = (workerdata["fakerootdirs"][fn] or "").split()
|
||||
for p in fakedirs:
|
||||
bb.utils.mkdirhier(p)
|
||||
logger.debug2('Running %s:%s under fakeroot, fakedirs: %s' %
|
||||
logger.debug(2, 'Running %s:%s under fakeroot, fakedirs: %s' %
|
||||
(fn, taskname, ', '.join(fakedirs)))
|
||||
else:
|
||||
envvars = (workerdata["fakerootnoenv"][fn] or "").split()
|
||||
@@ -286,13 +276,7 @@ def fork_off_task(cfg, data, databuilder, workerdata, fn, task, taskname, taskha
|
||||
try:
|
||||
if dry_run:
|
||||
return 0
|
||||
try:
|
||||
ret = bb.build.exec_task(fn, taskname, the_data, cfg.profile)
|
||||
finally:
|
||||
if fakeroot:
|
||||
fakerootcmd = shlex.split(the_data.getVar("FAKEROOTCMD"))
|
||||
subprocess.run(fakerootcmd + ['-S'], check=True, stdout=subprocess.PIPE)
|
||||
return ret
|
||||
return bb.build.exec_task(fn, taskname, the_data, cfg.profile)
|
||||
except:
|
||||
os._exit(1)
|
||||
if not profiling:
|
||||
@@ -337,9 +321,7 @@ class runQueueWorkerPipe():
|
||||
end = len(self.queue)
|
||||
index = self.queue.find(b"</event>")
|
||||
while index != -1:
|
||||
msg = self.queue[:index+8]
|
||||
assert msg.startswith(b"<event>") and msg.count(b"<event>") == 1
|
||||
worker_fire_prepickled(msg)
|
||||
worker_fire_prepickled(self.queue[:index+8])
|
||||
self.queue = self.queue[index+8:]
|
||||
index = self.queue.find(b"</event>")
|
||||
return (end > start)
|
||||
@@ -523,11 +505,9 @@ except BaseException as e:
|
||||
import traceback
|
||||
sys.stderr.write(traceback.format_exc())
|
||||
sys.stderr.write(str(e))
|
||||
finally:
|
||||
worker_thread_exit = True
|
||||
worker_thread.join()
|
||||
|
||||
workerlog_write("exiting")
|
||||
if not normalexit:
|
||||
sys.exit(1)
|
||||
worker_thread_exit = True
|
||||
worker_thread.join()
|
||||
|
||||
workerlog_write("exitting")
|
||||
sys.exit(0)
|
||||
|
||||
@@ -1,19 +0,0 @@
|
||||
# SPDX-License-Identifier: MIT
|
||||
#
|
||||
# Copyright (c) 2021 Joshua Watt <JPEWhacker@gmail.com>
|
||||
#
|
||||
# Dockerfile to build a bitbake hash equivalence server container
|
||||
#
|
||||
# From the root of the bitbake repository, run:
|
||||
#
|
||||
# docker build -f contrib/hashserv/Dockerfile .
|
||||
#
|
||||
|
||||
FROM alpine:3.13.1
|
||||
|
||||
RUN apk add --no-cache python3
|
||||
|
||||
COPY bin/bitbake-hashserv /opt/bbhashserv/bin/
|
||||
COPY lib/hashserv /opt/bbhashserv/lib/hashserv/
|
||||
|
||||
ENTRYPOINT ["/opt/bbhashserv/bin/bitbake-hashserv"]
|
||||
@@ -3,7 +3,7 @@
|
||||
|
||||
# You can set these variables from the command line, and also
|
||||
# from the environment for the first two.
|
||||
SPHINXOPTS ?= -j auto
|
||||
SPHINXOPTS ?=
|
||||
SPHINXBUILD ?= sphinx-build
|
||||
SOURCEDIR = .
|
||||
BUILDDIR = _build
|
||||
|
||||
@@ -244,8 +244,7 @@ want upstream. Here is an example: ::
|
||||
BBFILE_COLLECTIONS = "upstream local"
|
||||
BBFILE_PATTERN_upstream = "^/stuff/openembedded/"
|
||||
BBFILE_PATTERN_local = "^/stuff/openembedded.modified/"
|
||||
BBFILE_PRIORITY_upstream = "5"
|
||||
BBFILE_PRIORITY_local = "10"
|
||||
BBFILE_PRIORITY_upstream = "5" BBFILE_PRIORITY_local = "10"
|
||||
|
||||
.. note::
|
||||
|
||||
|
||||
@@ -441,15 +441,6 @@ Here are some example URLs: ::
|
||||
SRC_URI = "git://git.oe.handhelds.org/git/vip.git;tag=version-1"
|
||||
SRC_URI = "git://git.oe.handhelds.org/git/vip.git;protocol=http"
|
||||
|
||||
.. note::
|
||||
|
||||
Specifying passwords directly in ``git://`` urls is not supported.
|
||||
There are several reasons: ``SRC_URI`` is often written out to logs and
|
||||
other places, and that could easily leak passwords; it is also all too
|
||||
easy to share metadata without removing passwords. SSH keys, ``~/.netrc``
|
||||
and ``~/.ssh/config`` files can be used as alternatives.
|
||||
|
||||
|
||||
.. _gitsm-fetcher:
|
||||
|
||||
Git Submodule Fetcher (``gitsm://``)
|
||||
@@ -633,34 +624,6 @@ Here are some example URLs: ::
|
||||
SRC_URI = "repo://REPOROOT;protocol=git;branch=some_branch;manifest=my_manifest.xml"
|
||||
SRC_URI = "repo://REPOROOT;protocol=file;branch=some_branch;manifest=my_manifest.xml"
|
||||
|
||||
.. _az-fetcher:
|
||||
|
||||
Az Fetcher (``az://``)
|
||||
--------------------------
|
||||
|
||||
This submodule fetches data from an
|
||||
`Azure Storage account <https://docs.microsoft.com/en-us/azure/storage/>`__ ,
|
||||
it inherits its functionality from the HTTP wget fetcher, but modifies its
|
||||
behavior to accomodate the usage of a
|
||||
`Shared Access Signature (SAS) <https://docs.microsoft.com/en-us/azure/storage/common/storage-sas-overview>`__
|
||||
for non-public data.
|
||||
|
||||
Such functionality is set by the variable:
|
||||
|
||||
- :term:`AZ_SAS`: The Azure Storage Shared Access Signature provides secure
|
||||
delegate access to resources, if this variable is set, the Az Fetcher will
|
||||
use it when fetching artifacts from the cloud.
|
||||
|
||||
You can specify the AZ_SAS variable as shown below: ::
|
||||
|
||||
AZ_SAS = "se=2021-01-01&sp=r&sv=2018-11-09&sr=c&skoid=<skoid>&sig=<signature>"
|
||||
|
||||
Here is an example URL: ::
|
||||
|
||||
SRC_URI = "az://<azure-storage-account>.blob.core.windows.net/<foo_container>/<bar_file>"
|
||||
|
||||
It can also be used when setting mirrors definitions using the :term:`PREMIRRORS` variable.
|
||||
|
||||
Other Fetchers
|
||||
--------------
|
||||
|
||||
|
||||
@@ -1296,17 +1296,6 @@ For more information on task dependencies, see the
|
||||
See the ":ref:`bitbake-user-manual/bitbake-user-manual-metadata:variable flags`" section for information
|
||||
on variable flags you can use with tasks.
|
||||
|
||||
.. note::
|
||||
|
||||
While it's infrequent, it's possible to define multiple tasks as
|
||||
dependencies when calling ``addtask``. For example, here's a snippet
|
||||
from the OpenEmbedded class file ``package_tar.bbclass``::
|
||||
|
||||
addtask package_write_tar before do_build after do_packagedata do_package
|
||||
|
||||
Note how the ``package_write_tar`` task has to wait until both of
|
||||
``do_packagedata`` and ``do_package`` complete.
|
||||
|
||||
Deleting a Task
|
||||
---------------
|
||||
|
||||
@@ -1580,7 +1569,7 @@ might have an interest in viewing:
|
||||
events when each of the workers parse the base configuration or if
|
||||
the server changes configuration and reparses. Any given datastore
|
||||
only has one such event executed against it, however. If
|
||||
:term:`BB_INVALIDCONF` is set in the datastore by the event
|
||||
```BB_INVALIDCONF`` <#>`__ is set in the datastore by the event
|
||||
handler, the configuration is reparsed and a new event triggered,
|
||||
allowing the metadata to update configuration.
|
||||
|
||||
|
||||
@@ -39,19 +39,6 @@ overview of their function and contents.
|
||||
when specified allows for the Git binary from the host to be used
|
||||
rather than building ``git-native``.
|
||||
|
||||
:term:`AZ_SAS`
|
||||
Azure Storage Shared Access Signature, when using the
|
||||
:ref:`Azure Storage fetcher <bitbake-user-manual/bitbake-user-manual-fetching:fetchers>`
|
||||
This variable can be defined to be used by the fetcher to authenticate
|
||||
and gain access to non-public artifacts.
|
||||
::
|
||||
|
||||
AZ_SAS = ""se=2021-01-01&sp=r&sv=2018-11-09&sr=c&skoid=<skoid>&sig=<signature>""
|
||||
|
||||
For more information see Microsoft's Azure Storage documentation at
|
||||
https://docs.microsoft.com/en-us/azure/storage/common/storage-sas-overview
|
||||
|
||||
|
||||
:term:`B`
|
||||
The directory in which BitBake executes functions during a recipe's
|
||||
build process.
|
||||
@@ -121,10 +108,6 @@ overview of their function and contents.
|
||||
command line option). The task name specified should not include the
|
||||
``do_`` prefix.
|
||||
|
||||
:term:`BB_DEFAULT_UMASK`
|
||||
The default umask to apply to tasks if specified and no task specific
|
||||
umask flag is set.
|
||||
|
||||
:term:`BB_DISKMON_DIRS`
|
||||
Monitors disk space and available inodes during the build and allows
|
||||
you to control the build based on these parameters.
|
||||
@@ -270,6 +253,45 @@ overview of their function and contents.
|
||||
``my-recipe.bb`` is executing, the ``BB_FILENAME`` variable contains
|
||||
"/foo/path/my-recipe.bb".
|
||||
|
||||
:term:`BBFILES_DYNAMIC`
|
||||
Activates content depending on presence of identified layers. You
|
||||
identify the layers by the collections that the layers define.
|
||||
|
||||
Use the ``BBFILES_DYNAMIC`` variable to avoid ``.bbappend`` files whose
|
||||
corresponding ``.bb`` file is in a layer that attempts to modify other
|
||||
layers through ``.bbappend`` but does not want to introduce a hard
|
||||
dependency on those other layers.
|
||||
|
||||
Additionally you can prefix the rule with "!" to add ``.bbappend`` and
|
||||
``.bb`` files in case a layer is not present. Use this avoid hard
|
||||
dependency on those other layers.
|
||||
|
||||
Use the following form for ``BBFILES_DYNAMIC``: ::
|
||||
|
||||
collection_name:filename_pattern
|
||||
|
||||
The following example identifies two collection names and two filename
|
||||
patterns: ::
|
||||
|
||||
BBFILES_DYNAMIC += "\
|
||||
clang-layer:${LAYERDIR}/bbappends/meta-clang/*/*/*.bbappend \
|
||||
core:${LAYERDIR}/bbappends/openembedded-core/meta/*/*/*.bbappend \
|
||||
"
|
||||
|
||||
When the collection name is prefixed with "!" it will add the file pattern in case
|
||||
the layer is absent: ::
|
||||
|
||||
BBFILES_DYNAMIC += "\
|
||||
!clang-layer:${LAYERDIR}/backfill/meta-clang/*/*/*.bb \
|
||||
"
|
||||
|
||||
This next example shows an error message that occurs because invalid
|
||||
entries are found, which cause parsing to abort: ::
|
||||
|
||||
ERROR: BBFILES_DYNAMIC entries must be of the form {!}<collection name>:<filename pattern>, not:
|
||||
/work/my-layer/bbappends/meta-security-isafw/*/*/*.bbappend
|
||||
/work/my-layer/bbappends/openembedded-core/meta/*/*/*.bbappend
|
||||
|
||||
:term:`BB_GENERATE_MIRROR_TARBALLS`
|
||||
Causes tarballs of the Git repositories, including the Git metadata,
|
||||
to be placed in the :term:`DL_DIR` directory. Anyone
|
||||
@@ -645,45 +667,6 @@ overview of their function and contents.
|
||||
For details on the syntax, see the documentation by following the
|
||||
previous link.
|
||||
|
||||
:term:`BBFILES_DYNAMIC`
|
||||
Activates content depending on presence of identified layers. You
|
||||
identify the layers by the collections that the layers define.
|
||||
|
||||
Use the ``BBFILES_DYNAMIC`` variable to avoid ``.bbappend`` files whose
|
||||
corresponding ``.bb`` file is in a layer that attempts to modify other
|
||||
layers through ``.bbappend`` but does not want to introduce a hard
|
||||
dependency on those other layers.
|
||||
|
||||
Additionally you can prefix the rule with "!" to add ``.bbappend`` and
|
||||
``.bb`` files in case a layer is not present. Use this avoid hard
|
||||
dependency on those other layers.
|
||||
|
||||
Use the following form for ``BBFILES_DYNAMIC``: ::
|
||||
|
||||
collection_name:filename_pattern
|
||||
|
||||
The following example identifies two collection names and two filename
|
||||
patterns: ::
|
||||
|
||||
BBFILES_DYNAMIC += "\
|
||||
clang-layer:${LAYERDIR}/bbappends/meta-clang/*/*/*.bbappend \
|
||||
core:${LAYERDIR}/bbappends/openembedded-core/meta/*/*/*.bbappend \
|
||||
"
|
||||
|
||||
When the collection name is prefixed with "!" it will add the file pattern in case
|
||||
the layer is absent: ::
|
||||
|
||||
BBFILES_DYNAMIC += "\
|
||||
!clang-layer:${LAYERDIR}/backfill/meta-clang/*/*/*.bb \
|
||||
"
|
||||
|
||||
This next example shows an error message that occurs because invalid
|
||||
entries are found, which cause parsing to abort: ::
|
||||
|
||||
ERROR: BBFILES_DYNAMIC entries must be of the form {!}<collection name>:<filename pattern>, not:
|
||||
/work/my-layer/bbappends/meta-security-isafw/*/*/*.bbappend
|
||||
/work/my-layer/bbappends/openembedded-core/meta/*/*/*.bbappend
|
||||
|
||||
:term:`BBINCLUDED`
|
||||
Contains a space-separated list of all of all files that BitBake's
|
||||
parser included during parsing of the current file.
|
||||
@@ -1096,8 +1079,8 @@ overview of their function and contents.
|
||||
PREFERRED_PROVIDER_aaa = "bbb"
|
||||
|
||||
:term:`PREFERRED_VERSION`
|
||||
If there are multiple versions of a recipe available, this variable
|
||||
determines which version should be given preference. You must always
|
||||
If there are multiple versions of recipes available, this variable
|
||||
determines which recipe should be given preference. You must always
|
||||
suffix the variable with the :term:`PN` you want to
|
||||
select, and you should set :term:`PV` accordingly for
|
||||
precedence.
|
||||
@@ -1117,10 +1100,6 @@ overview of their function and contents.
|
||||
end of the string. You cannot use the wildcard character in any other
|
||||
location of the string.
|
||||
|
||||
If a recipe with the specified version is not available, a warning
|
||||
message will be shown. See :term:`REQUIRED_VERSION` if you want this
|
||||
to be an error instead.
|
||||
|
||||
:term:`PREMIRRORS`
|
||||
Specifies additional paths from which BitBake gets source code. When
|
||||
the build system searches for source code, it first tries the local
|
||||
@@ -1231,16 +1210,6 @@ overview of their function and contents.
|
||||
The directory in which a local copy of a ``google-repo`` directory is
|
||||
stored when it is synced.
|
||||
|
||||
:term:`REQUIRED_VERSION`
|
||||
If there are multiple versions of a recipe available, this variable
|
||||
determines which version should be given preference. ``REQUIRED_VERSION``
|
||||
works in exactly the same manner as :term:`PREFERRED_VERSION`, except
|
||||
that if the specified version is not available then an error message
|
||||
is shown and the build fails immediately.
|
||||
|
||||
If both ``REQUIRED_VERSION`` and ``PREFERRED_VERSION`` are set for
|
||||
the same recipe, the ``REQUIRED_VERSION`` value applies.
|
||||
|
||||
:term:`RPROVIDES`
|
||||
A list of package name aliases that a package also provides. These
|
||||
aliases are useful for satisfying runtime dependencies of other
|
||||
@@ -1330,8 +1299,6 @@ overview of their function and contents.
|
||||
- ``svn://`` : Fetches files from a Subversion (``svn``) revision
|
||||
control repository.
|
||||
|
||||
- ``az://`` : Fetches files from an Azure Storage account using HTTPS.
|
||||
|
||||
Here are some additional options worth mentioning:
|
||||
|
||||
- ``unpack`` : Controls whether or not to unpack the file if it is
|
||||
|
||||
@@ -14,7 +14,6 @@
|
||||
# import sys
|
||||
# sys.path.insert(0, os.path.abspath('.'))
|
||||
|
||||
import sys
|
||||
import datetime
|
||||
|
||||
current_version = "dev"
|
||||
|
||||
@@ -9,7 +9,7 @@
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
__version__ = "1.50.0"
|
||||
__version__ = "1.48.0"
|
||||
|
||||
import sys
|
||||
if sys.version_info < (3, 5, 0):
|
||||
@@ -21,8 +21,8 @@ class BBHandledException(Exception):
|
||||
The big dilemma for generic bitbake code is what information to give the user
|
||||
when an exception occurs. Any exception inheriting this base exception class
|
||||
has already provided information to the user via some 'fired' message type such as
|
||||
an explicitly fired event using bb.fire, or a bb.error message. If bitbake
|
||||
encounters an exception derived from this class, no backtrace or other information
|
||||
an explicitly fired event using bb.fire, or a bb.error message. If bitbake
|
||||
encounters an exception derived from this class, no backtrace or other information
|
||||
will be given to the user, its assumed the earlier event provided the relevant information.
|
||||
"""
|
||||
pass
|
||||
@@ -42,23 +42,14 @@ class BBLoggerMixin(object):
|
||||
|
||||
def setup_bblogger(self, name):
|
||||
if name.split(".")[0] == "BitBake":
|
||||
self.debug = self._debug_helper
|
||||
|
||||
def _debug_helper(self, *args, **kwargs):
|
||||
return self.bbdebug(1, *args, **kwargs)
|
||||
|
||||
def debug2(self, *args, **kwargs):
|
||||
return self.bbdebug(2, *args, **kwargs)
|
||||
|
||||
def debug3(self, *args, **kwargs):
|
||||
return self.bbdebug(3, *args, **kwargs)
|
||||
self.debug = self.bbdebug
|
||||
|
||||
def bbdebug(self, level, msg, *args, **kwargs):
|
||||
loglevel = logging.DEBUG - level + 1
|
||||
if not bb.event.worker_pid:
|
||||
if self.name in bb.msg.loggerDefaultDomains and loglevel > (bb.msg.loggerDefaultDomains[self.name]):
|
||||
return
|
||||
if loglevel < bb.msg.loggerDefaultLogLevel:
|
||||
if loglevel > bb.msg.loggerDefaultLogLevel:
|
||||
return
|
||||
return self.log(loglevel, msg, *args, **kwargs)
|
||||
|
||||
@@ -137,7 +128,7 @@ def debug(lvl, *args):
|
||||
mainlogger.warning("Passed invalid debug level '%s' to bb.debug", lvl)
|
||||
args = (lvl,) + args
|
||||
lvl = 1
|
||||
mainlogger.bbdebug(lvl, ''.join(args))
|
||||
mainlogger.debug(lvl, ''.join(args))
|
||||
|
||||
def note(*args):
|
||||
mainlogger.info(''.join(args))
|
||||
|
||||
@@ -298,10 +298,6 @@ def exec_func_python(func, d, runfile, cwd=None):
|
||||
comp = utils.better_compile(code, func, "exec_python_func() autogenerated")
|
||||
utils.better_exec(comp, {"d": d}, code, "exec_python_func() autogenerated")
|
||||
finally:
|
||||
# We want any stdout/stderr to be printed before any other log messages to make debugging
|
||||
# more accurate. In some cases we seem to lose stdout/stderr entirely in logging tests without this.
|
||||
sys.stdout.flush()
|
||||
sys.stderr.flush()
|
||||
bb.debug(2, "Python function %s finished" % func)
|
||||
|
||||
if cwd and olddir:
|
||||
@@ -587,7 +583,7 @@ def _exec_task(fn, task, d, quieterr):
|
||||
logger.error("No such task: %s" % task)
|
||||
return 1
|
||||
|
||||
logger.debug("Executing task %s", task)
|
||||
logger.debug(1, "Executing task %s", task)
|
||||
|
||||
localdata = _task_data(fn, task, d)
|
||||
tempdir = localdata.getVar('T')
|
||||
@@ -600,7 +596,7 @@ def _exec_task(fn, task, d, quieterr):
|
||||
curnice = os.nice(0)
|
||||
nice = int(nice) - curnice
|
||||
newnice = os.nice(nice)
|
||||
logger.debug("Renice to %s " % newnice)
|
||||
logger.debug(1, "Renice to %s " % newnice)
|
||||
ionice = localdata.getVar("BB_TASK_IONICE_LEVEL")
|
||||
if ionice:
|
||||
try:
|
||||
@@ -698,16 +694,12 @@ def _exec_task(fn, task, d, quieterr):
|
||||
except bb.BBHandledException:
|
||||
event.fire(TaskFailed(task, fn, logfn, localdata, True), localdata)
|
||||
return 1
|
||||
except (Exception, SystemExit) as exc:
|
||||
except Exception as exc:
|
||||
if quieterr:
|
||||
event.fire(TaskFailedSilent(task, fn, logfn, localdata), localdata)
|
||||
else:
|
||||
errprinted = errchk.triggered
|
||||
# If the output is already on stdout, we've printed the information in the
|
||||
# logs once already so don't duplicate
|
||||
if verboseStdoutLogging:
|
||||
errprinted = True
|
||||
logger.error(repr(exc))
|
||||
logger.error(str(exc))
|
||||
event.fire(TaskFailed(task, fn, logfn, localdata, errprinted), localdata)
|
||||
return 1
|
||||
finally:
|
||||
@@ -728,7 +720,7 @@ def _exec_task(fn, task, d, quieterr):
|
||||
|
||||
logfile.close()
|
||||
if os.path.exists(logfn) and os.path.getsize(logfn) == 0:
|
||||
logger.debug2("Zero size logfn %s, removing", logfn)
|
||||
logger.debug(2, "Zero size logfn %s, removing", logfn)
|
||||
bb.utils.remove(logfn)
|
||||
bb.utils.remove(loglink)
|
||||
event.fire(TaskSucceeded(task, fn, logfn, localdata), localdata)
|
||||
@@ -862,23 +854,6 @@ def make_stamp(task, d, file_name = None):
|
||||
file_name = d.getVar('BB_FILENAME')
|
||||
bb.parse.siggen.dump_sigtask(file_name, task, stampbase, True)
|
||||
|
||||
def find_stale_stamps(task, d, file_name=None):
|
||||
current = stamp_internal(task, d, file_name)
|
||||
current2 = stamp_internal(task + "_setscene", d, file_name)
|
||||
cleanmask = stamp_cleanmask_internal(task, d, file_name)
|
||||
found = []
|
||||
for mask in cleanmask:
|
||||
for name in glob.glob(mask):
|
||||
if "sigdata" in name or "sigbasedata" in name:
|
||||
continue
|
||||
if name.endswith('.taint'):
|
||||
continue
|
||||
if name == current or name == current2:
|
||||
continue
|
||||
logger.debug2("Stampfile %s does not match %s or %s" % (name, current, current2))
|
||||
found.append(name)
|
||||
return found
|
||||
|
||||
def del_stamp(task, d, file_name = None):
|
||||
"""
|
||||
Removes a stamp for a given task
|
||||
@@ -1033,8 +1008,6 @@ def tasksbetween(task_start, task_end, d):
|
||||
def follow_chain(task, endtask, chain=None):
|
||||
if not chain:
|
||||
chain = []
|
||||
if task in chain:
|
||||
bb.fatal("Circular task dependencies as %s depends on itself via the chain %s" % (task, " -> ".join(chain)))
|
||||
chain.append(task)
|
||||
for othertask in tasks:
|
||||
if othertask == task:
|
||||
|
||||
@@ -19,15 +19,14 @@
|
||||
import os
|
||||
import logging
|
||||
import pickle
|
||||
from collections import defaultdict
|
||||
from collections.abc import Mapping
|
||||
from collections import defaultdict, Mapping
|
||||
import bb.utils
|
||||
from bb import PrefixLoggerAdapter
|
||||
import re
|
||||
|
||||
logger = logging.getLogger("BitBake.Cache")
|
||||
|
||||
__cache_version__ = "154"
|
||||
__cache_version__ = "153"
|
||||
|
||||
def getCacheFile(path, filename, mc, data_hash):
|
||||
mcspec = ''
|
||||
@@ -95,7 +94,6 @@ class CoreRecipeInfo(RecipeInfoCommon):
|
||||
if not self.packages:
|
||||
self.packages.append(self.pn)
|
||||
self.packages_dynamic = self.listvar('PACKAGES_DYNAMIC', metadata)
|
||||
self.rprovides_pkg = self.pkgvar('RPROVIDES', self.packages, metadata)
|
||||
|
||||
self.skipreason = self.getvar('__SKIPPED', metadata)
|
||||
if self.skipreason:
|
||||
@@ -122,12 +120,12 @@ class CoreRecipeInfo(RecipeInfoCommon):
|
||||
self.depends = self.depvar('DEPENDS', metadata)
|
||||
self.rdepends = self.depvar('RDEPENDS', metadata)
|
||||
self.rrecommends = self.depvar('RRECOMMENDS', metadata)
|
||||
self.rprovides_pkg = self.pkgvar('RPROVIDES', self.packages, metadata)
|
||||
self.rdepends_pkg = self.pkgvar('RDEPENDS', self.packages, metadata)
|
||||
self.rrecommends_pkg = self.pkgvar('RRECOMMENDS', self.packages, metadata)
|
||||
self.inherits = self.getvar('__inherit_cache', metadata, expand=False)
|
||||
self.fakerootenv = self.getvar('FAKEROOTENV', metadata)
|
||||
self.fakerootdirs = self.getvar('FAKEROOTDIRS', metadata)
|
||||
self.fakerootlogs = self.getvar('FAKEROOTLOGS', metadata)
|
||||
self.fakerootnoenv = self.getvar('FAKEROOTNOENV', metadata)
|
||||
self.extradepsfunc = self.getvar('calculate_extra_depends', metadata)
|
||||
|
||||
@@ -165,7 +163,6 @@ class CoreRecipeInfo(RecipeInfoCommon):
|
||||
cachedata.fakerootenv = {}
|
||||
cachedata.fakerootnoenv = {}
|
||||
cachedata.fakerootdirs = {}
|
||||
cachedata.fakerootlogs = {}
|
||||
cachedata.extradepsfunc = {}
|
||||
|
||||
def add_cacheData(self, cachedata, fn):
|
||||
@@ -218,7 +215,7 @@ class CoreRecipeInfo(RecipeInfoCommon):
|
||||
if not self.not_world:
|
||||
cachedata.possible_world.append(fn)
|
||||
#else:
|
||||
# logger.debug2("EXCLUDE FROM WORLD: %s", fn)
|
||||
# logger.debug(2, "EXCLUDE FROM WORLD: %s", fn)
|
||||
|
||||
# create a collection of all targets for sanity checking
|
||||
# tasks, such as upstream versions, license, and tools for
|
||||
@@ -234,7 +231,6 @@ class CoreRecipeInfo(RecipeInfoCommon):
|
||||
cachedata.fakerootenv[fn] = self.fakerootenv
|
||||
cachedata.fakerootnoenv[fn] = self.fakerootnoenv
|
||||
cachedata.fakerootdirs[fn] = self.fakerootdirs
|
||||
cachedata.fakerootlogs[fn] = self.fakerootlogs
|
||||
cachedata.extradepsfunc[fn] = self.extradepsfunc
|
||||
|
||||
def virtualfn2realfn(virtualfn):
|
||||
@@ -242,7 +238,7 @@ def virtualfn2realfn(virtualfn):
|
||||
Convert a virtual file name to a real one + the associated subclass keyword
|
||||
"""
|
||||
mc = ""
|
||||
if virtualfn.startswith('mc:') and virtualfn.count(':') >= 2:
|
||||
if virtualfn.startswith('mc:'):
|
||||
elems = virtualfn.split(':')
|
||||
mc = elems[1]
|
||||
virtualfn = ":".join(elems[2:])
|
||||
@@ -272,7 +268,7 @@ def variant2virtual(realfn, variant):
|
||||
"""
|
||||
if variant == "":
|
||||
return realfn
|
||||
if variant.startswith("mc:") and variant.count(':') >= 2:
|
||||
if variant.startswith("mc:"):
|
||||
elems = variant.split(":")
|
||||
if elems[2]:
|
||||
return "mc:" + elems[1] + ":virtual:" + ":".join(elems[2:]) + ":" + realfn
|
||||
@@ -327,7 +323,7 @@ class NoCache(object):
|
||||
Return a complete set of data for fn.
|
||||
To do this, we need to parse the file.
|
||||
"""
|
||||
logger.debug("Parsing %s (full)" % virtualfn)
|
||||
logger.debug(1, "Parsing %s (full)" % virtualfn)
|
||||
(fn, virtual, mc) = virtualfn2realfn(virtualfn)
|
||||
bb_data = self.load_bbfile(virtualfn, appends, virtonly=True)
|
||||
return bb_data[virtual]
|
||||
@@ -404,7 +400,7 @@ class Cache(NoCache):
|
||||
|
||||
self.cachefile = self.getCacheFile("bb_cache.dat")
|
||||
|
||||
self.logger.debug("Cache dir: %s", self.cachedir)
|
||||
self.logger.debug(1, "Cache dir: %s", self.cachedir)
|
||||
bb.utils.mkdirhier(self.cachedir)
|
||||
|
||||
cache_ok = True
|
||||
@@ -412,7 +408,7 @@ class Cache(NoCache):
|
||||
for cache_class in self.caches_array:
|
||||
cachefile = self.getCacheFile(cache_class.cachefile)
|
||||
cache_exists = os.path.exists(cachefile)
|
||||
self.logger.debug2("Checking if %s exists: %r", cachefile, cache_exists)
|
||||
self.logger.debug(2, "Checking if %s exists: %r", cachefile, cache_exists)
|
||||
cache_ok = cache_ok and cache_exists
|
||||
cache_class.init_cacheData(self)
|
||||
if cache_ok:
|
||||
@@ -420,7 +416,7 @@ class Cache(NoCache):
|
||||
elif os.path.isfile(self.cachefile):
|
||||
self.logger.info("Out of date cache found, rebuilding...")
|
||||
else:
|
||||
self.logger.debug("Cache file %s not found, building..." % self.cachefile)
|
||||
self.logger.debug(1, "Cache file %s not found, building..." % self.cachefile)
|
||||
|
||||
# We don't use the symlink, its just for debugging convinience
|
||||
if self.mc:
|
||||
@@ -453,11 +449,13 @@ class Cache(NoCache):
|
||||
return cachesize
|
||||
|
||||
def load_cachefile(self, progress):
|
||||
cachesize = self.cachesize()
|
||||
previous_progress = 0
|
||||
previous_percent = 0
|
||||
|
||||
for cache_class in self.caches_array:
|
||||
cachefile = self.getCacheFile(cache_class.cachefile)
|
||||
self.logger.debug('Loading cache file: %s' % cachefile)
|
||||
self.logger.debug(1, 'Loading cache file: %s' % cachefile)
|
||||
with open(cachefile, "rb") as cachefile:
|
||||
pickled = pickle.Unpickler(cachefile)
|
||||
# Check cache version information
|
||||
@@ -504,7 +502,7 @@ class Cache(NoCache):
|
||||
|
||||
def parse(self, filename, appends):
|
||||
"""Parse the specified filename, returning the recipe information"""
|
||||
self.logger.debug("Parsing %s", filename)
|
||||
self.logger.debug(1, "Parsing %s", filename)
|
||||
infos = []
|
||||
datastores = self.load_bbfile(filename, appends, mc=self.mc)
|
||||
depends = []
|
||||
@@ -558,7 +556,7 @@ class Cache(NoCache):
|
||||
cached, infos = self.load(fn, appends)
|
||||
for virtualfn, info_array in infos:
|
||||
if info_array[0].skipped:
|
||||
self.logger.debug("Skipping %s: %s", virtualfn, info_array[0].skipreason)
|
||||
self.logger.debug(1, "Skipping %s: %s", virtualfn, info_array[0].skipreason)
|
||||
skipped += 1
|
||||
else:
|
||||
self.add_info(virtualfn, info_array, cacheData, not cached)
|
||||
@@ -594,21 +592,21 @@ class Cache(NoCache):
|
||||
|
||||
# File isn't in depends_cache
|
||||
if not fn in self.depends_cache:
|
||||
self.logger.debug2("%s is not cached", fn)
|
||||
self.logger.debug(2, "%s is not cached", fn)
|
||||
return False
|
||||
|
||||
mtime = bb.parse.cached_mtime_noerror(fn)
|
||||
|
||||
# Check file still exists
|
||||
if mtime == 0:
|
||||
self.logger.debug2("%s no longer exists", fn)
|
||||
self.logger.debug(2, "%s no longer exists", fn)
|
||||
self.remove(fn)
|
||||
return False
|
||||
|
||||
info_array = self.depends_cache[fn]
|
||||
# Check the file's timestamp
|
||||
if mtime != info_array[0].timestamp:
|
||||
self.logger.debug2("%s changed", fn)
|
||||
self.logger.debug(2, "%s changed", fn)
|
||||
self.remove(fn)
|
||||
return False
|
||||
|
||||
@@ -619,13 +617,13 @@ class Cache(NoCache):
|
||||
fmtime = bb.parse.cached_mtime_noerror(f)
|
||||
# Check if file still exists
|
||||
if old_mtime != 0 and fmtime == 0:
|
||||
self.logger.debug2("%s's dependency %s was removed",
|
||||
self.logger.debug(2, "%s's dependency %s was removed",
|
||||
fn, f)
|
||||
self.remove(fn)
|
||||
return False
|
||||
|
||||
if (fmtime != old_mtime):
|
||||
self.logger.debug2("%s's dependency %s changed",
|
||||
self.logger.debug(2, "%s's dependency %s changed",
|
||||
fn, f)
|
||||
self.remove(fn)
|
||||
return False
|
||||
@@ -642,14 +640,14 @@ class Cache(NoCache):
|
||||
continue
|
||||
f, exist = f.split(":")
|
||||
if (exist == "True" and not os.path.exists(f)) or (exist == "False" and os.path.exists(f)):
|
||||
self.logger.debug2("%s's file checksum list file %s changed",
|
||||
self.logger.debug(2, "%s's file checksum list file %s changed",
|
||||
fn, f)
|
||||
self.remove(fn)
|
||||
return False
|
||||
|
||||
if tuple(appends) != tuple(info_array[0].appends):
|
||||
self.logger.debug2("appends for %s changed", fn)
|
||||
self.logger.debug2("%s to %s" % (str(appends), str(info_array[0].appends)))
|
||||
self.logger.debug(2, "appends for %s changed", fn)
|
||||
self.logger.debug(2, "%s to %s" % (str(appends), str(info_array[0].appends)))
|
||||
self.remove(fn)
|
||||
return False
|
||||
|
||||
@@ -658,10 +656,10 @@ class Cache(NoCache):
|
||||
virtualfn = variant2virtual(fn, cls)
|
||||
self.clean.add(virtualfn)
|
||||
if virtualfn not in self.depends_cache:
|
||||
self.logger.debug2("%s is not cached", virtualfn)
|
||||
self.logger.debug(2, "%s is not cached", virtualfn)
|
||||
invalid = True
|
||||
elif len(self.depends_cache[virtualfn]) != len(self.caches_array):
|
||||
self.logger.debug2("Extra caches missing for %s?" % virtualfn)
|
||||
self.logger.debug(2, "Extra caches missing for %s?" % virtualfn)
|
||||
invalid = True
|
||||
|
||||
# If any one of the variants is not present, mark as invalid for all
|
||||
@@ -669,10 +667,10 @@ class Cache(NoCache):
|
||||
for cls in info_array[0].variants:
|
||||
virtualfn = variant2virtual(fn, cls)
|
||||
if virtualfn in self.clean:
|
||||
self.logger.debug2("Removing %s from cache", virtualfn)
|
||||
self.logger.debug(2, "Removing %s from cache", virtualfn)
|
||||
self.clean.remove(virtualfn)
|
||||
if fn in self.clean:
|
||||
self.logger.debug2("Marking %s as not clean", fn)
|
||||
self.logger.debug(2, "Marking %s as not clean", fn)
|
||||
self.clean.remove(fn)
|
||||
return False
|
||||
|
||||
@@ -685,10 +683,10 @@ class Cache(NoCache):
|
||||
Called from the parser in error cases
|
||||
"""
|
||||
if fn in self.depends_cache:
|
||||
self.logger.debug("Removing %s from cache", fn)
|
||||
self.logger.debug(1, "Removing %s from cache", fn)
|
||||
del self.depends_cache[fn]
|
||||
if fn in self.clean:
|
||||
self.logger.debug("Marking %s as unclean", fn)
|
||||
self.logger.debug(1, "Marking %s as unclean", fn)
|
||||
self.clean.remove(fn)
|
||||
|
||||
def sync(self):
|
||||
@@ -701,13 +699,13 @@ class Cache(NoCache):
|
||||
return
|
||||
|
||||
if self.cacheclean:
|
||||
self.logger.debug2("Cache is clean, not saving.")
|
||||
self.logger.debug(2, "Cache is clean, not saving.")
|
||||
return
|
||||
|
||||
for cache_class in self.caches_array:
|
||||
cache_class_name = cache_class.__name__
|
||||
cachefile = self.getCacheFile(cache_class.cachefile)
|
||||
self.logger.debug2("Writing %s", cachefile)
|
||||
self.logger.debug(2, "Writing %s", cachefile)
|
||||
with open(cachefile, "wb") as f:
|
||||
p = pickle.Pickler(f, pickle.HIGHEST_PROTOCOL)
|
||||
p.dump(__cache_version__)
|
||||
@@ -818,6 +816,10 @@ class MulticonfigCache(Mapping):
|
||||
for k in self.__caches:
|
||||
yield k
|
||||
|
||||
def keys(self):
|
||||
return self.__caches[key]
|
||||
|
||||
|
||||
def init(cooker):
|
||||
"""
|
||||
The Objective: Cache the minimum amount of data possible yet get to the
|
||||
@@ -883,7 +885,7 @@ class MultiProcessCache(object):
|
||||
bb.utils.mkdirhier(cachedir)
|
||||
self.cachefile = os.path.join(cachedir,
|
||||
cache_file_name or self.__class__.cache_file_name)
|
||||
logger.debug("Using cache in '%s'", self.cachefile)
|
||||
logger.debug(1, "Using cache in '%s'", self.cachefile)
|
||||
|
||||
glf = bb.utils.lockfile(self.cachefile + ".lock")
|
||||
|
||||
@@ -989,7 +991,7 @@ class SimpleCache(object):
|
||||
bb.utils.mkdirhier(cachedir)
|
||||
self.cachefile = os.path.join(cachedir,
|
||||
cache_file_name or self.__class__.cache_file_name)
|
||||
logger.debug("Using cache in '%s'", self.cachefile)
|
||||
logger.debug(1, "Using cache in '%s'", self.cachefile)
|
||||
|
||||
glf = bb.utils.lockfile(self.cachefile + ".lock")
|
||||
|
||||
|
||||
@@ -212,9 +212,9 @@ class PythonParser():
|
||||
funcstr = codegen.to_source(func)
|
||||
argstr = codegen.to_source(arg)
|
||||
except TypeError:
|
||||
self.log.debug2('Failed to convert function and argument to source form')
|
||||
self.log.debug(2, 'Failed to convert function and argument to source form')
|
||||
else:
|
||||
self.log.debug(self.unhandled_message % (funcstr, argstr))
|
||||
self.log.debug(1, self.unhandled_message % (funcstr, argstr))
|
||||
|
||||
def visit_Call(self, node):
|
||||
name = self.called_node_name(node.func)
|
||||
@@ -450,7 +450,7 @@ class ShellParser():
|
||||
|
||||
cmd = word[1]
|
||||
if cmd.startswith("$"):
|
||||
self.log.debug(self.unhandled_template % cmd)
|
||||
self.log.debug(1, self.unhandled_template % cmd)
|
||||
elif cmd == "eval":
|
||||
command = " ".join(word for _, word in words[1:])
|
||||
self._parse_shell(command)
|
||||
|
||||
@@ -73,9 +73,7 @@ class SkippedPackage:
|
||||
self.pn = info.pn
|
||||
self.skipreason = info.skipreason
|
||||
self.provides = info.provides
|
||||
self.rprovides = info.packages + info.rprovides
|
||||
for package in info.packages:
|
||||
self.rprovides += info.rprovides_pkg[package]
|
||||
self.rprovides = info.rprovides
|
||||
elif reason:
|
||||
self.skipreason = reason
|
||||
|
||||
@@ -382,29 +380,14 @@ class BBCooker:
|
||||
try:
|
||||
self.prhost = prserv.serv.auto_start(self.data)
|
||||
except prserv.serv.PRServiceConfigError as e:
|
||||
bb.fatal("Unable to start PR Server, exiting")
|
||||
bb.fatal("Unable to start PR Server, exitting")
|
||||
|
||||
if self.data.getVar("BB_HASHSERVE") == "auto":
|
||||
# Create a new hash server bound to a unix domain socket
|
||||
if not self.hashserv:
|
||||
dbfile = (self.data.getVar("PERSISTENT_DIR") or self.data.getVar("CACHE")) + "/hashserv.db"
|
||||
upstream = self.data.getVar("BB_HASHSERVE_UPSTREAM") or None
|
||||
if upstream:
|
||||
import socket
|
||||
try:
|
||||
sock = socket.create_connection(upstream.split(":"), 5)
|
||||
sock.close()
|
||||
except socket.error as e:
|
||||
bb.warn("BB_HASHSERVE_UPSTREAM is not valid, unable to connect hash equivalence server at '%s': %s"
|
||||
% (upstream, repr(e)))
|
||||
|
||||
self.hashservaddr = "unix://%s/hashserve.sock" % self.data.getVar("TOPDIR")
|
||||
self.hashserv = hashserv.create_server(
|
||||
self.hashservaddr,
|
||||
dbfile,
|
||||
sync=False,
|
||||
upstream=upstream,
|
||||
)
|
||||
self.hashserv = hashserv.create_server(self.hashservaddr, dbfile, sync=False)
|
||||
self.hashserv.process = multiprocessing.Process(target=self.hashserv.serve_forever)
|
||||
self.hashserv.process.start()
|
||||
self.data.setVar("BB_HASHSERVE", self.hashservaddr)
|
||||
@@ -426,8 +409,6 @@ class BBCooker:
|
||||
self.data.disableTracking()
|
||||
|
||||
def parseConfiguration(self):
|
||||
self.updateCacheSync()
|
||||
|
||||
# Change nice level if we're asked to
|
||||
nice = self.data.getVar("BB_NICE_LEVEL")
|
||||
if nice:
|
||||
@@ -458,7 +439,7 @@ class BBCooker:
|
||||
continue
|
||||
except AttributeError:
|
||||
pass
|
||||
logger.debug("Marking as dirty due to '%s' option change to '%s'" % (o, options[o]))
|
||||
logger.debug(1, "Marking as dirty due to '%s' option change to '%s'" % (o, options[o]))
|
||||
print("Marking as dirty due to '%s' option change to '%s'" % (o, options[o]))
|
||||
clean = False
|
||||
if hasattr(self.configuration, o):
|
||||
@@ -485,17 +466,17 @@ class BBCooker:
|
||||
|
||||
for k in bb.utils.approved_variables():
|
||||
if k in environment and k not in self.configuration.env:
|
||||
logger.debug("Updating new environment variable %s to %s" % (k, environment[k]))
|
||||
logger.debug(1, "Updating new environment variable %s to %s" % (k, environment[k]))
|
||||
self.configuration.env[k] = environment[k]
|
||||
clean = False
|
||||
if k in self.configuration.env and k not in environment:
|
||||
logger.debug("Updating environment variable %s (deleted)" % (k))
|
||||
logger.debug(1, "Updating environment variable %s (deleted)" % (k))
|
||||
del self.configuration.env[k]
|
||||
clean = False
|
||||
if k not in self.configuration.env and k not in environment:
|
||||
continue
|
||||
if environment[k] != self.configuration.env[k]:
|
||||
logger.debug("Updating environment variable %s from %s to %s" % (k, self.configuration.env[k], environment[k]))
|
||||
logger.debug(1, "Updating environment variable %s from %s to %s" % (k, self.configuration.env[k], environment[k]))
|
||||
self.configuration.env[k] = environment[k]
|
||||
clean = False
|
||||
|
||||
@@ -503,7 +484,7 @@ class BBCooker:
|
||||
self.configuration.env = environment
|
||||
|
||||
if not clean:
|
||||
logger.debug("Base environment change, triggering reparse")
|
||||
logger.debug(1, "Base environment change, triggering reparse")
|
||||
self.reset()
|
||||
|
||||
def runCommands(self, server, data, abort):
|
||||
@@ -517,30 +498,22 @@ class BBCooker:
|
||||
|
||||
def showVersions(self):
|
||||
|
||||
(latest_versions, preferred_versions, required) = self.findProviders()
|
||||
(latest_versions, preferred_versions) = self.findProviders()
|
||||
|
||||
logger.plain("%-35s %25s %25s %25s", "Recipe Name", "Latest Version", "Preferred Version", "Required Version")
|
||||
logger.plain("%-35s %25s %25s %25s\n", "===========", "==============", "=================", "================")
|
||||
logger.plain("%-35s %25s %25s", "Recipe Name", "Latest Version", "Preferred Version")
|
||||
logger.plain("%-35s %25s %25s\n", "===========", "==============", "=================")
|
||||
|
||||
for p in sorted(self.recipecaches[''].pkg_pn):
|
||||
preferred = preferred_versions[p]
|
||||
pref = preferred_versions[p]
|
||||
latest = latest_versions[p]
|
||||
requiredstr = ""
|
||||
preferredstr = ""
|
||||
if required[p]:
|
||||
if preferred[0] is not None:
|
||||
requiredstr = preferred[0][0] + ":" + preferred[0][1] + '-' + preferred[0][2]
|
||||
else:
|
||||
bb.fatal("REQUIRED_VERSION of package %s not available" % p)
|
||||
else:
|
||||
preferredstr = preferred[0][0] + ":" + preferred[0][1] + '-' + preferred[0][2]
|
||||
|
||||
prefstr = pref[0][0] + ":" + pref[0][1] + '-' + pref[0][2]
|
||||
lateststr = latest[0][0] + ":" + latest[0][1] + "-" + latest[0][2]
|
||||
|
||||
if preferred == latest:
|
||||
preferredstr = ""
|
||||
if pref == latest:
|
||||
prefstr = ""
|
||||
|
||||
logger.plain("%-35s %25s %25s %25s", p, lateststr, preferredstr, requiredstr)
|
||||
logger.plain("%-35s %25s %25s", p, lateststr, prefstr)
|
||||
|
||||
def showEnvironment(self, buildfile=None, pkgs_to_build=None):
|
||||
"""
|
||||
@@ -639,7 +612,7 @@ class BBCooker:
|
||||
# Replace string such as "mc:*:bash"
|
||||
# into "mc:A:bash mc:B:bash bash"
|
||||
for k in targetlist:
|
||||
if k.startswith("mc:") and k.count(':') >= 2:
|
||||
if k.startswith("mc:"):
|
||||
if wildcard:
|
||||
bb.fatal('multiconfig conflict')
|
||||
if k.split(":")[1] == "*":
|
||||
@@ -673,7 +646,7 @@ class BBCooker:
|
||||
for k in fulltargetlist:
|
||||
origk = k
|
||||
mc = ""
|
||||
if k.startswith("mc:") and k.count(':') >= 2:
|
||||
if k.startswith("mc:"):
|
||||
mc = k.split(":")[1]
|
||||
k = ":".join(k.split(":")[2:])
|
||||
ktask = task
|
||||
@@ -722,7 +695,7 @@ class BBCooker:
|
||||
if depmc not in self.multiconfigs:
|
||||
bb.fatal("Multiconfig dependency %s depends on nonexistent multiconfig configuration named configuration %s" % (k,depmc))
|
||||
else:
|
||||
logger.debug("Adding providers for multiconfig dependency %s" % l[3])
|
||||
logger.debug(1, "Adding providers for multiconfig dependency %s" % l[3])
|
||||
taskdata[depmc].add_provider(localdata[depmc], self.recipecaches[depmc], l[3])
|
||||
seen.add(k)
|
||||
new = True
|
||||
@@ -815,9 +788,7 @@ class BBCooker:
|
||||
for dep in rq.rqdata.runtaskentries[tid].depends:
|
||||
(depmc, depfn, _, deptaskfn) = bb.runqueue.split_tid_mcfn(dep)
|
||||
deppn = self.recipecaches[depmc].pkg_fn[deptaskfn]
|
||||
if depmc:
|
||||
depmc = "mc:" + depmc + ":"
|
||||
depend_tree["tdepends"][dotname].append("%s%s.%s" % (depmc, deppn, bb.runqueue.taskname_from_tid(dep)))
|
||||
depend_tree["tdepends"][dotname].append("%s.%s" % (deppn, bb.runqueue.taskname_from_tid(dep)))
|
||||
if taskfn not in seen_fns:
|
||||
seen_fns.append(taskfn)
|
||||
packages = []
|
||||
@@ -1088,16 +1059,10 @@ class BBCooker:
|
||||
if pn in self.recipecaches[mc].providers:
|
||||
filenames = self.recipecaches[mc].providers[pn]
|
||||
eligible, foundUnique = bb.providers.filterProviders(filenames, pn, self.databuilder.mcdata[mc], self.recipecaches[mc])
|
||||
if eligible is not None:
|
||||
filename = eligible[0]
|
||||
else:
|
||||
filename = None
|
||||
filename = eligible[0]
|
||||
return None, None, None, filename
|
||||
elif pn in self.recipecaches[mc].pkg_pn:
|
||||
(latest, latest_f, preferred_ver, preferred_file, required) = bb.providers.findBestProvider(pn, self.databuilder.mcdata[mc], self.recipecaches[mc], self.recipecaches[mc].pkg_pn)
|
||||
if required and preferred_file is None:
|
||||
return None, None, None, None
|
||||
return (latest, latest_f, preferred_ver, preferred_file)
|
||||
return bb.providers.findBestProvider(pn, self.databuilder.mcdata[mc], self.recipecaches[mc], self.recipecaches[mc].pkg_pn)
|
||||
else:
|
||||
return None, None, None, None
|
||||
|
||||
@@ -1586,7 +1551,7 @@ class BBCooker:
|
||||
self.inotify_modified_files = []
|
||||
|
||||
if not self.baseconfig_valid:
|
||||
logger.debug("Reloading base configuration data")
|
||||
logger.debug(1, "Reloading base configuration data")
|
||||
self.initConfigurationData()
|
||||
self.handlePRServ()
|
||||
|
||||
@@ -2216,33 +2181,21 @@ class CookerParser(object):
|
||||
yield not cached, mc, infos
|
||||
|
||||
def parse_generator(self):
|
||||
empty = False
|
||||
while self.processes or not empty:
|
||||
for process in self.processes.copy():
|
||||
if not process.is_alive():
|
||||
process.join()
|
||||
self.processes.remove(process)
|
||||
|
||||
while True:
|
||||
if self.parsed >= self.toparse:
|
||||
break
|
||||
|
||||
try:
|
||||
result = self.result_queue.get(timeout=0.25)
|
||||
except queue.Empty:
|
||||
empty = True
|
||||
pass
|
||||
else:
|
||||
empty = False
|
||||
value = result[1]
|
||||
if isinstance(value, BaseException):
|
||||
raise value
|
||||
else:
|
||||
yield result
|
||||
|
||||
if not (self.parsed >= self.toparse):
|
||||
raise bb.parse.ParseError("Not all recipes parsed, parser thread killed/died? Exiting.", None)
|
||||
|
||||
|
||||
def parse_next(self):
|
||||
result = []
|
||||
parsed = None
|
||||
@@ -2254,18 +2207,18 @@ class CookerParser(object):
|
||||
except bb.BBHandledException as exc:
|
||||
self.error += 1
|
||||
logger.error('Failed to parse recipe: %s' % exc.recipe)
|
||||
self.shutdown(clean=False, force=True)
|
||||
self.shutdown(clean=False)
|
||||
return False
|
||||
except ParsingFailure as exc:
|
||||
self.error += 1
|
||||
logger.error('Unable to parse %s: %s' %
|
||||
(exc.recipe, bb.exceptions.to_string(exc.realexception)))
|
||||
self.shutdown(clean=False, force=True)
|
||||
self.shutdown(clean=False)
|
||||
return False
|
||||
except bb.parse.ParseError as exc:
|
||||
self.error += 1
|
||||
logger.error(str(exc))
|
||||
self.shutdown(clean=False, force=True)
|
||||
self.shutdown(clean=False)
|
||||
return False
|
||||
except bb.data_smart.ExpansionError as exc:
|
||||
self.error += 1
|
||||
@@ -2274,7 +2227,7 @@ class CookerParser(object):
|
||||
tb = list(itertools.dropwhile(lambda e: e.filename.startswith(bbdir), exc.traceback))
|
||||
logger.error('ExpansionError during parsing %s', value.recipe,
|
||||
exc_info=(etype, value, tb))
|
||||
self.shutdown(clean=False, force=True)
|
||||
self.shutdown(clean=False)
|
||||
return False
|
||||
except Exception as exc:
|
||||
self.error += 1
|
||||
@@ -2286,7 +2239,7 @@ class CookerParser(object):
|
||||
# Most likely, an exception occurred during raising an exception
|
||||
import traceback
|
||||
logger.error('Exception during parse: %s' % traceback.format_exc())
|
||||
self.shutdown(clean=False, force=True)
|
||||
self.shutdown(clean=False)
|
||||
return False
|
||||
|
||||
self.current += 1
|
||||
|
||||
@@ -23,8 +23,8 @@ logger = logging.getLogger("BitBake")
|
||||
parselog = logging.getLogger("BitBake.Parsing")
|
||||
|
||||
class ConfigParameters(object):
|
||||
def __init__(self, argv=None):
|
||||
self.options, targets = self.parseCommandLine(argv or sys.argv)
|
||||
def __init__(self, argv=sys.argv):
|
||||
self.options, targets = self.parseCommandLine(argv)
|
||||
self.environment = self.parseEnvironment()
|
||||
|
||||
self.options.pkgs_to_build = targets or []
|
||||
@@ -209,7 +209,7 @@ def findConfigFile(configfile, data):
|
||||
return None
|
||||
|
||||
#
|
||||
# We search for a conf/bblayers.conf under an entry in BBPATH or in cwd working
|
||||
# We search for a conf/bblayers.conf under an entry in BBPATH or in cwd working
|
||||
# up to /. If that fails, we search for a conf/bitbake.conf in BBPATH.
|
||||
#
|
||||
|
||||
@@ -291,8 +291,6 @@ class CookerDataBuilder(object):
|
||||
|
||||
multiconfig = (self.data.getVar("BBMULTICONFIG") or "").split()
|
||||
for config in multiconfig:
|
||||
if config[0].isdigit():
|
||||
bb.fatal("Multiconfig name '%s' is invalid as multiconfigs cannot start with a digit" % config)
|
||||
mcdata = self.parseConfigurationFiles(self.prefiles, self.postfiles, config)
|
||||
bb.event.fire(bb.event.ConfigParsed(), mcdata)
|
||||
self.mcdata[config] = mcdata
|
||||
@@ -344,9 +342,6 @@ class CookerDataBuilder(object):
|
||||
layers = (data.getVar('BBLAYERS') or "").split()
|
||||
broken_layers = []
|
||||
|
||||
if not layers:
|
||||
bb.fatal("The bblayers.conf file doesn't contain any BBLAYERS definition")
|
||||
|
||||
data = bb.data.createCopy(data)
|
||||
approved = bb.utils.approved_variables()
|
||||
|
||||
@@ -401,8 +396,6 @@ class CookerDataBuilder(object):
|
||||
if c in collections_tmp:
|
||||
bb.fatal("Found duplicated BBFILE_COLLECTIONS '%s', check bblayers.conf or layer.conf to fix it." % c)
|
||||
compat = set((data.getVar("LAYERSERIES_COMPAT_%s" % c) or "").split())
|
||||
if compat and not layerseries:
|
||||
bb.fatal("No core layer found to work with layer '%s'. Missing entry in bblayers.conf?" % c)
|
||||
if compat and not (compat & layerseries):
|
||||
bb.fatal("Layer %s is not compatible with the core layer which only supports these series: %s (layer is compatible with %s)"
|
||||
% (c, " ".join(layerseries), " ".join(compat)))
|
||||
@@ -436,7 +429,7 @@ class CookerDataBuilder(object):
|
||||
parselog.critical("Undefined event handler function '%s'" % var)
|
||||
raise bb.BBHandledException()
|
||||
handlerln = int(data.getVarFlag(var, "lineno", False))
|
||||
bb.event.register(var, data.getVar(var, False), (data.getVarFlag(var, "eventmask") or "").split(), handlerfn, handlerln, data)
|
||||
bb.event.register(var, data.getVar(var, False), (data.getVarFlag(var, "eventmask") or "").split(), handlerfn, handlerln)
|
||||
|
||||
data.setVar('BBINCLUDED',bb.parse.get_file_depends(data))
|
||||
|
||||
|
||||
@@ -17,7 +17,7 @@ BitBake build tools.
|
||||
# Based on functions from the base bb module, Copyright 2003 Holger Schurig
|
||||
|
||||
import copy, re, sys, traceback
|
||||
from collections.abc import MutableMapping
|
||||
from collections import MutableMapping
|
||||
import logging
|
||||
import hashlib
|
||||
import bb, bb.codeparser
|
||||
@@ -28,7 +28,7 @@ logger = logging.getLogger("BitBake.Data")

__setvar_keyword__ = ["_append", "_prepend", "_remove"]
__setvar_regexp__ = re.compile(r'(?P<base>.*?)(?P<keyword>_append|_prepend|_remove)(_(?P<add>[^A-Z]*))?$')
__expand_var_regexp__ = re.compile(r"\${[a-zA-Z0-9\-_+./~:]+?}")
__expand_var_regexp__ = re.compile(r"\${[a-zA-Z0-9\-_+./~]+?}")
__expand_python_regexp__ = re.compile(r"\${@.+?}")
__whitespace_split__ = re.compile(r'(\s)')
__override_regexp__ = re.compile(r'[a-z0-9]+')
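# --- Illustrative sketch, not part of the diff above: how the __setvar_regexp__
# --- shown in this hunk splits an override-style variable name into its parts.
# --- The variable name used here is a made-up example.
import re
setvar_regexp = re.compile(r'(?P<base>.*?)(?P<keyword>_append|_prepend|_remove)(_(?P<add>[^A-Z]*))?$')
m = setvar_regexp.match("SRC_URI_append_qemuarm")
if m:
    # Expected groups: base='SRC_URI', keyword='_append', add='qemuarm'
    print(m.group("base"), m.group("keyword"), m.group("add"))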
@@ -403,7 +403,7 @@ class DataSmart(MutableMapping):
s = __expand_python_regexp__.sub(varparse.python_sub, s)
except SyntaxError as e:
# Likely unmatched brackets, just don't expand the expression
if e.msg != "EOL while scanning string literal" and not e.msg.startswith("unterminated string literal"):
if e.msg != "EOL while scanning string literal":
raise
if s == olds:
break
@@ -411,8 +411,6 @@ class DataSmart(MutableMapping):
raise
except bb.parse.SkipRecipe:
raise
except bb.BBHandledException:
raise
except Exception as exc:
tb = sys.exc_info()[2]
raise ExpansionError(varname, s, exc).with_traceback(tb) from exc
@@ -483,7 +481,6 @@ class DataSmart(MutableMapping):
|
||||
|
||||
def setVar(self, var, value, **loginfo):
|
||||
#print("var=" + str(var) + " val=" + str(value))
|
||||
var = var.replace(":", "_")
|
||||
self.expand_cache = {}
|
||||
parsing=False
|
||||
if 'parsing' in loginfo:
|
||||
@@ -592,8 +589,6 @@ class DataSmart(MutableMapping):
|
||||
"""
|
||||
Rename the variable key to newkey
|
||||
"""
|
||||
key = key.replace(":", "_")
|
||||
newkey = newkey.replace(":", "_")
|
||||
if key == newkey:
|
||||
bb.warn("Calling renameVar with equivalent keys (%s) is invalid" % key)
|
||||
return
|
||||
@@ -642,7 +637,6 @@ class DataSmart(MutableMapping):
|
||||
self.setVar(var + "_prepend", value, ignore=True, parsing=True)
|
||||
|
||||
def delVar(self, var, **loginfo):
|
||||
var = var.replace(":", "_")
|
||||
self.expand_cache = {}
|
||||
|
||||
loginfo['detail'] = ""
|
||||
@@ -670,7 +664,6 @@ class DataSmart(MutableMapping):
|
||||
override = None
|
||||
|
||||
def setVarFlag(self, var, flag, value, **loginfo):
|
||||
var = var.replace(":", "_")
|
||||
self.expand_cache = {}
|
||||
|
||||
if 'op' not in loginfo:
|
||||
@@ -694,7 +687,6 @@ class DataSmart(MutableMapping):
|
||||
self.dict["__exportlist"]["_content"].add(var)
|
||||
|
||||
def getVarFlag(self, var, flag, expand=True, noweakdefault=False, parsing=False, retparser=False):
|
||||
var = var.replace(":", "_")
|
||||
if flag == "_content":
|
||||
cachename = var
|
||||
else:
|
||||
@@ -822,7 +814,6 @@ class DataSmart(MutableMapping):
|
||||
return value
|
||||
|
||||
def delVarFlag(self, var, flag, **loginfo):
|
||||
var = var.replace(":", "_")
|
||||
self.expand_cache = {}
|
||||
|
||||
local_var, _ = self._findVar(var)
|
||||
@@ -840,7 +831,6 @@ class DataSmart(MutableMapping):
|
||||
del self.dict[var][flag]
|
||||
|
||||
def appendVarFlag(self, var, flag, value, **loginfo):
|
||||
var = var.replace(":", "_")
|
||||
loginfo['op'] = 'append'
|
||||
loginfo['flag'] = flag
|
||||
self.varhistory.record(**loginfo)
|
||||
@@ -848,7 +838,6 @@ class DataSmart(MutableMapping):
|
||||
self.setVarFlag(var, flag, newvalue, ignore=True)
|
||||
|
||||
def prependVarFlag(self, var, flag, value, **loginfo):
|
||||
var = var.replace(":", "_")
|
||||
loginfo['op'] = 'prepend'
|
||||
loginfo['flag'] = flag
|
||||
self.varhistory.record(**loginfo)
|
||||
@@ -856,7 +845,6 @@ class DataSmart(MutableMapping):
|
||||
self.setVarFlag(var, flag, newvalue, ignore=True)
|
||||
|
||||
def setVarFlags(self, var, flags, **loginfo):
|
||||
var = var.replace(":", "_")
|
||||
self.expand_cache = {}
|
||||
infer_caller_details(loginfo)
|
||||
if not var in self.dict:
|
||||
@@ -871,7 +859,6 @@ class DataSmart(MutableMapping):
|
||||
self.dict[var][i] = flags[i]
|
||||
|
||||
def getVarFlags(self, var, expand = False, internalflags=False):
|
||||
var = var.replace(":", "_")
|
||||
local_var, _ = self._findVar(var)
|
||||
flags = {}
|
||||
|
||||
@@ -888,7 +875,6 @@ class DataSmart(MutableMapping):
|
||||
|
||||
|
||||
def delVarFlags(self, var, **loginfo):
|
||||
var = var.replace(":", "_")
|
||||
self.expand_cache = {}
|
||||
if not var in self.dict:
|
||||
self._makeShadowCopy(var)
|
||||
@@ -1019,7 +1005,7 @@ class DataSmart(MutableMapping):
|
||||
else:
|
||||
data.update({key:value})
|
||||
|
||||
varflags = d.getVarFlags(key, internalflags = True, expand=["vardepvalue"])
|
||||
varflags = d.getVarFlags(key, internalflags = True)
|
||||
if not varflags:
|
||||
continue
|
||||
for f in varflags:
|
||||
|
||||
@@ -118,8 +118,6 @@ def fire_class_handlers(event, d):
|
||||
if _eventfilter:
|
||||
if not _eventfilter(name, handler, event, d):
|
||||
continue
|
||||
if d is not None and not name in (d.getVar("__BBHANDLERS_MC") or set()):
|
||||
continue
|
||||
execute_handler(name, handler, event, d)
|
||||
|
||||
ui_queue = []
|
||||
@@ -229,19 +227,11 @@ def fire_from_worker(event, d):
|
||||
fire_ui_handlers(event, d)
|
||||
|
||||
noop = lambda _: None
|
||||
def register(name, handler, mask=None, filename=None, lineno=None, data=None):
|
||||
def register(name, handler, mask=None, filename=None, lineno=None):
|
||||
"""Register an Event handler"""
|
||||
|
||||
if data is not None and data.getVar("BB_CURRENT_MC"):
|
||||
mc = data.getVar("BB_CURRENT_MC")
|
||||
name = '%s%s' % (mc.replace('-', '_'), name)
|
||||
|
||||
# already registered
|
||||
if name in _handlers:
|
||||
if data is not None:
|
||||
bbhands_mc = (data.getVar("__BBHANDLERS_MC") or set())
|
||||
bbhands_mc.add(name)
|
||||
data.setVar("__BBHANDLERS_MC", bbhands_mc)
|
||||
return AlreadyRegistered
|
||||
|
||||
if handler is not None:
|
||||
@@ -278,20 +268,10 @@ def register(name, handler, mask=None, filename=None, lineno=None, data=None):
|
||||
_event_handler_map[m] = {}
|
||||
_event_handler_map[m][name] = True
|
||||
|
||||
if data is not None:
|
||||
bbhands_mc = (data.getVar("__BBHANDLERS_MC") or set())
|
||||
bbhands_mc.add(name)
|
||||
data.setVar("__BBHANDLERS_MC", bbhands_mc)
|
||||
|
||||
return Registered
|
||||
|
||||
def remove(name, handler, data=None):
|
||||
def remove(name, handler):
|
||||
"""Remove an Event handler"""
|
||||
if data is not None:
|
||||
if data.getVar("BB_CURRENT_MC"):
|
||||
mc = data.getVar("BB_CURRENT_MC")
|
||||
name = '%s%s' % (mc.replace('-', '_'), name)
|
||||
|
||||
_handlers.pop(name)
|
||||
if name in _catchall_handlers:
|
||||
_catchall_handlers.pop(name)
|
||||
@@ -299,12 +279,6 @@ def remove(name, handler, data=None):
|
||||
if name in _event_handler_map[event]:
|
||||
_event_handler_map[event].pop(name)
|
||||
|
||||
if data is not None:
|
||||
bbhands_mc = (data.getVar("__BBHANDLERS_MC") or set())
|
||||
if name in bbhands_mc:
|
||||
bbhands_mc.remove(name)
|
||||
data.setVar("__BBHANDLERS_MC", bbhands_mc)
|
||||
|
||||
def get_handlers():
|
||||
return _handlers
|
||||
|
||||
@@ -670,17 +644,6 @@ class ReachableStamps(Event):
|
||||
Event.__init__(self)
|
||||
self.stamps = stamps
|
||||
|
||||
class StaleSetSceneTasks(Event):
|
||||
"""
|
||||
An event listing setscene tasks which are 'stale' and will
|
||||
be rerun. The metadata may use to clean up stale data.
|
||||
tasks is a mapping of tasks and matching stale stamps.
|
||||
"""
|
||||
|
||||
def __init__(self, tasks):
|
||||
Event.__init__(self)
|
||||
self.tasks = tasks
|
||||
|
||||
class FilesMatchingFound(Event):
|
||||
"""
|
||||
Event when a list of files matching the supplied pattern has
|
||||
|
||||
@@ -290,7 +290,7 @@ class URI(object):
|
||||
|
||||
def _param_str_split(self, string, elmdelim, kvdelim="="):
|
||||
ret = collections.OrderedDict()
|
||||
for k, v in [x.split(kvdelim, 1) for x in string.split(elmdelim) if x]:
|
||||
for k, v in [x.split(kvdelim, 1) for x in string.split(elmdelim)]:
|
||||
ret[k] = v
|
||||
return ret
|
||||
|
||||
@@ -428,9 +428,8 @@ def uri_replace(ud, uri_find, uri_replace, replacements, d, mirrortarball=None):
uri_decoded = list(decodeurl(ud.url))
uri_find_decoded = list(decodeurl(uri_find))
uri_replace_decoded = list(decodeurl(uri_replace))
logger.debug2("For url %s comparing %s to %s" % (uri_decoded, uri_find_decoded, uri_replace_decoded))
logger.debug(2, "For url %s comparing %s to %s" % (uri_decoded, uri_find_decoded, uri_replace_decoded))
result_decoded = ['', '', '', '', '', {}]
# 0 - type, 1 - host, 2 - path, 3 - user, 4- pswd, 5 - params
for loc, i in enumerate(uri_find_decoded):
result_decoded[loc] = uri_decoded[loc]
regexp = i
@@ -450,9 +449,6 @@ def uri_replace(ud, uri_find, uri_replace, replacements, d, mirrortarball=None):
for l in replacements:
uri_replace_decoded[loc][k] = uri_replace_decoded[loc][k].replace(l, replacements[l])
result_decoded[loc][k] = uri_replace_decoded[loc][k]
elif (loc == 3 or loc == 4) and uri_replace_decoded[loc]:
# User/password in the replacement is just a straight replacement
result_decoded[loc] = uri_replace_decoded[loc]
elif (re.match(regexp, uri_decoded[loc])):
if not uri_replace_decoded[loc]:
result_decoded[loc] = ""
@@ -478,7 +474,7 @@ def uri_replace(ud, uri_find, uri_replace, replacements, d, mirrortarball=None):
result = encodeurl(result_decoded)
if result == ud.url:
return None
logger.debug2("For url %s returning %s" % (ud.url, result))
logger.debug(2, "For url %s returning %s" % (ud.url, result))
return result

methods = []
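# --- Illustrative sketch, not part of the diff above: uri_replace() is the routine
# --- behind PREMIRRORS/MIRRORS rewriting. Entries of the general form below are what
# --- end up as the uri_find/uri_replace pairs handled in the hunk above; the mirror
# --- host and paths are hypothetical examples, not values from this repository.
# PREMIRRORS_prepend = "\
#     git://.*/.*     http://downloads.example.com/mirror/sources/ \n \
#     https://.*/.*   http://downloads.example.com/mirror/sources/ \n"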
@@ -503,9 +499,9 @@ def fetcher_init(d):
|
||||
# When to drop SCM head revisions controlled by user policy
|
||||
srcrev_policy = d.getVar('BB_SRCREV_POLICY') or "clear"
|
||||
if srcrev_policy == "cache":
|
||||
logger.debug("Keeping SRCREV cache due to cache policy of: %s", srcrev_policy)
|
||||
logger.debug(1, "Keeping SRCREV cache due to cache policy of: %s", srcrev_policy)
|
||||
elif srcrev_policy == "clear":
|
||||
logger.debug("Clearing SRCREV cache due to cache policy of: %s", srcrev_policy)
|
||||
logger.debug(1, "Clearing SRCREV cache due to cache policy of: %s", srcrev_policy)
|
||||
revs.clear()
|
||||
else:
|
||||
raise FetchError("Invalid SRCREV cache policy of: %s" % srcrev_policy)
|
||||
@@ -566,9 +562,6 @@ def verify_checksum(ud, d, precomputed={}):
|
||||
|
||||
checksum_expected = getattr(ud, "%s_expected" % checksum_id)
|
||||
|
||||
if checksum_expected == '':
|
||||
checksum_expected = None
|
||||
|
||||
return {
|
||||
"id": checksum_id,
|
||||
"name": checksum_name,
|
||||
@@ -619,7 +612,7 @@ def verify_checksum(ud, d, precomputed={}):
|
||||
|
||||
for ci in checksum_infos:
|
||||
if ci["expected"] and ci["expected"] != ci["data"]:
|
||||
messages.append("File: '%s' has %s checksum '%s' when '%s' was " \
|
||||
messages.append("File: '%s' has %s checksum %s when %s was " \
|
||||
"expected" % (ud.localpath, ci["id"], ci["data"], ci["expected"]))
|
||||
bad_checksum = ci["data"]
|
||||
|
||||
@@ -860,13 +853,18 @@ def runfetchcmd(cmd, d, quiet=False, cleanup=None, log=None, workdir=None):
|
||||
if val:
|
||||
cmd = 'export ' + var + '=\"%s\"; %s' % (val, cmd)
|
||||
|
||||
# Ensure that a _PYTHON_SYSCONFIGDATA_NAME value set by a recipe
|
||||
# (for example via python3native.bbclass since warrior) is not set for
|
||||
# host Python (otherwise tools like git-make-shallow will fail)
|
||||
cmd = 'unset _PYTHON_SYSCONFIGDATA_NAME; ' + cmd
|
||||
|
||||
# Disable pseudo as it may affect ssh, potentially causing it to hang.
|
||||
cmd = 'export PSEUDO_DISABLED=1; ' + cmd
|
||||
|
||||
if workdir:
|
||||
logger.debug("Running '%s' in %s" % (cmd, workdir))
|
||||
logger.debug(1, "Running '%s' in %s" % (cmd, workdir))
|
||||
else:
|
||||
logger.debug("Running %s", cmd)
|
||||
logger.debug(1, "Running %s", cmd)
|
||||
|
||||
success = False
|
||||
error_message = ""
|
||||
@@ -875,7 +873,7 @@ def runfetchcmd(cmd, d, quiet=False, cleanup=None, log=None, workdir=None):
|
||||
(output, errors) = bb.process.run(cmd, log=log, shell=True, stderr=subprocess.PIPE, cwd=workdir)
|
||||
success = True
|
||||
except bb.process.NotFoundError as e:
|
||||
error_message = "Fetch command %s not found" % (e.command)
|
||||
error_message = "Fetch command %s" % (e.command)
|
||||
except bb.process.ExecutionError as e:
|
||||
if e.stdout:
|
||||
output = "output:\n%s\n%s" % (e.stdout, e.stderr)
|
||||
@@ -907,7 +905,7 @@ def check_network_access(d, info, url):
|
||||
elif not trusted_network(d, url):
|
||||
raise UntrustedUrl(url, info)
|
||||
else:
|
||||
logger.debug("Fetcher accessed the network with the command %s" % info)
|
||||
logger.debug(1, "Fetcher accessed the network with the command %s" % info)
|
||||
|
||||
def build_mirroruris(origud, mirrors, ld):
|
||||
uris = []
|
||||
@@ -933,7 +931,7 @@ def build_mirroruris(origud, mirrors, ld):
|
||||
continue
|
||||
|
||||
if not trusted_network(ld, newuri):
|
||||
logger.debug("Mirror %s not in the list of trusted networks, skipping" % (newuri))
|
||||
logger.debug(1, "Mirror %s not in the list of trusted networks, skipping" % (newuri))
|
||||
continue
|
||||
|
||||
# Create a local copy of the mirrors minus the current line
|
||||
@@ -946,8 +944,8 @@ def build_mirroruris(origud, mirrors, ld):
|
||||
newud = FetchData(newuri, ld)
|
||||
newud.setup_localpath(ld)
|
||||
except bb.fetch2.BBFetchException as e:
|
||||
logger.debug("Mirror fetch failure for url %s (original url: %s)" % (newuri, origud.url))
|
||||
logger.debug(str(e))
|
||||
logger.debug(1, "Mirror fetch failure for url %s (original url: %s)" % (newuri, origud.url))
|
||||
logger.debug(1, str(e))
|
||||
try:
|
||||
# setup_localpath of file:// urls may fail, we should still see
|
||||
# if mirrors of the url exist
|
||||
@@ -1050,8 +1048,8 @@ def try_mirror_url(fetch, origud, ud, ld, check = False):
|
||||
elif isinstance(e, NoChecksumError):
|
||||
raise
|
||||
else:
|
||||
logger.debug("Mirror fetch failure for url %s (original url: %s)" % (ud.url, origud.url))
|
||||
logger.debug(str(e))
|
||||
logger.debug(1, "Mirror fetch failure for url %s (original url: %s)" % (ud.url, origud.url))
|
||||
logger.debug(1, str(e))
|
||||
try:
|
||||
ud.method.clean(ud, ld)
|
||||
except UnboundLocalError:
|
||||
@@ -1250,7 +1248,7 @@ class FetchData(object):
|
||||
|
||||
if checksum_name in self.parm:
|
||||
checksum_expected = self.parm[checksum_name]
|
||||
elif self.type not in ["http", "https", "ftp", "ftps", "sftp", "s3", "az"]:
|
||||
elif self.type not in ["http", "https", "ftp", "ftps", "sftp", "s3"]:
|
||||
checksum_expected = None
|
||||
else:
|
||||
checksum_expected = d.getVarFlag("SRC_URI", checksum_name)
|
||||
@@ -1463,10 +1461,6 @@ class FetchMethod(object):
|
||||
cmd = '7z x -so %s | tar x --no-same-owner -f -' % file
|
||||
elif file.endswith('.7z'):
|
||||
cmd = '7za x -y %s 1>/dev/null' % file
|
||||
elif file.endswith('.tzst') or file.endswith('.tar.zst'):
|
||||
cmd = 'zstd --decompress --stdout %s | tar x --no-same-owner -f -' % file
|
||||
elif file.endswith('.zst'):
|
||||
cmd = 'zstd --decompress --stdout %s > %s' % (file, efile)
|
||||
elif file.endswith('.zip') or file.endswith('.jar'):
|
||||
try:
|
||||
dos = bb.utils.to_boolean(urldata.parm.get('dos'), False)
|
||||
@@ -1695,7 +1689,7 @@ class Fetch(object):
|
||||
if m.verify_donestamp(ud, self.d) and not m.need_update(ud, self.d):
|
||||
done = True
|
||||
elif m.try_premirror(ud, self.d):
|
||||
logger.debug("Trying PREMIRRORS")
|
||||
logger.debug(1, "Trying PREMIRRORS")
|
||||
mirrors = mirror_from_string(self.d.getVar('PREMIRRORS'))
|
||||
done = m.try_mirrors(self, ud, self.d, mirrors)
|
||||
if done:
|
||||
@@ -1705,7 +1699,7 @@ class Fetch(object):
|
||||
m.update_donestamp(ud, self.d)
|
||||
except ChecksumError as e:
|
||||
logger.warning("Checksum failure encountered with premirror download of %s - will attempt other sources." % u)
|
||||
logger.debug(str(e))
|
||||
logger.debug(1, str(e))
|
||||
done = False
|
||||
|
||||
if premirroronly:
|
||||
@@ -1717,7 +1711,7 @@ class Fetch(object):
|
||||
try:
|
||||
if not trusted_network(self.d, ud.url):
|
||||
raise UntrustedUrl(ud.url)
|
||||
logger.debug("Trying Upstream")
|
||||
logger.debug(1, "Trying Upstream")
|
||||
m.download(ud, self.d)
|
||||
if hasattr(m, "build_mirror_data"):
|
||||
m.build_mirror_data(ud, self.d)
|
||||
@@ -1732,19 +1726,19 @@ class Fetch(object):
|
||||
except BBFetchException as e:
|
||||
if isinstance(e, ChecksumError):
|
||||
logger.warning("Checksum failure encountered with download of %s - will attempt other sources if available" % u)
|
||||
logger.debug(str(e))
|
||||
logger.debug(1, str(e))
|
||||
if os.path.exists(ud.localpath):
|
||||
rename_bad_checksum(ud, e.checksum)
|
||||
elif isinstance(e, NoChecksumError):
|
||||
raise
|
||||
else:
|
||||
logger.warning('Failed to fetch URL %s, attempting MIRRORS if available' % u)
|
||||
logger.debug(str(e))
|
||||
logger.debug(1, str(e))
|
||||
firsterr = e
|
||||
# Remove any incomplete fetch
|
||||
if not verified_stamp:
|
||||
m.clean(ud, self.d)
|
||||
logger.debug("Trying MIRRORS")
|
||||
logger.debug(1, "Trying MIRRORS")
|
||||
mirrors = mirror_from_string(self.d.getVar('MIRRORS'))
|
||||
done = m.try_mirrors(self, ud, self.d, mirrors)
|
||||
|
||||
@@ -1781,7 +1775,7 @@ class Fetch(object):
|
||||
ud = self.ud[u]
|
||||
ud.setup_localpath(self.d)
|
||||
m = ud.method
|
||||
logger.debug("Testing URL %s", u)
|
||||
logger.debug(1, "Testing URL %s", u)
|
||||
# First try checking uri, u, from PREMIRRORS
|
||||
mirrors = mirror_from_string(self.d.getVar('PREMIRRORS'))
|
||||
ret = m.try_mirrors(self, ud, self.d, mirrors, True)
|
||||
@@ -1915,7 +1909,6 @@ from . import repo
|
||||
from . import clearcase
|
||||
from . import npm
|
||||
from . import npmsw
|
||||
from . import az
|
||||
|
||||
methods.append(local.Local())
|
||||
methods.append(wget.Wget())
|
||||
@@ -1935,4 +1928,3 @@ methods.append(repo.Repo())
|
||||
methods.append(clearcase.ClearCase())
|
||||
methods.append(npm.Npm())
|
||||
methods.append(npmsw.NpmShrinkWrap())
|
||||
methods.append(az.Az())
|
||||
|
||||
@@ -1,93 +0,0 @@
"""
BitBake 'Fetch' Azure Storage implementation

"""

# Copyright (C) 2021 Alejandro Hernandez Samaniego
#
# Based on bb.fetch2.wget:
# Copyright (C) 2003, 2004 Chris Larson
#
# SPDX-License-Identifier: GPL-2.0-only
#
# Based on functions from the base bb module, Copyright 2003 Holger Schurig

import shlex
import os
import bb
from bb.fetch2 import FetchError
from bb.fetch2 import logger
from bb.fetch2.wget import Wget


class Az(Wget):

def supports(self, ud, d):
"""
Check to see if a given url can be fetched from Azure Storage
"""
return ud.type in ['az']


def checkstatus(self, fetch, ud, d, try_again=True):

# checkstatus discards parameters either way, we need to do this before adding the SAS
ud.url = ud.url.replace('az://','https://').split(';')[0]

az_sas = d.getVar('AZ_SAS')
if az_sas and az_sas not in ud.url:
ud.url += az_sas

return Wget.checkstatus(self, fetch, ud, d, try_again)

# Override download method, include retries
def download(self, ud, d, retries=3):
"""Fetch urls"""

# If were reaching the account transaction limit we might be refused a connection,
# retrying allows us to avoid false negatives since the limit changes over time
fetchcmd = self.basecmd + ' --retry-connrefused --waitretry=5'

# We need to provide a localpath to avoid wget using the SAS
# ud.localfile either has the downloadfilename or ud.path
localpath = os.path.join(d.getVar("DL_DIR"), ud.localfile)
bb.utils.mkdirhier(os.path.dirname(localpath))
fetchcmd += " -O %s" % shlex.quote(localpath)


if ud.user and ud.pswd:
fetchcmd += " --user=%s --password=%s --auth-no-challenge" % (ud.user, ud.pswd)

# Check if a Shared Access Signature was given and use it
az_sas = d.getVar('AZ_SAS')

if az_sas:
azuri = '%s%s%s%s' % ('https://', ud.host, ud.path, az_sas)
else:
azuri = '%s%s%s' % ('https://', ud.host, ud.path)

if os.path.exists(ud.localpath):
# file exists, but we didnt complete it.. trying again.
fetchcmd += d.expand(" -c -P ${DL_DIR} '%s'" % azuri)
else:
fetchcmd += d.expand(" -P ${DL_DIR} '%s'" % azuri)

try:
self._runwget(ud, d, fetchcmd, False)
except FetchError as e:
# Azure fails on handshake sometimes when using wget after some stress, producing a
# FetchError from the fetcher, if the artifact exists retyring should succeed
if 'Unable to establish SSL connection' in str(e):
logger.debug2('Unable to establish SSL connection: Retries remaining: %s, Retrying...' % retries)
self.download(ud, d, retries -1)

# Sanity check since wget can pretend it succeed when it didn't
# Also, this used to happen if sourceforge sent us to the mirror page
if not os.path.exists(ud.localpath):
raise FetchError("The fetch command returned success for url %s but %s doesn't exist?!" % (azuri, ud.localpath), azuri)

if os.path.getsize(ud.localpath) == 0:
os.remove(ud.localpath)
raise FetchError("The fetch of %s resulted in a zero size file?! Deleting and failing since this isn't right." % (azuri), azuri)

return True
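# --- Illustrative sketch, not part of the diff above: the az fetcher removed by this
# --- hunk is typically driven from recipe/site configuration roughly as follows.
# --- The storage account, container, file name and SAS value are made-up examples.
# AZ_SAS = "?sv=2021-04-10&ss=b&srt=co&sig=EXAMPLEONLY"
# SRC_URI = "az://example.blob.core.windows.net/container/sources/foo-1.0.tar.gz"
# SRC_URI[sha256sum] = "<checksum of the artifact>"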
@@ -74,16 +74,16 @@ class Bzr(FetchMethod):
|
||||
|
||||
if os.access(os.path.join(ud.pkgdir, os.path.basename(ud.pkgdir), '.bzr'), os.R_OK):
|
||||
bzrcmd = self._buildbzrcommand(ud, d, "update")
|
||||
logger.debug("BZR Update %s", ud.url)
|
||||
logger.debug(1, "BZR Update %s", ud.url)
|
||||
bb.fetch2.check_network_access(d, bzrcmd, ud.url)
|
||||
runfetchcmd(bzrcmd, d, workdir=os.path.join(ud.pkgdir, os.path.basename(ud.path)))
|
||||
else:
|
||||
bb.utils.remove(os.path.join(ud.pkgdir, os.path.basename(ud.pkgdir)), True)
|
||||
bzrcmd = self._buildbzrcommand(ud, d, "fetch")
|
||||
bb.fetch2.check_network_access(d, bzrcmd, ud.url)
|
||||
logger.debug("BZR Checkout %s", ud.url)
|
||||
logger.debug(1, "BZR Checkout %s", ud.url)
|
||||
bb.utils.mkdirhier(ud.pkgdir)
|
||||
logger.debug("Running %s", bzrcmd)
|
||||
logger.debug(1, "Running %s", bzrcmd)
|
||||
runfetchcmd(bzrcmd, d, workdir=ud.pkgdir)
|
||||
|
||||
scmdata = ud.parm.get("scmdata", "")
|
||||
@@ -109,7 +109,7 @@ class Bzr(FetchMethod):
|
||||
"""
|
||||
Return the latest upstream revision number
|
||||
"""
|
||||
logger.debug2("BZR fetcher hitting network for %s", ud.url)
|
||||
logger.debug(2, "BZR fetcher hitting network for %s", ud.url)
|
||||
|
||||
bb.fetch2.check_network_access(d, self._buildbzrcommand(ud, d, "revno"), ud.url)
|
||||
|
||||
|
||||
@@ -70,7 +70,7 @@ class ClearCase(FetchMethod):
|
||||
return ud.type in ['ccrc']
|
||||
|
||||
def debug(self, msg):
|
||||
logger.debug("ClearCase: %s", msg)
|
||||
logger.debug(1, "ClearCase: %s", msg)
|
||||
|
||||
def urldata_init(self, ud, d):
|
||||
"""
|
||||
|
||||
@@ -109,7 +109,7 @@ class Cvs(FetchMethod):
|
||||
cvsupdatecmd = "CVS_RSH=\"%s\" %s" % (cvs_rsh, cvsupdatecmd)
|
||||
|
||||
# create module directory
|
||||
logger.debug2("Fetch: checking for module directory")
|
||||
logger.debug(2, "Fetch: checking for module directory")
|
||||
moddir = os.path.join(ud.pkgdir, localdir)
|
||||
workdir = None
|
||||
if os.access(os.path.join(moddir, 'CVS'), os.R_OK):
|
||||
@@ -123,7 +123,7 @@ class Cvs(FetchMethod):
|
||||
# check out sources there
|
||||
bb.utils.mkdirhier(ud.pkgdir)
|
||||
workdir = ud.pkgdir
|
||||
logger.debug("Running %s", cvscmd)
|
||||
logger.debug(1, "Running %s", cvscmd)
|
||||
bb.fetch2.check_network_access(d, cvscmd, ud.url)
|
||||
cmd = cvscmd
|
||||
|
||||
|
||||
@@ -68,7 +68,6 @@ import subprocess
|
||||
import tempfile
|
||||
import bb
|
||||
import bb.progress
|
||||
from contextlib import contextmanager
|
||||
from bb.fetch2 import FetchMethod
|
||||
from bb.fetch2 import runfetchcmd
|
||||
from bb.fetch2 import logger
|
||||
@@ -142,10 +141,6 @@ class Git(FetchMethod):
|
||||
ud.proto = 'file'
|
||||
else:
|
||||
ud.proto = "git"
|
||||
if ud.host == "github.com" and ud.proto == "git":
|
||||
# github stopped supporting git protocol
|
||||
# https://github.blog/2021-09-01-improving-git-protocol-security-github/#no-more-unauthenticated-git
|
||||
ud.proto = "https"
|
||||
|
||||
if not ud.proto in ('git', 'file', 'ssh', 'http', 'https', 'rsync'):
|
||||
raise bb.fetch2.ParameterError("Invalid protocol type", ud.url)
|
||||
@@ -225,12 +220,7 @@ class Git(FetchMethod):
|
||||
ud.shallow = False
|
||||
|
||||
if ud.usehead:
|
||||
# When usehead is set let's associate 'HEAD' with the unresolved
|
||||
# rev of this repository. This will get resolved into a revision
|
||||
# later. If an actual revision happens to have also been provided
|
||||
# then this setting will be overridden.
|
||||
for name in ud.names:
|
||||
ud.unresolvedrev[name] = 'HEAD'
|
||||
ud.unresolvedrev['default'] = 'HEAD'
|
||||
|
||||
ud.basecmd = d.getVar("FETCHCMD_git") or "git -c core.fsyncobjectfiles=0"
|
||||
|
||||
@@ -389,50 +379,7 @@ class Git(FetchMethod):
|
||||
if missing_rev:
|
||||
raise bb.fetch2.FetchError("Unable to find revision %s even from upstream" % missing_rev)
|
||||
|
||||
if self._contains_lfs(ud, d, ud.clonedir) and self._need_lfs(ud):
|
||||
# Unpack temporary working copy, use it to run 'git checkout' to force pre-fetching
|
||||
# of all LFS blobs needed at the the srcrev.
|
||||
#
|
||||
# It would be nice to just do this inline here by running 'git-lfs fetch'
|
||||
# on the bare clonedir, but that operation requires a working copy on some
|
||||
# releases of Git LFS.
|
||||
tmpdir = tempfile.mkdtemp(dir=d.getVar('DL_DIR'))
|
||||
try:
|
||||
# Do the checkout. This implicitly involves a Git LFS fetch.
|
||||
Git.unpack(self, ud, tmpdir, d)
|
||||
|
||||
# Scoop up a copy of any stuff that Git LFS downloaded. Merge them into
|
||||
# the bare clonedir.
|
||||
#
|
||||
# As this procedure is invoked repeatedly on incremental fetches as
|
||||
# a recipe's SRCREV is bumped throughout its lifetime, this will
|
||||
# result in a gradual accumulation of LFS blobs in <ud.clonedir>/lfs
|
||||
# corresponding to all the blobs reachable from the different revs
|
||||
# fetched across time.
|
||||
#
|
||||
# Only do this if the unpack resulted in a .git/lfs directory being
|
||||
# created; this only happens if at least one blob needed to be
|
||||
# downloaded.
|
||||
if os.path.exists(os.path.join(tmpdir, "git", ".git", "lfs")):
|
||||
runfetchcmd("tar -cf - lfs | tar -xf - -C %s" % ud.clonedir, d, workdir="%s/git/.git" % tmpdir)
|
||||
finally:
|
||||
bb.utils.remove(tmpdir, recurse=True)
|
||||
|
||||
def build_mirror_data(self, ud, d):

# Create as a temp file and move atomically into position to avoid races
@contextmanager
def create_atomic(filename):
fd, tfile = tempfile.mkstemp(dir=os.path.dirname(filename))
try:
yield tfile
umask = os.umask(0o666)
os.umask(umask)
os.chmod(tfile, (0o666 & ~umask))
os.rename(tfile, filename)
finally:
os.close(fd)

if ud.shallow and ud.write_shallow_tarballs:
if not os.path.exists(ud.fullshallow):
if os.path.islink(ud.fullshallow):
@@ -443,8 +390,7 @@ class Git(FetchMethod):
self.clone_shallow_local(ud, shallowclone, d)

logger.info("Creating tarball of git repository")
with create_atomic(ud.fullshallow) as tfile:
runfetchcmd("tar -czf %s ." % tfile, d, workdir=shallowclone)
runfetchcmd("tar -czf %s ." % ud.fullshallow, d, workdir=shallowclone)
runfetchcmd("touch %s.done" % ud.fullshallow, d)
finally:
bb.utils.remove(tempdir, recurse=True)
@@ -453,8 +399,7 @@ class Git(FetchMethod):
os.unlink(ud.fullmirror)

logger.info("Creating tarball of git repository")
with create_atomic(ud.fullmirror) as tfile:
runfetchcmd("tar -czf %s ." % tfile, d, workdir=ud.clonedir)
runfetchcmd("tar -czf %s ." % ud.fullmirror, d, workdir=ud.clonedir)
runfetchcmd("touch %s.done" % ud.fullmirror, d)

def clone_shallow_local(self, ud, dest, d):
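# --- Illustrative sketch, not part of the diff above: the create_atomic() helper in
# --- the hunk above follows the usual write-to-temp-then-rename pattern. A standalone
# --- version of that pattern, with hypothetical file names, looks roughly like this.
import os
import tempfile
from contextlib import contextmanager

@contextmanager
def atomic_output(filename):
    # Create the temporary file in the same directory so os.rename() stays on one
    # filesystem and is therefore an atomic replacement of the target.
    fd, tfile = tempfile.mkstemp(dir=os.path.dirname(os.path.abspath(filename)))
    try:
        yield tfile
        os.chmod(tfile, 0o644)   # mkstemp() creates the file mode 0600
        os.rename(tfile, filename)
    finally:
        os.close(fd)

# Usage: the file only appears under its final name once it is fully written.
with atomic_output("mirror.tar.gz") as tmp:
    with open(tmp, "w") as f:
        f.write("example contents\n")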
@@ -529,7 +474,7 @@ class Git(FetchMethod):
|
||||
if os.path.exists(destdir):
|
||||
bb.utils.prunedir(destdir)
|
||||
|
||||
need_lfs = self._need_lfs(ud)
|
||||
need_lfs = ud.parm.get("lfs", "1") == "1"
|
||||
|
||||
if not need_lfs:
|
||||
ud.basecmd = "GIT_LFS_SKIP_SMUDGE=1 " + ud.basecmd
|
||||
@@ -618,9 +563,6 @@ class Git(FetchMethod):
|
||||
raise bb.fetch2.FetchError("The command '%s' gave output with more then 1 line unexpectedly, output: '%s'" % (cmd, output))
|
||||
return output.split()[0] != "0"
|
||||
|
||||
def _need_lfs(self, ud):
|
||||
return ud.parm.get("lfs", "1") == "1"
|
||||
|
||||
def _contains_lfs(self, ud, d, wd):
|
||||
"""
|
||||
Check if the repository has 'lfs' (large file) content
|
||||
@@ -631,14 +573,8 @@ class Git(FetchMethod):
|
||||
else:
|
||||
branchname = "master"
|
||||
|
||||
# The bare clonedir doesn't use the remote names; it has the branch immediately.
|
||||
if wd == ud.clonedir:
|
||||
refname = ud.branches[ud.names[0]]
|
||||
else:
|
||||
refname = "origin/%s" % ud.branches[ud.names[0]]
|
||||
|
||||
cmd = "%s grep lfs %s:.gitattributes | wc -l" % (
|
||||
ud.basecmd, refname)
|
||||
cmd = "%s grep lfs origin/%s:.gitattributes | wc -l" % (
|
||||
ud.basecmd, ud.branches[ud.names[0]])
|
||||
|
||||
try:
|
||||
output = runfetchcmd(cmd, d, quiet=True, workdir=wd)
|
||||
@@ -659,11 +595,6 @@ class Git(FetchMethod):
|
||||
"""
|
||||
Return the repository URL
|
||||
"""
|
||||
# Note that we do not support passwords directly in the git urls. There are several
|
||||
# reasons. SRC_URI can be written out to things like buildhistory and people don't
|
||||
# want to leak passwords like that. Its also all too easy to share metadata without
|
||||
# removing the password. ssh keys, ~/.netrc and ~/.ssh/config files can be used as
|
||||
# alternatives so we will not take patches adding password support here.
|
||||
if ud.user:
|
||||
username = ud.user + '@'
|
||||
else:
|
||||
|
||||
@@ -78,7 +78,7 @@ class GitSM(Git):
|
||||
module_hash = ""
|
||||
|
||||
if not module_hash:
|
||||
logger.debug("submodule %s is defined, but is not initialized in the repository. Skipping", m)
|
||||
logger.debug(1, "submodule %s is defined, but is not initialized in the repository. Skipping", m)
|
||||
continue
|
||||
|
||||
submodules.append(m)
|
||||
@@ -179,7 +179,7 @@ class GitSM(Git):
|
||||
(ud.basecmd, ud.revisions[ud.names[0]]), d, workdir=ud.clonedir)
|
||||
|
||||
if len(need_update_list) > 0:
|
||||
logger.debug('gitsm: Submodules requiring update: %s' % (' '.join(need_update_list)))
|
||||
logger.debug(1, 'gitsm: Submodules requiring update: %s' % (' '.join(need_update_list)))
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
@@ -150,7 +150,7 @@ class Hg(FetchMethod):
|
||||
def download(self, ud, d):
|
||||
"""Fetch url"""
|
||||
|
||||
logger.debug2("Fetch: checking for module directory '" + ud.moddir + "'")
|
||||
logger.debug(2, "Fetch: checking for module directory '" + ud.moddir + "'")
|
||||
|
||||
# If the checkout doesn't exist and the mirror tarball does, extract it
|
||||
if not os.path.exists(ud.pkgdir) and os.path.exists(ud.fullmirror):
|
||||
@@ -160,7 +160,7 @@ class Hg(FetchMethod):
|
||||
if os.access(os.path.join(ud.moddir, '.hg'), os.R_OK):
|
||||
# Found the source, check whether need pull
|
||||
updatecmd = self._buildhgcommand(ud, d, "update")
|
||||
logger.debug("Running %s", updatecmd)
|
||||
logger.debug(1, "Running %s", updatecmd)
|
||||
try:
|
||||
runfetchcmd(updatecmd, d, workdir=ud.moddir)
|
||||
except bb.fetch2.FetchError:
|
||||
@@ -168,7 +168,7 @@ class Hg(FetchMethod):
|
||||
pullcmd = self._buildhgcommand(ud, d, "pull")
|
||||
logger.info("Pulling " + ud.url)
|
||||
# update sources there
|
||||
logger.debug("Running %s", pullcmd)
|
||||
logger.debug(1, "Running %s", pullcmd)
|
||||
bb.fetch2.check_network_access(d, pullcmd, ud.url)
|
||||
runfetchcmd(pullcmd, d, workdir=ud.moddir)
|
||||
try:
|
||||
@@ -183,14 +183,14 @@ class Hg(FetchMethod):
|
||||
logger.info("Fetch " + ud.url)
|
||||
# check out sources there
|
||||
bb.utils.mkdirhier(ud.pkgdir)
|
||||
logger.debug("Running %s", fetchcmd)
|
||||
logger.debug(1, "Running %s", fetchcmd)
|
||||
bb.fetch2.check_network_access(d, fetchcmd, ud.url)
|
||||
runfetchcmd(fetchcmd, d, workdir=ud.pkgdir)
|
||||
|
||||
# Even when we clone (fetch), we still need to update as hg's clone
|
||||
# won't checkout the specified revision if its on a branch
|
||||
updatecmd = self._buildhgcommand(ud, d, "update")
|
||||
logger.debug("Running %s", updatecmd)
|
||||
logger.debug(1, "Running %s", updatecmd)
|
||||
runfetchcmd(updatecmd, d, workdir=ud.moddir)
|
||||
|
||||
def clean(self, ud, d):
|
||||
@@ -247,9 +247,9 @@ class Hg(FetchMethod):
|
||||
if scmdata != "nokeep":
|
||||
proto = ud.parm.get('protocol', 'http')
|
||||
if not os.access(os.path.join(codir, '.hg'), os.R_OK):
|
||||
logger.debug2("Unpack: creating new hg repository in '" + codir + "'")
|
||||
logger.debug(2, "Unpack: creating new hg repository in '" + codir + "'")
|
||||
runfetchcmd("%s init %s" % (ud.basecmd, codir), d)
|
||||
logger.debug2("Unpack: updating source in '" + codir + "'")
|
||||
logger.debug(2, "Unpack: updating source in '" + codir + "'")
|
||||
if ud.user and ud.pswd:
|
||||
runfetchcmd("%s --config auth.default.prefix=* --config auth.default.username=%s --config auth.default.password=%s --config \"auth.default.schemes=%s\" pull %s" % (ud.basecmd, ud.user, ud.pswd, proto, ud.moddir), d, workdir=codir)
|
||||
else:
|
||||
@@ -259,5 +259,5 @@ class Hg(FetchMethod):
|
||||
else:
|
||||
runfetchcmd("%s up -C %s" % (ud.basecmd, revflag), d, workdir=codir)
|
||||
else:
|
||||
logger.debug2("Unpack: extracting source to '" + codir + "'")
|
||||
logger.debug(2, "Unpack: extracting source to '" + codir + "'")
|
||||
runfetchcmd("%s archive -t files %s %s" % (ud.basecmd, revflag, codir), d, workdir=ud.moddir)
|
||||
|
||||
@@ -54,12 +54,12 @@ class Local(FetchMethod):
|
||||
return [path]
|
||||
filespath = d.getVar('FILESPATH')
|
||||
if filespath:
|
||||
logger.debug2("Searching for %s in paths:\n %s" % (path, "\n ".join(filespath.split(":"))))
|
||||
logger.debug(2, "Searching for %s in paths:\n %s" % (path, "\n ".join(filespath.split(":"))))
|
||||
newpath, hist = bb.utils.which(filespath, path, history=True)
|
||||
searched.extend(hist)
|
||||
if not os.path.exists(newpath):
|
||||
dldirfile = os.path.join(d.getVar("DL_DIR"), path)
|
||||
logger.debug2("Defaulting to %s for %s" % (dldirfile, path))
|
||||
logger.debug(2, "Defaulting to %s for %s" % (dldirfile, path))
|
||||
bb.utils.mkdirhier(os.path.dirname(dldirfile))
|
||||
searched.append(dldirfile)
|
||||
return searched
|
||||
|
||||
@@ -29,8 +29,6 @@ from bb.fetch2.npm import npm_integrity
|
||||
from bb.fetch2.npm import npm_localfile
|
||||
from bb.fetch2.npm import npm_unpack
|
||||
from bb.utils import is_semver
|
||||
from bb.utils import lockfile
|
||||
from bb.utils import unlockfile
|
||||
|
||||
def foreach_dependencies(shrinkwrap, callback=None, dev=False):
|
||||
"""
|
||||
@@ -189,9 +187,7 @@ class NpmShrinkWrap(FetchMethod):
|
||||
proxy_ud = ud.proxy.ud[proxy_url]
|
||||
proxy_d = ud.proxy.d
|
||||
proxy_ud.setup_localpath(proxy_d)
|
||||
lf = lockfile(proxy_ud.lockfile)
|
||||
returns.append(handle(proxy_ud.method, proxy_ud, proxy_d))
|
||||
unlockfile(lf)
|
||||
return returns
|
||||
|
||||
def verify_donestamp(self, ud, d):
|
||||
|
||||
@@ -84,13 +84,13 @@ class Osc(FetchMethod):
|
||||
Fetch url
|
||||
"""
|
||||
|
||||
logger.debug2("Fetch: checking for module directory '" + ud.moddir + "'")
|
||||
logger.debug(2, "Fetch: checking for module directory '" + ud.moddir + "'")
|
||||
|
||||
if os.access(os.path.join(d.getVar('OSCDIR'), ud.path, ud.module), os.R_OK):
|
||||
oscupdatecmd = self._buildosccommand(ud, d, "update")
|
||||
logger.info("Update "+ ud.url)
|
||||
# update sources there
|
||||
logger.debug("Running %s", oscupdatecmd)
|
||||
logger.debug(1, "Running %s", oscupdatecmd)
|
||||
bb.fetch2.check_network_access(d, oscupdatecmd, ud.url)
|
||||
runfetchcmd(oscupdatecmd, d, workdir=ud.moddir)
|
||||
else:
|
||||
@@ -98,7 +98,7 @@ class Osc(FetchMethod):
|
||||
logger.info("Fetch " + ud.url)
|
||||
# check out sources there
|
||||
bb.utils.mkdirhier(ud.pkgdir)
|
||||
logger.debug("Running %s", oscfetchcmd)
|
||||
logger.debug(1, "Running %s", oscfetchcmd)
|
||||
bb.fetch2.check_network_access(d, oscfetchcmd, ud.url)
|
||||
runfetchcmd(oscfetchcmd, d, workdir=ud.pkgdir)
|
||||
|
||||
|
||||
@@ -90,16 +90,16 @@ class Perforce(FetchMethod):
|
||||
p4port = d.getVar('P4PORT')
|
||||
|
||||
if p4port:
|
||||
logger.debug('Using recipe provided P4PORT: %s' % p4port)
|
||||
logger.debug(1, 'Using recipe provided P4PORT: %s' % p4port)
|
||||
ud.host = p4port
|
||||
else:
|
||||
logger.debug('Trying to use P4CONFIG to automatically set P4PORT...')
|
||||
logger.debug(1, 'Trying to use P4CONFIG to automatically set P4PORT...')
|
||||
ud.usingp4config = True
|
||||
p4cmd = '%s info | grep "Server address"' % ud.basecmd
|
||||
bb.fetch2.check_network_access(d, p4cmd, ud.url)
|
||||
ud.host = runfetchcmd(p4cmd, d, True)
|
||||
ud.host = ud.host.split(': ')[1].strip()
|
||||
logger.debug('Determined P4PORT to be: %s' % ud.host)
|
||||
logger.debug(1, 'Determined P4PORT to be: %s' % ud.host)
|
||||
if not ud.host:
|
||||
raise FetchError('Could not determine P4PORT from P4CONFIG')
|
||||
|
||||
@@ -119,7 +119,6 @@ class Perforce(FetchMethod):
|
||||
cleanedpath = ud.path.replace('/...', '').replace('/', '.')
|
||||
cleanedhost = ud.host.replace(':', '.')
|
||||
|
||||
cleanedmodule = ""
|
||||
# Merge the path and module into the final depot location
|
||||
if ud.module:
|
||||
if ud.module.find('/') == 0:
|
||||
@@ -134,7 +133,7 @@ class Perforce(FetchMethod):
|
||||
|
||||
ud.setup_revisions(d)
|
||||
|
||||
ud.localfile = d.expand('%s_%s_%s_%s.tar.gz' % (cleanedhost, cleanedpath, cleanedmodule, ud.revision))
|
||||
ud.localfile = d.expand('%s_%s_%s.tar.gz' % (cleanedhost, cleanedpath, ud.revision))
|
||||
|
||||
def _buildp4command(self, ud, d, command, depot_filename=None):
|
||||
"""
|
||||
@@ -208,7 +207,7 @@ class Perforce(FetchMethod):
|
||||
for filename in p4fileslist:
|
||||
item = filename.split(' - ')
|
||||
lastaction = item[1].split()
|
||||
logger.debug('File: %s Last Action: %s' % (item[0], lastaction[0]))
|
||||
logger.debug(1, 'File: %s Last Action: %s' % (item[0], lastaction[0]))
|
||||
if lastaction[0] == 'delete':
|
||||
continue
|
||||
filelist.append(item[0])
|
||||
@@ -255,7 +254,7 @@ class Perforce(FetchMethod):
|
||||
raise FetchError('Could not determine the latest perforce changelist')
|
||||
|
||||
tipcset = tip.split(' ')[1]
|
||||
logger.debug('p4 tip found to be changelist %s' % tipcset)
|
||||
logger.debug(1, 'p4 tip found to be changelist %s' % tipcset)
|
||||
return tipcset
|
||||
|
||||
def sortable_revision(self, ud, d, name):
|
||||
|
||||
@@ -47,7 +47,7 @@ class Repo(FetchMethod):
|
||||
"""Fetch url"""
|
||||
|
||||
if os.access(os.path.join(d.getVar("DL_DIR"), ud.localfile), os.R_OK):
|
||||
logger.debug("%s already exists (or was stashed). Skipping repo init / sync.", ud.localpath)
|
||||
logger.debug(1, "%s already exists (or was stashed). Skipping repo init / sync.", ud.localpath)
|
||||
return
|
||||
|
||||
repodir = d.getVar("REPODIR") or (d.getVar("DL_DIR") + "/repo")
|
||||
|
||||
@@ -86,7 +86,7 @@ class Svn(FetchMethod):
|
||||
if command == "info":
|
||||
svncmd = "%s info %s %s://%s/%s/" % (ud.basecmd, " ".join(options), proto, svnroot, ud.module)
|
||||
elif command == "log1":
|
||||
svncmd = "%s log --limit 1 --quiet %s %s://%s/%s/" % (ud.basecmd, " ".join(options), proto, svnroot, ud.module)
|
||||
svncmd = "%s log --limit 1 %s %s://%s/%s/" % (ud.basecmd, " ".join(options), proto, svnroot, ud.module)
|
||||
else:
|
||||
suffix = ""
|
||||
|
||||
@@ -116,7 +116,7 @@ class Svn(FetchMethod):
|
||||
def download(self, ud, d):
|
||||
"""Fetch url"""
|
||||
|
||||
logger.debug2("Fetch: checking for module directory '" + ud.moddir + "'")
|
||||
logger.debug(2, "Fetch: checking for module directory '" + ud.moddir + "'")
|
||||
|
||||
lf = bb.utils.lockfile(ud.svnlock)
|
||||
|
||||
@@ -129,7 +129,7 @@ class Svn(FetchMethod):
|
||||
runfetchcmd(ud.basecmd + " upgrade", d, workdir=ud.moddir)
|
||||
except FetchError:
|
||||
pass
|
||||
logger.debug("Running %s", svncmd)
|
||||
logger.debug(1, "Running %s", svncmd)
|
||||
bb.fetch2.check_network_access(d, svncmd, ud.url)
|
||||
runfetchcmd(svncmd, d, workdir=ud.moddir)
|
||||
else:
|
||||
@@ -137,7 +137,7 @@ class Svn(FetchMethod):
|
||||
logger.info("Fetch " + ud.url)
|
||||
# check out sources there
|
||||
bb.utils.mkdirhier(ud.pkgdir)
|
||||
logger.debug("Running %s", svncmd)
|
||||
logger.debug(1, "Running %s", svncmd)
|
||||
bb.fetch2.check_network_access(d, svncmd, ud.url)
|
||||
runfetchcmd(svncmd, d, workdir=ud.pkgdir)
|
||||
|
||||
|
||||
@@ -52,12 +52,6 @@ class WgetProgressHandler(bb.progress.LineFilterProgressHandler):
|
||||
|
||||
|
||||
class Wget(FetchMethod):
|
||||
|
||||
# CDNs like CloudFlare may do a 'browser integrity test' which can fail
|
||||
# with the standard wget/urllib User-Agent, so pretend to be a modern
|
||||
# browser.
|
||||
user_agent = "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:84.0) Gecko/20100101 Firefox/84.0"
|
||||
|
||||
"""Class to fetch urls via 'wget'"""
|
||||
def supports(self, ud, d):
|
||||
"""
|
||||
@@ -88,7 +82,7 @@ class Wget(FetchMethod):
|
||||
|
||||
progresshandler = WgetProgressHandler(d)
|
||||
|
||||
logger.debug2("Fetching %s using command '%s'" % (ud.url, command))
|
||||
logger.debug(2, "Fetching %s using command '%s'" % (ud.url, command))
|
||||
bb.fetch2.check_network_access(d, command, ud.url)
|
||||
runfetchcmd(command + ' --progress=dot -v', d, quiet, log=progresshandler, workdir=workdir)
|
||||
|
||||
@@ -303,7 +297,7 @@ class Wget(FetchMethod):
|
||||
# Some servers (FusionForge, as used on Alioth) require that the
|
||||
# optional Accept header is set.
|
||||
r.add_header("Accept", "*/*")
|
||||
r.add_header("User-Agent", self.user_agent)
|
||||
r.add_header("User-Agent", "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2.12) Gecko/20101027 Ubuntu/9.10 (karmic) Firefox/3.6.12")
|
||||
def add_basic_auth(login_str, request):
|
||||
'''Adds Basic auth to http request, pass in login:password as string'''
|
||||
import base64
|
||||
@@ -322,23 +316,15 @@ class Wget(FetchMethod):
|
||||
except (TypeError, ImportError, IOError, netrc.NetrcParseError):
|
||||
pass
|
||||
|
||||
with opener.open(r, timeout=30) as response:
|
||||
with opener.open(r) as response:
|
||||
pass
|
||||
except urllib.error.URLError as e:
|
||||
if try_again:
|
||||
logger.debug2("checkstatus: trying again")
|
||||
logger.debug(2, "checkstatus: trying again")
|
||||
return self.checkstatus(fetch, ud, d, False)
|
||||
else:
|
||||
# debug for now to avoid spamming the logs in e.g. remote sstate searches
|
||||
logger.debug2("checkstatus() urlopen failed: %s" % e)
|
||||
return False
|
||||
except ConnectionResetError as e:
|
||||
if try_again:
|
||||
logger.debug2("checkstatus: trying again")
|
||||
return self.checkstatus(fetch, ud, d, False)
|
||||
else:
|
||||
# debug for now to avoid spamming the logs in e.g. remote sstate searches
|
||||
logger.debug2("checkstatus() urlopen failed: %s" % e)
|
||||
logger.debug(2, "checkstatus() urlopen failed: %s" % e)
|
||||
return False
|
||||
return True
|
||||
|
||||
@@ -415,8 +401,9 @@ class Wget(FetchMethod):
|
||||
"""
|
||||
f = tempfile.NamedTemporaryFile()
|
||||
with tempfile.TemporaryDirectory(prefix="wget-index-") as workdir, tempfile.NamedTemporaryFile(dir=workdir, prefix="wget-listing-") as f:
|
||||
agent = "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2.12) Gecko/20101027 Ubuntu/9.10 (karmic) Firefox/3.6.12"
|
||||
fetchcmd = self.basecmd
|
||||
fetchcmd += " -O " + f.name + " --user-agent='" + self.user_agent + "' '" + uri + "'"
|
||||
fetchcmd += " -O " + f.name + " --user-agent='" + agent + "' '" + uri + "'"
|
||||
try:
|
||||
self._runwget(ud, d, fetchcmd, True, workdir=workdir)
|
||||
fetchresult = f.read()
|
||||
@@ -472,7 +459,7 @@ class Wget(FetchMethod):
|
||||
version_dir = ['', '', '']
|
||||
version = ['', '', '']
|
||||
|
||||
dirver_regex = re.compile(r"(?P<pfx>\D*)(?P<ver>(\d+[\.\-_])*(\d+))")
|
||||
dirver_regex = re.compile(r"(?P<pfx>\D*)(?P<ver>(\d+[\.\-_])+(\d+))")
|
||||
s = dirver_regex.search(dirver)
|
||||
if s:
|
||||
version_dir[1] = s.group('ver')
|
||||
|
||||
@@ -119,181 +119,178 @@ warnings.filterwarnings("ignore", category=ImportWarning)
|
||||
warnings.filterwarnings("ignore", category=DeprecationWarning, module="<string>$")
|
||||
warnings.filterwarnings("ignore", message="With-statements now directly support multiple context managers")
|
||||
|
||||
class BitBakeConfigParameters(cookerdata.ConfigParameters):
|
||||
|
||||
def create_bitbake_parser():
|
||||
parser = optparse.OptionParser(
|
||||
formatter=BitbakeHelpFormatter(),
|
||||
version="BitBake Build Tool Core version %s" % bb.__version__,
|
||||
usage="""%prog [options] [recipename/target recipe:do_task ...]
|
||||
def parseCommandLine(self, argv=sys.argv):
|
||||
parser = optparse.OptionParser(
|
||||
formatter=BitbakeHelpFormatter(),
|
||||
version="BitBake Build Tool Core version %s" % bb.__version__,
|
||||
usage="""%prog [options] [recipename/target recipe:do_task ...]
|
||||
|
||||
Executes the specified task (default is 'build') for a given set of target recipes (.bb files).
|
||||
It is assumed there is a conf/bblayers.conf available in cwd or in BBPATH which
|
||||
will provide the layer, BBFILES and other configuration information.""")
|
||||
|
||||
parser.add_option("-b", "--buildfile", action="store", dest="buildfile", default=None,
|
||||
help="Execute tasks from a specific .bb recipe directly. WARNING: Does "
|
||||
"not handle any dependencies from other recipes.")
|
||||
parser.add_option("-b", "--buildfile", action="store", dest="buildfile", default=None,
|
||||
help="Execute tasks from a specific .bb recipe directly. WARNING: Does "
|
||||
"not handle any dependencies from other recipes.")
|
||||
|
||||
parser.add_option("-k", "--continue", action="store_false", dest="abort", default=True,
|
||||
help="Continue as much as possible after an error. While the target that "
|
||||
"failed and anything depending on it cannot be built, as much as "
|
||||
"possible will be built before stopping.")
|
||||
parser.add_option("-k", "--continue", action="store_false", dest="abort", default=True,
|
||||
help="Continue as much as possible after an error. While the target that "
|
||||
"failed and anything depending on it cannot be built, as much as "
|
||||
"possible will be built before stopping.")
|
||||
|
||||
parser.add_option("-f", "--force", action="store_true", dest="force", default=False,
|
||||
help="Force the specified targets/task to run (invalidating any "
|
||||
"existing stamp file).")
|
||||
parser.add_option("-f", "--force", action="store_true", dest="force", default=False,
|
||||
help="Force the specified targets/task to run (invalidating any "
|
||||
"existing stamp file).")
|
||||
|
||||
parser.add_option("-c", "--cmd", action="store", dest="cmd",
|
||||
help="Specify the task to execute. The exact options available "
|
||||
"depend on the metadata. Some examples might be 'compile'"
|
||||
" or 'populate_sysroot' or 'listtasks' may give a list of "
|
||||
"the tasks available.")
|
||||
parser.add_option("-c", "--cmd", action="store", dest="cmd",
|
||||
help="Specify the task to execute. The exact options available "
|
||||
"depend on the metadata. Some examples might be 'compile'"
|
||||
" or 'populate_sysroot' or 'listtasks' may give a list of "
|
||||
"the tasks available.")
|
||||
|
||||
parser.add_option("-C", "--clear-stamp", action="store", dest="invalidate_stamp",
|
||||
help="Invalidate the stamp for the specified task such as 'compile' "
|
||||
"and then run the default task for the specified target(s).")
|
||||
parser.add_option("-C", "--clear-stamp", action="store", dest="invalidate_stamp",
|
||||
help="Invalidate the stamp for the specified task such as 'compile' "
|
||||
"and then run the default task for the specified target(s).")
|
||||
|
||||
parser.add_option("-r", "--read", action="append", dest="prefile", default=[],
|
||||
help="Read the specified file before bitbake.conf.")
|
||||
parser.add_option("-r", "--read", action="append", dest="prefile", default=[],
|
||||
help="Read the specified file before bitbake.conf.")
|
||||
|
||||
parser.add_option("-R", "--postread", action="append", dest="postfile", default=[],
|
||||
help="Read the specified file after bitbake.conf.")
|
||||
parser.add_option("-R", "--postread", action="append", dest="postfile", default=[],
|
||||
help="Read the specified file after bitbake.conf.")
|
||||
|
||||
parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False,
|
||||
help="Enable tracing of shell tasks (with 'set -x'). "
|
||||
"Also print bb.note(...) messages to stdout (in "
|
||||
"addition to writing them to ${T}/log.do_<task>).")
|
||||
parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False,
|
||||
help="Enable tracing of shell tasks (with 'set -x'). "
|
||||
"Also print bb.note(...) messages to stdout (in "
|
||||
"addition to writing them to ${T}/log.do_<task>).")
|
||||
|
||||
parser.add_option("-D", "--debug", action="count", dest="debug", default=0,
|
||||
help="Increase the debug level. You can specify this "
|
||||
"more than once. -D sets the debug level to 1, "
|
||||
"where only bb.debug(1, ...) messages are printed "
|
||||
"to stdout; -DD sets the debug level to 2, where "
|
||||
"both bb.debug(1, ...) and bb.debug(2, ...) "
|
||||
"messages are printed; etc. Without -D, no debug "
|
||||
"messages are printed. Note that -D only affects "
|
||||
"output to stdout. All debug messages are written "
|
||||
"to ${T}/log.do_taskname, regardless of the debug "
|
||||
"level.")
|
||||
parser.add_option("-D", "--debug", action="count", dest="debug", default=0,
|
||||
help="Increase the debug level. You can specify this "
|
||||
"more than once. -D sets the debug level to 1, "
|
||||
"where only bb.debug(1, ...) messages are printed "
|
||||
"to stdout; -DD sets the debug level to 2, where "
|
||||
"both bb.debug(1, ...) and bb.debug(2, ...) "
|
||||
"messages are printed; etc. Without -D, no debug "
|
||||
"messages are printed. Note that -D only affects "
|
||||
"output to stdout. All debug messages are written "
|
||||
"to ${T}/log.do_taskname, regardless of the debug "
|
||||
"level.")
|
||||
|
||||
parser.add_option("-q", "--quiet", action="count", dest="quiet", default=0,
|
||||
help="Output less log message data to the terminal. You can specify this more than once.")
|
||||
parser.add_option("-q", "--quiet", action="count", dest="quiet", default=0,
|
||||
help="Output less log message data to the terminal. You can specify this more than once.")
|
||||
|
||||
parser.add_option("-n", "--dry-run", action="store_true", dest="dry_run", default=False,
|
||||
help="Don't execute, just go through the motions.")
|
||||
parser.add_option("-n", "--dry-run", action="store_true", dest="dry_run", default=False,
|
||||
help="Don't execute, just go through the motions.")
|
||||
|
||||
parser.add_option("-S", "--dump-signatures", action="append", dest="dump_signatures",
|
||||
default=[], metavar="SIGNATURE_HANDLER",
|
||||
help="Dump out the signature construction information, with no task "
|
||||
"execution. The SIGNATURE_HANDLER parameter is passed to the "
|
||||
"handler. Two common values are none and printdiff but the handler "
|
||||
"may define more/less. none means only dump the signature, printdiff"
|
||||
" means compare the dumped signature with the cached one.")
|
||||
parser.add_option("-S", "--dump-signatures", action="append", dest="dump_signatures",
|
||||
default=[], metavar="SIGNATURE_HANDLER",
|
||||
help="Dump out the signature construction information, with no task "
|
||||
"execution. The SIGNATURE_HANDLER parameter is passed to the "
|
||||
"handler. Two common values are none and printdiff but the handler "
|
||||
"may define more/less. none means only dump the signature, printdiff"
|
||||
" means compare the dumped signature with the cached one.")
|
||||
|
||||
parser.add_option("-p", "--parse-only", action="store_true",
|
||||
dest="parse_only", default=False,
|
||||
help="Quit after parsing the BB recipes.")
|
||||
parser.add_option("-p", "--parse-only", action="store_true",
|
||||
dest="parse_only", default=False,
|
||||
help="Quit after parsing the BB recipes.")
|
||||
|
||||
parser.add_option("-s", "--show-versions", action="store_true",
|
||||
dest="show_versions", default=False,
|
||||
help="Show current and preferred versions of all recipes.")
|
||||
parser.add_option("-s", "--show-versions", action="store_true",
|
||||
dest="show_versions", default=False,
|
||||
help="Show current and preferred versions of all recipes.")
|
||||
|
||||
parser.add_option("-e", "--environment", action="store_true",
|
||||
dest="show_environment", default=False,
|
||||
help="Show the global or per-recipe environment complete with information"
|
||||
" about where variables were set/changed.")
|
||||
parser.add_option("-e", "--environment", action="store_true",
|
||||
dest="show_environment", default=False,
|
||||
help="Show the global or per-recipe environment complete with information"
|
||||
" about where variables were set/changed.")
|
||||
|
||||
parser.add_option("-g", "--graphviz", action="store_true", dest="dot_graph", default=False,
|
||||
help="Save dependency tree information for the specified "
|
||||
"targets in the dot syntax.")
|
||||
parser.add_option("-g", "--graphviz", action="store_true", dest="dot_graph", default=False,
|
||||
help="Save dependency tree information for the specified "
|
||||
"targets in the dot syntax.")
|
||||
|
||||
parser.add_option("-I", "--ignore-deps", action="append",
|
||||
dest="extra_assume_provided", default=[],
|
||||
help="Assume these dependencies don't exist and are already provided "
|
||||
"(equivalent to ASSUME_PROVIDED). Useful to make dependency "
|
||||
"graphs more appealing")
|
||||
parser.add_option("-I", "--ignore-deps", action="append",
|
||||
dest="extra_assume_provided", default=[],
|
||||
help="Assume these dependencies don't exist and are already provided "
|
||||
"(equivalent to ASSUME_PROVIDED). Useful to make dependency "
|
||||
"graphs more appealing")
|
||||
|
||||
parser.add_option("-l", "--log-domains", action="append", dest="debug_domains", default=[],
|
||||
help="Show debug logging for the specified logging domains")
|
||||
parser.add_option("-l", "--log-domains", action="append", dest="debug_domains", default=[],
|
||||
help="Show debug logging for the specified logging domains")
|
||||
|
||||
parser.add_option("-P", "--profile", action="store_true", dest="profile", default=False,
|
||||
help="Profile the command and save reports.")
|
||||
parser.add_option("-P", "--profile", action="store_true", dest="profile", default=False,
|
||||
help="Profile the command and save reports.")
|
||||
|
||||
# @CHOICES@ is substituted out by BitbakeHelpFormatter above
|
||||
parser.add_option("-u", "--ui", action="store", dest="ui",
|
||||
default=os.environ.get('BITBAKE_UI', 'knotty'),
|
||||
help="The user interface to use (@CHOICES@ - default %default).")
|
||||
# @CHOICES@ is substituted out by BitbakeHelpFormatter above
|
||||
parser.add_option("-u", "--ui", action="store", dest="ui",
|
||||
default=os.environ.get('BITBAKE_UI', 'knotty'),
|
||||
help="The user interface to use (@CHOICES@ - default %default).")
|
||||
|
||||
parser.add_option("", "--token", action="store", dest="xmlrpctoken",
|
||||
default=os.environ.get("BBTOKEN"),
|
||||
help="Specify the connection token to be used when connecting "
|
||||
"to a remote server.")
|
||||
parser.add_option("", "--token", action="store", dest="xmlrpctoken",
|
||||
default=os.environ.get("BBTOKEN"),
|
||||
help="Specify the connection token to be used when connecting "
|
||||
"to a remote server.")
|
||||
|
||||
parser.add_option("", "--revisions-changed", action="store_true",
|
||||
dest="revisions_changed", default=False,
|
||||
help="Set the exit code depending on whether upstream floating "
|
||||
"revisions have changed or not.")
|
||||
parser.add_option("", "--revisions-changed", action="store_true",
|
||||
dest="revisions_changed", default=False,
|
||||
help="Set the exit code depending on whether upstream floating "
|
||||
"revisions have changed or not.")
|
||||
|
||||
parser.add_option("", "--server-only", action="store_true",
|
||||
dest="server_only", default=False,
|
||||
help="Run bitbake without a UI, only starting a server "
|
||||
"(cooker) process.")
|
||||
parser.add_option("", "--server-only", action="store_true",
|
||||
dest="server_only", default=False,
|
||||
help="Run bitbake without a UI, only starting a server "
|
||||
"(cooker) process.")
|
||||
|
||||
parser.add_option("-B", "--bind", action="store", dest="bind", default=False,
|
||||
help="The name/address for the bitbake xmlrpc server to bind to.")
|
||||
parser.add_option("-B", "--bind", action="store", dest="bind", default=False,
|
||||
help="The name/address for the bitbake xmlrpc server to bind to.")
|
||||
|
||||
parser.add_option("-T", "--idle-timeout", type=float, dest="server_timeout",
|
||||
default=os.getenv("BB_SERVER_TIMEOUT"),
|
||||
help="Set timeout to unload bitbake server due to inactivity, "
|
||||
"set to -1 means no unload, "
|
||||
"default: Environment variable BB_SERVER_TIMEOUT.")
|
||||
parser.add_option("-T", "--idle-timeout", type=float, dest="server_timeout",
|
||||
default=os.getenv("BB_SERVER_TIMEOUT"),
|
||||
help="Set timeout to unload bitbake server due to inactivity, "
|
||||
"set to -1 means no unload, "
|
||||
"default: Environment variable BB_SERVER_TIMEOUT.")
|
||||
|
||||
parser.add_option("", "--no-setscene", action="store_true",
|
||||
dest="nosetscene", default=False,
|
||||
help="Do not run any setscene tasks. sstate will be ignored and "
|
||||
"everything needed, built.")
|
||||
parser.add_option("", "--no-setscene", action="store_true",
|
||||
dest="nosetscene", default=False,
|
||||
help="Do not run any setscene tasks. sstate will be ignored and "
|
||||
"everything needed, built.")
|
||||
|
||||
parser.add_option("", "--skip-setscene", action="store_true",
|
||||
dest="skipsetscene", default=False,
|
||||
help="Skip setscene tasks if they would be executed. Tasks previously "
|
||||
"restored from sstate will be kept, unlike --no-setscene")
|
||||
parser.add_option("", "--skip-setscene", action="store_true",
|
||||
dest="skipsetscene", default=False,
|
||||
help="Skip setscene tasks if they would be executed. Tasks previously "
|
||||
"restored from sstate will be kept, unlike --no-setscene")
|
||||
|
||||
parser.add_option("", "--setscene-only", action="store_true",
|
||||
dest="setsceneonly", default=False,
|
||||
help="Only run setscene tasks, don't run any real tasks.")
|
||||
parser.add_option("", "--setscene-only", action="store_true",
|
||||
dest="setsceneonly", default=False,
|
||||
help="Only run setscene tasks, don't run any real tasks.")
|
||||
|
||||
parser.add_option("", "--remote-server", action="store", dest="remote_server",
|
||||
default=os.environ.get("BBSERVER"),
|
||||
help="Connect to the specified server.")
|
||||
parser.add_option("", "--remote-server", action="store", dest="remote_server",
|
||||
default=os.environ.get("BBSERVER"),
|
||||
help="Connect to the specified server.")
|
||||
|
||||
parser.add_option("-m", "--kill-server", action="store_true",
|
||||
dest="kill_server", default=False,
|
||||
help="Terminate any running bitbake server.")
|
||||
parser.add_option("-m", "--kill-server", action="store_true",
|
||||
dest="kill_server", default=False,
|
||||
help="Terminate any running bitbake server.")
|
||||
|
||||
parser.add_option("", "--observe-only", action="store_true",
|
||||
dest="observe_only", default=False,
|
||||
help="Connect to a server as an observing-only client.")
|
||||
parser.add_option("", "--observe-only", action="store_true",
|
||||
dest="observe_only", default=False,
|
||||
help="Connect to a server as an observing-only client.")
|
||||
|
||||
parser.add_option("", "--status-only", action="store_true",
|
||||
dest="status_only", default=False,
|
||||
help="Check the status of the remote bitbake server.")
|
||||
parser.add_option("", "--status-only", action="store_true",
|
||||
dest="status_only", default=False,
|
||||
help="Check the status of the remote bitbake server.")
|
||||
|
||||
parser.add_option("-w", "--write-log", action="store", dest="writeeventlog",
|
||||
default=os.environ.get("BBEVENTLOG"),
|
||||
help="Writes the event log of the build to a bitbake event json file. "
|
||||
"Use '' (empty string) to assign the name automatically.")
|
||||
parser.add_option("-w", "--write-log", action="store", dest="writeeventlog",
|
||||
default=os.environ.get("BBEVENTLOG"),
|
||||
help="Writes the event log of the build to a bitbake event json file. "
|
||||
"Use '' (empty string) to assign the name automatically.")
|
||||
|
||||
parser.add_option("", "--runall", action="append", dest="runall",
|
||||
help="Run the specified task for any recipe in the taskgraph of the specified target (even if it wouldn't otherwise have run).")
|
||||
parser.add_option("", "--runall", action="append", dest="runall",
|
||||
help="Run the specified task for any recipe in the taskgraph of the specified target (even if it wouldn't otherwise have run).")
|
||||
|
||||
parser.add_option("", "--runonly", action="append", dest="runonly",
|
||||
help="Run only the specified task within the taskgraph of the specified targets (and any task dependencies those tasks may have).")
|
||||
return parser
|
||||
parser.add_option("", "--runonly", action="append", dest="runonly",
|
||||
help="Run only the specified task within the taskgraph of the specified targets (and any task dependencies those tasks may have).")
|
||||
|
||||
|
||||
class BitBakeConfigParameters(cookerdata.ConfigParameters):
|
||||
def parseCommandLine(self, argv=sys.argv):
|
||||
parser = create_bitbake_parser()
|
||||
options, targets = parser.parse_args(argv)
|
||||
|
||||
if options.quiet and options.verbose:
|
||||
@@ -469,7 +466,7 @@ def setup_bitbake(configParams, extrafeatures=None):
|
||||
logger.info("Retrying server connection (#%d)..." % tryno)
|
||||
else:
|
||||
logger.info("Retrying server connection (#%d)... (%s)" % (tryno, traceback.format_exc()))
|
||||
|
||||
|
||||
if not retries:
|
||||
bb.fatal("Unable to connect to bitbake server, or start one (server startup failures would be in bitbake-cookerdaemon.log).")
|
||||
bb.event.print_ui_queue()
|
||||
|
||||
@@ -59,7 +59,7 @@ def getMountedDev(path):
|
||||
pass
|
||||
return None
|
||||
|
||||
def getDiskData(BBDirs):
|
||||
def getDiskData(BBDirs, configuration):
|
||||
|
||||
"""Prepare disk data for disk space monitor"""
|
||||
|
||||
@@ -168,7 +168,7 @@ class diskMonitor:
|
||||
|
||||
BBDirs = configuration.getVar("BB_DISKMON_DIRS") or None
|
||||
if BBDirs:
|
||||
self.devDict = getDiskData(BBDirs)
|
||||
self.devDict = getDiskData(BBDirs, configuration)
|
||||
if self.devDict:
|
||||
self.spaceInterval, self.inodeInterval = getInterval(configuration)
|
||||
if self.spaceInterval and self.inodeInterval:
|
||||
|
||||
@@ -278,7 +278,7 @@ def setLoggingConfig(defaultconfig, userconfigfile=None):
|
||||
with open(os.path.normpath(userconfigfile), 'r') as f:
|
||||
if userconfigfile.endswith('.yml') or userconfigfile.endswith('.yaml'):
|
||||
import yaml
|
||||
userconfig = yaml.safe_load(f)
|
||||
userconfig = yaml.load(f)
|
||||
elif userconfigfile.endswith('.json') or userconfigfile.endswith('.cfg'):
|
||||
import json
|
||||
userconfig = json.load(f)
|
||||
|
||||
@@ -71,7 +71,7 @@ def update_mtime(f):
|
||||
|
||||
def update_cache(f):
|
||||
if f in __mtime_cache:
|
||||
logger.debug("Updating mtime cache for %s" % f)
|
||||
logger.debug(1, "Updating mtime cache for %s" % f)
|
||||
update_mtime(f)
|
||||
|
||||
def clear_cache():
|
||||
|
||||
@@ -34,7 +34,7 @@ class IncludeNode(AstNode):
|
||||
Include the file and evaluate the statements
|
||||
"""
|
||||
s = data.expand(self.what_file)
|
||||
logger.debug2("CONF %s:%s: including %s", self.filename, self.lineno, s)
|
||||
logger.debug(2, "CONF %s:%s: including %s", self.filename, self.lineno, s)
|
||||
|
||||
# TODO: Cache those includes... maybe not here though
|
||||
if self.force:
|
||||
@@ -97,7 +97,6 @@ class DataNode(AstNode):
|
||||
def eval(self, data):
|
||||
groupd = self.groupd
|
||||
key = groupd["var"]
|
||||
key = key.replace(":", "_")
|
||||
loginfo = {
|
||||
'variable': key,
|
||||
'file': self.filename,
|
||||
@@ -208,7 +207,6 @@ class ExportFuncsNode(AstNode):
|
||||
def eval(self, data):
|
||||
|
||||
for func in self.n:
|
||||
func = func.replace(":", "_")
|
||||
calledfunc = self.classname + "_" + func
|
||||
|
||||
if data.getVar(func, False) and not data.getVarFlag(func, 'export_func', False):
|
||||
@@ -337,7 +335,7 @@ def finalize(fn, d, variant = None):
|
||||
if not handlerfn:
|
||||
bb.fatal("Undefined event handler function '%s'" % var)
|
||||
handlerln = int(d.getVarFlag(var, "lineno", False))
|
||||
bb.event.register(var, d.getVar(var, False), (d.getVarFlag(var, "eventmask") or "").split(), handlerfn, handlerln, data=d)
|
||||
bb.event.register(var, d.getVar(var, False), (d.getVarFlag(var, "eventmask") or "").split(), handlerfn, handlerln)
|
||||
|
||||
bb.event.fire(bb.event.RecipePreFinalise(fn), d)
|
||||
|
||||
@@ -378,7 +376,7 @@ def _create_variants(datastores, names, function, onlyfinalise):
|
||||
def multi_finalize(fn, d):
|
||||
appends = (d.getVar("__BBAPPEND") or "").split()
|
||||
for append in appends:
|
||||
logger.debug("Appending .bbappend file %s to %s", append, fn)
|
||||
logger.debug(1, "Appending .bbappend file %s to %s", append, fn)
|
||||
bb.parse.BBHandler.handle(append, d, True)
|
||||
|
||||
onlyfinalise = d.getVar("__ONLYFINALISE", False)
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
#
|
||||
|
||||
import re, bb, os
|
||||
import bb.build, bb.utils, bb.data_smart
|
||||
import bb.build, bb.utils
|
||||
|
||||
from . import ConfHandler
|
||||
from .. import resolve_file, ast, logger, ParseError
|
||||
@@ -22,7 +22,7 @@ from .ConfHandler import include, init
|
||||
# For compatibility
|
||||
bb.deprecate_import(__name__, "bb.parse", ["vars_from_file"])
|
||||
|
||||
__func_start_regexp__ = re.compile(r"(((?P<py>python(?=(\s|\()))|(?P<fr>fakeroot(?=\s)))\s*)*(?P<func>[\w\.\-\+\{\}\$:]+)?\s*\(\s*\)\s*{$" )
|
||||
__func_start_regexp__ = re.compile(r"(((?P<py>python)|(?P<fr>fakeroot))\s*)*(?P<func>[\w\.\-\+\{\}\$]+)?\s*\(\s*\)\s*{$" )
|
||||
__inherit_regexp__ = re.compile(r"inherit\s+(.+)" )
|
||||
__export_func_regexp__ = re.compile(r"EXPORT_FUNCTIONS\s+(.+)" )
|
||||
__addtask_regexp__ = re.compile(r"addtask\s+(?P<func>\w+)\s*((before\s*(?P<before>((.*(?=after))|(.*))))|(after\s*(?P<after>((.*(?=before))|(.*)))))*")
|
||||
@@ -60,7 +60,7 @@ def inherit(files, fn, lineno, d):
|
||||
file = abs_fn
|
||||
|
||||
if not file in __inherit_cache:
|
||||
logger.debug("Inheriting %s (from %s:%d)" % (file, fn, lineno))
|
||||
logger.debug(1, "Inheriting %s (from %s:%d)" % (file, fn, lineno))
|
||||
__inherit_cache.append( file )
|
||||
d.setVar('__inherit_cache', __inherit_cache)
|
||||
include(fn, file, lineno, d, "inherit")
|
||||
@@ -233,10 +233,6 @@ def feeder(lineno, s, fn, root, statements, eof=False):
|
||||
if taskexpression.count(word) > 1:
|
||||
logger.warning("addtask contained multiple '%s' keywords, only one is supported" % word)
|
||||
|
||||
# Check and warn for having task with exprssion as part of task name
|
||||
for te in taskexpression:
|
||||
if any( ( "%s_" % keyword ) in te for keyword in bb.data_smart.__setvar_keyword__ ):
|
||||
raise ParseError("Task name '%s' contains a keyword which is not recommended/supported.\nPlease rename the task not to include the keyword.\n%s" % (te, ("\n".join(map(str, bb.data_smart.__setvar_keyword__)))), fn)
|
||||
ast.handleAddTask(statements, fn, lineno, m)
|
||||
return
|
||||
|
||||
|
||||
@@ -20,7 +20,7 @@ from bb.parse import ParseError, resolve_file, ast, logger, handle
|
||||
__config_regexp__ = re.compile( r"""
|
||||
^
|
||||
(?P<exp>export\s+)?
|
||||
(?P<var>[a-zA-Z0-9\-_+.${}/~:]+?)
|
||||
(?P<var>[a-zA-Z0-9\-_+.${}/~]+?)
|
||||
(\[(?P<flag>[a-zA-Z0-9\-_+.]+)\])?
|
||||
|
||||
\s* (
|
||||
@@ -95,7 +95,7 @@ def include_single_file(parentfn, fn, lineno, data, error_out):
|
||||
if exc.errno == errno.ENOENT:
|
||||
if error_out:
|
||||
raise ParseError("Could not %s file %s" % (error_out, fn), parentfn, lineno)
|
||||
logger.debug2("CONF file '%s' not found", fn)
|
||||
logger.debug(2, "CONF file '%s' not found", fn)
|
||||
else:
|
||||
if error_out:
|
||||
raise ParseError("Could not %s file %s: %s" % (error_out, fn, exc.strerror), parentfn, lineno)
|
||||
|
||||
@@ -12,7 +12,6 @@ currently, providing a key/value store accessed by 'domain'.
|
||||
#
|
||||
|
||||
import collections
|
||||
import collections.abc
|
||||
import contextlib
|
||||
import functools
|
||||
import logging
|
||||
@@ -20,7 +19,7 @@ import os.path
|
||||
import sqlite3
|
||||
import sys
|
||||
import warnings
|
||||
from collections.abc import Mapping
|
||||
from collections import Mapping
|
||||
|
||||
sqlversion = sqlite3.sqlite_version_info
|
||||
if sqlversion[0] < 3 or (sqlversion[0] == 3 and sqlversion[1] < 3):
|
||||
@@ -30,7 +29,7 @@ if sqlversion[0] < 3 or (sqlversion[0] == 3 and sqlversion[1] < 3):
|
||||
logger = logging.getLogger("BitBake.PersistData")
|
||||
|
||||
@functools.total_ordering
|
||||
class SQLTable(collections.abc.MutableMapping):
|
||||
class SQLTable(collections.MutableMapping):
|
||||
class _Decorators(object):
|
||||
@staticmethod
|
||||
def retry(*, reconnect=True):
|
||||
@@ -249,7 +248,7 @@ class PersistData(object):
|
||||
stacklevel=2)
|
||||
|
||||
self.data = persist(d)
|
||||
logger.debug("Using '%s' as the persistent data cache",
|
||||
logger.debug(1, "Using '%s' as the persistent data cache",
|
||||
self.data.filename)
|
||||
|
||||
def addDomain(self, domain):
|
||||
|
||||
@@ -60,7 +60,7 @@ class Popen(subprocess.Popen):
|
||||
"close_fds": True,
|
||||
"preexec_fn": subprocess_setup,
|
||||
"stdout": subprocess.PIPE,
|
||||
"stderr": subprocess.PIPE,
|
||||
"stderr": subprocess.STDOUT,
|
||||
"stdin": subprocess.PIPE,
|
||||
"shell": False,
|
||||
}
|
||||
@@ -181,8 +181,5 @@ def run(cmd, input=None, log=None, extrafiles=None, **options):
|
||||
stderr = stderr.decode("utf-8")
|
||||
|
||||
if pipe.returncode != 0:
|
||||
if log:
|
||||
# Don't duplicate the output in the exception if logging it
|
||||
raise ExecutionError(cmd, pipe.returncode, None, None)
|
||||
raise ExecutionError(cmd, pipe.returncode, stdout, stderr)
|
||||
return stdout, stderr
|
||||
|
||||
@@ -38,17 +38,16 @@ def findProviders(cfgData, dataCache, pkg_pn = None):
|
||||
localdata = data.createCopy(cfgData)
|
||||
bb.data.expandKeys(localdata)
|
||||
|
||||
required = {}
|
||||
preferred_versions = {}
|
||||
latest_versions = {}
|
||||
|
||||
for pn in pkg_pn:
|
||||
(last_ver, last_file, pref_ver, pref_file, req) = findBestProvider(pn, localdata, dataCache, pkg_pn)
|
||||
(last_ver, last_file, pref_ver, pref_file) = findBestProvider(pn, localdata, dataCache, pkg_pn)
|
||||
preferred_versions[pn] = (pref_ver, pref_file)
|
||||
latest_versions[pn] = (last_ver, last_file)
|
||||
required[pn] = req
|
||||
|
||||
return (latest_versions, preferred_versions, required)
|
||||
return (latest_versions, preferred_versions)
|
||||
|
||||
|
||||
def allProviders(dataCache):
|
||||
"""
|
||||
@@ -60,6 +59,7 @@ def allProviders(dataCache):
|
||||
all_providers[pn].append((ver, fn))
|
||||
return all_providers
|
||||
|
||||
|
||||
def sortPriorities(pn, dataCache, pkg_pn = None):
|
||||
"""
|
||||
Reorder pkg_pn by file priority and default preference
|
||||
@@ -87,21 +87,6 @@ def sortPriorities(pn, dataCache, pkg_pn = None):
|
||||
|
||||
return tmp_pn
|
||||
|
||||
def versionVariableMatch(cfgData, keyword, pn):
|
||||
"""
|
||||
Return the value of the <keyword>_VERSION variable if set.
|
||||
"""
|
||||
|
||||
# pn can contain '_', e.g. gcc-cross-x86_64 and an override cannot
|
||||
# hence we do this manually rather than use OVERRIDES
|
||||
ver = cfgData.getVar("%s_VERSION_pn-%s" % (keyword, pn))
|
||||
if not ver:
|
||||
ver = cfgData.getVar("%s_VERSION_%s" % (keyword, pn))
|
||||
if not ver:
|
||||
ver = cfgData.getVar("%s_VERSION" % keyword)
|
||||
|
||||
return ver
|
||||
|
||||
def preferredVersionMatch(pe, pv, pr, preferred_e, preferred_v, preferred_r):
|
||||
"""
|
||||
Check if the version pe,pv,pr is the preferred one.
|
||||
@@ -117,28 +102,19 @@ def preferredVersionMatch(pe, pv, pr, preferred_e, preferred_v, preferred_r):
|
||||
|
||||
def findPreferredProvider(pn, cfgData, dataCache, pkg_pn = None, item = None):
|
||||
"""
|
||||
Find the first provider in pkg_pn with REQUIRED_VERSION or PREFERRED_VERSION set.
|
||||
Find the first provider in pkg_pn with a PREFERRED_VERSION set.
|
||||
"""
|
||||
|
||||
preferred_file = None
|
||||
preferred_ver = None
|
||||
required = False
|
||||
|
||||
required_v = versionVariableMatch(cfgData, "REQUIRED", pn)
|
||||
preferred_v = versionVariableMatch(cfgData, "PREFERRED", pn)
|
||||
|
||||
itemstr = ""
|
||||
if item:
|
||||
itemstr = " (for item %s)" % item
|
||||
|
||||
if required_v is not None:
|
||||
if preferred_v is not None:
|
||||
logger.warn("REQUIRED_VERSION and PREFERRED_VERSION for package %s%s are both set using REQUIRED_VERSION %s", pn, itemstr, required_v)
|
||||
else:
|
||||
logger.debug("REQUIRED_VERSION is set for package %s%s", pn, itemstr)
|
||||
# REQUIRED_VERSION always takes precedence over PREFERRED_VERSION
|
||||
preferred_v = required_v
|
||||
required = True
|
||||
# pn can contain '_', e.g. gcc-cross-x86_64 and an override cannot
|
||||
# hence we do this manually rather than use OVERRIDES
|
||||
preferred_v = cfgData.getVar("PREFERRED_VERSION_pn-%s" % pn)
|
||||
if not preferred_v:
|
||||
preferred_v = cfgData.getVar("PREFERRED_VERSION_%s" % pn)
|
||||
if not preferred_v:
|
||||
preferred_v = cfgData.getVar("PREFERRED_VERSION")
|
||||
|
||||
if preferred_v:
|
||||
m = re.match(r'(\d+:)*(.*)(_.*)*', preferred_v)
|
||||
@@ -171,9 +147,11 @@ def findPreferredProvider(pn, cfgData, dataCache, pkg_pn = None, item = None):
|
||||
pv_str = preferred_v
|
||||
if not (preferred_e is None):
|
||||
pv_str = '%s:%s' % (preferred_e, pv_str)
|
||||
itemstr = ""
|
||||
if item:
|
||||
itemstr = " (for item %s)" % item
|
||||
if preferred_file is None:
|
||||
if not required:
|
||||
logger.warn("preferred version %s of %s not available%s", pv_str, pn, itemstr)
|
||||
logger.info("preferred version %s of %s not available%s", pv_str, pn, itemstr)
|
||||
available_vers = []
|
||||
for file_set in pkg_pn:
|
||||
for f in file_set:
|
||||
@@ -185,16 +163,12 @@ def findPreferredProvider(pn, cfgData, dataCache, pkg_pn = None, item = None):
|
||||
available_vers.append(ver_str)
|
||||
if available_vers:
|
||||
available_vers.sort()
|
||||
logger.warn("versions of %s available: %s", pn, ' '.join(available_vers))
|
||||
if required:
|
||||
logger.error("required version %s of %s not available%s", pv_str, pn, itemstr)
|
||||
logger.info("versions of %s available: %s", pn, ' '.join(available_vers))
|
||||
else:
|
||||
if required:
|
||||
logger.debug("selecting %s as REQUIRED_VERSION %s of package %s%s", preferred_file, pv_str, pn, itemstr)
|
||||
else:
|
||||
logger.debug("selecting %s as PREFERRED_VERSION %s of package %s%s", preferred_file, pv_str, pn, itemstr)
|
||||
logger.debug(1, "selecting %s as PREFERRED_VERSION %s of package %s%s", preferred_file, pv_str, pn, itemstr)
|
||||
|
||||
return (preferred_ver, preferred_file)
|
||||
|
||||
return (preferred_ver, preferred_file, required)
|
||||
|
||||
def findLatestProvider(pn, cfgData, dataCache, file_set):
|
||||
"""
|
||||
@@ -215,6 +189,7 @@ def findLatestProvider(pn, cfgData, dataCache, file_set):
|
||||
|
||||
return (latest, latest_f)
|
||||
|
||||
|
||||
def findBestProvider(pn, cfgData, dataCache, pkg_pn = None, item = None):
|
||||
"""
|
||||
If there is a PREFERRED_VERSION, find the highest-priority bbfile
|
||||
@@ -223,16 +198,17 @@ def findBestProvider(pn, cfgData, dataCache, pkg_pn = None, item = None):
|
||||
"""
|
||||
|
||||
sortpkg_pn = sortPriorities(pn, dataCache, pkg_pn)
|
||||
# Find the highest priority provider with a REQUIRED_VERSION or PREFERRED_VERSION set
|
||||
(preferred_ver, preferred_file, required) = findPreferredProvider(pn, cfgData, dataCache, sortpkg_pn, item)
|
||||
# Find the highest priority provider with a PREFERRED_VERSION set
|
||||
(preferred_ver, preferred_file) = findPreferredProvider(pn, cfgData, dataCache, sortpkg_pn, item)
|
||||
# Find the latest version of the highest priority provider
|
||||
(latest, latest_f) = findLatestProvider(pn, cfgData, dataCache, sortpkg_pn[0])
|
||||
|
||||
if not required and preferred_file is None:
|
||||
if preferred_file is None:
|
||||
preferred_file = latest_f
|
||||
preferred_ver = latest
|
||||
|
||||
return (latest, latest_f, preferred_ver, preferred_file, required)
|
||||
return (latest, latest_f, preferred_ver, preferred_file)
|
||||
|
||||
|
||||
def _filterProviders(providers, item, cfgData, dataCache):
|
||||
"""
|
||||
@@ -256,15 +232,12 @@ def _filterProviders(providers, item, cfgData, dataCache):
|
||||
pkg_pn[pn] = []
|
||||
pkg_pn[pn].append(p)
|
||||
|
||||
logger.debug("providers for %s are: %s", item, list(sorted(pkg_pn.keys())))
|
||||
logger.debug(1, "providers for %s are: %s", item, list(sorted(pkg_pn.keys())))
|
||||
|
||||
# First add REQUIRED_VERSIONS or PREFERRED_VERSIONS
|
||||
# First add PREFERRED_VERSIONS
|
||||
for pn in sorted(pkg_pn):
|
||||
sortpkg_pn[pn] = sortPriorities(pn, dataCache, pkg_pn)
|
||||
preferred_ver, preferred_file, required = findPreferredProvider(pn, cfgData, dataCache, sortpkg_pn[pn], item)
|
||||
if required and preferred_file is None:
|
||||
return eligible
|
||||
preferred_versions[pn] = (preferred_ver, preferred_file)
|
||||
preferred_versions[pn] = findPreferredProvider(pn, cfgData, dataCache, sortpkg_pn[pn], item)
|
||||
if preferred_versions[pn][1]:
|
||||
eligible.append(preferred_versions[pn][1])
|
||||
|
||||
@@ -275,8 +248,9 @@ def _filterProviders(providers, item, cfgData, dataCache):
|
||||
preferred_versions[pn] = findLatestProvider(pn, cfgData, dataCache, sortpkg_pn[pn][0])
|
||||
eligible.append(preferred_versions[pn][1])
|
||||
|
||||
if not eligible:
|
||||
return eligible
|
||||
if len(eligible) == 0:
|
||||
logger.error("no eligible providers for %s", item)
|
||||
return 0
|
||||
|
||||
# If pn == item, give it a slight default preference
|
||||
# This means PREFERRED_PROVIDER_foobar defaults to foobar if available
|
||||
@@ -292,6 +266,7 @@ def _filterProviders(providers, item, cfgData, dataCache):
|
||||
|
||||
return eligible
|
||||
|
||||
|
||||
def filterProviders(providers, item, cfgData, dataCache):
|
||||
"""
|
||||
Take a list of providers and filter/reorder according to the
|
||||
@@ -316,7 +291,7 @@ def filterProviders(providers, item, cfgData, dataCache):
|
||||
foundUnique = True
|
||||
break
|
||||
|
||||
logger.debug("sorted providers for %s are: %s", item, eligible)
|
||||
logger.debug(1, "sorted providers for %s are: %s", item, eligible)
|
||||
|
||||
return eligible, foundUnique
|
||||
|
||||
@@ -358,7 +333,7 @@ def filterProvidersRunTime(providers, item, cfgData, dataCache):
|
||||
provides = dataCache.pn_provides[pn]
|
||||
for provide in provides:
|
||||
prefervar = cfgData.getVar('PREFERRED_PROVIDER_%s' % provide)
|
||||
#logger.debug("checking PREFERRED_PROVIDER_%s (value %s) against %s", provide, prefervar, pns.keys())
|
||||
#logger.debug(1, "checking PREFERRED_PROVIDER_%s (value %s) against %s", provide, prefervar, pns.keys())
|
||||
if prefervar in pns and pns[prefervar] not in preferred:
|
||||
var = "PREFERRED_PROVIDER_%s = %s" % (provide, prefervar)
|
||||
logger.verbose("selecting %s to satisfy runtime %s due to %s", prefervar, item, var)
|
||||
@@ -374,7 +349,7 @@ def filterProvidersRunTime(providers, item, cfgData, dataCache):
|
||||
if numberPreferred > 1:
|
||||
logger.error("Trying to resolve runtime dependency %s resulted in conflicting PREFERRED_PROVIDER entries being found.\nThe providers found were: %s\nThe PREFERRED_PROVIDER entries resulting in this conflict were: %s. You could set PREFERRED_RPROVIDER_%s" % (item, preferred, preferred_vars, item))
|
||||
|
||||
logger.debug("sorted runtime providers for %s are: %s", item, eligible)
|
||||
logger.debug(1, "sorted runtime providers for %s are: %s", item, eligible)
|
||||
|
||||
return eligible, numberPreferred
|
||||
|
||||
@@ -409,10 +384,11 @@ def getRuntimeProviders(dataCache, rdepend):
|
||||
regexp_cache[pattern] = regexp
|
||||
if regexp.match(rdepend):
|
||||
rproviders += dataCache.packages_dynamic[pattern]
|
||||
logger.debug("Assuming %s is a dynamic package, but it may not exist" % rdepend)
|
||||
logger.debug(1, "Assuming %s is a dynamic package, but it may not exist" % rdepend)
|
||||
|
||||
return rproviders
|
||||
|
||||
|
||||
def buildWorldTargetList(dataCache, task=None):
|
||||
"""
|
||||
Build package list for "bitbake world"
|
||||
@@ -420,22 +396,22 @@ def buildWorldTargetList(dataCache, task=None):
|
||||
if dataCache.world_target:
|
||||
return
|
||||
|
||||
logger.debug("collating packages for \"world\"")
|
||||
logger.debug(1, "collating packages for \"world\"")
|
||||
for f in dataCache.possible_world:
|
||||
terminal = True
|
||||
pn = dataCache.pkg_fn[f]
|
||||
if task and task not in dataCache.task_deps[f]['tasks']:
|
||||
logger.debug2("World build skipping %s as task %s doesn't exist", f, task)
|
||||
logger.debug(2, "World build skipping %s as task %s doesn't exist", f, task)
|
||||
terminal = False
|
||||
|
||||
for p in dataCache.pn_provides[pn]:
|
||||
if p.startswith('virtual/'):
|
||||
logger.debug2("World build skipping %s due to %s provider starting with virtual/", f, p)
|
||||
logger.debug(2, "World build skipping %s due to %s provider starting with virtual/", f, p)
|
||||
terminal = False
|
||||
break
|
||||
for pf in dataCache.providers[p]:
|
||||
if dataCache.pkg_fn[pf] != pn:
|
||||
logger.debug2("World build skipping %s due to both us and %s providing %s", f, pf, p)
|
||||
logger.debug(2, "World build skipping %s due to both us and %s providing %s", f, pf, p)
|
||||
terminal = False
|
||||
break
|
||||
if terminal:
|
||||
|
||||
@@ -38,7 +38,7 @@ def taskname_from_tid(tid):
|
||||
return tid.rsplit(":", 1)[1]
|
||||
|
||||
def mc_from_tid(tid):
|
||||
if tid.startswith('mc:') and tid.count(':') >= 2:
|
||||
if tid.startswith('mc:'):
|
||||
return tid.split(':')[1]
|
||||
return ""
|
||||
|
||||
@@ -47,13 +47,13 @@ def split_tid(tid):
|
||||
return (mc, fn, taskname)
|
||||
|
||||
def split_mc(n):
|
||||
if n.startswith("mc:") and n.count(':') >= 2:
|
||||
if n.startswith("mc:"):
|
||||
_, mc, n = n.split(":", 2)
|
||||
return (mc, n)
|
||||
return ('', n)
|
||||
|
||||
def split_tid_mcfn(tid):
|
||||
if tid.startswith('mc:') and tid.count(':') >= 2:
|
||||
if tid.startswith('mc:'):
|
||||
elems = tid.split(':')
|
||||
mc = elems[1]
|
||||
fn = ":".join(elems[2:-1])
|
||||
@@ -85,19 +85,15 @@ class RunQueueStats:
|
||||
"""
|
||||
Holds statistics on the tasks handled by the associated runQueue
|
||||
"""
|
||||
def __init__(self, total, setscene_total):
|
||||
def __init__(self, total):
|
||||
self.completed = 0
|
||||
self.skipped = 0
|
||||
self.failed = 0
|
||||
self.active = 0
|
||||
self.setscene_active = 0
|
||||
self.setscene_covered = 0
|
||||
self.setscene_notcovered = 0
|
||||
self.setscene_total = setscene_total
|
||||
self.total = total
|
||||
|
||||
def copy(self):
|
||||
obj = self.__class__(self.total, self.setscene_total)
|
||||
obj = self.__class__(self.total)
|
||||
obj.__dict__.update(self.__dict__)
|
||||
return obj
|
||||
|
||||
@@ -116,13 +112,6 @@ class RunQueueStats:
|
||||
def taskActive(self):
|
||||
self.active = self.active + 1
|
||||
|
||||
def updateCovered(self, covered, notcovered):
|
||||
self.setscene_covered = covered
|
||||
self.setscene_notcovered = notcovered
|
||||
|
||||
def updateActiveSetscene(self, active):
|
||||
self.setscene_active = active
|
||||
|
||||
# These values indicate the next step due to be run in the
|
||||
# runQueue state machine
|
||||
runQueuePrepare = 2
|
||||
@@ -555,8 +544,8 @@ class RunQueueData:
|
||||
for tid in self.runtaskentries:
|
||||
if task_done[tid] is False or deps_left[tid] != 0:
|
||||
problem_tasks.append(tid)
|
||||
logger.debug2("Task %s is not buildable", tid)
|
||||
logger.debug2("(Complete marker was %s and the remaining dependency count was %s)\n", task_done[tid], deps_left[tid])
|
||||
logger.debug(2, "Task %s is not buildable", tid)
|
||||
logger.debug(2, "(Complete marker was %s and the remaining dependency count was %s)\n", task_done[tid], deps_left[tid])
|
||||
self.runtaskentries[tid].weight = weight[tid]
|
||||
|
||||
if problem_tasks:
|
||||
@@ -654,7 +643,7 @@ class RunQueueData:
|
||||
(mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
|
||||
#runtid = build_tid(mc, fn, taskname)
|
||||
|
||||
#logger.debug2("Processing %s,%s:%s", mc, fn, taskname)
|
||||
#logger.debug(2, "Processing %s,%s:%s", mc, fn, taskname)
|
||||
|
||||
depends = set()
|
||||
task_deps = self.dataCaches[mc].task_deps[taskfn]
|
||||
@@ -926,36 +915,38 @@ class RunQueueData:
|
||||
#
|
||||
# Once all active tasks are marked, prune the ones we don't need.
|
||||
|
||||
delcount = {}
|
||||
for tid in list(self.runtaskentries.keys()):
|
||||
if tid not in runq_build:
|
||||
delcount[tid] = self.runtaskentries[tid]
|
||||
del self.runtaskentries[tid]
|
||||
|
||||
# Handle --runall
|
||||
if self.cooker.configuration.runall:
|
||||
# re-run the mark_active and then drop unused tasks from new list
|
||||
reduced_tasklist = set(self.runtaskentries.keys())
|
||||
for tid in list(self.runtaskentries.keys()):
|
||||
if tid not in runq_build:
|
||||
reduced_tasklist.remove(tid)
|
||||
runq_build = {}
|
||||
|
||||
for task in self.cooker.configuration.runall:
|
||||
if not task.startswith("do_"):
|
||||
task = "do_{0}".format(task)
|
||||
runall_tids = set()
|
||||
for tid in reduced_tasklist:
|
||||
for tid in list(self.runtaskentries):
|
||||
wanttid = "{0}:{1}".format(fn_from_tid(tid), task)
|
||||
if wanttid in delcount:
|
||||
self.runtaskentries[wanttid] = delcount[wanttid]
|
||||
if wanttid in self.runtaskentries:
|
||||
runall_tids.add(wanttid)
|
||||
|
||||
for tid in list(runall_tids):
|
||||
mark_active(tid, 1)
|
||||
mark_active(tid,1)
|
||||
if self.cooker.configuration.force:
|
||||
invalidate_task(tid, False)
|
||||
|
||||
delcount = set()
|
||||
for tid in list(self.runtaskentries.keys()):
|
||||
if tid not in runq_build:
|
||||
delcount.add(tid)
|
||||
del self.runtaskentries[tid]
|
||||
for tid in list(self.runtaskentries.keys()):
|
||||
if tid not in runq_build:
|
||||
delcount[tid] = self.runtaskentries[tid]
|
||||
del self.runtaskentries[tid]
|
||||
|
||||
if self.cooker.configuration.runall:
|
||||
if len(self.runtaskentries) == 0:
|
||||
bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the recipes of the taskgraphs of the targets %s" % (str(self.cooker.configuration.runall), str(self.targets)))
|
||||
|
||||
@@ -969,16 +960,16 @@ class RunQueueData:
|
||||
for task in self.cooker.configuration.runonly:
|
||||
if not task.startswith("do_"):
|
||||
task = "do_{0}".format(task)
|
||||
runonly_tids = [k for k in self.runtaskentries.keys() if taskname_from_tid(k) == task]
|
||||
runonly_tids = { k: v for k, v in self.runtaskentries.items() if taskname_from_tid(k) == task }
|
||||
|
||||
for tid in runonly_tids:
|
||||
mark_active(tid, 1)
|
||||
for tid in list(runonly_tids):
|
||||
mark_active(tid,1)
|
||||
if self.cooker.configuration.force:
|
||||
invalidate_task(tid, False)
|
||||
|
||||
for tid in list(self.runtaskentries.keys()):
|
||||
if tid not in runq_build:
|
||||
delcount.add(tid)
|
||||
delcount[tid] = self.runtaskentries[tid]
|
||||
del self.runtaskentries[tid]
|
||||
|
||||
if len(self.runtaskentries) == 0:
|
||||
@@ -1208,9 +1199,9 @@ class RunQueueData:
|
||||
"""
|
||||
Dump some debug information on the internal data structures
|
||||
"""
|
||||
logger.debug3("run_tasks:")
|
||||
logger.debug(3, "run_tasks:")
|
||||
for tid in self.runtaskentries:
|
||||
logger.debug3(" %s: %s Deps %s RevDeps %s", tid,
|
||||
logger.debug(3, " %s: %s Deps %s RevDeps %s", tid,
|
||||
self.runtaskentries[tid].weight,
|
||||
self.runtaskentries[tid].depends,
|
||||
self.runtaskentries[tid].revdeps)
|
||||
@@ -1247,11 +1238,10 @@ class RunQueue:
|
||||
self.fakeworker = {}
|
||||
|
||||
def _start_worker(self, mc, fakeroot = False, rqexec = None):
|
||||
logger.debug("Starting bitbake-worker")
|
||||
logger.debug(1, "Starting bitbake-worker")
|
||||
magic = "decafbad"
|
||||
if self.cooker.configuration.profile:
|
||||
magic = "decafbadbad"
|
||||
fakerootlogs = None
|
||||
if fakeroot:
|
||||
magic = magic + "beef"
|
||||
mcdata = self.cooker.databuilder.mcdata[mc]
|
||||
@@ -1261,11 +1251,10 @@ class RunQueue:
|
||||
for key, value in (var.split('=') for var in fakerootenv):
|
||||
env[key] = value
|
||||
worker = subprocess.Popen(fakerootcmd + ["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE, env=env)
|
||||
fakerootlogs = self.rqdata.dataCaches[mc].fakerootlogs
|
||||
else:
|
||||
worker = subprocess.Popen(["bitbake-worker", magic], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
|
||||
bb.utils.nonblockingfd(worker.stdout)
|
||||
workerpipe = runQueuePipe(worker.stdout, None, self.cfgData, self, rqexec, fakerootlogs=fakerootlogs)
|
||||
workerpipe = runQueuePipe(worker.stdout, None, self.cfgData, self, rqexec)
|
||||
|
||||
workerdata = {
|
||||
"taskdeps" : self.rqdata.dataCaches[mc].task_deps,
|
||||
@@ -1282,7 +1271,6 @@ class RunQueue:
|
||||
"date" : self.cfgData.getVar("DATE"),
|
||||
"time" : self.cfgData.getVar("TIME"),
|
||||
"hashservaddr" : self.cooker.hashservaddr,
|
||||
"umask" : self.cfgData.getVar("BB_DEFAULT_UMASK"),
|
||||
}
|
||||
|
||||
worker.stdin.write(b"<cookerconfig>" + pickle.dumps(self.cooker.configuration) + b"</cookerconfig>")
|
||||
@@ -1295,7 +1283,7 @@ class RunQueue:
|
||||
def _teardown_worker(self, worker):
|
||||
if not worker:
|
||||
return
|
||||
logger.debug("Teardown for bitbake-worker")
|
||||
logger.debug(1, "Teardown for bitbake-worker")
|
||||
try:
|
||||
worker.process.stdin.write(b"<quit></quit>")
|
||||
worker.process.stdin.flush()
|
||||
@@ -1368,12 +1356,12 @@ class RunQueue:
|
||||
|
||||
# If the stamp is missing, it's not current
|
||||
if not os.access(stampfile, os.F_OK):
|
||||
logger.debug2("Stampfile %s not available", stampfile)
|
||||
logger.debug(2, "Stampfile %s not available", stampfile)
|
||||
return False
|
||||
# If it's a 'nostamp' task, it's not current
|
||||
taskdep = self.rqdata.dataCaches[mc].task_deps[taskfn]
|
||||
if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
|
||||
logger.debug2("%s.%s is nostamp\n", fn, taskname)
|
||||
logger.debug(2, "%s.%s is nostamp\n", fn, taskname)
|
||||
return False
|
||||
|
||||
if taskname != "do_setscene" and taskname.endswith("_setscene"):
|
||||
@@ -1397,18 +1385,18 @@ class RunQueue:
|
||||
continue
|
||||
if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist):
|
||||
if not t2:
|
||||
logger.debug2('Stampfile %s does not exist', stampfile2)
|
||||
logger.debug(2, 'Stampfile %s does not exist', stampfile2)
|
||||
iscurrent = False
|
||||
break
|
||||
if t1 < t2:
|
||||
logger.debug2('Stampfile %s < %s', stampfile, stampfile2)
|
||||
logger.debug(2, 'Stampfile %s < %s', stampfile, stampfile2)
|
||||
iscurrent = False
|
||||
break
|
||||
if recurse and iscurrent:
|
||||
if dep in cache:
|
||||
iscurrent = cache[dep]
|
||||
if not iscurrent:
|
||||
logger.debug2('Stampfile for dependency %s:%s invalid (cached)' % (fn2, taskname2))
|
||||
logger.debug(2, 'Stampfile for dependency %s:%s invalid (cached)' % (fn2, taskname2))
|
||||
else:
|
||||
iscurrent = self.check_stamp_task(dep, recurse=True, cache=cache)
|
||||
cache[dep] = iscurrent
|
||||
@@ -1478,7 +1466,7 @@ class RunQueue:
|
||||
if not self.dm_event_handler_registered:
|
||||
res = bb.event.register(self.dm_event_handler_name,
|
||||
lambda x: self.dm.check(self) if self.state in [runQueueRunning, runQueueCleanUp] else False,
|
||||
('bb.event.HeartbeatEvent',), data=self.cfgData)
|
||||
('bb.event.HeartbeatEvent',))
|
||||
self.dm_event_handler_registered = True
|
||||
|
||||
dump = self.cooker.configuration.dump_signatures
|
||||
@@ -1517,7 +1505,7 @@ class RunQueue:
|
||||
build_done = self.state is runQueueComplete or self.state is runQueueFailed
|
||||
|
||||
if build_done and self.dm_event_handler_registered:
|
||||
bb.event.remove(self.dm_event_handler_name, None, data=self.cfgData)
|
||||
bb.event.remove(self.dm_event_handler_name, None)
|
||||
self.dm_event_handler_registered = False
|
||||
|
||||
if build_done and self.rqexe:
|
||||
@@ -1744,7 +1732,8 @@ class RunQueueExecute:
|
||||
self.holdoff_need_update = True
|
||||
self.sqdone = False
|
||||
|
||||
self.stats = RunQueueStats(len(self.rqdata.runtaskentries), len(self.rqdata.runq_setscene_tids))
|
||||
self.stats = RunQueueStats(len(self.rqdata.runtaskentries))
|
||||
self.sq_stats = RunQueueStats(len(self.rqdata.runq_setscene_tids))
|
||||
|
||||
for mc in rq.worker:
|
||||
rq.worker[mc].pipe.setrunqueueexec(self)
|
||||
@@ -1772,7 +1761,7 @@ class RunQueueExecute:
|
||||
for scheduler in schedulers:
|
||||
if self.scheduler == scheduler.name:
|
||||
self.sched = scheduler(self, self.rqdata)
|
||||
logger.debug("Using runqueue scheduler '%s'", scheduler.name)
|
||||
logger.debug(1, "Using runqueue scheduler '%s'", scheduler.name)
|
||||
break
|
||||
else:
|
||||
bb.fatal("Invalid scheduler '%s'. Available schedulers: %s" %
|
||||
@@ -1782,7 +1771,7 @@ class RunQueueExecute:
|
||||
self.sqdata = SQData()
|
||||
build_scenequeue_data(self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self)
|
||||
|
||||
def runqueue_process_waitpid(self, task, status, fakerootlog=None):
|
||||
def runqueue_process_waitpid(self, task, status):
|
||||
|
||||
# self.build_stamps[pid] may not exist when use shared work directory.
|
||||
if task in self.build_stamps:
|
||||
@@ -1795,10 +1784,9 @@ class RunQueueExecute:
|
||||
else:
|
||||
self.sq_task_complete(task)
|
||||
self.sq_live.remove(task)
|
||||
self.stats.updateActiveSetscene(len(self.sq_live))
|
||||
else:
|
||||
if status != 0:
|
||||
self.task_fail(task, status, fakerootlog=fakerootlog)
|
||||
self.task_fail(task, status)
|
||||
else:
|
||||
self.task_complete(task)
|
||||
return True
|
||||
@@ -1829,7 +1817,7 @@ class RunQueueExecute:
|
||||
def finish(self):
|
||||
self.rq.state = runQueueCleanUp
|
||||
|
||||
active = self.stats.active + len(self.sq_live)
|
||||
active = self.stats.active + self.sq_stats.active
|
||||
if active > 0:
|
||||
bb.event.fire(runQueueExitWait(active), self.cfgData)
|
||||
self.rq.read_workers()
|
||||
@@ -1862,7 +1850,7 @@ class RunQueueExecute:
|
||||
return valid
|
||||
|
||||
def can_start_task(self):
|
||||
active = self.stats.active + len(self.sq_live)
|
||||
active = self.stats.active + self.sq_stats.active
|
||||
can_start = active < self.number_tasks
|
||||
return can_start
|
||||
|
||||
@@ -1911,13 +1899,7 @@ class RunQueueExecute:
|
||||
break
|
||||
if alldeps:
|
||||
self.setbuildable(revdep)
|
||||
logger.debug("Marking task %s as buildable", revdep)
|
||||
|
||||
for t in self.sq_deferred.copy():
|
||||
if self.sq_deferred[t] == task:
|
||||
logger.debug2("Deferred task %s now buildable" % t)
|
||||
del self.sq_deferred[t]
|
||||
update_scenequeue_data([t], self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self, summary=False)
|
||||
logger.debug(1, "Marking task %s as buildable", revdep)
|
||||
|
||||
def task_complete(self, task):
|
||||
self.stats.taskCompleted()
|
||||
@@ -1925,31 +1907,14 @@ class RunQueueExecute:
|
||||
self.task_completeoutright(task)
|
||||
self.runq_tasksrun.add(task)
|
||||
|
||||
def task_fail(self, task, exitcode, fakerootlog=None):
|
||||
def task_fail(self, task, exitcode):
|
||||
"""
|
||||
Called when a task has failed
|
||||
Updates the state engine with the failure
|
||||
"""
|
||||
self.stats.taskFailed()
|
||||
self.failed_tids.append(task)
|
||||
|
||||
fakeroot_log = ""
|
||||
if fakerootlog and os.path.exists(fakerootlog):
|
||||
with open(fakerootlog) as fakeroot_log_file:
|
||||
fakeroot_failed = False
|
||||
for line in reversed(fakeroot_log_file.readlines()):
|
||||
for fakeroot_error in ['mismatch', 'error', 'fatal']:
|
||||
if fakeroot_error in line.lower():
|
||||
fakeroot_failed = True
|
||||
if 'doing new pid setup and server start' in line:
|
||||
break
|
||||
fakeroot_log = line + fakeroot_log
|
||||
|
||||
if not fakeroot_failed:
|
||||
fakeroot_log = None
|
||||
|
||||
bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq, fakeroot_log=fakeroot_log), self.cfgData)
|
||||
|
||||
bb.event.fire(runQueueTaskFailed(task, self.stats, exitcode, self.rq), self.cfgData)
|
||||
if self.rqdata.taskData[''].abort:
|
||||
self.rq.state = runQueueCleanUp
|
||||
|
||||
@@ -1964,8 +1929,8 @@ class RunQueueExecute:
|
||||
def summarise_scenequeue_errors(self):
|
||||
err = False
|
||||
if not self.sqdone:
|
||||
logger.debug('We could skip tasks %s', "\n".join(sorted(self.scenequeue_covered)))
|
||||
completeevent = sceneQueueComplete(self.stats, self.rq)
|
||||
logger.debug(1, 'We could skip tasks %s', "\n".join(sorted(self.scenequeue_covered)))
|
||||
completeevent = sceneQueueComplete(self.sq_stats, self.rq)
|
||||
bb.event.fire(completeevent, self.cfgData)
|
||||
if self.sq_deferred:
|
||||
logger.error("Scenequeue had deferred entries: %s" % pprint.pformat(self.sq_deferred))
|
||||
@@ -1977,10 +1942,6 @@ class RunQueueExecute:
|
||||
logger.error("Scenequeue had holdoff tasks: %s" % pprint.pformat(self.holdoff_tasks))
|
||||
err = True
|
||||
|
||||
for tid in self.scenequeue_covered.intersection(self.scenequeue_notcovered):
|
||||
# No task should end up in both covered and uncovered, that is a bug.
|
||||
logger.error("Setscene task %s in both covered and notcovered." % tid)
|
||||
|
||||
for tid in self.rqdata.runq_setscene_tids:
|
||||
if tid not in self.scenequeue_covered and tid not in self.scenequeue_notcovered:
|
||||
err = True
|
||||
@@ -2025,7 +1986,7 @@ class RunQueueExecute:
|
||||
if nexttask in self.sq_buildable and nexttask not in self.sq_running and self.sqdata.stamps[nexttask] not in self.build_stamps.values():
|
||||
if nexttask not in self.sqdata.unskippable and len(self.sqdata.sq_revdeps[nexttask]) > 0 and self.sqdata.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and self.check_dependencies(nexttask, self.sqdata.sq_revdeps[nexttask]):
|
||||
if nexttask not in self.rqdata.target_tids:
|
||||
logger.debug2("Skipping setscene for task %s" % nexttask)
|
||||
logger.debug(2, "Skipping setscene for task %s" % nexttask)
|
||||
self.sq_task_skip(nexttask)
|
||||
self.scenequeue_notneeded.add(nexttask)
|
||||
if nexttask in self.sq_deferred:
|
||||
@@ -2038,26 +1999,28 @@ class RunQueueExecute:
|
||||
if nexttask in self.sq_deferred:
|
||||
if self.sq_deferred[nexttask] not in self.runq_complete:
|
||||
continue
|
||||
logger.debug("Task %s no longer deferred" % nexttask)
|
||||
logger.debug(1, "Task %s no longer deferred" % nexttask)
|
||||
del self.sq_deferred[nexttask]
|
||||
valid = self.rq.validate_hashes(set([nexttask]), self.cooker.data, 0, False, summary=False)
|
||||
if not valid:
|
||||
logger.debug("%s didn't become valid, skipping setscene" % nexttask)
|
||||
logger.debug(1, "%s didn't become valid, skipping setscene" % nexttask)
|
||||
self.sq_task_failoutright(nexttask)
|
||||
return True
|
||||
else:
|
||||
self.sqdata.outrightfail.remove(nexttask)
|
||||
if nexttask in self.sqdata.outrightfail:
|
||||
logger.debug2('No package found, so skipping setscene task %s', nexttask)
|
||||
logger.debug(2, 'No package found, so skipping setscene task %s', nexttask)
|
||||
self.sq_task_failoutright(nexttask)
|
||||
return True
|
||||
if nexttask in self.sqdata.unskippable:
|
||||
logger.debug2("Setscene task %s is unskippable" % nexttask)
|
||||
logger.debug(2, "Setscene task %s is unskippable" % nexttask)
|
||||
task = nexttask
|
||||
break
|
||||
if task is not None:
|
||||
(mc, fn, taskname, taskfn) = split_tid_mcfn(task)
|
||||
taskname = taskname + "_setscene"
|
||||
if self.rq.check_stamp_task(task, taskname_from_tid(task), recurse = True, cache=self.stampcache):
|
||||
logger.debug2('Stamp for underlying task %s is current, so skipping setscene variant', task)
|
||||
logger.debug(2, 'Stamp for underlying task %s is current, so skipping setscene variant', task)
|
||||
self.sq_task_failoutright(task)
|
||||
return True
|
||||
|
||||
@@ -2067,16 +2030,16 @@ class RunQueueExecute:
|
||||
return True
|
||||
|
||||
if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
|
||||
logger.debug2('Setscene stamp current task %s, so skip it and its dependencies', task)
|
||||
logger.debug(2, 'Setscene stamp current task %s, so skip it and its dependencies', task)
|
||||
self.sq_task_skip(task)
|
||||
return True
|
||||
|
||||
if self.cooker.configuration.skipsetscene:
|
||||
logger.debug2('No setscene tasks should be executed. Skipping %s', task)
|
||||
logger.debug(2, 'No setscene tasks should be executed. Skipping %s', task)
|
||||
self.sq_task_failoutright(task)
|
||||
return True
|
||||
|
||||
startevent = sceneQueueTaskStarted(task, self.stats, self.rq)
|
||||
startevent = sceneQueueTaskStarted(task, self.sq_stats, self.rq)
|
||||
bb.event.fire(startevent, self.cfgData)
|
||||
|
||||
taskdepdata = self.sq_build_taskdepdata(task)
|
||||
@@ -2097,7 +2060,7 @@ class RunQueueExecute:
|
||||
self.build_stamps2.append(self.build_stamps[task])
|
||||
self.sq_running.add(task)
|
||||
self.sq_live.add(task)
|
||||
self.stats.updateActiveSetscene(len(self.sq_live))
|
||||
self.sq_stats.taskActive()
|
||||
if self.can_start_task():
|
||||
return True
|
||||
|
||||
@@ -2134,12 +2097,12 @@ class RunQueueExecute:
|
||||
return True
|
||||
|
||||
if task in self.tasks_covered:
|
||||
logger.debug2("Setscene covered task %s", task)
|
||||
logger.debug(2, "Setscene covered task %s", task)
|
||||
self.task_skip(task, "covered")
|
||||
return True
|
||||
|
||||
if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
|
||||
logger.debug2("Stamp current task %s", task)
|
||||
logger.debug(2, "Stamp current task %s", task)
|
||||
|
||||
self.task_skip(task, "existing")
|
||||
self.runq_tasksrun.add(task)
|
||||
@@ -2187,7 +2150,7 @@ class RunQueueExecute:
|
||||
if self.can_start_task():
|
||||
return True
|
||||
|
||||
if self.stats.active > 0 or len(self.sq_live) > 0:
|
||||
if self.stats.active > 0 or self.sq_stats.active > 0:
|
||||
self.rq.read_workers()
|
||||
return self.rq.active_fds()
|
||||
|
||||
@@ -2195,8 +2158,7 @@ class RunQueueExecute:
|
||||
if self.sq_deferred:
|
||||
tid = self.sq_deferred.pop(list(self.sq_deferred.keys())[0])
|
||||
logger.warning("Runqeueue deadlocked on deferred tasks, forcing task %s" % tid)
|
||||
if tid not in self.runq_complete:
|
||||
self.sq_task_failoutright(tid)
|
||||
self.sq_task_failoutright(tid)
|
||||
return True
|
||||
|
||||
if len(self.failed_tids) != 0:
|
||||
@@ -2310,16 +2272,10 @@ class RunQueueExecute:
|
||||
self.updated_taskhash_queue.remove((tid, unihash))
|
||||
|
||||
if unihash != self.rqdata.runtaskentries[tid].unihash:
|
||||
# Make sure we rehash any other tasks with the same task hash that we're deferred against.
|
||||
torehash = [tid]
|
||||
for deftid in self.sq_deferred:
|
||||
if self.sq_deferred[deftid] == tid:
|
||||
torehash.append(deftid)
|
||||
for hashtid in torehash:
|
||||
hashequiv_logger.verbose("Task %s unihash changed to %s" % (hashtid, unihash))
|
||||
self.rqdata.runtaskentries[hashtid].unihash = unihash
|
||||
bb.parse.siggen.set_unihash(hashtid, unihash)
|
||||
toprocess.add(hashtid)
|
||||
hashequiv_logger.verbose("Task %s unihash changed to %s" % (tid, unihash))
|
||||
self.rqdata.runtaskentries[tid].unihash = unihash
|
||||
bb.parse.siggen.set_unihash(tid, unihash)
|
||||
toprocess.add(tid)
|
||||
|
||||
# Work out all tasks which depend upon these
|
||||
total = set()
|
||||
@@ -2366,7 +2322,7 @@ class RunQueueExecute:
|
||||
remapped = True
|
||||
|
||||
if not remapped:
|
||||
#logger.debug("Task %s hash changes: %s->%s %s->%s" % (tid, orighash, newhash, origuni, newuni))
|
||||
#logger.debug(1, "Task %s hash changes: %s->%s %s->%s" % (tid, orighash, newhash, origuni, newuni))
|
||||
self.rqdata.runtaskentries[tid].hash = newhash
|
||||
self.rqdata.runtaskentries[tid].unihash = newuni
|
||||
changed.add(tid)
|
||||
@@ -2381,7 +2337,7 @@ class RunQueueExecute:
|
||||
for mc in self.rq.fakeworker:
|
||||
self.rq.fakeworker[mc].process.stdin.write(b"<newtaskhashes>" + pickle.dumps(bb.parse.siggen.get_taskhashes()) + b"</newtaskhashes>")
|
||||
|
||||
hashequiv_logger.debug(pprint.pformat("Tasks changed:\n%s" % (changed)))
|
||||
hashequiv_logger.debug(1, pprint.pformat("Tasks changed:\n%s" % (changed)))
|
||||
|
||||
for tid in changed:
|
||||
if tid not in self.rqdata.runq_setscene_tids:
|
||||
@@ -2400,7 +2356,7 @@ class RunQueueExecute:
|
||||
# Check no tasks this covers are running
|
||||
for dep in self.sqdata.sq_covered_tasks[tid]:
|
||||
if dep in self.runq_running and dep not in self.runq_complete:
|
||||
hashequiv_logger.debug2("Task %s is running which blocks setscene for %s from running" % (dep, tid))
|
||||
hashequiv_logger.debug(2, "Task %s is running which blocks setscene for %s from running" % (dep, tid))
|
||||
valid = False
|
||||
break
|
||||
if not valid:
|
||||
@@ -2459,11 +2415,6 @@ class RunQueueExecute:
|
||||
|
||||
if update_tasks:
|
||||
self.sqdone = False
|
||||
for tid in [t[0] for t in update_tasks]:
|
||||
h = pending_hash_index(tid, self.rqdata)
|
||||
if h in self.sqdata.hashes and tid != self.sqdata.hashes[h]:
|
||||
self.sq_deferred[tid] = self.sqdata.hashes[h]
|
||||
bb.note("Deferring %s after %s" % (tid, self.sqdata.hashes[h]))
|
||||
update_scenequeue_data([t[0] for t in update_tasks], self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self, summary=False)
|
||||
|
||||
for (tid, harddepfail, origvalid) in update_tasks:
|
||||
@@ -2473,20 +2424,13 @@ class RunQueueExecute:
|
||||
self.sq_task_failoutright(tid)
|
||||
|
||||
if changed:
|
||||
self.stats.updateCovered(len(self.scenequeue_covered), len(self.scenequeue_notcovered))
|
||||
self.holdoff_need_update = True
|
||||
|
||||
def scenequeue_updatecounters(self, task, fail=False):
|
||||
|
||||
for dep in sorted(self.sqdata.sq_deps[task]):
|
||||
if fail and task in self.sqdata.sq_harddeps and dep in self.sqdata.sq_harddeps[task]:
|
||||
if dep in self.scenequeue_covered or dep in self.scenequeue_notcovered:
|
||||
# dependency could be already processed, e.g. noexec setscene task
|
||||
continue
|
||||
noexec, stamppresent = check_setscene_stamps(dep, self.rqdata, self.rq, self.stampcache)
|
||||
if noexec or stamppresent:
|
||||
continue
|
||||
logger.debug2("%s was unavailable and is a hard dependency of %s so skipping" % (task, dep))
|
||||
logger.debug(2, "%s was unavailable and is a hard dependency of %s so skipping" % (task, dep))
|
||||
self.sq_task_failoutright(dep)
|
||||
continue
|
||||
if self.sqdata.sq_revdeps[dep].issubset(self.scenequeue_covered | self.scenequeue_notcovered):
|
||||
@@ -2507,7 +2451,6 @@ class RunQueueExecute:
|
||||
new.add(dep)
|
||||
next = new
|
||||
|
||||
self.stats.updateCovered(len(self.scenequeue_covered), len(self.scenequeue_notcovered))
|
||||
self.holdoff_need_update = True
|
||||
|
||||
def sq_task_completeoutright(self, task):
|
||||
@@ -2517,7 +2460,7 @@ class RunQueueExecute:
|
||||
completed dependencies as buildable
|
||||
"""
|
||||
|
||||
logger.debug('Found task %s which could be accelerated', task)
|
||||
logger.debug(1, 'Found task %s which could be accelerated', task)
|
||||
self.scenequeue_covered.add(task)
|
||||
self.scenequeue_updatecounters(task)
|
||||
|
||||
@@ -2531,11 +2474,13 @@ class RunQueueExecute:
|
||||
self.rq.state = runQueueCleanUp
|
||||
|
||||
def sq_task_complete(self, task):
|
||||
bb.event.fire(sceneQueueTaskCompleted(task, self.stats, self.rq), self.cfgData)
|
||||
self.sq_stats.taskCompleted()
|
||||
bb.event.fire(sceneQueueTaskCompleted(task, self.sq_stats, self.rq), self.cfgData)
|
||||
self.sq_task_completeoutright(task)
|
||||
|
||||
def sq_task_fail(self, task, result):
|
||||
bb.event.fire(sceneQueueTaskFailed(task, self.stats, result, self), self.cfgData)
|
||||
self.sq_stats.taskFailed()
|
||||
bb.event.fire(sceneQueueTaskFailed(task, self.sq_stats, result, self), self.cfgData)
|
||||
self.scenequeue_notcovered.add(task)
|
||||
self.scenequeue_updatecounters(task, True)
|
||||
self.sq_check_taskfail(task)
|
||||
@@ -2543,6 +2488,8 @@ class RunQueueExecute:
|
||||
def sq_task_failoutright(self, task):
|
||||
self.sq_running.add(task)
|
||||
self.sq_buildable.add(task)
|
||||
self.sq_stats.taskSkipped()
|
||||
self.sq_stats.taskCompleted()
|
||||
self.scenequeue_notcovered.add(task)
|
||||
self.scenequeue_updatecounters(task, True)
|
||||
|
||||
@@ -2550,6 +2497,8 @@ class RunQueueExecute:
|
||||
self.sq_running.add(task)
|
||||
self.sq_buildable.add(task)
|
||||
self.sq_task_completeoutright(task)
|
||||
self.sq_stats.taskSkipped()
|
||||
self.sq_stats.taskCompleted()
|
||||
|
||||
def sq_build_taskdepdata(self, task):
|
||||
def getsetscenedeps(tid):
|
||||
@@ -2803,55 +2752,8 @@ def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq):
|
||||
sqdata.stamppresent = set()
|
||||
sqdata.valid = set()
|
||||
|
||||
sqdata.hashes = {}
|
||||
sqrq.sq_deferred = {}
|
||||
for mc in sorted(sqdata.multiconfigs):
|
||||
for tid in sorted(sqdata.sq_revdeps):
|
||||
if mc_from_tid(tid) != mc:
|
||||
continue
|
||||
h = pending_hash_index(tid, rqdata)
|
||||
if h not in sqdata.hashes:
|
||||
sqdata.hashes[h] = tid
|
||||
else:
|
||||
sqrq.sq_deferred[tid] = sqdata.hashes[h]
|
||||
bb.note("Deferring %s after %s" % (tid, sqdata.hashes[h]))
|
||||
|
||||
update_scenequeue_data(sqdata.sq_revdeps, sqdata, rqdata, rq, cooker, stampcache, sqrq, summary=True)
|
||||
|
||||
# Compute a list of 'stale' sstate tasks where the current hash does not match the one
|
||||
# in any stamp files. Pass the list out to metadata as an event.
|
||||
found = {}
|
||||
for tid in rqdata.runq_setscene_tids:
|
||||
(mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
|
||||
stamps = bb.build.find_stale_stamps(taskname, rqdata.dataCaches[mc], taskfn)
|
||||
if stamps:
|
||||
if mc not in found:
|
||||
found[mc] = {}
|
||||
found[mc][tid] = stamps
|
||||
for mc in found:
|
||||
event = bb.event.StaleSetSceneTasks(found[mc])
|
||||
bb.event.fire(event, cooker.databuilder.mcdata[mc])
|
||||
|
||||
def check_setscene_stamps(tid, rqdata, rq, stampcache, noexecstamp=False):
|
||||
|
||||
(mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
|
||||
|
||||
taskdep = rqdata.dataCaches[mc].task_deps[taskfn]
|
||||
|
||||
if 'noexec' in taskdep and taskname in taskdep['noexec']:
|
||||
bb.build.make_stamp(taskname + "_setscene", rqdata.dataCaches[mc], taskfn)
|
||||
return True, False
|
||||
|
||||
if rq.check_stamp_task(tid, taskname + "_setscene", cache=stampcache):
|
||||
logger.debug2('Setscene stamp current for task %s', tid)
|
||||
return False, True
|
||||
|
||||
if rq.check_stamp_task(tid, taskname, recurse = True, cache=stampcache):
|
||||
logger.debug2('Normal stamp current for task %s', tid)
|
||||
return False, True
|
||||
|
||||
return False, False
|
||||
|
||||
def update_scenequeue_data(tids, sqdata, rqdata, rq, cooker, stampcache, sqrq, summary=True):
|
||||
|
||||
tocheck = set()
|
||||
@@ -2861,17 +2763,25 @@ def update_scenequeue_data(tids, sqdata, rqdata, rq, cooker, stampcache, sqrq, s
|
||||
sqdata.stamppresent.remove(tid)
|
||||
if tid in sqdata.valid:
|
||||
sqdata.valid.remove(tid)
|
||||
if tid in sqdata.outrightfail:
|
||||
sqdata.outrightfail.remove(tid)
|
||||
|
||||
noexec, stamppresent = check_setscene_stamps(tid, rqdata, rq, stampcache, noexecstamp=True)
|
||||
(mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
|
||||
|
||||
if noexec:
|
||||
taskdep = rqdata.dataCaches[mc].task_deps[taskfn]
|
||||
|
||||
if 'noexec' in taskdep and taskname in taskdep['noexec']:
|
||||
sqdata.noexec.add(tid)
|
||||
sqrq.sq_task_skip(tid)
|
||||
bb.build.make_stamp(taskname + "_setscene", rqdata.dataCaches[mc], taskfn)
|
||||
continue
|
||||
|
||||
if rq.check_stamp_task(tid, taskname + "_setscene", cache=stampcache):
|
||||
logger.debug(2, 'Setscene stamp current for task %s', tid)
|
||||
sqdata.stamppresent.add(tid)
|
||||
sqrq.sq_task_skip(tid)
|
||||
continue
|
||||
|
||||
if stamppresent:
|
||||
if rq.check_stamp_task(tid, taskname, recurse = True, cache=stampcache):
|
||||
logger.debug(2, 'Normal stamp current for task %s', tid)
|
||||
sqdata.stamppresent.add(tid)
|
||||
sqrq.sq_task_skip(tid)
|
||||
continue
|
||||
@@ -2880,20 +2790,28 @@ def update_scenequeue_data(tids, sqdata, rqdata, rq, cooker, stampcache, sqrq, s
|
||||
|
||||
sqdata.valid |= rq.validate_hashes(tocheck, cooker.data, len(sqdata.stamppresent), False, summary=summary)
|
||||
|
||||
for tid in tids:
|
||||
if tid in sqdata.stamppresent:
|
||||
continue
|
||||
if tid in sqdata.valid:
|
||||
continue
|
||||
if tid in sqdata.noexec:
|
||||
continue
|
||||
if tid in sqrq.scenequeue_covered:
|
||||
continue
|
||||
if tid in sqrq.scenequeue_notcovered:
|
||||
continue
|
||||
if tid in sqrq.sq_deferred:
|
||||
continue
|
||||
sqdata.outrightfail.add(tid)
|
||||
sqdata.hashes = {}
|
||||
for mc in sorted(sqdata.multiconfigs):
|
||||
for tid in sorted(sqdata.sq_revdeps):
|
||||
if mc_from_tid(tid) != mc:
|
||||
continue
|
||||
if tid in sqdata.stamppresent:
|
||||
continue
|
||||
if tid in sqdata.valid:
|
||||
continue
|
||||
if tid in sqdata.noexec:
|
||||
continue
|
||||
if tid in sqrq.scenequeue_notcovered:
|
||||
continue
|
||||
sqdata.outrightfail.add(tid)
|
||||
|
||||
h = pending_hash_index(tid, rqdata)
|
||||
if h not in sqdata.hashes:
|
||||
sqdata.hashes[h] = tid
|
||||
else:
|
||||
sqrq.sq_deferred[tid] = sqdata.hashes[h]
|
||||
bb.note("Deferring %s after %s" % (tid, sqdata.hashes[h]))
|
||||
|
||||
|
||||
class TaskFailure(Exception):
|
||||
"""
|
||||
@@ -2957,16 +2875,12 @@ class runQueueTaskFailed(runQueueEvent):
|
||||
"""
|
||||
Event notifying a task failed
|
||||
"""
|
||||
def __init__(self, task, stats, exitcode, rq, fakeroot_log=None):
|
||||
def __init__(self, task, stats, exitcode, rq):
|
||||
runQueueEvent.__init__(self, task, stats, rq)
|
||||
self.exitcode = exitcode
|
||||
self.fakeroot_log = fakeroot_log
|
||||
|
||||
def __str__(self):
|
||||
if self.fakeroot_log:
|
||||
return "Task (%s) failed with exit code '%s' \nPseudo log:\n%s" % (self.taskstring, self.exitcode, self.fakeroot_log)
|
||||
else:
|
||||
return "Task (%s) failed with exit code '%s'" % (self.taskstring, self.exitcode)
|
||||
return "Task (%s) failed with exit code '%s'" % (self.taskstring, self.exitcode)
|
||||
|
||||
class sceneQueueTaskFailed(sceneQueueEvent):
|
||||
"""
|
||||
@@ -3018,7 +2932,7 @@ class runQueuePipe():
|
||||
"""
|
||||
Abstraction for a pipe between a worker thread and the server
|
||||
"""
|
||||
def __init__(self, pipein, pipeout, d, rq, rqexec, fakerootlogs=None):
|
||||
def __init__(self, pipein, pipeout, d, rq, rqexec):
|
||||
self.input = pipein
|
||||
if pipeout:
|
||||
pipeout.close()
|
||||
@@ -3027,7 +2941,6 @@ class runQueuePipe():
|
||||
self.d = d
|
||||
self.rq = rq
|
||||
self.rqexec = rqexec
|
||||
self.fakerootlogs = fakerootlogs
|
||||
|
||||
def setrunqueueexec(self, rqexec):
|
||||
self.rqexec = rqexec
|
||||
@@ -3073,11 +2986,7 @@ class runQueuePipe():
|
||||
task, status = pickle.loads(self.queue[10:index])
|
||||
except (ValueError, pickle.UnpicklingError, AttributeError, IndexError) as e:
|
||||
bb.msg.fatal("RunQueue", "failed load pickle '%s': '%s'" % (e, self.queue[10:index]))
|
||||
(_, _, _, taskfn) = split_tid_mcfn(task)
|
||||
fakerootlog = None
|
||||
if self.fakerootlogs and taskfn and taskfn in self.fakerootlogs:
|
||||
fakerootlog = self.fakerootlogs[taskfn]
|
||||
self.rqexec.runqueue_process_waitpid(task, status, fakerootlog=fakerootlog)
|
||||
self.rqexec.runqueue_process_waitpid(task, status)
|
||||
found = True
|
||||
self.queue = self.queue[index+11:]
|
||||
index = self.queue.find(b"</exitcode>")
|
||||
|
||||
@@ -147,7 +147,7 @@ class ProcessServer():
|
||||
conn = newconnections.pop(-1)
|
||||
fds.append(conn)
|
||||
self.controllersock = conn
|
||||
elif not self.timeout and not ready:
|
||||
elif self.timeout is None and not ready:
|
||||
serverlog("No timeout, exiting.")
|
||||
self.quit = True
|
||||
|
||||
@@ -367,12 +367,7 @@ class ProcessServer():
|
||||
self.next_heartbeat = now + self.heartbeat_seconds
|
||||
if hasattr(self.cooker, "data"):
|
||||
heartbeat = bb.event.HeartbeatEvent(now)
|
||||
try:
|
||||
bb.event.fire(heartbeat, self.cooker.data)
|
||||
except Exception as exc:
|
||||
if not isinstance(exc, bb.BBHandledException):
|
||||
logger.exception('Running heartbeat function')
|
||||
self.quit = True
|
||||
bb.event.fire(heartbeat, self.cooker.data)
|
||||
if nextsleep and now + nextsleep > self.next_heartbeat:
|
||||
# Shorten timeout so that we we wake up in time for
|
||||
# the heartbeat.
|
||||
@@ -471,7 +466,7 @@ class BitBakeServer(object):
|
||||
try:
|
||||
r = ready.get()
|
||||
except EOFError:
|
||||
# Trap the child exiting/closing the pipe and error out
|
||||
# Trap the child exitting/closing the pipe and error out
|
||||
r = None
|
||||
if not r or r[0] != "r":
|
||||
ready.close()
|
||||
@@ -514,7 +509,7 @@ class BitBakeServer(object):
|
||||
os.set_inheritable(self.bitbake_lock.fileno(), True)
|
||||
os.set_inheritable(self.readypipein, True)
|
||||
serverscript = os.path.realpath(os.path.dirname(__file__) + "/../../../bin/bitbake-server")
|
||||
os.execl(sys.executable, "bitbake-server", serverscript, "decafbad", str(self.bitbake_lock.fileno()), str(self.readypipein), self.logfile, self.bitbake_lock.name, self.sockname, str(self.server_timeout or 0), str(self.xmlrpcinterface[0]), str(self.xmlrpcinterface[1]))
|
||||
os.execl(sys.executable, "bitbake-server", serverscript, "decafbad", str(self.bitbake_lock.fileno()), str(self.readypipein), self.logfile, self.bitbake_lock.name, self.sockname, str(self.server_timeout), str(self.xmlrpcinterface[0]), str(self.xmlrpcinterface[1]))
|
||||
|
||||
def execServer(lockfd, readypipeinfd, lockname, sockname, server_timeout, xmlrpcinterface):
|
||||
|
||||
@@ -659,7 +654,7 @@ class BBUIEventQueue:
|
||||
self.reader = ConnectionReader(readfd)
|
||||
|
||||
self.t = threading.Thread()
|
||||
self.t.daemon = True
|
||||
self.t.setDaemon(True)
|
||||
self.t.run = self.startCallbackHandler
|
||||
self.t.start()
|
||||
|
||||
|
||||
@@ -311,7 +311,13 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
||||
|
||||
data = self.basehash[tid]
|
||||
for dep in self.runtaskdeps[tid]:
|
||||
data = data + self.get_unihash(dep)
|
||||
if dep in self.unihash:
|
||||
if self.unihash[dep] is None:
|
||||
data = data + self.taskhash[dep]
|
||||
else:
|
||||
data = data + self.unihash[dep]
|
||||
else:
|
||||
data = data + self.get_unihash(dep)
|
||||
|
||||
for (f, cs) in self.file_checksum_values[tid]:
|
||||
if cs:
|
||||
@@ -541,7 +547,7 @@ class SignatureGeneratorUniHashMixIn(object):
|
||||
# is much more interesting, so it is reported at debug level 1
|
||||
hashequiv_logger.debug((1, 2)[unihash == taskhash], 'Found unihash %s in place of %s for %s from %s' % (unihash, taskhash, tid, self.server))
|
||||
else:
|
||||
hashequiv_logger.debug2('No reported unihash for %s:%s from %s' % (tid, taskhash, self.server))
|
||||
hashequiv_logger.debug(2, 'No reported unihash for %s:%s from %s' % (tid, taskhash, self.server))
|
||||
except hashserv.client.HashConnectionError as e:
|
||||
bb.warn('Error contacting Hash Equivalence Server %s: %s' % (self.server, str(e)))
|
||||
|
||||
@@ -615,12 +621,12 @@ class SignatureGeneratorUniHashMixIn(object):
|
||||
new_unihash = data['unihash']
|
||||
|
||||
if new_unihash != unihash:
|
||||
hashequiv_logger.debug('Task %s unihash changed %s -> %s by server %s' % (taskhash, unihash, new_unihash, self.server))
|
||||
hashequiv_logger.debug(1, 'Task %s unihash changed %s -> %s by server %s' % (taskhash, unihash, new_unihash, self.server))
|
||||
bb.event.fire(bb.runqueue.taskUniHashUpdate(fn + ':do_' + task, new_unihash), d)
|
||||
self.set_unihash(tid, new_unihash)
|
||||
d.setVar('BB_UNIHASH', new_unihash)
|
||||
else:
|
||||
hashequiv_logger.debug('Reported task %s as unihash %s to %s' % (taskhash, unihash, self.server))
|
||||
hashequiv_logger.debug(1, 'Reported task %s as unihash %s to %s' % (taskhash, unihash, self.server))
|
||||
except hashserv.client.HashConnectionError as e:
|
||||
bb.warn('Error contacting Hash Equivalence Server %s: %s' % (self.server, str(e)))
|
||||
finally:
|
||||
@@ -748,7 +754,7 @@ def clean_basepath(basepath):
|
||||
if basepath[0] == '/':
|
||||
return cleaned
|
||||
|
||||
if basepath.startswith("mc:") and basepath.count(':') >= 2:
|
||||
if basepath.startswith("mc:"):
|
||||
mc, mc_name, basepath = basepath.split(":", 2)
|
||||
mc_suffix = ':mc:' + mc_name
|
||||
else:
|
||||
|
||||
@@ -131,7 +131,7 @@ class TaskData:
|
||||
for depend in dataCache.deps[fn]:
|
||||
dependids.add(depend)
|
||||
self.depids[fn] = list(dependids)
|
||||
logger.debug2("Added dependencies %s for %s", str(dataCache.deps[fn]), fn)
|
||||
logger.debug(2, "Added dependencies %s for %s", str(dataCache.deps[fn]), fn)
|
||||
|
||||
# Work out runtime dependencies
|
||||
if not fn in self.rdepids:
|
||||
@@ -149,9 +149,9 @@ class TaskData:
|
||||
rreclist.append(rdepend)
|
||||
rdependids.add(rdepend)
|
||||
if rdependlist:
|
||||
logger.debug2("Added runtime dependencies %s for %s", str(rdependlist), fn)
|
||||
logger.debug(2, "Added runtime dependencies %s for %s", str(rdependlist), fn)
|
||||
if rreclist:
|
||||
logger.debug2("Added runtime recommendations %s for %s", str(rreclist), fn)
|
||||
logger.debug(2, "Added runtime recommendations %s for %s", str(rreclist), fn)
|
||||
self.rdepids[fn] = list(rdependids)
|
||||
|
||||
for dep in self.depids[fn]:
|
||||
@@ -378,7 +378,7 @@ class TaskData:
|
||||
for fn in eligible:
|
||||
if fn in self.failed_fns:
|
||||
continue
|
||||
logger.debug2("adding %s to satisfy %s", fn, item)
|
||||
logger.debug(2, "adding %s to satisfy %s", fn, item)
|
||||
self.add_build_target(fn, item)
|
||||
self.add_tasks(fn, dataCache)
|
||||
|
||||
@@ -431,7 +431,7 @@ class TaskData:
|
||||
for fn in eligible:
|
||||
if fn in self.failed_fns:
|
||||
continue
|
||||
logger.debug2("adding '%s' to satisfy runtime '%s'", fn, item)
|
||||
logger.debug(2, "adding '%s' to satisfy runtime '%s'", fn, item)
|
||||
self.add_runtime_target(fn, item)
|
||||
self.add_tasks(fn, dataCache)
|
||||
|
||||
@@ -446,7 +446,7 @@ class TaskData:
|
||||
return
|
||||
if not missing_list:
|
||||
missing_list = []
|
||||
logger.debug("File '%s' is unbuildable, removing...", fn)
|
||||
logger.debug(1, "File '%s' is unbuildable, removing...", fn)
|
||||
self.failed_fns.append(fn)
|
||||
for target in self.build_targets:
|
||||
if fn in self.build_targets[target]:
|
||||
@@ -526,7 +526,7 @@ class TaskData:
|
||||
added = added + 1
|
||||
except (bb.providers.NoRProvider, bb.providers.MultipleRProvider):
|
||||
self.remove_runtarget(target)
|
||||
logger.debug("Resolved " + str(added) + " extra dependencies")
|
||||
logger.debug(1, "Resolved " + str(added) + " extra dependencies")
|
||||
if added == 0:
|
||||
break
|
||||
# self.dump_data()
|
||||
@@ -549,38 +549,38 @@ class TaskData:
|
||||
"""
|
||||
Dump some debug information on the internal data structures
|
||||
"""
|
||||
logger.debug3("build_names:")
|
||||
logger.debug3(", ".join(self.build_targets))
|
||||
logger.debug(3, "build_names:")
|
||||
logger.debug(3, ", ".join(self.build_targets))
|
||||
|
||||
logger.debug3("run_names:")
|
||||
logger.debug3(", ".join(self.run_targets))
|
||||
logger.debug(3, "run_names:")
|
||||
logger.debug(3, ", ".join(self.run_targets))
|
||||
|
||||
logger.debug3("build_targets:")
|
||||
logger.debug(3, "build_targets:")
|
||||
for target in self.build_targets:
|
||||
targets = "None"
|
||||
if target in self.build_targets:
|
||||
targets = self.build_targets[target]
|
||||
logger.debug3(" %s: %s", target, targets)
|
||||
logger.debug(3, " %s: %s", target, targets)
|
||||
|
||||
logger.debug3("run_targets:")
|
||||
logger.debug(3, "run_targets:")
|
||||
for target in self.run_targets:
|
||||
targets = "None"
|
||||
if target in self.run_targets:
|
||||
targets = self.run_targets[target]
|
||||
logger.debug3(" %s: %s", target, targets)
|
||||
logger.debug(3, " %s: %s", target, targets)
|
||||
|
||||
logger.debug3("tasks:")
|
||||
logger.debug(3, "tasks:")
|
||||
for tid in self.taskentries:
|
||||
logger.debug3(" %s: %s %s %s",
|
||||
logger.debug(3, " %s: %s %s %s",
|
||||
tid,
|
||||
self.taskentries[tid].idepends,
|
||||
self.taskentries[tid].irdepends,
|
||||
self.taskentries[tid].tdepends)
|
||||
|
||||
logger.debug3("dependency ids (per fn):")
|
||||
logger.debug(3, "dependency ids (per fn):")
|
||||
for fn in self.depids:
|
||||
logger.debug3(" %s: %s", fn, self.depids[fn])
|
||||
logger.debug(3, " %s: %s", fn, self.depids[fn])
|
||||
|
||||
logger.debug3("runtime dependency ids (per fn):")
|
||||
logger.debug(3, "runtime dependency ids (per fn):")
|
||||
for fn in self.rdepids:
|
||||
logger.debug3(" %s: %s", fn, self.rdepids[fn])
|
||||
logger.debug(3, " %s: %s", fn, self.rdepids[fn])
|
||||
|
||||
@@ -111,9 +111,9 @@ ${D}${libdir}/pkgconfig/*.pc
|
||||
self.assertExecs(set(["sed"]))
|
||||
|
||||
def test_parameter_expansion_modifiers(self):
|
||||
# -,+ and : are also valid modifiers for parameter expansion, but are
|
||||
# - and + are also valid modifiers for parameter expansion, but are
|
||||
# valid characters in bitbake variable names, so are not included here
|
||||
for i in ('=', '?', '#', '%', '##', '%%'):
|
||||
for i in ('=', ':-', ':=', '?', ':?', ':+', '#', '%', '##', '%%'):
|
||||
name = "foo%sbar" % i
|
||||
self.parseExpression("${%s}" % name)
|
||||
self.assertNotIn(name, self.references)
|
||||
|
||||
@@ -31,7 +31,7 @@ class ColorCodeTests(unittest.TestCase):
|
||||
def setUp(self):
|
||||
self.d = bb.data.init()
|
||||
self._progress_watcher = ProgressWatcher()
|
||||
bb.event.register("bb.build.TaskProgress", self._progress_watcher.handle_event, data=self.d)
|
||||
bb.event.register("bb.build.TaskProgress", self._progress_watcher.handle_event)
|
||||
|
||||
def tearDown(self):
|
||||
bb.event.remove("bb.build.TaskProgress", None)
|
||||
|
||||
@@ -87,25 +87,6 @@ class URITest(unittest.TestCase):
|
||||
},
|
||||
'relative': False
|
||||
},
|
||||
# Check that trailing semicolons are handled correctly
|
||||
"http://www.example.org/index.html?qparam1=qvalue1;param2=value2;" : {
|
||||
'uri': 'http://www.example.org/index.html?qparam1=qvalue1;param2=value2',
|
||||
'scheme': 'http',
|
||||
'hostname': 'www.example.org',
|
||||
'port': None,
|
||||
'hostport': 'www.example.org',
|
||||
'path': '/index.html',
|
||||
'userinfo': '',
|
||||
'username': '',
|
||||
'password': '',
|
||||
'params': {
|
||||
'param2': 'value2'
|
||||
},
|
||||
'query': {
|
||||
'qparam1': 'qvalue1'
|
||||
},
|
||||
'relative': False
|
||||
},
|
||||
"http://www.example.com:8080/index.html" : {
|
||||
'uri': 'http://www.example.com:8080/index.html',
|
||||
'scheme': 'http',
|
||||
@@ -390,7 +371,6 @@ class FetcherTest(unittest.TestCase):
|
||||
if os.environ.get("BB_TMPDIR_NOCLEAN") == "yes":
|
||||
print("Not cleaning up %s. Please remove manually." % self.tempdir)
|
||||
else:
|
||||
bb.process.run('chmod u+rw -R %s' % self.tempdir)
|
||||
bb.utils.prunedir(self.tempdir)
|
||||
|
||||
class MirrorUriTest(FetcherTest):
|
||||
@@ -431,10 +411,6 @@ class MirrorUriTest(FetcherTest):
|
||||
("git://someserver.org/bitbake;tag=1234567890123456789012345678901234567890;branch=master", "git://someserver.org/bitbake;branch=master", "git://git.openembedded.org/bitbake;protocol=http")
|
||||
: "git://git.openembedded.org/bitbake;tag=1234567890123456789012345678901234567890;branch=master;protocol=http",
|
||||
|
||||
("git://user1@someserver.org/bitbake;tag=1234567890123456789012345678901234567890;branch=master", "git://someserver.org/bitbake;branch=master", "git://user2@git.openembedded.org/bitbake;protocol=http")
|
||||
: "git://user2@git.openembedded.org/bitbake;tag=1234567890123456789012345678901234567890;branch=master;protocol=http",
|
||||
|
||||
|
||||
#Renaming files doesn't work
|
||||
#("http://somewhere.org/somedir1/somefile_1.2.3.tar.gz", "http://somewhere.org/somedir1/somefile_1.2.3.tar.gz", "http://somewhere2.org/somedir3/somefile_2.3.4.tar.gz") : "http://somewhere2.org/somedir3/somefile_2.3.4.tar.gz"
|
||||
#("file://sstate-xyz.tgz", "file://.*/.*", "file:///somewhere/1234/sstate-cache") : "file:///somewhere/1234/sstate-cache/sstate-xyz.tgz",
|
||||
@@ -495,7 +471,7 @@ class GitDownloadDirectoryNamingTest(FetcherTest):
|
||||
super(GitDownloadDirectoryNamingTest, self).setUp()
|
||||
self.recipe_url = "git://git.openembedded.org/bitbake"
|
||||
self.recipe_dir = "git.openembedded.org.bitbake"
|
||||
self.mirror_url = "git://github.com/openembedded/bitbake.git;protocol=https"
|
||||
self.mirror_url = "git://github.com/openembedded/bitbake.git"
|
||||
self.mirror_dir = "github.com.openembedded.bitbake.git"
|
||||
|
||||
self.d.setVar('SRCREV', '82ea737a0b42a8b53e11c9cde141e9e9c0bd8c40')
|
||||
@@ -543,7 +519,7 @@ class TarballNamingTest(FetcherTest):
|
||||
super(TarballNamingTest, self).setUp()
|
||||
self.recipe_url = "git://git.openembedded.org/bitbake"
|
||||
self.recipe_tarball = "git2_git.openembedded.org.bitbake.tar.gz"
|
||||
self.mirror_url = "git://github.com/openembedded/bitbake.git;protocol=https"
|
||||
self.mirror_url = "git://github.com/openembedded/bitbake.git"
|
||||
self.mirror_tarball = "git2_github.com.openembedded.bitbake.git.tar.gz"
|
||||
|
||||
self.d.setVar('BB_GENERATE_MIRROR_TARBALLS', '1')
|
||||
@@ -577,7 +553,7 @@ class GitShallowTarballNamingTest(FetcherTest):
|
||||
super(GitShallowTarballNamingTest, self).setUp()
|
||||
self.recipe_url = "git://git.openembedded.org/bitbake"
|
||||
self.recipe_tarball = "gitshallow_git.openembedded.org.bitbake_82ea737-1_master.tar.gz"
|
||||
self.mirror_url = "git://github.com/openembedded/bitbake.git;protocol=https"
|
||||
self.mirror_url = "git://github.com/openembedded/bitbake.git"
|
||||
self.mirror_tarball = "gitshallow_github.com.openembedded.bitbake.git_82ea737-1_master.tar.gz"
|
||||
|
||||
self.d.setVar('BB_GIT_SHALLOW', '1')
|
||||
@@ -678,62 +654,6 @@ class FetcherLocalTest(FetcherTest):
|
||||
with self.assertRaises(bb.fetch2.UnpackError):
|
||||
self.fetchUnpack(['file://a;subdir=/bin/sh'])
|
||||
|
||||
def test_local_gitfetch_usehead(self):
|
||||
# Create dummy local Git repo
|
||||
src_dir = tempfile.mkdtemp(dir=self.tempdir,
|
||||
prefix='gitfetch_localusehead_')
|
||||
src_dir = os.path.abspath(src_dir)
|
||||
bb.process.run("git init", cwd=src_dir)
|
||||
bb.process.run("git config user.email 'you@example.com'", cwd=src_dir)
|
||||
bb.process.run("git config user.name 'Your Name'", cwd=src_dir)
|
||||
bb.process.run("git commit --allow-empty -m'Dummy commit'",
|
||||
cwd=src_dir)
|
||||
# Use other branch than master
|
||||
bb.process.run("git checkout -b my-devel", cwd=src_dir)
|
||||
bb.process.run("git commit --allow-empty -m'Dummy commit 2'",
|
||||
cwd=src_dir)
|
||||
stdout = bb.process.run("git rev-parse HEAD", cwd=src_dir)
|
||||
orig_rev = stdout[0].strip()
|
||||
|
||||
# Fetch and check revision
|
||||
self.d.setVar("SRCREV", "AUTOINC")
|
||||
url = "git://" + src_dir + ";protocol=file;usehead=1"
|
||||
fetcher = bb.fetch.Fetch([url], self.d)
|
||||
fetcher.download()
|
||||
fetcher.unpack(self.unpackdir)
|
||||
stdout = bb.process.run("git rev-parse HEAD",
|
||||
cwd=os.path.join(self.unpackdir, 'git'))
|
||||
unpack_rev = stdout[0].strip()
|
||||
self.assertEqual(orig_rev, unpack_rev)
|
||||
|
||||
def test_local_gitfetch_usehead_withname(self):
|
||||
# Create dummy local Git repo
|
||||
src_dir = tempfile.mkdtemp(dir=self.tempdir,
|
||||
prefix='gitfetch_localusehead_')
|
||||
src_dir = os.path.abspath(src_dir)
|
||||
bb.process.run("git init", cwd=src_dir)
|
||||
bb.process.run("git config user.email 'you@example.com'", cwd=src_dir)
|
||||
bb.process.run("git config user.name 'Your Name'", cwd=src_dir)
|
||||
bb.process.run("git commit --allow-empty -m'Dummy commit'",
|
||||
cwd=src_dir)
|
||||
# Use other branch than master
|
||||
bb.process.run("git checkout -b my-devel", cwd=src_dir)
|
||||
bb.process.run("git commit --allow-empty -m'Dummy commit 2'",
|
||||
cwd=src_dir)
|
||||
stdout = bb.process.run("git rev-parse HEAD", cwd=src_dir)
|
||||
orig_rev = stdout[0].strip()
|
||||
|
||||
# Fetch and check revision
|
||||
self.d.setVar("SRCREV", "AUTOINC")
|
||||
url = "git://" + src_dir + ";protocol=file;usehead=1;name=newName"
|
||||
fetcher = bb.fetch.Fetch([url], self.d)
|
||||
fetcher.download()
|
||||
fetcher.unpack(self.unpackdir)
|
||||
stdout = bb.process.run("git rev-parse HEAD",
|
||||
cwd=os.path.join(self.unpackdir, 'git'))
|
||||
unpack_rev = stdout[0].strip()
|
||||
self.assertEqual(orig_rev, unpack_rev)
|
||||
|
||||
class FetcherNoNetworkTest(FetcherTest):
|
||||
def setUp(self):
|
||||
super().setUp()
|
||||
@@ -924,21 +844,35 @@ class FetcherNetworkTest(FetcherTest):
|
||||
self.assertRaises(bb.fetch.FetchError, self.gitfetcher, url1, url2)
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_gitfetch_usehead(self):
|
||||
# Since self.gitfetcher() sets SRCREV we expect this to override
|
||||
# `usehead=1' and instead fetch the specified SRCREV. See
|
||||
# test_local_gitfetch_usehead() for a positive use of the usehead
|
||||
# feature.
|
||||
url = "git://git.openembedded.org/bitbake;usehead=1"
|
||||
self.assertRaises(bb.fetch.ParameterError, self.gitfetcher, url, url)
|
||||
def test_gitfetch_localusehead(self):
|
||||
# Create dummy local Git repo
|
||||
src_dir = tempfile.mkdtemp(dir=self.tempdir,
|
||||
prefix='gitfetch_localusehead_')
|
||||
src_dir = os.path.abspath(src_dir)
|
||||
bb.process.run("git init", cwd=src_dir)
|
||||
bb.process.run("git commit --allow-empty -m'Dummy commit'",
|
||||
cwd=src_dir)
|
||||
# Use other branch than master
|
||||
bb.process.run("git checkout -b my-devel", cwd=src_dir)
|
||||
bb.process.run("git commit --allow-empty -m'Dummy commit 2'",
|
||||
cwd=src_dir)
|
||||
stdout = bb.process.run("git rev-parse HEAD", cwd=src_dir)
|
||||
orig_rev = stdout[0].strip()
|
||||
|
||||
# Fetch and check revision
|
||||
self.d.setVar("SRCREV", "AUTOINC")
|
||||
url = "git://" + src_dir + ";protocol=file;usehead=1"
|
||||
fetcher = bb.fetch.Fetch([url], self.d)
|
||||
fetcher.download()
|
||||
fetcher.unpack(self.unpackdir)
|
||||
stdout = bb.process.run("git rev-parse HEAD",
|
||||
cwd=os.path.join(self.unpackdir, 'git'))
|
||||
unpack_rev = stdout[0].strip()
|
||||
self.assertEqual(orig_rev, unpack_rev)
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_gitfetch_usehead_withname(self):
|
||||
# Since self.gitfetcher() sets SRCREV we expect this to override
|
||||
# `usehead=1' and instead fetch the specified SRCREV. See
|
||||
# test_local_gitfetch_usehead() for a positive use of the usehead
|
||||
# feature.
|
||||
url = "git://git.openembedded.org/bitbake;usehead=1;name=newName"
|
||||
def test_gitfetch_remoteusehead(self):
|
||||
url = "git://git.openembedded.org/bitbake;usehead=1"
|
||||
self.assertRaises(bb.fetch.ParameterError, self.gitfetcher, url, url)
|
||||
|
||||
@skipIfNoNetwork()
|
||||
@@ -989,7 +923,7 @@ class FetcherNetworkTest(FetcherTest):
|
||||
def test_git_submodule_dbus_broker(self):
|
||||
# The following external repositories have show failures in fetch and unpack operations
|
||||
# We want to avoid regressions!
|
||||
url = "gitsm://github.com/bus1/dbus-broker;protocol=https;rev=fc874afa0992d0c75ec25acb43d344679f0ee7d2;branch=main"
|
||||
url = "gitsm://github.com/bus1/dbus-broker;protocol=git;rev=fc874afa0992d0c75ec25acb43d344679f0ee7d2;branch=main"
|
||||
fetcher = bb.fetch.Fetch([url], self.d)
|
||||
fetcher.download()
|
||||
# Previous cwd has been deleted
|
||||
@@ -1005,7 +939,7 @@ class FetcherNetworkTest(FetcherTest):
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_git_submodule_CLI11(self):
|
||||
url = "gitsm://github.com/CLIUtils/CLI11;protocol=https;rev=bd4dc911847d0cde7a6b41dfa626a85aab213baf;branch=main"
|
||||
url = "gitsm://github.com/CLIUtils/CLI11;protocol=git;rev=bd4dc911847d0cde7a6b41dfa626a85aab213baf"
|
||||
fetcher = bb.fetch.Fetch([url], self.d)
|
||||
fetcher.download()
|
||||
# Previous cwd has been deleted
|
||||
@@ -1020,12 +954,12 @@ class FetcherNetworkTest(FetcherTest):
|
||||
@skipIfNoNetwork()
|
||||
def test_git_submodule_update_CLI11(self):
|
||||
""" Prevent regression on update detection not finding missing submodule, or modules without needed commits """
|
||||
url = "gitsm://github.com/CLIUtils/CLI11;protocol=https;rev=cf6a99fa69aaefe477cc52e3ef4a7d2d7fa40714;branch=main"
|
||||
url = "gitsm://github.com/CLIUtils/CLI11;protocol=git;rev=cf6a99fa69aaefe477cc52e3ef4a7d2d7fa40714"
|
||||
fetcher = bb.fetch.Fetch([url], self.d)
|
||||
fetcher.download()
|
||||
|
||||
# CLI11 that pulls in a newer nlohmann-json
|
||||
url = "gitsm://github.com/CLIUtils/CLI11;protocol=https;rev=49ac989a9527ee9bb496de9ded7b4872c2e0e5ca;branch=main"
|
||||
url = "gitsm://github.com/CLIUtils/CLI11;protocol=git;rev=49ac989a9527ee9bb496de9ded7b4872c2e0e5ca"
|
||||
fetcher = bb.fetch.Fetch([url], self.d)
|
||||
fetcher.download()
|
||||
# Previous cwd has been deleted
|
||||
@@ -1039,7 +973,7 @@ class FetcherNetworkTest(FetcherTest):
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_git_submodule_aktualizr(self):
|
||||
url = "gitsm://github.com/advancedtelematic/aktualizr;branch=master;protocol=https;rev=d00d1a04cc2366d1a5f143b84b9f507f8bd32c44"
|
||||
url = "gitsm://github.com/advancedtelematic/aktualizr;branch=master;protocol=git;rev=d00d1a04cc2366d1a5f143b84b9f507f8bd32c44"
|
||||
fetcher = bb.fetch.Fetch([url], self.d)
|
||||
fetcher.download()
|
||||
# Previous cwd has been deleted
|
||||
@@ -1059,7 +993,7 @@ class FetcherNetworkTest(FetcherTest):
|
||||
""" Prevent regression on deeply nested submodules not being checked out properly, even though they were fetched. """
|
||||
|
||||
# This repository also has submodules where the module (name), path and url do not align
|
||||
url = "gitsm://github.com/azure/iotedge.git;protocol=https;rev=d76e0316c6f324345d77c48a83ce836d09392699"
|
||||
url = "gitsm://github.com/azure/iotedge.git;protocol=git;rev=d76e0316c6f324345d77c48a83ce836d09392699"
|
||||
fetcher = bb.fetch.Fetch([url], self.d)
|
||||
fetcher.download()
|
||||
# Previous cwd has been deleted
|
||||
@@ -1117,7 +1051,7 @@ class SVNTest(FetcherTest):
|
||||
|
||||
bb.process.run("svn co %s svnfetch_co" % self.repo_url, cwd=self.tempdir)
|
||||
# Github will emulate SVN. Use this to check if we're downloding...
|
||||
bb.process.run("svn propset svn:externals 'bitbake https://github.com/PhilipHazel/pcre2.git' .",
|
||||
bb.process.run("svn propset svn:externals 'bitbake svn://vcs.pcre.org/pcre2/code' .",
|
||||
cwd=os.path.join(self.tempdir, 'svnfetch_co', 'trunk'))
|
||||
bb.process.run("svn commit --non-interactive -m 'Add external'",
|
||||
cwd=os.path.join(self.tempdir, 'svnfetch_co', 'trunk'))
|
||||
@@ -1235,7 +1169,7 @@ class FetchLatestVersionTest(FetcherTest):
|
||||
|
||||
test_git_uris = {
|
||||
# version pattern "X.Y.Z"
|
||||
("mx-1.0", "git://github.com/clutter-project/mx.git;branch=mx-1.4;protocol=https", "9b1db6b8060bd00b121a692f942404a24ae2960f", "")
|
||||
("mx-1.0", "git://github.com/clutter-project/mx.git;branch=mx-1.4", "9b1db6b8060bd00b121a692f942404a24ae2960f", "")
|
||||
: "1.99.4",
|
||||
# version pattern "vX.Y"
|
||||
# mirror of git.infradead.org since network issues interfered with testing
|
||||
@@ -1246,7 +1180,7 @@ class FetchLatestVersionTest(FetcherTest):
|
||||
("presentproto", "git://git.yoctoproject.org/bbfetchtests-presentproto", "24f3a56e541b0a9e6c6ee76081f441221a120ef9", "")
|
||||
: "1.0",
|
||||
# version pattern "pkg_name-vX.Y.Z"
|
||||
("dtc", "git://git.yoctoproject.org/bbfetchtests-dtc.git", "65cc4d2748a2c2e6f27f1cf39e07a5dbabd80ebf", "")
|
||||
("dtc", "git://git.qemu.org/dtc.git", "65cc4d2748a2c2e6f27f1cf39e07a5dbabd80ebf", "")
|
||||
: "1.4.0",
|
||||
# combination version pattern
|
||||
("sysprof", "git://gitlab.gnome.org/GNOME/sysprof.git;protocol=https", "cd44ee6644c3641507fb53b8a2a69137f2971219", "")
|
||||
@@ -1262,9 +1196,9 @@ class FetchLatestVersionTest(FetcherTest):
|
||||
: "0.4.3",
|
||||
("build-appliance-image", "git://git.yoctoproject.org/poky", "b37dd451a52622d5b570183a81583cc34c2ff555", "(?P<pver>(([0-9][\.|_]?)+[0-9]))")
|
||||
: "11.0.0",
|
||||
("chkconfig-alternatives-native", "git://github.com/kergoth/chkconfig;branch=sysroot;protocol=https", "cd437ecbd8986c894442f8fce1e0061e20f04dee", "chkconfig\-(?P<pver>((\d+[\.\-_]*)+))")
|
||||
("chkconfig-alternatives-native", "git://github.com/kergoth/chkconfig;branch=sysroot", "cd437ecbd8986c894442f8fce1e0061e20f04dee", "chkconfig\-(?P<pver>((\d+[\.\-_]*)+))")
|
||||
: "1.3.59",
|
||||
("remake", "git://github.com/rocky/remake.git;protocol=https", "f05508e521987c8494c92d9c2871aec46307d51d", "(?P<pver>(\d+\.(\d+\.)*\d*(\+dbg\d+(\.\d+)*)*))")
|
||||
("remake", "git://github.com/rocky/remake.git", "f05508e521987c8494c92d9c2871aec46307d51d", "(?P<pver>(\d+\.(\d+\.)*\d*(\+dbg\d+(\.\d+)*)*))")
|
||||
: "3.82+dbg0.9",
|
||||
}
|
||||
|
||||
@@ -1354,10 +1288,13 @@ class FetchCheckStatusTest(FetcherTest):
|
||||
"http://downloads.yoctoproject.org/releases/sato/sato-engine-0.2.tar.gz",
|
||||
"http://downloads.yoctoproject.org/releases/sato/sato-engine-0.3.tar.gz",
|
||||
"https://yoctoproject.org/",
|
||||
"https://docs.yoctoproject.org",
|
||||
"https://yoctoproject.org/documentation",
|
||||
"http://downloads.yoctoproject.org/releases/opkg/opkg-0.1.7.tar.gz",
|
||||
"http://downloads.yoctoproject.org/releases/opkg/opkg-0.3.0.tar.gz",
|
||||
"ftp://sourceware.org/pub/libffi/libffi-1.20.tar.gz",
|
||||
"http://ftp.gnu.org/gnu/autoconf/autoconf-2.60.tar.gz",
|
||||
"https://ftp.gnu.org/gnu/chess/gnuchess-5.08.tar.gz",
|
||||
"https://ftp.gnu.org/gnu/gmp/gmp-4.0.tar.gz",
|
||||
# GitHub releases are hosted on Amazon S3, which doesn't support HEAD
|
||||
"https://github.com/kergoth/tslib/releases/download/1.1/tslib-1.1.tar.xz"
|
||||
]
|
||||
@@ -1396,8 +1333,6 @@ class GitMakeShallowTest(FetcherTest):
|
||||
self.gitdir = os.path.join(self.tempdir, 'gitshallow')
|
||||
bb.utils.mkdirhier(self.gitdir)
|
||||
bb.process.run('git init', cwd=self.gitdir)
|
||||
bb.process.run('git config user.email "you@example.com"', cwd=self.gitdir)
|
||||
bb.process.run('git config user.name "Your Name"', cwd=self.gitdir)
|
||||
|
||||
def assertRefs(self, expected_refs):
|
||||
actual_refs = self.git(['for-each-ref', '--format=%(refname)']).splitlines()
|
||||
@@ -1521,8 +1456,6 @@ class GitShallowTest(FetcherTest):
|
||||
|
||||
bb.utils.mkdirhier(self.srcdir)
|
||||
self.git('init', cwd=self.srcdir)
|
||||
self.git('config user.email "you@example.com"', cwd=self.srcdir)
|
||||
self.git('config user.name "Your Name"', cwd=self.srcdir)
|
||||
self.d.setVar('WORKDIR', self.tempdir)
|
||||
self.d.setVar('S', self.gitdir)
|
||||
self.d.delVar('PREMIRRORS')
|
||||
@@ -1604,7 +1537,6 @@ class GitShallowTest(FetcherTest):
|
||||
|
||||
# fetch and unpack, from the shallow tarball
|
||||
bb.utils.remove(self.gitdir, recurse=True)
|
||||
bb.process.run('chmod u+w -R "%s"' % ud.clonedir)
|
||||
bb.utils.remove(ud.clonedir, recurse=True)
|
||||
bb.utils.remove(ud.clonedir.replace('gitsource', 'gitsubmodule'), recurse=True)
|
||||
|
||||
@@ -1757,8 +1689,6 @@ class GitShallowTest(FetcherTest):
|
||||
smdir = os.path.join(self.tempdir, 'gitsubmodule')
|
||||
bb.utils.mkdirhier(smdir)
|
||||
self.git('init', cwd=smdir)
|
||||
self.git('config user.email "you@example.com"', cwd=smdir)
|
||||
self.git('config user.name "Your Name"', cwd=smdir)
|
||||
# Make this look like it was cloned from a remote...
|
||||
self.git('config --add remote.origin.url "%s"' % smdir, cwd=smdir)
|
||||
self.git('config --add remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*"', cwd=smdir)
|
||||
@@ -1789,8 +1719,6 @@ class GitShallowTest(FetcherTest):
|
||||
smdir = os.path.join(self.tempdir, 'gitsubmodule')
|
||||
bb.utils.mkdirhier(smdir)
|
||||
self.git('init', cwd=smdir)
|
||||
self.git('config user.email "you@example.com"', cwd=smdir)
|
||||
self.git('config user.name "Your Name"', cwd=smdir)
|
||||
# Make this look like it was cloned from a remote...
|
||||
self.git('config --add remote.origin.url "%s"' % smdir, cwd=smdir)
|
||||
self.git('config --add remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*"', cwd=smdir)
|
||||
@@ -1833,8 +1761,8 @@ class GitShallowTest(FetcherTest):
|
||||
self.git('annex init', cwd=self.srcdir)
|
||||
open(os.path.join(self.srcdir, 'c'), 'w').close()
|
||||
self.git('annex add c', cwd=self.srcdir)
|
||||
self.git('commit --author "Foo Bar <foo@bar>" -m annex-c -a', cwd=self.srcdir)
|
||||
bb.process.run('chmod u+w -R %s' % self.srcdir)
|
||||
self.git('commit -m annex-c -a', cwd=self.srcdir)
|
||||
bb.process.run('chmod u+w -R %s' % os.path.join(self.srcdir, '.git', 'annex'))
|
||||
|
||||
uri = 'gitannex://%s;protocol=file;subdir=${S}' % self.srcdir
|
||||
fetcher, ud = self.fetch_shallow(uri)
|
||||
@@ -2048,7 +1976,7 @@ class GitShallowTest(FetcherTest):
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_bitbake(self):
|
||||
self.git('remote add --mirror=fetch origin https://github.com/openembedded/bitbake', cwd=self.srcdir)
|
||||
self.git('remote add --mirror=fetch origin git://github.com/openembedded/bitbake', cwd=self.srcdir)
|
||||
self.git('config core.bare true', cwd=self.srcdir)
|
||||
self.git('fetch', cwd=self.srcdir)
|
||||
|
||||
@@ -2109,8 +2037,6 @@ class GitLfsTest(FetcherTest):
|
||||
|
||||
bb.utils.mkdirhier(self.srcdir)
|
||||
self.git('init', cwd=self.srcdir)
|
||||
self.git('config user.email "you@example.com"', cwd=self.srcdir)
|
||||
self.git('config user.name "Your Name"', cwd=self.srcdir)
|
||||
with open(os.path.join(self.srcdir, '.gitattributes'), 'wt') as attrs:
|
||||
attrs.write('*.mp3 filter=lfs -text')
|
||||
self.git(['add', '.gitattributes'], cwd=self.srcdir)
|
||||
@@ -2125,14 +2051,13 @@ class GitLfsTest(FetcherTest):
|
||||
cwd = self.gitdir
|
||||
return bb.process.run(cmd, cwd=cwd)[0]
|
||||
|
||||
def fetch(self, uri=None, download=True):
|
||||
def fetch(self, uri=None):
|
||||
uris = self.d.getVar('SRC_URI').split()
|
||||
uri = uris[0]
|
||||
d = self.d
|
||||
|
||||
fetcher = bb.fetch2.Fetch(uris, d)
|
||||
if download:
|
||||
fetcher.download()
|
||||
fetcher.download()
|
||||
ud = fetcher.ud[uri]
|
||||
return fetcher, ud
|
||||
|
||||
@@ -2142,21 +2067,16 @@ class GitLfsTest(FetcherTest):
|
||||
uri = 'git://%s;protocol=file;subdir=${S};lfs=1' % self.srcdir
|
||||
self.d.setVar('SRC_URI', uri)
|
||||
|
||||
# Careful: suppress initial attempt at downloading until
|
||||
# we know whether git-lfs is installed.
|
||||
fetcher, ud = self.fetch(uri=None, download=False)
|
||||
fetcher, ud = self.fetch()
|
||||
self.assertIsNotNone(ud.method._find_git_lfs)
|
||||
|
||||
# If git-lfs can be found, the unpack should be successful. Only
|
||||
# attempt this with the real live copy of git-lfs installed.
|
||||
if ud.method._find_git_lfs(self.d):
|
||||
fetcher.download()
|
||||
shutil.rmtree(self.gitdir, ignore_errors=True)
|
||||
fetcher.unpack(self.d.getVar('WORKDIR'))
|
||||
# If git-lfs can be found, the unpack should be successful
|
||||
ud.method._find_git_lfs = lambda d: True
|
||||
shutil.rmtree(self.gitdir, ignore_errors=True)
|
||||
fetcher.unpack(self.d.getVar('WORKDIR'))
|
||||
|
||||
# If git-lfs cannot be found, the unpack should throw an error
|
||||
with self.assertRaises(bb.fetch2.FetchError):
|
||||
fetcher.download()
|
||||
ud.method._find_git_lfs = lambda d: False
|
||||
shutil.rmtree(self.gitdir, ignore_errors=True)
|
||||
fetcher.unpack(self.d.getVar('WORKDIR'))
|
||||
@@ -2167,16 +2087,10 @@ class GitLfsTest(FetcherTest):
|
||||
uri = 'git://%s;protocol=file;subdir=${S};lfs=0' % self.srcdir
|
||||
self.d.setVar('SRC_URI', uri)
|
||||
|
||||
# In contrast to test_lfs_enabled(), allow the implicit download
|
||||
# done by self.fetch() to occur here. The point of this test case
|
||||
# is to verify that the fetcher can survive even if the source
|
||||
# repository has Git LFS usage configured.
|
||||
fetcher, ud = self.fetch()
|
||||
self.assertIsNotNone(ud.method._find_git_lfs)
|
||||
|
||||
# If git-lfs can be found, the unpack should be successful. A
|
||||
# live copy of git-lfs is not required for this case, so
|
||||
# unconditionally forge its presence.
|
||||
# If git-lfs can be found, the unpack should be successful
|
||||
ud.method._find_git_lfs = lambda d: True
|
||||
shutil.rmtree(self.gitdir, ignore_errors=True)
|
||||
fetcher.unpack(self.d.getVar('WORKDIR'))
|
||||
|
||||
@@ -1 +1 @@
|
||||
do_install[mcdepends] = "mc:mc-1:mc_2:a1:do_build"
|
||||
do_install[mcdepends] = "mc:mc1:mc2:a1:do_build"
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
python () {
|
||||
if d.getVar("BB_CURRENT_MC") == "mc-1":
|
||||
bb.fatal("Multiconfig is mc-1")
|
||||
if d.getVar("BB_CURRENT_MC") == "mc1":
|
||||
bb.fatal("Multiconfig is mc1")
|
||||
}
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
python () {
|
||||
if d.getVar("BB_CURRENT_MC") == "mc_2":
|
||||
bb.fatal("Multiconfig is mc_2")
|
||||
if d.getVar("BB_CURRENT_MC") == "mc2":
|
||||
bb.fatal("Multiconfig is mc2")
|
||||
}
|
||||
|
||||
@@ -216,66 +216,66 @@ class RunQueueTests(unittest.TestCase):
|
||||
def test_multiconfig_setscene_optimise(self):
|
||||
with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
|
||||
extraenv = {
|
||||
"BBMULTICONFIG" : "mc-1 mc_2",
|
||||
"BBMULTICONFIG" : "mc1 mc2",
|
||||
"BB_SIGNATURE_HANDLER" : "basic"
|
||||
}
|
||||
cmd = ["bitbake", "b1", "mc:mc-1:b1", "mc:mc_2:b1"]
|
||||
cmd = ["bitbake", "b1", "mc:mc1:b1", "mc:mc2:b1"]
|
||||
setscenetasks = ['package_write_ipk_setscene', 'package_write_rpm_setscene', 'packagedata_setscene',
|
||||
'populate_sysroot_setscene', 'package_qa_setscene']
|
||||
sstatevalid = ""
|
||||
tasks = self.run_bitbakecmd(cmd, tempdir, sstatevalid, extraenv=extraenv)
|
||||
expected = ['a1:' + x for x in self.alltasks] + ['b1:' + x for x in self.alltasks] + \
|
||||
['mc-1:b1:' + x for x in setscenetasks] + ['mc-1:a1:' + x for x in setscenetasks] + \
|
||||
['mc_2:b1:' + x for x in setscenetasks] + ['mc_2:a1:' + x for x in setscenetasks] + \
|
||||
['mc-1:b1:build', 'mc_2:b1:build']
|
||||
for x in ['mc-1:a1:package_qa_setscene', 'mc_2:a1:package_qa_setscene', 'a1:build', 'a1:package_qa']:
|
||||
['mc1:b1:' + x for x in setscenetasks] + ['mc1:a1:' + x for x in setscenetasks] + \
|
||||
['mc2:b1:' + x for x in setscenetasks] + ['mc2:a1:' + x for x in setscenetasks] + \
|
||||
['mc1:b1:build', 'mc2:b1:build']
|
||||
for x in ['mc1:a1:package_qa_setscene', 'mc2:a1:package_qa_setscene', 'a1:build', 'a1:package_qa']:
|
||||
expected.remove(x)
|
||||
self.assertEqual(set(tasks), set(expected))
|
||||
|
||||
def test_multiconfig_bbmask(self):
|
||||
# This test validates that multiconfigs can independently mask off
|
||||
# recipes they do not want with BBMASK. It works by having recipes
|
||||
# that will fail to parse for mc-1 and mc_2, then making each multiconfig
|
||||
# that will fail to parse for mc1 and mc2, then making each multiconfig
|
||||
# build the one that does parse. This ensures that the recipes are in
|
||||
# each multiconfigs BBFILES, but each is masking only the one that
|
||||
# doesn't parse
|
||||
with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
|
||||
extraenv = {
|
||||
"BBMULTICONFIG" : "mc-1 mc_2",
|
||||
"BBMULTICONFIG" : "mc1 mc2",
|
||||
"BB_SIGNATURE_HANDLER" : "basic",
|
||||
"EXTRA_BBFILES": "${COREBASE}/recipes/fails-mc/*.bb",
|
||||
}
|
||||
cmd = ["bitbake", "mc:mc-1:fails-mc2", "mc:mc_2:fails-mc1"]
|
||||
cmd = ["bitbake", "mc:mc1:fails-mc2", "mc:mc2:fails-mc1"]
|
||||
self.run_bitbakecmd(cmd, tempdir, "", extraenv=extraenv)
|
||||
|
||||
def test_multiconfig_mcdepends(self):
|
||||
with tempfile.TemporaryDirectory(prefix="runqueuetest") as tempdir:
|
||||
extraenv = {
|
||||
"BBMULTICONFIG" : "mc-1 mc_2",
|
||||
"BBMULTICONFIG" : "mc1 mc2",
|
||||
"BB_SIGNATURE_HANDLER" : "TestMulticonfigDepends",
|
||||
"EXTRA_BBFILES": "${COREBASE}/recipes/fails-mc/*.bb",
|
||||
}
|
||||
tasks = self.run_bitbakecmd(["bitbake", "mc:mc-1:f1"], tempdir, "", extraenv=extraenv, cleanup=True)
|
||||
expected = ["mc-1:f1:%s" % t for t in self.alltasks] + \
|
||||
["mc_2:a1:%s" % t for t in self.alltasks]
|
||||
tasks = self.run_bitbakecmd(["bitbake", "mc:mc1:f1"], tempdir, "", extraenv=extraenv, cleanup=True)
|
||||
expected = ["mc1:f1:%s" % t for t in self.alltasks] + \
|
||||
["mc2:a1:%s" % t for t in self.alltasks]
|
||||
self.assertEqual(set(tasks), set(expected))
|
||||
|
||||
# A rebuild does nothing
|
||||
tasks = self.run_bitbakecmd(["bitbake", "mc:mc-1:f1"], tempdir, "", extraenv=extraenv, cleanup=True)
|
||||
tasks = self.run_bitbakecmd(["bitbake", "mc:mc1:f1"], tempdir, "", extraenv=extraenv, cleanup=True)
|
||||
self.assertEqual(set(tasks), set())
|
||||
|
||||
# Test that a signature change in the dependent task causes
|
||||
# mcdepends to rebuild
|
||||
tasks = self.run_bitbakecmd(["bitbake", "mc:mc_2:a1", "-c", "compile", "-f"], tempdir, "", extraenv=extraenv, cleanup=True)
|
||||
expected = ["mc_2:a1:compile"]
|
||||
tasks = self.run_bitbakecmd(["bitbake", "mc:mc2:a1", "-c", "compile", "-f"], tempdir, "", extraenv=extraenv, cleanup=True)
|
||||
expected = ["mc2:a1:compile"]
|
||||
self.assertEqual(set(tasks), set(expected))
|
||||
|
||||
rerun_tasks = self.alltasks[:]
|
||||
for x in ("fetch", "unpack", "patch", "prepare_recipe_sysroot", "configure", "compile"):
|
||||
rerun_tasks.remove(x)
|
||||
tasks = self.run_bitbakecmd(["bitbake", "mc:mc-1:f1"], tempdir, "", extraenv=extraenv, cleanup=True)
|
||||
expected = ["mc-1:f1:%s" % t for t in rerun_tasks] + \
|
||||
["mc_2:a1:%s" % t for t in rerun_tasks]
|
||||
tasks = self.run_bitbakecmd(["bitbake", "mc:mc1:f1"], tempdir, "", extraenv=extraenv, cleanup=True)
|
||||
expected = ["mc1:f1:%s" % t for t in rerun_tasks] + \
|
||||
["mc2:a1:%s" % t for t in rerun_tasks]
|
||||
self.assertEqual(set(tasks), set(expected))
|
||||
|
||||
@unittest.skipIf(sys.version_info < (3, 5, 0), 'Python 3.5 or later required')
|
||||
@@ -361,7 +361,7 @@ class RunQueueTests(unittest.TestCase):
|
||||
|
||||
def shutdown(self, tempdir):
|
||||
# Wait for the hashserve socket to disappear else we'll see races with the tempdir cleanup
|
||||
while (os.path.exists(tempdir + "/hashserve.sock") or os.path.exists(tempdir + "cache/hashserv.db-wal")):
|
||||
while os.path.exists(tempdir + "/hashserve.sock"):
|
||||
time.sleep(0.5)
|
||||
|
||||
|
||||
|
||||
@@ -440,7 +440,7 @@ class Tinfoil:
|
||||
to initialise Tinfoil and use it with config_only=True first and
|
||||
then conditionally call this function to parse recipes later.
|
||||
"""
|
||||
config_params = TinfoilConfigParameters(config_only=False, quiet=self.quiet)
|
||||
config_params = TinfoilConfigParameters(config_only=False)
|
||||
self.run_actions(config_params)
|
||||
self.recipes_parsed = True
|
||||
|
||||
|
||||
@@ -148,14 +148,14 @@ class ORMWrapper(object):
|
||||
buildrequest = None
|
||||
if brbe is not None:
|
||||
# Toaster-triggered build
|
||||
logger.debug("buildinfohelper: brbe is %s" % brbe)
|
||||
logger.debug(1, "buildinfohelper: brbe is %s" % brbe)
|
||||
br, _ = brbe.split(":")
|
||||
buildrequest = BuildRequest.objects.get(pk=br)
|
||||
prj = buildrequest.project
|
||||
else:
|
||||
# CLI build
|
||||
prj = Project.objects.get_or_create_default_project()
|
||||
logger.debug("buildinfohelper: project is not specified, defaulting to %s" % prj)
|
||||
logger.debug(1, "buildinfohelper: project is not specified, defaulting to %s" % prj)
|
||||
|
||||
if buildrequest is not None:
|
||||
# reuse existing Build object
|
||||
@@ -171,7 +171,7 @@ class ORMWrapper(object):
|
||||
completed_on=now,
|
||||
build_name='')
|
||||
|
||||
logger.debug("buildinfohelper: build is created %s" % build)
|
||||
logger.debug(1, "buildinfohelper: build is created %s" % build)
|
||||
|
||||
if buildrequest is not None:
|
||||
buildrequest.build = build
|
||||
@@ -906,7 +906,7 @@ class BuildInfoHelper(object):
|
||||
|
||||
self.project = None
|
||||
|
||||
logger.debug("buildinfohelper: Build info helper inited %s" % vars(self))
|
||||
logger.debug(1, "buildinfohelper: Build info helper inited %s" % vars(self))
|
||||
|
||||
|
||||
###################
|
||||
@@ -1620,7 +1620,7 @@ class BuildInfoHelper(object):
|
||||
# if we have a backlog of events, do our best to save them here
|
||||
if len(self.internal_state['backlog']):
|
||||
tempevent = self.internal_state['backlog'].pop()
|
||||
logger.debug("buildinfohelper: Saving stored event %s "
|
||||
logger.debug(1, "buildinfohelper: Saving stored event %s "
|
||||
% tempevent)
|
||||
self.store_log_event(tempevent,cli_backlog)
|
||||
else:
|
||||
|
||||
@@ -745,7 +745,7 @@ def main(server, eventHandler, params, tf = TerminalFilter):
|
||||
continue
|
||||
|
||||
if isinstance(event, bb.runqueue.sceneQueueTaskStarted):
|
||||
logger.info("Running setscene task %d of %d (%s)" % (event.stats.setscene_covered + event.stats.setscene_active + event.stats.setscene_notcovered + 1, event.stats.setscene_total, event.taskstring))
|
||||
logger.info("Running setscene task %d of %d (%s)" % (event.stats.completed + event.stats.active + event.stats.failed + 1, event.stats.total, event.taskstring))
|
||||
continue
|
||||
|
||||
if isinstance(event, bb.runqueue.runQueueTaskStarted):
|
||||
|
||||
@@ -49,8 +49,8 @@ class BBUIHelper:
|
||||
tid = event._fn + ":" + event._task
|
||||
removetid(event.pid, tid)
|
||||
self.failed_tasks.append( { 'title' : "%s %s" % (event._package, event._task)})
|
||||
elif isinstance(event, bb.runqueue.runQueueTaskStarted) or isinstance(event, bb.runqueue.sceneQueueTaskStarted):
|
||||
self.tasknumber_current = event.stats.completed + event.stats.active + event.stats.failed + event.stats.setscene_active + 1
|
||||
elif isinstance(event, bb.runqueue.runQueueTaskStarted):
|
||||
self.tasknumber_current = event.stats.completed + event.stats.active + event.stats.failed + 1
|
||||
self.tasknumber_total = event.stats.total
|
||||
self.needUpdate = True
|
||||
elif isinstance(event, bb.build.TaskProgress):
|
||||
|
||||
@@ -16,8 +16,7 @@ import bb.msg
|
||||
import multiprocessing
|
||||
import fcntl
|
||||
import importlib
|
||||
import importlib.machinery
|
||||
import importlib.util
|
||||
from importlib import machinery
|
||||
import itertools
|
||||
import subprocess
|
||||
import glob
|
||||
@@ -130,7 +129,6 @@ def vercmp(ta, tb):
|
||||
return r
|
||||
|
||||
def vercmp_string(a, b):
|
||||
""" Split version strings and compare them """
|
||||
ta = split_version(a)
|
||||
tb = split_version(b)
|
||||
return vercmp(ta, tb)
|
||||
@@ -249,12 +247,6 @@ def explode_dep_versions2(s, *, sort=True):
|
||||
return r
|
||||
|
||||
def explode_dep_versions(s):
|
||||
"""
|
||||
Take an RDEPENDS style string of format:
|
||||
"DEPEND1 (optional version) DEPEND2 (optional version) ..."
|
||||
skip null value and items appeared in dependancy string multiple times
|
||||
and return a dictionary of dependencies and versions.
|
||||
"""
|
||||
r = explode_dep_versions2(s)
|
||||
for d in r:
|
||||
if not r[d]:
|
||||
@@ -452,10 +444,6 @@ def lockfile(name, shared=False, retry=True, block=False):
|
||||
consider the possibility of sending a signal to the process to break
|
||||
out - at which point you want block=True rather than retry=True.
|
||||
"""
|
||||
if len(name) > 255:
|
||||
root, ext = os.path.splitext(name)
|
||||
name = root[:255 - len(ext)] + ext
|
||||
|
||||
dirname = os.path.dirname(name)
|
||||
mkdirhier(dirname)
|
||||
|
||||
@@ -492,7 +480,7 @@ def lockfile(name, shared=False, retry=True, block=False):
|
||||
return lf
|
||||
lf.close()
|
||||
except OSError as e:
|
||||
if e.errno == errno.EACCES or e.errno == errno.ENAMETOOLONG:
|
||||
if e.errno == errno.EACCES:
|
||||
logger.error("Unable to acquire lock '%s', %s",
|
||||
e.strerror, name)
|
||||
sys.exit(1)
|
||||
@@ -614,7 +602,7 @@ def filter_environment(good_vars):
|
||||
os.environ["LC_ALL"] = "en_US.UTF-8"
|
||||
|
||||
if removed_vars:
|
||||
logger.debug("Removed the following variables from the environment: %s", ", ".join(removed_vars.keys()))
|
||||
logger.debug(1, "Removed the following variables from the environment: %s", ", ".join(removed_vars.keys()))
|
||||
|
||||
return removed_vars
|
||||
|
||||
@@ -704,7 +692,7 @@ def remove(path, recurse=False, ionice=False):
|
||||
raise
|
||||
|
||||
def prunedir(topdir, ionice=False):
|
||||
""" Delete everything reachable from the directory named in 'topdir'. """
|
||||
# Delete everything reachable from the directory named in 'topdir'.
|
||||
# CAUTION: This is dangerous!
|
||||
if _check_unsafe_delete_path(topdir):
|
||||
raise Exception('bb.utils.prunedir: called with dangerous path "%s", refusing to delete!' % topdir)
|
||||
@@ -715,10 +703,8 @@ def prunedir(topdir, ionice=False):
|
||||
# but thats possibly insane and suffixes is probably going to be small
|
||||
#
|
||||
def prune_suffix(var, suffixes, d):
|
||||
"""
|
||||
See if var ends with any of the suffixes listed and
|
||||
remove it if found
|
||||
"""
|
||||
# See if var ends with any of the suffixes listed and
|
||||
# remove it if found
|
||||
for suffix in suffixes:
|
||||
if suffix and var.endswith(suffix):
|
||||
return var[:-len(suffix)]
|
||||
@@ -970,10 +956,6 @@ def umask(new_mask):
|
||||
os.umask(current_mask)
|
||||
|
||||
def to_boolean(string, default=None):
|
||||
"""
|
||||
Check input string and return boolean value True/False/None
|
||||
depending upon the checks
|
||||
"""
|
||||
if not string:
|
||||
return default
|
||||
|
||||
@@ -1017,23 +999,6 @@ def contains(variable, checkvalues, truevalue, falsevalue, d):
|
||||
return falsevalue
|
||||
|
||||
def contains_any(variable, checkvalues, truevalue, falsevalue, d):
|
||||
"""Check if a variable contains any values specified.
|
||||
|
||||
Arguments:
|
||||
|
||||
variable -- the variable name. This will be fetched and expanded (using
|
||||
d.getVar(variable)) and then split into a set().
|
||||
|
||||
checkvalues -- if this is a string it is split on whitespace into a set(),
|
||||
otherwise coerced directly into a set().
|
||||
|
||||
truevalue -- the value to return if checkvalues is a subset of variable.
|
||||
|
||||
falsevalue -- the value to return if variable is empty or if checkvalues is
|
||||
not a subset of variable.
|
||||
|
||||
d -- the data store.
|
||||
"""
|
||||
val = d.getVar(variable)
|
||||
if not val:
|
||||
return falsevalue
|
||||
@@ -1595,8 +1560,8 @@ def set_process_name(name):
|
||||
except:
|
||||
pass
|
||||
|
||||
# export common proxies variables from datastore to environment
|
||||
def export_proxies(d):
|
||||
""" export common proxies variables from datastore to environment """
|
||||
import os
|
||||
|
||||
variables = ['http_proxy', 'HTTP_PROXY', 'https_proxy', 'HTTPS_PROXY',
|
||||
@@ -1618,14 +1583,12 @@ def export_proxies(d):
|
||||
|
||||
def load_plugins(logger, plugins, pluginpath):
|
||||
def load_plugin(name):
|
||||
logger.debug('Loading plugin %s' % name)
|
||||
logger.debug(1, 'Loading plugin %s' % name)
|
||||
spec = importlib.machinery.PathFinder.find_spec(name, path=[pluginpath] )
|
||||
if spec:
|
||||
mod = importlib.util.module_from_spec(spec)
|
||||
spec.loader.exec_module(mod)
|
||||
return mod
|
||||
return spec.loader.load_module()
|
||||
|
||||
logger.debug('Loading plugins from %s...' % pluginpath)
|
||||
logger.debug(1, 'Loading plugins from %s...' % pluginpath)
|
||||
|
||||
expanded = (glob.glob(os.path.join(pluginpath, '*' + ext))
|
||||
for ext in python_extensions)
|
||||
|
||||
@@ -50,10 +50,10 @@ class ActionPlugin(LayerPlugin):
|
||||
if not (args.force or notadded):
|
||||
try:
|
||||
self.tinfoil.run_command('parseConfiguration')
|
||||
except (bb.tinfoil.TinfoilUIException, bb.BBHandledException):
|
||||
except bb.tinfoil.TinfoilUIException:
|
||||
# Restore the back up copy of bblayers.conf
|
||||
shutil.copy2(backup, bblayers_conf)
|
||||
bb.fatal("Parse failure with the specified layer added, aborting.")
|
||||
bb.fatal("Parse failure with the specified layer added")
|
||||
else:
|
||||
for item in notadded:
|
||||
sys.stderr.write("Specified layer %s is already in BBLAYERS\n" % item)
|
||||
|
||||
@@ -79,7 +79,7 @@ class LayerIndexPlugin(ActionPlugin):
|
||||
branches = [args.branch]
|
||||
else:
|
||||
branches = (self.tinfoil.config_data.getVar('LAYERSERIES_CORENAMES') or 'master').split()
|
||||
logger.debug('Trying branches: %s' % branches)
|
||||
logger.debug(1, 'Trying branches: %s' % branches)
|
||||
|
||||
ignore_layers = []
|
||||
if args.ignore:
|
||||
|
||||
@@ -128,7 +128,7 @@ skipped recipes will also be listed, with a " (skipped)" suffix.
|
||||
sys.exit(1)
|
||||
|
||||
pkg_pn = self.tinfoil.cooker.recipecaches[mc].pkg_pn
|
||||
(latest_versions, preferred_versions, required_versions) = self.tinfoil.find_providers(mc)
|
||||
(latest_versions, preferred_versions) = self.tinfoil.find_providers(mc)
|
||||
allproviders = self.tinfoil.get_all_providers(mc)
|
||||
|
||||
# Ensure we list skipped recipes
|
||||
|
||||
@@ -3,7 +3,6 @@
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
import asyncio
|
||||
from contextlib import closing
|
||||
import re
|
||||
import sqlite3
|
||||
@@ -22,24 +21,6 @@ ADDR_TYPE_TCP = 1
|
||||
# is necessary
|
||||
DEFAULT_MAX_CHUNK = 32 * 1024
|
||||
|
||||
TABLE_DEFINITION = (
|
||||
("method", "TEXT NOT NULL"),
|
||||
("outhash", "TEXT NOT NULL"),
|
||||
("taskhash", "TEXT NOT NULL"),
|
||||
("unihash", "TEXT NOT NULL"),
|
||||
("created", "DATETIME"),
|
||||
|
||||
# Optional fields
|
||||
("owner", "TEXT"),
|
||||
("PN", "TEXT"),
|
||||
("PV", "TEXT"),
|
||||
("PR", "TEXT"),
|
||||
("task", "TEXT"),
|
||||
("outhash_siginfo", "TEXT"),
|
||||
)
|
||||
|
||||
TABLE_COLUMNS = tuple(name for name, _ in TABLE_DEFINITION)
|
||||
|
||||
def setup_database(database, sync=True):
|
||||
db = sqlite3.connect(database)
|
||||
db.row_factory = sqlite3.Row
|
||||
@@ -48,10 +29,23 @@ def setup_database(database, sync=True):
|
||||
cursor.execute('''
|
||||
CREATE TABLE IF NOT EXISTS tasks_v2 (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
%s
|
||||
method TEXT NOT NULL,
|
||||
outhash TEXT NOT NULL,
|
||||
taskhash TEXT NOT NULL,
|
||||
unihash TEXT NOT NULL,
|
||||
created DATETIME,
|
||||
|
||||
-- Optional fields
|
||||
owner TEXT,
|
||||
PN TEXT,
|
||||
PV TEXT,
|
||||
PR TEXT,
|
||||
task TEXT,
|
||||
outhash_siginfo TEXT,
|
||||
|
||||
UNIQUE(method, outhash, taskhash)
|
||||
)
|
||||
''' % " ".join("%s %s," % (name, typ) for name, typ in TABLE_DEFINITION))
|
||||
''')
|
||||
cursor.execute('PRAGMA journal_mode = WAL')
|
||||
cursor.execute('PRAGMA synchronous = %s' % ('NORMAL' if sync else 'OFF'))
|
||||
|
||||
@@ -94,10 +88,10 @@ def chunkify(msg, max_chunk):
|
||||
yield "\n"
|
||||
|
||||
|
||||
def create_server(addr, dbname, *, sync=True, upstream=None, read_only=False):
|
||||
def create_server(addr, dbname, *, sync=True):
|
||||
from . import server
|
||||
db = setup_database(dbname, sync=sync)
|
||||
s = server.Server(db, upstream=upstream, read_only=read_only)
|
||||
s = server.Server(db)
|
||||
|
||||
(typ, a) = parse_address(addr)
|
||||
if typ == ADDR_TYPE_UNIX:
|
||||
@@ -119,15 +113,3 @@ def create_client(addr):
|
||||
c.connect_tcp(*a)
|
||||
|
||||
return c
|
||||
|
||||
async def create_async_client(addr):
|
||||
from . import client
|
||||
c = client.AsyncClient()
|
||||
|
||||
(typ, a) = parse_address(addr)
|
||||
if typ == ADDR_TYPE_UNIX:
|
||||
await c.connect_unix(*a)
|
||||
else:
|
||||
await c.connect_tcp(*a)
|
||||
|
||||
return c
|
||||
|
||||
@@ -3,231 +3,189 @@
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
import logging
|
||||
import socket
|
||||
import os
|
||||
from . import chunkify, DEFAULT_MAX_CHUNK, create_async_client
|
||||
from . import chunkify, DEFAULT_MAX_CHUNK
|
||||
|
||||
|
||||
logger = logging.getLogger("hashserv.client")
|
||||
logger = logging.getLogger('hashserv.client')
|
||||
|
||||
|
||||
class HashConnectionError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class AsyncClient(object):
|
||||
class Client(object):
|
||||
MODE_NORMAL = 0
|
||||
MODE_GET_STREAM = 1
|
||||
|
||||
def __init__(self):
|
||||
self._socket = None
|
||||
self.reader = None
|
||||
self.writer = None
|
||||
self.mode = self.MODE_NORMAL
|
||||
self.max_chunk = DEFAULT_MAX_CHUNK
|
||||
|
||||
async def connect_tcp(self, address, port):
|
||||
async def connect_sock():
|
||||
return await asyncio.open_connection(address, port)
|
||||
def connect_tcp(self, address, port):
|
||||
def connect_sock():
|
||||
s = socket.create_connection((address, port))
|
||||
|
||||
s.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
|
||||
s.setsockopt(socket.SOL_TCP, socket.TCP_QUICKACK, 1)
|
||||
s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
|
||||
return s
|
||||
|
||||
self._connect_sock = connect_sock
|
||||
|
||||
async def connect_unix(self, path):
|
||||
async def connect_sock():
|
||||
return await asyncio.open_unix_connection(path)
|
||||
def connect_unix(self, path):
|
||||
def connect_sock():
|
||||
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
|
||||
# AF_UNIX has path length issues so chdir here to workaround
|
||||
cwd = os.getcwd()
|
||||
try:
|
||||
os.chdir(os.path.dirname(path))
|
||||
s.connect(os.path.basename(path))
|
||||
finally:
|
||||
os.chdir(cwd)
|
||||
return s
|
||||
|
||||
self._connect_sock = connect_sock
|
||||
|
||||
async def connect(self):
|
||||
if self.reader is None or self.writer is None:
|
||||
(self.reader, self.writer) = await self._connect_sock()
|
||||
def connect(self):
|
||||
if self._socket is None:
|
||||
self._socket = self._connect_sock()
|
||||
|
||||
self.writer.write("OEHASHEQUIV 1.1\n\n".encode("utf-8"))
|
||||
await self.writer.drain()
|
||||
self.reader = self._socket.makefile('r', encoding='utf-8')
|
||||
self.writer = self._socket.makefile('w', encoding='utf-8')
|
||||
|
||||
self.writer.write('OEHASHEQUIV 1.1\n\n')
|
||||
self.writer.flush()
|
||||
|
||||
# Restore mode if the socket is being re-created
|
||||
cur_mode = self.mode
|
||||
self.mode = self.MODE_NORMAL
|
||||
await self._set_mode(cur_mode)
|
||||
self._set_mode(cur_mode)
|
||||
|
||||
async def close(self):
|
||||
self.reader = None
|
||||
return self._socket
|
||||
|
||||
if self.writer is not None:
|
||||
self.writer.close()
|
||||
def close(self):
|
||||
if self._socket is not None:
|
||||
self._socket.close()
|
||||
self._socket = None
|
||||
self.reader = None
|
||||
self.writer = None
|
||||
|
||||
async def _send_wrapper(self, proc):
|
||||
def _send_wrapper(self, proc):
|
||||
count = 0
|
||||
while True:
|
||||
try:
|
||||
await self.connect()
|
||||
return await proc()
|
||||
except (
|
||||
OSError,
|
||||
HashConnectionError,
|
||||
json.JSONDecodeError,
|
||||
UnicodeDecodeError,
|
||||
) as e:
|
||||
logger.warning("Error talking to server: %s" % e)
|
||||
self.connect()
|
||||
return proc()
|
||||
except (OSError, HashConnectionError, json.JSONDecodeError, UnicodeDecodeError) as e:
|
||||
logger.warning('Error talking to server: %s' % e)
|
||||
if count >= 3:
|
||||
if not isinstance(e, HashConnectionError):
|
||||
raise HashConnectionError(str(e))
|
||||
raise e
|
||||
await self.close()
|
||||
self.close()
|
||||
count += 1
|
||||
|
||||
async def send_message(self, msg):
|
||||
async def get_line():
|
||||
line = await self.reader.readline()
|
||||
def send_message(self, msg):
|
||||
def get_line():
|
||||
line = self.reader.readline()
|
||||
if not line:
|
||||
raise HashConnectionError("Connection closed")
|
||||
raise HashConnectionError('Connection closed')
|
||||
|
||||
line = line.decode("utf-8")
|
||||
|
||||
if not line.endswith("\n"):
|
||||
raise HashConnectionError("Bad message %r" % message)
|
||||
if not line.endswith('\n'):
|
||||
raise HashConnectionError('Bad message %r' % message)
|
||||
|
||||
return line
|
||||
|
||||
async def proc():
|
||||
def proc():
|
||||
for c in chunkify(json.dumps(msg), self.max_chunk):
|
||||
self.writer.write(c.encode("utf-8"))
|
||||
await self.writer.drain()
|
||||
self.writer.write(c)
|
||||
self.writer.flush()
|
||||
|
||||
l = await get_line()
|
||||
l = get_line()
|
||||
|
||||
m = json.loads(l)
|
||||
if m and "chunk-stream" in m:
|
||||
if 'chunk-stream' in m:
|
||||
lines = []
|
||||
while True:
|
||||
l = (await get_line()).rstrip("\n")
|
||||
l = get_line().rstrip('\n')
|
||||
if not l:
|
||||
break
|
||||
lines.append(l)
|
||||
|
||||
m = json.loads("".join(lines))
|
||||
m = json.loads(''.join(lines))
|
||||
|
||||
return m
|
||||
|
||||
return await self._send_wrapper(proc)
|
||||
return self._send_wrapper(proc)
|
||||
|
||||
async def send_stream(self, msg):
|
||||
async def proc():
|
||||
self.writer.write(("%s\n" % msg).encode("utf-8"))
|
||||
await self.writer.drain()
|
||||
l = await self.reader.readline()
|
||||
def send_stream(self, msg):
|
||||
def proc():
|
||||
self.writer.write("%s\n" % msg)
|
||||
self.writer.flush()
|
||||
l = self.reader.readline()
|
||||
if not l:
|
||||
raise HashConnectionError("Connection closed")
|
||||
return l.decode("utf-8").rstrip()
|
||||
raise HashConnectionError('Connection closed')
|
||||
return l.rstrip()
|
||||
|
||||
return await self._send_wrapper(proc)
|
||||
return self._send_wrapper(proc)
|
||||
|
||||
async def _set_mode(self, new_mode):
|
||||
def _set_mode(self, new_mode):
|
||||
if new_mode == self.MODE_NORMAL and self.mode == self.MODE_GET_STREAM:
|
||||
r = await self.send_stream("END")
|
||||
if r != "ok":
|
||||
raise HashConnectionError("Bad response from server %r" % r)
|
||||
r = self.send_stream('END')
|
||||
if r != 'ok':
|
||||
raise HashConnectionError('Bad response from server %r' % r)
|
||||
elif new_mode == self.MODE_GET_STREAM and self.mode == self.MODE_NORMAL:
|
||||
r = await self.send_message({"get-stream": None})
|
||||
if r != "ok":
|
||||
raise HashConnectionError("Bad response from server %r" % r)
|
||||
r = self.send_message({'get-stream': None})
|
||||
if r != 'ok':
|
||||
raise HashConnectionError('Bad response from server %r' % r)
|
||||
elif new_mode != self.mode:
|
||||
raise Exception(
|
||||
"Undefined mode transition %r -> %r" % (self.mode, new_mode)
|
||||
)
|
||||
raise Exception('Undefined mode transition %r -> %r' % (self.mode, new_mode))
|
||||
|
||||
self.mode = new_mode
|
||||
|
||||
async def get_unihash(self, method, taskhash):
|
||||
await self._set_mode(self.MODE_GET_STREAM)
|
||||
r = await self.send_stream("%s %s" % (method, taskhash))
|
||||
def get_unihash(self, method, taskhash):
|
||||
self._set_mode(self.MODE_GET_STREAM)
|
||||
r = self.send_stream('%s %s' % (method, taskhash))
|
||||
if not r:
|
||||
return None
|
||||
return r
|
||||
|
||||
async def report_unihash(self, taskhash, method, outhash, unihash, extra={}):
|
||||
await self._set_mode(self.MODE_NORMAL)
|
||||
def report_unihash(self, taskhash, method, outhash, unihash, extra={}):
|
||||
self._set_mode(self.MODE_NORMAL)
|
||||
m = extra.copy()
|
||||
m["taskhash"] = taskhash
|
||||
m["method"] = method
|
||||
m["outhash"] = outhash
|
||||
m["unihash"] = unihash
|
||||
return await self.send_message({"report": m})
|
||||
m['taskhash'] = taskhash
|
||||
m['method'] = method
|
||||
m['outhash'] = outhash
|
||||
m['unihash'] = unihash
|
||||
return self.send_message({'report': m})
|
||||
|
||||
async def report_unihash_equiv(self, taskhash, method, unihash, extra={}):
|
||||
await self._set_mode(self.MODE_NORMAL)
|
||||
def report_unihash_equiv(self, taskhash, method, unihash, extra={}):
|
||||
self._set_mode(self.MODE_NORMAL)
|
||||
m = extra.copy()
|
||||
m["taskhash"] = taskhash
|
||||
m["method"] = method
|
||||
m["unihash"] = unihash
|
||||
return await self.send_message({"report-equiv": m})
|
||||
m['taskhash'] = taskhash
|
||||
m['method'] = method
|
||||
m['unihash'] = unihash
|
||||
return self.send_message({'report-equiv': m})
|
||||
|
||||
async def get_taskhash(self, method, taskhash, all_properties=False):
|
||||
await self._set_mode(self.MODE_NORMAL)
|
||||
return await self.send_message(
|
||||
{"get": {"taskhash": taskhash, "method": method, "all": all_properties}}
|
||||
)
|
||||
def get_taskhash(self, method, taskhash, all_properties=False):
|
||||
self._set_mode(self.MODE_NORMAL)
|
||||
return self.send_message({'get': {
|
||||
'taskhash': taskhash,
|
||||
'method': method,
|
||||
'all': all_properties
|
||||
}})
|
||||
|
||||
async def get_outhash(self, method, outhash, taskhash):
|
||||
await self._set_mode(self.MODE_NORMAL)
|
||||
return await self.send_message(
|
||||
{"get-outhash": {"outhash": outhash, "taskhash": taskhash, "method": method}}
|
||||
)
|
||||
def get_stats(self):
|
||||
self._set_mode(self.MODE_NORMAL)
|
||||
return self.send_message({'get-stats': None})
|
||||
|
||||
async def get_stats(self):
|
||||
await self._set_mode(self.MODE_NORMAL)
|
||||
return await self.send_message({"get-stats": None})
|
||||
|
||||
async def reset_stats(self):
|
||||
await self._set_mode(self.MODE_NORMAL)
|
||||
return await self.send_message({"reset-stats": None})
|
||||
|
||||
async def backfill_wait(self):
|
||||
await self._set_mode(self.MODE_NORMAL)
|
||||
return (await self.send_message({"backfill-wait": None}))["tasks"]
|
||||
|
||||
|
||||
class Client(object):
|
||||
def __init__(self):
|
||||
self.client = AsyncClient()
|
||||
self.loop = asyncio.new_event_loop()
|
||||
|
||||
for call in (
|
||||
"connect_tcp",
|
||||
"close",
|
||||
"get_unihash",
|
||||
"report_unihash",
|
||||
"report_unihash_equiv",
|
||||
"get_taskhash",
|
||||
"get_stats",
|
||||
"reset_stats",
|
||||
"backfill_wait",
|
||||
):
|
||||
downcall = getattr(self.client, call)
|
||||
setattr(self, call, self._get_downcall_wrapper(downcall))
|
||||
|
||||
def _get_downcall_wrapper(self, downcall):
|
||||
def wrapper(*args, **kwargs):
|
||||
return self.loop.run_until_complete(downcall(*args, **kwargs))
|
||||
|
||||
return wrapper
|
||||
|
||||
def connect_unix(self, path):
|
||||
# AF_UNIX has path length issues so chdir here to workaround
|
||||
cwd = os.getcwd()
|
||||
try:
|
||||
os.chdir(os.path.dirname(path))
|
||||
self.loop.run_until_complete(self.client.connect_unix(os.path.basename(path)))
|
||||
self.loop.run_until_complete(self.client.connect())
|
||||
finally:
|
||||
os.chdir(cwd)
|
||||
|
||||
@property
|
||||
def max_chunk(self):
|
||||
return self.client.max_chunk
|
||||
|
||||
@max_chunk.setter
|
||||
def max_chunk(self, value):
|
||||
self.client.max_chunk = value
|
||||
def reset_stats(self):
|
||||
self._set_mode(self.MODE_NORMAL)
|
||||
return self.send_message({'reset-stats': None})
|
||||
|
||||
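The client.py hunks above swap a plain socket client for an asyncio AsyncClient plus a thin blocking Client wrapper that drives it with run_until_complete(). A rough sketch of driving the async API directly (illustrative only; the socket path, method name and taskhash are made up):

    import asyncio
    from hashserv import create_async_client

    async def lookup(addr, method, taskhash):
        # create_async_client() stores the connection details; the socket and
        # the OEHASHEQUIV handshake are set up lazily on the first request.
        client = await create_async_client(addr)
        try:
            # get_unihash() returns None when the taskhash is unknown.
            return await client.get_unihash(method, taskhash)
        finally:
            await client.close()

    loop = asyncio.new_event_loop()
    unihash = loop.run_until_complete(
        lookup("unix://./hashserve.sock", "TestMethod",
               "35788efcb8dfb0a02659d81cf2bfd695fb30faf9"))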
@@ -3,7 +3,7 @@
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
from contextlib import closing, contextmanager
|
||||
from contextlib import closing
|
||||
from datetime import datetime
|
||||
import asyncio
|
||||
import json
|
||||
@@ -12,9 +12,8 @@ import math
|
||||
import os
|
||||
import signal
|
||||
import socket
|
||||
import sys
|
||||
import time
|
||||
from . import chunkify, DEFAULT_MAX_CHUNK, create_async_client, TABLE_COLUMNS
|
||||
from . import chunkify, DEFAULT_MAX_CHUNK
|
||||
|
||||
logger = logging.getLogger('hashserv.server')
|
||||
|
||||
@@ -112,95 +111,29 @@ class Stats(object):
|
||||
class ClientError(Exception):
|
||||
pass
|
||||
|
||||
class ServerError(Exception):
|
||||
pass
|
||||
|
||||
def insert_task(cursor, data, ignore=False):
|
||||
keys = sorted(data.keys())
|
||||
query = '''INSERT%s INTO tasks_v2 (%s) VALUES (%s)''' % (
|
||||
" OR IGNORE" if ignore else "",
|
||||
', '.join(keys),
|
||||
', '.join(':' + k for k in keys))
|
||||
cursor.execute(query, data)
|
||||
|
||||
async def copy_from_upstream(client, db, method, taskhash):
|
||||
d = await client.get_taskhash(method, taskhash, True)
|
||||
if d is not None:
|
||||
# Filter out unknown columns
|
||||
d = {k: v for k, v in d.items() if k in TABLE_COLUMNS}
|
||||
keys = sorted(d.keys())
|
||||
|
||||
with closing(db.cursor()) as cursor:
|
||||
insert_task(cursor, d)
|
||||
db.commit()
|
||||
|
||||
return d
|
||||
|
||||
async def copy_outhash_from_upstream(client, db, method, outhash, taskhash):
|
||||
d = await client.get_outhash(method, outhash, taskhash)
|
||||
if d is not None:
|
||||
# Filter out unknown columns
|
||||
d = {k: v for k, v in d.items() if k in TABLE_COLUMNS}
|
||||
keys = sorted(d.keys())
|
||||
|
||||
with closing(db.cursor()) as cursor:
|
||||
insert_task(cursor, d)
|
||||
db.commit()
|
||||
|
||||
return d
|
||||
|
||||
class ServerClient(object):
|
||||
FAST_QUERY = 'SELECT taskhash, method, unihash FROM tasks_v2 WHERE method=:method AND taskhash=:taskhash ORDER BY created ASC LIMIT 1'
|
||||
ALL_QUERY = 'SELECT * FROM tasks_v2 WHERE method=:method AND taskhash=:taskhash ORDER BY created ASC LIMIT 1'
|
||||
OUTHASH_QUERY = '''
|
||||
-- Find tasks with a matching outhash (that is, tasks that
|
||||
-- are equivalent)
|
||||
SELECT * FROM tasks_v2 WHERE method=:method AND outhash=:outhash
|
||||
|
||||
-- If there is an exact match on the taskhash, return it.
|
||||
-- Otherwise return the oldest matching outhash of any
|
||||
-- taskhash
|
||||
ORDER BY CASE WHEN taskhash=:taskhash THEN 1 ELSE 2 END,
|
||||
created ASC
|
||||
|
||||
-- Only return one row
|
||||
LIMIT 1
|
||||
'''
|
||||
|
||||
def __init__(self, reader, writer, db, request_stats, backfill_queue, upstream, read_only):
|
||||
def __init__(self, reader, writer, db, request_stats):
|
||||
self.reader = reader
|
||||
self.writer = writer
|
||||
self.db = db
|
||||
self.request_stats = request_stats
|
||||
self.max_chunk = DEFAULT_MAX_CHUNK
|
||||
self.backfill_queue = backfill_queue
|
||||
self.upstream = upstream
|
||||
|
||||
self.handlers = {
|
||||
'get': self.handle_get,
|
||||
'get-outhash': self.handle_get_outhash,
|
||||
'report': self.handle_report,
|
||||
'report-equiv': self.handle_equivreport,
|
||||
'get-stream': self.handle_get_stream,
|
||||
'get-stats': self.handle_get_stats,
|
||||
'reset-stats': self.handle_reset_stats,
|
||||
'chunk-stream': self.handle_chunk,
|
||||
}
|
||||
|
||||
if not read_only:
|
||||
self.handlers.update({
|
||||
'report': self.handle_report,
|
||||
'report-equiv': self.handle_equivreport,
|
||||
'reset-stats': self.handle_reset_stats,
|
||||
'backfill-wait': self.handle_backfill_wait,
|
||||
})
|
||||
|
||||
async def process_requests(self):
|
||||
if self.upstream is not None:
|
||||
self.upstream_client = await create_async_client(self.upstream)
|
||||
else:
|
||||
self.upstream_client = None
|
||||
|
||||
try:
|
||||
|
||||
|
||||
self.addr = self.writer.get_extra_info('peername')
|
||||
logger.debug('Client %r connected' % (self.addr,))
|
||||
|
||||
@@ -238,9 +171,6 @@ class ServerClient(object):
|
||||
except ClientError as e:
|
||||
logger.error(str(e))
|
||||
finally:
|
||||
if self.upstream_client is not None:
|
||||
await self.upstream_client.close()
|
||||
|
||||
self.writer.close()
|
||||
|
||||
async def dispatch_message(self, msg):
|
||||
@@ -309,34 +239,15 @@ class ServerClient(object):
|
||||
if row is not None:
|
||||
logger.debug('Found equivalent task %s -> %s', (row['taskhash'], row['unihash']))
|
||||
d = {k: row[k] for k in row.keys()}
|
||||
elif self.upstream_client is not None:
|
||||
d = await copy_from_upstream(self.upstream_client, self.db, method, taskhash)
|
||||
|
||||
self.write_message(d)
|
||||
else:
|
||||
d = None
|
||||
|
||||
self.write_message(d)
|
||||
|
||||
async def handle_get_outhash(self, request):
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
cursor.execute(self.OUTHASH_QUERY,
|
||||
{k: request[k] for k in ('method', 'outhash', 'taskhash')})
|
||||
|
||||
row = cursor.fetchone()
|
||||
|
||||
if row is not None:
|
||||
logger.debug('Found equivalent outhash %s -> %s', (row['outhash'], row['unihash']))
|
||||
d = {k: row[k] for k in row.keys()}
|
||||
else:
|
||||
d = None
|
||||
|
||||
self.write_message(d)
|
||||
self.write_message(None)
|
||||
|
||||
async def handle_get_stream(self, request):
|
||||
self.write_message('ok')
|
||||
|
||||
while True:
|
||||
upstream = None
|
||||
|
||||
l = await self.reader.readline()
|
||||
if not l:
|
||||
return
|
||||
@@ -361,12 +272,6 @@ class ServerClient(object):
|
||||
if row is not None:
|
||||
msg = ('%s\n' % row['unihash']).encode('utf-8')
|
||||
#logger.debug('Found equivalent task %s -> %s', (row['taskhash'], row['unihash']))
|
||||
elif self.upstream_client is not None:
|
||||
upstream = await self.upstream_client.get_unihash(method, taskhash)
|
||||
if upstream:
|
||||
msg = ("%s\n" % upstream).encode("utf-8")
|
||||
else:
|
||||
msg = "\n".encode("utf-8")
|
||||
else:
|
||||
msg = '\n'.encode('utf-8')
|
||||
|
||||
@@ -377,26 +282,25 @@ class ServerClient(object):
|
||||
|
||||
await self.writer.drain()
|
||||
|
||||
# Post to the backfill queue after writing the result to minimize
|
||||
# the turn around time on a request
|
||||
if upstream is not None:
|
||||
await self.backfill_queue.put((method, taskhash))
|
||||
|
||||
async def handle_report(self, data):
|
||||
with closing(self.db.cursor()) as cursor:
|
||||
cursor.execute(self.OUTHASH_QUERY,
|
||||
{k: data[k] for k in ('method', 'outhash', 'taskhash')})
|
||||
cursor.execute('''
|
||||
-- Find tasks with a matching outhash (that is, tasks that
|
||||
-- are equivalent)
|
||||
SELECT taskhash, method, unihash FROM tasks_v2 WHERE method=:method AND outhash=:outhash
|
||||
|
||||
-- If there is an exact match on the taskhash, return it.
|
||||
-- Otherwise return the oldest matching outhash of any
|
||||
-- taskhash
|
||||
ORDER BY CASE WHEN taskhash=:taskhash THEN 1 ELSE 2 END,
|
||||
created ASC
|
||||
|
||||
-- Only return one row
|
||||
LIMIT 1
|
||||
''', {k: data[k] for k in ('method', 'outhash', 'taskhash')})
|
||||
|
||||
row = cursor.fetchone()
|
||||
|
||||
if row is None and self.upstream_client:
|
||||
# Try upstream
|
||||
row = await copy_outhash_from_upstream(self.upstream_client,
|
||||
self.db,
|
||||
data['method'],
|
||||
data['outhash'],
|
||||
data['taskhash'])
|
||||
|
||||
# If no matching outhash was found, or one *was* found but it
|
||||
# wasn't an exact match on the taskhash, a new entry for this
|
||||
# taskhash should be added
|
||||
@@ -420,7 +324,11 @@ class ServerClient(object):
|
||||
if k in data:
|
||||
insert_data[k] = data[k]
|
||||
|
||||
insert_task(cursor, insert_data)
|
||||
cursor.execute('''INSERT INTO tasks_v2 (%s) VALUES (%s)''' % (
|
||||
', '.join(sorted(insert_data.keys())),
|
||||
', '.join(':' + k for k in sorted(insert_data.keys()))),
|
||||
insert_data)
|
||||
|
||||
self.db.commit()
|
||||
|
||||
logger.info('Adding taskhash %s with unihash %s',
|
||||
@@ -450,7 +358,11 @@ class ServerClient(object):
|
||||
if k in data:
|
||||
insert_data[k] = data[k]
|
||||
|
||||
insert_task(cursor, insert_data, ignore=True)
|
||||
cursor.execute('''INSERT OR IGNORE INTO tasks_v2 (%s) VALUES (%s)''' % (
|
||||
', '.join(sorted(insert_data.keys())),
|
||||
', '.join(':' + k for k in sorted(insert_data.keys()))),
|
||||
insert_data)
|
||||
|
||||
self.db.commit()
|
||||
|
||||
# Fetch the unihash that will be reported for the taskhash. If the
|
||||
@@ -482,13 +394,6 @@ class ServerClient(object):
|
||||
self.request_stats.reset()
|
||||
self.write_message(d)
|
||||
|
||||
async def handle_backfill_wait(self, request):
|
||||
d = {
|
||||
'tasks': self.backfill_queue.qsize(),
|
||||
}
|
||||
await self.backfill_queue.join()
|
||||
self.write_message(d)
|
||||
|
||||
def query_equivalent(self, method, taskhash, query):
|
||||
# This is part of the inner loop and must be as fast as possible
|
||||
try:
|
||||
@@ -500,10 +405,7 @@ class ServerClient(object):
|
||||
|
||||
|
||||
class Server(object):
|
||||
def __init__(self, db, loop=None, upstream=None, read_only=False):
|
||||
if upstream and read_only:
|
||||
raise ServerError("Read-only hashserv cannot pull from an upstream server")
|
||||
|
||||
def __init__(self, db, loop=None):
|
||||
self.request_stats = Stats()
|
||||
self.db = db
|
||||
|
||||
@@ -514,14 +416,11 @@ class Server(object):
|
||||
self.loop = loop
|
||||
self.close_loop = False
|
||||
|
||||
self.upstream = upstream
|
||||
self.read_only = read_only
|
||||
|
||||
self._cleanup_socket = None
|
||||
|
||||
def start_tcp_server(self, host, port):
|
||||
self.server = self.loop.run_until_complete(
|
||||
asyncio.start_server(self.handle_client, host, port)
|
||||
asyncio.start_server(self.handle_client, host, port, loop=self.loop)
|
||||
)
|
||||
|
||||
for s in self.server.sockets:
|
||||
@@ -546,7 +445,7 @@ class Server(object):
|
||||
# Work around path length limits in AF_UNIX
|
||||
os.chdir(os.path.dirname(path))
|
||||
self.server = self.loop.run_until_complete(
|
||||
asyncio.start_unix_server(self.handle_client, os.path.basename(path))
|
||||
asyncio.start_unix_server(self.handle_client, os.path.basename(path), loop=self.loop)
|
||||
)
|
||||
finally:
|
||||
os.chdir(cwd)
|
||||
@@ -559,7 +458,7 @@ class Server(object):
|
||||
async def handle_client(self, reader, writer):
|
||||
# writer.transport.set_write_buffer_limits(0)
|
||||
try:
|
||||
client = ServerClient(reader, writer, self.db, self.request_stats, self.backfill_queue, self.upstream, self.read_only)
|
||||
client = ServerClient(reader, writer, self.db, self.request_stats)
|
||||
await client.process_requests()
|
||||
except Exception as e:
|
||||
import traceback
|
||||
@@ -568,60 +467,23 @@ class Server(object):
|
||||
writer.close()
|
||||
logger.info('Client disconnected')
|
||||
|
||||
@contextmanager
|
||||
def _backfill_worker(self):
|
||||
async def backfill_worker_task():
|
||||
client = await create_async_client(self.upstream)
|
||||
try:
|
||||
while True:
|
||||
item = await self.backfill_queue.get()
|
||||
if item is None:
|
||||
self.backfill_queue.task_done()
|
||||
break
|
||||
method, taskhash = item
|
||||
await copy_from_upstream(client, self.db, method, taskhash)
|
||||
self.backfill_queue.task_done()
|
||||
finally:
|
||||
await client.close()
|
||||
|
||||
async def join_worker(worker):
|
||||
await self.backfill_queue.put(None)
|
||||
await worker
|
||||
|
||||
if self.upstream is not None:
|
||||
worker = asyncio.ensure_future(backfill_worker_task())
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
self.loop.run_until_complete(join_worker(worker))
|
||||
else:
|
||||
yield
|
||||
|
||||
def serve_forever(self):
|
||||
def signal_handler():
|
||||
self.loop.stop()
|
||||
|
||||
asyncio.set_event_loop(self.loop)
|
||||
self.loop.add_signal_handler(signal.SIGTERM, signal_handler)
|
||||
|
||||
try:
|
||||
self.backfill_queue = asyncio.Queue()
|
||||
self.loop.run_forever()
|
||||
except KeyboardInterrupt:
|
||||
pass
|
||||
|
||||
self.loop.add_signal_handler(signal.SIGTERM, signal_handler)
|
||||
self.server.close()
|
||||
self.loop.run_until_complete(self.server.wait_closed())
|
||||
logger.info('Server shutting down')
|
||||
|
||||
with self._backfill_worker():
|
||||
try:
|
||||
self.loop.run_forever()
|
||||
except KeyboardInterrupt:
|
||||
pass
|
||||
if self.close_loop:
|
||||
self.loop.close()
|
||||
|
||||
self.server.close()
|
||||
|
||||
self.loop.run_until_complete(self.server.wait_closed())
|
||||
logger.info('Server shutting down')
|
||||
finally:
|
||||
if self.close_loop:
|
||||
if sys.version_info >= (3, 6):
|
||||
self.loop.run_until_complete(self.loop.shutdown_asyncgens())
|
||||
self.loop.close()
|
||||
|
||||
if self._cleanup_socket is not None:
|
||||
self._cleanup_socket()
|
||||
if self._cleanup_socket is not None:
|
||||
self._cleanup_socket()
|
||||
|
||||
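The server hunks above keep the same line-oriented protocol: after an "OEHASHEQUIV 1.1" greeting, each request is a JSON document terminated by a newline, and oversized replies are split up via the "chunk-stream" mechanism that chunkify() implements. A rough sketch of a raw exchange (illustrative only; the host and port are made up and a TCP hashserv instance is assumed to be listening there):

    import json
    import socket

    s = socket.create_connection(("localhost", 8686))
    reader = s.makefile("r", encoding="utf-8")
    writer = s.makefile("w", encoding="utf-8")

    writer.write("OEHASHEQUIV 1.1\n\n")            # protocol greeting
    writer.write(json.dumps({"get-stats": None}) + "\n")
    writer.flush()

    print(json.loads(reader.readline()))           # server stats as a dict
    s.close()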
@@ -6,7 +6,6 @@
|
||||
#
|
||||
|
||||
from . import create_server, create_client
|
||||
from .client import HashConnectionError
|
||||
import hashlib
|
||||
import logging
|
||||
import multiprocessing
|
||||
@@ -17,68 +16,44 @@ import threading
|
||||
import unittest
|
||||
import socket
|
||||
|
||||
def _run_server(server, idx):
|
||||
# logging.basicConfig(level=logging.DEBUG, filename='bbhashserv.log', filemode='w',
|
||||
# format='%(levelname)s %(filename)s:%(lineno)d %(message)s')
|
||||
sys.stdout = open('bbhashserv-%d.log' % idx, 'w')
|
||||
sys.stderr = sys.stdout
|
||||
server.serve_forever()
|
||||
|
||||
|
||||
class HashEquivalenceTestSetup(object):
|
||||
class TestHashEquivalenceServer(object):
|
||||
METHOD = 'TestMethod'
|
||||
|
||||
server_index = 0
|
||||
|
||||
def start_server(self, dbpath=None, upstream=None, read_only=False):
|
||||
self.server_index += 1
|
||||
if dbpath is None:
|
||||
dbpath = os.path.join(self.temp_dir.name, "db%d.sqlite" % self.server_index)
|
||||
|
||||
def cleanup_thread(thread):
|
||||
thread.terminate()
|
||||
thread.join()
|
||||
|
||||
server = create_server(self.get_server_addr(self.server_index),
|
||||
dbpath,
|
||||
upstream=upstream,
|
||||
read_only=read_only)
|
||||
server.dbpath = dbpath
|
||||
|
||||
server.thread = multiprocessing.Process(target=_run_server, args=(server, self.server_index))
|
||||
server.thread.start()
|
||||
self.addCleanup(cleanup_thread, server.thread)
|
||||
|
||||
def cleanup_client(client):
|
||||
client.close()
|
||||
|
||||
client = create_client(server.address)
|
||||
self.addCleanup(cleanup_client, client)
|
||||
|
||||
return (client, server)
|
||||
def _run_server(self):
|
||||
# logging.basicConfig(level=logging.DEBUG, filename='bbhashserv.log', filemode='w',
|
||||
# format='%(levelname)s %(filename)s:%(lineno)d %(message)s')
|
||||
self.server.serve_forever()
|
||||
|
||||
def setUp(self):
|
||||
if sys.version_info < (3, 5, 0):
|
||||
self.skipTest('Python 3.5 or later required')
|
||||
|
||||
self.temp_dir = tempfile.TemporaryDirectory(prefix='bb-hashserv')
|
||||
self.addCleanup(self.temp_dir.cleanup)
|
||||
self.dbfile = os.path.join(self.temp_dir.name, 'db.sqlite')
|
||||
|
||||
(self.client, self.server) = self.start_server()
|
||||
self.server = create_server(self.get_server_addr(), self.dbfile)
|
||||
self.server_thread = multiprocessing.Process(target=self._run_server)
|
||||
self.server_thread.start()
|
||||
self.client = create_client(self.server.address)
|
||||
|
||||
def assertClientGetHash(self, client, taskhash, unihash):
|
||||
result = client.get_unihash(self.METHOD, taskhash)
|
||||
self.assertEqual(result, unihash)
|
||||
def tearDown(self):
|
||||
# Shutdown server
|
||||
s = getattr(self, 'server', None)
|
||||
if s is not None:
|
||||
self.server_thread.terminate()
|
||||
self.server_thread.join()
|
||||
self.client.close()
|
||||
self.temp_dir.cleanup()
|
||||
|
||||
|
||||
class HashEquivalenceCommonTests(object):
|
||||
def test_create_hash(self):
|
||||
# Simple test that hashes can be created
|
||||
taskhash = '35788efcb8dfb0a02659d81cf2bfd695fb30faf9'
|
||||
outhash = '2765d4a5884be49b28601445c2760c5f21e7e5c0ee2b7e3fce98fd7e5970796f'
|
||||
unihash = 'f46d3fbb439bd9b921095da657a4de906510d2cd'
|
||||
|
||||
self.assertClientGetHash(self.client, taskhash, None)
|
||||
result = self.client.get_unihash(self.METHOD, taskhash)
|
||||
self.assertIsNone(result, msg='Found unexpected task, %r' % result)
|
||||
|
||||
result = self.client.report_unihash(taskhash, self.METHOD, outhash, unihash)
|
||||
self.assertEqual(result['unihash'], unihash, 'Server returned bad unihash')
|
||||
@@ -109,19 +84,22 @@ class HashEquivalenceCommonTests(object):
|
||||
unihash = '218e57509998197d570e2c98512d0105985dffc9'
|
||||
self.client.report_unihash(taskhash, self.METHOD, outhash, unihash)
|
||||
|
||||
self.assertClientGetHash(self.client, taskhash, unihash)
|
||||
result = self.client.get_unihash(self.METHOD, taskhash)
|
||||
self.assertEqual(result, unihash)
|
||||
|
||||
outhash2 = '0904a7fe3dc712d9fd8a74a616ddca2a825a8ee97adf0bd3fc86082c7639914d'
|
||||
unihash2 = 'ae9a7d252735f0dafcdb10e2e02561ca3a47314c'
|
||||
self.client.report_unihash(taskhash, self.METHOD, outhash2, unihash2)
|
||||
|
||||
self.assertClientGetHash(self.client, taskhash, unihash)
|
||||
result = self.client.get_unihash(self.METHOD, taskhash)
|
||||
self.assertEqual(result, unihash)
|
||||
|
||||
outhash3 = '77623a549b5b1a31e3732dfa8fe61d7ce5d44b3370f253c5360e136b852967b4'
|
||||
unihash3 = '9217a7d6398518e5dc002ed58f2cbbbc78696603'
|
||||
self.client.report_unihash(taskhash, self.METHOD, outhash3, unihash3)
|
||||
|
||||
self.assertClientGetHash(self.client, taskhash, unihash)
|
||||
result = self.client.get_unihash(self.METHOD, taskhash)
|
||||
self.assertEqual(result, unihash)
|
||||
|
||||
def test_huge_message(self):
|
||||
# Simple test that hashes can be created
|
||||
@@ -129,7 +107,8 @@ class HashEquivalenceCommonTests(object):
|
||||
outhash = '3c979c3db45c569f51ab7626a4651074be3a9d11a84b1db076f5b14f7d39db44'
|
||||
unihash = '90e9bc1d1f094c51824adca7f8ea79a048d68824'
|
||||
|
||||
self.assertClientGetHash(self.client, taskhash, None)
|
||||
result = self.client.get_unihash(self.METHOD, taskhash)
|
||||
self.assertIsNone(result, msg='Found unexpected task, %r' % result)
|
||||
|
||||
siginfo = "0" * (self.client.max_chunk * 4)
|
||||
|
||||
@@ -177,140 +156,16 @@ class HashEquivalenceCommonTests(object):
|
||||
|
||||
self.assertFalse(failures)
|
||||
|
||||
def test_upstream_server(self):
|
||||
# Tests upstream server support. This is done by creating two servers
|
||||
# that share a database file. The downstream server has it upstream
|
||||
# set to the test server, whereas the side server doesn't. This allows
|
||||
# verification that the hash requests are being proxied to the upstream
|
||||
# server by verifying that they appear on the downstream client, but not
|
||||
# the side client. It also verifies that the results are pulled into
|
||||
# the downstream database by checking that the downstream and side servers
|
||||
# match after the downstream is done waiting for all backfill tasks
|
||||
(down_client, down_server) = self.start_server(upstream=self.server.address)
|
||||
(side_client, side_server) = self.start_server(dbpath=down_server.dbpath)
|
||||
|
||||
def check_hash(taskhash, unihash, old_sidehash):
|
||||
nonlocal down_client
|
||||
nonlocal side_client
|
||||
|
||||
# check upstream server
|
||||
self.assertClientGetHash(self.client, taskhash, unihash)
|
||||
|
||||
# Hash should *not* be present on the side server
|
||||
self.assertClientGetHash(side_client, taskhash, old_sidehash)
|
||||
|
||||
# Hash should be present on the downstream server, since it
|
||||
# will defer to the upstream server. This will trigger
|
||||
# the backfill in the downstream server
|
||||
self.assertClientGetHash(down_client, taskhash, unihash)
|
||||
|
||||
# After waiting for the downstream client to finish backfilling the
|
||||
# task from the upstream server, it should appear in the side server
|
||||
# since the database is populated
|
||||
down_client.backfill_wait()
|
||||
self.assertClientGetHash(side_client, taskhash, unihash)
|
||||
|
||||
# Basic report
|
||||
taskhash = '8aa96fcffb5831b3c2c0cb75f0431e3f8b20554a'
|
||||
outhash = 'afe240a439959ce86f5e322f8c208e1fedefea9e813f2140c81af866cc9edf7e'
|
||||
unihash = '218e57509998197d570e2c98512d0105985dffc9'
|
||||
self.client.report_unihash(taskhash, self.METHOD, outhash, unihash)
|
||||
|
||||
check_hash(taskhash, unihash, None)
|
||||
|
||||
# Duplicated taskhash with multiple output hashes and unihashes.
|
||||
# All servers should agree with the originally reported hash
|
||||
outhash2 = '0904a7fe3dc712d9fd8a74a616ddca2a825a8ee97adf0bd3fc86082c7639914d'
|
||||
unihash2 = 'ae9a7d252735f0dafcdb10e2e02561ca3a47314c'
|
||||
self.client.report_unihash(taskhash, self.METHOD, outhash2, unihash2)
|
||||
|
||||
check_hash(taskhash, unihash, unihash)
|
||||
|
||||
# Report an equivalent task. The sideload will originally report
|
||||
# no unihash until backfilled
|
||||
taskhash3 = "044c2ec8aaf480685a00ff6ff49e6162e6ad34e1"
|
||||
unihash3 = "def64766090d28f627e816454ed46894bb3aab36"
|
||||
self.client.report_unihash(taskhash3, self.METHOD, outhash, unihash3)
|
||||
|
||||
check_hash(taskhash3, unihash, None)
|
||||
|
||||
# Test that reporting a unihash in the downstream client isn't
|
||||
# propagating to the upstream server
|
||||
taskhash4 = "e3da00593d6a7fb435c7e2114976c59c5fd6d561"
|
||||
outhash4 = "1cf8713e645f491eb9c959d20b5cae1c47133a292626dda9b10709857cbe688a"
|
||||
unihash4 = "3b5d3d83f07f259e9086fcb422c855286e18a57d"
|
||||
down_client.report_unihash(taskhash4, self.METHOD, outhash4, unihash4)
|
||||
down_client.backfill_wait()
|
||||
|
||||
self.assertClientGetHash(down_client, taskhash4, unihash4)
|
||||
self.assertClientGetHash(side_client, taskhash4, unihash4)
|
||||
self.assertClientGetHash(self.client, taskhash4, None)
|
||||
|
||||
# Test that reporting a unihash in the downstream is able to find a
|
||||
# match which was previously reported to the upstream server
|
||||
taskhash5 = '35788efcb8dfb0a02659d81cf2bfd695fb30faf9'
|
||||
outhash5 = '2765d4a5884be49b28601445c2760c5f21e7e5c0ee2b7e3fce98fd7e5970796f'
|
||||
unihash5 = 'f46d3fbb439bd9b921095da657a4de906510d2cd'
|
||||
result = self.client.report_unihash(taskhash5, self.METHOD, outhash5, unihash5)
|
||||
|
||||
taskhash6 = '35788efcb8dfb0a02659d81cf2bfd695fb30fafa'
|
||||
unihash6 = 'f46d3fbb439bd9b921095da657a4de906510d2ce'
|
||||
result = down_client.report_unihash(taskhash6, self.METHOD, outhash5, unihash6)
|
||||
self.assertEqual(result['unihash'], unihash5, 'Server failed to copy unihash from upstream')
|
||||
|
||||
def test_ro_server(self):
|
||||
(ro_client, ro_server) = self.start_server(dbpath=self.server.dbpath, read_only=True)
|
||||
|
||||
# Report a hash via the read-write server
|
||||
taskhash = '35788efcb8dfb0a02659d81cf2bfd695fb30faf9'
|
||||
outhash = '2765d4a5884be49b28601445c2760c5f21e7e5c0ee2b7e3fce98fd7e5970796f'
|
||||
unihash = 'f46d3fbb439bd9b921095da657a4de906510d2cd'
|
||||
|
||||
result = self.client.report_unihash(taskhash, self.METHOD, outhash, unihash)
|
||||
self.assertEqual(result['unihash'], unihash, 'Server returned bad unihash')
|
||||
|
||||
# Check the hash via the read-only server
|
||||
self.assertClientGetHash(ro_client, taskhash, unihash)
|
||||
|
||||
# Ensure that reporting via the read-only server fails
|
||||
taskhash2 = 'c665584ee6817aa99edfc77a44dd853828279370'
|
||||
outhash2 = '3c979c3db45c569f51ab7626a4651074be3a9d11a84b1db076f5b14f7d39db44'
|
||||
unihash2 = '90e9bc1d1f094c51824adca7f8ea79a048d68824'
|
||||
|
||||
with self.assertRaises(HashConnectionError):
|
||||
ro_client.report_unihash(taskhash2, self.METHOD, outhash2, unihash2)
|
||||
|
||||
# Ensure that the database was not modified
|
||||
self.assertClientGetHash(self.client, taskhash2, None)
|
||||
class TestHashEquivalenceUnixServer(TestHashEquivalenceServer, unittest.TestCase):
|
||||
def get_server_addr(self):
|
||||
return "unix://" + os.path.join(self.temp_dir.name, 'sock')
|
||||
|
||||
|
||||
class TestHashEquivalenceUnixServer(HashEquivalenceTestSetup, HashEquivalenceCommonTests, unittest.TestCase):
|
||||
def get_server_addr(self, server_idx):
|
||||
return "unix://" + os.path.join(self.temp_dir.name, 'sock%d' % server_idx)
|
||||
|
||||
|
||||
class TestHashEquivalenceUnixServerLongPath(HashEquivalenceTestSetup, unittest.TestCase):
|
||||
DEEP_DIRECTORY = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb/ccccccccccccccccccccccccccccccccccccccccccc"
|
||||
def get_server_addr(self, server_idx):
|
||||
os.makedirs(os.path.join(self.temp_dir.name, self.DEEP_DIRECTORY), exist_ok=True)
|
||||
return "unix://" + os.path.join(self.temp_dir.name, self.DEEP_DIRECTORY, 'sock%d' % server_idx)
|
||||
|
||||
|
||||
def test_long_sock_path(self):
|
||||
# Simple test that hashes can be created
|
||||
taskhash = '35788efcb8dfb0a02659d81cf2bfd695fb30faf9'
|
||||
outhash = '2765d4a5884be49b28601445c2760c5f21e7e5c0ee2b7e3fce98fd7e5970796f'
|
||||
unihash = 'f46d3fbb439bd9b921095da657a4de906510d2cd'
|
||||
|
||||
self.assertClientGetHash(self.client, taskhash, None)
|
||||
|
||||
result = self.client.report_unihash(taskhash, self.METHOD, outhash, unihash)
|
||||
self.assertEqual(result['unihash'], unihash, 'Server returned bad unihash')
|
||||
|
||||
|
||||
class TestHashEquivalenceTCPServer(HashEquivalenceTestSetup, HashEquivalenceCommonTests, unittest.TestCase):
|
||||
def get_server_addr(self, server_idx):
|
||||
class TestHashEquivalenceTCPServer(TestHashEquivalenceServer, unittest.TestCase):
|
||||
def get_server_addr(self):
|
||||
# Some hosts cause asyncio module to misbehave, when IPv6 is not enabled.
|
||||
# If IPv6 is enabled, it should be safe to use localhost directly, in general
|
||||
# case it is more reliable to resolve the IP address explicitly.
|
||||
return socket.gethostbyname("localhost") + ":0"
|
||||
|
||||
|
||||
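The tests above exercise the upstream-proxy, backfill and read-only behaviour that this comparison strips back out. A rough sketch of how such a chain is assembled (illustrative only; it assumes the create_server() keywords shown in the hashserv/__init__.py hunk near the top, and the socket paths and database files are made up):

    import hashserv

    # Server holding the canonical hash database.
    upstream = hashserv.create_server("unix://./up.sock", "./up.db")

    # Downstream server: misses are proxied to the upstream and then
    # backfilled into its own database.
    downstream = hashserv.create_server("unix://./down.sock", "./down.db",
                                        upstream="unix://./up.sock")

    # Read-only view of the downstream database; 'report' requests fail here.
    readonly = hashserv.create_server("unix://./ro.sock", "./down.db",
                                      read_only=True)

    # Each server still needs serve_forever() run for it, e.g. in its own
    # process, as start_server() does in the tests above.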
@@ -94,7 +94,7 @@ class LayerIndex():
|
||||
if not param:
|
||||
continue
|
||||
item = param.split('=', 1)
|
||||
logger.debug(item)
|
||||
logger.debug(1, item)
|
||||
param_dict[item[0]] = item[1]
|
||||
|
||||
return param_dict
|
||||
@@ -123,7 +123,7 @@ class LayerIndex():
|
||||
up = urlparse(url)
|
||||
|
||||
if username:
|
||||
logger.debug("Configuring authentication for %s..." % url)
|
||||
logger.debug(1, "Configuring authentication for %s..." % url)
|
||||
password_mgr = urllib.request.HTTPPasswordMgrWithDefaultRealm()
|
||||
password_mgr.add_password(None, "%s://%s" % (up.scheme, up.netloc), username, password)
|
||||
handler = urllib.request.HTTPBasicAuthHandler(password_mgr)
|
||||
@@ -133,20 +133,20 @@ class LayerIndex():
|
||||
|
||||
urllib.request.install_opener(opener)
|
||||
|
||||
logger.debug("Fetching %s (%s)..." % (url, ["without authentication", "with authentication"][bool(username)]))
|
||||
logger.debug(1, "Fetching %s (%s)..." % (url, ["without authentication", "with authentication"][bool(username)]))
|
||||
|
||||
try:
|
||||
res = urlopen(Request(url, headers={'User-Agent': 'Mozilla/5.0 (bitbake/lib/layerindex)'}, unverifiable=True))
|
||||
except urllib.error.HTTPError as e:
|
||||
logger.debug("HTTP Error: %s: %s" % (e.code, e.reason))
|
||||
logger.debug(" Requested: %s" % (url))
|
||||
logger.debug(" Actual: %s" % (e.geturl()))
|
||||
logger.debug(1, "HTTP Error: %s: %s" % (e.code, e.reason))
|
||||
logger.debug(1, " Requested: %s" % (url))
|
||||
logger.debug(1, " Actual: %s" % (e.geturl()))
|
||||
|
||||
if e.code == 404:
|
||||
logger.debug("Request not found.")
|
||||
logger.debug(1, "Request not found.")
|
||||
raise LayerIndexFetchError(url, e)
|
||||
else:
|
||||
logger.debug("Headers:\n%s" % (e.headers))
|
||||
logger.debug(1, "Headers:\n%s" % (e.headers))
|
||||
raise LayerIndexFetchError(url, e)
|
||||
except OSError as e:
|
||||
error = 0
|
||||
@@ -170,7 +170,7 @@ class LayerIndex():
|
||||
raise LayerIndexFetchError(url, "Unable to fetch OSError exception: %s" % e)
|
||||
|
||||
finally:
|
||||
logger.debug("...fetching %s (%s), done." % (url, ["without authentication", "with authentication"][bool(username)]))
|
||||
logger.debug(1, "...fetching %s (%s), done." % (url, ["without authentication", "with authentication"][bool(username)]))
|
||||
|
||||
return res
|
||||
|
||||
@@ -205,14 +205,14 @@ The format of the indexURI:
|
||||
if reload:
|
||||
self.indexes = []
|
||||
|
||||
logger.debug('Loading: %s' % indexURI)
|
||||
logger.debug(1, 'Loading: %s' % indexURI)
|
||||
|
||||
if not self.plugins:
|
||||
raise LayerIndexException("No LayerIndex Plugins available")
|
||||
|
||||
for plugin in self.plugins:
|
||||
# Check if the plugin was initialized
|
||||
logger.debug('Trying %s' % plugin.__class__)
|
||||
logger.debug(1, 'Trying %s' % plugin.__class__)
|
||||
if not hasattr(plugin, 'type') or not plugin.type:
|
||||
continue
|
||||
try:
|
||||
@@ -220,11 +220,11 @@ The format of the indexURI:
|
||||
indexEnt = plugin.load_index(indexURI, load)
|
||||
break
|
||||
except LayerIndexPluginUrlError as e:
|
||||
logger.debug("%s doesn't support %s" % (plugin.type, e.url))
|
||||
logger.debug(1, "%s doesn't support %s" % (plugin.type, e.url))
|
||||
except NotImplementedError:
|
||||
pass
|
||||
else:
|
||||
logger.debug("No plugins support %s" % indexURI)
|
||||
logger.debug(1, "No plugins support %s" % indexURI)
|
||||
raise LayerIndexException("No plugins support %s" % indexURI)
|
||||
|
||||
# Mark CONFIG data as something we've added...
|
||||
@@ -255,19 +255,19 @@ will write out the individual elements split by layer and related components.
|
||||
|
||||
for plugin in self.plugins:
|
||||
# Check if the plugin was initialized
|
||||
logger.debug('Trying %s' % plugin.__class__)
|
||||
logger.debug(1, 'Trying %s' % plugin.__class__)
|
||||
if not hasattr(plugin, 'type') or not plugin.type:
|
||||
continue
|
||||
try:
|
||||
plugin.store_index(indexURI, index)
|
||||
break
|
||||
except LayerIndexPluginUrlError as e:
|
||||
logger.debug("%s doesn't support %s" % (plugin.type, e.url))
|
||||
logger.debug(1, "%s doesn't support %s" % (plugin.type, e.url))
|
||||
except NotImplementedError:
|
||||
logger.debug("Store not implemented in %s" % plugin.type)
|
||||
logger.debug(1, "Store not implemented in %s" % plugin.type)
|
||||
pass
|
||||
else:
|
||||
logger.debug("No plugins support %s" % indexURI)
|
||||
logger.debug(1, "No plugins support %s" % indexURI)
|
||||
raise LayerIndexException("No plugins support %s" % indexURI)
|
||||
|
||||
|
||||
@@ -292,7 +292,7 @@ layerBranches set. If not, they are effectively blank.'''
|
||||
the default configuration until the first vcs_url/branch match.'''
|
||||
|
||||
for index in self.indexes:
|
||||
logger.debug(' searching %s' % index.config['DESCRIPTION'])
|
||||
logger.debug(1, ' searching %s' % index.config['DESCRIPTION'])
|
||||
layerBranch = index.find_vcs_url(vcs_url, [branch])
|
||||
if layerBranch:
|
||||
return layerBranch
|
||||
@@ -304,7 +304,7 @@ layerBranches set. If not, they are effectively blank.'''
|
||||
If a branch has not been specified, we will iterate over the branches in
|
||||
the default configuration until the first collection/branch match.'''
|
||||
|
||||
logger.debug('find_collection: %s (%s) %s' % (collection, version, branch))
|
||||
logger.debug(1, 'find_collection: %s (%s) %s' % (collection, version, branch))
|
||||
|
||||
if branch:
|
||||
branches = [branch]
|
||||
@@ -312,12 +312,12 @@ layerBranches set. If not, they are effectively blank.'''
|
||||
branches = None
|
||||
|
||||
for index in self.indexes:
|
||||
logger.debug(' searching %s' % index.config['DESCRIPTION'])
|
||||
logger.debug(1, ' searching %s' % index.config['DESCRIPTION'])
|
||||
layerBranch = index.find_collection(collection, version, branches)
|
||||
if layerBranch:
|
||||
return layerBranch
|
||||
else:
|
||||
logger.debug('Collection %s (%s) not found for branch (%s)' % (collection, version, branch))
|
||||
logger.debug(1, 'Collection %s (%s) not found for branch (%s)' % (collection, version, branch))
|
||||
return None
|
||||
|
||||
def find_layerbranch(self, name, branch=None):
|
||||
@@ -408,7 +408,7 @@ layerBranches set. If not, they are effectively blank.'''
|
||||
version=deplayerbranch.version
|
||||
)
|
||||
if rdeplayerbranch != deplayerbranch:
|
||||
logger.debug('Replaced %s:%s:%s with %s:%s:%s' % \
|
||||
logger.debug(1, 'Replaced %s:%s:%s with %s:%s:%s' % \
|
||||
(deplayerbranch.index.config['DESCRIPTION'],
|
||||
deplayerbranch.branch.name,
|
||||
deplayerbranch.layer.name,
|
||||
@@ -1121,7 +1121,7 @@ class LayerBranch(LayerIndexItemObj):
|
||||
@property
|
||||
def branch(self):
|
||||
try:
|
||||
logger.debug("Get branch object from branches[%s]" % (self.branch_id))
|
||||
logger.debug(1, "Get branch object from branches[%s]" % (self.branch_id))
|
||||
return self.index.branches[self.branch_id]
|
||||
except KeyError:
|
||||
raise AttributeError('Unable to find branches in index to map branch_id %s' % self.branch_id)
|
||||
@@ -1149,7 +1149,7 @@ class LayerBranch(LayerIndexItemObj):
|
||||
|
||||
@actual_branch.setter
|
||||
def actual_branch(self, value):
|
||||
logger.debug("Set actual_branch to %s .. name is %s" % (value, self.branch.name))
|
||||
logger.debug(1, "Set actual_branch to %s .. name is %s" % (value, self.branch.name))
|
||||
if value != self.branch.name:
|
||||
self._setattr('actual_branch', value, prop=False)
|
||||
else:
|
||||
|
||||
@@ -173,7 +173,7 @@ class CookerPlugin(layerindexlib.plugin.IndexPlugin):
|
||||
else:
|
||||
branches = ['HEAD']
|
||||
|
||||
logger.debug("Loading cooker data branches %s" % branches)
|
||||
logger.debug(1, "Loading cooker data branches %s" % branches)
|
||||
|
||||
index = self._load_bblayers(branches=branches)
|
||||
|
||||
@@ -220,7 +220,7 @@ class CookerPlugin(layerindexlib.plugin.IndexPlugin):
|
||||
required=required, layerbranch=layerBranchId,
|
||||
dependency=depLayerBranch.layer_id)
|
||||
|
||||
logger.debug('%s requires %s' % (layerDependency.layer.name, layerDependency.dependency.name))
|
||||
logger.debug(1, '%s requires %s' % (layerDependency.layer.name, layerDependency.dependency.name))
|
||||
index.add_element("layerDependencies", [layerDependency])
|
||||
|
||||
return layerDependencyId
|
||||
|
||||
@@ -82,7 +82,7 @@ class RestApiPlugin(layerindexlib.plugin.IndexPlugin):
|
||||
|
||||
|
||||
def load_cache(path, index, branches=[]):
|
||||
logger.debug('Loading json file %s' % path)
|
||||
logger.debug(1, 'Loading json file %s' % path)
|
||||
with open(path, 'rt', encoding='utf-8') as f:
|
||||
pindex = json.load(f)
|
||||
|
||||
@@ -102,7 +102,7 @@ class RestApiPlugin(layerindexlib.plugin.IndexPlugin):
|
||||
if newpBranch:
|
||||
index.add_raw_element('branches', layerindexlib.Branch, newpBranch)
|
||||
else:
|
||||
logger.debug('No matching branches (%s) in index file(s)' % branches)
|
||||
logger.debug(1, 'No matching branches (%s) in index file(s)' % branches)
|
||||
# No matching branches.. return nothing...
|
||||
return
|
||||
|
||||
@@ -120,7 +120,7 @@ class RestApiPlugin(layerindexlib.plugin.IndexPlugin):
|
||||
load_cache(up.path, index, branches)
|
||||
return index
|
||||
|
||||
logger.debug('Loading from dir %s...' % (up.path))
|
||||
logger.debug(1, 'Loading from dir %s...' % (up.path))
|
||||
for (dirpath, _, filenames) in os.walk(up.path):
|
||||
for filename in filenames:
|
||||
if not filename.endswith('.json'):
|
||||
@@ -144,7 +144,7 @@ class RestApiPlugin(layerindexlib.plugin.IndexPlugin):
|
||||
def _get_json_response(apiurl=None, username=None, password=None, retry=True):
|
||||
assert apiurl is not None
|
||||
|
||||
logger.debug("fetching %s" % apiurl)
|
||||
logger.debug(1, "fetching %s" % apiurl)
|
||||
|
||||
up = urlparse(apiurl)
|
||||
|
||||
@@ -163,9 +163,9 @@ class RestApiPlugin(layerindexlib.plugin.IndexPlugin):
|
||||
parsed = json.loads(res.read().decode('utf-8'))
|
||||
except ConnectionResetError:
|
||||
if retry:
|
||||
logger.debug("%s: Connection reset by peer. Retrying..." % url)
|
||||
logger.debug(1, "%s: Connection reset by peer. Retrying..." % url)
|
||||
parsed = _get_json_response(apiurl=up_stripped.geturl(), username=username, password=password, retry=False)
|
||||
logger.debug("%s: retry successful.")
|
||||
logger.debug(1, "%s: retry successful.")
|
||||
else:
|
||||
raise layerindexlib.LayerIndexFetchError('%s: Connection reset by peer. Is there a firewall blocking your connection?' % apiurl)
|
||||
|
||||
@@ -207,25 +207,25 @@ class RestApiPlugin(layerindexlib.plugin.IndexPlugin):
|
||||
if "*" not in branches:
|
||||
filter = "?filter=name:%s" % "OR".join(branches)
|
||||
|
||||
logger.debug("Loading %s from %s" % (branches, index.apilinks['branches']))
|
||||
logger.debug(1, "Loading %s from %s" % (branches, index.apilinks['branches']))
|
||||
|
||||
# The link won't include username/password, so pull it from the original url
|
||||
pindex['branches'] = _get_json_response(index.apilinks['branches'] + filter,
|
||||
username=up.username, password=up.password)
|
||||
if not pindex['branches']:
|
||||
logger.debug("No valid branches (%s) found at url %s." % (branch, url))
|
||||
logger.debug(1, "No valid branches (%s) found at url %s." % (branch, url))
|
||||
return index
|
||||
index.add_raw_element("branches", layerindexlib.Branch, pindex['branches'])
|
||||
|
||||
# Load all of the layerItems (these can not be easily filtered)
|
||||
logger.debug("Loading %s from %s" % ('layerItems', index.apilinks['layerItems']))
|
||||
logger.debug(1, "Loading %s from %s" % ('layerItems', index.apilinks['layerItems']))
|
||||
|
||||
|
||||
# The link won't include username/password, so pull it from the original url
|
||||
pindex['layerItems'] = _get_json_response(index.apilinks['layerItems'],
|
||||
username=up.username, password=up.password)
|
||||
if not pindex['layerItems']:
|
||||
logger.debug("No layers were found at url %s." % (url))
|
||||
logger.debug(1, "No layers were found at url %s." % (url))
|
||||
return index
|
||||
index.add_raw_element("layerItems", layerindexlib.LayerItem, pindex['layerItems'])
|
||||
|
||||
@@ -235,13 +235,13 @@ class RestApiPlugin(layerindexlib.plugin.IndexPlugin):
|
||||
for branch in index.branches:
|
||||
filter = "?filter=branch__name:%s" % index.branches[branch].name
|
||||
|
||||
logger.debug("Loading %s from %s" % ('layerBranches', index.apilinks['layerBranches']))
|
||||
logger.debug(1, "Loading %s from %s" % ('layerBranches', index.apilinks['layerBranches']))
|
||||
|
||||
# The link won't include username/password, so pull it from the original url
|
||||
pindex['layerBranches'] = _get_json_response(index.apilinks['layerBranches'] + filter,
|
||||
username=up.username, password=up.password)
|
||||
if not pindex['layerBranches']:
|
||||
logger.debug("No valid layer branches (%s) found at url %s." % (branches or "*", url))
|
||||
logger.debug(1, "No valid layer branches (%s) found at url %s." % (branches or "*", url))
|
||||
return index
|
||||
index.add_raw_element("layerBranches", layerindexlib.LayerBranch, pindex['layerBranches'])
|
||||
|
||||
@@ -256,7 +256,7 @@ class RestApiPlugin(layerindexlib.plugin.IndexPlugin):
|
||||
("distros", layerindexlib.Distro)]:
|
||||
if lName not in load:
|
||||
continue
|
||||
logger.debug("Loading %s from %s" % (lName, index.apilinks[lName]))
|
||||
logger.debug(1, "Loading %s from %s" % (lName, index.apilinks[lName]))
|
||||
|
||||
# The link won't include username/password, so pull it from the original url
|
||||
pindex[lName] = _get_json_response(index.apilinks[lName] + filter,
|
||||
@@ -283,7 +283,7 @@ class RestApiPlugin(layerindexlib.plugin.IndexPlugin):
|
||||
if up.scheme != 'file':
|
||||
raise layerindexlib.plugin.LayerIndexPluginUrlError(self.type, url)
|
||||
|
||||
logger.debug("Storing to %s..." % up.path)
|
||||
logger.debug(1, "Storing to %s..." % up.path)
|
||||
|
||||
try:
|
||||
layerbranches = index.layerBranches
|
||||
@@ -299,12 +299,12 @@ class RestApiPlugin(layerindexlib.plugin.IndexPlugin):
|
||||
if getattr(index, objects)[obj].layerbranch_id == layerbranchid:
|
||||
filtered.append(getattr(index, objects)[obj]._data)
|
||||
except AttributeError:
|
||||
logger.debug('No obj.layerbranch_id: %s' % objects)
|
||||
logger.debug(1, 'No obj.layerbranch_id: %s' % objects)
|
||||
# No simple filter method, just include it...
|
||||
try:
|
||||
filtered.append(getattr(index, objects)[obj]._data)
|
||||
except AttributeError:
|
||||
logger.debug('No obj._data: %s %s' % (objects, type(obj)))
|
||||
logger.debug(1, 'No obj._data: %s %s' % (objects, type(obj)))
|
||||
filtered.append(obj)
|
||||
return filtered
|
||||
|
||||
|
||||
@@ -72,7 +72,7 @@ class LayerIndexCookerTest(LayersTest):
|
||||
|
||||
def test_find_collection(self):
|
||||
def _check(collection, expected):
|
||||
self.logger.debug("Looking for collection %s..." % collection)
|
||||
self.logger.debug(1, "Looking for collection %s..." % collection)
|
||||
result = self.layerindex.find_collection(collection)
|
||||
if expected:
|
||||
self.assertIsNotNone(result, msg="Did not find %s when it shouldn't be there" % collection)
|
||||
@@ -91,7 +91,7 @@ class LayerIndexCookerTest(LayersTest):
|
||||
|
||||
def test_find_layerbranch(self):
|
||||
def _check(name, expected):
|
||||
self.logger.debug("Looking for layerbranch %s..." % name)
|
||||
self.logger.debug(1, "Looking for layerbranch %s..." % name)
|
||||
result = self.layerindex.find_layerbranch(name)
|
||||
if expected:
|
||||
self.assertIsNotNone(result, msg="Did not find %s when it shouldn't be there" % collection)
|
||||
|
||||
@@ -57,11 +57,11 @@ class LayerIndexWebRestApiTest(LayersTest):
|
||||
type in self.layerindex.indexes[0].config['local']:
|
||||
continue
|
||||
for id in getattr(self.layerindex.indexes[0], type):
|
||||
self.logger.debug("type %s" % (type))
|
||||
self.logger.debug(1, "type %s" % (type))
|
||||
|
||||
self.assertTrue(id in getattr(reload.indexes[0], type), msg="Id number not in reloaded index")
|
||||
|
||||
self.logger.debug("%s ? %s" % (getattr(self.layerindex.indexes[0], type)[id], getattr(reload.indexes[0], type)[id]))
|
||||
self.logger.debug(1, "%s ? %s" % (getattr(self.layerindex.indexes[0], type)[id], getattr(reload.indexes[0], type)[id]))
|
||||
|
||||
self.assertEqual(getattr(self.layerindex.indexes[0], type)[id], getattr(reload.indexes[0], type)[id], msg="Reloaded contents different")
|
||||
|
||||
@@ -80,11 +80,11 @@ class LayerIndexWebRestApiTest(LayersTest):
|
||||
type in self.layerindex.indexes[0].config['local']:
|
||||
continue
|
||||
for id in getattr(self.layerindex.indexes[0] ,type):
|
||||
self.logger.debug("type %s" % (type))
|
||||
self.logger.debug(1, "type %s" % (type))
|
||||
|
||||
self.assertTrue(id in getattr(reload.indexes[0], type), msg="Id number missing from reloaded data")
|
||||
|
||||
self.logger.debug("%s ? %s" % (getattr(self.layerindex.indexes[0] ,type)[id], getattr(reload.indexes[0], type)[id]))
|
||||
self.logger.debug(1, "%s ? %s" % (getattr(self.layerindex.indexes[0] ,type)[id], getattr(reload.indexes[0], type)[id]))
|
||||
|
||||
self.assertEqual(getattr(self.layerindex.indexes[0] ,type)[id], getattr(reload.indexes[0], type)[id], msg="reloaded data does not match original")
|
||||
|
||||
@@ -111,14 +111,14 @@ class LayerIndexWebRestApiTest(LayersTest):
|
||||
if dep.layer.name == 'meta-python':
|
||||
break
|
||||
else:
|
||||
self.logger.debug("meta-python was not found")
|
||||
self.logger.debug(1, "meta-python was not found")
|
||||
raise self.failureException
|
||||
|
||||
# Only check the first element...
|
||||
break
|
||||
else:
|
||||
# Empty list, this is bad.
|
||||
self.logger.debug("Empty list of dependencies")
|
||||
self.logger.debug(1, "Empty list of dependencies")
|
||||
self.assertIsNotNone(first, msg="Empty list of dependencies")
|
||||
|
||||
# Last dep should be the requested item
|
||||
@@ -128,7 +128,7 @@ class LayerIndexWebRestApiTest(LayersTest):
|
||||
@skipIfNoNetwork()
|
||||
def test_find_collection(self):
|
||||
def _check(collection, expected):
|
||||
self.logger.debug("Looking for collection %s..." % collection)
|
||||
self.logger.debug(1, "Looking for collection %s..." % collection)
|
||||
result = self.layerindex.find_collection(collection)
|
||||
if expected:
|
||||
self.assertIsNotNone(result, msg="Did not find %s when it should be there" % collection)
|
||||
@@ -148,11 +148,11 @@ class LayerIndexWebRestApiTest(LayersTest):
|
||||
@skipIfNoNetwork()
|
||||
def test_find_layerbranch(self):
|
||||
def _check(name, expected):
|
||||
self.logger.debug("Looking for layerbranch %s..." % name)
|
||||
self.logger.debug(1, "Looking for layerbranch %s..." % name)
|
||||
|
||||
for index in self.layerindex.indexes:
|
||||
for layerbranchid in index.layerBranches:
|
||||
self.logger.debug("Present: %s" % index.layerBranches[layerbranchid].layer.name)
|
||||
self.logger.debug(1, "Present: %s" % index.layerBranches[layerbranchid].layer.name)
|
||||
result = self.layerindex.find_layerbranch(name)
|
||||
if expected:
|
||||
self.assertIsNotNone(result, msg="Did not find %s when it should be there" % collection)
|
||||
|
||||
@@ -119,7 +119,7 @@ class BuildTest(unittest.TestCase):
|
||||
if os.environ.get("TOASTER_TEST_USE_SSTATE_MIRROR"):
|
||||
ProjectVariable.objects.get_or_create(
|
||||
name="SSTATE_MIRRORS",
|
||||
value="file://.* http://sstate.yoctoproject.org/PATH;downloadfilename=PATH",
|
||||
value="file://.* http://autobuilder.yoctoproject.org/pub/sstate/PATH;downloadfilename=PATH",
|
||||
project=project)
|
||||
|
||||
ProjectTarget.objects.create(project=project,
|
||||
|
||||
@@ -1,78 +0,0 @@
#!/usr/bin/env python3

# Copyright (C) 2020 Agilent Technologies, Inc.
# Author: Chris Laplante <chris.laplante@agilent.com>

# This sendemail-validate hook injects 'From: ' header lines into outgoing
# emails sent via 'git send-email', to ensure that accurate commit authorship
# information is present. It was created because some email servers
# (notably Microsoft Exchange / Office 360) seem to butcher outgoing patches,
# resulting in incorrect authorship.

# Current limitations:
# 1. Assumes one per patch per email
# 2. Minimal error checking
#
# Installation:
# 1. Copy to .git/hooks/sendemail-validate
# 2. chmod +x .git/hooks/sendemail-validate


import enum
import re
import subprocess
import sys


class Subject(enum.IntEnum):
    NOT_SEEN = 0
    CONSUMING = 1
    SEEN = 2


def make_from_line():
    cmd = ["git", "var", "GIT_COMMITTER_IDENT"]
    proc = subprocess.run(cmd, check=True, stdout=subprocess.PIPE, universal_newlines=True)
    regex = re.compile(r"^(.*>).*$")
    match = regex.match(proc.stdout)
    assert match is not None
    return "From: {0}".format(match.group(1))


def main():
    email = sys.argv[1]

    with open(email, "r") as f:
        email_lines = f.read().split("\n")

    subject_seen = Subject.NOT_SEEN
    first_body_line = None
    for i, line in enumerate(email_lines):
        if (subject_seen == Subject.NOT_SEEN) and line.startswith("Subject: "):
            subject_seen = Subject.CONSUMING
            continue
        if subject_seen == Subject.CONSUMING:
            if not line.strip():
                subject_seen = Subject.SEEN
            continue
        if subject_seen == Subject.SEEN:
            first_body_line = i
            break

    assert subject_seen == Subject.SEEN
    assert first_body_line is not None

    from_line = make_from_line()
    # Only add FROM line if it is not already there
    if email_lines[first_body_line] != from_line:
        email_lines.insert(first_body_line, from_line)
        email_lines.insert(first_body_line + 1, "")
        with open(email, "w") as f:
            f.write("\n".join(email_lines))

    return 0


if __name__ == "__main__":
    sys.exit(main())
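For reference, `git var GIT_COMMITTER_IDENT` prints a single line of the form "Name <email> timestamp timezone"; the hook above keeps everything up to the closing ">" and prefixes it with "From: ". A minimal sketch of that transformation, using a made-up ident string rather than a real committer identity:

    import re

    # Hypothetical output of `git var GIT_COMMITTER_IDENT` (illustration only)
    ident = "Jane Developer <jane@example.com> 1600000000 +0100"

    # Same pattern the hook uses: capture everything up to the closing '>'
    match = re.match(r"^(.*>).*$", ident)
    if match:
        print("From: {0}".format(match.group(1)))
        # -> From: Jane Developer <jane@example.com>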
2	documentation/.gitignore vendored
@@ -1,3 +1 @@
_build/
Pipfile.lock
.vscode/
@@ -3,7 +3,7 @@

# You can set these variables from the command line, and also
# from the environment for the first two.
SPHINXOPTS ?= -j auto
SPHINXOPTS ?=
SPHINXBUILD ?= sphinx-build
SOURCEDIR = .
BUILDDIR = _build
@@ -1,14 +0,0 @@
[[source]]
name = "pypi"
url = "https://pypi.org/simple"
verify_ssl = true

[dev-packages]

[packages]
sphinx = "*"
sphinx-rtd-theme = "*"
pyyaml = "*"

[requires]
python_version = "3"
@@ -2,7 +2,7 @@ documentation
=============

This is the directory that contains the Yocto Project documentation. The Yocto
Project source repositories at https://git.yoctoproject.org/cgit.cgi have two
Project source repositories at http://git.yoctoproject.org/cgit.cgi have two
instances of the "documentation" directory. You should understand each of
these instances.

@@ -47,12 +47,12 @@ Folders exist for individual manuals as follows:
Each folder is self-contained regarding content and figures.

If you want to find HTML versions of the Yocto Project manuals on the web,
go to https://www.yoctoproject.org and click on the "Docs" tab. From
go to http://www.yoctoproject.org and click on the "Documentation" tab. From
there you have access to archived documentation from previous releases, current
documentation for the latest release, and "Docs in Progress" for the release
currently being developed.

In general, the Yocto Project site (https://www.yoctoproject.org) is a great
In general, the Yocto Project site (http://www.yoctoproject.org) is a great
reference for both information and downloads.

poky.yaml

@@ -91,13 +91,13 @@ Yocto Project documentation website
A new website has been created to host the Yocto Project
documentation, it can be found at: https://docs.yoctoproject.org/.

The entire Yocto Project documentation, as well as the BitBake manual,
The entire Yocto Project documentation, as well as the BitBake manual
is published on this website, including all previously released
versions. A version switcher was added, as a drop-down menu on the top
of the page to switch back and forth between the various versions of
the current active Yocto Project releases.

Transition pages have been added (as rst files) to show links to old
Transition pages have been added (as rst file) to show links to old
versions of the Yocto Project documentation with links to each manual
generated with DocBook.

@@ -109,7 +109,7 @@ obvious reasons, we will only support building the Yocto Project
documentation with Python3.

Sphinx might be available in your Linux distro packages repositories,
however it is not recommended to use distro packages, as they might be
however it is not recommend using distro packages, as they might be
old versions, especially if you are using an LTS version of your
distro. The recommended method to install Sphinx and all required
dependencies is to use the Python Package Index (pip).

@@ -127,13 +127,6 @@ The resulting HTML index page will be _build/html/index.html, and you
can browse your own copy of the locally generated documentation with
your browser.

Alternatively, you can use Pipenv to automatically install all required
dependencies in a virtual environment:

   $ cd documentation
   $ pipenv install
   $ pipenv run make html

Sphinx theme and CSS customization
==================================

@@ -185,7 +178,7 @@ Sphinx has a glossary directive. From
https://www.sphinx-doc.org/en/master/usage/restructuredtext/directives.html#glossary:

This directive must contain a reST definition list with terms and
definitions. It's then possible to refer to each definition through the
definitions. The definitions will then be referencable with the
[https://www.sphinx-doc.org/en/master/usage/restructuredtext/roles.html#role-term
'term' role].

@@ -206,7 +199,7 @@ however there are important shortcomings. For example they cannot be
used/nested inside code-block sections.

A Sphinx extension was implemented to support variable substitutions
to mimic the DocBook based documentation behavior. Variable
to mimic the DocBook based documentation behavior. Variabes
substitutions are done while reading/parsing the .rst files. The
pattern for variables substitutions is the same as with DocBook,
e.g. `&VAR;`.

@@ -222,13 +215,13 @@ For example, the following .rst content will produce the 'expected'
content:

.. code-block::
   $ mkdir poky-&DISTRO;
   $ mkdir ~/poky-&DISTRO;
   or
   $ git clone &YOCTO_GIT_URL;/git/poky -b &DISTRO_NAME_NO_CAP;

Variables can be nested, like it was the case for DocBook:

   YOCTO_HOME_URL : "https://www.yoctoproject.org"
   YOCTO_HOME_URL : "http://www.yoctoproject.org"
   YOCTO_DOCS_URL : "&YOCTO_HOME_URL;/docs"
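As a rough sketch of the mechanism described above (this is not the project's actual Sphinx extension; the dictionary below is a hypothetical subset of the poky.yaml definitions), `&VAR;` patterns can be expanded recursively while reading the sources:

    import re

    # Hypothetical subset of the definitions normally provided via poky.yaml
    definitions = {
        "YOCTO_HOME_URL": "https://www.yoctoproject.org",
        "YOCTO_DOCS_URL": "&YOCTO_HOME_URL;/docs",
    }

    def expand(text, table):
        # Re-apply the substitution until nothing changes, so nested
        # definitions such as YOCTO_DOCS_URL resolve completely.
        pattern = re.compile(r"&(\w+);")
        while True:
            expanded = pattern.sub(lambda m: table.get(m.group(1), m.group(0)), text)
            if expanded == text:
                return expanded
            text = expanded

    print(expand("See &YOCTO_DOCS_URL; for the manuals.", definitions))
    # -> See https://www.yoctoproject.org/docs for the manuals.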
Note directive
@@ -237,14 +230,14 @@ Note directive

Sphinx has a builtin 'note' directive that produces clean Note section
in the output file. There are various types of directives such as
"attention", "caution", "danger", "error", "hint", "important", "tip",
"warning", "admonition" that are supported, and additional directives
"warning", "admonition" that are supported, and additional directive
can be added as Sphinx extension if needed.

Figures
=======

The Yocto Project documentation has many figures/images. Sphinx has a
'figure' directive which is straightforward to use. To include a
'figure' directive which is straight forward to use. To include a
figure in the body of the documentation:

   .. image:: figures/YP-flow-diagram.png

@@ -259,13 +252,10 @@ websites.
More information can be found here:
https://sublime-and-sphinx-guide.readthedocs.io/en/latest/references.html.

Anchor (<#link>) links are forbidden as they are not checked by Sphinx during
the build and may be broken without knowing about it.

References
==========

The following extension is enabled by default:
The following extension is enabed by default:
sphinx.ext.autosectionlabel
(https://www.sphinx-doc.org/en/master/usage/extensions/autosectionlabel.html).

@@ -274,7 +264,7 @@ autosectionlabel_prefix_document is enabled by default, so that we can
insert references from any document.

For example, to insert an HTML link to a section from
documentation/manual/intro.rst, use:
documentaion/manual/intro.rst, use:

   Please check this :ref:`manual/intro:Cross-References to Locations in the Same Document`

@@ -297,8 +287,7 @@ Extlinks

The sphinx.ext.extlinks extension is enabled by default
(https://sublime-and-sphinx-guide.readthedocs.io/en/latest/references.html#use-the-external-links-extension),
and it is configured with the 'extlinks' definitions in
the 'documentation/conf.py' file:
and it is configured with:

   'yocto_home': ('https://yoctoproject.org%s', None),
   'yocto_wiki': ('https://wiki.yoctoproject.org%s', None),
@@ -310,10 +299,6 @@ the 'documentation/conf.py' file:
   'yocto_git': ('https://git.yoctoproject.org%s', None),
   'oe_home': ('https://www.openembedded.org%s', None),
   'oe_lists': ('https://lists.openembedded.org%s', None),
   'oe_git': ('https://git.openembedded.org%s', None),
   'oe_wiki': ('https://www.openembedded.org/wiki%s', None),
   'oe_layerindex': ('https://layers.openembedded.org%s', None),
   'oe_layer': ('https://layers.openembedded.org/layerindex/branch/master/layer%s', None),

It creates convenient shortcuts which can be used throughout the
documentation rst files, as:

@@ -333,9 +318,3 @@ References to the bitbake manual can be done like this:

See the ":ref:`-D <bitbake:bitbake-user-manual/bitbake-user-manual-intro:usage and syntax>`" option
or
:term:`bitbake:BB_NUMBER_PARSE_THREADS`

Submitting documentation changes
================================

Please see the top level README file in this repository for details of where
to send patches.
@@ -1,19 +0,0 @@
.. SPDX-License-Identifier: CC-BY-SA-2.0-UK

=====================
BitBake Documentation
=====================

|

BitBake was originally a part of the OpenEmbedded project. It was inspired by
the Portage package management system used by the Gentoo Linux distribution. In
2004, the OpenEmbedded project was split the project into two distinct pieces:

- BitBake, a generic task executor
- OpenEmbedded, a metadata set utilized by BitBake

Today, BitBake is the primary build tool of OpenEmbedded based projects, such as
the Yocto Project.

The BitBake documentation can be found :doc:`here <bitbake:index>`.
@@ -8,11 +8,11 @@
|
||||
|
||||
Permission is granted to copy, distribute and/or modify this document under the
|
||||
terms of the `Creative Commons Attribution-Share Alike 2.0 UK: England & Wales
|
||||
<https://creativecommons.org/licenses/by-sa/2.0/uk/>`_ as published by Creative
|
||||
<http://creativecommons.org/licenses/by-sa/2.0/uk/>`_ as published by Creative
|
||||
Commons.
|
||||
|
||||
To report any inaccuracies or problems with this (or any other Yocto Project)
|
||||
manual, or to send additions or changes, please send email/patches to the Yocto
|
||||
Project documentation mailing list at ``docs@lists.yoctoproject.org`` or
|
||||
log into the Freenode ``#yocto`` channel.
|
||||
log into the freenode ``#yocto`` channel.
|
||||
|
||||
|
||||
@@ -20,7 +20,7 @@ build a reference embedded OS called Poky.
|
||||
(:term:`Build Host`) is not
|
||||
a native Linux system, you can still perform these steps by using
|
||||
CROss PlatformS (CROPS) and setting up a Poky container. See the
|
||||
:ref:`dev-manual/start:setting up to use cross platforms (crops)`
|
||||
:ref:`dev-manual/dev-manual-start:setting up to use cross platforms (crops)`
|
||||
section
|
||||
in the Yocto Project Development Tasks Manual for more
|
||||
information.
|
||||
@@ -34,12 +34,12 @@ build a reference embedded OS called Poky.
|
||||
compatible but not officially supported nor validated with
|
||||
WSLv2, if you still decide to use WSL please upgrade to WSLv2.
|
||||
|
||||
See the :ref:`dev-manual/start:setting up to use windows
|
||||
See the :ref:`dev-manual/dev-manual-start:setting up to use windows
|
||||
subsystem for linux (wslv2)` section in the Yocto Project Development
|
||||
Tasks Manual for more information.
|
||||
|
||||
If you want more conceptual or background information on the Yocto
|
||||
Project, see the :doc:`/overview-manual/index`.
|
||||
Project, see the :doc:`../overview-manual/overview-manual`.
|
||||
|
||||
Compatible Linux Distribution
|
||||
=============================
|
||||
@@ -52,23 +52,23 @@ following requirements:
|
||||
- Runs a supported Linux distribution (i.e. recent releases of Fedora,
|
||||
openSUSE, CentOS, Debian, or Ubuntu). For a list of Linux
|
||||
distributions that support the Yocto Project, see the
|
||||
:ref:`ref-manual/system-requirements:supported linux distributions`
|
||||
:ref:`ref-manual/ref-system-requirements:supported linux distributions`
|
||||
section in the Yocto Project Reference Manual. For detailed
|
||||
information on preparing your build host, see the
|
||||
:ref:`dev-manual/start:preparing the build host`
|
||||
:ref:`dev-manual/dev-manual-start:preparing the build host`
|
||||
section in the Yocto Project Development Tasks Manual.
|
||||
|
||||
-
|
||||
|
||||
- Git &MIN_GIT_VERSION; or greater
|
||||
- tar &MIN_TAR_VERSION; or greater
|
||||
- Python &MIN_PYTHON_VERSION; or greater.
|
||||
- gcc &MIN_GCC_VERSION; or greater.
|
||||
- Git 1.8.3.1 or greater
|
||||
- tar 1.28 or greater
|
||||
- Python 3.5.0 or greater.
|
||||
- gcc 5.0 or greater.
|
||||
|
||||
If your build host does not meet any of these three listed version
|
||||
requirements, you can take steps to prepare the system so that you
|
||||
can still use the Yocto Project. See the
|
||||
:ref:`ref-manual/system-requirements:required git, tar, python and gcc versions`
|
||||
:ref:`ref-manual/ref-system-requirements:required git, tar, python and gcc versions`
|
||||
section in the Yocto Project Reference Manual for information.
|
||||
|
||||
Build Host Packages
|
||||
@@ -85,7 +85,7 @@ distribution:
|
||||
.. note::
|
||||
|
||||
For host package requirements on all supported Linux distributions,
|
||||
see the :ref:`ref-manual/system-requirements:required packages for the build host`
|
||||
see the :ref:`ref-manual/ref-system-requirements:required packages for the build host`
|
||||
section in the Yocto Project Reference Manual.
|
||||
|
||||
Use Git to Clone Poky
|
||||
@@ -106,61 +106,46 @@ commands to clone the Poky repository.
|
||||
Resolving deltas: 100% (323116/323116), done.
|
||||
Checking connectivity... done.
|
||||
|
||||
Go to :yocto_wiki:`Releases wiki page </Releases>`, and choose a release
|
||||
codename (such as ``&DISTRO_NAME_NO_CAP;``), corresponding to either the
|
||||
latest stable release or a Long Term Support release.
|
||||
|
||||
Then move to the ``poky`` directory and take a look at existing branches:
|
||||
Move to the ``poky`` directory and take a look at the tags:
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
$ cd poky
|
||||
$ git branch -a
|
||||
.
|
||||
.
|
||||
.
|
||||
remotes/origin/HEAD -> origin/master
|
||||
remotes/origin/dunfell
|
||||
remotes/origin/dunfell-next
|
||||
.
|
||||
.
|
||||
.
|
||||
remotes/origin/gatesgarth
|
||||
remotes/origin/gatesgarth-next
|
||||
.
|
||||
.
|
||||
.
|
||||
remotes/origin/master
|
||||
remotes/origin/master-next
|
||||
$ git fetch --tags
|
||||
$ git tag
|
||||
1.1_M1.final
|
||||
1.1_M1.rc1
|
||||
1.1_M1.rc2
|
||||
1.1_M2.final
|
||||
1.1_M2.rc1
|
||||
.
|
||||
.
|
||||
.
|
||||
yocto-2.5
|
||||
yocto-2.5.1
|
||||
yocto-2.5.2
|
||||
yocto-2.6
|
||||
yocto-2.6.1
|
||||
yocto-2.6.2
|
||||
yocto-2.7
|
||||
yocto_1.5_M5.rc8
|
||||
|
||||
|
||||
For this example, check out the ``&DISTRO_NAME_NO_CAP;`` branch based on the
|
||||
``&DISTRO_NAME;`` release:
|
||||
For this example, check out the branch based on the
|
||||
``&DISTRO_REL_TAG;`` release:
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
$ git checkout -t origin/&DISTRO_NAME_NO_CAP; -b my-&DISTRO_NAME_NO_CAP;
|
||||
Branch 'my-&DISTRO_NAME_NO_CAP;' set up to track remote branch '&DISTRO_NAME_NO_CAP;' from 'origin'.
|
||||
Switched to a new branch 'my-&DISTRO_NAME_NO_CAP;'
|
||||
$ git checkout tags/&DISTRO_REL_TAG; -b my-&DISTRO_REL_TAG;
|
||||
Switched to a new branch 'my-&DISTRO_REL_TAG;'
|
||||
|
||||
The previous Git checkout command creates a local branch named
|
||||
``my-&DISTRO_NAME_NO_CAP;``. The files available to you in that branch
|
||||
exactly match the repository's files in the ``&DISTRO_NAME_NO_CAP;``
|
||||
release branch.
|
||||
|
||||
Note that you can regularly type the following command in the same directory
|
||||
to keep your local files in sync with the release branch:
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
$ git pull
|
||||
``my-&DISTRO_REL_TAG;``. The files available to you in that branch exactly
|
||||
match the repository's files in the ``&DISTRO_NAME_NO_CAP;`` development
|
||||
branch at the time of the Yocto Project &DISTRO_REL_TAG; release.
|
||||
|
||||
For more options and information about accessing Yocto Project related
|
||||
repositories, see the
|
||||
:ref:`dev-manual/start:locating yocto project source files`
|
||||
:ref:`dev-manual/dev-manual-start:locating yocto project source files`
|
||||
section in the Yocto Project Development Tasks Manual.
|
||||
|
||||
Building Your Image
|
||||
@@ -180,18 +165,18 @@ an entire Linux distribution, including the toolchain, from source.
|
||||
infrastructure resources and get that information. A good starting
|
||||
point could also be to check your web browser settings. Finally,
|
||||
you can find more information on the
|
||||
":yocto_wiki:`Working Behind a Network Proxy </Working_Behind_a_Network_Proxy>`"
|
||||
":yocto_wiki:`Working Behind a Network Proxy </wiki/Working_Behind_a_Network_Proxy>`"
|
||||
page of the Yocto Project Wiki.
|
||||
|
||||
#. **Initialize the Build Environment:** From within the ``poky``
|
||||
directory, run the :ref:`ref-manual/structure:\`\`oe-init-build-env\`\``
|
||||
directory, run the :ref:`ref-manual/ref-structure:\`\`oe-init-build-env\`\``
|
||||
environment
|
||||
setup script to define Yocto Project's build environment on your
|
||||
build host.
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
$ cd poky
|
||||
$ cd ~/poky
|
||||
$ source oe-init-build-env
|
||||
You had no conf/local.conf file. This configuration file has therefore been
|
||||
created for you with some default values. You may wish to edit it to, for
|
||||
@@ -204,7 +189,7 @@ an entire Linux distribution, including the toolchain, from source.
|
||||
|
||||
The Yocto Project has extensive documentation about OE including a reference
|
||||
manual which can be found at:
|
||||
https://docs.yoctoproject.org
|
||||
http://yoctoproject.org/documentation
|
||||
|
||||
For more information about OpenEmbedded see their website:
|
||||
http://www.openembedded.org/
|
||||
@@ -219,7 +204,7 @@ an entire Linux distribution, including the toolchain, from source.
|
||||
meta-toolchain
|
||||
meta-ide-support
|
||||
|
||||
You can also run generated QEMU images with a command like 'runqemu qemux86-64'
|
||||
You can also run generated qemu images with a command like 'runqemu qemux86-64'
|
||||
|
||||
Among other things, the script creates the :term:`Build Directory`, which is
|
||||
``build`` in this case and is located in the :term:`Source Directory`. After
|
||||
@@ -259,9 +244,9 @@ an entire Linux distribution, including the toolchain, from source.
|
||||
$ bitbake core-image-sato
|
||||
|
||||
For information on using the ``bitbake`` command, see the
|
||||
:ref:`overview-manual/concepts:bitbake` section in the Yocto Project Overview and
|
||||
:ref:`usingpoky-components-bitbake` section in the Yocto Project Overview and
|
||||
Concepts Manual, or see the ":ref:`BitBake Command
|
||||
<bitbake:bitbake-user-manual/bitbake-user-manual-intro:the bitbake command>`" section in the BitBake User Manual.
|
||||
<bitbake:bitbake-user-manual-command>`" section in the BitBake User Manual.
|
||||
|
||||
#. **Simulate Your Image Using QEMU:** Once this particular image is
|
||||
built, you can start QEMU, which is a Quick EMUlator that ships with
|
||||
@@ -272,7 +257,7 @@ an entire Linux distribution, including the toolchain, from source.
|
||||
$ runqemu qemux86-64
|
||||
|
||||
If you want to learn more about running QEMU, see the
|
||||
:ref:`dev-manual/qemu:using the quick emulator (qemu)` chapter in
|
||||
:ref:`dev-manual/dev-manual-qemu:using the quick emulator (qemu)` chapter in
|
||||
the Yocto Project Development Tasks Manual.
|
||||
|
||||
#. **Exit QEMU:** Exit QEMU by either clicking on the shutdown icon or by typing
|
||||
@@ -308,7 +293,7 @@ Follow these steps to add a hardware layer:
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
$ cd poky
|
||||
$ cd ~/poky
|
||||
$ git clone https://github.com/kraj/meta-altera.git
|
||||
Cloning into 'meta-altera'...
|
||||
remote: Counting objects: 25170, done.
|
||||
@@ -352,7 +337,7 @@ Follow these steps to add a hardware layer:
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
$ cd poky/build
|
||||
$ cd ~/poky/build
|
||||
$ bitbake-layers add-layer ../meta-altera
|
||||
NOTE: Starting bitbake server...
|
||||
Parsing recipes: 100% |##################################################################| Time: 0:00:32
|
||||
@@ -361,7 +346,7 @@ Follow these steps to add a hardware layer:
|
||||
|
||||
You can find
|
||||
more information on adding layers in the
|
||||
:ref:`dev-manual/common-tasks:adding a layer using the \`\`bitbake-layers\`\` script`
|
||||
:ref:`dev-manual/dev-manual-common-tasks:adding a layer using the \`\`bitbake-layers\`\` script`
|
||||
section.
|
||||
|
||||
Completing these steps has added the ``meta-altera`` layer to your Yocto
|
||||
@@ -389,14 +374,14 @@ The following commands run the tool to create a layer named
|
||||
|
||||
.. code-block:: shell
|
||||
|
||||
$ cd poky
|
||||
$ cd ~/poky
|
||||
$ bitbake-layers create-layer meta-mylayer
|
||||
NOTE: Starting bitbake server...
|
||||
Add your new layer with 'bitbake-layers add-layer meta-mylayer'
|
||||
|
||||
For more information
|
||||
on layers and how to create them, see the
|
||||
:ref:`dev-manual/common-tasks:creating a general layer using the \`\`bitbake-layers\`\` script`
|
||||
:ref:`dev-manual/dev-manual-common-tasks:creating a general layer using the \`\`bitbake-layers\`\` script`
|
||||
section in the Yocto Project Development Tasks Manual.
|
||||
|
||||
Where To Go Next
|
||||
@@ -412,14 +397,14 @@ information including the website, wiki pages, and user manuals:
|
||||
Development Community into which you can tap.
|
||||
|
||||
- **Developer Screencast:** The `Getting Started with the Yocto Project -
|
||||
New Developer Screencast Tutorial <https://vimeo.com/36450321>`__
|
||||
New Developer Screencast Tutorial <http://vimeo.com/36450321>`__
|
||||
provides a 30-minute video created for users unfamiliar with the
|
||||
Yocto Project but familiar with Linux build hosts. While this
|
||||
screencast is somewhat dated, the introductory and fundamental
|
||||
concepts are useful for the beginner.
|
||||
|
||||
- **Yocto Project Overview and Concepts Manual:** The
|
||||
:doc:`/overview-manual/index` is a great
|
||||
:doc:`../overview-manual/overview-manual` is a great
|
||||
place to start to learn about the Yocto Project. This manual
|
||||
introduces you to the Yocto Project and its development environment.
|
||||
The manual also provides conceptual information for various aspects
|
||||
@@ -44,7 +44,7 @@ machine or platform name, which is "bsp_root_name" in the above form.
|
||||
To help understand the BSP layer concept, consider the BSPs that the
|
||||
Yocto Project supports and provides with each release. You can see the
|
||||
layers in the
|
||||
:ref:`overview-manual/development-environment:yocto project source repositories`
|
||||
:ref:`overview-manual/overview-manual-development-environment:yocto project source repositories`
|
||||
through
|
||||
a web interface at :yocto_git:`/`. If you go to that interface,
|
||||
you will find a list of repositories under "Yocto Metadata Layers".
|
||||
@@ -72,7 +72,7 @@ For information on typical BSP development workflow, see the
|
||||
section. For more
|
||||
information on how to set up a local copy of source files from a Git
|
||||
repository, see the
|
||||
:ref:`dev-manual/start:locating yocto project source files`
|
||||
:ref:`dev-manual/dev-manual-start:locating yocto project source files`
|
||||
section in the Yocto Project Development Tasks Manual.
|
||||
|
||||
The BSP layer's base directory (``meta-bsp_root_name``) is the root
|
||||
@@ -81,7 +81,7 @@ directory of that Layer. This directory is what you add to the
|
||||
``conf/bblayers.conf`` file found in your
|
||||
:term:`Build Directory`, which is
|
||||
established after you run the OpenEmbedded build environment setup
|
||||
script (i.e. :ref:`ref-manual/structure:\`\`oe-init-build-env\`\``).
|
||||
script (i.e. :ref:`ref-manual/ref-structure:\`\`oe-init-build-env\`\``).
|
||||
Adding the root directory allows the :term:`OpenEmbedded Build System`
|
||||
to recognize the BSP
|
||||
layer and from it build an image. Here is an example: ::
|
||||
@@ -128,7 +128,7 @@ you want to work with, such as: ::
|
||||
and so on.
|
||||
|
||||
For more information on layers, see the
|
||||
":ref:`dev-manual/common-tasks:understanding and creating layers`"
|
||||
":ref:`dev-manual/dev-manual-common-tasks:understanding and creating layers`"
|
||||
section of the Yocto Project Development Tasks Manual.
|
||||
|
||||
Preparing Your Build Host to Work With BSP Layers
|
||||
@@ -146,7 +146,7 @@ section.
|
||||
:ref:`bsp-guide/bsp:example filesystem layout` section.
|
||||
|
||||
#. *Set Up the Build Environment:* Be sure you are set up to use BitBake
|
||||
in a shell. See the ":ref:`dev-manual/start:preparing the build host`"
|
||||
in a shell. See the ":ref:`dev-manual/dev-manual-start:preparing the build host`"
|
||||
section in the Yocto Project Development Tasks Manual for information on how
|
||||
to get a build host ready that is either a native Linux machine or a machine
|
||||
that uses CROPS.
|
||||
@@ -154,10 +154,10 @@ section.
|
||||
#. *Clone the poky Repository:* You need to have a local copy of the
|
||||
Yocto Project :term:`Source Directory` (i.e. a local
|
||||
``poky`` repository). See the
|
||||
":ref:`dev-manual/start:cloning the \`\`poky\`\` repository`" and
|
||||
":ref:`dev-manual/dev-manual-start:cloning the \`\`poky\`\` repository`" and
|
||||
possibly the
|
||||
":ref:`dev-manual/start:checking out by branch in poky`" or
|
||||
":ref:`dev-manual/start:checking out by tag in poky`"
|
||||
":ref:`dev-manual/dev-manual-start:checking out by branch in poky`" or
|
||||
":ref:`dev-manual/dev-manual-start:checking out by tag in poky`"
|
||||
sections
|
||||
all in the Yocto Project Development Tasks Manual for information on
|
||||
how to clone the ``poky`` repository and check out the appropriate
|
||||
@@ -172,7 +172,8 @@ section.
|
||||
#. *Optionally Clone the meta-intel BSP Layer:* If your hardware is
|
||||
based on current Intel CPUs and devices, you can leverage this BSP
|
||||
layer. For details on the ``meta-intel`` BSP layer, see the layer's
|
||||
:yocto_git:`README </meta-intel/tree/README>` file.
|
||||
`README <http://git.yoctoproject.org/cgit/cgit.cgi/meta-intel/tree/README>`__
|
||||
file.
|
||||
|
||||
#. *Navigate to Your Source Directory:* Typically, you set up the
|
||||
``meta-intel`` Git repository inside the :term:`Source Directory` (e.g.
|
||||
@@ -205,7 +206,7 @@ section.
|
||||
|
||||
To see the available branch names in a cloned repository, use the ``git
|
||||
branch -al`` command. See the
|
||||
":ref:`dev-manual/start:checking out by branch in poky`"
|
||||
":ref:`dev-manual/dev-manual-start:checking out by branch in poky`"
|
||||
section in the Yocto Project Development Tasks Manual for more
|
||||
information.
|
||||
|
||||
@@ -229,7 +230,7 @@ section.
|
||||
|
||||
#. *Initialize the Build Environment:* While in the root directory of
|
||||
the Source Directory (i.e. ``poky``), run the
|
||||
:ref:`ref-manual/structure:\`\`oe-init-build-env\`\`` environment
|
||||
:ref:`ref-manual/ref-structure:\`\`oe-init-build-env\`\`` environment
|
||||
setup script to define the OpenEmbedded build environment on your
|
||||
build host. ::
|
||||
|
||||
@@ -240,6 +241,8 @@ section.
|
||||
the script runs, your current working directory is set to the ``build``
|
||||
directory.
|
||||
|
||||
.. _bsp-filelayout:
|
||||
|
||||
Example Filesystem Layout
|
||||
=========================
|
||||
|
||||
@@ -250,10 +253,10 @@ standardization of software support for hardware.
|
||||
The proposed form described in this section does have elements that are
|
||||
specific to the OpenEmbedded build system. It is intended that
|
||||
developers can use this structure with other build systems besides the
|
||||
OpenEmbedded build system. It is also intended that it will be simple
|
||||
OpenEmbedded build system. It is also intended that it will be be simple
|
||||
to extract information and convert it to other formats if required. The
|
||||
OpenEmbedded build system, through its standard :ref:`layers mechanism
|
||||
<overview-manual/yp-intro:the yocto project layer model>`, can
|
||||
<overview-manual/overview-manual-yp-intro:the yocto project layer model>`, can
|
||||
directly accept the format described as a layer. The BSP layer captures
|
||||
all the hardware-specific details in one place using a standard format,
|
||||
which is useful for any person wishing to use the hardware platform
|
||||
@@ -289,7 +292,7 @@ individual BSPs could differ. ::
|
||||
meta-bsp_root_name/recipes-kernel/linux/linux-yocto_kernel_rev.bbappend
|
||||
|
||||
Below is an example of the Raspberry Pi BSP layer that is available from
|
||||
the :yocto_git:`Source Repositories <>`:
|
||||
the :yocto_git:`Source Respositories <>`:
|
||||
|
||||
.. code-block:: none
|
||||
|
||||
@@ -448,6 +451,8 @@ the :yocto_git:`Source Repositories <>`:
|
||||
|
||||
The following sections describe each part of the proposed BSP format.
|
||||
|
||||
.. _bsp-filelayout-license:
|
||||
|
||||
License Files
|
||||
-------------
|
||||
|
||||
@@ -463,9 +468,11 @@ requirements are handled with the ``COPYING.MIT`` file.
|
||||
Licensing files can be MIT, BSD, GPLv*, and so forth. These files are
|
||||
recommended for the BSP but are optional and totally up to the BSP
|
||||
developer. For information on how to maintain license compliance, see
|
||||
the ":ref:`dev-manual/common-tasks:maintaining open source license compliance during your product's lifecycle`"
|
||||
the ":ref:`dev-manual/dev-manual-common-tasks:maintaining open source license compliance during your product's lifecycle`"
|
||||
section in the Yocto Project Development Tasks Manual.
|
||||
|
||||
.. _bsp-filelayout-readme:
|
||||
|
||||
README File
|
||||
-----------
|
||||
|
||||
@@ -481,6 +488,8 @@ At a minimum, the ``README`` file must contain a list of dependencies,
|
||||
such as the names of any other layers on which the BSP depends and the
|
||||
name of the BSP maintainer with his or her contact information.
|
||||
|
||||
.. _bsp-filelayout-readme-sources:
|
||||
|
||||
README.sources File
|
||||
-------------------
|
||||
|
||||
@@ -500,6 +509,8 @@ used to generate the images that ship with the BSP.
|
||||
If the BSP's ``binary`` directory is missing or the directory has no images, an
|
||||
existing ``README.sources`` file is meaningless and usually does not exist.
|
||||
|
||||
.. _bsp-filelayout-binary:
|
||||
|
||||
Pre-built User Binaries
|
||||
-----------------------
|
||||
|
||||
@@ -523,6 +534,8 @@ hardware. Additionally, the
|
||||
present to locate the sources used to build the images and provide
|
||||
information on the Metadata.
|
||||
|
||||
.. _bsp-filelayout-layer:
|
||||
|
||||
Layer Configuration File
|
||||
------------------------
|
||||
|
||||
@@ -573,6 +586,8 @@ This file simply makes :term:`BitBake` aware of the recipes and configuration
|
||||
directories. The file must exist so that the OpenEmbedded build system can
|
||||
recognize the BSP.
|
||||
|
||||
.. _bsp-filelayout-machine:
|
||||
|
||||
Hardware Configuration Options
|
||||
------------------------------
|
||||
|
||||
@@ -589,7 +604,7 @@ filenames correspond to the values to which users have set the
|
||||
|
||||
These files define things such as the kernel package to use
|
||||
(:term:`PREFERRED_PROVIDER` of
|
||||
:ref:`virtual/kernel <dev-manual/common-tasks:using virtual providers>`),
|
||||
:ref:`virtual/kernel <dev-manual/dev-manual-common-tasks:using virtual providers>`),
|
||||
the hardware drivers to include in different types of images, any
|
||||
special software components that are needed, any bootloader information,
|
||||
and also any special image format requirements.
|
||||
@@ -611,6 +626,8 @@ configuration file. For example, the Raspberry Pi BSP
|
||||
|
||||
include conf/machine/include/rpi-base.inc
|
||||
|
||||
.. _bsp-filelayout-misc-recipes:
|
||||
|
||||
Miscellaneous BSP-Specific Recipe Files
|
||||
---------------------------------------
|
||||
|
||||
@@ -641,6 +658,8 @@ directory. Here is the ``machconfig`` file for the Raspberry Pi BSP: ::
|
||||
``meta/recipes-bsp/formfactor/formfactor_0.0.bb``, which is found in
|
||||
the :term:`Source Directory`.
|
||||
|
||||
.. _bsp-filelayout-recipes-graphics:
|
||||
|
||||
Display Support Files
|
||||
---------------------
|
||||
|
||||
@@ -652,6 +671,8 @@ This optional directory contains recipes for the BSP if it has special
|
||||
requirements for graphics support. All files that are needed for the BSP
|
||||
to support a display are kept here.
|
||||
|
||||
.. _bsp-filelayout-kernel:
|
||||
|
||||
Linux Kernel Configuration
|
||||
--------------------------
|
||||
|
||||
@@ -693,7 +714,7 @@ BSP settings to the kernel, thus configuring the kernel for your
|
||||
particular BSP.
|
||||
|
||||
You can find more information on what your append file should contain in
|
||||
the ":ref:`kernel-dev/common:creating the append file`" section
|
||||
the ":ref:`kernel-dev/kernel-dev-common:creating the append file`" section
|
||||
in the Yocto Project Linux Kernel Development Manual.
|
||||
|
||||
An alternate scenario is when you create your own kernel recipe for the
|
||||
@@ -726,7 +747,7 @@ workflow.
|
||||
:align: center
|
||||
|
||||
#. *Set up Your Host Development System to Support Development Using the
|
||||
Yocto Project*: See the ":ref:`dev-manual/start:preparing the build host`"
|
||||
Yocto Project*: See the ":ref:`dev-manual/dev-manual-start:preparing the build host`"
|
||||
section in the Yocto Project Development Tasks Manual for options on how to
|
||||
get a system ready to use the Yocto Project.
|
||||
|
||||
@@ -754,9 +775,9 @@ workflow.
|
||||
are kept. The key point for a layer is that it is an isolated area
|
||||
that contains all the relevant information for the project that the
|
||||
OpenEmbedded build system knows about. For more information on
|
||||
layers, see the ":ref:`overview-manual/yp-intro:the yocto project layer model`"
|
||||
layers, see the ":ref:`overview-manual/overview-manual-yp-intro:the yocto project layer model`"
|
||||
section in the Yocto Project Overview and Concepts Manual. You can also
|
||||
reference the ":ref:`dev-manual/common-tasks:understanding and creating layers`"
|
||||
reference the ":ref:`dev-manual/dev-manual-common-tasks:understanding and creating layers`"
|
||||
section in the Yocto Project Development Tasks Manual. For more
|
||||
information on BSP layers, see the ":ref:`bsp-guide/bsp:bsp layers`"
|
||||
section.
|
||||
@@ -815,7 +836,7 @@ workflow.
|
||||
key configuration files are configured appropriately: the
|
||||
``conf/local.conf`` and the ``conf/bblayers.conf`` file. You must
|
||||
make the OpenEmbedded build system aware of your new layer. See the
|
||||
":ref:`dev-manual/common-tasks:enabling your layer`"
|
||||
":ref:`dev-manual/dev-manual-common-tasks:enabling your layer`"
|
||||
section in the Yocto Project Development Tasks Manual for information
|
||||
on how to let the build system know about your new layer.
|
||||
|
||||
@@ -826,7 +847,7 @@ workflow.
|
||||
|
||||
The build process supports several types of images to satisfy
|
||||
different needs. See the
|
||||
":ref:`ref-manual/images:Images`" chapter in the Yocto
|
||||
":ref:`ref-manual/ref-images:Images`" chapter in the Yocto
|
||||
Project Reference Manual for information on supported images.
|
||||
|
||||
Requirements and Recommendations for Released BSPs
|
||||
@@ -846,14 +867,14 @@ Before looking at BSP requirements, you should consider the following:
|
||||
layer that can be added to the Yocto Project. For guidelines on
|
||||
creating a layer that meets these base requirements, see the
|
||||
":ref:`bsp-guide/bsp:bsp layers`" section in this manual and the
|
||||
":ref:`dev-manual/common-tasks:understanding and creating layers`"
|
||||
":ref:`dev-manual/dev-manual-common-tasks:understanding and creating layers`"
|
||||
section in the Yocto Project Development Tasks Manual.
|
||||
|
||||
- The requirements in this section apply regardless of how you package
|
||||
a BSP. You should consult the packaging and distribution guidelines
|
||||
for your specific release process. For an example of packaging and
|
||||
distribution requirements, see the ":yocto_wiki:`Third Party BSP Release
|
||||
Process </Third_Party_BSP_Release_Process>`"
|
||||
Process </wiki/Third_Party_BSP_Release_Process>`"
|
||||
wiki page.
|
||||
|
||||
- The requirements for the BSP as it is made available to a developer
|
||||
@@ -894,20 +915,20 @@ Yocto Project:
|
||||
``recipes-*`` subdirectories specific to the recipe's function, or
|
||||
within a subdirectory containing a set of closely-related recipes.
|
||||
The recipes themselves should follow the general guidelines for
|
||||
recipes used in the Yocto Project found in the ":oe_wiki:`OpenEmbedded
|
||||
Style Guide </Styleguide>`".
|
||||
recipes used in the Yocto Project found in the "`OpenEmbedded Style
|
||||
Guide <http://openembedded.org/wiki/Styleguide>`__".
|
||||
|
||||
- *License File:* You must include a license file in the
|
||||
``meta-bsp_root_name`` directory. This license covers the BSP
|
||||
Metadata as a whole. You must specify which license to use since no
|
||||
default license exists when one is not specified. See the
|
||||
:yocto_git:`COPYING.MIT </meta-raspberrypi/tree/COPYING.MIT>`
|
||||
:yocto_git:`COPYING.MIT </cgit.cgi/meta-raspberrypi/tree/COPYING.MIT>`
|
||||
file for the Raspberry Pi BSP in the ``meta-raspberrypi`` BSP layer
|
||||
as an example.
|
||||
|
||||
- *README File:* You must include a ``README`` file in the
|
||||
``meta-bsp_root_name`` directory. See the
|
||||
:yocto_git:`README.md </meta-raspberrypi/tree/README.md>`
|
||||
:yocto_git:`README.md </cgit.cgi/meta-raspberrypi/tree/README.md>`
|
||||
file for the Raspberry Pi BSP in the ``meta-raspberrypi`` BSP layer
|
||||
as an example.
|
||||
|
||||
@@ -928,7 +949,7 @@ Yocto Project:
|
||||
- The name and contact information for the BSP layer maintainer.
|
||||
This is the person to whom patches and questions should be sent.
|
||||
For information on how to find the right person, see the
|
||||
":ref:`dev-manual/common-tasks:submitting a change to the yocto project`"
|
||||
":ref:`dev-manual/dev-manual-common-tasks:submitting a change to the yocto project`"
|
||||
section in the Yocto Project Development Tasks Manual.
|
||||
|
||||
- Instructions on how to build the BSP using the BSP layer.
|
||||
@@ -1013,7 +1034,7 @@ If you plan on customizing a recipe for a particular BSP, you need to do
|
||||
the following:
|
||||
|
||||
- Create a ``*.bbappend`` file for the modified recipe. For information on using
|
||||
append files, see the ":ref:`dev-manual/common-tasks:using
|
||||
append files, see the ":ref:`dev-manual/dev-manual-common-tasks:using
|
||||
.bbappend files in your layer`" section in the Yocto Project Development
|
||||
Tasks Manual.
|
||||
|
||||
@@ -1036,7 +1057,7 @@ the following:
|
||||
to reside in a machine-specific directory.
|
||||
|
||||
Following is a specific example to help you better understand the
|
||||
process. This example customizes a recipe by adding a
|
||||
process. This example customizes customizes a recipe by adding a
|
||||
BSP-specific configuration file named ``interfaces`` to the
|
||||
``init-ifupdown_1.0.bb`` recipe for machine "xyz" where the BSP layer
|
||||
also supports several other machines:
|
||||
@@ -1118,7 +1139,7 @@ list describes them in order of preference:
|
||||
Specifying the matching license string signifies that you agree to
|
||||
the license. Thus, the build system can build the corresponding
|
||||
recipe and include the component in the image. See the
|
||||
":ref:`dev-manual/common-tasks:enabling commercially licensed recipes`"
|
||||
":ref:`dev-manual/dev-manual-common-tasks:enabling commercially licensed recipes`"
|
||||
section in the Yocto Project Development Tasks Manual for details on
|
||||
how to use these variables.
|
||||
|
||||
@@ -1170,7 +1191,7 @@ Use these steps to create a BSP layer:
|
||||
``create-layer`` subcommand to create a new general layer. For
|
||||
instructions on how to create a general layer using the
|
||||
``bitbake-layers`` script, see the
|
||||
":ref:`dev-manual/common-tasks:creating a general layer using the \`\`bitbake-layers\`\` script`"
|
||||
":ref:`dev-manual/dev-manual-common-tasks:creating a general layer using the \`\`bitbake-layers\`\` script`"
|
||||
section in the Yocto Project Development Tasks Manual.
|
||||
|
||||
- *Create a Layer Configuration File:* Every layer needs a layer
|
||||
@@ -1180,16 +1201,16 @@ Use these steps to create a BSP layer:
|
||||
:yocto_git:`Source Repositories <>`. To get examples of what you need
|
||||
in your configuration file, locate a layer (e.g. "meta-ti") and
|
||||
examine the
|
||||
:yocto_git:`local.conf </meta-ti/tree/conf/layer.conf>`
|
||||
:yocto_git:`local.conf </cgit/cgit.cgi/meta-ti/tree/conf/layer.conf>`
|
||||
file.
|
||||
|
||||
- *Create a Machine Configuration File:* Create a
|
||||
``conf/machine/bsp_root_name.conf`` file. See
|
||||
:yocto_git:`meta-yocto-bsp/conf/machine </poky/tree/meta-yocto-bsp/conf/machine>`
|
||||
:yocto_git:`meta-yocto-bsp/conf/machine </cgit/cgit.cgi/poky/tree/meta-yocto-bsp/conf/machine>`
|
||||
for sample ``bsp_root_name.conf`` files. Other samples such as
|
||||
:yocto_git:`meta-ti </meta-ti/tree/conf/machine>`
|
||||
:yocto_git:`meta-ti </cgit/cgit.cgi/meta-ti/tree/conf/machine>`
|
||||
and
|
||||
:yocto_git:`meta-freescale </meta-freescale/tree/conf/machine>`
|
||||
:yocto_git:`meta-freescale </cgit/cgit.cgi/meta-freescale/tree/conf/machine>`
|
||||
exist from other vendors that have more specific machine and tuning
|
||||
examples.
|
||||
|
||||
@@ -1197,13 +1218,13 @@ Use these steps to create a BSP layer:
|
||||
``recipes-kernel/linux`` by either using a kernel append file or a
|
||||
new custom kernel recipe file (e.g. ``yocto-linux_4.12.bb``). The BSP
|
||||
layers mentioned in the previous step also contain different kernel
|
||||
examples. See the ":ref:`kernel-dev/common:modifying an existing recipe`"
|
||||
examples. See the ":ref:`kernel-dev/kernel-dev-common:modifying an existing recipe`"
|
||||
section in the Yocto Project Linux Kernel Development Manual for
|
||||
information on how to create a custom kernel.
|
||||
|
||||
The remainder of this section provides a description of the Yocto
|
||||
Project reference BSP for Beaglebone, which resides in the
|
||||
:yocto_git:`meta-yocto-bsp </poky/tree/meta-yocto-bsp>`
|
||||
:yocto_git:`meta-yocto-bsp </cgit/cgit.cgi/poky/tree/meta-yocto-bsp>`
|
||||
layer.
|
||||
|
||||
BSP Layer Configuration Example
|
||||
@@ -1230,7 +1251,7 @@ configuration files is to examine various files for BSP from the
|
||||
:yocto_git:`Source Repositories <>`.
|
||||
|
||||
For a detailed description of this particular layer configuration file,
|
||||
see ":ref:`step 3 <dev-manual/common-tasks:creating your own layer>`"
|
||||
see ":ref:`step 3 <dev-manual/dev-manual-common-tasks:creating your own layer>`"
|
||||
in the discussion that describes how to create layers in the Yocto
|
||||
Project Development Tasks Manual.
|
||||
|
||||
@@ -1305,7 +1326,7 @@ the example reference machine configuration file for the BeagleBone
|
||||
development boards. Realize that much more can be defined as part of a
|
||||
machine's configuration file. In general, you can learn about related
|
||||
variables that this example does not have by locating the variables in
|
||||
the ":ref:`ref-manual/variables:variables glossary`" in the Yocto
|
||||
the ":ref:`ref-manual/ref-variables:variables glossary`" in the Yocto
|
||||
Project Reference Manual.
|
||||
|
||||
- :term:`PREFERRED_PROVIDER_virtual/xserver <PREFERRED_PROVIDER>`:
|
||||
@@ -1360,7 +1381,7 @@ Project Reference Manual.
|
||||
`JFFS2 <https://en.wikipedia.org/wiki/JFFS2>`__ image.
|
||||
|
||||
- :term:`WKS_FILE`: The location of
|
||||
the :ref:`Wic kickstart <ref-manual/kickstart:openembedded kickstart (\`\`.wks\`\`) reference>` file used
|
||||
the :ref:`Wic kickstart <ref-manual/ref-kickstart:openembedded kickstart (\`\`.wks\`\`) reference>` file used
|
||||
by the OpenEmbedded build system to create a partitioned image
|
||||
(image.wic).
|
||||
|
||||
@@ -1412,7 +1433,7 @@ Project Reference Manual.
|
||||
.. note::
|
||||
|
||||
For more information on how the SPL variables are used, see the
|
||||
:yocto_git:`u-boot.inc </poky/tree/meta/recipes-bsp/u-boot/u-boot.inc>`
|
||||
:yocto_git:`u-boot.inc </cgit/cgit.cgi/poky/tree/meta/recipes-bsp/u-boot/u-boot.inc>`
|
||||
include file.
|
||||
|
||||
- :term:`UBOOT_* <UBOOT_ENTRYPOINT>`: Defines
|
||||
@@ -1456,7 +1477,7 @@ The ``meta-yocto-bsp/recipes-kernel/linux`` directory in the layer contains
|
||||
metadata used to build the kernel. In this case, a kernel append file
|
||||
(i.e. ``linux-yocto_5.0.bbappend``) is used to override an established
|
||||
kernel recipe (i.e. ``linux-yocto_5.0.bb``), which is located in
|
||||
:yocto_git:`/poky/tree/meta/recipes-kernel/linux`.
|
||||
:yocto_git:`/cgit/cgit.cgi/poky/tree/meta/recipes-kernel/linux`.
|
||||
|
||||
Following is the contents of the append file: ::
|
||||
|
||||
|
||||
@@ -16,8 +16,7 @@ import os
import sys
import datetime

current_version = "3.3.4"
bitbake_version = "1.50"
current_version = "3.2.1"

# String used in sidebar
version = 'Version: ' + current_version

@@ -34,9 +33,6 @@ author = 'The Linux Foundation'

# -- General configuration ---------------------------------------------------

# Prevent building with an outdated version of sphinx
needs_sphinx = "3.1"

# to load local extension from the folder 'sphinx'
sys.path.insert(0, os.path.abspath('sphinx'))

@@ -72,25 +68,21 @@ rst_prolog = """

# external links and substitutions
extlinks = {
    'yocto_home': ('https://www.yoctoproject.org%s', None),
    'yocto_wiki': ('https://wiki.yoctoproject.org/wiki%s', None),
    'yocto_home': ('https://yoctoproject.org%s', None),
    'yocto_wiki': ('https://wiki.yoctoproject.org%s', None),
    'yocto_dl': ('https://downloads.yoctoproject.org%s', None),
    'yocto_lists': ('https://lists.yoctoproject.org%s', None),
    'yocto_bugs': ('https://bugzilla.yoctoproject.org%s', None),
    'yocto_ab': ('https://autobuilder.yoctoproject.org%s', None),
    'yocto_docs': ('https://docs.yoctoproject.org%s', None),
    'yocto_git': ('https://git.yoctoproject.org/cgit/cgit.cgi%s', None),
    'yocto_git': ('https://git.yoctoproject.org%s', None),
    'oe_home': ('https://www.openembedded.org%s', None),
    'oe_lists': ('https://lists.openembedded.org%s', None),
    'oe_git': ('https://git.openembedded.org%s', None),
    'oe_wiki': ('https://www.openembedded.org/wiki%s', None),
    'oe_layerindex': ('https://layers.openembedded.org%s', None),
    'oe_layer': ('https://layers.openembedded.org/layerindex/branch/master/layer%s', None),
}

# Intersphinx config to use cross reference with Bitbake user manual
intersphinx_mapping = {
    'bitbake': ('https://docs.yoctoproject.org/bitbake/' + bitbake_version, None)
    'bitbake': ('https://docs.yoctoproject.org/bitbake/1.48', None)
}

# -- Options for HTML output -------------------------------------------------

@@ -132,8 +124,3 @@ html_last_updated_fmt = '%b %d, %Y'

# Remove the trailing 'dot' in section numbers
html_secnumber_suffix = " "

latex_elements = {
    'passoptionstopackages': '\PassOptionsToPackage{bookmarksdepth=5}{hyperref}',
    'preamble': '\setcounter{tocdepth}{2}',
}
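The extlinks table in the hunk above works by simple '%s' substitution: the role target is spliced into the base URL. A small standalone illustration of that expansion (independent of Sphinx itself; the two entries are copied from the configuration shown above and the example target path is only illustrative):

    # Two entries mirroring the extlinks configuration shown above.
    extlinks = {
        'yocto_git': ('https://git.yoctoproject.org%s', None),
        'yocto_wiki': ('https://wiki.yoctoproject.org%s', None),
    }

    def expand_role(role, target):
        base_url, _caption = extlinks[role]
        return base_url % target

    print(expand_role('yocto_git', '/poky/tree/meta-yocto-bsp'))
    # -> https://git.yoctoproject.org/poky/tree/meta-yocto-bsp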
File diff suppressed because it is too large
@@ -4,6 +4,8 @@
|
||||
The Yocto Project Development Tasks Manual
|
||||
******************************************
|
||||
|
||||
.. _dev-welcome:
|
||||
|
||||
Welcome
|
||||
=======
|
||||
|
||||
@@ -31,13 +33,13 @@ This manual provides the following:
|
||||
This manual does not provide the following:
|
||||
|
||||
- Redundant Step-by-step Instructions: For example, the
|
||||
:doc:`/sdk-manual/index` manual contains detailed
|
||||
:doc:`../sdk-manual/sdk-manual` manual contains detailed
|
||||
instructions on how to install an SDK, which is used to develop
|
||||
applications for target hardware.
|
||||
|
||||
- Reference or Conceptual Material: This type of material resides in an
|
||||
appropriate reference manual. For example, system variables are
|
||||
documented in the :doc:`/ref-manual/index`.
|
||||
documented in the :doc:`../ref-manual/ref-manual`.
|
||||
|
||||
- Detailed Public Information Not Specific to the Yocto Project: For
|
||||
example, exhaustive information on how to use the Source Control
|
||||
@@ -52,7 +54,7 @@ supplemental information is recommended for full comprehension. For
|
||||
introductory information on the Yocto Project, see the
|
||||
:yocto_home:`Yocto Project Website <>`. If you want to build an image with no
|
||||
knowledge of Yocto Project as a way of quickly testing it out, see the
|
||||
:doc:`/brief-yoctoprojectqs/index` document.
|
||||
:doc:`../brief-yoctoprojectqs/brief-yoctoprojectqs` document.
|
||||
|
||||
For a comprehensive list of links and other documentation, see the
|
||||
":ref:`ref-manual/resources:links and related documentation`"
|
||||
@@ -10,6 +10,8 @@ This chapter provides both procedures that show you how to use the Quick
|
||||
EMUlator (QEMU) and other QEMU information helpful for development
|
||||
purposes.
|
||||
|
||||
.. _qemu-dev-overview:
|
||||
|
||||
Overview
|
||||
========
|
||||
|
||||
@@ -37,6 +39,8 @@ following references:
|
||||
- `Documentation <https://wiki.qemu.org/Manual>`__\ *:* The QEMU user
|
||||
manual.
|
||||
|
||||
.. _qemu-running-qemu:
|
||||
|
||||
Running QEMU
|
||||
============
|
||||
|
||||
@@ -46,7 +50,7 @@ available. Follow these general steps to run QEMU:
|
||||
|
||||
1. *Install QEMU:* QEMU is made available with the Yocto Project a
|
||||
number of ways. One method is to install a Software Development Kit
|
||||
(SDK). See ":ref:`sdk-manual/intro:the qemu emulator`" section in the
|
||||
(SDK). See ":ref:`sdk-manual/sdk-intro:the qemu emulator`" section in the
|
||||
Yocto Project Application Development and the Extensible Software
|
||||
Development Kit (eSDK) manual for information on how to install QEMU.
|
||||
|
||||
@@ -58,7 +62,7 @@ available. Follow these general steps to run QEMU:
|
||||
environment script (i.e. :ref:`structure-core-script`):
|
||||
::
|
||||
|
||||
$ cd poky
|
||||
$ cd ~/poky
|
||||
$ source oe-init-build-env
|
||||
|
||||
- If you installed a cross-toolchain, you can run the script that
@@ -66,7 +70,7 @@ available. Follow these general steps to run QEMU:
the initialization script from the default ``poky_sdk`` directory:
::

. poky_sdk/environment-setup-core2-64-poky-linux
. ~/poky_sdk/environment-setup-core2-64-poky-linux

3. *Ensure the Artifacts are in Place:* You need to be sure you have a
pre-built kernel that will boot in QEMU. You also need the target
@@ -77,11 +81,11 @@ available. Follow these general steps to run QEMU:
your :term:`Build Directory`.

- If you have not built an image, you can go to the
:yocto_dl:`machines/qemu </releases/yocto/yocto-&DISTRO;/machines/qemu/>` area and download a
:yocto_dl:`machines/qemu </releases/yocto/yocto-3.1.2/machines/qemu/>` area and download a
pre-built image that matches your architecture and can be run on
QEMU.

See the ":ref:`sdk-manual/appendix-obtain:extracting the root filesystem`"
See the ":ref:`sdk-manual/sdk-appendix-obtain:extracting the root filesystem`"
section in the Yocto Project Application Development and the
Extensible Software Development Kit (eSDK) manual for information on
how to extract a root filesystem.
@@ -183,6 +187,8 @@ allow input of absolute coordinates. This default means that the mouse
can enter and leave the main window without the grab taking effect
leading to a better user experience.

.. _qemu-running-under-a-network-file-system-nfs-server:

Running Under a Network File System (NFS) Server
================================================

@@ -237,6 +243,8 @@ using an NFS server.

runqemu-export-rootfs restart file-system-location

.. _qemu-kvm-cpu-compatibility:

QEMU CPU Compatibility Under KVM
================================

@@ -258,6 +266,8 @@ directory. This setting specifies a ``-cpu`` option passed into QEMU in
the ``runqemu`` script. Running ``qemu -cpu help`` returns a list of
available supported CPU types.

.. _qemu-dev-performance:

QEMU Performance
================

@@ -306,10 +316,12 @@ present, the toolchain is also automatically used.
tarball by using the ``runqemu-extract-sdk`` command. After
running the command, you must then point the ``runqemu`` script to
the extracted directory instead of a root filesystem image file.
See the
":ref:`dev-manual/qemu:running under a network file system (nfs) server`"
See the "`Running Under a Network File System (NFS)
Server <#qemu-running-under-a-network-file-system-nfs-server>`__"
section for more information.

.. _qemu-dev-command-line-syntax:

QEMU Command-Line Syntax
========================

@@ -365,6 +377,8 @@ Following is the command-line help output for the ``runqemu`` command:
runqemu path/to/<image>-<machine>.wic
runqemu path/to/<image>-<machine>.wic.vmdk

.. _qemu-dev-runqemu-command-line-options:

``runqemu`` Command-Line Options
================================

@@ -452,7 +466,7 @@ command line:
or "qemux86-64" QEMU architectures. For KVM with VHOST to work, the
following conditions must be met:

- ``kvm`` option conditions defined above must be met.
- `kvm <#kvm-cond>`__ option conditions must be met.

- Your build host has to have virtio net device, which are
``/dev/vhost-net``.
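As a minimal sketch, assuming a typical x86-64 build host, the ``kvm`` and VHOST
conditions referred to above can be verified before running ``runqemu``; the
device nodes are the ones named in the hunk, and the commands are standard
Linux tools:
::

   # Sketch only: check the prerequisites referenced above.
   # /dev/kvm and /dev/vhost-net are the device nodes the manual text names.
   ls -l /dev/kvm /dev/vhost-net
   # A non-zero count indicates hardware virtualization support (vmx/svm).
   grep -cE 'vmx|svm' /proc/cpuinfo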
@@ -7,10 +7,12 @@ Setting Up to Use the Yocto Project
This chapter provides guidance on how to prepare to use the Yocto
Project. You can learn about creating a team environment to develop
using the Yocto Project, how to set up a :ref:`build
host <dev-manual/start:preparing the build host>`, how to locate
host <dev-manual/dev-manual-start:preparing the build host>`, how to locate
Yocto Project source repositories, and how to create local Git
repositories.

.. _usingpoky-changes-collaborate:

Creating a Team Development Environment
=======================================

@@ -78,7 +80,7 @@ particular working environment and set of practices.
developing under the control of an SCM system that is compatible
with the OpenEmbedded build system is advisable. Of all of the SCMs
supported by BitBake, the Yocto Project team strongly recommends using
:ref:`overview-manual/development-environment:git`.
:ref:`overview-manual/overview-manual-development-environment:git`.
Git is a distributed system
that is easy to back up, allows you to work remotely, and then
connects back to the infrastructure.
@@ -165,7 +167,7 @@ particular working environment and set of practices.
|
||||
- Highlights when commits break the build.
|
||||
|
||||
- Populates an :ref:`sstate
|
||||
cache <overview-manual/concepts:shared state cache>` from which
|
||||
cache <overview-manual/overview-manual-concepts:shared state cache>` from which
|
||||
developers can pull rather than requiring local builds.
|
||||
|
||||
- Allows commit hook triggers, which trigger builds when commits
|
||||
@@ -218,20 +220,20 @@ particular working environment and set of practices.
|
||||
some best practices exist within the Yocto Project development
|
||||
environment. Consider the following:
|
||||
|
||||
- Use :ref:`overview-manual/development-environment:git` as the source control
|
||||
- Use :ref:`overview-manual/overview-manual-development-environment:git` as the source control
|
||||
system.
|
||||
|
||||
- Maintain your Metadata in layers that make sense for your
|
||||
situation. See the ":ref:`overview-manual/yp-intro:the yocto project layer model`"
|
||||
situation. See the ":ref:`overview-manual/overview-manual-yp-intro:the yocto project layer model`"
|
||||
section in the Yocto Project Overview and Concepts Manual and the
|
||||
":ref:`dev-manual/common-tasks:understanding and creating layers`"
|
||||
":ref:`dev-manual/dev-manual-common-tasks:understanding and creating layers`"
|
||||
section for more information on layers.
|
||||
|
||||
- Separate the project's Metadata and code by using separate Git
|
||||
repositories. See the ":ref:`overview-manual/development-environment:yocto project source repositories`"
|
||||
repositories. See the ":ref:`overview-manual/overview-manual-development-environment:yocto project source repositories`"
|
||||
section in the Yocto Project Overview and Concepts Manual for
|
||||
information on these repositories. See the
|
||||
":ref:`dev-manual/start:locating yocto project source files`"
|
||||
information on these repositories. See the "`Locating Yocto
|
||||
Project Source Files <#locating-yocto-project-source-files>`__"
|
||||
section for information on how to set up local Git repositories
|
||||
for related upstream Yocto Project Git repositories.
|
||||
|
||||
@@ -248,17 +250,19 @@ particular working environment and set of practices.
|
||||
project to fix bugs or add features. If you do submit patches,
|
||||
follow the project commit guidelines for writing good commit
|
||||
messages. See the
|
||||
":ref:`dev-manual/common-tasks:submitting a change to the yocto project`"
|
||||
":ref:`dev-manual/dev-manual-common-tasks:submitting a change to the yocto project`"
|
||||
section.
|
||||
|
||||
- Send changes to the core sooner than later as others are likely
|
||||
to run into the same issues. For some guidance on mailing lists
|
||||
to use, see the list in the
|
||||
":ref:`dev-manual/common-tasks:submitting a change to the yocto project`"
|
||||
":ref:`dev-manual/dev-manual-common-tasks:submitting a change to the yocto project`"
|
||||
section. For a description
|
||||
of the available mailing lists, see the ":ref:`resources-mailinglist`" section in
|
||||
the Yocto Project Reference Manual.
|
||||
|
||||
.. _dev-preparing-the-build-host:
|
||||
|
||||
Preparing the Build Host
|
||||
========================
|
||||
|
||||
@@ -288,7 +292,7 @@ Package (BSP) development and kernel development:
|
||||
section in the Yocto Project Board Support Package (BSP) Developer's
|
||||
Guide.
|
||||
|
||||
- *Kernel Development:* See the ":ref:`kernel-dev/common:preparing the build host to work on the kernel`"
|
||||
- *Kernel Development:* See the ":ref:`kernel-dev/kernel-dev-common:preparing the build host to work on the kernel`"
|
||||
section in the Yocto Project Linux Kernel Development Manual.
|
||||
|
||||
Setting Up a Native Linux Host
|
||||
@@ -305,7 +309,7 @@ Project Build Host:
|
||||
validation and their status, see the ":ref:`Supported Linux
|
||||
Distributions <detailed-supported-distros>`"
|
||||
section in the Yocto Project Reference Manual and the wiki page at
|
||||
:yocto_wiki:`Distribution Support </Distribution_Support>`.
|
||||
:yocto_wiki:`Distribution Support </wiki/Distribution_Support>`.
|
||||
|
||||
2. *Have Enough Free Memory:* Your system should have at least 50 Gbytes
|
||||
of free disk space for building images.
|
||||
@@ -314,18 +318,18 @@ Project Build Host:
|
||||
should be able to run on any modern distribution that has the
|
||||
following versions for Git, tar, Python and gcc.
|
||||
|
||||
- Git &MIN_GIT_VERSION; or greater
|
||||
- Git 1.8.3.1 or greater
|
||||
|
||||
- tar &MIN_TAR_VERSION; or greater
|
||||
- tar 1.28 or greater
|
||||
|
||||
- Python &MIN_PYTHON_VERSION; or greater.
|
||||
- Python 3.5.0 or greater.
|
||||
|
||||
- gcc &MIN_GCC_VERSION; or greater.
|
||||
- gcc 5.0 or greater.
|
||||
|
||||
If your build host does not meet any of these three listed version
|
||||
requirements, you can take steps to prepare the system so that you
|
||||
can still use the Yocto Project. See the
|
||||
":ref:`ref-manual/system-requirements:required git, tar, python and gcc versions`"
|
||||
":ref:`ref-manual/ref-system-requirements:required git, tar, python and gcc versions`"
|
||||
section in the Yocto Project Reference Manual for information.
|
||||
|
||||
4. *Install Development Host Packages:* Required development host
|
||||
@@ -334,20 +338,22 @@ Project Build Host:
|
||||
is large if you want to be able to cover all cases.
|
||||
|
||||
For lists of required packages for all scenarios, see the
|
||||
":ref:`ref-manual/system-requirements:required packages for the build host`"
|
||||
":ref:`ref-manual/ref-system-requirements:required packages for the build host`"
|
||||
section in the Yocto Project Reference Manual.
|
||||
|
||||
Once you have completed the previous steps, you are ready to continue
|
||||
using a given development path on your native Linux machine. If you are
|
||||
going to use BitBake, see the
|
||||
":ref:`dev-manual/start:cloning the \`\`poky\`\` repository`"
|
||||
":ref:`dev-manual/dev-manual-start:cloning the \`\`poky\`\` repository`"
|
||||
section. If you are going
|
||||
to use the Extensible SDK, see the ":doc:`/sdk-manual/extensible`" Chapter in the Yocto
|
||||
to use the Extensible SDK, see the ":doc:`../sdk-manual/sdk-extensible`" Chapter in the Yocto
|
||||
Project Application Development and the Extensible Software Development
|
||||
Kit (eSDK) manual. If you want to work on the kernel, see the :doc:`/kernel-dev/index`. If you are going to use
|
||||
Toaster, see the ":doc:`/toaster-manual/setup-and-use`"
|
||||
Kit (eSDK) manual. If you want to work on the kernel, see the :doc:`../kernel-dev/kernel-dev`. If you are going to use
|
||||
Toaster, see the ":doc:`../toaster-manual/toaster-manual-setup-and-use`"
|
||||
section in the Toaster User Manual.
|
||||
|
||||
.. _setting-up-to-use-crops:
|
||||
|
||||
Setting Up to Use CROss PlatformS (CROPS)
|
||||
-----------------------------------------
|
||||
|
||||
@@ -440,14 +446,16 @@ as your Yocto Project build host:
|
||||
Once you have a container set up, everything is in place to develop just
|
||||
as if you were running on a native Linux machine. If you are going to
|
||||
use the Poky container, see the
|
||||
":ref:`dev-manual/start:cloning the \`\`poky\`\` repository`"
|
||||
":ref:`dev-manual/dev-manual-start:cloning the \`\`poky\`\` repository`"
|
||||
section. If you are going to use the Extensible SDK container, see the
|
||||
":doc:`/sdk-manual/extensible`" Chapter in the Yocto
|
||||
":doc:`../sdk-manual/sdk-extensible`" Chapter in the Yocto
|
||||
Project Application Development and the Extensible Software Development
|
||||
Kit (eSDK) manual. If you are going to use the Toaster container, see
|
||||
the ":doc:`/toaster-manual/setup-and-use`"
|
||||
the ":doc:`../toaster-manual/toaster-manual-setup-and-use`"
|
||||
section in the Toaster User Manual.
|
||||
|
||||
.. _setting-up-to-use-wsl:
|
||||
|
||||
Setting Up to Use Windows Subsystem For Linux (WSLv2)
|
||||
-----------------------------------------------------
|
||||
|
||||
@@ -557,10 +565,10 @@ your Yocto Project build host:
|
||||
|
||||
Once you have WSLv2 set up, everything is in place to develop just as if
|
||||
you were running on a native Linux machine. If you are going to use the
|
||||
Extensible SDK container, see the ":doc:`/sdk-manual/extensible`" Chapter in the Yocto
|
||||
Extensible SDK container, see the ":doc:`../sdk-manual/sdk-extensible`" Chapter in the Yocto
|
||||
Project Application Development and the Extensible Software Development
|
||||
Kit (eSDK) manual. If you are going to use the Toaster container, see
|
||||
the ":doc:`/toaster-manual/setup-and-use`"
|
||||
the ":doc:`../toaster-manual/toaster-manual-setup-and-use`"
|
||||
section in the Toaster User Manual.
|
||||
|
||||
Locating Yocto Project Source Files
|
||||
@@ -572,21 +580,21 @@ files you'll need to work with the Yocto Project.
|
||||
.. note::
|
||||
|
||||
- For concepts and introductory information about Git as it is used
|
||||
in the Yocto Project, see the ":ref:`overview-manual/development-environment:git`"
|
||||
in the Yocto Project, see the ":ref:`overview-manual/overview-manual-development-environment:git`"
|
||||
section in the Yocto Project Overview and Concepts Manual.
|
||||
|
||||
- For concepts on Yocto Project source repositories, see the
|
||||
":ref:`overview-manual/development-environment:yocto project source repositories`"
|
||||
":ref:`overview-manual/overview-manual-development-environment:yocto project source repositories`"
|
||||
section in the Yocto Project Overview and Concepts Manual."
|
||||
|
||||
Accessing Source Repositories
|
||||
-----------------------------
|
||||
|
||||
Working from a copy of the upstream :ref:`dev-manual/start:accessing source repositories` is the
|
||||
Working from a copy of the upstream :ref:`dev-manual/dev-manual-start:accessing source repositories` is the
|
||||
preferred method for obtaining and using a Yocto Project release. You
|
||||
can view the Yocto Project Source Repositories at
|
||||
:yocto_git:`/`. In particular, you can find the ``poky``
|
||||
repository at :yocto_git:`/poky`.
|
||||
repository at :yocto_git:`/cgit.cgi/poky`.
|
||||
|
||||
Use the following procedure to locate the latest upstream copy of the
|
||||
``poky`` Git repository:
|
||||
@@ -600,12 +608,12 @@ Use the following procedure to locate the latest upstream copy of the
|
||||
|
||||
3. *Find the URL Used to Clone the Repository:* At the bottom of the
|
||||
page, note the URL used to clone that repository
|
||||
(e.g. :yocto_git:`/poky`).
|
||||
(e.g. :yocto_git:`/cgit.cgi/poky`).
|
||||
|
||||
.. note::
|
||||
|
||||
For information on cloning a repository, see the
|
||||
":ref:`dev-manual/start:cloning the \`\`poky\`\` repository`" section.
|
||||
":ref:`dev-manual/dev-manual-start:cloning the \`\`poky\`\` repository`" section.
|
||||
|
||||
Accessing Index of Releases
|
||||
---------------------------
|
||||
@@ -655,7 +663,8 @@ The :yocto_home:`Yocto Project Website <>` uses a "DOWNLOADS" page
|
||||
from which you can locate and download tarballs of any Yocto Project
|
||||
release. Rather than Git repositories, these files represent snapshot
|
||||
tarballs similar to the tarballs located in the Index of Releases
|
||||
described in the ":ref:`dev-manual/start:accessing index of releases`" section.
|
||||
described in the "`Accessing Index of
|
||||
Releases <#accessing-index-of-releases>`__" section.
|
||||
|
||||
.. note::
|
||||
|
||||
@@ -677,7 +686,7 @@ described in the ":ref:`dev-manual/start:accessing index of releases`" section.
|
||||
.. note::
|
||||
|
||||
For a "map" of Yocto Project releases to version numbers, see the
|
||||
:yocto_wiki:`Releases </Releases>` wiki page.
|
||||
:yocto_wiki:`Releases </wiki/Releases>` wiki page.
|
||||
|
||||
You can use the "RELEASE ARCHIVE" link to reveal a menu of all Yocto
|
||||
Project releases.
|
||||
@@ -721,7 +730,7 @@ files is referred to as the :term:`Source Directory`
|
||||
in the Yocto Project documentation.
|
||||
|
||||
The preferred method of creating your Source Directory is by using
|
||||
:ref:`overview-manual/development-environment:git` to clone a local copy of the upstream
|
||||
:ref:`overview-manual/overview-manual-development-environment:git` to clone a local copy of the upstream
|
||||
``poky`` repository. Working from a cloned copy of the upstream
|
||||
repository allows you to contribute back into the Yocto Project or to
|
||||
simply work with the latest software on a development branch. Because
|
||||
@@ -758,16 +767,16 @@ Follow these steps to create a local version of the upstream
|
||||
"master" branch, which results in a snapshot of the latest
|
||||
development changes for "master". For information on how to check out
|
||||
a specific development branch or on how to check out a local branch
|
||||
based on a tag name, see the
|
||||
":ref:`dev-manual/start:checking out by branch in poky`" and
|
||||
":ref:`dev-manual/start:checking out by tag in poky`" sections, respectively.
|
||||
based on a tag name, see the "`Checking Out By Branch in
|
||||
Poky <#checking-out-by-branch-in-poky>`__" and `Checking Out By Tag
|
||||
in Poky <#checkout-out-by-tag-in-poky>`__" sections, respectively.
|
||||
|
||||
Once the local repository is created, you can change to that
|
||||
directory and check its status. Here, the single "master" branch
|
||||
exists on your system and by default, it is checked out:
|
||||
::
|
||||
|
||||
$ cd poky
|
||||
$ cd ~/poky
|
||||
$ git status
|
||||
On branch master
|
||||
Your branch is up-to-date with 'origin/master'.
|
||||
@@ -800,7 +809,7 @@ and then specifically check out that development branch.
|
||||
1. *Switch to the Poky Directory:* If you have a local poky Git
|
||||
repository, switch to that directory. If you do not have the local
|
||||
copy of poky, see the
|
||||
":ref:`dev-manual/start:cloning the \`\`poky\`\` repository`"
|
||||
":ref:`dev-manual/dev-manual-start:cloning the \`\`poky\`\` repository`"
|
||||
section.
|
||||
|
||||
2. *Determine Existing Branch Names:*
|
||||
@@ -846,6 +855,8 @@ and then specifically check out that development branch.
|
||||
master
|
||||
* &DISTRO_NAME_NO_CAP;
|
||||
|
||||
.. _checkout-out-by-tag-in-poky:
|
||||
|
||||
Checking Out by Tag in Poky
|
||||
---------------------------
|
||||
|
||||
@@ -863,7 +874,7 @@ similar to checking out by branch name except you use tag names.
|
||||
1. *Switch to the Poky Directory:* If you have a local poky Git
|
||||
repository, switch to that directory. If you do not have the local
|
||||
copy of poky, see the
|
||||
":ref:`dev-manual/start:cloning the \`\`poky\`\` repository`"
|
||||
":ref:`dev-manual/dev-manual-start:cloning the \`\`poky\`\` repository`"
|
||||
section.
|
||||
|
||||
2. *Fetch the Tag Names:* To checkout the branch based on a tag name,
|
||||
@@ -10,10 +10,10 @@ Yocto Project Development Tasks Manual
|
||||
:caption: Table of Contents
|
||||
:numbered:
|
||||
|
||||
intro
|
||||
start
|
||||
common-tasks
|
||||
qemu
|
||||
dev-manual-intro
|
||||
dev-manual-start
|
||||
dev-manual-common-tasks
|
||||
dev-manual-qemu
|
||||
history
|
||||
|
||||
.. include:: /boilerplate.rst
|
||||
Binary file not shown. (Before: 214 KiB, After: 244 KiB)
@@ -14,7 +14,7 @@ Welcome to the Yocto Project Documentation
:maxdepth: 1
:caption: Introduction and Overview

Quick Build <brief-yoctoprojectqs/index>
Quick Build <brief-yoctoprojectqs/brief-yoctoprojectqs>
what-i-wish-id-known
transitioning-to-a-custom-environment
Yocto Project Software Overview <https://www.yoctoproject.org/software-overview/>
@@ -25,16 +25,16 @@ Welcome to the Yocto Project Documentation
:maxdepth: 1
:caption: Manuals

Overview and Concepts Manual <overview-manual/index>
Reference Manual <ref-manual/index>
Board Support Package (BSP) Developer's guide <bsp-guide/index>
Development Tasks Manual <dev-manual/index>
Linux Kernel Development Manual <kernel-dev/index>
Profile and Tracing Manual <profile-manual/index>
Application Development and the Extensible SDK (eSDK) <sdk-manual/index>
Toaster Manual <toaster-manual/index>
Test Environment Manual <test-manual/index>
bitbake
Overview and Concepts Manual <overview-manual/overview-manual>
Reference Manual <ref-manual/ref-manual>
Board Support Package (BSP) Developer's guide <bsp-guide/bsp-guide>
Development Tasks Manual <dev-manual/dev-manual>
Linux Kernel Development Manual <kernel-dev/kernel-dev>
Profile and Tracing Manual <profile-manual/profile-manual>
Application Development and the Extensible SDK (eSDK) <sdk-manual/sdk-manual>
Toaster Manual <toaster-manual/toaster-manual>
Test Environment Manual <test-manual/test-manual>
Bitbake User Manual <https://docs.yoctoproject.org/bitbake>

.. toctree::
:maxdepth: 1

@@ -4,6 +4,8 @@
|
||||
Working with Advanced Metadata (``yocto-kernel-cache``)
|
||||
*******************************************************
|
||||
|
||||
.. _kernel-dev-advanced-overview:
|
||||
|
||||
Overview
|
||||
========
|
||||
|
||||
@@ -16,7 +18,7 @@ complexity of the configuration and sources used to support multiple
|
||||
BSPs and Linux kernel types.
|
||||
|
||||
Kernel Metadata exists in many places. One area in the
|
||||
:ref:`overview-manual/development-environment:yocto project source repositories`
|
||||
:ref:`overview-manual/overview-manual-development-environment:yocto project source repositories`
|
||||
is the ``yocto-kernel-cache`` Git repository. You can find this repository
|
||||
grouped under the "Yocto Linux Kernel" heading in the
|
||||
:yocto_git:`Yocto Project Source Repositories <>`.
|
||||
@@ -56,8 +58,8 @@ using the same BSP description. Multiple Corei7-based BSPs could share
|
||||
the same "intel-corei7-64" value for ``KMACHINE``. It is important to
|
||||
realize that ``KMACHINE`` is just for kernel mapping, while ``MACHINE``
|
||||
is the machine type within a BSP Layer. Even with this distinction,
|
||||
however, these two variables can hold the same value. See the
|
||||
":ref:`kernel-dev/advanced:bsp descriptions`" section for more information.
|
||||
however, these two variables can hold the same value. See the `BSP
|
||||
Descriptions <#bsp-descriptions>`__ section for more information.
|
||||
|
||||
Every linux-yocto style recipe must also indicate the Linux kernel
|
||||
source repository branch used to build the Linux kernel. The
|
||||
@@ -87,7 +89,7 @@ Together with ``KMACHINE``, ``LINUX_KERNEL_TYPE`` defines the search
|
||||
arguments used by the kernel tools to find the appropriate description
|
||||
within the kernel Metadata with which to build out the sources and
|
||||
configuration. The linux-yocto recipes define "standard", "tiny", and
|
||||
"preempt-rt" kernel types. See the ":ref:`kernel-dev/advanced:kernel types`"
|
||||
"preempt-rt" kernel types. See the "`Kernel Types <#kernel-types>`__"
|
||||
section for more information on kernel types.
|
||||
|
||||
During the build, the kern-tools search for the BSP description file
|
||||
@@ -123,8 +125,8 @@ the entries in ``KERNEL_FEATURES`` are dependent on their location
|
||||
within the kernel Metadata itself. The examples here are taken from the
|
||||
``yocto-kernel-cache`` repository. Each branch of this repository
|
||||
contains "features" and "cfg" subdirectories at the top-level. For more
|
||||
information, see the ":ref:`kernel-dev/advanced:kernel metadata syntax`"
|
||||
section.
|
||||
information, see the "`Kernel Metadata
|
||||
Syntax <#kernel-metadata-syntax>`__" section.
|
||||
|
||||
Kernel Metadata Syntax
|
||||
======================
|
||||
@@ -148,7 +150,7 @@ Features aggregate sources in the form of patches and configuration
|
||||
fragments into a modular reusable unit. You can use features to
|
||||
implement conceptually separate kernel Metadata descriptions such as
|
||||
pure configuration fragments, simple patches, complex features, and
|
||||
kernel types. :ref:`kernel-dev/advanced:kernel types` define general kernel
|
||||
kernel types. `Kernel types <#kernel-types>`__ define general kernel
|
||||
features and policy to be reused in the BSPs.
|
||||
|
||||
BSPs define hardware-specific features and aggregate them with kernel
|
||||
@@ -167,9 +169,10 @@ following Metadata file hierarchy is recommended:
|
||||
ktypes/
|
||||
patches/
|
||||
|
||||
The ``bsp`` directory contains the :ref:`kernel-dev/advanced:bsp descriptions`.
|
||||
The remaining directories all contain "features". Separating ``bsp`` from the
|
||||
rest of the structure aids conceptualizing intended usage.
|
||||
The ``bsp`` directory contains the `BSP
|
||||
descriptions <#bsp-descriptions>`__. The remaining directories all
|
||||
contain "features". Separating ``bsp`` from the rest of the structure
|
||||
aids conceptualizing intended usage.
|
||||
|
||||
Use these guidelines to help place your ``scc`` description files within
|
||||
the structure:
|
||||
@@ -197,12 +200,11 @@ contain "features" as far as the kernel tools are concerned.
|
||||
Paths used in kernel Metadata files are relative to base, which is
|
||||
either
|
||||
:term:`FILESEXTRAPATHS` if
|
||||
you are creating Metadata in
|
||||
:ref:`recipe-space <kernel-dev/advanced:recipe-space metadata>`,
|
||||
you are creating Metadata in `recipe-space <#recipe-space-metadata>`__,
|
||||
or the top level of
|
||||
:yocto_git:`yocto-kernel-cache </yocto-kernel-cache/tree/>`
|
||||
if you are creating
|
||||
:ref:`kernel-dev/advanced:metadata outside the recipe-space`.
|
||||
:yocto_git:`yocto-kernel-cache </cgit/cgit.cgi/yocto-kernel-cache/tree/>`
|
||||
if you are creating `Metadata outside of the
|
||||
recipe-space <#metadata-outside-the-recipe-space>`__.
|
||||
|
||||
.. [1]
|
||||
``scc`` stands for Series Configuration Control, but the naming has
|
||||
@@ -243,7 +245,7 @@ two files: ``smp.scc`` and ``smp.cfg``. You can find these files in the
|
||||
CONFIG_X86_BIGSMP=y
|
||||
|
||||
You can find general information on configuration
|
||||
fragment files in the ":ref:`kernel-dev/common:creating configuration fragments`" section.
|
||||
fragment files in the ":ref:`creating-config-fragments`" section.
|
||||
|
||||
Within the ``smp.scc`` file, the
|
||||
:term:`KFEATURE_DESCRIPTION`
|
||||
@@ -264,7 +266,7 @@ non-hardware fragment.
|
||||
fragment.
|
||||
|
||||
As described in the
|
||||
":ref:`kernel-dev/common:validating configuration`" section, you can
|
||||
":ref:`kernel-dev/kernel-dev-common:validating configuration`" section, you can
|
||||
use the following BitBake command to audit your configuration:
|
||||
::
|
||||
|
||||
@@ -325,8 +327,8 @@ for the five patches in the directory.
|
||||
|
||||
You can create a typical ``.patch`` file using ``diff -Nurp`` or
|
||||
``git format-patch`` commands. For information on how to create patches,
|
||||
see the ":ref:`kernel-dev/common:using \`\`devtool\`\` to patch the kernel`"
|
||||
and ":ref:`kernel-dev/common:using traditional kernel development to patch the kernel`"
|
||||
see the ":ref:`kernel-dev/kernel-dev-common:using \`\`devtool\`\` to patch the kernel`"
|
||||
and ":ref:`kernel-dev/kernel-dev-common:using traditional kernel development to patch the kernel`"
|
||||
sections.
|
||||
|
||||
Features
|
||||
@@ -353,9 +355,9 @@ as how an additional feature description file is included with the
|
||||
Typically, features are less granular than configuration fragments and
|
||||
are more likely than configuration fragments and patches to be the types
|
||||
of things you want to specify in the ``KERNEL_FEATURES`` variable of the
|
||||
Linux kernel recipe. See the
|
||||
":ref:`kernel-dev/advanced:using kernel metadata in a recipe`" section earlier
|
||||
in the manual.
|
||||
Linux kernel recipe. See the "`Using Kernel Metadata in a
|
||||
Recipe <#using-kernel-metadata-in-a-recipe>`__" section earlier in the
|
||||
manual.
|
||||
|
||||
Kernel Types
|
||||
------------
|
||||
@@ -364,12 +366,12 @@ A kernel type defines a high-level kernel policy by aggregating
|
||||
non-hardware configuration fragments with patches you want to use when
|
||||
building a Linux kernel of a specific type (e.g. a real-time kernel).
|
||||
Syntactically, kernel types are no different than features as described
|
||||
in the ":ref:`kernel-dev/advanced:features`" section. The
|
||||
in the "`Features <#features>`__" section. The
|
||||
:term:`LINUX_KERNEL_TYPE`
|
||||
variable in the kernel recipe selects the kernel type. For example, in
|
||||
the ``linux-yocto_4.12.bb`` kernel recipe found in
|
||||
``poky/meta/recipes-kernel/linux``, a
|
||||
:ref:`require <bitbake:bitbake-user-manual/bitbake-user-manual-metadata:\`\`require\`\` directive>` directive
|
||||
:ref:`require <bitbake:require-inclusion>` directive
|
||||
includes the ``poky/meta/recipes-kernel/linux/linux-yocto.inc`` file,
|
||||
which has the following statement that defines the default kernel type:
|
||||
::
|
||||
@@ -386,9 +388,9 @@ type as follows:
|
||||
.. note::
|
||||
|
||||
You can find kernel recipes in the ``meta/recipes-kernel/linux`` directory
|
||||
of the :ref:`overview-manual/development-environment:yocto project source repositories`
|
||||
of the :ref:`overview-manual/overview-manual-development-environment:yocto project source repositories`
|
||||
(e.g. ``poky/meta/recipes-kernel/linux/linux-yocto_4.12.bb``). See the
|
||||
":ref:`kernel-dev/advanced:using kernel metadata in a recipe`"
|
||||
":ref:`kernel-dev/kernel-dev-advanced:using kernel metadata in a recipe`"
|
||||
section for more information.
|
||||
|
||||
Three kernel types ("standard", "tiny", and "preempt-rt") are supported
|
||||
@@ -453,7 +455,7 @@ and ``patch`` commands, respectively.
|
||||
It is not strictly necessary to create a kernel type ``.scc``
|
||||
file. The Board Support Package (BSP) file can implicitly define the
|
||||
kernel type using a ``define`` :term:`KTYPE` ``myktype`` line. See the
|
||||
":ref:`kernel-dev/advanced:bsp descriptions`" section for more
|
||||
":ref:`kernel-dev/kernel-dev-advanced:bsp descriptions`" section for more
|
||||
information.
|
||||
|
||||
BSP Descriptions
|
||||
@@ -469,12 +471,14 @@ supported kernel type.
|
||||
For BSPs supported by the Yocto Project, the BSP description files
|
||||
are located in the ``bsp`` directory of the ``yocto-kernel-cache``
|
||||
repository organized under the "Yocto Linux Kernel" heading in the
|
||||
:yocto_git:`Yocto Project Source Repositories <>`.
|
||||
:yocto_git:`Yocto Project Source Repositories </>`.
|
||||
|
||||
This section overviews the BSP description structure, the aggregation
|
||||
concepts, and presents a detailed example using a BSP supported by the
|
||||
Yocto Project (i.e. BeagleBone Board). For complete information on BSP
|
||||
layer file hierarchy, see the :doc:`/bsp-guide/index`.
|
||||
layer file hierarchy, see the :doc:`../bsp-guide/bsp-guide`.
|
||||
|
||||
.. _bsp-description-file-overview:
|
||||
|
||||
Description Overview
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
@@ -540,7 +544,7 @@ example, this is done using the following:
|
||||
|
||||
This file aggregates all the configuration
|
||||
fragments, patches, and features that make up your standard kernel
|
||||
policy. See the ":ref:`kernel-dev/advanced:kernel types`" section for more
|
||||
policy. See the "`Kernel Types <#kernel-types>`__" section for more
|
||||
information.
|
||||
|
||||
To aggregate common configurations and features specific to the kernel
|
||||
@@ -555,7 +559,7 @@ You can see that in the BeagleBone example with the following:
|
||||
include beaglebone.scc
|
||||
|
||||
For information on how to break a complete ``.config`` file into the various
|
||||
configuration fragments, see the ":ref:`kernel-dev/common:creating configuration fragments`" section.
|
||||
configuration fragments, see the ":ref:`creating-config-fragments`" section.
|
||||
|
||||
Finally, if you have any configurations specific to the hardware that
|
||||
are not in a ``*.scc`` file, you can include them as follows:
|
||||
@@ -579,6 +583,8 @@ types of configurations. However, the Malta 32-bit board does
|
||||
include mti-malta32.scc
|
||||
kconf hardware mti-malta32-le.cfg
|
||||
|
||||
.. _bsp-description-file-example-minnow:
|
||||
|
||||
Example
|
||||
~~~~~~~
|
||||
|
||||
@@ -696,7 +702,7 @@ good approach if you are working with Linux kernel sources you do not
|
||||
control or if you just do not want to maintain a Linux kernel Git
|
||||
repository on your own. For partial information on how you can define
|
||||
kernel Metadata in the recipe-space, see the
|
||||
":ref:`kernel-dev/common:modifying an existing recipe`" section.
|
||||
":ref:`kernel-dev/kernel-dev-common:modifying an existing recipe`" section.
|
||||
|
||||
Conversely, if you are actively developing a kernel and are already
|
||||
maintaining a Linux kernel Git repository of your own, you might find it
|
||||
@@ -716,7 +722,7 @@ modifying
|
||||
``oe-core/meta-skeleton/recipes-kernel/linux/linux-yocto-custom.bb`` to
|
||||
a recipe in your layer, ``FILESEXTRAPATHS`` is typically set to
|
||||
``${``\ :term:`THISDIR`\ ``}/${``\ :term:`PN`\ ``}``.
|
||||
See the ":ref:`kernel-dev/common:modifying an existing recipe`"
|
||||
See the ":ref:`kernel-dev/kernel-dev-common:modifying an existing recipe`"
|
||||
section for more information.
|
||||
|
||||
Here is an example that shows a trivial tree of kernel Metadata stored
|
||||
@@ -825,11 +831,11 @@ Given this scenario, you do not need to create any branches in the
|
||||
source repository. Rather, you just take the static patches you need and
|
||||
encapsulate them within a feature description. Once you have the feature
|
||||
description, you simply include that into the BSP description as
|
||||
described in the ":ref:`kernel-dev/advanced:bsp descriptions`" section.
|
||||
described in the "`BSP Descriptions <#bsp-descriptions>`__" section.
|
||||
|
||||
You can find information on how to create patches and BSP descriptions
|
||||
in the ":ref:`kernel-dev/advanced:patches`" and
|
||||
":ref:`kernel-dev/advanced:bsp descriptions`" sections.
|
||||
in the "`Patches <#patches>`__" and "`BSP
|
||||
Descriptions <#bsp-descriptions>`__" sections.
|
||||
|
||||
Machine Branches
|
||||
----------------
|
||||
@@ -919,6 +925,8 @@ after any ``branch`` commands:
|
||||
|
||||
include mybsp-hw.scc
|
||||
|
||||
.. _scc-reference:
|
||||
|
||||
SCC Description File Reference
|
||||
==============================
|
||||
|
||||
@@ -21,11 +21,11 @@ Preparing the Build Host to Work on the Kernel
|
||||
|
||||
Before you can do any kernel development, you need to be sure your build
|
||||
host is set up to use the Yocto Project. For information on how to get
|
||||
set up, see the ":doc:`/dev-manual/start`" section in
|
||||
set up, see the ":doc:`../dev-manual/dev-manual-start`" section in
|
||||
the Yocto Project Development Tasks Manual. Part of preparing the system
|
||||
is creating a local Git repository of the
|
||||
:term:`Source Directory` (``poky``) on your system. Follow the steps in the
|
||||
":ref:`dev-manual/start:cloning the \`\`poky\`\` repository`"
|
||||
":ref:`dev-manual/dev-manual-start:cloning the \`\`poky\`\` repository`"
|
||||
section in the Yocto Project Development Tasks Manual to set up your
|
||||
Source Directory.
|
||||
|
||||
@@ -34,12 +34,12 @@ Source Directory.
|
||||
Be sure you check out the appropriate development branch or you
|
||||
create your local branch by checking out a specific tag to get the
|
||||
desired version of Yocto Project. See the
|
||||
":ref:`dev-manual/start:checking out by branch in poky`" and
|
||||
":ref:`dev-manual/start:checking out by tag in poky`"
|
||||
":ref:`dev-manual/dev-manual-start:checking out by branch in poky`" and
|
||||
":ref:`dev-manual/dev-manual-start:checking out by tag in poky`"
|
||||
sections in the Yocto Project Development Tasks Manual for more information.
|
||||
|
||||
Kernel development is best accomplished using
|
||||
:ref:`devtool <sdk-manual/extensible:using \`\`devtool\`\` in your sdk workflow>`
|
||||
:ref:`devtool <sdk-manual/sdk-extensible:using \`\`devtool\`\` in your sdk workflow>`
|
||||
and not through traditional kernel workflow methods. The remainder of
|
||||
this section provides information for both scenarios.
|
||||
|
||||
@@ -49,7 +49,7 @@ Getting Ready to Develop Using ``devtool``
|
||||
Follow these steps to prepare to update the kernel image using
|
||||
``devtool``. Completing this procedure leaves you with a clean kernel
|
||||
image and ready to make modifications as described in the
|
||||
":ref:`kernel-dev/common:using \`\`devtool\`\` to patch the kernel`"
|
||||
":ref:`kernel-dev/kernel-dev-common:using \`\`devtool\`\` to patch the kernel`"
|
||||
section:
|
||||
|
||||
1. *Initialize the BitBake Environment:* Before building an extensible
|
||||
@@ -57,13 +57,13 @@ section:
|
||||
the build environment script (i.e. :ref:`structure-core-script`):
|
||||
::
|
||||
|
||||
$ cd poky
|
||||
$ cd ~/poky
|
||||
$ source oe-init-build-env
|
||||
|
||||
.. note::
|
||||
|
||||
The previous commands assume the
|
||||
:ref:`overview-manual/development-environment:yocto project source repositories`
|
||||
:ref:`overview-manual/overview-manual-development-environment:yocto project source repositories`
|
||||
(i.e. ``poky``) have been cloned using Git and the local repository is named
|
||||
"poky".
|
||||
|
||||
@@ -74,7 +74,7 @@ section:
|
||||
``MACHINE`` variable appropriately in your ``conf/local.conf`` file
|
||||
found in the
|
||||
:term:`Build Directory` (i.e.
|
||||
``poky/build`` in this example).
|
||||
``~/poky/build`` in this example).
|
||||
|
||||
Also, since you are preparing to work on the kernel image, you need
|
||||
to set the
|
||||
@@ -94,7 +94,7 @@ section:
|
||||
``bitbake-layers create-layer`` command as follows:
|
||||
::
|
||||
|
||||
$ cd poky/build
|
||||
$ cd ~/poky/build
|
||||
$ bitbake-layers create-layer ../../meta-mylayer
|
||||
NOTE: Starting bitbake server...
|
||||
Add your new layer with 'bitbake-layers add-layer ../../meta-mylayer'
|
||||
@@ -104,13 +104,13 @@ section:
|
||||
|
||||
For background information on working with common and BSP layers,
|
||||
see the
|
||||
":ref:`dev-manual/common-tasks:understanding and creating layers`"
|
||||
":ref:`dev-manual/dev-manual-common-tasks:understanding and creating layers`"
|
||||
section in the Yocto Project Development Tasks Manual and the
|
||||
":ref:`bsp-guide/bsp:bsp layers`" section in the Yocto Project Board
|
||||
Support (BSP) Developer's Guide, respectively. For information on how to
|
||||
use the ``bitbake-layers create-layer`` command to quickly set up a layer,
|
||||
see the
|
||||
":ref:`dev-manual/common-tasks:creating a general layer using the \`\`bitbake-layers\`\` script`"
|
||||
":ref:`dev-manual/dev-manual-common-tasks:creating a general layer using the \`\`bitbake-layers\`\` script`"
|
||||
section in the Yocto Project Development Tasks Manual.
|
||||
|
||||
4. *Inform the BitBake Build Environment About Your Layer:* As directed
|
||||
@@ -119,7 +119,7 @@ section:
|
||||
``bblayers.conf`` file as follows:
|
||||
::
|
||||
|
||||
$ cd poky/build
|
||||
$ cd ~/poky/build
|
||||
$ bitbake-layers add-layer ../../meta-mylayer
|
||||
NOTE: Starting bitbake server...
|
||||
$
|
||||
@@ -128,7 +128,7 @@ section:
|
||||
specifically for use with images to be run using QEMU:
|
||||
::
|
||||
|
||||
$ cd poky/build
|
||||
$ cd ~/poky/build
|
||||
$ bitbake core-image-minimal -c populate_sdk_ext
|
||||
|
||||
Once
|
||||
@@ -136,21 +136,21 @@ section:
|
||||
``*.sh`` file) in the following directory:
|
||||
::
|
||||
|
||||
poky/build/tmp/deploy/sdk
|
||||
~/poky/build/tmp/deploy/sdk
|
||||
|
||||
For this example, the installer file is named
|
||||
``poky-glibc-x86_64-core-image-minimal-i586-toolchain-ext-&DISTRO;.sh``.
|
||||
``poky-glibc-x86_64-core-image-minimal-i586-toolchain-ext-DISTRO.sh``.
|
||||
|
||||
6. *Install the Extensible SDK:* Use the following command to install
|
||||
the SDK. For this example, install the SDK in the default
|
||||
``poky_sdk`` directory:
|
||||
``~/poky_sdk`` directory:
|
||||
::
|
||||
|
||||
$ cd poky/build/tmp/deploy/sdk
|
||||
$ ./poky-glibc-x86_64-core-image-minimal-i586-toolchain-ext-&DISTRO;.sh
|
||||
Poky (Yocto Project Reference Distro) Extensible SDK installer version &DISTRO;
|
||||
$ cd ~/poky/build/tmp/deploy/sdk
|
||||
$ ./poky-glibc-x86_64-core-image-minimal-i586-toolchain-ext-3.1.2.sh
|
||||
Poky (Yocto Project Reference Distro) Extensible SDK installer version 3.1.2
|
||||
============================================================================
|
||||
Enter target directory for SDK (default: poky_sdk):
|
||||
Enter target directory for SDK (default: ~/poky_sdk):
|
||||
You are about to install the SDK to "/home/scottrif/poky_sdk". Proceed [Y/n]? Y
|
||||
Extracting SDK......................................done
|
||||
Setting it up...
|
||||
@@ -175,7 +175,7 @@ section:
|
||||
directed by the output from installing the SDK:
|
||||
::
|
||||
|
||||
$ source poky_sdk/environment-setup-i586-poky-linux
|
||||
$ source ~/poky_sdk/environment-setup-i586-poky-linux
|
||||
"SDK environment now set up; additionally you may now run devtool to perform development tasks.
|
||||
Run devtool --help for further details.
|
||||
|
||||
@@ -207,12 +207,12 @@ section:
|
||||
building for actual hardware and not for emulation, you could flash
|
||||
the image to a USB stick on ``/dev/sdd`` and boot your device. For an
|
||||
example that uses a Minnowboard, see the
|
||||
:yocto_wiki:`TipsAndTricks/KernelDevelopmentWithEsdk </TipsAndTricks/KernelDevelopmentWithEsdk>`
|
||||
:yocto_wiki:`TipsAndTricks/KernelDevelopmentWithEsdk </wiki/TipsAndTricks/KernelDevelopmentWithEsdk>`
|
||||
Wiki page.
|
||||
|
||||
At this point you have set up to start making modifications to the
|
||||
kernel by using the extensible SDK. For a continued example, see the
|
||||
":ref:`kernel-dev/common:using \`\`devtool\`\` to patch the kernel`"
|
||||
":ref:`kernel-dev/kernel-dev-common:using \`\`devtool\`\` to patch the kernel`"
|
||||
section.
|
||||
|
||||
Getting Ready for Traditional Kernel Development
|
||||
@@ -226,7 +226,7 @@ you will be editing these files.
|
||||
Follow these steps to prepare to update the kernel image using
|
||||
traditional kernel development flow with the Yocto Project. Completing
|
||||
this procedure leaves you ready to make modifications to the kernel
|
||||
source as described in the ":ref:`kernel-dev/common:using traditional kernel development to patch the kernel`"
|
||||
source as described in the ":ref:`kernel-dev/kernel-dev-common:using traditional kernel development to patch the kernel`"
|
||||
section:
|
||||
|
||||
1. *Initialize the BitBake Environment:* Before you can do anything
|
||||
@@ -236,11 +236,11 @@ section:
|
||||
Also, for this example, be sure that the local branch you have
|
||||
checked out for ``poky`` is the Yocto Project &DISTRO_NAME; branch. If
|
||||
you need to checkout out the &DISTRO_NAME; branch, see the
|
||||
":ref:`dev-manual/start:checking out by branch in poky`"
|
||||
":ref:`dev-manual/dev-manual-start:checking out by branch in poky`"
|
||||
section in the Yocto Project Development Tasks Manual.
|
||||
::
|
||||
|
||||
$ cd poky
|
||||
$ cd ~/poky
|
||||
$ git branch
|
||||
master
|
||||
* &DISTRO_NAME_NO_CAP;
|
||||
@@ -249,7 +249,7 @@ section:
|
||||
.. note::
|
||||
|
||||
The previous commands assume the
|
||||
:ref:`overview-manual/development-environment:yocto project source repositories`
|
||||
:ref:`overview-manual/overview-manual-development-environment:yocto project source repositories`
|
||||
(i.e. ``poky``) have been cloned using Git and the local repository is named
|
||||
"poky".
|
||||
|
||||
@@ -260,7 +260,7 @@ section:
|
||||
``MACHINE`` variable appropriately in your ``conf/local.conf`` file
|
||||
found in the
|
||||
:term:`Build Directory` (i.e.
|
||||
``poky/build`` in this example).
|
||||
``~/poky/build`` in this example).
|
||||
|
||||
Also, since you are preparing to work on the kernel image, you need
|
||||
to set the
|
||||
@@ -280,7 +280,7 @@ section:
|
||||
``bitbake-layers create-layer`` command as follows:
|
||||
::
|
||||
|
||||
$ cd poky/build
|
||||
$ cd ~/poky/build
|
||||
$ bitbake-layers create-layer ../../meta-mylayer
|
||||
NOTE: Starting bitbake server...
|
||||
Add your new layer with 'bitbake-layers add-layer ../../meta-mylayer'
|
||||
@@ -289,13 +289,13 @@ section:
|
||||
|
||||
For background information on working with common and BSP layers,
|
||||
see the
|
||||
":ref:`dev-manual/common-tasks:understanding and creating layers`"
|
||||
":ref:`dev-manual/dev-manual-common-tasks:understanding and creating layers`"
|
||||
section in the Yocto Project Development Tasks Manual and the
|
||||
":ref:`bsp-guide/bsp:bsp layers`" section in the Yocto Project Board
|
||||
Support (BSP) Developer's Guide, respectively. For information on how to
|
||||
use the ``bitbake-layers create-layer`` command to quickly set up a layer,
|
||||
see the
|
||||
":ref:`dev-manual/common-tasks:creating a general layer using the \`\`bitbake-layers\`\` script`"
|
||||
":ref:`dev-manual/dev-manual-common-tasks:creating a general layer using the \`\`bitbake-layers\`\` script`"
|
||||
section in the Yocto Project Development Tasks Manual.
|
||||
|
||||
4. *Inform the BitBake Build Environment About Your Layer:* As directed
|
||||
@@ -304,7 +304,7 @@ section:
|
||||
``bblayers.conf`` file as follows:
|
||||
::
|
||||
|
||||
$ cd poky/build
|
||||
$ cd ~/poky/build
|
||||
$ bitbake-layers add-layer ../../meta-mylayer
|
||||
NOTE: Starting bitbake server ...
|
||||
$
|
||||
@@ -365,7 +365,8 @@ section:
|
||||
|
||||
At this point, you are ready to start making modifications to the kernel
|
||||
using traditional kernel development steps. For a continued example, see
|
||||
the ":ref:`kernel-dev/common:using traditional kernel development to patch the kernel`"
|
||||
the "`Using Traditional Kernel Development to Patch the
|
||||
Kernel <#using-traditional-kernel-development-to-patch-the-kernel>`__"
|
||||
section.
|
||||
|
||||
Creating and Preparing a Layer
|
||||
@@ -377,7 +378,7 @@ layer contains its own :term:`BitBake`
|
||||
append files (``.bbappend``) and provides a convenient mechanism to
|
||||
create your own recipe files (``.bb``) as well as store and use kernel
|
||||
patch files. For background information on working with layers, see the
|
||||
":ref:`dev-manual/common-tasks:understanding and creating layers`"
|
||||
":ref:`dev-manual/dev-manual-common-tasks:understanding and creating layers`"
|
||||
section in the Yocto Project Development Tasks Manual.
|
||||
|
||||
.. note::
|
||||
@@ -385,7 +386,7 @@ section in the Yocto Project Development Tasks Manual.
|
||||
The Yocto Project comes with many tools that simplify tasks you need
|
||||
to perform. One such tool is the ``bitbake-layers create-layer``
|
||||
command, which simplifies creating a new layer. See the
|
||||
":ref:`dev-manual/common-tasks:creating a general layer using the \`\`bitbake-layers\`\` script`"
|
||||
":ref:`dev-manual/dev-manual-common-tasks:creating a general layer using the \`\`bitbake-layers\`\` script`"
|
||||
section in the Yocto Project Development Tasks Manual for
|
||||
information on how to use this script to quick set up a new layer.
|
||||
|
||||
@@ -397,6 +398,7 @@ home directory:
|
||||
1. *Create Structure*: Create the layer's structure:
|
||||
::
|
||||
|
||||
$ cd $HOME
|
||||
$ mkdir meta-mylayer
|
||||
$ mkdir meta-mylayer/conf
|
||||
$ mkdir meta-mylayer/recipes-kernel
|
||||
@@ -441,7 +443,7 @@ home directory:
|
||||
The :term:`FILESEXTRAPATHS` and :term:`SRC_URI` statements
|
||||
enable the OpenEmbedded build system to find patch files. For more
|
||||
information on using append files, see the
|
||||
":ref:`dev-manual/common-tasks:using .bbappend files in your layer`"
|
||||
":ref:`dev-manual/dev-manual-common-tasks:using .bbappend files in your layer`"
|
||||
section in the Yocto Project Development Tasks Manual.
|
||||
|
||||
Modifying an Existing Recipe
|
||||
@@ -455,15 +457,15 @@ the :term:`Source Directory` in
|
||||
|
||||
Modifying an existing recipe can consist of the following:
|
||||
|
||||
- :ref:`kernel-dev/common:creating the append file`
|
||||
- :ref:`kernel-dev/kernel-dev-common:creating the append file`
|
||||
|
||||
- :ref:`kernel-dev/common:applying patches`
|
||||
- :ref:`kernel-dev/kernel-dev-common:applying patches`
|
||||
|
||||
- :ref:`kernel-dev/common:changing the configuration`
|
||||
- :ref:`kernel-dev/kernel-dev-common:changing the configuration`
|
||||
|
||||
Before modifying an existing recipe, be sure that you have created a
|
||||
minimal, custom layer from which you can work. See the
|
||||
":ref:`kernel-dev/common:creating and preparing a layer`" section for
|
||||
minimal, custom layer from which you can work. See the "`Creating and
|
||||
Preparing a Layer <#creating-and-preparing-a-layer>`__" section for
|
||||
information.
|
||||
|
||||
Creating the Append File
|
||||
@@ -500,7 +502,7 @@ your layer in the following area:
|
||||
.. note::
|
||||
|
||||
If you are working on a new machine Board Support Package (BSP), be
|
||||
sure to refer to the :doc:`/bsp-guide/index`.
|
||||
sure to refer to the :doc:`../bsp-guide/bsp-guide`.
|
||||
|
||||
As an example, consider the following append file used by the BSPs in
|
||||
``meta-yocto-bsp``:
|
||||
@@ -640,9 +642,9 @@ and applies the patches before building the kernel.
|
||||
|
||||
For a detailed example showing how to patch the kernel using
|
||||
``devtool``, see the
|
||||
":ref:`kernel-dev/common:using \`\`devtool\`\` to patch the kernel`"
|
||||
":ref:`kernel-dev/kernel-dev-common:using \`\`devtool\`\` to patch the kernel`"
|
||||
and
|
||||
":ref:`kernel-dev/common:using traditional kernel development to patch the kernel`"
|
||||
":ref:`kernel-dev/kernel-dev-common:using traditional kernel development to patch the kernel`"
|
||||
sections.
|
||||
|
||||
Changing the Configuration
|
||||
@@ -709,7 +711,7 @@ Linux kernel, BitBake detects the change in the recipe and fetches and
|
||||
applies the new configuration before building the kernel.
|
||||
|
||||
For a detailed example showing how to configure the kernel, see the
|
||||
":ref:`kernel-dev/common:configuring the kernel`" section.
|
||||
"`Configuring the Kernel <#configuring-the-kernel>`__" section.
|
||||
|
||||
Using an "In-Tree" ``defconfig`` File
|
||||
--------------------------------------
|
||||
@@ -767,7 +769,7 @@ the extensible SDK and ``devtool``.
|
||||
|
||||
Before attempting this procedure, be sure you have performed the
|
||||
steps to get ready for updating the kernel as described in the
|
||||
":ref:`kernel-dev/common:getting ready to develop using \`\`devtool\`\``"
|
||||
":ref:`kernel-dev/kernel-dev-common:getting ready to develop using \`\`devtool\`\``"
|
||||
section.
|
||||
|
||||
Patching the kernel involves changing or adding configurations to an
|
||||
@@ -780,7 +782,7 @@ output at boot time through ``printk`` statements in the kernel's
|
||||
``calibrate.c`` source code file. Applying the patch and booting the
|
||||
modified image causes the added messages to appear on the emulator's
|
||||
console. The example is a continuation of the setup procedure found in
|
||||
the ":ref:`kernel-dev/common:getting ready to develop using \`\`devtool\`\``" Section.
|
||||
the ":ref:`kernel-dev/kernel-dev-common:getting ready to develop using \`\`devtool\`\``" Section.
|
||||
|
||||
1. *Check Out the Kernel Source Files:* First you must use ``devtool``
|
||||
to checkout the kernel source code in its workspace. Be sure you are
|
||||
@@ -789,7 +791,7 @@ the ":ref:`kernel-dev/common:getting ready to develop using \`\`devtool\`\``" Se
|
||||
.. note::
|
||||
|
||||
See this step in the
|
||||
":ref:`kernel-dev/common:getting ready to develop using \`\`devtool\`\``"
|
||||
":ref:`kernel-dev/kernel-dev-common:getting ready to develop using \`\`devtool\`\``"
|
||||
section for more information.
|
||||
|
||||
Use the following ``devtool`` command to check out the code:
|
||||
@@ -817,12 +819,12 @@ the ":ref:`kernel-dev/common:getting ready to develop using \`\`devtool\`\``" Se
|
||||
|
||||
1. *Change the working directory*: In the previous step, the output
|
||||
noted where you can find the source files (e.g.
|
||||
``poky_sdk/workspace/sources/linux-yocto``). Change to where the
|
||||
``~/poky_sdk/workspace/sources/linux-yocto``). Change to where the
|
||||
kernel source code is before making your edits to the
|
||||
``calibrate.c`` file:
|
||||
::
|
||||
|
||||
$ cd poky_sdk/workspace/sources/linux-yocto
|
||||
$ cd ~/poky_sdk/workspace/sources/linux-yocto
|
||||
|
||||
2. *Edit the source file*: Edit the ``init/calibrate.c`` file to have
|
||||
the following changes:
|
||||
@@ -860,7 +862,7 @@ the ":ref:`kernel-dev/common:getting ready to develop using \`\`devtool\`\``" Se
|
||||
If the image you originally created resulted in a Wic file, you
|
||||
can use an alternate method to create the new image with the
|
||||
updated kernel. For an example, see the steps in the
|
||||
:yocto_wiki:`TipsAndTricks/KernelDevelopmentWithEsdk </TipsAndTricks/KernelDevelopmentWithEsdk>`
|
||||
:yocto_wiki:`TipsAndTricks/KernelDevelopmentWithEsdk </wiki/TipsAndTricks/KernelDevelopmentWithEsdk>`
|
||||
Wiki Page.
|
||||
|
||||
::
|
||||
@@ -894,7 +896,7 @@ the ":ref:`kernel-dev/common:getting ready to develop using \`\`devtool\`\``" Se
|
||||
and use these Git commands to stage and commit your changes:
|
||||
::
|
||||
|
||||
$ cd poky_sdk/workspace/sources/linux-yocto
|
||||
$ cd ~/poky_sdk/workspace/sources/linux-yocto
|
||||
$ git status
|
||||
$ git add init/calibrate.c
|
||||
$ git commit -m "calibrate: Add printk example"
|
||||
@@ -910,7 +912,7 @@ the ":ref:`kernel-dev/common:getting ready to develop using \`\`devtool\`\``" Se
|
||||
.. note::
|
||||
|
||||
See Step 3 of the
|
||||
":ref:`kernel-dev/common:getting ready to develop using \`\`devtool\`\``"
|
||||
":ref:`kernel-dev/kernel-dev-common:getting ready to develop using \`\`devtool\`\``"
|
||||
section for information on setting up this layer.
|
||||
|
||||
Once the command
|
||||
@@ -924,7 +926,7 @@ the ":ref:`kernel-dev/common:getting ready to develop using \`\`devtool\`\``" Se
|
||||
set up to run BitBake:
|
||||
::
|
||||
|
||||
$ cd poky/build
|
||||
$ cd ~/poky/build
|
||||
$ bitbake core-image-minimal
|
||||
|
||||
Using Traditional Kernel Development to Patch the Kernel
|
||||
@@ -933,14 +935,14 @@ Using Traditional Kernel Development to Patch the Kernel
|
||||
The steps in this procedure show you how you can patch the kernel using
|
||||
traditional kernel development (i.e. not using ``devtool`` and the
|
||||
extensible SDK as described in the
|
||||
":ref:`kernel-dev/common:using \`\`devtool\`\` to patch the kernel`"
|
||||
":ref:`kernel-dev/kernel-dev-common:using \`\`devtool\`\` to patch the kernel`"
|
||||
section).
|
||||
|
||||
.. note::
|
||||
|
||||
Before attempting this procedure, be sure you have performed the
|
||||
steps to get ready for updating the kernel as described in the
|
||||
":ref:`kernel-dev/common:getting ready for traditional kernel development`"
|
||||
":ref:`kernel-dev/kernel-dev-common:getting ready for traditional kernel development`"
|
||||
section.
|
||||
|
||||
Patching the kernel involves changing or adding configurations to an
|
||||
@@ -953,14 +955,15 @@ emulator console output at boot time through ``printk`` statements in
|
||||
the kernel's ``calibrate.c`` source code file. Applying the patch and
|
||||
booting the modified image causes the added messages to appear on the
|
||||
emulator's console. The example is a continuation of the setup procedure
|
||||
found in the
|
||||
":ref:`kernel-dev/common:getting ready for traditional kernel development`"
|
||||
found in the "`Getting Ready for Traditional Kernel
|
||||
Development <#getting-ready-for-traditional-kernel-development>`__"
|
||||
Section.
|
||||
|
||||
1. *Edit the Source Files* Prior to this step, you should have used Git
|
||||
to create a local copy of the repository for your kernel. Assuming
|
||||
you created the repository as directed in the
|
||||
":ref:`kernel-dev/common:getting ready for traditional kernel development`"
|
||||
you created the repository as directed in the "`Getting Ready for
|
||||
Traditional Kernel
|
||||
Development <#getting-ready-for-traditional-kernel-development>`__"
|
||||
section, use the following commands to edit the ``calibrate.c`` file:
|
||||
|
||||
1. *Change the working directory*: You need to locate the source
|
||||
@@ -1012,7 +1015,7 @@ Section.
|
||||
to the following to your ``local.conf``:
|
||||
::
|
||||
|
||||
$ cd poky/build/conf
|
||||
$ cd ~/poky/build/conf
|
||||
|
||||
Add the following to the ``local.conf``:
|
||||
::
|
||||
@@ -1034,7 +1037,7 @@ Section.
|
||||
you can now use BitBake to build the image:
|
||||
::
|
||||
|
||||
$ cd poky/build
|
||||
$ cd ~/poky/build
|
||||
$ bitbake core-image-minimal
|
||||
|
||||
5. *Boot the image*: Boot the modified image in the QEMU emulator using
|
||||
@@ -1042,7 +1045,7 @@ Section.
|
||||
with no password:
|
||||
::
|
||||
|
||||
$ cd poky/build
|
||||
$ cd ~/poky/build
|
||||
$ runqemu qemux86
|
||||
|
||||
6. *Look for Your Changes:* As QEMU booted, you might have seen your
|
||||
@@ -1102,10 +1105,10 @@ Section.
|
||||
The :term:`FILESEXTRAPATHS` and :term:`SRC_URI` statements
|
||||
enable the OpenEmbedded build system to find the patch file.
|
||||
|
||||
For more information on append files and patches, see the
|
||||
":ref:`kernel-dev/common:creating the append file`" and
|
||||
":ref:`kernel-dev/common:applying patches`" sections. You can also see the
|
||||
":ref:`dev-manual/common-tasks:using .bbappend files in your layer`"
|
||||
For more information on append files and patches, see the "`Creating
|
||||
the Append File <#creating-the-append-file>`__" and "`Applying
|
||||
Patches <#applying-patches>`__" sections. You can also see the
|
||||
":ref:`dev-manual/dev-manual-common-tasks:using .bbappend files in your layer`"
|
||||
section in the Yocto Project Development Tasks Manual.

.. note::
@@ -1116,7 +1119,7 @@ Section.
the following sequence of commands:
::

$ cd poky/build
$ cd ~/poky/build
$ bitbake -c cleanall yocto-linux
$ bitbake core-image-minimal -c cleanall
$ bitbake core-image-minimal
@@ -1138,8 +1141,8 @@ configuration fragments, and how to interactively modify your
``.config`` file to create the leanest kernel configuration file
possible.

For more information on kernel configuration, see the
":ref:`kernel-dev/common:changing the configuration`" section.
For more information on kernel configuration, see the "`Changing the
Configuration <#changing-the-configuration>`__" section.

Using ``menuconfig``
---------------------
@@ -1169,7 +1172,7 @@ environment, you must do the following:
The following commands initialize the BitBake environment, run the
:ref:`ref-tasks-kernel_configme`
task, and launch ``menuconfig``. These commands assume the Source
Directory's top-level folder is ``poky``:
Directory's top-level folder is ``~/poky``:
::

$ cd poky
@@ -1187,9 +1190,9 @@ the tool and save your changes to create an updated version of the

You can use the entire ``.config`` file as the ``defconfig`` file. For
information on ``defconfig`` files, see the
":ref:`kernel-dev/common:changing the configuration`",
":ref:`kernel-dev/common:using an "in-tree" \`\`defconfig\`\` file`",
and ":ref:`kernel-dev/common:creating a \`\`defconfig\`\` file`"
":ref:`kernel-dev/kernel-dev-common:changing the configuration`",
":ref:`kernel-dev/kernel-dev-common:using an "in-tree" \`\`defconfig\`\` file`",
and ":ref:`kernel-dev/kernel-dev-common:creating a \`\`defconfig\`\` file`"
sections.
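
As a sketch of that approach (the layer path is assumed), the generated file is
copied into the recipe's files area and then referenced from the append file:
::

   # Illustrative only -- adjust the path to match your layer layout
   $ cp .config /path/to/meta-mylayer/recipes-kernel/linux/linux-yocto/defconfig

   # in the corresponding .bbappend:
   SRC_URI += "file://defconfig"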

Consider an example that configures the "CONFIG_SMP" setting for the
@@ -1295,8 +1298,10 @@ created to hold the configuration changes.
applies these on top of and after applying the existing ``defconfig`` file
configurations.

For more information on configuring the kernel, see the
":ref:`kernel-dev/common:changing the configuration`" section.
For more information on configuring the kernel, see the "`Changing the
Configuration <#changing-the-configuration>`__" section.

.. _creating-config-fragments:

Creating Configuration Fragments
--------------------------------
@@ -1317,7 +1322,7 @@ appear in the ``.config`` file, which is in the :term:`Build Directory`.

For more information about where the ``.config`` file is located, see the
example in the
":ref:`kernel-dev/common:using \`\`menuconfig\`\``"
":ref:`kernel-dev/kernel-dev-common:using \`\`menuconfig\`\``"
section.

It is simple to create a configuration fragment. One method is to use
@@ -1367,14 +1372,14 @@ steps:
$ bitbake linux-yocto -c diffconfig

The ``diffconfig`` command creates a file that is a list of Linux kernel
``CONFIG_`` assignments. See the
":ref:`kernel-dev/common:changing the configuration`" section for additional
``CONFIG_`` assignments. See the "`Changing the
Configuration <#changing-the-configuration>`__" section for additional
information on how to use the output as a configuration fragment.
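
For illustration, such a fragment is just a short list of ``CONFIG_`` settings
in a ``.cfg`` file that the kernel recipe's append file can pull in (the file
name here is assumed):
::

   # smp.cfg -- illustrative configuration fragment
   CONFIG_SMP=y

   # referenced from the kernel recipe's .bbappend:
   SRC_URI += "file://smp.cfg"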

.. note::

You can also use this method to create configuration fragments for a
BSP. See the ":ref:`kernel-dev/advanced:bsp descriptions`"
BSP. See the ":ref:`kernel-dev/kernel-dev-advanced:bsp descriptions`"
section for more information.

Where do you put your configuration fragment files? You can place these
@@ -1420,7 +1425,7 @@ when you override a policy configuration in a hardware configuration
fragment.

In order to run this task, you must have an existing ``.config`` file.
See the ":ref:`kernel-dev/common:using \`\`menuconfig\`\``" section for
See the ":ref:`kernel-dev/kernel-dev-common:using \`\`menuconfig\`\``" section for
information on how to create a configuration file.
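
Assuming the ``linux-yocto`` recipe, a typical way to force the audit from the
Build Directory is:
::

   $ bitbake linux-yocto -c kernel_configcheck -f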

Following is sample output from the ``do_kernel_configcheck`` task:
@@ -1493,7 +1498,7 @@ and
tasks until they produce no warnings.

For more information on how to use the ``menuconfig`` tool, see the
:ref:`kernel-dev/common:using \`\`menuconfig\`\`` section.
:ref:`kernel-dev/kernel-dev-common:using \`\`menuconfig\`\`` section.

Fine-Tuning the Kernel Configuration File
-----------------------------------------
@@ -1609,10 +1614,11 @@ source directory. Follow these steps to clean up the version string:
Depending on your particular kernel development workflow, the
commands you use to rebuild the kernel might differ. For information
on building the kernel image when using ``devtool``, see the
":ref:`kernel-dev/common:using \`\`devtool\`\` to patch the kernel`"
":ref:`kernel-dev/kernel-dev-common:using \`\`devtool\`\` to patch the kernel`"
section. For
information on building the kernel image when using Bitbake, see the
":ref:`kernel-dev/common:using traditional kernel development to patch the kernel`"
"`Using Traditional Kernel Development to Patch the
Kernel <#using-traditional-kernel-development-to-patch-the-kernel>`__"
section.

Working With Your Own Sources
@@ -1730,9 +1736,8 @@ Here are some basic steps you can use to work with your own sources:

5. *Customize Your Recipe as Needed:* Provide further customizations to
your recipe as needed just as you would customize an existing
linux-yocto recipe. See the
":ref:`ref-manual/devtool-reference:modifying an existing recipe`" section
for information.
linux-yocto recipe. See the "`Modifying an Existing
Recipe <#modifying-an-existing-recipe>`__" section for information.

Working with Out-of-Tree Modules
================================
@@ -1912,7 +1917,7 @@ differences:
$ git show origin/standard/base..origin/standard/emenlow

Use this command to create individual patches for each change. Here is
an example that creates patch files for each commit and places them
an example that that creates patch files for each commit and places them
in your ``Documents`` directory:
::

@@ -1939,7 +1944,7 @@ Adding Recipe-Space Kernel Features
===================================

You can add kernel features in the
:ref:`recipe-space <kernel-dev/advanced:recipe-space metadata>`
:ref:`recipe-space <kernel-dev/kernel-dev-advanced:recipe-space metadata>`
by using the :term:`KERNEL_FEATURES`
variable and by specifying the feature's ``.scc`` file path in the
:term:`SRC_URI` statement. When you
@@ -1958,7 +1963,7 @@ OpenEmbedded build system searches all forms of kernel Metadata on the
``SRC_URI`` statement regardless of whether the Metadata is in the
"kernel-cache", system kernel Metadata, or a recipe-space Metadata (i.e.
part of the kernel recipe). See the
":ref:`kernel-dev/advanced:kernel metadata location`" section for
":ref:`kernel-dev/kernel-dev-advanced:kernel metadata location`" section for
additional information.
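
A recipe-space sketch, with the feature name made up for illustration, adds both
pieces to the kernel recipe's append file and ships the ``.scc`` and ``.cfg``
files next to it:
::

   # Illustrative .bbappend lines -- feature name assumed
   SRC_URI += "file://myfeature.scc file://myfeature.cfg"
   KERNEL_FEATURES_append = " myfeature.scc"

   # myfeature.scc
   define KFEATURE_DESCRIPTION "Enable an illustrative feature"
   kconf non-hardware myfeature.cfg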

When you specify the feature's ``.scc`` file on the ``SRC_URI``
@@ -4,6 +4,8 @@
Advanced Kernel Concepts
************************

.. _kernel-big-picture:

Yocto Project Kernel Development and Maintenance
================================================

@@ -35,7 +37,7 @@ Yocto Project Linux kernel that caters to specific embedded designer
needs for targeted hardware.

You can find a web interface to the Yocto Linux kernels in the
:ref:`overview-manual/development-environment:yocto project source repositories`
:ref:`overview-manual/overview-manual-development-environment:yocto project source repositories`
at :yocto_git:`/`. If you look at the interface, you will see to
the left a grouping of Git repositories titled "Yocto Linux Kernel".
Within this group, you will find several Linux Yocto kernels developed
@@ -71,7 +73,7 @@ and included with Yocto Project releases:
and configurations for the linux-yocto kernel tree. This repository
is useful when working on the linux-yocto kernel. For more
information on this "Advanced Kernel Metadata", see the
":doc:`/kernel-dev/advanced`" Chapter.
":doc:`kernel-dev-advanced`" Chapter.

- *linux-yocto-dev:* A development kernel based on the latest
upstream release candidate available.
@@ -160,7 +162,7 @@ implemented by the Yocto Project team using the Source Code Manager

- You can find documentation on Git at https://git-scm.com/doc. You can
also get an introduction to Git as it applies to the Yocto Project in the
":ref:`overview-manual/development-environment:git`" section in the Yocto Project
":ref:`overview-manual/overview-manual-development-environment:git`" section in the Yocto Project
Overview and Concepts Manual. The latter reference provides an
overview of Git and presents a minimal set of Git commands that
allows you to be functional using Git. You can use as much, or as
@@ -258,7 +260,7 @@ Yocto Linux kernel needed for any given set of requirements.
Yocto Linux kernels, but rather shows a single generic kernel just
for conceptual purposes. Also keep in mind that this structure
represents the
:ref:`overview-manual/development-environment:yocto project source repositories`
:ref:`overview-manual/overview-manual-development-environment:yocto project source repositories`
that are either pulled from during the build or established on the
host development system prior to the build by either cloning a
particular kernel's Git repository or by downloading and unpacking a
@@ -293,13 +295,13 @@ ways:

- *Files Accessed While using devtool:* ``devtool``, which is
available with the Yocto Project, is the preferred method by which to
modify the kernel. See the ":ref:`kernel-dev/intro:kernel modification workflow`" section.
modify the kernel. See the ":ref:`kernel-dev/kernel-dev-intro:kernel modification workflow`" section.

- *Cloned Repository:* If you are working in the kernel all the time,
you probably would want to set up your own local Git repository of
the Yocto Linux kernel tree. For information on how to clone a Yocto
Linux kernel Git repository, see the
":ref:`kernel-dev/common:preparing the build host to work on the kernel`"
":ref:`kernel-dev/kernel-dev-common:preparing the build host to work on the kernel`"
section.

- *Temporary Source Files from a Build:* If you just need to make some
@@ -327,11 +329,11 @@ source files used during the build.

Again, for additional information on the Yocto Project kernel's
architecture and its branching strategy, see the
":ref:`kernel-dev/concepts-appx:yocto linux kernel architecture and branching strategies`"
":ref:`kernel-dev/kernel-dev-concepts-appx:yocto linux kernel architecture and branching strategies`"
section. You can also reference the
":ref:`kernel-dev/common:using \`\`devtool\`\` to patch the kernel`"
":ref:`kernel-dev/kernel-dev-common:using \`\`devtool\`\` to patch the kernel`"
and
":ref:`kernel-dev/common:using traditional kernel development to patch the kernel`"
":ref:`kernel-dev/kernel-dev-common:using traditional kernel development to patch the kernel`"
sections for detailed example that modifies the kernel.

Determining Hardware and Non-Hardware Features for the Kernel Configuration Audit Phase
@@ -341,7 +343,7 @@ This section describes part of the kernel configuration audit phase that
most developers can ignore. For general information on kernel
configuration including ``menuconfig``, ``defconfig`` files, and
configuration fragments, see the
":ref:`kernel-dev/common:configuring the kernel`" section.
":ref:`kernel-dev/kernel-dev-common:configuring the kernel`" section.

During this part of the audit phase, the contents of the final
``.config`` file are compared against the fragments specified by the