Mirror of https://git.yoctoproject.org/poky
Synced 2026-02-26 11:29:40 +01:00

Compare commits (1 commit): yocto-4.0. ... uninative-

| Author | SHA1 | Date |
|---|---|---|
| | a8095c99ab | |

SECURITY.md | 24
@@ -1,24 +0,0 @@
How to Report a Potential Vulnerability?
========================================

If you would like to report a public issue (for example, one with a released
CVE number), please report it using the
[https://bugzilla.yoctoproject.org/enter_bug.cgi?product=Security Security Bugzilla].
If you have a patch ready, submit it following the same procedure as any other
patch as described in README.md.

If you are dealing with a not-yet released or urgent issue, please send a
message to security AT yoctoproject DOT org, including as many details as
possible: the layer or software module affected, the recipe and its version,
and any example code, if available.

Branches maintained with security fixes
---------------------------------------

See [https://wiki.yoctoproject.org/wiki/Stable_Release_and_LTS Stable release and LTS]
for detailed info regarding the policies and maintenance of Stable branches.

The [https://wiki.yoctoproject.org/wiki/Releases Release page] contains a list of all
releases of the Yocto Project. Versions in grey are no longer actively maintained with
security patches, but well-tested patches may still be accepted for them for
significant issues.

@@ -25,7 +25,8 @@ except RuntimeError as exc:
from bb import cookerdata
from bb.main import bitbake_main, BitBakeConfigParameters, BBMainException

bb.utils.check_system_locale()
if sys.getfilesystemencoding() != "utf-8":
    sys.exit("Please use a locale setting which supports UTF-8 (such as LANG=en_US.UTF-8).\nPython can't change the filesystem locale after loading so we need a UTF-8 when Python starts or things won't work.")

__version__ = "2.0.0"

@@ -11,7 +11,6 @@
|
||||
import os
|
||||
import sys
|
||||
import warnings
|
||||
|
||||
warnings.simplefilter("default")
|
||||
import argparse
|
||||
import logging
|
||||
@@ -28,7 +27,6 @@ logger = bb.msg.logger_create(myname)
|
||||
|
||||
is_dump = myname == 'bitbake-dumpsig'
|
||||
|
||||
|
||||
def find_siginfo(tinfoil, pn, taskname, sigs=None):
|
||||
result = None
|
||||
tinfoil.set_event_mask(['bb.event.FindSigInfoResult',
|
||||
@@ -54,7 +52,6 @@ def find_siginfo(tinfoil, pn, taskname, sigs=None):
|
||||
sys.exit(2)
|
||||
return result
|
||||
|
||||
|
||||
def find_siginfo_task(bbhandler, pn, taskname, sig1=None, sig2=None):
|
||||
""" Find the most recent signature files for the specified PN/task """
|
||||
|
||||
@@ -66,10 +63,10 @@ def find_siginfo_task(bbhandler, pn, taskname, sig1=None, sig2=None):
|
||||
if not sigfiles:
|
||||
logger.error('No sigdata files found matching %s %s matching either %s or %s' % (pn, taskname, sig1, sig2))
|
||||
sys.exit(1)
|
||||
elif sig1 not in sigfiles:
|
||||
elif not sig1 in sigfiles:
|
||||
logger.error('No sigdata files found matching %s %s with signature %s' % (pn, taskname, sig1))
|
||||
sys.exit(1)
|
||||
elif sig2 not in sigfiles:
|
||||
elif not sig2 in sigfiles:
|
||||
logger.error('No sigdata files found matching %s %s with signature %s' % (pn, taskname, sig2))
|
||||
sys.exit(1)
|
||||
latestfiles = [sigfiles[sig1], sigfiles[sig2]]
|
||||
@@ -91,9 +88,9 @@ def recursecb(key, hash1, hash2):
|
||||
recout = []
|
||||
if not hashfiles:
|
||||
recout.append("Unable to find matching sigdata for %s with hashes %s or %s" % (key, hash1, hash2))
|
||||
elif hash1 not in hashfiles:
|
||||
elif not hash1 in hashfiles:
|
||||
recout.append("Unable to find matching sigdata for %s with hash %s" % (key, hash1))
|
||||
elif hash2 not in hashfiles:
|
||||
elif not hash2 in hashfiles:
|
||||
recout.append("Unable to find matching sigdata for %s with hash %s" % (key, hash2))
|
||||
else:
|
||||
out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb, color=color)
|
||||
@@ -113,36 +110,36 @@ parser.add_argument('-D', '--debug',
|
||||
|
||||
if is_dump:
|
||||
parser.add_argument("-t", "--task",
|
||||
help="find the signature data file for the last run of the specified task",
|
||||
action="store", dest="taskargs", nargs=2, metavar=('recipename', 'taskname'))
|
||||
help="find the signature data file for the last run of the specified task",
|
||||
action="store", dest="taskargs", nargs=2, metavar=('recipename', 'taskname'))
|
||||
|
||||
parser.add_argument("sigdatafile1",
|
||||
help="Signature file to dump. Not used when using -t/--task.",
|
||||
action="store", nargs='?', metavar="sigdatafile")
|
||||
help="Signature file to dump. Not used when using -t/--task.",
|
||||
action="store", nargs='?', metavar="sigdatafile")
|
||||
else:
|
||||
parser.add_argument('-c', '--color',
|
||||
help='Colorize the output (where %(metavar)s is %(choices)s)',
|
||||
choices=['auto', 'always', 'never'], default='auto', metavar='color')
|
||||
help='Colorize the output (where %(metavar)s is %(choices)s)',
|
||||
choices=['auto', 'always', 'never'], default='auto', metavar='color')
|
||||
|
||||
parser.add_argument('-d', '--dump',
|
||||
help='Dump the last signature data instead of comparing (equivalent to using bitbake-dumpsig)',
|
||||
action='store_true')
|
||||
help='Dump the last signature data instead of comparing (equivalent to using bitbake-dumpsig)',
|
||||
action='store_true')
|
||||
|
||||
parser.add_argument("-t", "--task",
|
||||
help="find the signature data files for the last two runs of the specified task and compare them",
|
||||
action="store", dest="taskargs", nargs=2, metavar=('recipename', 'taskname'))
|
||||
help="find the signature data files for the last two runs of the specified task and compare them",
|
||||
action="store", dest="taskargs", nargs=2, metavar=('recipename', 'taskname'))
|
||||
|
||||
parser.add_argument("-s", "--signature",
|
||||
help="With -t/--task, specify the signatures to look for instead of taking the last two",
|
||||
action="store", dest="sigargs", nargs=2, metavar=('fromsig', 'tosig'))
|
||||
help="With -t/--task, specify the signatures to look for instead of taking the last two",
|
||||
action="store", dest="sigargs", nargs=2, metavar=('fromsig', 'tosig'))
|
||||
|
||||
parser.add_argument("sigdatafile1",
|
||||
help="First signature file to compare (or signature file to dump, if second not specified). Not used when using -t/--task.",
|
||||
action="store", nargs='?')
|
||||
help="First signature file to compare (or signature file to dump, if second not specified). Not used when using -t/--task.",
|
||||
action="store", nargs='?')
|
||||
|
||||
parser.add_argument("sigdatafile2",
|
||||
help="Second signature file to compare",
|
||||
action="store", nargs='?')
|
||||
help="Second signature file to compare",
|
||||
action="store", nargs='?')
|
||||
|
||||
options = parser.parse_args()
|
||||
if is_dump:
|
||||
@@ -160,8 +157,7 @@ if options.taskargs:
|
||||
with bb.tinfoil.Tinfoil() as tinfoil:
|
||||
tinfoil.prepare(config_only=True)
|
||||
if not options.dump and options.sigargs:
|
||||
files = find_siginfo_task(tinfoil, options.taskargs[0], options.taskargs[1], options.sigargs[0],
|
||||
options.sigargs[1])
|
||||
files = find_siginfo_task(tinfoil, options.taskargs[0], options.taskargs[1], options.sigargs[0], options.sigargs[1])
|
||||
else:
|
||||
files = find_siginfo_task(tinfoil, options.taskargs[0], options.taskargs[1])
|
||||
|
||||
@@ -170,8 +166,7 @@ if options.taskargs:
|
||||
output = bb.siggen.dump_sigfile(files[-1])
|
||||
else:
|
||||
if len(files) < 2:
|
||||
logger.error('Only one matching sigdata file found for the specified task (%s %s)' % (
|
||||
options.taskargs[0], options.taskargs[1]))
|
||||
logger.error('Only one matching sigdata file found for the specified task (%s %s)' % (options.taskargs[0], options.taskargs[1]))
|
||||
sys.exit(1)
|
||||
|
||||
# Recurse into signature comparison
|
||||
|
||||
@@ -25,7 +25,6 @@ if __name__ == "__main__":
|
||||
parser.add_argument('-u', '--unexpand', help='Do not expand the value (with --value)', action="store_true")
|
||||
parser.add_argument('-f', '--flag', help='Specify a variable flag to query (with --value)', default=None)
|
||||
parser.add_argument('--value', help='Only report the value, no history and no variable name', action="store_true")
|
||||
parser.add_argument('-q', '--quiet', help='Silence bitbake server logging', action="store_true")
|
||||
args = parser.parse_args()
|
||||
|
||||
if args.unexpand and not args.value:
|
||||
@@ -36,10 +35,9 @@ if __name__ == "__main__":
|
||||
print("--flag only makes sense with --value")
|
||||
sys.exit(1)
|
||||
|
||||
quiet = args.quiet
|
||||
with bb.tinfoil.Tinfoil(tracking=True, setup_logging=not quiet) as tinfoil:
|
||||
with bb.tinfoil.Tinfoil(tracking=True) as tinfoil:
|
||||
if args.recipe:
|
||||
tinfoil.prepare(quiet=3 if quiet else 2)
|
||||
tinfoil.prepare(quiet=2)
|
||||
d = tinfoil.parse_recipe(args.recipe)
|
||||
else:
|
||||
tinfoil.prepare(quiet=2, config_only=True)
|
||||
|
||||
@@ -56,24 +56,25 @@ def main():
|
||||
nonlocal missed_hashes
|
||||
nonlocal max_time
|
||||
|
||||
with hashserv.create_client(args.address) as client:
|
||||
for i in range(args.requests):
|
||||
taskhash = hashlib.sha256()
|
||||
taskhash.update(args.taskhash_seed.encode('utf-8'))
|
||||
taskhash.update(str(i).encode('utf-8'))
|
||||
client = hashserv.create_client(args.address)
|
||||
|
||||
start_time = time.perf_counter()
|
||||
l = client.get_unihash(METHOD, taskhash.hexdigest())
|
||||
elapsed = time.perf_counter() - start_time
|
||||
for i in range(args.requests):
|
||||
taskhash = hashlib.sha256()
|
||||
taskhash.update(args.taskhash_seed.encode('utf-8'))
|
||||
taskhash.update(str(i).encode('utf-8'))
|
||||
|
||||
with lock:
|
||||
if l:
|
||||
found_hashes += 1
|
||||
else:
|
||||
missed_hashes += 1
|
||||
start_time = time.perf_counter()
|
||||
l = client.get_unihash(METHOD, taskhash.hexdigest())
|
||||
elapsed = time.perf_counter() - start_time
|
||||
|
||||
max_time = max(elapsed, max_time)
|
||||
pbar.update()
|
||||
with lock:
|
||||
if l:
|
||||
found_hashes += 1
|
||||
else:
|
||||
missed_hashes += 1
|
||||
|
||||
max_time = max(elapsed, max_time)
|
||||
pbar.update()
|
||||
|
||||
max_time = 0
|
||||
found_hashes = 0
|
||||
@@ -151,8 +152,9 @@ def main():
|
||||
|
||||
func = getattr(args, 'func', None)
|
||||
if func:
|
||||
with hashserv.create_client(args.address) as client:
|
||||
return func(args, client)
|
||||
client = hashserv.create_client(args.address)
|
||||
|
||||
return func(args, client)
|
||||
|
||||
return 0
|
||||
|
||||
|
||||
@@ -68,11 +68,11 @@ def main():
|
||||
|
||||
registered = False
|
||||
for plugin in plugins:
|
||||
if hasattr(plugin, 'tinfoil_init'):
|
||||
plugin.tinfoil_init(tinfoil)
|
||||
if hasattr(plugin, 'register_commands'):
|
||||
registered = True
|
||||
plugin.register_commands(subparsers)
|
||||
if hasattr(plugin, 'tinfoil_init'):
|
||||
plugin.tinfoil_init(tinfoil)
|
||||
|
||||
if not registered:
|
||||
logger.error("No commands registered - missing plugins?")
|
||||
|
||||
@@ -1,7 +1,5 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
|
||||
@@ -12,9 +12,8 @@ warnings.simplefilter("default")
|
||||
import logging
|
||||
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), 'lib'))
|
||||
|
||||
import bb
|
||||
|
||||
bb.utils.check_system_locale()
|
||||
if sys.getfilesystemencoding() != "utf-8":
|
||||
sys.exit("Please use a locale setting which supports UTF-8 (such as LANG=en_US.UTF-8).\nPython can't change the filesystem locale after loading so we need a UTF-8 when Python starts or things won't work.")
|
||||
|
||||
# Users shouldn't be running this code directly
|
||||
if len(sys.argv) != 10 or not sys.argv[1].startswith("decafbad"):
|
||||
|
||||
@@ -1,7 +1,5 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
@@ -24,7 +22,8 @@ import subprocess
|
||||
from multiprocessing import Lock
|
||||
from threading import Thread
|
||||
|
||||
bb.utils.check_system_locale()
|
||||
if sys.getfilesystemencoding() != "utf-8":
|
||||
sys.exit("Please use a locale setting which supports UTF-8 (such as LANG=en_US.UTF-8).\nPython can't change the filesystem locale after loading so we need a UTF-8 when Python starts or things won't work.")
|
||||
|
||||
# Users shouldn't be running this code directly
|
||||
if len(sys.argv) != 2 or not sys.argv[1].startswith("decafbad"):
|
||||
@@ -91,19 +90,19 @@ def worker_fire_prepickled(event):
|
||||
worker_thread_exit = False
|
||||
|
||||
def worker_flush(worker_queue):
|
||||
worker_queue_int = bytearray()
|
||||
worker_queue_int = b""
|
||||
global worker_pipe, worker_thread_exit
|
||||
|
||||
while True:
|
||||
try:
|
||||
worker_queue_int.extend(worker_queue.get(True, 1))
|
||||
worker_queue_int = worker_queue_int + worker_queue.get(True, 1)
|
||||
except queue.Empty:
|
||||
pass
|
||||
while (worker_queue_int or not worker_queue.empty()):
|
||||
try:
|
||||
(_, ready, _) = select.select([], [worker_pipe], [], 1)
|
||||
if not worker_queue.empty():
|
||||
worker_queue_int.extend(worker_queue.get())
|
||||
worker_queue_int = worker_queue_int + worker_queue.get()
|
||||
written = os.write(worker_pipe, worker_queue_int)
|
||||
worker_queue_int = worker_queue_int[written:]
|
||||
except (IOError, OSError) as e:
|
||||
@@ -338,12 +337,12 @@ class runQueueWorkerPipe():
|
||||
if pipeout:
|
||||
pipeout.close()
|
||||
bb.utils.nonblockingfd(self.input)
|
||||
self.queue = bytearray()
|
||||
self.queue = b""
|
||||
|
||||
def read(self):
|
||||
start = len(self.queue)
|
||||
try:
|
||||
self.queue.extend(self.input.read(102400) or b"")
|
||||
self.queue = self.queue + (self.input.read(102400) or b"")
|
||||
except (OSError, IOError) as e:
|
||||
if e.errno != errno.EAGAIN:
|
||||
raise
|
||||
@@ -371,7 +370,7 @@ class BitbakeWorker(object):
|
||||
def __init__(self, din):
|
||||
self.input = din
|
||||
bb.utils.nonblockingfd(self.input)
|
||||
self.queue = bytearray()
|
||||
self.queue = b""
|
||||
self.cookercfg = None
|
||||
self.databuilder = None
|
||||
self.data = None
|
||||
@@ -405,7 +404,7 @@ class BitbakeWorker(object):
|
||||
if len(r) == 0:
|
||||
# EOF on pipe, server must have terminated
|
||||
self.sigterm_exception(signal.SIGTERM, None)
|
||||
self.queue.extend(r)
|
||||
self.queue = self.queue + r
|
||||
except (OSError, IOError):
|
||||
pass
|
||||
if len(self.queue):
|
||||
|
||||
@@ -1,7 +1,5 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
|
||||
@@ -424,8 +424,8 @@ This fetcher supports the following parameters:

-  *"nobranch":* Tells the fetcher to not check the SHA validation for
   the branch when set to "1". The default is "0". Set this option for
   the recipe that refers to the commit that is valid for any namespace
   (branch, tag, ...) instead of the branch.
   the recipe that refers to the commit that is valid for a tag instead
   of the branch.

-  *"bareclone":* Tells the fetcher to clone a bare clone into the
   destination directory without checking out a working tree. Only the
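
Illustration (not part of the diff above): a minimal sketch of the ``nobranch``
option, assuming a hypothetical repository URL and a commit that is only
reachable through a tag rather than a branch::

   SRC_URI = "git://git.example.com/myproject.git;protocol=https;nobranch=1"
   SRCREV = "0123456789abcdef0123456789abcdef01234567"
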
@@ -688,8 +688,6 @@ Here is an example URL::

It can also be used when setting mirrors definitions using the :term:`PREMIRRORS` variable.

.. _crate-fetcher:

Crate Fetcher (``crate://``)
----------------------------

@@ -706,80 +704,6 @@ Here is an example URL::

   SRC_URI = "crate://crates.io/glob/0.2.11"

.. _npm-fetcher:

NPM Fetcher (``npm://``)
------------------------

This submodule fetches source code from an
`NPM <https://en.wikipedia.org/wiki/Npm_(software)>`__
Javascript package registry.

The format for the :term:`SRC_URI` setting must be::

   SRC_URI = "npm://some.registry.url;ParameterA=xxx;ParameterB=xxx;..."

This fetcher supports the following parameters:

-  *"package":* The NPM package name. This is a mandatory parameter.

-  *"version":* The NPM package version. This is a mandatory parameter.

-  *"downloadfilename":* Specifies the filename used when storing the downloaded file.

-  *"destsuffix":* Specifies the directory to use to unpack the package (default: ``npm``).

Note that NPM fetcher only fetches the package source itself. The dependencies
can be fetched through the `npmsw-fetcher`_.

Here is an example URL with both fetchers::

   SRC_URI = " \
       npm://registry.npmjs.org/;package=cute-files;version=${PV} \
       npmsw://${THISDIR}/${BPN}/npm-shrinkwrap.json \
       "

See :yocto_docs:`Creating Node Package Manager (NPM) Packages
</dev-manual/common-tasks.html#creating-node-package-manager-npm-packages>`
in the Yocto Project manual for details about using
:yocto_docs:`devtool <https://docs.yoctoproject.org/ref-manual/devtool-reference.html>`
to automatically create a recipe from an NPM URL.

.. _npmsw-fetcher:

NPM shrinkwrap Fetcher (``npmsw://``)
-------------------------------------

This submodule fetches source code from an
`NPM shrinkwrap <https://docs.npmjs.com/cli/v8/commands/npm-shrinkwrap>`__
description file, which lists the dependencies
of an NPM package while locking their versions.

The format for the :term:`SRC_URI` setting must be::

   SRC_URI = "npmsw://some.registry.url;ParameterA=xxx;ParameterB=xxx;..."

This fetcher supports the following parameters:

-  *"dev":* Set this parameter to ``1`` to install "devDependencies".

-  *"destsuffix":* Specifies the directory to use to unpack the dependencies
   (``${S}`` by default).

Note that the shrinkwrap file can also be provided by the recipe for
the package which has such dependencies, for example::

   SRC_URI = " \
       npm://registry.npmjs.org/;package=cute-files;version=${PV} \
       npmsw://${THISDIR}/${BPN}/npm-shrinkwrap.json \
       "

Such a file can automatically be generated using
:yocto_docs:`devtool <https://docs.yoctoproject.org/ref-manual/devtool-reference.html>`
as described in the :yocto_docs:`Creating Node Package Manager (NPM) Packages
</dev-manual/common-tasks.html#creating-node-package-manager-npm-packages>`
section of the Yocto Project.

Other Fetchers
--------------

@@ -789,6 +713,8 @@ Fetch submodules also exist for the following:

-  Mercurial (``hg://``)

-  npm (``npm://``)

-  OSC (``osc://``)

-  Secure FTP (``sftp://``)

@@ -195,45 +195,22 @@ value. However, if ``A`` is not set, the variable is set to "aval".

Setting a weak default value (??=)
----------------------------------

The weak default value of a variable is the value which that variable
will expand to if no value has been assigned to it via any of the other
assignment operators. The "??=" operator takes effect immediately, replacing
any previously defined weak default value. Here is an example::
It is possible to use a "weaker" assignment than in the previous section
by using the "??=" operator. This assignment behaves identical to "?="
except that the assignment is made at the end of the parsing process
rather than immediately. Consequently, when multiple "??=" assignments
exist, the last one is used. Also, any "=" or "?=" assignment will
override the value set with "??=". Here is an example::

   W ??= "x"
   A := "${W}" # Immediate variable expansion
   W ??= "y"
   B := "${W}" # Immediate variable expansion
   W ??= "z"
   C = "${W}"
   W ?= "i"
   A ??= "somevalue"
   A ??= "someothervalue"

After parsing we will have::
If ``A`` is set before the above statements are
parsed, the variable retains its value. If ``A`` is not set, the
variable is set to "someothervalue".

   A = "x"
   B = "y"
   C = "i"
   W = "i"

Appending and prepending non-override style will not substitute the weak
default value, which means that after parsing::

   W ??= "x"
   W += "y"

we will have::

   W = " y"

On the other hand, override-style appends/prepends/removes are applied after
any active weak default value has been substituted::

   W ??= "x"
   W:append = "y"

After parsing we will have::

   W = "xy"
Again, this assignment is a "lazy" or "weak" assignment because it does
not occur until the end of the parsing process.

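Illustration (not part of the hunk above): a minimal sketch of the rule,
consistent with what the text above states, that a "?=" (or "=") assignment
wins over a "??=" weak default::

   A ??= "weak"
   A ?= "default"

After parsing, ``A`` expands to "default" regardless of the order of the two lines.
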
Immediate variable expansion (:=)
---------------------------------

@@ -401,7 +401,7 @@ overview of their function and contents.

   Example usage::

      BB_HASHSERVE_UPSTREAM = "hashserv.yoctoproject.org:8686"
      BB_HASHSERVE_UPSTREAM = "typhoon.yocto.io:8687"

:term:`BB_INVALIDCONF`
   Used in combination with the ``ConfigParsed`` event to trigger

@@ -3,8 +3,6 @@
|
||||
#
|
||||
# Copyright (C) 2006 Tim Ansell
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
# Please Note:
|
||||
# Be careful when using mutable types (ie Dict and Lists) - operations involving these are SLOW.
|
||||
# Assign a file to __warn__ to get warnings about slow operations.
|
||||
|
||||
@@ -15,13 +15,6 @@ import sys
|
||||
if sys.version_info < (3, 6, 0):
|
||||
raise RuntimeError("Sorry, python 3.6.0 or later is required for this version of bitbake")
|
||||
|
||||
if sys.version_info < (3, 10, 0):
|
||||
# With python 3.8 and 3.9, we see errors of "libgcc_s.so.1 must be installed for pthread_cancel to work"
|
||||
# https://stackoverflow.com/questions/64797838/libgcc-s-so-1-must-be-installed-for-pthread-cancel-to-work
|
||||
# https://bugs.ams1.psf.io/issue42888
|
||||
# so ensure libgcc_s is loaded early on
|
||||
import ctypes
|
||||
libgcc_s = ctypes.CDLL('libgcc_s.so.1')
|
||||
|
||||
class BBHandledException(Exception):
|
||||
"""
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
@@ -31,17 +29,7 @@ class AsyncClient(object):
|
||||
|
||||
async def connect_unix(self, path):
|
||||
async def connect_sock():
|
||||
# AF_UNIX has path length issues so chdir here to workaround
|
||||
cwd = os.getcwd()
|
||||
try:
|
||||
os.chdir(os.path.dirname(path))
|
||||
# The socket must be opened synchronously so that CWD doesn't get
|
||||
# changed out from underneath us so we pass as a sock into asyncio
|
||||
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
|
||||
sock.connect(os.path.basename(path))
|
||||
finally:
|
||||
os.chdir(cwd)
|
||||
return await asyncio.open_unix_connection(sock=sock)
|
||||
return await asyncio.open_unix_connection(path)
|
||||
|
||||
self._connect_sock = connect_sock
|
||||
|
||||
@@ -126,12 +114,6 @@ class AsyncClient(object):
|
||||
{'ping': {}}
|
||||
)
|
||||
|
||||
async def __aenter__(self):
|
||||
return self
|
||||
|
||||
async def __aexit__(self, exc_type, exc_value, traceback):
|
||||
await self.close()
|
||||
|
||||
|
||||
class Client(object):
|
||||
def __init__(self):
|
||||
@@ -166,8 +148,14 @@ class Client(object):
|
||||
setattr(self, m, self._get_downcall_wrapper(downcall))
|
||||
|
||||
def connect_unix(self, path):
|
||||
self.loop.run_until_complete(self.client.connect_unix(path))
|
||||
self.loop.run_until_complete(self.client.connect())
|
||||
# AF_UNIX has path length issues so chdir here to workaround
|
||||
cwd = os.getcwd()
|
||||
try:
|
||||
os.chdir(os.path.dirname(path))
|
||||
self.loop.run_until_complete(self.client.connect_unix(os.path.basename(path)))
|
||||
self.loop.run_until_complete(self.client.connect())
|
||||
finally:
|
||||
os.chdir(cwd)
|
||||
|
||||
@property
|
||||
def max_chunk(self):
|
||||
@@ -182,10 +170,3 @@ class Client(object):
|
||||
if sys.version_info >= (3, 6):
|
||||
self.loop.run_until_complete(self.loop.shutdown_asyncgens())
|
||||
self.loop.close()
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_value, traceback):
|
||||
self.close()
|
||||
return False
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
|
||||
@@ -835,7 +835,11 @@ def stamp_cleanmask_internal(taskname, d, file_name):
|
||||
|
||||
return [cleanmask, cleanmask.replace(taskflagname, taskflagname + "_setscene")]
|
||||
|
||||
def clean_stamp(task, d, file_name = None):
|
||||
def make_stamp(task, d, file_name = None):
|
||||
"""
|
||||
Creates/updates a stamp for a given task
|
||||
(d can be a data dict or dataCache)
|
||||
"""
|
||||
cleanmask = stamp_cleanmask_internal(task, d, file_name)
|
||||
for mask in cleanmask:
|
||||
for name in glob.glob(mask):
|
||||
@@ -846,14 +850,6 @@ def clean_stamp(task, d, file_name = None):
|
||||
if name.endswith('.taint'):
|
||||
continue
|
||||
os.unlink(name)
|
||||
return
|
||||
|
||||
def make_stamp(task, d, file_name = None):
|
||||
"""
|
||||
Creates/updates a stamp for a given task
|
||||
(d can be a data dict or dataCache)
|
||||
"""
|
||||
clean_stamp(task, d, file_name)
|
||||
|
||||
stamp = stamp_internal(task, d, file_name)
|
||||
# Remove the file and recreate to force timestamp
|
||||
|
||||
@@ -27,7 +27,7 @@ import re
|
||||
|
||||
logger = logging.getLogger("BitBake.Cache")
|
||||
|
||||
__cache_version__ = "155"
|
||||
__cache_version__ = "154"
|
||||
|
||||
def getCacheFile(path, filename, mc, data_hash):
|
||||
mcspec = ''
|
||||
@@ -619,7 +619,7 @@ class Cache(NoCache):
|
||||
for f in flist:
|
||||
if not f:
|
||||
continue
|
||||
f, exist = f.rsplit(":", 1)
|
||||
f, exist = f.split(":")
|
||||
if (exist == "True" and not os.path.exists(f)) or (exist == "False" and os.path.exists(f)):
|
||||
self.logger.debug2("%s's file checksum list file %s changed",
|
||||
fn, f)
|
||||
|
||||
@@ -11,13 +11,10 @@ import os
|
||||
import stat
|
||||
import bb.utils
|
||||
import logging
|
||||
import re
|
||||
from bb.cache import MultiProcessCache
|
||||
|
||||
logger = logging.getLogger("BitBake.Cache")
|
||||
|
||||
filelist_regex = re.compile(r'(?:(?<=:True)|(?<=:False))\s+')
|
||||
|
||||
# mtime cache (non-persistent)
|
||||
# based upon the assumption that files do not change during bitbake run
|
||||
class FileMtimeCache(object):
|
||||
@@ -112,12 +109,7 @@ class FileChecksumCache(MultiProcessCache):
|
||||
return dirchecksums
|
||||
|
||||
checksums = []
|
||||
for pth in filelist_regex.split(filelist):
|
||||
if not pth:
|
||||
continue
|
||||
pth = pth.strip()
|
||||
if not pth:
|
||||
continue
|
||||
for pth in filelist.split():
|
||||
exist = pth.split(":")[1]
|
||||
if exist == "False":
|
||||
continue
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
@@ -201,22 +199,6 @@ class DummyLogger():
|
||||
def flush(self):
|
||||
return
|
||||
|
||||
|
||||
# Starting with Python 3.8, the ast module exposes all string nodes as a
|
||||
# Constant. While earlier versions of the module also have the Constant type
|
||||
# those use the Str type to encapsulate strings.
|
||||
if sys.version_info < (3, 8):
|
||||
def node_str_value(node):
|
||||
if isinstance(node, ast.Str):
|
||||
return node.s
|
||||
return None
|
||||
else:
|
||||
def node_str_value(node):
|
||||
if isinstance(node, ast.Constant) and isinstance(node.value, str):
|
||||
return node.value
|
||||
return None
|
||||
|
||||
|
||||
class PythonParser():
|
||||
getvars = (".getVar", ".appendVar", ".prependVar", "oe.utils.conditional")
|
||||
getvarflags = (".getVarFlag", ".appendVarFlag", ".prependVarFlag")
|
||||
@@ -241,22 +223,19 @@ class PythonParser():
|
||||
def visit_Call(self, node):
|
||||
name = self.called_node_name(node.func)
|
||||
if name and (name.endswith(self.getvars) or name.endswith(self.getvarflags) or name in self.containsfuncs or name in self.containsanyfuncs):
|
||||
varname = node_str_value(node.args[0])
|
||||
if varname is not None:
|
||||
arg_str_value = None
|
||||
if len(node.args) >= 2:
|
||||
arg_str_value = node_str_value(node.args[1])
|
||||
if name in self.containsfuncs and arg_str_value is not None:
|
||||
if isinstance(node.args[0], ast.Str):
|
||||
varname = node.args[0].s
|
||||
if name in self.containsfuncs and isinstance(node.args[1], ast.Str):
|
||||
if varname not in self.contains:
|
||||
self.contains[varname] = set()
|
||||
self.contains[varname].add(arg_str_value)
|
||||
elif name in self.containsanyfuncs and arg_str_value is not None:
|
||||
self.contains[varname].add(node.args[1].s)
|
||||
elif name in self.containsanyfuncs and isinstance(node.args[1], ast.Str):
|
||||
if varname not in self.contains:
|
||||
self.contains[varname] = set()
|
||||
self.contains[varname].update(arg_str_value.split())
|
||||
self.contains[varname].update(node.args[1].s.split())
|
||||
elif name.endswith(self.getvarflags):
|
||||
if arg_str_value is not None:
|
||||
self.references.add('%s[%s]' % (varname, arg_str_value))
|
||||
if isinstance(node.args[1], ast.Str):
|
||||
self.references.add('%s[%s]' % (varname, node.args[1].s))
|
||||
else:
|
||||
self.warn(node.func, node.args[1])
|
||||
else:
|
||||
@@ -264,10 +243,10 @@ class PythonParser():
|
||||
else:
|
||||
self.warn(node.func, node.args[0])
|
||||
elif name and name.endswith(".expand"):
|
||||
arg_str_value = node_str_value(node.args[0])
|
||||
if arg_str_value is not None:
|
||||
if isinstance(node.args[0], ast.Str):
|
||||
value = node.args[0].s
|
||||
d = bb.data.init()
|
||||
parser = d.expandWithRefs(arg_str_value, self.name)
|
||||
parser = d.expandWithRefs(value, self.name)
|
||||
self.references |= parser.references
|
||||
self.execs |= parser.execs
|
||||
for varname in parser.contains:
|
||||
@@ -275,9 +254,8 @@ class PythonParser():
|
||||
self.contains[varname] = set()
|
||||
self.contains[varname] |= parser.contains[varname]
|
||||
elif name in self.execfuncs:
|
||||
arg_str_value = node_str_value(node.args[0])
|
||||
if arg_str_value is not None:
|
||||
self.var_execs.add(arg_str_value)
|
||||
if isinstance(node.args[0], ast.Str):
|
||||
self.var_execs.add(node.args[0].s)
|
||||
else:
|
||||
self.warn(node.func, node.args[0])
|
||||
elif name and isinstance(node.func, (ast.Name, ast.Attribute)):
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
# Helper library to implement streaming compression and decompression using an
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
|
||||
@@ -13,6 +13,7 @@ import sys, os, glob, os.path, re, time
|
||||
import itertools
|
||||
import logging
|
||||
import multiprocessing
|
||||
import sre_constants
|
||||
import threading
|
||||
from io import StringIO, UnsupportedOperation
|
||||
from contextlib import closing
|
||||
@@ -743,18 +744,19 @@ class BBCooker:
|
||||
taskdata[mc].add_unresolved(localdata[mc], self.recipecaches[mc])
|
||||
mcdeps |= set(taskdata[mc].get_mcdepends())
|
||||
new = False
|
||||
for k in mcdeps:
|
||||
if k in seen:
|
||||
continue
|
||||
l = k.split(':')
|
||||
depmc = l[2]
|
||||
if depmc not in self.multiconfigs:
|
||||
bb.fatal("Multiconfig dependency %s depends on nonexistent multiconfig configuration named configuration %s" % (k,depmc))
|
||||
else:
|
||||
logger.debug("Adding providers for multiconfig dependency %s" % l[3])
|
||||
taskdata[depmc].add_provider(localdata[depmc], self.recipecaches[depmc], l[3])
|
||||
seen.add(k)
|
||||
new = True
|
||||
for mc in self.multiconfigs:
|
||||
for k in mcdeps:
|
||||
if k in seen:
|
||||
continue
|
||||
l = k.split(':')
|
||||
depmc = l[2]
|
||||
if depmc not in self.multiconfigs:
|
||||
bb.fatal("Multiconfig dependency %s depends on nonexistent multiconfig configuration named configuration %s" % (k,depmc))
|
||||
else:
|
||||
logger.debug("Adding providers for multiconfig dependency %s" % l[3])
|
||||
taskdata[depmc].add_provider(localdata[depmc], self.recipecaches[depmc], l[3])
|
||||
seen.add(k)
|
||||
new = True
|
||||
|
||||
for mc in self.multiconfigs:
|
||||
taskdata[mc].add_unresolved(localdata[mc], self.recipecaches[mc])
|
||||
@@ -1906,7 +1908,7 @@ class CookerCollectFiles(object):
|
||||
try:
|
||||
re.compile(mask)
|
||||
bbmasks.append(mask)
|
||||
except re.error:
|
||||
except sre_constants.error:
|
||||
collectlog.critical("BBMASK contains an invalid regular expression, ignoring: %s" % mask)
|
||||
|
||||
# Then validate the combined regular expressions. This should never
|
||||
@@ -1914,7 +1916,7 @@ class CookerCollectFiles(object):
|
||||
bbmask = "|".join(bbmasks)
|
||||
try:
|
||||
bbmask_compiled = re.compile(bbmask)
|
||||
except re.error:
|
||||
except sre_constants.error:
|
||||
collectlog.critical("BBMASK is not a valid regular expression, ignoring: %s" % bbmask)
|
||||
bbmask = None
|
||||
|
||||
|
||||
@@ -160,7 +160,12 @@ def catch_parse_error(func):
|
||||
def wrapped(fn, *args):
|
||||
try:
|
||||
return func(fn, *args)
|
||||
except Exception as exc:
|
||||
except IOError as exc:
|
||||
import traceback
|
||||
parselog.critical(traceback.format_exc())
|
||||
parselog.critical("Unable to parse %s: %s" % (fn, exc))
|
||||
raise bb.BBHandledException()
|
||||
except bb.data_smart.ExpansionError as exc:
|
||||
import traceback
|
||||
|
||||
bbdir = os.path.dirname(__file__) + os.sep
|
||||
@@ -172,6 +177,9 @@ def catch_parse_error(func):
|
||||
break
|
||||
parselog.critical("Unable to parse %s" % fn, exc_info=(exc_class, exc, tb))
|
||||
raise bb.BBHandledException()
|
||||
except bb.parse.ParseError as exc:
|
||||
parselog.critical(str(exc))
|
||||
raise bb.BBHandledException()
|
||||
return wrapped
|
||||
|
||||
@catch_parse_error
|
||||
@@ -293,9 +301,14 @@ class CookerDataBuilder(object):
|
||||
bb.event.fire(bb.event.MultiConfigParsed(self.mcdata), self.data)
|
||||
|
||||
self.data_hash = data_hash.hexdigest()
|
||||
except (SyntaxError, bb.BBHandledException):
|
||||
raise bb.BBHandledException()
|
||||
except bb.data_smart.ExpansionError as e:
|
||||
logger.error(str(e))
|
||||
raise bb.BBHandledException()
|
||||
except Exception:
|
||||
logger.exception("Error parsing configuration files")
|
||||
raise bb.BBHandledException()
|
||||
|
||||
|
||||
# Handle obsolete variable names
|
||||
@@ -422,7 +435,7 @@ class CookerDataBuilder(object):
|
||||
msg += (" and bitbake did not find a conf/bblayers.conf file in"
|
||||
" the expected location.\nMaybe you accidentally"
|
||||
" invoked bitbake from the wrong directory?")
|
||||
bb.fatal(msg)
|
||||
raise SystemExit(msg)
|
||||
|
||||
if not data.getVar("TOPDIR"):
|
||||
data.setVar("TOPDIR", os.path.abspath(os.getcwd()))
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
|
||||
@@ -277,8 +277,6 @@ def build_dependencies(key, keys, shelldeps, varflagsexcl, ignored_vars, d):
|
||||
try:
|
||||
if key[-1] == ']':
|
||||
vf = key[:-1].split('[')
|
||||
if vf[1] == "vardepvalueexclude":
|
||||
return deps, ""
|
||||
value, parser = d.getVarFlag(vf[0], vf[1], False, retparser=True)
|
||||
deps |= parser.references
|
||||
deps = deps | (keys & parser.execs)
|
||||
@@ -310,7 +308,6 @@ def build_dependencies(key, keys, shelldeps, varflagsexcl, ignored_vars, d):
|
||||
value += "\n_remove of %s" % r
|
||||
deps |= r2.references
|
||||
deps = deps | (keys & r2.execs)
|
||||
value = handle_contains(value, r2.contains, exclusions, d)
|
||||
return value
|
||||
|
||||
if "vardepvalue" in varflags:
|
||||
|
||||
@@ -261,9 +261,12 @@ class VariableHistory(object):
|
||||
return
|
||||
if 'op' not in loginfo or not loginfo['op']:
|
||||
loginfo['op'] = 'set'
|
||||
if 'detail' in loginfo:
|
||||
loginfo['detail'] = str(loginfo['detail'])
|
||||
if 'variable' not in loginfo or 'file' not in loginfo:
|
||||
raise ValueError("record() missing variable or file.")
|
||||
var = loginfo['variable']
|
||||
|
||||
if var not in self.variables:
|
||||
self.variables[var] = []
|
||||
if not isinstance(self.variables[var], list):
|
||||
@@ -322,8 +325,7 @@ class VariableHistory(object):
|
||||
flag = '[%s] ' % (event['flag'])
|
||||
else:
|
||||
flag = ''
|
||||
o.write("# %s %s:%s%s\n# %s\"%s\"\n" % \
|
||||
(event['op'], event['file'], event['line'], display_func, flag, re.sub('\n', '\n# ', str(event['detail']))))
|
||||
o.write("# %s %s:%s%s\n# %s\"%s\"\n" % (event['op'], event['file'], event['line'], display_func, flag, re.sub('\n', '\n# ', event['detail'])))
|
||||
if len(history) > 1:
|
||||
o.write("# pre-expansion value:\n")
|
||||
o.write('# "%s"\n' % (commentVal))
|
||||
@@ -377,7 +379,7 @@ class VariableHistory(object):
|
||||
if isset and event['op'] == 'set?':
|
||||
continue
|
||||
isset = True
|
||||
items = d.expand(str(event['detail'])).split()
|
||||
items = d.expand(event['detail']).split()
|
||||
for item in items:
|
||||
# This is a little crude but is belt-and-braces to avoid us
|
||||
# having to handle every possible operation type specifically
|
||||
|
||||
@@ -132,14 +132,8 @@ def print_ui_queue():
|
||||
if not _uiready:
|
||||
from bb.msg import BBLogFormatter
|
||||
# Flush any existing buffered content
|
||||
try:
|
||||
sys.stdout.flush()
|
||||
except:
|
||||
pass
|
||||
try:
|
||||
sys.stderr.flush()
|
||||
except:
|
||||
pass
|
||||
sys.stdout.flush()
|
||||
sys.stderr.flush()
|
||||
stdout = logging.StreamHandler(sys.stdout)
|
||||
stderr = logging.StreamHandler(sys.stderr)
|
||||
formatter = BBLogFormatter("%(levelname)s: %(message)s")
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
|
||||
@@ -498,30 +498,30 @@ def fetcher_init(d):
|
||||
Calls before this must not hit the cache.
|
||||
"""
|
||||
|
||||
with bb.persist_data.persist('BB_URI_HEADREVS', d) as revs:
|
||||
try:
|
||||
# fetcher_init is called multiple times, so make sure we only save the
|
||||
# revs the first time it is called.
|
||||
if not bb.fetch2.saved_headrevs:
|
||||
bb.fetch2.saved_headrevs = dict(revs)
|
||||
except:
|
||||
pass
|
||||
revs = bb.persist_data.persist('BB_URI_HEADREVS', d)
|
||||
try:
|
||||
# fetcher_init is called multiple times, so make sure we only save the
|
||||
# revs the first time it is called.
|
||||
if not bb.fetch2.saved_headrevs:
|
||||
bb.fetch2.saved_headrevs = dict(revs)
|
||||
except:
|
||||
pass
|
||||
|
||||
# When to drop SCM head revisions controlled by user policy
|
||||
srcrev_policy = d.getVar('BB_SRCREV_POLICY') or "clear"
|
||||
if srcrev_policy == "cache":
|
||||
logger.debug("Keeping SRCREV cache due to cache policy of: %s", srcrev_policy)
|
||||
elif srcrev_policy == "clear":
|
||||
logger.debug("Clearing SRCREV cache due to cache policy of: %s", srcrev_policy)
|
||||
revs.clear()
|
||||
else:
|
||||
raise FetchError("Invalid SRCREV cache policy of: %s" % srcrev_policy)
|
||||
# When to drop SCM head revisions controlled by user policy
|
||||
srcrev_policy = d.getVar('BB_SRCREV_POLICY') or "clear"
|
||||
if srcrev_policy == "cache":
|
||||
logger.debug("Keeping SRCREV cache due to cache policy of: %s", srcrev_policy)
|
||||
elif srcrev_policy == "clear":
|
||||
logger.debug("Clearing SRCREV cache due to cache policy of: %s", srcrev_policy)
|
||||
revs.clear()
|
||||
else:
|
||||
raise FetchError("Invalid SRCREV cache policy of: %s" % srcrev_policy)
|
||||
|
||||
_checksum_cache.init_cache(d)
|
||||
_checksum_cache.init_cache(d)
|
||||
|
||||
for m in methods:
|
||||
if hasattr(m, "init"):
|
||||
m.init(d)
|
||||
for m in methods:
|
||||
if hasattr(m, "init"):
|
||||
m.init(d)
|
||||
|
||||
def fetcher_parse_save():
|
||||
_checksum_cache.save_extras()
|
||||
@@ -535,8 +535,8 @@ def fetcher_compare_revisions(d):
|
||||
when bitbake was started and return true if they have changed.
|
||||
"""
|
||||
|
||||
with dict(bb.persist_data.persist('BB_URI_HEADREVS', d)) as headrevs:
|
||||
return headrevs != bb.fetch2.saved_headrevs
|
||||
headrevs = dict(bb.persist_data.persist('BB_URI_HEADREVS', d))
|
||||
return headrevs != bb.fetch2.saved_headrevs
|
||||
|
||||
def mirror_from_string(data):
|
||||
mirrors = (data or "").replace('\\n',' ').split()
|
||||
@@ -839,7 +839,6 @@ FETCH_EXPORT_VARS = ['HOME', 'PATH',
|
||||
'ALL_PROXY', 'all_proxy',
|
||||
'GIT_PROXY_COMMAND',
|
||||
'GIT_SSH',
|
||||
'GIT_SSH_COMMAND',
|
||||
'GIT_SSL_CAINFO',
|
||||
'GIT_SMART_HTTP',
|
||||
'SSH_AUTH_SOCK', 'SSH_AGENT_PID',
|
||||
@@ -1097,8 +1096,6 @@ def try_mirror_url(fetch, origud, ud, ld, check = False):
|
||||
|
||||
def ensure_symlink(target, link_name):
|
||||
if not os.path.exists(link_name):
|
||||
dirname = os.path.dirname(link_name)
|
||||
bb.utils.mkdirhier(dirname)
|
||||
if os.path.islink(link_name):
|
||||
# Broken symbolic link
|
||||
os.unlink(link_name)
|
||||
@@ -1618,13 +1615,13 @@ class FetchMethod(object):
|
||||
if not hasattr(self, "_latest_revision"):
|
||||
raise ParameterError("The fetcher for this URL does not support _latest_revision", ud.url)
|
||||
|
||||
with bb.persist_data.persist('BB_URI_HEADREVS', d) as revs:
|
||||
key = self.generate_revision_key(ud, d, name)
|
||||
try:
|
||||
return revs[key]
|
||||
except KeyError:
|
||||
revs[key] = rev = self._latest_revision(ud, d, name)
|
||||
return rev
|
||||
revs = bb.persist_data.persist('BB_URI_HEADREVS', d)
|
||||
key = self.generate_revision_key(ud, d, name)
|
||||
try:
|
||||
return revs[key]
|
||||
except KeyError:
|
||||
revs[key] = rev = self._latest_revision(ud, d, name)
|
||||
return rev
|
||||
|
||||
def sortable_revision(self, ud, d, name):
|
||||
latest_rev = self._build_revision(ud, d, name)
|
||||
|
||||
@@ -13,6 +13,7 @@ BitBake 'Fetch' implementation for crates.io
|
||||
import hashlib
|
||||
import json
|
||||
import os
|
||||
import shutil
|
||||
import subprocess
|
||||
import bb
|
||||
from bb.fetch2 import logger, subprocess_setup, UnpackError
|
||||
@@ -71,7 +72,7 @@ class Crate(Wget):
|
||||
ud.parm['downloadfilename'] = "%s-%s.crate" % (name, version)
|
||||
ud.parm['name'] = name
|
||||
|
||||
logger.debug("Fetching %s to %s" % (ud.url, ud.parm['downloadfilename']))
|
||||
logger.debug(2, "Fetching %s to %s" % (ud.url, ud.parm['downloadfilename']))
|
||||
|
||||
def unpack(self, ud, rootdir, d):
|
||||
"""
|
||||
|
||||
@@ -44,8 +44,7 @@ Supported SRC_URI options are:
|
||||
|
||||
- nobranch
|
||||
Don't check the SHA validation for branch. set this option for the recipe
|
||||
referring to commit which is valid in any namespace (branch, tag, ...)
|
||||
instead of branch.
|
||||
referring to commit which is valid in tag instead of branch.
|
||||
The default is "0", set nobranch=1 if needed.
|
||||
|
||||
- usehead
|
||||
@@ -241,7 +240,7 @@ class Git(FetchMethod):
|
||||
for name in ud.names:
|
||||
ud.unresolvedrev[name] = 'HEAD'
|
||||
|
||||
ud.basecmd = d.getVar("FETCHCMD_git") or "git -c core.fsyncobjectfiles=0 -c gc.autoDetach=false -c core.pager=cat"
|
||||
ud.basecmd = d.getVar("FETCHCMD_git") or "git -c core.fsyncobjectfiles=0 -c gc.autoDetach=false"
|
||||
|
||||
write_tarballs = d.getVar("BB_GENERATE_MIRROR_TARBALLS") or "0"
|
||||
ud.write_tarballs = write_tarballs != "0" or ud.rebaseable
|
||||
@@ -307,10 +306,7 @@ class Git(FetchMethod):
|
||||
return ud.clonedir
|
||||
|
||||
def need_update(self, ud, d):
|
||||
return self.clonedir_need_update(ud, d) \
|
||||
or self.shallow_tarball_need_update(ud) \
|
||||
or self.tarball_need_update(ud) \
|
||||
or self.lfs_need_update(ud, d)
|
||||
return self.clonedir_need_update(ud, d) or self.shallow_tarball_need_update(ud) or self.tarball_need_update(ud)
|
||||
|
||||
def clonedir_need_update(self, ud, d):
|
||||
if not os.path.exists(ud.clonedir):
|
||||
@@ -322,15 +318,6 @@ class Git(FetchMethod):
|
||||
return True
|
||||
return False
|
||||
|
||||
def lfs_need_update(self, ud, d):
|
||||
if self.clonedir_need_update(ud, d):
|
||||
return True
|
||||
|
||||
for name in ud.names:
|
||||
if not self._lfs_objects_downloaded(ud, d, name, ud.clonedir):
|
||||
return True
|
||||
return False
|
||||
|
||||
def clonedir_need_shallow_revs(self, ud, d):
|
||||
for rev in ud.shallow_revs:
|
||||
try:
|
||||
@@ -371,13 +358,9 @@ class Git(FetchMethod):
|
||||
|
||||
# If the repo still doesn't exist, fallback to cloning it
|
||||
if not os.path.exists(ud.clonedir):
|
||||
# We do this since git will use a "-l" option automatically for local urls where possible,
|
||||
# but it doesn't work when git/objects is a symlink, only works when it is a directory.
|
||||
# We do this since git will use a "-l" option automatically for local urls where possible
|
||||
if repourl.startswith("file://"):
|
||||
repourl_path = repourl[7:]
|
||||
objects = os.path.join(repourl_path, 'objects')
|
||||
if os.path.isdir(objects) and not os.path.islink(objects):
|
||||
repourl = repourl_path
|
||||
repourl = repourl[7:]
|
||||
clone_cmd = "LANG=C %s clone --bare --mirror %s %s --progress" % (ud.basecmd, shlex.quote(repourl), ud.clonedir)
|
||||
if ud.proto.lower() != 'file':
|
||||
bb.fetch2.check_network_access(d, clone_cmd, ud.url)
|
||||
@@ -391,11 +374,7 @@ class Git(FetchMethod):
|
||||
runfetchcmd("%s remote rm origin" % ud.basecmd, d, workdir=ud.clonedir)
|
||||
|
||||
runfetchcmd("%s remote add --mirror=fetch origin %s" % (ud.basecmd, shlex.quote(repourl)), d, workdir=ud.clonedir)
|
||||
|
||||
if ud.nobranch:
|
||||
fetch_cmd = "LANG=C %s fetch -f --progress %s refs/*:refs/*" % (ud.basecmd, shlex.quote(repourl))
|
||||
else:
|
||||
fetch_cmd = "LANG=C %s fetch -f --progress %s refs/heads/*:refs/heads/* refs/tags/*:refs/tags/*" % (ud.basecmd, shlex.quote(repourl))
|
||||
fetch_cmd = "LANG=C %s fetch -f --progress %s refs/*:refs/*" % (ud.basecmd, shlex.quote(repourl))
|
||||
if ud.proto.lower() != 'file':
|
||||
bb.fetch2.check_network_access(d, fetch_cmd, ud.url)
|
||||
progresshandler = GitProgressHandler(d)
|
||||
@@ -418,7 +397,7 @@ class Git(FetchMethod):
|
||||
if missing_rev:
|
||||
raise bb.fetch2.FetchError("Unable to find revision %s even from upstream" % missing_rev)
|
||||
|
||||
if self.lfs_need_update(ud, d):
|
||||
if self._contains_lfs(ud, d, ud.clonedir) and self._need_lfs(ud):
|
||||
# Unpack temporary working copy, use it to run 'git checkout' to force pre-fetching
|
||||
# of all LFS blobs needed at the srcrev.
|
||||
#
|
||||
@@ -485,7 +464,7 @@ class Git(FetchMethod):
|
||||
with create_atomic(ud.fullmirror) as tfile:
|
||||
mtime = runfetchcmd("git log --all -1 --format=%cD", d,
|
||||
quiet=True, workdir=ud.clonedir)
|
||||
runfetchcmd("tar -czf %s --owner oe:0 --group oe:0 --mtime \"%s\" ."
|
||||
runfetchcmd("tar -czf %s --owner pokybuild --group users --mtime \"%s\" ."
|
||||
% (tfile, mtime), d, workdir=ud.clonedir)
|
||||
runfetchcmd("touch %s.done" % ud.fullmirror, d)
|
||||
|
||||
@@ -661,35 +640,6 @@ class Git(FetchMethod):
|
||||
raise bb.fetch2.FetchError("The command '%s' gave output with more then 1 line unexpectedly, output: '%s'" % (cmd, output))
|
||||
return output.split()[0] != "0"
|
||||
|
||||
def _lfs_objects_downloaded(self, ud, d, name, wd):
|
||||
"""
|
||||
Verifies whether the LFS objects for requested revisions have already been downloaded
|
||||
"""
|
||||
# Bail out early if this repository doesn't use LFS
|
||||
if not self._need_lfs(ud) or not self._contains_lfs(ud, d, wd):
|
||||
return True
|
||||
|
||||
# The Git LFS specification specifies ([1]) the LFS folder layout so it should be safe to check for file
|
||||
# existence.
|
||||
# [1] https://github.com/git-lfs/git-lfs/blob/main/docs/spec.md#intercepting-git
|
||||
cmd = "%s lfs ls-files -l %s" \
|
||||
% (ud.basecmd, ud.revisions[name])
|
||||
output = runfetchcmd(cmd, d, quiet=True, workdir=wd).rstrip()
|
||||
# Do not do any further matching if no objects are managed by LFS
|
||||
if not output:
|
||||
return True
|
||||
|
||||
# Match all lines beginning with the hexadecimal OID
|
||||
oid_regex = re.compile("^(([a-fA-F0-9]{2})([a-fA-F0-9]{2})[A-Fa-f0-9]+)")
|
||||
for line in output.split("\n"):
|
||||
oid = re.search(oid_regex, line)
|
||||
if not oid:
|
||||
bb.warn("git lfs ls-files output '%s' did not match expected format." % line)
|
||||
if not os.path.exists(os.path.join(wd, "lfs", "objects", oid.group(2), oid.group(3), oid.group(1))):
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def _need_lfs(self, ud):
|
||||
return ud.parm.get("lfs", "1") == "1"
|
||||
|
||||
@@ -781,7 +731,7 @@ class Git(FetchMethod):
|
||||
Compute the HEAD revision for the url
|
||||
"""
|
||||
if not d.getVar("__BBSEENSRCREV"):
|
||||
raise bb.fetch2.FetchError("Recipe uses a floating tag/branch '%s' for repo '%s' without a fixed SRCREV yet doesn't call bb.fetch2.get_srcrev() (use SRCPV in PV for OE)." % (ud.unresolvedrev[name], ud.host+ud.path))
|
||||
raise bb.fetch2.FetchError("Recipe uses a floating tag/branch without a fixed SRCREV yet doesn't call bb.fetch2.get_srcrev() (use SRCPV in PV for OE).")
|
||||
|
||||
# Ensure we mark as not cached
|
||||
bb.fetch2.get_autorev(d)
|
||||
@@ -861,8 +811,9 @@ class Git(FetchMethod):
|
||||
commits = None
|
||||
else:
|
||||
if not os.path.exists(rev_file) or not os.path.getsize(rev_file):
|
||||
from pipes import quote
|
||||
commits = bb.fetch2.runfetchcmd(
|
||||
"git rev-list %s -- | wc -l" % shlex.quote(rev),
|
||||
"git rev-list %s -- | wc -l" % quote(rev),
|
||||
d, quiet=True).strip().lstrip('0')
|
||||
if commits:
|
||||
open(rev_file, "w").write("%d\n" % int(commits))
|
||||
|
||||
@@ -88,7 +88,7 @@ class GitSM(Git):
|
||||
subrevision[m] = module_hash.split()[2]
|
||||
|
||||
# Convert relative to absolute uri based on parent uri
|
||||
if uris[m].startswith('..') or uris[m].startswith('./'):
|
||||
if uris[m].startswith('..'):
|
||||
newud = copy.copy(ud)
|
||||
newud.path = os.path.realpath(os.path.join(newud.path, uris[m]))
|
||||
uris[m] = Git._get_repo_url(self, newud)
|
||||
@@ -115,9 +115,6 @@ class GitSM(Git):
|
||||
# This has to be a file reference
|
||||
proto = "file"
|
||||
url = "gitsm://" + uris[module]
|
||||
if url.endswith("{}{}".format(ud.host, ud.path)):
|
||||
raise bb.fetch2.FetchError("Submodule refers to the parent repository. This will cause deadlock situation in current version of Bitbake." \
|
||||
"Consider using git fetcher instead.")
|
||||
|
||||
url += ';protocol=%s' % proto
|
||||
url += ";name=%s" % module
|
||||
@@ -139,19 +136,6 @@ class GitSM(Git):
|
||||
|
||||
return submodules != []
|
||||
|
||||
def call_process_submodules(self, ud, d, extra_check, subfunc):
|
||||
# If we're using a shallow mirror tarball it needs to be
|
||||
# unpacked temporarily so that we can examine the .gitmodules file
|
||||
if ud.shallow and os.path.exists(ud.fullshallow) and extra_check:
|
||||
tmpdir = tempfile.mkdtemp(dir=d.getVar("DL_DIR"))
|
||||
try:
|
||||
runfetchcmd("tar -xzf %s" % ud.fullshallow, d, workdir=tmpdir)
|
||||
self.process_submodules(ud, tmpdir, subfunc, d)
|
||||
finally:
|
||||
shutil.rmtree(tmpdir)
|
||||
else:
|
||||
self.process_submodules(ud, ud.clonedir, subfunc, d)
|
||||
|
||||
def need_update(self, ud, d):
|
||||
if Git.need_update(self, ud, d):
|
||||
return True
|
||||
@@ -169,7 +153,15 @@ class GitSM(Git):
|
||||
logger.error('gitsm: submodule update check failed: %s %s' % (type(e).__name__, str(e)))
|
||||
need_update_result = True
|
||||
|
||||
self.call_process_submodules(ud, d, not os.path.exists(ud.clonedir), need_update_submodule)
|
||||
# If we're using a shallow mirror tarball it needs to be unpacked
|
||||
# temporarily so that we can examine the .gitmodules file
|
||||
if ud.shallow and os.path.exists(ud.fullshallow) and not os.path.exists(ud.clonedir):
|
||||
tmpdir = tempfile.mkdtemp(dir=d.getVar("DL_DIR"))
|
||||
runfetchcmd("tar -xzf %s" % ud.fullshallow, d, workdir=tmpdir)
|
||||
self.process_submodules(ud, tmpdir, need_update_submodule, d)
|
||||
shutil.rmtree(tmpdir)
|
||||
else:
|
||||
self.process_submodules(ud, ud.clonedir, need_update_submodule, d)
|
||||
|
||||
if need_update_list:
|
||||
logger.debug('gitsm: Submodules requiring update: %s' % (' '.join(need_update_list)))
|
||||
@@ -192,7 +184,16 @@ class GitSM(Git):
|
||||
raise
|
||||
|
||||
Git.download(self, ud, d)
|
||||
self.call_process_submodules(ud, d, self.need_update(ud, d), download_submodule)
|
||||
|
||||
# If we're using a shallow mirror tarball it needs to be unpacked
|
||||
# temporarily so that we can examine the .gitmodules file
|
||||
if ud.shallow and os.path.exists(ud.fullshallow) and self.need_update(ud, d):
|
||||
tmpdir = tempfile.mkdtemp(dir=d.getVar("DL_DIR"))
|
||||
runfetchcmd("tar -xzf %s" % ud.fullshallow, d, workdir=tmpdir)
|
||||
self.process_submodules(ud, tmpdir, download_submodule, d)
|
||||
shutil.rmtree(tmpdir)
|
||||
else:
|
||||
self.process_submodules(ud, ud.clonedir, download_submodule, d)
|
||||
|
||||
def unpack(self, ud, destdir, d):
|
||||
def unpack_submodules(ud, url, module, modpath, workdir, d):
|
||||
@@ -245,6 +246,14 @@ class GitSM(Git):
|
||||
newfetch = Fetch([url], d, cache=False)
|
||||
urldata.extend(newfetch.expanded_urldata())
|
||||
|
||||
self.call_process_submodules(ud, d, ud.method.need_update(ud, d), add_submodule)
|
||||
# If we're using a shallow mirror tarball it needs to be unpacked
|
||||
# temporarily so that we can examine the .gitmodules file
|
||||
if ud.shallow and os.path.exists(ud.fullshallow) and ud.method.need_update(ud, d):
|
||||
tmpdir = tempfile.mkdtemp(dir=d.getVar("DL_DIR"))
|
||||
subprocess.check_call("tar -xzf %s" % ud.fullshallow, cwd=tmpdir, shell=True)
|
||||
self.process_submodules(ud, tmpdir, add_submodule, d)
|
||||
shutil.rmtree(tmpdir)
|
||||
else:
|
||||
self.process_submodules(ud, ud.clonedir, add_submodule, d)
|
||||
|
||||
return urldata
|
||||
|
||||
@@ -156,7 +156,7 @@ class Npm(FetchMethod):
raise ParameterError("Invalid 'version' parameter", ud.url)

# Extract the 'registry' part of the url
ud.registry = re.sub(r"^npm://", "https://", ud.url.split(";")[0])
ud.registry = re.sub(r"^npm://", "http://", ud.url.split(";")[0])

# Using the 'downloadfilename' parameter as local filename
# or the npm package name.

@@ -1,6 +1,4 @@
#
# Copyright BitBake Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#
"""
@@ -38,7 +36,6 @@ class Osc(FetchMethod):
# Create paths to osc checkouts
oscdir = d.getVar("OSCDIR") or (d.getVar("DL_DIR") + "/osc")
relpath = self._strip_leading_slashes(ud.path)
ud.oscdir = oscdir
ud.pkgdir = os.path.join(oscdir, ud.host)
ud.moddir = os.path.join(ud.pkgdir, relpath, ud.module)

@@ -46,13 +43,13 @@ class Osc(FetchMethod):
ud.revision = ud.parm['rev']
else:
pv = d.getVar("PV", False)
rev = bb.fetch2.srcrev_internal_helper(ud, d, '')
rev = bb.fetch2.srcrev_internal_helper(ud, d)
if rev:
ud.revision = rev
else:
ud.revision = ""

ud.localfile = d.expand('%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), relpath.replace('/', '.'), ud.revision))
ud.localfile = d.expand('%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), ud.path.replace('/', '.'), ud.revision))

def _buildosccommand(self, ud, d, command):
"""
@@ -89,7 +86,7 @@ class Osc(FetchMethod):

logger.debug2("Fetch: checking for module directory '" + ud.moddir + "'")

if os.access(ud.moddir, os.R_OK):
if os.access(os.path.join(d.getVar('OSCDIR'), ud.path, ud.module), os.R_OK):
oscupdatecmd = self._buildosccommand(ud, d, "update")
logger.info("Update "+ ud.url)
# update sources there
@@ -117,23 +114,20 @@ class Osc(FetchMethod):
Generate a .oscrc to be used for this run.
"""

config_path = os.path.join(ud.oscdir, "oscrc")
if not os.path.exists(ud.oscdir):
bb.utils.mkdirhier(ud.oscdir)

config_path = os.path.join(d.getVar('OSCDIR'), "oscrc")
if (os.path.exists(config_path)):
os.remove(config_path)

f = open(config_path, 'w')
proto = ud.parm.get('proto', 'https')
f.write("[general]\n")
f.write("apiurl = %s://%s\n" % (proto, ud.host))
f.write("apisrv = %s\n" % ud.host)
f.write("scheme = http\n")
f.write("su-wrapper = su -c\n")
f.write("build-root = %s\n" % d.getVar('WORKDIR'))
f.write("urllist = %s\n" % d.getVar("OSCURLLIST"))
f.write("extra-pkgs = gzip\n")
f.write("\n")
f.write("[%s://%s]\n" % (proto, ud.host))
f.write("[%s]\n" % ud.host)
f.write("user = %s\n" % ud.parm["user"])
f.write("pass = %s\n" % ud.parm["pswd"])
f.close()

@@ -32,7 +32,6 @@ IETF secsh internet draft:

import re, os
from bb.fetch2 import check_network_access, FetchMethod, ParameterError, runfetchcmd
import urllib


__pattern__ = re.compile(r'''
@@ -71,7 +70,6 @@ class SSH(FetchMethod):
"git:// prefix with protocol=ssh", urldata.url)
m = __pattern__.match(urldata.url)
path = m.group('path')
path = urllib.parse.unquote(path)
host = m.group('host')
urldata.localpath = os.path.join(d.getVar('DL_DIR'),
os.path.basename(os.path.normpath(path)))
@@ -101,7 +99,7 @@ class SSH(FetchMethod):

if path[0] != '~':
path = '/%s' % path
path = urllib.parse.unquote(path)
path = path.replace("%3A", ":")

fr += ':%s' % path

@@ -141,7 +139,7 @@ class SSH(FetchMethod):

if path[0] != '~':
path = '/%s' % path
path = urllib.parse.unquote(path)
path = path.replace("%3A", ":")

cmd = 'ssh -o BatchMode=true %s %s [ -f %s ]' % (
portarg,

@@ -88,10 +88,7 @@ class Wget(FetchMethod):
if not ud.localfile:
ud.localfile = d.expand(urllib.parse.unquote(ud.host + ud.path).replace("/", "."))

self.basecmd = d.getVar("FETCHCMD_wget") or "/usr/bin/env wget -t 2 -T 100"

if ud.type == 'ftp' or ud.type == 'ftps':
self.basecmd += " --passive-ftp"
self.basecmd = d.getVar("FETCHCMD_wget") or "/usr/bin/env wget -t 2 -T 30 --passive-ftp"

if not self.check_certs(d):
self.basecmd += " --no-check-certificate"
@@ -109,10 +106,10 @@ class Wget(FetchMethod):

fetchcmd = self.basecmd

dldir = os.path.realpath(d.getVar("DL_DIR"))
localpath = os.path.join(dldir, ud.localfile) + ".tmp"
bb.utils.mkdirhier(os.path.dirname(localpath))
fetchcmd += " -O %s" % shlex.quote(localpath)
if 'downloadfilename' in ud.parm:
localpath = os.path.join(d.getVar("DL_DIR"), ud.localfile)
bb.utils.mkdirhier(os.path.dirname(localpath))
fetchcmd += " -O %s" % shlex.quote(localpath)

if ud.user and ud.pswd:
fetchcmd += " --auth-no-challenge"
@@ -130,16 +127,12 @@ class Wget(FetchMethod):
uri = ud.url.split(";")[0]
if os.path.exists(ud.localpath):
# file exists, but we didnt complete it.. trying again..
fetchcmd += " -c -P " + dldir + " '" + uri + "'"
fetchcmd += d.expand(" -c -P ${DL_DIR} '%s'" % uri)
else:
fetchcmd += " -P " + dldir + " '" + uri + "'"
fetchcmd += d.expand(" -P ${DL_DIR} '%s'" % uri)

self._runwget(ud, d, fetchcmd, False)

# Remove the ".tmp" and move the file into position atomically
# Our lock prevents multiple writers but mirroring code may grab incomplete files
os.rename(localpath, localpath[:-4])

# Sanity check since wget can pretend it succeed when it didn't
# Also, this used to happen if sourceforge sent us to the mirror page
if not os.path.exists(ud.localpath):
@@ -365,7 +358,7 @@ class Wget(FetchMethod):
except (TypeError, ImportError, IOError, netrc.NetrcParseError):
pass

with opener.open(r, timeout=100) as response:
with opener.open(r, timeout=30) as response:
pass
except urllib.error.URLError as e:
if try_again:

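The wget hunk above downloads into a "<name>.tmp" file inside DL_DIR and only renames it into its final place once the transfer has finished. A minimal illustrative sketch of that pattern follows; the helper name and fetch callback are placeholders, not fetcher API:

    import os

    def atomic_fetch(fetch_to_path, target):
        # Download into "<target>.tmp" so other readers (for example mirror
        # handling) never observe a partially written file.
        tmp = target + ".tmp"
        fetch_to_path(tmp)          # e.g. run wget with "-O <tmp>"
        # rename() is atomic within one filesystem and replaces a stale target
        os.rename(tmp, target)
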
@@ -234,10 +234,9 @@ class diskMonitor:
freeInode = st.f_favail

if minInode and freeInode < minInode:
# Some filesystems use dynamic inodes so can't run out.
# This is reported by the inode count being 0 (btrfs) or the free
# inode count being -1 (cephfs).
if st.f_files == 0 or st.f_favail == -1:
# Some filesystems use dynamic inodes so can't run out
# (e.g. btrfs). This is reported by the inode count being 0.
if st.f_files == 0:
self.devDict[k][2] = None
continue
# Always show warning, the self.checked would always be False if the action is WARN

@@ -133,6 +133,7 @@ class LogFilterShowOnce(logging.Filter):
self.seen_errors = set()

def filter(self, record):
msg = record.msg
if record.levelno == bb.msg.BBLogFormatter.WARNONCE:
if record.msg in self.seen_warnings:
return False

@@ -49,23 +49,20 @@ class SkipPackage(SkipRecipe):
__mtime_cache = {}
def cached_mtime(f):
if f not in __mtime_cache:
res = os.stat(f)
__mtime_cache[f] = (res.st_mtime_ns, res.st_size, res.st_ino)
__mtime_cache[f] = os.stat(f)[stat.ST_MTIME]
return __mtime_cache[f]

def cached_mtime_noerror(f):
if f not in __mtime_cache:
try:
res = os.stat(f)
__mtime_cache[f] = (res.st_mtime_ns, res.st_size, res.st_ino)
__mtime_cache[f] = os.stat(f)[stat.ST_MTIME]
except OSError:
return 0
return __mtime_cache[f]

def update_mtime(f):
try:
res = os.stat(f)
__mtime_cache[f] = (res.st_mtime_ns, res.st_size, res.st_ino)
__mtime_cache[f] = os.stat(f)[stat.ST_MTIME]
except OSError:
if f in __mtime_cache:
del __mtime_cache[f]

@@ -223,7 +223,7 @@ class ExportFuncsNode(AstNode):
for flag in [ "func", "python" ]:
if data.getVarFlag(calledfunc, flag, False):
data.setVarFlag(func, flag, data.getVarFlag(calledfunc, flag, False))
for flag in ["dirs", "cleandirs", "fakeroot"]:
for flag in [ "dirs" ]:
if data.getVarFlag(func, flag, False):
data.setVarFlag(calledfunc, flag, data.getVarFlag(func, flag, False))
data.setVarFlag(func, "filename", "autogenerated")

@@ -178,10 +178,10 @@ def feeder(lineno, s, fn, root, statements, eof=False):

if s and s[0] == '#':
if len(__residue__) != 0 and __residue__[0][0] != "#":
bb.fatal("There is a comment on line %s of file %s:\n'''\n%s\n'''\nwhich is in the middle of a multiline expression. This syntax is invalid, please correct it." % (lineno, fn, s))
bb.fatal("There is a comment on line %s of file %s (%s) which is in the middle of a multiline expression.\nBitbake used to ignore these but no longer does so, please fix your metadata as errors are likely as a result of this change." % (lineno, fn, s))

if len(__residue__) != 0 and __residue__[0][0] == "#" and (not s or s[0] != "#"):
bb.fatal("There is a confusing multiline partially commented expression on line %s of file %s:\n%s\nPlease clarify whether this is all a comment or should be parsed." % (lineno - len(__residue__), fn, "\n".join(__residue__)))
bb.fatal("There is a confusing multiline, partially commented expression on line %s of file %s (%s).\nPlease clarify whether this is all a comment or should be parsed." % (lineno, fn, s))

if s and s[-1] == '\\':
__residue__.append(s[:-1])

@@ -125,21 +125,16 @@ def handle(fn, data, include):
s = f.readline()
if not s:
break
origlineno = lineno
origline = s
w = s.strip()
# skip empty lines
if not w:
continue
s = s.rstrip()
while s[-1] == '\\':
line = f.readline()
origline += line
s2 = line.rstrip()
s2 = f.readline().rstrip()
lineno = lineno + 1
if (not s2 or s2 and s2[0] != "#") and s[0] == "#" :
bb.fatal("There is a confusing multiline, partially commented expression starting on line %s of file %s:\n%s\nPlease clarify whether this is all a comment or should be parsed." % (origlineno, fn, origline))

bb.fatal("There is a confusing multiline, partially commented expression on line %s of file %s (%s).\nPlease clarify whether this is all a comment or should be parsed." % (lineno, fn, s))
s = s[:-1] + s2
# skip comments
if s[0] == '#':
@@ -152,6 +147,8 @@ def handle(fn, data, include):
if oldfile:
data.setVar('FILE', oldfile)

f.close()

for f in confFilters:
f(fn, data)

@@ -154,7 +154,6 @@ class SQLTable(collections.abc.MutableMapping):

def __exit__(self, *excinfo):
self.connection.__exit__(*excinfo)
self.connection.close()

@_Decorators.retry()
@_Decorators.transaction
@@ -209,7 +208,7 @@ class SQLTable(collections.abc.MutableMapping):

def __lt__(self, other):
if not isinstance(other, Mapping):
raise NotImplementedError()
raise NotImplemented

return len(self) < len(other)

@@ -1,6 +1,4 @@
#
# Copyright BitBake Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#

@@ -396,8 +396,8 @@ def getRuntimeProviders(dataCache, rdepend):
return rproviders

# Only search dynamic packages if we can't find anything in other variables
for pat_key in dataCache.packages_dynamic:
pattern = pat_key.replace(r'+', r"\+")
for pattern in dataCache.packages_dynamic:
pattern = pattern.replace(r'+', r"\+")
if pattern in regexp_cache:
regexp = regexp_cache[pattern]
else:
@@ -408,7 +408,7 @@ def getRuntimeProviders(dataCache, rdepend):
raise
regexp_cache[pattern] = regexp
if regexp.match(rdepend):
rproviders += dataCache.packages_dynamic[pat_key]
rproviders += dataCache.packages_dynamic[pattern]
logger.debug("Assuming %s is a dynamic package, but it may not exist" % rdepend)

return rproviders

@@ -24,7 +24,6 @@ import pickle
from multiprocessing import Process
import shlex
import pprint
import time

bblogger = logging.getLogger("BitBake")
logger = logging.getLogger("BitBake.RunQueue")
@@ -160,67 +159,6 @@ class RunQueueScheduler(object):
self.buildable.append(tid)

self.rev_prio_map = None
self.is_pressure_usable()

def is_pressure_usable(self):
"""
If monitoring pressure, return True if pressure files can be open and read. For example
openSUSE /proc/pressure/* files have readable file permissions but when read the error EOPNOTSUPP (Operation not supported)
is returned.
"""
if self.rq.max_cpu_pressure or self.rq.max_io_pressure or self.rq.max_memory_pressure:
try:
with open("/proc/pressure/cpu") as cpu_pressure_fds, \
open("/proc/pressure/io") as io_pressure_fds, \
open("/proc/pressure/memory") as memory_pressure_fds:

self.prev_cpu_pressure = cpu_pressure_fds.readline().split()[4].split("=")[1]
self.prev_io_pressure = io_pressure_fds.readline().split()[4].split("=")[1]
self.prev_memory_pressure = memory_pressure_fds.readline().split()[4].split("=")[1]
self.prev_pressure_time = time.time()
self.check_pressure = True
except:
bb.note("The /proc/pressure files can't be read. Continuing build without monitoring pressure")
self.check_pressure = False
else:
self.check_pressure = False

def exceeds_max_pressure(self):
"""
Monitor the difference in total pressure at least once per second, if
BB_PRESSURE_MAX_{CPU|IO|MEMORY} are set, return True if above threshold.
"""
if self.check_pressure:
with open("/proc/pressure/cpu") as cpu_pressure_fds, \
open("/proc/pressure/io") as io_pressure_fds, \
open("/proc/pressure/memory") as memory_pressure_fds:
# extract "total" from /proc/pressure/{cpu|io}
curr_cpu_pressure = cpu_pressure_fds.readline().split()[4].split("=")[1]
curr_io_pressure = io_pressure_fds.readline().split()[4].split("=")[1]
curr_memory_pressure = memory_pressure_fds.readline().split()[4].split("=")[1]
now = time.time()
tdiff = now - self.prev_pressure_time
psi_accumulation_interval = 1.0
cpu_pressure = (float(curr_cpu_pressure) - float(self.prev_cpu_pressure)) / tdiff
io_pressure = (float(curr_io_pressure) - float(self.prev_io_pressure)) / tdiff
memory_pressure = (float(curr_memory_pressure) - float(self.prev_memory_pressure)) / tdiff
exceeds_cpu_pressure = self.rq.max_cpu_pressure and cpu_pressure > self.rq.max_cpu_pressure
exceeds_io_pressure = self.rq.max_io_pressure and io_pressure > self.rq.max_io_pressure
exceeds_memory_pressure = self.rq.max_memory_pressure and memory_pressure > self.rq.max_memory_pressure

if tdiff > psi_accumulation_interval:
self.prev_cpu_pressure = curr_cpu_pressure
self.prev_io_pressure = curr_io_pressure
self.prev_memory_pressure = curr_memory_pressure
self.prev_pressure_time = now

pressure_state = (exceeds_cpu_pressure, exceeds_io_pressure, exceeds_memory_pressure)
pressure_values = (round(cpu_pressure,1), self.rq.max_cpu_pressure, round(io_pressure,1), self.rq.max_io_pressure, round(memory_pressure,1), self.rq.max_memory_pressure)
if hasattr(self, "pressure_state") and pressure_state != self.pressure_state:
bb.note("Pressure status changed to CPU: %s, IO: %s, Mem: %s (CPU: %s/%s, IO: %s/%s, Mem: %s/%s) - using %s/%s bitbake threads" % (pressure_state + pressure_values + (len(self.rq.runq_running.difference(self.rq.runq_complete)), self.rq.number_tasks)))
self.pressure_state = pressure_state
return (exceeds_cpu_pressure or exceeds_io_pressure or exceeds_memory_pressure)
return False

def next_buildable_task(self):
"""
@@ -234,12 +172,6 @@ class RunQueueScheduler(object):
if not buildable:
return None

# Bitbake requires that at least one task be active. Only check for pressure if
# this is the case, otherwise the pressure limitation could result in no tasks
# being active and no new tasks started thereby, at times, breaking the scheduler.
if self.rq.stats.active and self.exceeds_max_pressure():
return None

# Filter out tasks that have a max number of threads that have been exceeded
skip_buildable = {}
for running in self.rq.runq_running.difference(self.rq.runq_complete):
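
The scheduler hunks above gate new task execution on Linux PSI data: the growth of the "total" stall counter in /proc/pressure/{cpu,io,memory} per second of wall time is compared against BB_PRESSURE_MAX_CPU/IO/MEMORY. A standalone sketch of that check, simplified and not the BitBake implementation (the one-second sleep stands in for the scheduler's sampling interval):

    import time

    def read_psi_total(resource):
        # First line looks like: "some avg10=0.00 avg60=0.00 avg300=0.00 total=123456"
        with open("/proc/pressure/%s" % resource) as f:
            return int(f.readline().split()[4].split("=")[1])

    def pressure_exceeded(resource, max_pressure, interval=1.0):
        # Stall time (microseconds) accumulated per second of wall-clock time
        before, t0 = read_psi_total(resource), time.time()
        time.sleep(interval)
        rate = (read_psi_total(resource) - before) / (time.time() - t0)
        return rate > max_pressure

    # e.g. BB_PRESSURE_MAX_IO = "10000" corresponds roughly to:
    # pressure_exceeded("io", 10000.0)
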
@@ -1742,7 +1674,7 @@ class RunQueue:
|
||||
(mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
|
||||
pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
|
||||
h = self.rqdata.runtaskentries[tid].hash
|
||||
matches = bb.siggen.find_siginfo(pn, taskname, [], self.cooker.databuilder.mcdata[mc])
|
||||
matches = bb.siggen.find_siginfo(pn, taskname, [], self.cfgData)
|
||||
match = None
|
||||
for m in matches:
|
||||
if h in m:
|
||||
@@ -1767,9 +1699,6 @@ class RunQueueExecute:
|
||||
|
||||
self.number_tasks = int(self.cfgData.getVar("BB_NUMBER_THREADS") or 1)
|
||||
self.scheduler = self.cfgData.getVar("BB_SCHEDULER") or "speed"
|
||||
self.max_cpu_pressure = self.cfgData.getVar("BB_PRESSURE_MAX_CPU")
|
||||
self.max_io_pressure = self.cfgData.getVar("BB_PRESSURE_MAX_IO")
|
||||
self.max_memory_pressure = self.cfgData.getVar("BB_PRESSURE_MAX_MEMORY")
|
||||
|
||||
self.sq_buildable = set()
|
||||
self.sq_running = set()
|
||||
@@ -1804,29 +1733,6 @@ class RunQueueExecute:
|
||||
if self.number_tasks <= 0:
|
||||
bb.fatal("Invalid BB_NUMBER_THREADS %s" % self.number_tasks)
|
||||
|
||||
lower_limit = 1.0
|
||||
upper_limit = 1000000.0
|
||||
if self.max_cpu_pressure:
|
||||
self.max_cpu_pressure = float(self.max_cpu_pressure)
|
||||
if self.max_cpu_pressure < lower_limit:
|
||||
bb.fatal("Invalid BB_PRESSURE_MAX_CPU %s, minimum value is %s." % (self.max_cpu_pressure, lower_limit))
|
||||
if self.max_cpu_pressure > upper_limit:
|
||||
bb.warn("Your build will be largely unregulated since BB_PRESSURE_MAX_CPU is set to %s. It is very unlikely that such high pressure will be experienced." % (self.max_cpu_pressure))
|
||||
|
||||
if self.max_io_pressure:
|
||||
self.max_io_pressure = float(self.max_io_pressure)
|
||||
if self.max_io_pressure < lower_limit:
|
||||
bb.fatal("Invalid BB_PRESSURE_MAX_IO %s, minimum value is %s." % (self.max_io_pressure, lower_limit))
|
||||
if self.max_io_pressure > upper_limit:
|
||||
bb.warn("Your build will be largely unregulated since BB_PRESSURE_MAX_IO is set to %s. It is very unlikely that such high pressure will be experienced." % (self.max_io_pressure))
|
||||
|
||||
if self.max_memory_pressure:
|
||||
self.max_memory_pressure = float(self.max_memory_pressure)
|
||||
if self.max_memory_pressure < lower_limit:
|
||||
bb.fatal("Invalid BB_PRESSURE_MAX_MEMORY %s, minimum value is %s." % (self.max_memory_pressure, lower_limit))
|
||||
if self.max_memory_pressure > upper_limit:
|
||||
bb.warn("Your build will be largely unregulated since BB_PRESSURE_MAX_MEMORY is set to %s. It is very unlikely that such high pressure will be experienced." % (self.max_io_pressure))
|
||||
|
||||
# List of setscene tasks which we've covered
|
||||
self.scenequeue_covered = set()
|
||||
# List of tasks which are covered (including setscene ones)
|
||||
@@ -1986,19 +1892,11 @@ class RunQueueExecute:
|
||||
self.setbuildable(revdep)
|
||||
logger.debug("Marking task %s as buildable", revdep)
|
||||
|
||||
found = None
|
||||
for t in sorted(self.sq_deferred.copy()):
|
||||
for t in self.sq_deferred.copy():
|
||||
if self.sq_deferred[t] == task:
|
||||
# Allow the next deferred task to run. Any other deferred tasks should be deferred after that task.
|
||||
# We shouldn't allow all to run at once as it is prone to races.
|
||||
if not found:
|
||||
bb.debug(1, "Deferred task %s now buildable" % t)
|
||||
del self.sq_deferred[t]
|
||||
update_scenequeue_data([t], self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self, summary=False)
|
||||
found = t
|
||||
else:
|
||||
bb.debug(1, "Deferring %s after %s" % (t, found))
|
||||
self.sq_deferred[t] = found
|
||||
logger.debug2("Deferred task %s now buildable" % t)
|
||||
del self.sq_deferred[t]
|
||||
update_scenequeue_data([t], self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self, summary=False)
|
||||
|
||||
def task_complete(self, task):
|
||||
self.stats.taskCompleted()
|
||||
@@ -2274,9 +2172,10 @@ class RunQueueExecute:
|
||||
|
||||
# No more tasks can be run. If we have deferred setscene tasks we should run them.
|
||||
if self.sq_deferred:
|
||||
deferred_tid = list(self.sq_deferred.keys())[0]
|
||||
blocking_tid = self.sq_deferred.pop(deferred_tid)
|
||||
logger.warning("Runqeueue deadlocked on deferred tasks, forcing task %s blocked by %s" % (deferred_tid, blocking_tid))
|
||||
tid = self.sq_deferred.pop(list(self.sq_deferred.keys())[0])
|
||||
logger.warning("Runqeueue deadlocked on deferred tasks, forcing task %s" % tid)
|
||||
if tid not in self.runq_complete:
|
||||
self.sq_task_failoutright(tid)
|
||||
return True
|
||||
|
||||
if self.failed_tids:
|
||||
@@ -2400,9 +2299,6 @@ class RunQueueExecute:
|
||||
self.rqdata.runtaskentries[hashtid].unihash = unihash
|
||||
bb.parse.siggen.set_unihash(hashtid, unihash)
|
||||
toprocess.add(hashtid)
|
||||
if torehash:
|
||||
# Need to save after set_unihash above
|
||||
bb.parse.siggen.save_unitaskhashes()
|
||||
|
||||
# Work out all tasks which depend upon these
|
||||
total = set()
|
||||
@@ -2509,6 +2405,17 @@ class RunQueueExecute:
|
||||
self.sq_buildable.remove(tid)
|
||||
if tid in self.sq_running:
|
||||
self.sq_running.remove(tid)
|
||||
harddepfail = False
|
||||
for t in self.sqdata.sq_harddeps:
|
||||
if tid in self.sqdata.sq_harddeps[t] and t in self.scenequeue_notcovered:
|
||||
harddepfail = True
|
||||
break
|
||||
if not harddepfail and self.sqdata.sq_revdeps[tid].issubset(self.scenequeue_covered | self.scenequeue_notcovered):
|
||||
if tid not in self.sq_buildable:
|
||||
self.sq_buildable.add(tid)
|
||||
if not self.sqdata.sq_revdeps[tid]:
|
||||
self.sq_buildable.add(tid)
|
||||
|
||||
if tid in self.sqdata.outrightfail:
|
||||
self.sqdata.outrightfail.remove(tid)
|
||||
if tid in self.scenequeue_notcovered:
|
||||
@@ -2527,36 +2434,18 @@ class RunQueueExecute:
|
||||
if tid in self.build_stamps:
|
||||
del self.build_stamps[tid]
|
||||
|
||||
update_tasks.append(tid)
|
||||
update_tasks.append((tid, harddepfail, tid in self.sqdata.valid))
|
||||
|
||||
update_tasks2 = []
|
||||
for tid in update_tasks:
|
||||
harddepfail = False
|
||||
for t in self.sqdata.sq_harddeps:
|
||||
if tid in self.sqdata.sq_harddeps[t] and t in self.scenequeue_notcovered:
|
||||
harddepfail = True
|
||||
break
|
||||
if not harddepfail and self.sqdata.sq_revdeps[tid].issubset(self.scenequeue_covered | self.scenequeue_notcovered):
|
||||
if tid not in self.sq_buildable:
|
||||
self.sq_buildable.add(tid)
|
||||
if not self.sqdata.sq_revdeps[tid]:
|
||||
self.sq_buildable.add(tid)
|
||||
|
||||
update_tasks2.append((tid, harddepfail, tid in self.sqdata.valid))
|
||||
|
||||
if update_tasks2:
|
||||
if update_tasks:
|
||||
self.sqdone = False
|
||||
for mc in sorted(self.sqdata.multiconfigs):
|
||||
for tid in sorted([t[0] for t in update_tasks2]):
|
||||
if mc_from_tid(tid) != mc:
|
||||
continue
|
||||
h = pending_hash_index(tid, self.rqdata)
|
||||
if h in self.sqdata.hashes and tid != self.sqdata.hashes[h]:
|
||||
self.sq_deferred[tid] = self.sqdata.hashes[h]
|
||||
bb.note("Deferring %s after %s" % (tid, self.sqdata.hashes[h]))
|
||||
update_scenequeue_data([t[0] for t in update_tasks2], self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self, summary=False)
|
||||
for tid in [t[0] for t in update_tasks]:
|
||||
h = pending_hash_index(tid, self.rqdata)
|
||||
if h in self.sqdata.hashes and tid != self.sqdata.hashes[h]:
|
||||
self.sq_deferred[tid] = self.sqdata.hashes[h]
|
||||
bb.note("Deferring %s after %s" % (tid, self.sqdata.hashes[h]))
|
||||
update_scenequeue_data([t[0] for t in update_tasks], self.sqdata, self.rqdata, self.rq, self.cooker, self.stampcache, self, summary=False)
|
||||
|
||||
for (tid, harddepfail, origvalid) in update_tasks2:
|
||||
for (tid, harddepfail, origvalid) in update_tasks:
|
||||
if tid in self.sqdata.valid and not origvalid:
|
||||
hashequiv_logger.verbose("Setscene task %s became valid" % tid)
|
||||
if harddepfail:
|
||||
@@ -2775,6 +2664,7 @@ def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq):
|
||||
sq_revdeps_squash[point] = set()
|
||||
if point in rqdata.runq_setscene_tids:
|
||||
sq_revdeps_squash[point] = tasks
|
||||
tasks = set()
|
||||
continue
|
||||
for dep in rqdata.runtaskentries[point].depends:
|
||||
if point in sq_revdeps[dep]:
|
||||
@@ -2904,7 +2794,7 @@ def build_scenequeue_data(sqdata, rqdata, rq, cooker, stampcache, sqrq):
|
||||
sqdata.hashes[h] = tid
|
||||
else:
|
||||
sqrq.sq_deferred[tid] = sqdata.hashes[h]
|
||||
bb.debug(1, "Deferring %s after %s" % (tid, sqdata.hashes[h]))
|
||||
bb.note("Deferring %s after %s" % (tid, sqdata.hashes[h]))
|
||||
|
||||
update_scenequeue_data(sqdata.sq_revdeps, sqdata, rqdata, rq, cooker, stampcache, sqrq, summary=True)
|
||||
|
||||
@@ -3113,7 +3003,7 @@ class runQueuePipe():
|
||||
if pipeout:
|
||||
pipeout.close()
|
||||
bb.utils.nonblockingfd(self.input)
|
||||
self.queue = bytearray()
|
||||
self.queue = b""
|
||||
self.d = d
|
||||
self.rq = rq
|
||||
self.rqexec = rqexec
|
||||
@@ -3132,7 +3022,7 @@ class runQueuePipe():
|
||||
|
||||
start = len(self.queue)
|
||||
try:
|
||||
self.queue.extend(self.input.read(102400) or b"")
|
||||
self.queue = self.queue + (self.input.read(102400) or b"")
|
||||
except (OSError, IOError) as e:
|
||||
if e.errno != errno.EAGAIN:
|
||||
raise
|
||||
|
||||
@@ -20,6 +20,7 @@ import os
|
||||
import sys
|
||||
import time
|
||||
import select
|
||||
import signal
|
||||
import socket
|
||||
import subprocess
|
||||
import errno
|
||||
@@ -757,11 +758,8 @@ class ConnectionWriter(object):
|
||||
process.queue_signals = True
|
||||
self._send(obj)
|
||||
process.queue_signals = False
|
||||
try:
|
||||
for sig in process.signal_received.pop():
|
||||
process.handle_sig(sig, None)
|
||||
except IndexError:
|
||||
pass
|
||||
for sig in process.signal_received.pop():
|
||||
process.handle_sig(sig, None)
|
||||
else:
|
||||
self._send(obj)
|
||||
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
@@ -42,6 +40,7 @@ def init(d):
|
||||
for sg in siggens:
|
||||
if desired == sg.name:
|
||||
return sg(d)
|
||||
break
|
||||
else:
|
||||
logger.error("Invalid signature generator '%s', using default 'noop'\n"
|
||||
"Available generators: %s", desired,
|
||||
@@ -329,19 +328,19 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
||||
|
||||
data = self.basehash[tid]
|
||||
for dep in self.runtaskdeps[tid]:
|
||||
data += self.get_unihash(dep)
|
||||
data = data + self.get_unihash(dep)
|
||||
|
||||
for (f, cs) in sorted(self.file_checksum_values[tid], key=clean_checksum_file_path):
|
||||
for (f, cs) in self.file_checksum_values[tid]:
|
||||
if cs:
|
||||
if "/./" in f:
|
||||
data += "./" + f.split("/./")[1]
|
||||
data += cs
|
||||
data = data + "./" + f.split("/./")[1]
|
||||
data = data + cs
|
||||
|
||||
if tid in self.taints:
|
||||
if self.taints[tid].startswith("nostamp:"):
|
||||
data += self.taints[tid][8:]
|
||||
data = data + self.taints[tid][8:]
|
||||
else:
|
||||
data += self.taints[tid]
|
||||
data = data + self.taints[tid]
|
||||
|
||||
h = hashlib.sha256(data.encode("utf-8")).hexdigest()
|
||||
self.taskhash[tid] = h
|
||||
@@ -393,7 +392,7 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
||||
if runtime and tid in self.taskhash:
|
||||
data['runtaskdeps'] = self.runtaskdeps[tid]
|
||||
data['file_checksum_values'] = []
|
||||
for f,cs in sorted(self.file_checksum_values[tid], key=clean_checksum_file_path):
|
||||
for f,cs in self.file_checksum_values[tid]:
|
||||
if "/./" in f:
|
||||
data['file_checksum_values'].append(("./" + f.split("/./")[1], cs))
|
||||
else:
|
||||
@@ -421,7 +420,7 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
||||
bb.error("Taskhash mismatch %s versus %s for %s" % (computed_taskhash, self.taskhash[tid], tid))
|
||||
sigfile = sigfile.replace(self.taskhash[tid], computed_taskhash)
|
||||
|
||||
fd, tmpfile = bb.utils.mkstemp(dir=os.path.dirname(sigfile), prefix="sigtask.")
|
||||
fd, tmpfile = tempfile.mkstemp(dir=os.path.dirname(sigfile), prefix="sigtask.")
|
||||
try:
|
||||
with bb.compress.zstd.open(fd, "wt", encoding="utf-8", num_threads=1) as f:
|
||||
json.dump(data, f, sort_keys=True, separators=(",", ":"), cls=SetEncoder)
|
||||
@@ -720,12 +719,6 @@ class SignatureGeneratorTestMulticonfigDepends(SignatureGeneratorBasicHash):
|
||||
name = "TestMulticonfigDepends"
|
||||
supports_multiconfig_datacaches = True
|
||||
|
||||
def clean_checksum_file_path(file_checksum_tuple):
|
||||
f, cs = file_checksum_tuple
|
||||
if "/./" in f:
|
||||
return "./" + f.split("/./")[1]
|
||||
return f
|
||||
|
||||
def dump_this_task(outfile, d):
|
||||
import bb.parse
|
||||
fn = d.getVar("BB_FILENAME")
|
||||
@@ -998,8 +991,8 @@ def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
|
||||
|
||||
|
||||
if 'runtaskhashes' in a_data and 'runtaskhashes' in b_data:
|
||||
a = clean_basepaths(a_data['runtaskhashes'])
|
||||
b = clean_basepaths(b_data['runtaskhashes'])
|
||||
a = a_data['runtaskhashes']
|
||||
b = b_data['runtaskhashes']
|
||||
changed, added, removed = dict_diff(a, b)
|
||||
if added:
|
||||
for dep in sorted(added):
|
||||
@@ -1010,7 +1003,7 @@ def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
|
||||
#output.append("Dependency on task %s was replaced by %s with same hash" % (dep, bdep))
|
||||
bdep_found = True
|
||||
if not bdep_found:
|
||||
output.append(color_format("{color_title}Dependency on task %s was added{color_default} with hash %s") % (dep, b[dep]))
|
||||
output.append(color_format("{color_title}Dependency on task %s was added{color_default} with hash %s") % (clean_basepath(dep), b[dep]))
|
||||
if removed:
|
||||
for dep in sorted(removed):
|
||||
adep_found = False
|
||||
@@ -1020,11 +1013,11 @@ def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
|
||||
#output.append("Dependency on task %s was replaced by %s with same hash" % (adep, dep))
|
||||
adep_found = True
|
||||
if not adep_found:
|
||||
output.append(color_format("{color_title}Dependency on task %s was removed{color_default} with hash %s") % (dep, a[dep]))
|
||||
output.append(color_format("{color_title}Dependency on task %s was removed{color_default} with hash %s") % (clean_basepath(dep), a[dep]))
|
||||
if changed:
|
||||
for dep in sorted(changed):
|
||||
if not collapsed:
|
||||
output.append(color_format("{color_title}Hash for task dependency %s changed{color_default} from %s to %s") % (dep, a[dep], b[dep]))
|
||||
output.append(color_format("{color_title}Hash for task dependency %s changed{color_default} from %s to %s") % (clean_basepath(dep), a[dep], b[dep]))
|
||||
if callable(recursecb):
|
||||
recout = recursecb(dep, a[dep], b[dep])
|
||||
if recout:
|
||||
@@ -1034,7 +1027,6 @@ def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
|
||||
# If a dependent hash changed, might as well print the line above and then defer to the changes in
|
||||
# that hash since in all likelyhood, they're the same changes this task also saw.
|
||||
output = [output[-1]] + recout
|
||||
break
|
||||
|
||||
a_taint = a_data.get('taint', None)
|
||||
b_taint = b_data.get('taint', None)
|
||||
|
||||
@@ -430,32 +430,6 @@ esac
|
||||
self.assertEqual(deps, set(["TESTVAR2"]))
|
||||
self.assertEqual(self.d.getVar('ANOTHERVAR').split(), ['testval3', 'anothervalue'])
|
||||
|
||||
def test_contains_vardeps_override_operators(self):
|
||||
# Check override operators handle dependencies correctly with the contains functionality
|
||||
expr_plain = 'testval'
|
||||
expr_prepend = '${@bb.utils.filter("TESTVAR1", "testval1", d)} '
|
||||
expr_append = ' ${@bb.utils.filter("TESTVAR2", "testval2", d)}'
|
||||
expr_remove = '${@bb.utils.contains("TESTVAR3", "no-testval", "testval", "", d)}'
|
||||
# Check dependencies
|
||||
self.d.setVar('ANOTHERVAR', expr_plain)
|
||||
self.d.prependVar('ANOTHERVAR', expr_prepend)
|
||||
self.d.appendVar('ANOTHERVAR', expr_append)
|
||||
self.d.setVar('ANOTHERVAR:remove', expr_remove)
|
||||
self.d.setVar('TESTVAR1', 'blah')
|
||||
self.d.setVar('TESTVAR2', 'testval2')
|
||||
self.d.setVar('TESTVAR3', 'no-testval')
|
||||
deps, values = bb.data.build_dependencies("ANOTHERVAR", set(self.d.keys()), set(), set(), set(), self.d)
|
||||
self.assertEqual(sorted(values.splitlines()),
|
||||
sorted([
|
||||
expr_prepend + expr_plain + expr_append,
|
||||
'_remove of ' + expr_remove,
|
||||
'TESTVAR1{testval1} = Unset',
|
||||
'TESTVAR2{testval2} = Set',
|
||||
'TESTVAR3{no-testval} = Set',
|
||||
]))
|
||||
# Check final value
|
||||
self.assertEqual(self.d.getVar('ANOTHERVAR').split(), ['testval2'])
|
||||
|
||||
#Currently no wildcard support
|
||||
#def test_vardeps_wildcards(self):
|
||||
# self.d.setVar("oe_libinstall", "echo test")
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
#
|
||||
# BitBake Tests for cooker.py
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
|
||||
@@ -6,7 +6,6 @@
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
import contextlib
|
||||
import unittest
|
||||
import hashlib
|
||||
import tempfile
|
||||
@@ -654,10 +653,8 @@ class CleanTarballTest(FetcherTest):
|
||||
archive = tarfile.open(os.path.join(self.dldir, self.recipe_tarball))
|
||||
self.assertNotEqual(len(archive.members), 0)
|
||||
for member in archive.members:
|
||||
self.assertEqual(member.uname, 'oe')
|
||||
self.assertEqual(member.uid, 0)
|
||||
self.assertEqual(member.gname, 'oe')
|
||||
self.assertEqual(member.gid, 0)
|
||||
self.assertEqual(member.uname, 'pokybuild')
|
||||
self.assertEqual(member.gname, 'users')
|
||||
self.assertEqual(member.mtime, mtime)
|
||||
|
||||
|
||||
@@ -1333,12 +1330,12 @@ class FetchLatestVersionTest(FetcherTest):
|
||||
("dtc", "git://git.yoctoproject.org/bbfetchtests-dtc.git;branch=master", "65cc4d2748a2c2e6f27f1cf39e07a5dbabd80ebf", "")
|
||||
: "1.4.0",
|
||||
# combination version pattern
|
||||
("sysprof", "git://git.yoctoproject.org/sysprof.git;protocol=https;branch=master", "cd44ee6644c3641507fb53b8a2a69137f2971219", "")
|
||||
("sysprof", "git://gitlab.gnome.org/GNOME/sysprof.git;protocol=https;branch=master", "cd44ee6644c3641507fb53b8a2a69137f2971219", "")
|
||||
: "1.2.0",
|
||||
("u-boot-mkimage", "git://git.yoctoproject.org/bbfetchtests-u-boot.git;branch=master;protocol=https", "62c175fbb8a0f9a926c88294ea9f7e88eb898f6c", "")
|
||||
("u-boot-mkimage", "git://git.denx.de/u-boot.git;branch=master;protocol=git", "62c175fbb8a0f9a926c88294ea9f7e88eb898f6c", "")
|
||||
: "2014.01",
|
||||
# version pattern "yyyymmdd"
|
||||
("mobile-broadband-provider-info", "git://git.yoctoproject.org/mobile-broadband-provider-info.git;protocol=https;branch=master", "4ed19e11c2975105b71b956440acdb25d46a347d", "")
|
||||
("mobile-broadband-provider-info", "git://gitlab.gnome.org/GNOME/mobile-broadband-provider-info.git;protocol=https;branch=master", "4ed19e11c2975105b71b956440acdb25d46a347d", "")
|
||||
: "20120614",
|
||||
# packages with a valid UPSTREAM_CHECK_GITTAGREGEX
|
||||
# mirror of git://anongit.freedesktop.org/xorg/driver/xf86-video-omap since network issues interfered with testing
|
||||
@@ -1417,7 +1414,7 @@ class FetchLatestVersionTest(FetcherTest):
|
||||
|
||||
def test_wget_latest_versionstring(self):
|
||||
testdata = os.path.dirname(os.path.abspath(__file__)) + "/fetch-testdata"
|
||||
server = HTTPService(testdata, host="127.0.0.1")
|
||||
server = HTTPService(testdata)
|
||||
server.start()
|
||||
port = server.port
|
||||
try:
|
||||
@@ -1425,10 +1422,10 @@ class FetchLatestVersionTest(FetcherTest):
|
||||
self.d.setVar("PN", k[0])
|
||||
checkuri = ""
|
||||
if k[2]:
|
||||
checkuri = "http://127.0.0.1:%s/" % port + k[2]
|
||||
checkuri = "http://localhost:%s/" % port + k[2]
|
||||
self.d.setVar("UPSTREAM_CHECK_URI", checkuri)
|
||||
self.d.setVar("UPSTREAM_CHECK_REGEX", k[3])
|
||||
url = "http://127.0.0.1:%s/" % port + k[1]
|
||||
url = "http://localhost:%s/" % port + k[1]
|
||||
ud = bb.fetch2.FetchData(url, self.d)
|
||||
pupver = ud.method.latest_versionstring(ud, self.d)
|
||||
verstring = pupver[0]
|
||||
@@ -1621,8 +1618,6 @@ class GitShallowTest(FetcherTest):
|
||||
if cwd is None:
|
||||
cwd = self.gitdir
|
||||
actual_refs = self.git(['for-each-ref', '--format=%(refname)'], cwd=cwd).splitlines()
|
||||
# Resolve references into the same format as the comparision (needed by git 2.48 onwards)
|
||||
actual_refs = self.git(['rev-parse', '--symbolic-full-name'] + actual_refs, cwd=cwd).splitlines()
|
||||
full_expected = self.git(['rev-parse', '--symbolic-full-name'] + expected_refs, cwd=cwd).splitlines()
|
||||
self.assertEqual(sorted(set(full_expected)), sorted(set(actual_refs)))
|
||||
|
||||
@@ -1837,7 +1832,7 @@ class GitShallowTest(FetcherTest):
|
||||
self.add_empty_file('bsub', cwd=smdir)
|
||||
|
||||
self.git('submodule init', cwd=self.srcdir)
|
||||
self.git('-c protocol.file.allow=always submodule add file://%s' % smdir, cwd=self.srcdir)
|
||||
self.git('submodule add file://%s' % smdir, cwd=self.srcdir)
|
||||
self.git('submodule update', cwd=self.srcdir)
|
||||
self.git('commit -m submodule -a', cwd=self.srcdir)
|
||||
|
||||
@@ -1867,7 +1862,7 @@ class GitShallowTest(FetcherTest):
|
||||
self.add_empty_file('bsub', cwd=smdir)
|
||||
|
||||
self.git('submodule init', cwd=self.srcdir)
|
||||
self.git('-c protocol.file.allow=always submodule add file://%s' % smdir, cwd=self.srcdir)
|
||||
self.git('submodule add file://%s' % smdir, cwd=self.srcdir)
|
||||
self.git('submodule update', cwd=self.srcdir)
|
||||
self.git('commit -m submodule -a', cwd=self.srcdir)
|
||||
|
||||
@@ -2162,12 +2157,6 @@ class GitShallowTest(FetcherTest):
|
||||
self.assertIn("fstests.doap", dir)
|
||||
|
||||
class GitLfsTest(FetcherTest):
|
||||
def skipIfNoGitLFS():
|
||||
import shutil
|
||||
if not shutil.which('git-lfs'):
|
||||
return unittest.skip('git-lfs not installed')
|
||||
return lambda f: f
|
||||
|
||||
def setUp(self):
|
||||
FetcherTest.setUp(self)
|
||||
|
||||
@@ -2185,14 +2174,10 @@ class GitLfsTest(FetcherTest):
|
||||
|
||||
bb.utils.mkdirhier(self.srcdir)
|
||||
self.git_init(cwd=self.srcdir)
|
||||
self.commit_file('.gitattributes', '*.mp3 filter=lfs -text')
|
||||
|
||||
def commit_file(self, filename, content):
|
||||
with open(os.path.join(self.srcdir, filename), "w") as f:
|
||||
f.write(content)
|
||||
self.git(["add", filename], cwd=self.srcdir)
|
||||
self.git(["commit", "-m", "Change"], cwd=self.srcdir)
|
||||
return self.git(["rev-parse", "HEAD"], cwd=self.srcdir).strip()
|
||||
with open(os.path.join(self.srcdir, '.gitattributes'), 'wt') as attrs:
|
||||
attrs.write('*.mp3 filter=lfs -text')
|
||||
self.git(['add', '.gitattributes'], cwd=self.srcdir)
|
||||
self.git(['commit', '-m', "attributes", '.gitattributes'], cwd=self.srcdir)
|
||||
|
||||
def fetch(self, uri=None, download=True):
|
||||
uris = self.d.getVar('SRC_URI').split()
|
||||
@@ -2205,82 +2190,6 @@ class GitLfsTest(FetcherTest):
|
||||
ud = fetcher.ud[uri]
|
||||
return fetcher, ud
|
||||
|
||||
def get_real_git_lfs_file(self):
|
||||
self.d.setVar('PATH', os.environ.get('PATH'))
|
||||
fetcher, ud = self.fetch()
|
||||
fetcher.unpack(self.d.getVar('WORKDIR'))
|
||||
unpacked_lfs_file = os.path.join(self.d.getVar('WORKDIR'), 'git', "Cat_poster_1.jpg")
|
||||
return unpacked_lfs_file
|
||||
|
||||
@skipIfNoGitLFS()
|
||||
def test_fetch_lfs_on_srcrev_change(self):
|
||||
"""Test if fetch downloads missing LFS objects when a different revision within an existing repository is requested"""
|
||||
self.git(["lfs", "install", "--local"], cwd=self.srcdir)
|
||||
|
||||
@contextlib.contextmanager
|
||||
def hide_upstream_repository():
|
||||
"""Hide the upstream repository to make sure that git lfs cannot pull from it"""
|
||||
temp_name = self.srcdir + ".bak"
|
||||
os.rename(self.srcdir, temp_name)
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
os.rename(temp_name, self.srcdir)
|
||||
|
||||
def fetch_and_verify(revision, filename, content):
|
||||
self.d.setVar('SRCREV', revision)
|
||||
fetcher, ud = self.fetch()
|
||||
|
||||
with hide_upstream_repository():
|
||||
workdir = self.d.getVar('WORKDIR')
|
||||
fetcher.unpack(workdir)
|
||||
|
||||
with open(os.path.join(workdir, "git", filename)) as f:
|
||||
self.assertEqual(f.read(), content)
|
||||
|
||||
commit_1 = self.commit_file("a.mp3", "version 1")
|
||||
commit_2 = self.commit_file("a.mp3", "version 2")
|
||||
|
||||
self.d.setVar('SRC_URI', "git://%s;protocol=file;lfs=1;branch=master" % self.srcdir)
|
||||
|
||||
# Seed the local download folder by fetching the latest commit and verifying that the LFS contents are
|
||||
# available even when the upstream repository disappears.
|
||||
fetch_and_verify(commit_2, "a.mp3", "version 2")
|
||||
# Verify that even when an older revision is fetched, the needed LFS objects are fetched into the download
|
||||
# folder.
|
||||
fetch_and_verify(commit_1, "a.mp3", "version 1")
|
||||
|
||||
@skipIfNoGitLFS()
|
||||
@skipIfNoNetwork()
|
||||
def test_real_git_lfs_repo_succeeds_without_lfs_param(self):
|
||||
self.d.setVar('SRC_URI', "git://gitlab.com/gitlab-examples/lfs.git;protocol=https;branch=master")
|
||||
f = self.get_real_git_lfs_file()
|
||||
self.assertTrue(os.path.exists(f))
|
||||
self.assertEqual("c0baab607a97839c9a328b4310713307", bb.utils.md5_file(f))
|
||||
|
||||
@skipIfNoGitLFS()
|
||||
@skipIfNoNetwork()
|
||||
def test_real_git_lfs_repo_succeeds(self):
|
||||
self.d.setVar('SRC_URI', "git://gitlab.com/gitlab-examples/lfs.git;protocol=https;branch=master;lfs=1")
|
||||
f = self.get_real_git_lfs_file()
|
||||
self.assertTrue(os.path.exists(f))
|
||||
self.assertEqual("c0baab607a97839c9a328b4310713307", bb.utils.md5_file(f))
|
||||
|
||||
@skipIfNoGitLFS()
|
||||
@skipIfNoNetwork()
|
||||
def test_real_git_lfs_repo_succeeds(self):
|
||||
self.d.setVar('SRC_URI', "git://gitlab.com/gitlab-examples/lfs.git;protocol=https;branch=master;lfs=0")
|
||||
f = self.get_real_git_lfs_file()
|
||||
# This is the actual non-smudged placeholder file on the repo if git-lfs does not run
|
||||
lfs_file = (
|
||||
'version https://git-lfs.github.com/spec/v1\n'
|
||||
'oid sha256:34be66b1a39a1955b46a12588df9d5f6fc1da790e05cf01f3c7422f4bbbdc26b\n'
|
||||
'size 11423554\n'
|
||||
)
|
||||
|
||||
with open(f) as fh:
|
||||
self.assertEqual(lfs_file, fh.read())
|
||||
|
||||
def test_lfs_enabled(self):
|
||||
import shutil
|
||||
|
||||
@@ -2299,16 +2208,12 @@ class GitLfsTest(FetcherTest):
|
||||
shutil.rmtree(self.gitdir, ignore_errors=True)
|
||||
fetcher.unpack(self.d.getVar('WORKDIR'))
|
||||
|
||||
old_find_git_lfs = ud.method._find_git_lfs
|
||||
try:
|
||||
# If git-lfs cannot be found, the unpack should throw an error
|
||||
with self.assertRaises(bb.fetch2.FetchError):
|
||||
fetcher.download()
|
||||
ud.method._find_git_lfs = lambda d: False
|
||||
shutil.rmtree(self.gitdir, ignore_errors=True)
|
||||
fetcher.unpack(self.d.getVar('WORKDIR'))
|
||||
finally:
|
||||
ud.method._find_git_lfs = old_find_git_lfs
|
||||
# If git-lfs cannot be found, the unpack should throw an error
|
||||
with self.assertRaises(bb.fetch2.FetchError):
|
||||
fetcher.download()
|
||||
ud.method._find_git_lfs = lambda d: False
|
||||
shutil.rmtree(self.gitdir, ignore_errors=True)
|
||||
fetcher.unpack(self.d.getVar('WORKDIR'))
|
||||
|
||||
def test_lfs_disabled(self):
|
||||
import shutil
|
||||
@@ -2323,21 +2228,17 @@ class GitLfsTest(FetcherTest):
|
||||
fetcher, ud = self.fetch()
|
||||
self.assertIsNotNone(ud.method._find_git_lfs)
|
||||
|
||||
old_find_git_lfs = ud.method._find_git_lfs
|
||||
try:
|
||||
# If git-lfs can be found, the unpack should be successful. A
|
||||
# live copy of git-lfs is not required for this case, so
|
||||
# unconditionally forge its presence.
|
||||
ud.method._find_git_lfs = lambda d: True
|
||||
shutil.rmtree(self.gitdir, ignore_errors=True)
|
||||
fetcher.unpack(self.d.getVar('WORKDIR'))
|
||||
# If git-lfs cannot be found, the unpack should be successful
|
||||
# If git-lfs can be found, the unpack should be successful. A
|
||||
# live copy of git-lfs is not required for this case, so
|
||||
# unconditionally forge its presence.
|
||||
ud.method._find_git_lfs = lambda d: True
|
||||
shutil.rmtree(self.gitdir, ignore_errors=True)
|
||||
fetcher.unpack(self.d.getVar('WORKDIR'))
|
||||
|
||||
ud.method._find_git_lfs = lambda d: False
|
||||
shutil.rmtree(self.gitdir, ignore_errors=True)
|
||||
fetcher.unpack(self.d.getVar('WORKDIR'))
|
||||
finally:
|
||||
ud.method._find_git_lfs = old_find_git_lfs
|
||||
# If git-lfs cannot be found, the unpack should be successful
|
||||
ud.method._find_git_lfs = lambda d: False
|
||||
shutil.rmtree(self.gitdir, ignore_errors=True)
|
||||
fetcher.unpack(self.d.getVar('WORKDIR'))
|
||||
|
||||
class GitURLWithSpacesTest(FetcherTest):
|
||||
test_git_urls = {
|
||||
|
||||
@@ -119,7 +119,7 @@ EXTRA_OECONF:class-target = "b"
|
||||
EXTRA_OECONF:append = " c"
|
||||
"""
|
||||
|
||||
def test_parse_overrides2(self):
|
||||
def test_parse_overrides(self):
|
||||
f = self.parsehelper(self.overridetest2)
|
||||
d = bb.parse.handle(f.name, self.d)['']
|
||||
d.appendVar("EXTRA_OECONF", " d")
|
||||
@@ -194,26 +194,3 @@ deltask ${EMPTYVAR}
|
||||
self.assertTrue('addtask ignored: " do_patch"' in stdout)
|
||||
#self.assertTrue('dependent task do_foo for do_patch does not exist' in stdout)
|
||||
|
||||
broken_multiline_comment = """
|
||||
# First line of comment \\
|
||||
# Second line of comment \\
|
||||
|
||||
"""
|
||||
def test_parse_broken_multiline_comment(self):
|
||||
f = self.parsehelper(self.broken_multiline_comment)
|
||||
with self.assertRaises(bb.BBHandledException):
|
||||
d = bb.parse.handle(f.name, self.d)['']
|
||||
|
||||
|
||||
comment_in_var = """
|
||||
VAR = " \\
|
||||
SOMEVAL \\
|
||||
# some comment \\
|
||||
SOMEOTHERVAL \\
|
||||
"
|
||||
"""
|
||||
def test_parse_comment_in_var(self):
|
||||
f = self.parsehelper(self.comment_in_var)
|
||||
with self.assertRaises(bb.BBHandledException):
|
||||
d = bb.parse.handle(f.name, self.d)['']
|
||||
|
||||
|
||||
@@ -324,11 +324,11 @@ class Tinfoil:
|
||||
self.recipes_parsed = False
|
||||
self.quiet = 0
|
||||
self.oldhandlers = self.logger.handlers[:]
|
||||
self.localhandlers = []
|
||||
if setup_logging:
|
||||
# This is the *client-side* logger, nothing to do with
|
||||
# logging messages from the server
|
||||
bb.msg.logger_create('BitBake', output)
|
||||
self.localhandlers = []
|
||||
for handler in self.logger.handlers:
|
||||
if handler not in self.oldhandlers:
|
||||
self.localhandlers.append(handler)
|
||||
|
||||
@@ -45,7 +45,7 @@ from pprint import pformat
|
||||
import logging
|
||||
from datetime import datetime, timedelta
|
||||
|
||||
from django.db import transaction
|
||||
from django.db import transaction, connection
|
||||
|
||||
|
||||
# pylint: disable=invalid-name
|
||||
@@ -496,7 +496,7 @@ class ORMWrapper(object):
|
||||
if not parent_path:
|
||||
parent_path = "/"
|
||||
parent_obj = self._cached_get(Target_File, target = target_obj, path = parent_path, inodetype = Target_File.ITYPE_DIRECTORY)
|
||||
Target_File.objects.create(
|
||||
tf_obj = Target_File.objects.create(
|
||||
target = target_obj,
|
||||
path = path,
|
||||
size = size,
|
||||
@@ -561,7 +561,7 @@ class ORMWrapper(object):
|
||||
|
||||
parent_obj = Target_File.objects.get(target = target_obj, path = parent_path, inodetype = Target_File.ITYPE_DIRECTORY)
|
||||
|
||||
Target_File.objects.create(
|
||||
tf_obj = Target_File.objects.create(
|
||||
target = target_obj,
|
||||
path = path,
|
||||
size = size,
|
||||
@@ -1062,6 +1062,27 @@ class BuildInfoHelper(object):
|
||||
|
||||
return recipe_info
|
||||
|
||||
def _get_path_information(self, task_object):
|
||||
self._ensure_build()
|
||||
|
||||
assert isinstance(task_object, Task)
|
||||
build_stats_format = "{tmpdir}/buildstats/{buildname}/{package}/"
|
||||
build_stats_path = []
|
||||
|
||||
for t in self.internal_state['targets']:
|
||||
buildname = self.internal_state['build'].build_name
|
||||
pe, pv = task_object.recipe.version.split(":",1)
|
||||
if pe:
|
||||
package = task_object.recipe.name + "-" + pe + "_" + pv
|
||||
else:
|
||||
package = task_object.recipe.name + "-" + pv
|
||||
|
||||
build_stats_path.append(build_stats_format.format(tmpdir=self.tmp_dir,
|
||||
buildname=buildname,
|
||||
package=package))
|
||||
|
||||
return build_stats_path
|
||||
|
||||
|
||||
################################
|
||||
## external available methods to store information
|
||||
|
||||
@@ -877,6 +877,7 @@ def main(server, eventHandler, params, tf = TerminalFilter):
|
||||
state_force_shutdown()
|
||||
|
||||
main.shutdown = main.shutdown + 1
|
||||
pass
|
||||
except Exception as e:
|
||||
import traceback
|
||||
sys.stderr.write(traceback.format_exc())
|
||||
|
||||
@@ -13,7 +13,6 @@ import errno
|
||||
import logging
|
||||
import bb
|
||||
import bb.msg
|
||||
import locale
|
||||
import multiprocessing
|
||||
import fcntl
|
||||
import importlib
|
||||
@@ -29,8 +28,6 @@ import signal
|
||||
import collections
|
||||
import copy
|
||||
import ctypes
|
||||
import random
|
||||
import tempfile
|
||||
from subprocess import getstatusoutput
|
||||
from contextlib import contextmanager
|
||||
from ctypes import cdll
|
||||
@@ -432,14 +429,12 @@ def better_eval(source, locals, extraglobals = None):
|
||||
return eval(source, ctx, locals)
|
||||
|
||||
@contextmanager
|
||||
def fileslocked(files, *args, **kwargs):
|
||||
def fileslocked(files):
|
||||
"""Context manager for locking and unlocking file locks."""
|
||||
locks = []
|
||||
if files:
|
||||
for lockfile in files:
|
||||
l = bb.utils.lockfile(lockfile, *args, **kwargs)
|
||||
if l is not None:
|
||||
locks.append(l)
|
||||
locks.append(bb.utils.lockfile(lockfile))
|
||||
|
||||
try:
|
||||
yield
|
||||
@@ -546,12 +541,7 @@ def md5_file(filename):
|
||||
Return the hex string representation of the MD5 checksum of filename.
|
||||
"""
|
||||
import hashlib
|
||||
try:
|
||||
sig = hashlib.new('MD5', usedforsecurity=False)
|
||||
except TypeError:
|
||||
# Some configurations don't appear to support two arguments
|
||||
sig = hashlib.new('MD5')
|
||||
return _hasher(sig, filename)
|
||||
return _hasher(hashlib.new('MD5', usedforsecurity=False), filename)
|
||||
|
||||
def sha256_file(filename):
|
||||
"""
|
||||
@@ -607,21 +597,6 @@ def preserved_envvars():
|
||||
]
|
||||
return v + preserved_envvars_exported()
|
||||
|
||||
def check_system_locale():
|
||||
"""Make sure the required system locale are available and configured"""
|
||||
default_locale = locale.getlocale(locale.LC_CTYPE)
|
||||
|
||||
try:
|
||||
locale.setlocale(locale.LC_CTYPE, ("en_US", "UTF-8"))
|
||||
except:
|
||||
sys.exit("Please make sure locale 'en_US.UTF-8' is available on your system")
|
||||
else:
|
||||
locale.setlocale(locale.LC_CTYPE, default_locale)
|
||||
|
||||
if sys.getfilesystemencoding() != "utf-8":
|
||||
sys.exit("Please use a locale setting which supports UTF-8 (such as LANG=en_US.UTF-8).\n"
|
||||
"Python can't change the filesystem locale after loading so we need a UTF-8 when Python starts or things won't work.")
|
||||
|
||||
def filter_environment(good_vars):
|
||||
"""
|
||||
Create a pristine environment for bitbake. This will remove variables that
|
||||
@@ -717,8 +692,8 @@ def remove(path, recurse=False, ionice=False):
|
||||
return
|
||||
if recurse:
|
||||
for name in glob.glob(path):
|
||||
if _check_unsafe_delete_path(name):
|
||||
raise Exception('bb.utils.remove: called with dangerous path "%s" and recurse=True, refusing to delete!' % name)
|
||||
if _check_unsafe_delete_path(path):
|
||||
raise Exception('bb.utils.remove: called with dangerous path "%s" and recurse=True, refusing to delete!' % path)
|
||||
# shutil.rmtree(name) would be ideal but its too slow
|
||||
cmd = []
|
||||
if ionice:
|
||||
@@ -776,7 +751,7 @@ def movefile(src, dest, newmtime = None, sstat = None):
|
||||
if not sstat:
|
||||
sstat = os.lstat(src)
|
||||
except Exception as e:
|
||||
logger.warning("movefile: Stating source file failed...", e)
|
||||
print("movefile: Stating source file failed...", e)
|
||||
return None
|
||||
|
||||
destexists = 1
|
||||
@@ -804,7 +779,7 @@ def movefile(src, dest, newmtime = None, sstat = None):
|
||||
os.unlink(src)
|
||||
return os.lstat(dest)
|
||||
except Exception as e:
|
||||
logger.warning("movefile: failed to properly create symlink:", dest, "->", target, e)
|
||||
print("movefile: failed to properly create symlink:", dest, "->", target, e)
|
||||
return None
|
||||
|
||||
renamefailed = 1
|
||||
@@ -821,7 +796,7 @@ def movefile(src, dest, newmtime = None, sstat = None):
|
||||
except Exception as e:
|
||||
if e.errno != errno.EXDEV:
|
||||
# Some random error.
|
||||
logger.warning("movefile: Failed to move", src, "to", dest, e)
|
||||
print("movefile: Failed to move", src, "to", dest, e)
|
||||
return None
|
||||
# Invalid cross-device-link 'bind' mounted or actually Cross-Device
|
||||
|
||||
@@ -833,13 +808,13 @@ def movefile(src, dest, newmtime = None, sstat = None):
|
||||
bb.utils.rename(destpath + "#new", destpath)
|
||||
didcopy = 1
|
||||
except Exception as e:
|
||||
logger.warning('movefile: copy', src, '->', dest, 'failed.', e)
|
||||
print('movefile: copy', src, '->', dest, 'failed.', e)
|
||||
return None
|
||||
else:
|
||||
#we don't yet handle special, so we need to fall back to /bin/mv
|
||||
a = getstatusoutput("/bin/mv -f " + "'" + src + "' '" + dest + "'")
|
||||
if a[0] != 0:
|
||||
logger.warning("movefile: Failed to move special file:" + src + "' to '" + dest + "'", a)
|
||||
print("movefile: Failed to move special file:" + src + "' to '" + dest + "'", a)
|
||||
return None # failure
|
||||
try:
|
||||
if didcopy:
|
||||
@@ -847,7 +822,7 @@ def movefile(src, dest, newmtime = None, sstat = None):
|
||||
os.chmod(destpath, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
|
||||
os.unlink(src)
|
||||
except Exception as e:
|
||||
logger.warning("movefile: Failed to chown/chmod/unlink", dest, e)
|
||||
print("movefile: Failed to chown/chmod/unlink", dest, e)
|
||||
return None
|
||||
|
||||
if newmtime:
|
||||
@@ -1006,9 +981,6 @@ def to_boolean(string, default=None):
|
||||
if not string:
|
||||
return default
|
||||
|
||||
if isinstance(string, int):
|
||||
return string != 0
|
||||
|
||||
normalized = string.lower()
|
||||
if normalized in ("y", "yes", "1", "true"):
|
||||
return True
|
||||
@@ -1659,20 +1631,23 @@ def disable_network(uid=None, gid=None):
|
||||
|
||||
def export_proxies(d):
|
||||
""" export common proxies variables from datastore to environment """
|
||||
import os
|
||||
|
||||
variables = ['http_proxy', 'HTTP_PROXY', 'https_proxy', 'HTTPS_PROXY',
|
||||
'ftp_proxy', 'FTP_PROXY', 'no_proxy', 'NO_PROXY',
|
||||
'GIT_PROXY_COMMAND', 'SSL_CERT_FILE', 'SSL_CERT_DIR']
|
||||
'GIT_PROXY_COMMAND']
|
||||
exported = False
|
||||
|
||||
origenv = d.getVar("BB_ORIGENV")
|
||||
|
||||
for name in variables:
|
||||
value = d.getVar(name)
|
||||
if not value and origenv:
|
||||
value = origenv.getVar(name)
|
||||
if value:
|
||||
os.environ[name] = value
|
||||
for v in variables:
|
||||
if v in os.environ.keys():
|
||||
exported = True
|
||||
else:
|
||||
v_proxy = d.getVar(v)
|
||||
if v_proxy is not None:
|
||||
os.environ[v] = v_proxy
|
||||
exported = True
|
||||
|
||||
return exported
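A rough usage sketch (illustrative only: the datastore is built ad hoc with
``bb.data.init()`` and the proxy URL is a made-up value; in real code the
caller already has a datastore):

    import os
    import bb.data
    import bb.utils

    d = bb.data.init()
    d.setVar("http_proxy", "http://proxy.example.com:8080")  # hypothetical value

    # Copies any proxy-related variables found in the datastore (or the original
    # environment) into os.environ so child processes such as fetchers see them.
    if bb.utils.export_proxies(d):
        print("exported http_proxy =", os.environ.get("http_proxy"))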
|
||||
|
||||
|
||||
def load_plugins(logger, plugins, pluginpath):
|
||||
@@ -1779,22 +1754,3 @@ def is_local_uid(uid=''):
|
||||
if str(uid) == line_split[2]:
|
||||
return True
|
||||
return False
|
||||
|
||||
def mkstemp(suffix=None, prefix=None, dir=None, text=False):
|
||||
"""
|
||||
Generates a unique filename, independent of time.
|
||||
|
||||
mkstemp() in glibc (at least) generates unique file names based on the
|
||||
current system time. When combined with highly parallel builds, and
|
||||
operating over NFS (e.g. shared sstate/downloads) this can result in
|
||||
conflicts and race conditions.
|
||||
|
||||
This function adds additional entropy to the file name so that a collision
|
||||
is independent of time and thus extremely unlikely.
|
||||
"""
|
||||
entropy = "".join(random.choices("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890", k=20))
|
||||
if prefix:
|
||||
prefix = prefix + entropy
|
||||
else:
|
||||
prefix = tempfile.gettempprefix() + entropy
|
||||
return tempfile.mkstemp(suffix=suffix, prefix=prefix, dir=dir, text=text)
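A minimal usage sketch (illustrative only; the directory and suffix are made-up
values, and the helper returns the same ``(fd, path)`` pair as
``tempfile.mkstemp()``):

    import os
    import bb.utils

    # The extra entropy lands in the file name, so two builders calling this at
    # the same instant (e.g. over shared NFS) still get distinct paths.
    fd, tmpname = bb.utils.mkstemp(dir="/tmp", suffix=".lock")
    try:
        os.write(fd, b"scratch data")
    finally:
        os.close(fd)
        os.unlink(tmpname)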
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
|
||||
@@ -585,7 +585,7 @@ class SiblingTest(TreeTest):
|
||||
</html>'''
|
||||
# All that whitespace looks good but makes the tests more
|
||||
# difficult. Get rid of it.
|
||||
markup = re.compile(r"\n\s*").sub("", markup)
|
||||
markup = re.compile("\n\s*").sub("", markup)
|
||||
self.tree = self.soup(markup)
|
||||
|
||||
|
||||
|
||||
@@ -30,7 +30,7 @@ class HashEquivalenceTestSetup(object):
|
||||
|
||||
server_index = 0
|
||||
|
||||
def start_server(self, dbpath=None, upstream=None, read_only=False, prefunc=server_prefunc, need_client=True):
|
||||
def start_server(self, dbpath=None, upstream=None, read_only=False, prefunc=server_prefunc):
|
||||
self.server_index += 1
|
||||
if dbpath is None:
|
||||
dbpath = os.path.join(self.temp_dir.name, "db%d.sqlite" % self.server_index)
|
||||
@@ -54,11 +54,8 @@ class HashEquivalenceTestSetup(object):
|
||||
def cleanup_client(client):
|
||||
client.close()
|
||||
|
||||
if need_client:
|
||||
client = create_client(server.address)
|
||||
self.addCleanup(cleanup_client, client)
|
||||
else:
|
||||
client = None
|
||||
client = create_client(server.address)
|
||||
self.addCleanup(cleanup_client, client)
|
||||
|
||||
return (client, server)
|
||||
|
||||
@@ -344,7 +341,7 @@ class HashEquivalenceCommonTests(object):
|
||||
old_signal = signal.signal(signal.SIGTERM, do_nothing)
|
||||
self.addCleanup(signal.signal, signal.SIGTERM, old_signal)
|
||||
|
||||
_, server = self.start_server(prefunc=prefunc, need_client=False)
|
||||
_, server = self.start_server(prefunc=prefunc)
|
||||
server.process.terminate()
|
||||
time.sleep(30)
|
||||
event.set()
|
||||
|
||||
@@ -2798,14 +2798,7 @@ class ParserReflect(object):
|
||||
def signature(self):
|
||||
try:
|
||||
import hashlib
|
||||
except ImportError:
|
||||
raise RuntimeError("Unable to import hashlib")
|
||||
try:
|
||||
sig = hashlib.new('MD5', usedforsecurity=False)
|
||||
except TypeError:
|
||||
# Some configurations don't appear to support two arguments
|
||||
sig = hashlib.new('MD5')
|
||||
try:
|
||||
if self.start:
|
||||
sig.update(self.start.encode('latin-1'))
|
||||
if self.prec:
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
|
||||
@@ -1,6 +1,4 @@
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
@@ -344,9 +342,9 @@ def auto_shutdown():
|
||||
def ping(host, port):
|
||||
from . import client
|
||||
|
||||
with client.PRClient() as conn:
|
||||
conn.connect_tcp(host, port)
|
||||
return conn.ping()
|
||||
conn = client.PRClient()
|
||||
conn.connect_tcp(host, port)
|
||||
return conn.ping()
|
||||
|
||||
def connect(host, port):
|
||||
from . import client
|
||||
|
||||
@@ -595,23 +595,24 @@ class _ProcessEvent:
|
||||
@type event: Event object
|
||||
@return: By convention when used from the ProcessEvent class:
|
||||
- Returning False or None (default value) means keep on
|
||||
executing next chained functors (see chain.py example).
|
||||
executing next chained functors (see chain.py example).
|
||||
- Returning True instead means do not execute next
|
||||
processing functions.
|
||||
@rtype: bool
|
||||
@raise ProcessEventError: Event object undispatchable,
|
||||
unknown event.
|
||||
"""
|
||||
stripped_mask = event.mask & ~IN_ISDIR
|
||||
# Bitbake hack - we see event masks of 0x6, i.e., IN_MODIFY & IN_ATTRIB.
|
||||
stripped_mask = event.mask - (event.mask & IN_ISDIR)
|
||||
# Bitbake hack - we see event masks of 0x6, IN_MODIFY & IN_ATTRIB
|
||||
# The kernel inotify code can set more than one of the bits in the mask,
|
||||
# fsnotify_change() in linux/fsnotify.h is quite clear that IN_ATTRIB,
|
||||
# IN_MODIFY and IN_ACCESS can arrive together.
|
||||
# This breaks the code below which assume only one mask bit is ever
|
||||
# set in an event. We don't care about attrib or access in bitbake so
|
||||
# drop those.
|
||||
if stripped_mask & IN_MODIFY:
|
||||
stripped_mask &= ~(IN_ATTRIB | IN_ACCESS)
|
||||
# set in an event. We don't care about attrib or access in bitbake so drop those
|
||||
if (stripped_mask & IN_MODIFY) and (stripped_mask & IN_ATTRIB):
|
||||
stripped_mask = stripped_mask - (stripped_mask & IN_ATTRIB)
|
||||
if (stripped_mask & IN_MODIFY) and (stripped_mask & IN_ACCESS):
|
||||
stripped_mask = stripped_mask - (stripped_mask & IN_ACCESS)
|
||||
|
||||
maskname = EventsCodes.ALL_VALUES.get(stripped_mask)
|
||||
if maskname is None:
|
||||
|
||||
@@ -1,7 +1,5 @@
|
||||
#!/usr/bin/env python3
|
||||
#
|
||||
# Copyright BitBake Contributors
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
|
||||
@@ -42,7 +42,7 @@
|
||||
<!-- Releases available -->
|
||||
<object model="orm.release" pk="1">
|
||||
<field type="CharField" name="name">kirkstone</field>
|
||||
<field type="CharField" name="description">Yocto Project 4.0 "Kirkstone"</field>
|
||||
<field type="CharField" name="description">Yocto Project 3.5 "Kirkstone"</field>
|
||||
<field rel="ManyToOneRel" to="orm.bitbakeversion" name="bitbake_version">1</field>
|
||||
<field type="CharField" name="branch_name">kirkstone</field>
|
||||
<field type="TextField" name="helptext">Toaster will run your builds using the tip of the <a href="https://git.yoctoproject.org/cgit/cgit.cgi/poky/log/?h=kirkstone">Yocto Project Kirkstone branch</a>.</field>
|
||||
|
||||
@@ -11,7 +11,7 @@ import os
|
||||
import re
|
||||
import logging
|
||||
import json
|
||||
import glob
|
||||
import subprocess
|
||||
from collections import Counter
|
||||
|
||||
from orm.models import Project, ProjectTarget, Build, Layer_Version
|
||||
@@ -227,18 +227,20 @@ class XhrSetDefaultImageUrl(View):
|
||||
# same logical name
|
||||
# * Each project that uses a layer will have its own
|
||||
# LayerVersion and Project Layer for it
|
||||
# * During the Project delete process, when the last
|
||||
# * During the Paroject delete process, when the last
|
||||
# LayerVersion for a 'local_source_dir' layer is deleted
|
||||
# then the Layer record is deleted to remove orphans
|
||||
#
|
||||
|
||||
def scan_layer_content(layer,layer_version):
|
||||
# if this is a local layer directory, we can immediately scan its content
|
||||
if os.path.isdir(layer.local_source_dir):
|
||||
if layer.local_source_dir:
|
||||
try:
|
||||
# recipes-*/*/*.bb
|
||||
recipes_list = glob.glob(os.path.join(layer.local_source_dir, 'recipes-*/*/*.bb'))
|
||||
for recipe in recipes_list:
|
||||
cmd = '%s %s' % ('ls', os.path.join(layer.local_source_dir,'recipes-*/*/*.bb'))
|
||||
recipes_list = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read()
|
||||
recipes_list = recipes_list.decode("utf-8").strip()
|
||||
if recipes_list and 'No such' not in recipes_list:
|
||||
for recipe in recipes_list.split('\n'):
|
||||
recipe_path = recipe[recipe.rfind('recipes-'):]
|
||||
recipe_name = recipe[recipe.rfind('/')+1:].replace('.bb','')
|
||||
@@ -258,9 +260,6 @@ def scan_layer_content(layer,layer_version):
|
||||
|
||||
except Exception as e:
|
||||
logger.warning("ERROR:scan_layer_content: %s" % e)
|
||||
else:
|
||||
logger.warning("ERROR: wrong path given")
|
||||
raise KeyError("local_source_dir")
|
||||
|
||||
class XhrLayer(View):
|
||||
""" Delete, Get, Add and Update Layer information
|
||||
@@ -457,18 +456,15 @@ class XhrLayer(View):
|
||||
'layerdetailurl':
|
||||
layer_dep.get_detailspage_url(project.pk)})
|
||||
|
||||
# Only scan_layer_content if layer is local
|
||||
if layer_data.get('local_source_dir', None):
|
||||
# Scan the layer's content and update components
|
||||
scan_layer_content(layer,layer_version)
|
||||
# Scan the layer's content and update components
|
||||
scan_layer_content(layer,layer_version)
|
||||
|
||||
except Layer_Version.DoesNotExist:
|
||||
return error_response("layer-dep-not-found")
|
||||
except Project.DoesNotExist:
|
||||
return error_response("project-not-found")
|
||||
except KeyError as e:
|
||||
_log("KeyError: %s" % e)
|
||||
return error_response(f"incorrect-parameters")
|
||||
except KeyError:
|
||||
return error_response("incorrect-parameters")
|
||||
|
||||
return JsonResponse({'error': "ok",
|
||||
'imported_layer': {
|
||||
|
||||
documentation/.gitignore
@@ -5,5 +5,3 @@ sphinx-static/switchers.js
|
||||
.vscode/
|
||||
*/svg/*.png
|
||||
*/svg/*.pdf
|
||||
styles/*
|
||||
!styles/config
|
||||
|
||||
@@ -1,7 +0,0 @@
|
||||
StylesPath = styles
|
||||
MinAlertLevel = suggestion
|
||||
Packages = RedHat, proselint, write-good, alex, Readability, Joblint
|
||||
Vocab = Yocto, OpenSource
|
||||
[*.rst]
|
||||
BasedOnStyles = Vale, RedHat, proselint, write-good, alex, Readability, Joblint
|
||||
|
||||
@@ -3,18 +3,14 @@
|
||||
|
||||
# You can set these variables from the command line, and also
|
||||
# from the environment for the first two.
|
||||
SPHINXOPTS ?= -W --keep-going -j auto
|
||||
SPHINXBUILD ?= sphinx-build
|
||||
# Release notes are excluded because they contain contributor names and commit messages which can't be modified
|
||||
VALEOPTS ?= --no-wrap --glob '!migration-guides/release-notes-*.rst'
|
||||
SOURCEDIR = .
|
||||
VALEDOCS ?= $(SOURCEDIR)
|
||||
SPHINXLINTDOCS ?= $(SOURCEDIR)
|
||||
IMAGEDIRS = */svg
|
||||
BUILDDIR = _build
|
||||
DESTDIR = final
|
||||
SVG2PNG = inkscape
|
||||
SVG2PDF = inkscape
|
||||
SPHINXOPTS ?= -W --keep-going -j auto
|
||||
SPHINXBUILD ?= sphinx-build
|
||||
SOURCEDIR = .
|
||||
IMAGEDIRS = */svg
|
||||
BUILDDIR = _build
|
||||
DESTDIR = final
|
||||
SVG2PNG = inkscape
|
||||
SVG2PDF = inkscape
|
||||
|
||||
ifeq ($(shell if which $(SPHINXBUILD) >/dev/null 2>&1; then echo 1; else echo 0; fi),0)
|
||||
$(error "The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed")
|
||||
@@ -24,7 +20,7 @@ endif
|
||||
help:
|
||||
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
|
||||
|
||||
.PHONY: all help Makefile clean stylecheck publish epub latexpdf
|
||||
.PHONY: all help Makefile clean publish epub latexpdf
|
||||
|
||||
publish: Makefile html singlehtml
|
||||
rm -rf $(BUILDDIR)/$(DESTDIR)/
|
||||
@@ -50,19 +46,10 @@ PNGs := $(foreach dir, $(IMAGEDIRS), $(patsubst %.svg,%.png,$(wildcard $(SOURCED
|
||||
clean:
|
||||
@rm -rf $(BUILDDIR) $(PNGs) $(PDFs) poky.yaml sphinx-static/switchers.js
|
||||
|
||||
stylecheck:
|
||||
vale sync
|
||||
vale $(VALEOPTS) $(VALEDOCS)
|
||||
|
||||
sphinx-lint:
|
||||
sphinx-lint $(SPHINXLINTDOCS)
|
||||
|
||||
epub: $(PNGs)
|
||||
$(SOURCEDIR)/set_versions.py
|
||||
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
|
||||
|
||||
latexpdf: $(PDFs)
|
||||
$(SOURCEDIR)/set_versions.py
|
||||
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
|
||||
|
||||
all: html epub latexpdf
|
||||
|
||||
@@ -34,18 +34,16 @@ Manual Organization
|
||||
|
||||
Here are the folders corresponding to the individual manuals:
|
||||
|
||||
* brief-yoctoprojectqs - Yocto Project Quick Start
|
||||
* overview-manual - Yocto Project Overview and Concepts Manual
|
||||
* contributor-guide - Yocto Project and OpenEmbedded Contributor Guide
|
||||
* ref-manual - Yocto Project Reference Manual
|
||||
* sdk-manual - Yocto Project Software Development Kit (SDK) Developer's Guide.
|
||||
* bsp-guide - Yocto Project Board Support Package (BSP) Developer's Guide
|
||||
* dev-manual - Yocto Project Development Tasks Manual
|
||||
* kernel-dev - Yocto Project Linux Kernel Development Manual
|
||||
* ref-manual - Yocto Project Reference Manual
|
||||
* brief-yoctoprojectqs - Yocto Project Quick Start
|
||||
* profile-manual - Yocto Project Profiling and Tracing Manual
|
||||
* sdk-manual - Yocto Project Software Development Kit (SDK) Developer's Guide.
|
||||
* toaster-manual - Toaster User Manual
|
||||
* test-manual - Yocto Project Test Environment Manual
|
||||
* migration-guides - Yocto Project Release and Migration Notes
|
||||
|
||||
Each folder is self-contained regarding content and figures.
|
||||
|
||||
@@ -131,10 +129,6 @@ Also install the "inkscape" package from your distribution.
|
||||
Inkscape is needed to convert SVG graphics to PNG (for EPUB
|
||||
export) and to PDF (for PDF export).
|
||||
|
||||
Additionally install "fncychap.sty" TeX font if you want to build PDFs. Debian
|
||||
and Ubuntu have it in "texlive-latex-extra" package while RedHat distributions
|
||||
and OpenSUSE have it in "texlive-fncychap" package for example.
|
||||
|
||||
To build the documentation locally, run:
|
||||
|
||||
$ cd documentation
|
||||
@@ -151,50 +145,6 @@ dependencies in a virtual environment:
|
||||
$ pipenv install
|
||||
$ pipenv run make html
|
||||
|
||||
Style checking the Yocto Project documentation
|
||||
==============================================
|
||||
|
||||
The project is starting to use Vale (https://vale.sh/)
|
||||
to validate the text style.
|
||||
|
||||
To install Vale:
|
||||
|
||||
$ pip install vale
|
||||
|
||||
To run Vale:
|
||||
|
||||
$ make stylecheck
|
||||
|
||||
Style checking the whole documentation might take some time and generate a
|
||||
lot of warnings/errors, thus one can run Vale on a subset of files or
|
||||
directories:
|
||||
|
||||
$ make stylecheck VALEDOCS=<file>
|
||||
$ make stylecheck VALEDOCS="<file1> <file2>"
|
||||
$ make stylecheck VALEDOCS=<dir>
|
||||
|
||||
Lint checking the Yocto Project documentation
|
||||
=============================================
|
||||
|
||||
To fix errors which are not reported by Sphinx itself,
|
||||
the project uses sphinx-lint (https://github.com/sphinx-contrib/sphinx-lint).
|
||||
|
||||
To install sphinx-lint:
|
||||
|
||||
$ pip install sphinx-lint
|
||||
|
||||
To run sphinx-lint:
|
||||
|
||||
$ make sphinx-lint
|
||||
|
||||
Lint checking the whole documentation might take some time and generate a
|
||||
lot of warnings/errors, thus one can run sphinx-lint on a subset of files
|
||||
or directories:
|
||||
|
||||
$ make sphinx-lint SPHINXLINTDOCS=<file>
|
||||
$ make sphinx-lint SPHINXLINTDOCS="<file1> <file2>"
|
||||
$ make sphinx-lint SPHINXLINTDOCS=<dir>
|
||||
|
||||
Sphinx theme and CSS customization
|
||||
==================================
|
||||
|
||||
@@ -321,19 +271,6 @@ websites.
|
||||
More information can be found here:
|
||||
https://sublime-and-sphinx-guide.readthedocs.io/en/latest/references.html.
|
||||
|
||||
For external links, we use this syntax:
|
||||
`link text <link URL>`__
|
||||
|
||||
instead of:
|
||||
`link text <link URL>`_
|
||||
|
||||
Both syntaxes work, but the latter also creates a "link text" reference
|
||||
target which could conflict with other references with the same name.
|
||||
So, only use this variant when you wish to make multiple references
|
||||
to this link, reusing only the target name.
|
||||
|
||||
See https://stackoverflow.com/questions/27420317/restructured-text-rst-http-links-underscore-vs-use
|
||||
|
||||
Anchor (<#link>) links are forbidden as they are not checked by Sphinx during
|
||||
the build and may be broken without anyone knowing about it.
|
||||
|
||||
@@ -403,16 +340,13 @@ The sphinx.ext.intersphinx extension is enabled by default
|
||||
so that we can cross reference content from other Sphinx based
|
||||
documentation projects, such as the BitBake manual.
|
||||
|
||||
References to the BitBake manual can directly be done:
|
||||
References to the BitBake manual can be done:
|
||||
- With a specific description instead of the section name:
|
||||
:ref:`Azure Storage fetcher (az://) <bitbake-user-manual/bitbake-user-manual-fetching:fetchers>`
|
||||
:ref:`Azure Storage fetcher (az://) <bitbake:bitbake-user-manual/bitbake-user-manual-fetching:fetchers>`
|
||||
- With the section name:
|
||||
:ref:`bitbake-user-manual/bitbake-user-manual-intro:usage and syntax` option
|
||||
|
||||
If you want to refer to an entire document (or chapter) in the BitBake manual,
|
||||
you have to use the ":doc:" macro with the "bitbake:" prefix:
|
||||
- :doc:`BitBake User Manual <bitbake:index>`
|
||||
- :doc:`bitbake:bitbake-user-manual/bitbake-user-manual-metadata`" chapter
|
||||
:ref:`bitbake:bitbake-user-manual/bitbake-user-manual-intro:usage and syntax` option
|
||||
- Linking to the entire BitBake manual:
|
||||
:doc:`BitBake User Manual <bitbake:index>`
|
||||
|
||||
Note that a reference to a variable (:term:`VARIABLE`) automatically points to
|
||||
the BitBake manual if the variable is not described in the Reference Manual's Variable Glossary.
|
||||
@@ -421,30 +355,8 @@ BitBake manual as follows:
|
||||
|
||||
:term:`bitbake:BB_NUMBER_PARSE_THREADS`
|
||||
|
||||
This would be the same if we had identical document filenames in
|
||||
both the Yocto Project and BitBake manuals:
|
||||
|
||||
:ref:`bitbake:directory/file:section title`
|
||||
|
||||
Submitting documentation changes
|
||||
================================
|
||||
|
||||
Please refer to our contributor guide here: https://docs.yoctoproject.org/contributor-guide/
|
||||
for full details on how to submit changes.
|
||||
|
||||
As a quick guide, patches should be sent to docs@lists.yoctoproject.org
|
||||
The git command to do that would be:
|
||||
|
||||
git send-email -M -1 --to docs@lists.yoctoproject.org
|
||||
|
||||
The 'To' header can be set as default for this repository:
|
||||
|
||||
git config sendemail.to docs@lists.yoctoproject.org
|
||||
|
||||
Now you can just do 'git send-email origin/master..' to send all local patches.
|
||||
|
||||
Read the other sections in this document and documentation/standards.md for
|
||||
rules to follow when contributing to the documentation.
|
||||
|
||||
Git repository: https://git.yoctoproject.org/yocto-docs
|
||||
Mailing list: docs@lists.yoctoproject.org
|
||||
Please see the top level README file in this repository for details of where
|
||||
to send patches.
|
||||
|
||||
@@ -25,11 +25,18 @@ build a reference embedded OS called Poky.
|
||||
in the Yocto Project Development Tasks Manual for more
|
||||
information.
|
||||
|
||||
- You may use version 2 of Windows Subsystem For Linux (WSL 2) to set
|
||||
up a build host using Windows 10 or later, Windows Server 2019 or later.
|
||||
See the :ref:`dev-manual/start:setting up to use windows subsystem for
|
||||
linux (wsl 2)` section in the Yocto Project Development Tasks Manual
|
||||
for more information.
|
||||
- You may use Windows Subsystem For Linux v2 to set up a build host
|
||||
using Windows 10.
|
||||
|
||||
.. note::
|
||||
|
||||
The Yocto Project is not compatible with WSLv1; it is
compatible with, but not officially supported or validated on,
WSLv2. If you still decide to use WSL, please upgrade to WSLv2.
|
||||
|
||||
See the :ref:`dev-manual/start:setting up to use windows
|
||||
subsystem for linux (wslv2)` section in the Yocto Project Development
|
||||
Tasks Manual for more information.
|
||||
|
||||
If you want more conceptual or background information on the Yocto
|
||||
Project, see the :doc:`/overview-manual/index`.
|
||||
@@ -40,13 +47,7 @@ Compatible Linux Distribution
|
||||
Make sure your :term:`Build Host` meets the
|
||||
following requirements:
|
||||
|
||||
- At least &MIN_DISK_SPACE; Gbytes of free disk space, though
|
||||
much more will help to run multiple builds and increase
|
||||
performance by reusing build artifacts.
|
||||
|
||||
- At least &MIN_RAM; Gbytes of RAM, though a modern build host with as
|
||||
much RAM and as many CPU cores as possible is strongly recommended to
|
||||
maximize build performance.
|
||||
- 50 Gbytes of free disk space
|
||||
|
||||
- Runs a supported Linux distribution (i.e. recent releases of Fedora,
|
||||
openSUSE, CentOS, Debian, or Ubuntu). For a list of Linux
|
||||
@@ -57,18 +58,17 @@ following requirements:
|
||||
:ref:`dev-manual/start:preparing the build host`
|
||||
section in the Yocto Project Development Tasks Manual.
|
||||
|
||||
- Ensure that the following utilities have these minimum version numbers:
|
||||
-
|
||||
|
||||
- Git &MIN_GIT_VERSION; or greater
|
||||
- tar &MIN_TAR_VERSION; or greater
|
||||
- Python &MIN_PYTHON_VERSION; or greater.
|
||||
- gcc &MIN_GCC_VERSION; or greater.
|
||||
- GNU make &MIN_MAKE_VERSION; or greater
|
||||
|
||||
If your build host does not satisfy all of the above version
|
||||
If your build host does not meet any of these three listed version
|
||||
requirements, you can take steps to prepare the system so that you
|
||||
can still use the Yocto Project. See the
|
||||
:ref:`ref-manual/system-requirements:required git, tar, python, make and gcc versions`
|
||||
:ref:`ref-manual/system-requirements:required git, tar, python and gcc versions`
|
||||
section in the Yocto Project Reference Manual for information.
|
||||
|
||||
Build Host Packages
|
||||
@@ -76,9 +76,11 @@ Build Host Packages
|
||||
|
||||
You must install essential host packages on your build host. The
|
||||
following command installs the host packages based on an Ubuntu
|
||||
distribution::
|
||||
distribution:
|
||||
|
||||
$ sudo apt install &UBUNTU_DEBIAN_HOST_PACKAGES_ESSENTIAL;
|
||||
.. code-block:: shell
|
||||
|
||||
$ sudo apt install &UBUNTU_HOST_PACKAGES_ESSENTIAL;
|
||||
|
||||
.. note::
|
||||
|
||||
@@ -182,7 +184,7 @@ an entire Linux distribution, including the toolchain, from source.
|
||||
page of the Yocto Project Wiki.
|
||||
|
||||
#. **Initialize the Build Environment:** From within the ``poky``
|
||||
directory, run the :ref:`ref-manual/structure:``oe-init-build-env```
|
||||
directory, run the :ref:`ref-manual/structure:\`\`oe-init-build-env\`\``
|
||||
environment
|
||||
setup script to define Yocto Project's build environment on your
|
||||
build host.
|
||||
@@ -248,13 +250,18 @@ an entire Linux distribution, including the toolchain, from source.
|
||||
This is relevant only when your network and the server that you use
|
||||
can download these artifacts faster than you would be able to build them.
|
||||
|
||||
To use such mirrors, uncomment the below lines in your ``conf/local.conf``
|
||||
To use such mirrors, uncomment the below lines in your ``local.conf``
|
||||
file in the :term:`Build Directory`::
|
||||
|
||||
BB_HASHSERVE_UPSTREAM = "hashserv.yoctoproject.org:8686"
|
||||
SSTATE_MIRRORS ?= "file://.* http://sstate.yoctoproject.org/all/PATH;downloadfilename=PATH"
|
||||
BB_HASHSERVE = "auto"
|
||||
BB_SIGNATURE_HANDLER = "OEEquivHash"
|
||||
BB_HASHSERVE = "auto"
|
||||
BB_HASHSERVE_UPSTREAM = "typhoon.yocto.io:8687"
|
||||
SSTATE_MIRRORS ?= "file://.* https://sstate.yoctoproject.org/&YOCTO_DOC_VERSION;/PATH;downloadfilename=PATH"
|
||||
|
||||
The above settings assume the use of Yocto Project &YOCTO_DOC_VERSION;.
|
||||
If you are using the development version instead, set :term:`SSTATE_MIRRORS` as follows::
|
||||
|
||||
SSTATE_MIRRORS ?= "file://.* https://sstate.yoctoproject.org/dev/PATH;downloadfilename=PATH"
|
||||
|
||||
#. **Start the Build:** Continue with the following command to build an OS
|
||||
image for the target, which is ``core-image-sato`` in this example:
|
||||
@@ -367,7 +374,7 @@ Follow these steps to add a hardware layer:
|
||||
|
||||
You can find
|
||||
more information on adding layers in the
|
||||
:ref:`dev-manual/layers:adding a layer using the \`\`bitbake-layers\`\` script`
|
||||
:ref:`dev-manual/common-tasks:adding a layer using the \`\`bitbake-layers\`\` script`
|
||||
section.
|
||||
|
||||
Completing these steps has added the ``meta-altera`` layer to your Yocto
|
||||
@@ -402,7 +409,7 @@ The following commands run the tool to create a layer named
|
||||
|
||||
For more information
|
||||
on layers and how to create them, see the
|
||||
:ref:`dev-manual/layers:creating a general layer using the \`\`bitbake-layers\`\` script`
|
||||
:ref:`dev-manual/common-tasks:creating a general layer using the \`\`bitbake-layers\`\` script`
|
||||
section in the Yocto Project Development Tasks Manual.
|
||||
|
||||
Where To Go Next
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
|
||||
|
||||
**************************************************
|
||||
Board Support Packages (BSP) --- Developer's Guide
|
||||
**************************************************
|
||||
************************************************
|
||||
Board Support Packages (BSP) - Developer's Guide
|
||||
************************************************
|
||||
|
||||
A Board Support Package (BSP) is a collection of information that
|
||||
defines how to support a particular hardware device, set of devices, or
|
||||
@@ -81,7 +81,7 @@ directory of that Layer. This directory is what you add to the
|
||||
``conf/bblayers.conf`` file found in your
|
||||
:term:`Build Directory`, which is
|
||||
established after you run the OpenEmbedded build environment setup
|
||||
script (i.e. :ref:`ref-manual/structure:``oe-init-build-env```).
|
||||
script (i.e. :ref:`ref-manual/structure:\`\`oe-init-build-env\`\``).
|
||||
Adding the root directory allows the :term:`OpenEmbedded Build System`
|
||||
to recognize the BSP
|
||||
layer and from it build an image. Here is an example::
|
||||
@@ -128,7 +128,7 @@ you want to work with, such as::
|
||||
and so on.
|
||||
|
||||
For more information on layers, see the
|
||||
":ref:`dev-manual/layers:understanding and creating layers`"
|
||||
":ref:`dev-manual/common-tasks:understanding and creating layers`"
|
||||
section of the Yocto Project Development Tasks Manual.
|
||||
|
||||
Preparing Your Build Host to Work With BSP Layers
|
||||
@@ -167,7 +167,7 @@ section.
|
||||
BSPs, which are maintained in their own layers or in layers designed
|
||||
to contain several BSPs. To get an idea of machine support through
|
||||
BSP layers, you can look at the
|
||||
:yocto_dl:`index of machines </releases/yocto/&DISTRO_REL_LATEST_TAG;/machines>`
|
||||
:yocto_dl:`index of machines </releases/yocto/yocto-&DISTRO;/machines>`
|
||||
for the release.
|
||||
|
||||
#. *Optionally Clone the meta-intel BSP Layer:* If your hardware is
|
||||
@@ -230,7 +230,7 @@ section.
|
||||
|
||||
#. *Initialize the Build Environment:* While in the root directory of
|
||||
the Source Directory (i.e. ``poky``), run the
|
||||
:ref:`ref-manual/structure:``oe-init-build-env``` environment
|
||||
:ref:`ref-manual/structure:\`\`oe-init-build-env\`\`` environment
|
||||
setup script to define the OpenEmbedded build environment on your
|
||||
build host. ::
|
||||
|
||||
@@ -464,7 +464,7 @@ requirements are handled with the ``COPYING.MIT`` file.
|
||||
Licensing files can be MIT, BSD, GPLv*, and so forth. These files are
|
||||
recommended for the BSP but are optional and totally up to the BSP
|
||||
developer. For information on how to maintain license compliance, see
|
||||
the ":ref:`dev-manual/licenses:maintaining open source license compliance during your product's lifecycle`"
|
||||
the ":ref:`dev-manual/common-tasks:maintaining open source license compliance during your product's lifecycle`"
|
||||
section in the Yocto Project Development Tasks Manual.
|
||||
|
||||
README File
|
||||
@@ -590,7 +590,7 @@ filenames correspond to the values to which users have set the
|
||||
|
||||
These files define things such as the kernel package to use
|
||||
(:term:`PREFERRED_PROVIDER` of
|
||||
:ref:`virtual/kernel <dev-manual/new-recipe:using virtual providers>`),
|
||||
:ref:`virtual/kernel <dev-manual/common-tasks:using virtual providers>`),
|
||||
the hardware drivers to include in different types of images, any
|
||||
special software components that are needed, any bootloader information,
|
||||
and also any special image format requirements.
|
||||
@@ -675,21 +675,21 @@ to the kernel recipe by using a similarly named append file, which is
|
||||
located in the BSP Layer for your target device (e.g. the
|
||||
``meta-bsp_root_name/recipes-kernel/linux`` directory).
|
||||
|
||||
Suppose you are using the ``linux-yocto_6.12.bb`` recipe to build the
|
||||
Suppose you are using the ``linux-yocto_4.4.bb`` recipe to build the
|
||||
kernel. In other words, you have selected the kernel in your
|
||||
``"bsp_root_name".conf`` file by adding
|
||||
:term:`PREFERRED_PROVIDER` and :term:`PREFERRED_VERSION`
|
||||
statements as follows::
|
||||
|
||||
PREFERRED_PROVIDER_virtual/kernel ?= "linux-yocto"
|
||||
PREFERRED_VERSION_linux-yocto ?= "6.12%"
|
||||
PREFERRED_VERSION_linux-yocto ?= "4.4%"
|
||||
|
||||
.. note::
|
||||
|
||||
When the preferred provider is assumed by default, the :term:`PREFERRED_PROVIDER`
|
||||
statement does not appear in the ``"bsp_root_name".conf`` file.
|
||||
|
||||
You would use the ``linux-yocto_6.12.bbappend`` file to append specific
|
||||
You would use the ``linux-yocto_4.4.bbappend`` file to append specific
|
||||
BSP settings to the kernel, thus configuring the kernel for your
|
||||
particular BSP.
|
||||
|
||||
@@ -699,19 +699,14 @@ in the Yocto Project Linux Kernel Development Manual.
|
||||
|
||||
An alternate scenario is when you create your own kernel recipe for the
|
||||
BSP. A good example of this is the Raspberry Pi BSP. If you examine the
|
||||
``recipes-kernel/linux`` directory in that layer you see the following
|
||||
Raspberry Pi-specific recipes and associated files::
|
||||
``recipes-kernel/linux`` directory you see the following::
|
||||
|
||||
files/
|
||||
linux-raspberrypi_6.12.bb
|
||||
linux-raspberrypi_6.1.bb
|
||||
linux-raspberrypi_6.6.bb
|
||||
linux-raspberrypi-dev.bb
|
||||
linux-raspberrypi.inc
|
||||
linux-raspberrypi-v7_6.12.bb
|
||||
linux-raspberrypi-v7_6.1.bb
|
||||
linux-raspberrypi-v7_6.6.bb
|
||||
linux-raspberrypi-v7.inc
|
||||
linux-raspberrypi_4.14.bb
|
||||
linux-raspberrypi_4.9.bb
|
||||
|
||||
The directory contains three kernel recipes and a common include file.
|
||||
|
||||
Developing a Board Support Package (BSP)
|
||||
========================================
|
||||
@@ -762,7 +757,7 @@ workflow.
|
||||
OpenEmbedded build system knows about. For more information on
|
||||
layers, see the ":ref:`overview-manual/yp-intro:the yocto project layer model`"
|
||||
section in the Yocto Project Overview and Concepts Manual. You can also
|
||||
reference the ":ref:`dev-manual/layers:understanding and creating layers`"
|
||||
reference the ":ref:`dev-manual/common-tasks:understanding and creating layers`"
|
||||
section in the Yocto Project Development Tasks Manual. For more
|
||||
information on BSP layers, see the ":ref:`bsp-guide/bsp:bsp layers`"
|
||||
section.
|
||||
@@ -779,6 +774,20 @@ workflow.
|
||||
|
||||
- Two general IA platforms (``genericx86`` and ``genericx86-64``)
|
||||
|
||||
- There are three core Intel BSPs in the Yocto Project
|
||||
release, in the ``meta-intel`` layer:
|
||||
|
||||
- ``intel-core2-32``, which is a BSP optimized for the Core2
|
||||
family of CPUs as well as all CPUs prior to the Silvermont
|
||||
core.
|
||||
|
||||
- ``intel-corei7-64``, which is a BSP optimized for Nehalem
|
||||
and later Core and Xeon CPUs as well as Silvermont and later
|
||||
Atom CPUs, such as the Baytrail SoCs.
|
||||
|
||||
- ``intel-quark``, which is a BSP optimized for the Intel
|
||||
Galileo gen1 & gen2 development boards.
|
||||
|
||||
When you set up a layer for a new BSP, you should follow a standard
|
||||
layout. This layout is described in the ":ref:`bsp-guide/bsp:example filesystem layout`"
|
||||
section. In the standard layout, notice
|
||||
@@ -807,7 +816,7 @@ workflow.
|
||||
key configuration files are configured appropriately: the
|
||||
``conf/local.conf`` and the ``conf/bblayers.conf`` file. You must
|
||||
make the OpenEmbedded build system aware of your new layer. See the
|
||||
":ref:`dev-manual/layers:enabling your layer`"
|
||||
":ref:`dev-manual/common-tasks:enabling your layer`"
|
||||
section in the Yocto Project Development Tasks Manual for information
|
||||
on how to let the build system know about your new layer.
|
||||
|
||||
@@ -836,7 +845,7 @@ Before looking at BSP requirements, you should consider the following:
|
||||
layer that can be added to the Yocto Project. For guidelines on
|
||||
creating a layer that meets these base requirements, see the
|
||||
":ref:`bsp-guide/bsp:bsp layers`" section in this manual and the
|
||||
":ref:`dev-manual/layers:understanding and creating layers`"
|
||||
":ref:`dev-manual/common-tasks:understanding and creating layers`"
|
||||
section in the Yocto Project Development Tasks Manual.
|
||||
|
||||
- The requirements in this section apply regardless of how you package
|
||||
@@ -858,7 +867,8 @@ Before looking at BSP requirements, you should consider the following:
|
||||
dictating that a specific kernel or kernel version be used in a given
|
||||
BSP.
|
||||
|
||||
The requirements for a released BSP that conform to the Yocto Project are:
|
||||
Following are the requirements for a released BSP that conform to the
|
||||
Yocto Project:
|
||||
|
||||
- *Layer Name:* The BSP must have a layer name that follows the Yocto
|
||||
Project standards. For information on BSP layer names, see the
|
||||
@@ -883,8 +893,8 @@ The requirements for a released BSP that conform to the Yocto Project are:
|
||||
``recipes-*`` subdirectories specific to the recipe's function, or
|
||||
within a subdirectory containing a set of closely-related recipes.
|
||||
The recipes themselves should follow the general guidelines for
|
||||
recipes found in the ":doc:`../contributor-guide/recipe-style-guide`"
|
||||
in the Yocto Project and OpenEmbedded Contributor Guide.
|
||||
recipes used in the Yocto Project found in the ":oe_wiki:`OpenEmbedded
|
||||
Style Guide </Styleguide>`".
|
||||
|
||||
- *License File:* You must include a license file in the
|
||||
``meta-bsp_root_name`` directory. This license covers the BSP
|
||||
@@ -917,8 +927,8 @@ The requirements for a released BSP that conform to the Yocto Project are:
|
||||
- The name and contact information for the BSP layer maintainer.
|
||||
This is the person to whom patches and questions should be sent.
|
||||
For information on how to find the right person, see the
|
||||
:doc:`../contributor-guide/submit-changes` section in the Yocto Project and
|
||||
OpenEmbedded Contributor Guide.
|
||||
":ref:`dev-manual/common-tasks:submitting a change to the yocto project`"
|
||||
section in the Yocto Project Development Tasks Manual.
|
||||
|
||||
- Instructions on how to build the BSP using the BSP layer.
|
||||
|
||||
@@ -962,7 +972,7 @@ The requirements for a released BSP that conform to the Yocto Project are:
|
||||
Released BSP Recommendations
|
||||
----------------------------
|
||||
|
||||
Here are recommendations for released BSPs that conform to the
|
||||
Following are recommendations for released BSPs that conform to the
|
||||
Yocto Project:
|
||||
|
||||
- *Bootable Images:* Released BSPs can contain one or more bootable
|
||||
@@ -1003,7 +1013,7 @@ the following:
|
||||
|
||||
- Create a ``*.bbappend`` file for the modified recipe. For information on using
|
||||
append files, see the
|
||||
":ref:`dev-manual/layers:appending other layers metadata with your layer`"
|
||||
":ref:`dev-manual/common-tasks:appending other layers metadata with your layer`"
|
||||
section in the Yocto Project Development Tasks Manual.
|
||||
|
||||
- Ensure your directory structure in the BSP layer that supports your
|
||||
@@ -1024,7 +1034,7 @@ the following:
|
||||
that additional hierarchy and the files would obviously not be able
|
||||
to reside in a machine-specific directory.
|
||||
|
||||
Here is a specific example to help you better understand the
|
||||
Following is a specific example to help you better understand the
|
||||
process. This example customizes a recipe by adding a
|
||||
BSP-specific configuration file named ``interfaces`` to the
|
||||
``init-ifupdown_1.0.bb`` recipe for machine "xyz" where the BSP layer
|
||||
@@ -1107,7 +1117,7 @@ list describes them in order of preference:
|
||||
Specifying the matching license string signifies that you agree to
|
||||
the license. Thus, the build system can build the corresponding
|
||||
recipe and include the component in the image. See the
|
||||
":ref:`dev-manual/licenses:enabling commercially licensed recipes`"
|
||||
":ref:`dev-manual/common-tasks:enabling commercially licensed recipes`"
|
||||
section in the Yocto Project Development Tasks Manual for details on
|
||||
how to use these variables.
|
||||
|
||||
@@ -1159,7 +1169,7 @@ Use these steps to create a BSP layer:
|
||||
``create-layer`` subcommand to create a new general layer. For
|
||||
instructions on how to create a general layer using the
|
||||
``bitbake-layers`` script, see the
|
||||
":ref:`dev-manual/layers:creating a general layer using the \`\`bitbake-layers\`\` script`"
|
||||
":ref:`dev-manual/common-tasks:creating a general layer using the \`\`bitbake-layers\`\` script`"
|
||||
section in the Yocto Project Development Tasks Manual.
|
||||
|
||||
- *Create a Layer Configuration File:* Every layer needs a layer
|
||||
@@ -1169,14 +1179,14 @@ Use these steps to create a BSP layer:
|
||||
:yocto_git:`Source Repositories <>`. To get examples of what you need
|
||||
in your configuration file, locate a layer (e.g. "meta-ti") and
|
||||
examine the
|
||||
:yocto_git:`local.conf </meta-ti/tree/meta-ti-bsp/conf/layer.conf>`
|
||||
:yocto_git:`local.conf </meta-ti/tree/conf/layer.conf>`
|
||||
file.
|
||||
|
||||
- *Create a Machine Configuration File:* Create a
|
||||
``conf/machine/bsp_root_name.conf`` file. See
|
||||
:yocto_git:`meta-yocto-bsp/conf/machine </poky/tree/meta-yocto-bsp/conf/machine>`
|
||||
for sample ``bsp_root_name.conf`` files. There are other samples such as
|
||||
:yocto_git:`meta-ti </meta-ti/tree/meta-ti-bsp/conf/machine>`
|
||||
:yocto_git:`meta-ti </meta-ti/tree/conf/machine>`
|
||||
and
|
||||
:yocto_git:`meta-freescale </meta-freescale/tree/conf/machine>`
|
||||
from other vendors that have more specific machine and tuning
|
||||
@@ -1184,7 +1194,7 @@ Use these steps to create a BSP layer:
|
||||
|
||||
- *Create a Kernel Recipe:* Create a kernel recipe in
|
||||
``recipes-kernel/linux`` by either using a kernel append file or a
|
||||
new custom kernel recipe file (e.g. ``linux-yocto_6.12.bb``). The BSP
|
||||
new custom kernel recipe file (e.g. ``yocto-linux_4.12.bb``). The BSP
|
||||
layers mentioned in the previous step also contain different kernel
|
||||
examples. See the ":ref:`kernel-dev/common:modifying an existing recipe`"
|
||||
section in the Yocto Project Linux Kernel Development Manual for
|
||||
@@ -1199,7 +1209,7 @@ BSP Layer Configuration Example
|
||||
-------------------------------
|
||||
|
||||
The layer's ``conf`` directory contains the ``layer.conf`` configuration
|
||||
file. In this example, the ``conf/layer.conf`` file is the following::
|
||||
file. In this example, the ``conf/layer.conf`` is the following::
|
||||
|
||||
# We have a conf and classes directory, add to BBPATH
|
||||
BBPATH .= ":${LAYERDIR}"
|
||||
@@ -1219,7 +1229,7 @@ configuration files is to examine various files for BSP from the
|
||||
:yocto_git:`Source Repositories <>`.
|
||||
|
||||
For a detailed description of this particular layer configuration file,
|
||||
see ":ref:`step 3 <dev-manual/layers:creating your own layer>`"
|
||||
see ":ref:`step 3 <dev-manual/common-tasks:creating your own layer>`"
|
||||
in the discussion that describes how to create layers in the Yocto
|
||||
Project Development Tasks Manual.
|
||||
|
||||
@@ -1439,39 +1449,39 @@ The kernel recipe used to build the kernel image for the BeagleBone
|
||||
device was established in the machine configuration::
|
||||
|
||||
PREFERRED_PROVIDER_virtual/kernel ?= "linux-yocto"
|
||||
PREFERRED_VERSION_linux-yocto ?= "5.15%"
|
||||
PREFERRED_VERSION_linux-yocto ?= "5.0%"
|
||||
|
||||
The ``meta-yocto-bsp/recipes-kernel/linux`` directory in the layer contains
|
||||
metadata used to build the kernel. In this case, a kernel append file
|
||||
(i.e. ``linux-yocto_5.15.bbappend``) is used to override an established
|
||||
kernel recipe (i.e. ``linux-yocto_5.15.bb``), which is located in
|
||||
:yocto_git:`/poky/tree/meta-yocto-bsp/recipes-kernel/linux`.
|
||||
(i.e. ``linux-yocto_5.0.bbappend``) is used to override an established
|
||||
kernel recipe (i.e. ``linux-yocto_5.0.bb``), which is located in
|
||||
:yocto_git:`/poky/tree/meta/recipes-kernel/linux`.
|
||||
|
||||
The contents of the append file are::
|
||||
Following is the contents of the append file::
|
||||
|
||||
KBRANCH:genericx86 = "v5.15/standard/base"
|
||||
KBRANCH:genericx86-64 = "v5.15/standard/base"
|
||||
KBRANCH:edgerouter = "v5.15/standard/edgerouter"
|
||||
KBRANCH:beaglebone-yocto = "v5.15/standard/beaglebone"
|
||||
KBRANCH:genericx86 = "v5.0/standard/base"
|
||||
KBRANCH:genericx86-64 = "v5.0/standard/base"
|
||||
KBRANCH:edgerouter = "v5.0/standard/edgerouter"
|
||||
KBRANCH:beaglebone-yocto = "v5.0/standard/beaglebone"
|
||||
|
||||
KMACHINE:genericx86 ?= "common-pc"
|
||||
KMACHINE:genericx86-64 ?= "common-pc-64"
|
||||
KMACHINE:beaglebone-yocto ?= "beaglebone"
|
||||
|
||||
SRCREV_machine:genericx86 ?= "0b628306d1f9ea28c0e86369ce9bb87a47893c9c"
|
||||
SRCREV_machine:genericx86-64 ?= "0b628306d1f9ea28c0e86369ce9bb87a47893c9c"
|
||||
SRCREV_machine:edgerouter ?= "90f1ee6589264545f548d731c2480b08a007230f"
|
||||
SRCREV_machine:beaglebone-yocto ?= "9aabbaa89fcb21af7028e814c1f5b61171314d5a"
|
||||
SRCREV_machine:genericx86 ?= "3df4aae6074e94e794e27fe7f17451d9353cdf3d"
|
||||
SRCREV_machine:genericx86-64 ?= "3df4aae6074e94e794e27fe7f17451d9353cdf3d"
|
||||
SRCREV_machine:edgerouter ?= "3df4aae6074e94e794e27fe7f17451d9353cdf3d"
|
||||
SRCREV_machine:beaglebone-yocto ?= "3df4aae6074e94e794e27fe7f17451d9353cdf3d"
|
||||
|
||||
COMPATIBLE_MACHINE:genericx86 = "genericx86"
|
||||
COMPATIBLE_MACHINE:genericx86-64 = "genericx86-64"
|
||||
COMPATIBLE_MACHINE:edgerouter = "edgerouter"
|
||||
COMPATIBLE_MACHINE:beaglebone-yocto = "beaglebone-yocto"
|
||||
|
||||
LINUX_VERSION:genericx86 = "5.15.72"
|
||||
LINUX_VERSION:genericx86-64 = "5.15.72"
|
||||
LINUX_VERSION:edgerouter = "5.15.54"
|
||||
LINUX_VERSION:beaglebone-yocto = "5.15.54"
|
||||
LINUX_VERSION:genericx86 = "5.0.3"
|
||||
LINUX_VERSION:genericx86-64 = "5.0.3"
|
||||
LINUX_VERSION:edgerouter = "5.0.3"
|
||||
LINUX_VERSION:beaglebone-yocto = "5.0.3"
|
||||
|
||||
This particular append file works for all the machines that are
|
||||
part of the ``meta-yocto-bsp`` layer. The relevant statements are
|
||||
|
||||
@@ -13,7 +13,6 @@
|
||||
# documentation root, use os.path.abspath to make it absolute, like shown here.
|
||||
#
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import datetime
|
||||
try:
|
||||
@@ -91,9 +90,7 @@ rst_prolog = """
|
||||
|
||||
# external links and substitutions
|
||||
extlinks = {
|
||||
'bitbake_git': ('https://git.openembedded.org/bitbake%s', None),
|
||||
'cve_mitre': ('https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-%s', 'CVE-%s'),
|
||||
'cve_nist': ('https://nvd.nist.gov/vuln/detail/CVE-%s', 'CVE-%s'),
|
||||
'cve': ('https://nvd.nist.gov/vuln/detail/CVE-%s', 'CVE-%s'),
|
||||
'yocto_home': ('https://www.yoctoproject.org%s', None),
|
||||
'yocto_wiki': ('https://wiki.yoctoproject.org/wiki%s', None),
|
||||
'yocto_dl': ('https://downloads.yoctoproject.org%s', None),
|
||||
@@ -109,7 +106,6 @@ extlinks = {
|
||||
'oe_wiki': ('https://www.openembedded.org/wiki%s', None),
|
||||
'oe_layerindex': ('https://layers.openembedded.org%s', None),
|
||||
'oe_layer': ('https://layers.openembedded.org/layerindex/branch/master/layer%s', None),
|
||||
'wikipedia': ('https://en.wikipedia.org/wiki/%s', None),
|
||||
}
|
||||
|
||||
# Intersphinx config to use cross reference with Bitbake user manual
|
||||
@@ -137,7 +133,6 @@ except ImportError:
|
||||
sys.exit(1)
|
||||
|
||||
html_logo = 'sphinx-static/YoctoProject_Logo_RGB.jpg'
|
||||
html_favicon = 'sphinx-static/favicon.ico'
|
||||
|
||||
# Add any paths that contain custom static files (such as style sheets) here,
|
||||
# relative to this directory. They are copied after the builtin static files,
|
||||
@@ -162,28 +157,10 @@ html_last_updated_fmt = '%b %d, %Y'
|
||||
html_secnumber_suffix = " "
|
||||
|
||||
latex_elements = {
|
||||
'passoptionstopackages': '\\PassOptionsToPackage{bookmarksdepth=5}{hyperref}',
|
||||
'preamble': '\\setcounter{tocdepth}{2}',
|
||||
'passoptionstopackages': '\PassOptionsToPackage{bookmarksdepth=5}{hyperref}',
|
||||
'preamble': '\setcounter{tocdepth}{2}',
|
||||
}
|
||||
|
||||
|
||||
from sphinx.search import SearchEnglish
|
||||
from sphinx.search import languages
|
||||
class DashFriendlySearchEnglish(SearchEnglish):
|
||||
|
||||
# Accept words that can include 'inner' hyphens or dots
|
||||
_word_re = re.compile(r'[\w]+(?:[\.\-][\w]+)*')
|
||||
|
||||
js_splitter_code = r"""
|
||||
function splitQuery(query) {
|
||||
return query
|
||||
.split(/[^\p{Letter}\p{Number}_\p{Emoji_Presentation}\-\.]+/gu)
|
||||
.filter(term => term.length > 0);
|
||||
}
|
||||
"""
|
||||
|
||||
languages['en'] = DashFriendlySearchEnglish
|
||||
|
||||
# Make the EPUB builder prefer PNG to SVG because of issues rendering Inkscape SVG
|
||||
from sphinx.builders.epub3 import Epub3Builder
|
||||
Epub3Builder.supported_image_types = ['image/png', 'image/gif', 'image/jpeg']
|
||||
|
||||
@@ -1,31 +0,0 @@
|
||||
.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
|
||||
|
||||
Identify the component
|
||||
**********************
|
||||
|
||||
The Yocto Project and OpenEmbedded ecosystem is built of :term:`layers <Layer>`
|
||||
so the first step is to identify the component where the issue likely lies.
|
||||
For example, if you have a hardware issue, it is likely related to the BSP
|
||||
you are using and the best place to seek advice would be from the BSP provider
|
||||
or :term:`layer`. If the issue is a build/configuration one and a distro is in
|
||||
use, they would likely be the first place to ask questions. If the issue is a
|
||||
generic one and/or in the core classes or metadata, the core layer or BitBake
|
||||
might be the appropriate component.
|
||||
|
||||
Each metadata layer being used should contain a ``README`` file and that should
|
||||
explain where to report issues, where to send changes and how to contact the
|
||||
maintainers.
|
||||
|
||||
If the issue is in the core metadata layer (OpenEmbedded-Core) or in BitBake,
|
||||
issues can be reported in the :yocto_bugs:`Yocto Project Bugzilla <>`. The
|
||||
:yocto_lists:`yocto </g/yocto>` mailing list is a general “catch-all” location
|
||||
where questions can be sent if you can’t work out where something should go.
|
||||
|
||||
:term:`Poky` is a commonly used “combination” repository where multiple
|
||||
components have been combined (:oe_git:`bitbake </bitbake>`,
|
||||
:oe_git:`openembedded-core </openembedded-core>`,
|
||||
:yocto_git:`meta-yocto </meta-yocto>` and
|
||||
:yocto_git:`yocto-docs </yocto-docs>`). Patches should be submitted against the
|
||||
appropriate individual component rather than :term:`Poky` itself as detailed in
|
||||
the appropriate ``README`` file.
|
||||
|
||||
@@ -1,26 +0,0 @@
|
||||
.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
|
||||
|
||||
================================================
|
||||
Yocto Project and OpenEmbedded Contributor Guide
|
||||
================================================
|
||||
|
||||
The Yocto Project and OpenEmbedded are open-source, community-based projects,
so contributions are very welcome: this is how the code evolves and everyone
can effect change. Contributions take different forms. If you have a fix for
an issue you’ve run into, a patch is the most appropriate way to contribute
it. If you run into an issue but don’t have a solution, opening a defect in
:yocto_bugs:`Bugzilla <>` or asking questions on the mailing lists might be
more appropriate. This guide intends to point you in the right direction.
|
||||
|
||||
|
||||
.. toctree::
|
||||
:caption: Table of Contents
|
||||
:numbered:
|
||||
|
||||
identify-component
|
||||
report-defect
|
||||
recipe-style-guide
|
||||
submit-changes
|
||||
|
||||
.. include:: /boilerplate.rst
|
||||
@@ -1,425 +0,0 @@
|
||||
.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
|
||||
|
||||
Recipe Style Guide
|
||||
******************
|
||||
|
||||
Recipe Naming Conventions
|
||||
=========================
|
||||
|
||||
In general, most recipes should follow the naming convention
|
||||
``recipes-category/recipename/recipename_version.bb``. Recipes for related
|
||||
projects may share the same recipe directory. ``recipename`` and ``category``
|
||||
may contain hyphens, but hyphens are not allowed in ``version``.
|
||||
|
||||
If the recipe is tracking a Git revision that does not correspond to a released
|
||||
version of the software, ``version`` may be ``git`` (e.g. ``recipename_git.bb``)
|
||||
and the recipe would set :term:`PV`.
|
||||
|
||||
Version Policy
|
||||
==============
|
||||
|
||||
Our versions follow the form ``<epoch>:<version>-<revision>``
|
||||
or in BitBake variable terms ${:term:`PE`}:${:term:`PV`}-${:term:`PR`}. We
|
||||
generally follow the `Debian <https://www.debian.org/doc/debian-policy/ch-controlfields.html#version>`__
|
||||
version policy which defines these terms.
|
||||
|
||||
In most cases the version :term:`PV` will be set automatically from the recipe
|
||||
file name. It is recommended to use released versions of software as these are
|
||||
revisions that upstream are expecting people to use.
|
||||
|
||||
Recipe versions should always compare and sort correctly so that upgrades work
|
||||
as expected. With conventional versions such as ``1.4`` upgrading to ``1.5``,
|
||||
this happens naturally, but some versions don't sort. For example,
|
||||
``1.5 Release Candidate 2`` could be written as ``1.5rc2`` but this sorts after
|
||||
``1.5``, so upgrades from feeds won't happen correctly.
|
||||
|
||||
Instead the tilde (``~``) operator can be used, which sorts before the empty
|
||||
string so ``1.5~rc2`` comes before ``1.5``. There is a historical syntax which
|
||||
may be found where :term:`PV` is set as a combination of the prior version
|
||||
``+`` the pre-release version, for example ``PV=1.4+1.5rc2``. This is a valid
|
||||
syntax but the tilde form is preferred.
|
||||
|
||||
For version comparisons, the ``opkg-compare-versions`` program from
|
||||
``opkg-utils`` can be useful when attempting to determine how two version
|
||||
numbers compare to each other. Our definitive version comparison algorithm is
|
||||
the one within bitbake which aims to match those of the package managers and
|
||||
Debian policy closely.
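As an illustrative check only (this assumes BitBake's ``bb.utils.vercmp_string()``
helper, which exposes that comparison from Python)::

    import bb.utils

    # A negative result means the first version sorts before the second, so a
    # "~rc" pre-release is treated as older than the final release, while the
    # historical "+" form sorts after the base version.
    assert bb.utils.vercmp_string("1.5~rc2", "1.5") < 0
    assert bb.utils.vercmp_string("1.4+1.5rc2", "1.4") > 0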
|
||||
|
||||
When a recipe references a git revision that does not correspond to a released
|
||||
version of software (e.g. is not a tagged version), the :term:`PV` variable
|
||||
should include the Git revision using the following to make the
|
||||
version clear::
|
||||
|
||||
PV = "<version>+git${SRCPV}"
|
||||
|
||||
In this case, ``<version>`` should be the most recently released version of the
|
||||
software from the current source revision (``git describe`` can be useful for
|
||||
determining this). Whilst not recommended for published layers, this format is
|
||||
also useful when using :term:`AUTOREV` to set the recipe to increment source
|
||||
control revisions automatically, which can be useful during local development.
|
||||
|
||||
Version Number Changes
|
||||
======================
|
||||
|
||||
The :term:`PR` variable is used to indicate different revisions of a recipe
|
||||
that reference the same upstream source version. It can be used to force a
|
||||
new version of a recipe to be installed onto a device from a package feed.
|
||||
These once had to be set manually but in most cases these can now be set and
|
||||
incremented automatically by a PR Server connected with a package feed.
|
||||
|
||||
When :term:`PV` increases, any existing :term:`PR` value can and should be
|
||||
removed.
|
||||
|
||||
If :term:`PV` changes in such a way that it does not increase with respect to
|
||||
the previous value, you need to increase :term:`PE` to ensure package managers
|
||||
will upgrade it correctly. If unset you should set :term:`PE` to "1" since
|
||||
the default of empty is easily confused with "0" depending on the package
|
||||
manager. :term:`PE` can only have an integer value.
|
||||
|
||||
Recipe formatting
|
||||
=================
|
||||
|
||||
Variable Formatting
|
||||
-------------------
|
||||
|
||||
- Variable assignment should have a space on each side of the operator, e.g.
|
||||
``FOO = "bar"``, not ``FOO="bar"``.
|
||||
|
||||
- Double quotes should be used on the right-hand side of the assignment,
|
||||
e.g. ``FOO = "bar"`` not ``FOO = 'bar'``
|
||||
|
||||
- Spaces should be used for indenting variables, with 4 spaces per tab
|
||||
|
||||
- Long variables should be split over multiple lines when possible by using
|
||||
the continuation character (``\``)
|
||||
|
||||
- When splitting a long variable over multiple lines, all continuation lines
|
||||
should be indented (with spaces) to align with the start of the quote on the
|
||||
first line::
|
||||
|
||||
FOO = "this line is \
|
||||
long \
|
||||
"
|
||||
|
||||
Instead of::
|
||||
|
||||
FOO = "this line is \
|
||||
long \
|
||||
"
|
||||
|
||||
Python Function formatting
|
||||
--------------------------
|
||||
|
||||
- Spaces must be used for indenting Python code, with 4 spaces per tab
|
||||
|
||||
Shell Function formatting
|
||||
-------------------------
|
||||
|
||||
- The formatting of shell functions should be consistent within layers.
|
||||
Some use tabs, some use spaces.
|
||||
|
||||
Recipe metadata
|
||||
===============
|
||||
|
||||
Required Variables
|
||||
------------------
|
||||
|
||||
The following variables should be included in all recipes:
|
||||
|
||||
- :term:`SUMMARY`: a one line description of the upstream project
|
||||
|
||||
- :term:`DESCRIPTION`: an extended description of the upstream project,
|
||||
possibly with multiple lines. If no reasonable description can be written,
|
||||
this may be omitted as it defaults to :term:`SUMMARY`.
|
||||
|
||||
- :term:`HOMEPAGE`: the URL of the upstream project's homepage.
|
||||
|
||||
- :term:`BUGTRACKER`: the URL of the upstream project's bug tracking website,
|
||||
if applicable.
|
||||
|
||||
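A short, hypothetical example of how these variables might look at the top of
a recipe (the project name and URLs are invented for illustration)::

    SUMMARY = "Command-line tool for frobnicating widgets"
    DESCRIPTION = "frobnicate reads widget descriptions and rewrites them in a \
                   normalized form. It is used here purely as a placeholder \
                   example."
    HOMEPAGE = "https://example.com/frobnicate"
    BUGTRACKER = "https://bugs.example.com/frobnicate"
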
Recipe Ordering
---------------

When a variable is defined in recipes and classes, it should follow the
general order below when possible:

- :term:`SUMMARY`
- :term:`DESCRIPTION`
- :term:`HOMEPAGE`
- :term:`BUGTRACKER`
- :term:`SECTION`
- :term:`LICENSE`
- :term:`LIC_FILES_CHKSUM`
- :term:`DEPENDS`
- :term:`PROVIDES`
- :term:`PV`
- :term:`SRC_URI`
- :term:`SRCREV`
- :term:`S`
- ``inherit ...``
- :term:`PACKAGECONFIG`
- Build class specific variables such as ``EXTRA_QMAKEVARS_POST`` and :term:`EXTRA_OECONF`
- Tasks such as :ref:`ref-tasks-configure`
- :term:`PACKAGE_ARCH`
- :term:`PACKAGES`
- :term:`FILES`
- :term:`RDEPENDS`
- :term:`RRECOMMENDS`
- :term:`RSUGGESTS`
- :term:`RPROVIDES`
- :term:`RCONFLICTS`
- :term:`BBCLASSEXTEND`

There are some cases where ordering is important and these cases would override
this default order. Examples include:

- :term:`PACKAGE_ARCH` needing to be set before ``inherit packagegroup``

Tasks should be ordered based on the order they generally execute. For commonly
used tasks this would be:

- :ref:`ref-tasks-fetch`
- :ref:`ref-tasks-unpack`
- :ref:`ref-tasks-patch`
- :ref:`ref-tasks-prepare_recipe_sysroot`
- :ref:`ref-tasks-configure`
- :ref:`ref-tasks-compile`
- :ref:`ref-tasks-install`
- :ref:`ref-tasks-populate_sysroot`
- :ref:`ref-tasks-package`

Custom tasks should be sorted similarly.

Package specific variables are typically grouped together, e.g.::

    RDEPENDS:${PN} = "foo"
    RDEPENDS:${PN}-libs = "bar"

    RRECOMMENDS:${PN} = "one"
    RRECOMMENDS:${PN}-libs = "two"

Recipe License Fields
---------------------

Recipes need to define both the :term:`LICENSE` and
:term:`LIC_FILES_CHKSUM` variables:

- :term:`LICENSE`: This variable specifies the license for the software.
  If you do not know the license under which the software you are
  building is distributed, you should go to the source code and look
  for that information. Typical files containing this information
  include ``COPYING``, :term:`LICENSE`, and ``README`` files. You could
  also find the information near the top of a source file. For example,
  given a piece of software licensed under the GNU General Public
  License version 2, you would set :term:`LICENSE` as follows::

      LICENSE = "GPL-2.0-only"

  The licenses you specify within :term:`LICENSE` can have any name as long
  as you do not use spaces, since spaces are used as separators between
  license names. For standard licenses, use the names of the files in
  ``meta/files/common-licenses/`` or the :term:`SPDXLICENSEMAP` flag names
  defined in ``meta/conf/licenses.conf``.

  .. note::

     Setting a :term:`LICENSE` in a recipe applies to the software to be built
     by this recipe, not to the recipe file itself. The license of recipes,
     configuration files and scripts should also be clearly specified, for
     example via comments or via a license found in the :term:`layer` that
     holds these files. These license files are usually found at the root of
     the layer. Exceptions should be clearly stated in the layer README or
     LICENSE file.

     For example, the :term:`OpenEmbedded-Core (OE-Core)` layer provides both
     the GPL-2.0-only and MIT license files, and a "LICENSE" file to explain
     how these two licenses are attributed to files found in the layer.

- :term:`LIC_FILES_CHKSUM`: The OpenEmbedded build system uses this
  variable to make sure the license text has not changed. If it has,
  the build produces an error and it affords you the chance to figure
  it out and correct the problem.

  You need to specify all applicable licensing files for the software.
  At the end of the configuration step, the build process will compare
  the checksums of the files to be sure the text has not changed. Any
  differences result in an error with the message containing the
  current checksum. For more explanation and examples of how to set the
  :term:`LIC_FILES_CHKSUM` variable, see the
  ":ref:`dev-manual/licenses:tracking license changes`" section.

  To determine the correct checksum string, you can list the
  appropriate files in the :term:`LIC_FILES_CHKSUM` variable with incorrect
  md5 strings, attempt to build the software, and then note the
  resulting error messages that will report the correct md5 strings.
  See the ":ref:`dev-manual/new-recipe:fetching code`" section for
  additional information.

  Here is an example that assumes the software has a ``COPYING`` file::

      LIC_FILES_CHKSUM = "file://COPYING;md5=xxx"

  When you try to build the
  software, the build system will produce an error and give you the
  correct string that you can substitute into the recipe file for a
  subsequent build.

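  As a further, purely illustrative sketch (the file name, line numbers and
  checksums are placeholders), a checksum entry can also cover just the
  license header of a source file using the ``beginline``, ``endline`` and
  ``md5`` parameters::

      LIC_FILES_CHKSUM = "file://COPYING;md5=xxx \
                          file://src/frob.c;beginline=1;endline=20;md5=yyy"
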
License Updates
~~~~~~~~~~~~~~~

When you change the :term:`LICENSE` or :term:`LIC_FILES_CHKSUM` in the recipe
you need to briefly explain the reason for the change via a ``License-Update:``
tag. Often it's quite trivial, such as::

    License-Update: copyright years refreshed

Less often, the actual licensing terms themselves will have changed. If so, do
try to link to the upstream commit or discussion making/justifying that decision.

Tips and Guidelines for Writing Recipes
---------------------------------------

- Use :term:`BBCLASSEXTEND` instead of creating separate recipes such as ``-native``
  and ``-nativesdk`` ones, whenever possible. This avoids having to maintain multiple
  recipe files at the same time.

- Recipes should have tasks which are idempotent, i.e. executing a given task
  multiple times shouldn't change the end result. The build environment is built upon
  this assumption and breaking it can cause obscure build failures.

- For idempotence when modifying files in tasks, it is usually best to:

  - copy a file ``X`` to ``X.orig`` (only if it doesn't exist already)
  - then, copy ``X.orig`` back to ``X``,
  - and, finally, modify ``X``.

  This ensures that if the task is rerun it always has the same end result, and
  that the original file is preserved for reuse. It also guards against an
  interrupted build corrupting the file, as in the sketch below.

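Here is a minimal, hypothetical sketch of that pattern inside a task (the file
name and the edit are invented; adapt them to the file your recipe actually
modifies)::

    do_configure:prepend() {
        # Keep a pristine copy the first time the task runs
        if [ ! -f ${S}/config.mk.orig ]; then
            cp ${S}/config.mk ${S}/config.mk.orig
        fi
        # Always start from the pristine copy so reruns give the same result
        cp ${S}/config.mk.orig ${S}/config.mk
        # Finally, apply the modification
        sed -i 's/DEBUG = 0/DEBUG = 1/' ${S}/config.mk
    }
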
Patch Upstream Status
=====================

In order to keep track of patches applied by recipes and ultimately reduce the
number of patches that need maintaining, the OpenEmbedded build system
requires information about the upstream status of each patch.

In its description, each patch should provide detailed information about the
bug that it addresses, such as the URL in a bug tracking system and links
to relevant mailing list archives.

Then, you should also add an ``Upstream-Status:`` tag containing one of the
following status strings:

``Pending``
   No determination has been made yet, or the patch has not yet been submitted
   to upstream.

   Keep in mind that every patch submitted upstream reduces the maintenance
   burden in OpenEmbedded and Yocto Project in the long run, so this patch
   status should only be used in exceptional cases if there are genuine
   obstacles to submitting a patch upstream; the reason for that should be
   included in the patch.

``Submitted [where]``
   Submitted to upstream, waiting for approval. Optionally include where
   it was submitted, such as the author, mailing list, etc.

``Backport [version]``
   Accepted upstream and included in the next release, or backported from a
   newer upstream version, because we are at a fixed version.
   Include upstream version info (e.g. commit ID or next expected version).

``Denied``
   Not accepted by upstream, include reason in patch.

``Inactive-Upstream [lastcommit: when (and/or) lastrelease: when]``
   The upstream is no longer available. This typically means a defunct project
   where no activity has happened for a long time --- measured in years. To make
   that judgement, it is recommended to look at not only when the last release
   happened, but also when the last commit happened, and whether newly made bug
   reports and merge requests since that time receive no reaction. It is also
   recommended to add to the patch description any relevant links where the
   inactivity can be clearly seen.

``Inappropriate [reason]``
   The patch is not appropriate for upstream, include a brief reason on the
   same line enclosed with ``[]``. In the past, there were several different
   reasons not to submit patches upstream, but we have to consider that every
   non-upstreamed patch means a maintenance burden for recipe maintainers.
   Currently, the only reasons to mark patches as inappropriate for upstream
   submission are:

   - ``oe specific``: the issue is specific to how OpenEmbedded performs builds
     or sets things up at runtime, and can be resolved only with a patch that
     is not however relevant or appropriate for general upstream submission.
   - ``upstream ticket <link>``: the issue is not specific to OpenEmbedded
     and should be fixed upstream, but the patch in its current form is not
     suitable for merging upstream, and the author lacks sufficient expertise
     to develop a proper patch. Instead the issue is handled via a bug report
     (include link).

Of course, if another person later takes care of submitting this patch upstream,
the status should be changed to ``Submitted [where]``, and an additional
``Signed-off-by:`` line should be added to the patch by the person claiming
responsibility for upstreaming.

Examples
--------

Here's an example of a patch that has been submitted upstream::

    rpm: Adjusted the foo setting in bar

    [RPM Ticket #65] -- http://rpm5.org/cvs/tktview?tn=65,5

    The foo setting in bar was decreased from X to X-50% in order to
    ensure we don't exhaust all system memory with foobar threads.

    Upstream-Status: Submitted [rpm5-devel@rpm5.org]

    Signed-off-by: Joe Developer <joe.developer@example.com>

A future update can change the value to ``Backport`` or ``Denied`` as
appropriate.

Another example of a patch that is specific to OpenEmbedded::

    Do not treat warnings as errors

    There are additional warnings found with musl which are
    treated as errors and fail the build; we have more combinations
    to handle than upstream supports.

    Upstream-Status: Inappropriate [oe specific]

Here's a patch that has been backported from an upstream commit::

    include missing sys/file.h for LOCK_EX

    Upstream-Status: Backport [https://github.com/systemd/systemd/commit/ac8db36cbc26694ee94beecc8dca208ec4b5fd45]

CVE patches
===========

In order to have better control of vulnerabilities, patches that fix CVEs must
contain a ``CVE:`` tag. This tag lists all CVEs fixed by the patch. If more than
one CVE is fixed, separate them using spaces.

CVE Examples
------------

This should be the header of a patch that fixes :cve_nist:`2015-8370` in GRUB2::

    grub2: Fix CVE-2015-8370

    [No upstream tracking] -- https://bugzilla.redhat.com/show_bug.cgi?id=1286966

    Back to 28; Grub2 Authentication

    Two functions suffer from an integer underflow fault: grub_username_get() and grub_password_get(), located in
    grub-core/normal/auth.c and lib/crypto.c respectively. This can be exploited to obtain a Grub rescue shell.

    Upstream-Status: Backport [http://git.savannah.gnu.org/cgit/grub.git/commit/?id=451d80e52d851432e109771bb8febafca7a5f1f2]
    CVE: CVE-2015-8370
    Signed-off-by: Joe Developer <joe.developer@example.com>
@@ -1,67 +0,0 @@
|
||||
.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
|
||||
|
||||
Reporting a Defect Against the Yocto Project and OpenEmbedded
|
||||
**************************************************************
|
||||
|
||||
You can use the Yocto Project instance of
|
||||
`Bugzilla <https://www.bugzilla.org/about/>`__ to submit a defect (bug)
|
||||
against BitBake, OpenEmbedded-Core, against any other Yocto Project component
|
||||
or for tool issues. For additional information on this implementation of
|
||||
Bugzilla see the ":ref:`Yocto Project Bugzilla <resources-bugtracker>`" section
|
||||
in the Yocto Project Reference Manual. For more detail on any of the following
|
||||
steps, see the Yocto Project
|
||||
:yocto_wiki:`Bugzilla wiki page </Bugzilla_Configuration_and_Bug_Tracking>`.
|
||||
|
||||
Use the following general steps to submit a bug:
|
||||
|
||||
#. Open the Yocto Project implementation of :yocto_bugs:`Bugzilla <>`.
|
||||
|
||||
#. Click "File a Bug" to enter a new bug.
|
||||
|
||||
#. Choose the appropriate "Classification", "Product", and "Component"
|
||||
for which the bug was found. Bugs for the Yocto Project fall into
|
||||
one of several classifications, which in turn break down into
|
||||
several products and components. For example, for a bug against the
|
||||
``meta-intel`` layer, you would choose "Build System, Metadata &
|
||||
Runtime", "BSPs", and "bsps-meta-intel", respectively.
|
||||
|
||||
#. Choose the "Version" of the Yocto Project for which you found the
|
||||
bug (e.g. &DISTRO;).
|
||||
|
||||
#. Determine and select the "Severity" of the bug. The severity
|
||||
indicates how the bug impacted your work.
|
||||
|
||||
#. Choose the "Hardware" that the bug impacts.
|
||||
|
||||
#. Choose the "Architecture" that the bug impacts.
|
||||
|
||||
#. Choose a "Documentation change" item for the bug. Fixing a bug might
|
||||
or might not affect the Yocto Project documentation. If you are
|
||||
unsure of the impact to the documentation, select "Don't Know".
|
||||
|
||||
#. Provide a brief "Summary" of the bug. Try to limit your summary to
|
||||
just a line or two and be sure to capture the essence of the bug.
|
||||
|
||||
#. Provide a detailed "Description" of the bug. You should provide as
|
||||
much detail as you can about the context, behavior, output, and so
|
||||
forth that surrounds the bug. You can even attach supporting files
|
||||
for output from logs by using the "Add an attachment" button.
|
||||
|
||||
#. Click the "Submit Bug" button to submit the bug. A new Bugzilla number
   is assigned to the bug and the defect is logged in the bug tracking
   system.
|
||||
Once you file a bug, the bug is processed by the Yocto Project Bug
|
||||
Triage Team and further details concerning the bug are assigned (e.g.
|
||||
priority and owner). You are the "Submitter" of the bug and any further
|
||||
categorization, progress, or comments on the bug result in Bugzilla
|
||||
sending you an automated email concerning the particular change or
|
||||
progress to the bug.
|
||||
|
||||
There are no guarantees about if or when a bug might be worked on since an
|
||||
open-source project has no dedicated engineering resources. However, the
|
||||
project does have a good track record of resolving common issues over the
|
||||
medium and long term. We do encourage people to file bugs so issues are
|
||||
at least known about. It helps other users when they find somebody having
|
||||
the same issue as they do, and an issue that is unknown is much less likely
|
||||
to ever be fixed!
|
||||
@@ -1,880 +0,0 @@
|
||||
.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
|
||||
|
||||
Contributing Changes to a Component
|
||||
************************************
|
||||
|
||||
Contributions to the Yocto Project and OpenEmbedded are very welcome.
|
||||
Because the system is extremely configurable and flexible, we recognize
|
||||
that developers will want to extend, configure or optimize it for their
|
||||
specific uses.
|
||||
|
||||
.. _ref-why-mailing-lists:
|
||||
|
||||
Contributing through mailing lists --- Why not use web-based workflows?
==========================================================================

Both Yocto Project and OpenEmbedded have many key components that are
|
||||
maintained by patches being submitted on mailing lists. We appreciate this
|
||||
approach does look a little old fashioned when other workflows are available
|
||||
through web technology such as GitHub, GitLab and others. Since we are often
|
||||
asked this question, we’ve decided to document the reasons for using mailing
|
||||
lists.
|
||||
|
||||
One significant factor is that we value peer review. When a change is proposed
|
||||
to many of the core pieces of the project, it helps to have many eyes of review
|
||||
go over them. Whilst there is ultimately one maintainer who needs to make the
|
||||
final call on accepting or rejecting a patch, the review is made by many eyes
|
||||
and the exact people reviewing it are likely unknown to the maintainer. It is
|
||||
often the surprise reviewer that catches the most interesting issues!
|
||||
|
||||
This is in contrast to the "GitHub" style workflow where either just a
|
||||
maintainer makes that review, or review is specifically requested from
|
||||
nominated people. We believe there is significant value added to the codebase
|
||||
by this peer review and that moving away from mailing lists would be to the
|
||||
detriment of our code.
|
||||
|
||||
We also need to acknowledge that many of our developers are used to this
|
||||
mailing list workflow and have worked with it for years, with tools and
|
||||
processes built around it. Changing away from this would result in a loss
|
||||
of key people from the project, which would again be to its detriment.
|
||||
|
||||
The projects are acutely aware that potential new contributors find the
|
||||
mailing list approach off-putting and would prefer a web-based GUI.
|
||||
Since we don’t believe that can work for us, the project is aiming to ensure
|
||||
`patchwork <https://patchwork.yoctoproject.org/>`__ is available to help track
|
||||
patch status and also looking at how tooling can provide more feedback to users
|
||||
about patch status. We are looking at improving tools such as ``patchtest`` to
|
||||
test user contributions before they hit the mailing lists and also at better
|
||||
documenting how to use such workflows since we recognise that whilst this was
|
||||
common knowledge a decade ago, it might not be as familiar now.
|
||||
|
||||
Preparing Changes for Submission
|
||||
================================
|
||||
|
||||
Set up Git
|
||||
----------
|
||||
|
||||
The first thing to do is to install Git packages. Here is an example
|
||||
on Debian and Ubuntu::
|
||||
|
||||
sudo apt install git-core git-email
|
||||
|
||||
Then, you need to set a name and e-mail address that Git will
|
||||
use to identify your commits::
|
||||
|
||||
git config --global user.name "Ada Lovelace"
|
||||
git config --global user.email "ada.lovelace@gmail.com"
|
||||
|
||||
By default, Git adds a signature line at the end of patches containing the Git
|
||||
version. We suggest removing it as it doesn't add useful information.
|
||||
Remove it with the following command::
|
||||
|
||||
git config --global format.signature ""
|
||||
|
||||
Clone the Git repository for the component to modify
|
||||
----------------------------------------------------
|
||||
|
||||
After identifying the component to modify as described in the
|
||||
":doc:`../contributor-guide/identify-component`" section, clone the
|
||||
corresponding Git repository. Here is an example for OpenEmbedded-Core::
|
||||
|
||||
git clone https://git.openembedded.org/openembedded-core
|
||||
cd openembedded-core
|
||||
|
||||
Create a new branch
|
||||
-------------------
|
||||
|
||||
Then, create a new branch in your local Git repository
|
||||
for your changes, starting from the reference branch in the upstream
|
||||
repository (often called ``master``)::
|
||||
|
||||
$ git checkout <ref-branch>
|
||||
$ git checkout -b my-changes
|
||||
|
||||
If you have completely unrelated sets of changes to submit, you should even
|
||||
create one branch for each set.
|
||||
|
||||
Implement and commit changes
|
||||
----------------------------
|
||||
|
||||
In each branch, you should group your changes into small, controlled and
|
||||
isolated ones. Keeping changes small and isolated aids review, makes
|
||||
merging/rebasing easier and keeps the change history clean should anyone need
|
||||
to refer to it in future.
|
||||
|
||||
For this purpose, you should create *one Git commit per change*,
corresponding to each of the patches you will eventually submit.
|
||||
See `further guidance <https://www.kernel.org/doc/html/latest/process/submitting-patches.html#separate-your-changes>`__
|
||||
in the Linux kernel documentation if needed.
|
||||
|
||||
For example, when you intend to add multiple new recipes, each recipe
|
||||
should be added in a separate commit. For upgrades to existing recipes,
|
||||
the previous version should usually be deleted as part of the same commit
|
||||
to add the upgraded version.
|
||||
|
||||
#. *Stage Your Changes:* Stage your changes by using the ``git add``
|
||||
command on each file you modified. If you want to stage all the
|
||||
files you modified, you can even use the ``git add -A`` command.
|
||||
|
||||
#. *Commit Your Changes:* This is when you can create separate commits. For
|
||||
each commit to create, use the ``git commit -s`` command with the files
|
||||
or directories you want to include in the commit::
|
||||
|
||||
$ git commit -s file1 file2 dir1 dir2 ...
|
||||
|
||||
To include all staged files::
|
||||
|
||||
$ git commit -sa
|
||||
|
||||
#. The ``-s`` option of ``git commit`` adds a "Signed-off-by:" line
|
||||
to your commit message. There is the same requirement for contributing
|
||||
to the Linux kernel. Adding such a line signifies that you, the
|
||||
submitter, have agreed to the `Developer's Certificate of Origin 1.1
|
||||
<https://www.kernel.org/doc/html/latest/process/submitting-patches.html#sign-your-work-the-developer-s-certificate-of-origin>`__
|
||||
as follows:
|
||||
|
||||
.. code-block:: none
|
||||
|
||||
Developer's Certificate of Origin 1.1
|
||||
|
||||
By making a contribution to this project, I certify that:
|
||||
|
||||
(a) The contribution was created in whole or in part by me and I
|
||||
have the right to submit it under the open source license
|
||||
indicated in the file; or
|
||||
|
||||
(b) The contribution is based upon previous work that, to the best
|
||||
of my knowledge, is covered under an appropriate open source
|
||||
license and I have the right under that license to submit that
|
||||
work with modifications, whether created in whole or in part
|
||||
by me, under the same open source license (unless I am
|
||||
permitted to submit under a different license), as indicated
|
||||
in the file; or
|
||||
|
||||
(c) The contribution was provided directly to me by some other
|
||||
person who certified (a), (b) or (c) and I have not modified
|
||||
it.
|
||||
|
||||
(d) I understand and agree that this project and the contribution
|
||||
are public and that a record of the contribution (including all
|
||||
personal information I submit with it, including my sign-off) is
|
||||
maintained indefinitely and may be redistributed consistent with
|
||||
this project or the open source license(s) involved.
|
||||
|
||||
#. Provide a single-line summary of the change and, if more
|
||||
explanation is needed, provide more detail in the description of the
|
||||
commit. This summary is typically viewable in the "shortlist" of
|
||||
changes. Thus, providing something short and descriptive that
|
||||
gives the reader a summary of the change is useful when viewing a
|
||||
list of many commits. You should prefix this short description
|
||||
with the recipe name (if changing a recipe), or else with the
|
||||
short form path to the file being changed.
|
||||
|
||||
.. note::
|
||||
|
||||
To find a suitable prefix for the commit summary, a good idea
|
||||
is to look for prefixes used in previous commits touching the
|
||||
same files or directories::
|
||||
|
||||
git log --oneline <paths>
|
||||
|
||||
#. For the commit description, provide detailed information
|
||||
that describes what you changed, why you made the change, and the
|
||||
approach you used. It might also be helpful if you mention how you
|
||||
tested the change. Provide as much detail as you can in the commit
|
||||
description.
|
||||
|
||||
.. note::
|
||||
|
||||
If the single line summary is enough to describe a simple
|
||||
change, the commit description can be left empty.
|
||||
|
||||
#. If the change addresses a specific bug or issue that is associated
|
||||
with a bug-tracking ID, include a reference to that ID in the body of the
|
||||
commit message. For example, the Yocto Project uses a
|
||||
specific convention for bug references --- any commit that addresses
|
||||
a specific bug should use the following form for the body of the commit
|
||||
message. Be sure to use the actual bug-tracking ID from
|
||||
Bugzilla for bug-id::
|
||||
|
||||
single-line summary of change
|
||||
|
||||
Fixes [YOCTO #bug-id]
|
||||
|
||||
detailed description of change
|
||||
|
||||
#. If other people participated in this patch, add some tags to the commit
|
||||
description to credit other contributors to the change:
|
||||
|
||||
- ``Reported-by``: name and email of a person reporting a bug
|
||||
that your commit is trying to fix. This is a good practice
|
||||
to encourage people to go on reporting bugs and let them
|
||||
know that their reports are taken into account.
|
||||
|
||||
- ``Suggested-by``: name and email of a person to credit for the
|
||||
idea of making the change.
|
||||
|
||||
- ``Tested-by``, ``Reviewed-by``: name and email for people having
|
||||
     tested your changes or reviewed your code. These fields are
usually added by the maintainer accepting a patch, or by
|
||||
yourself if you submitted your patches to early reviewers,
|
||||
or are submitting an unmodified patch again as part of a
|
||||
new iteration of your patch series.
|
||||
|
||||
- ``Cc``: name and email of people you want to send a copy
|
||||
of your changes to. This field will be used by ``git send-email``.
|
||||
|
||||
See `more guidance about using such tags
|
||||
<https://www.kernel.org/doc/html/latest/process/submitting-patches.html#using-reported-by-tested-by-reviewed-by-suggested-by-and-fixes>`__
|
||||
in the Linux kernel documentation.
|
||||
|
||||
.. note::
|
||||
|
||||
One can amend an existing git commit message to add missing tags for
|
||||
contributors with the ``git commit --amend`` command.
|
||||
|
||||
Test your changes
|
||||
-----------------
|
||||
|
||||
For each contribution you make, you should test your changes as well.
For this, the Yocto Project offers several types of tests. Those tests cover
different areas, and which of them are feasible depends on your changes. For example run:
|
||||
- For changes that affect the build environment:
|
||||
|
||||
- ``bitbake-selftest``: for changes within BitBake
|
||||
|
||||
- ``oe-selftest``: to test combinations of BitBake runs
|
||||
|
||||
- ``oe-build-perf-test``: to test the performance of common build scenarios
|
||||
|
||||
- For changes in a recipe:
|
||||
|
||||
- ``ptest``: run package specific tests, if they exist
|
||||
|
||||
- ``testimage``: build an image, boot it and run testcases on it
|
||||
|
||||
  - If applicable, ensure that the ``native`` and ``nativesdk`` variants also build
|
||||
- For changes relating to the SDK:
|
||||
|
||||
  - ``testsdk``: to build, install and run tests against an SDK
|
||||
- ``testsdk_ext``: to build, install and run tests against an extended SDK
|
||||
|
||||
Note that this list just gives suggestions and is not exhaustive. More details can
be found here: :ref:`test-manual/intro:Yocto Project Tests --- Types of Testing Overview`.

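As a rough illustration (the image and test module names are examples rather
than requirements), such tests can be run from an initialized build directory
like this::

    # Run a BitBake-level selftest suite
    $ bitbake-selftest

    # Run a specific oe-selftest module
    $ oe-selftest -r bbtests

    # Build an image, then boot it under QEMU and run its runtime tests
    # (requires testimage to be enabled in your local.conf)
    $ bitbake core-image-full-cmdline
    $ bitbake -c testimage core-image-full-cmdline
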
Creating Patches
|
||||
================
|
||||
|
||||
Here is the general procedure on how to create patches to be sent through email:
|
||||
|
||||
#. *Describe the Changes in your Branch:* If you have more than one commit
|
||||
in your branch, it's recommended to provide a cover letter describing
|
||||
the series of patches you are about to send.
|
||||
|
||||
For this purpose, a good solution is to store the cover letter contents
|
||||
in the branch itself::
|
||||
|
||||
git branch --edit-description
|
||||
|
||||
This will open a text editor to fill in the description for your
|
||||
changes. This description can be updated when necessary and will
|
||||
be used by Git to create the cover letter together with the patches.
|
||||
|
||||
   It is recommended to start this description with a title line which
   will serve as the subject line for the cover letter.
|
||||
#. *Generate Patches for your Branch:* The ``git format-patch`` command will
|
||||
generate patch files for each of the commits in your branch. You need
|
||||
to pass the reference branch your branch starts from.
|
||||
|
||||
   If your branch didn't need a description in the previous step::
|
||||
$ git format-patch <ref-branch>
|
||||
|
||||
   If you filled in a description for your branch, you will want to generate
a cover letter too::
|
||||
|
||||
$ git format-patch --cover-letter --cover-from-description=auto <ref-branch>
|
||||
|
||||
After the command is run, the current directory contains numbered
|
||||
``.patch`` files for the commits in your branch. If you have a cover
|
||||
letter, it will be in the ``0000-cover-letter.patch``.
|
||||
|
||||
.. note::
|
||||
|
||||
The ``--cover-from-description=auto`` option makes ``git format-patch``
|
||||
use the first paragraph of the branch description as the cover
|
||||
letter title. Another possibility, which is easier to remember, is to pass
|
||||
only the ``--cover-letter`` option, but you will have to edit the
|
||||
subject line manually every time you generate the patches.
|
||||
|
||||
See the `git format-patch manual page <https://git-scm.com/docs/git-format-patch>`__
|
||||
for details.
|
||||
|
||||
#. *Review each of the Patch Files:* This final review of the patches
   before sending them often allows you to view your changes from a different
   perspective and discover defects such as typos, spacing issues or lines
   or even files that you didn't intend to modify. This review should
   include the cover letter patch too.
|
||||
If necessary, rework your commits as described in
|
||||
":ref:`contributor-guide/submit-changes:taking patch review into account`".
|
||||
|
||||
Sending the Patches via Email
|
||||
=============================
|
||||
|
||||
Using Git to Send Patches
|
||||
-------------------------
|
||||
|
||||
To submit patches through email, it is very important that you send them
|
||||
without any whitespace or HTML formatting that either you or your mailer
|
||||
introduces. The maintainer that receives your patches needs to be able
|
||||
to save and apply them directly from your emails, using the ``git am``
|
||||
command.
|
||||
|
||||
Using the ``git send-email`` command is the only error-proof way of sending
|
||||
your patches using email since there is no risk of compromising whitespace
|
||||
in the body of the message, which can occur when you use your own mail
|
||||
client. It will also properly include your patches as *inline attachments*,
|
||||
which is not easy to do with standard e-mail clients without breaking lines.
|
||||
If you used your regular e-mail client and shared your patches as regular
|
||||
attachments, reviewers wouldn't be able to quote specific sections of your
|
||||
changes and make comments about them.
|
||||
|
||||
Setting up Git to Send Email
|
||||
----------------------------
|
||||
|
||||
The ``git send-email`` command can send email by using a local or remote
|
||||
Mail Transport Agent (MTA) such as ``msmtp``, ``sendmail``, or
|
||||
through a direct SMTP configuration in your Git ``~/.gitconfig`` file.
|
||||
|
||||
Here are the settings for letting ``git send-email`` send e-mail through your
regular SMTP server, using a Google Mail account as an example::

    git config --global sendemail.smtpserver smtp.gmail.com
    git config --global sendemail.smtpserverport 587
    git config --global sendemail.smtpencryption tls
    git config --global sendemail.smtpuser ada.lovelace@gmail.com
    git config --global sendemail.smtppass XXXXXXXX

These settings will appear in the ``.gitconfig`` file in your home directory.

If you can use neither a local MTA nor SMTP, make sure you use an email client
that does not touch the message (turning spaces into tabs, wrapping lines, etc.).
A good mail client to do so is Pine (or Alpine) or Mutt. For more
|
||||
information about suitable clients, see `Email clients info for Linux
|
||||
<https://www.kernel.org/doc/html/latest/process/email-clients.html>`__
|
||||
in the Linux kernel sources.
|
||||
|
||||
If you use such clients, just include the patch in the body of your email.
|
||||
|
||||
Finding a Suitable Mailing List
|
||||
-------------------------------
|
||||
|
||||
You should send patches to the appropriate mailing list so that they can be
|
||||
reviewed by the right contributors and merged by the appropriate maintainer.
|
||||
The specific mailing list you need to use depends on the location of the code
|
||||
you are changing.
|
||||
|
||||
If people have concerns with any of the patches, they will usually voice
|
||||
their concern over the mailing list. If patches do not receive any negative
|
||||
reviews, the maintainer of the affected layer typically takes them, tests them,
|
||||
and then based on successful testing, merges them.
|
||||
|
||||
In general, each component (e.g. layer) should have a ``README`` file
|
||||
that indicates where to send the changes and which process to follow.
|
||||
|
||||
The "poky" repository, which is the Yocto Project's reference build
|
||||
environment, is a hybrid repository that contains several individual
|
||||
pieces (e.g. BitBake, Metadata, documentation, and so forth) built using
|
||||
the combo-layer tool. The upstream location used for submitting changes
|
||||
varies by component:
|
||||
|
||||
- *Core Metadata:* Send your patches to the
|
||||
:oe_lists:`openembedded-core </g/openembedded-core>`
|
||||
mailing list. For example, a change to anything under the ``meta`` or
|
||||
``scripts`` directories should be sent to this mailing list.
|
||||
|
||||
- *BitBake:* For changes to BitBake (i.e. anything under the
|
||||
``bitbake`` directory), send your patches to the
|
||||
:oe_lists:`bitbake-devel </g/bitbake-devel>`
|
||||
mailing list.
|
||||
|
||||
- *meta-poky* and *meta-yocto-bsp* trees: These trees contain Metadata. Use the
|
||||
:yocto_lists:`poky </g/poky>` mailing list.
|
||||
|
||||
- *Documentation*: For changes to the Yocto Project documentation, use the
|
||||
:yocto_lists:`docs </g/docs>` mailing list.
|
||||
|
||||
For changes to other layers and tools hosted in the Yocto Project source
|
||||
repositories (i.e. :yocto_git:`git.yoctoproject.org <>`), use the
|
||||
:yocto_lists:`yocto-patches </g/yocto-patches/>` general mailing list.
|
||||
|
||||
For changes to other layers hosted in the OpenEmbedded source
|
||||
repositories (i.e. :oe_git:`git.openembedded.org <>`), use
|
||||
the :oe_lists:`openembedded-devel </g/openembedded-devel>`
|
||||
mailing list, unless specified otherwise in the layer's ``README`` file.
|
||||
|
||||
If you intend to submit a new recipe that neither fits into the core Metadata,
|
||||
nor into :oe_git:`meta-openembedded </meta-openembedded/>`, you should
|
||||
look for a suitable layer in https://layers.openembedded.org. If similar
|
||||
recipes can be expected, you may consider :ref:`dev-manual/layers:creating your own layer`.
|
||||
|
||||
If in doubt, please ask on the :yocto_lists:`yocto </g/yocto/>` general mailing list
|
||||
or on the :oe_lists:`openembedded-devel </g/openembedded-devel>` mailing list.
|
||||
|
||||
Subscribing to the Mailing List
|
||||
-------------------------------
|
||||
|
||||
After identifying the right mailing list to use, you will have to subscribe to
|
||||
it if you haven't done it yet.
|
||||
|
||||
If you attempt to send patches to a list you haven't subscribed to, your email
|
||||
will be returned as undelivered.
|
||||
|
||||
However, if you don't want to receive all the messages sent to a mailing list,
you can set your subscription to "no email". You will still be a subscriber able
to send messages, but you won't receive any e-mail. If people reply to your message,
their e-mail clients will default to including your email address in the
conversation anyway.
|
||||
Anyway, you'll also be able to access the new messages on mailing list archives,
|
||||
either through a web browser, or for the lists archived on https://lore.kernel.org,
|
||||
through an individual newsgroup feed or a git repository.
|
||||
|
||||
Sending Patches via Email
|
||||
-------------------------
|
||||
|
||||
At this stage, you are ready to send your patches via email. Here's the
|
||||
typical usage of ``git send-email``::
|
||||
|
||||
git send-email --to <mailing-list-address> *.patch
|
||||
|
||||
Then, review each subject line and list of recipients carefully, and then
|
||||
allow the command to send each message.
|
||||
|
||||
You will see that ``git send-email`` will automatically copy the people listed
|
||||
in any commit tags such as ``Signed-off-by`` or ``Reported-by``.
|
||||
|
||||
In case you are sending patches for :oe_git:`meta-openembedded </meta-openembedded/>`
|
||||
or any layer other than :oe_git:`openembedded-core </openembedded-core/>`,
|
||||
please add the appropriate prefix so that it is clear which layer the patch is intended
|
||||
to be applied to::
|
||||
|
||||
git format-patch --subject-prefix="meta-oe][PATCH" ...
|
||||
|
||||
.. note::
|
||||
|
||||
It is actually possible to send patches without generating them
|
||||
first. However, make sure you have reviewed your changes carefully
|
||||
because ``git send-email`` will just show you the title lines of
|
||||
each patch.
|
||||
|
||||
Here's a command you can use if you just have one patch in your
|
||||
branch::
|
||||
|
||||
git send-email --to <mailing-list-address> -1
|
||||
|
||||
If you have multiple patches and a cover letter, you can send
|
||||
patches for all the commits between the reference branch
|
||||
and the tip of your branch::
|
||||
|
||||
git send-email --cover-letter --cover-from-description=auto --to <mailing-list-address> -M <ref-branch>
|
||||
|
||||
See the `git send-email manual page <https://git-scm.com/docs/git-send-email>`__
|
||||
for details.
|
||||
|
||||
Troubleshooting Email Issues
|
||||
----------------------------
|
||||
|
||||
Fixing your From identity
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
We have a frequent issue with contributors whose patches are received through
|
||||
a ``From`` field which doesn't match the ``Signed-off-by`` information. Here is
|
||||
a typical example for people sending from a domain name with :wikipedia:`DMARC`::
|
||||
|
||||
From: "Linus Torvalds via lists.openembedded.org <linus.torvalds=kernel.org@lists.openembedded.org>"
|
||||
|
||||
This ``From`` field is used by ``git am`` to recreate commits with the right
|
||||
author name. The following will ensure that your e-mails have an additional
|
||||
``From`` field at the beginning of the Email body, and therefore that
|
||||
maintainers accepting your patches don't have to fix commit author information
|
||||
manually::
|
||||
|
||||
git config --global sendemail.from "linus.torvalds@kernel.org"
|
||||
|
||||
The ``sendemail.from`` should match your ``user.email`` setting,
|
||||
which appears in the ``Signed-off-by`` line of your commits.
|
||||
|
||||
Streamlining git send-email usage
|
||||
---------------------------------
|
||||
|
||||
If you want to save time and not be forced to remember the right options to use
|
||||
with ``git send-email``, you can use Git configuration settings.
|
||||
|
||||
- To set the right mailing list address for a given repository::
|
||||
|
||||
git config --local sendemail.to openembedded-devel@lists.openembedded.org
|
||||
|
||||
- If the mailing list requires a subject prefix for the layer
|
||||
(this only works when the repository only contains one layer)::
|
||||
|
||||
git config --local format.subjectprefix "meta-something][PATCH"
|
||||
|
||||
Using Scripts to Push a Change Upstream and Request a Pull
|
||||
==========================================================
|
||||
|
||||
For larger patch series it is preferable to send a pull request which not
|
||||
only includes the patch but also a pointer to a branch that can be pulled
|
||||
from. This involves making a local branch for your changes, pushing this
|
||||
branch to an accessible repository and then using the ``create-pull-request``
|
||||
and ``send-pull-request`` scripts from openembedded-core to create and send a
|
||||
patch series with a link to the branch for review.
|
||||
|
||||
Follow this procedure to push a change to an upstream "contrib" Git
|
||||
repository once the steps in
|
||||
":ref:`contributor-guide/submit-changes:preparing changes for submission`"
|
||||
have been followed:
|
||||
|
||||
.. note::
|
||||
|
||||
You can find general Git information on how to push a change upstream
|
||||
in the
|
||||
`Git Community Book <https://git-scm.com/book/en/v2/Distributed-Git-Distributed-Workflows>`__.
|
||||
|
||||
#. *Request Push Access to an "Upstream" Contrib Repository:* Send an email to
|
||||
``helpdesk@yoctoproject.org``:
|
||||
|
||||
   - Attach your SSH public key, which is usually named ``id_rsa.pub``.
     If you don't have one, generate it by running ``ssh-keygen -t rsa -b 4096 -C "your_email@example.com"``.
|
||||
- List the repositories you're planning to contribute to.
|
||||
|
||||
- Include your preferred branch prefix for ``-contrib`` repositories.
|
||||
|
||||
#. *Push Your Commits to the "Contrib" Upstream:* Push your
|
||||
changes to that repository::
|
||||
|
||||
$ git push upstream_remote_repo local_branch_name
|
||||
|
||||
For example, suppose you have permissions to push
|
||||
into the upstream ``meta-intel-contrib`` repository and you are
|
||||
working in a local branch named `your_name`\ ``/README``. The following
|
||||
command pushes your local commits to the ``meta-intel-contrib``
|
||||
upstream repository and puts the commit in a branch named
|
||||
`your_name`\ ``/README``::
|
||||
|
||||
$ git push meta-intel-contrib your_name/README
|
||||
|
||||
#. *Determine Who to Notify:* Determine the maintainer or the mailing
|
||||
list that you need to notify for the change.
|
||||
|
||||
Before submitting any change, you need to be sure who the maintainer
|
||||
is or what mailing list that you need to notify. Use either these
|
||||
methods to find out:
|
||||
|
||||
- *Maintenance File:* Examine the ``maintainers.inc`` file, which is
|
||||
located in the :term:`Source Directory` at
|
||||
``meta/conf/distro/include``, to see who is responsible for code.
|
||||
|
||||
- *Search by File:* Using :ref:`overview-manual/development-environment:git`, you can
|
||||
enter the following command to bring up a short list of all
|
||||
commits against a specific file::
|
||||
|
||||
git shortlog -- filename
|
||||
|
||||
Just provide the name of the file for which you are interested. The
|
||||
information returned is not ordered by history but does include a
|
||||
list of everyone who has committed grouped by name. From the list,
|
||||
you can see who is responsible for the bulk of the changes against
|
||||
the file.
|
||||
|
||||
- *Find the Mailing List to Use:* See the
|
||||
":ref:`contributor-guide/submit-changes:finding a suitable mailing list`"
|
||||
section above.
|
||||
|
||||
#. *Make a Pull Request:* Notify the maintainer or the mailing list that
|
||||
you have pushed a change by making a pull request.
|
||||
|
||||
The Yocto Project provides two scripts that conveniently let you
|
||||
generate and send pull requests to the Yocto Project. These scripts
|
||||
are ``create-pull-request`` and ``send-pull-request``. You can find
|
||||
these scripts in the ``scripts`` directory within the
|
||||
:term:`Source Directory` (e.g.
|
||||
``poky/scripts``).
|
||||
|
||||
Using these scripts correctly formats the requests without
|
||||
introducing any whitespace or HTML formatting. The maintainer that
|
||||
receives your patches either directly or through the mailing list
|
||||
needs to be able to save and apply them directly from your emails.
|
||||
Using these scripts is the preferred method for sending patches.
|
||||
|
||||
First, create the pull request. For example, the following command
|
||||
runs the script, specifies the upstream repository in the contrib
|
||||
directory into which you pushed the change, and provides a subject
|
||||
line in the created patch files::
|
||||
|
||||
$ poky/scripts/create-pull-request -u meta-intel-contrib -s "Updated Manual Section Reference in README"
|
||||
|
||||
Running this script forms ``*.patch`` files in a folder named
|
||||
``pull-``\ `PID` in the current directory. One of the patch files is a
|
||||
cover letter.
|
||||
|
||||
Before running the ``send-pull-request`` script, you must edit the
|
||||
cover letter patch to insert information about your change. After
|
||||
editing the cover letter, send the pull request. For example, the
|
||||
following command runs the script and specifies the patch directory
|
||||
and email address. In this example, the email address is a mailing
|
||||
list::
|
||||
|
||||
$ poky/scripts/send-pull-request -p ~/meta-intel/pull-10565 -t meta-intel@lists.yoctoproject.org
|
||||
|
||||
You need to follow the prompts as the script is interactive.
|
||||
|
||||
.. note::
|
||||
|
||||
For help on using these scripts, simply provide the ``-h``
|
||||
argument as follows::
|
||||
|
||||
$ poky/scripts/create-pull-request -h
|
||||
$ poky/scripts/send-pull-request -h
|
||||
|
||||
Submitting Changes to Stable Release Branches
|
||||
=============================================
|
||||
|
||||
The process for proposing changes to a Yocto Project stable branch differs
|
||||
from the steps described above. Changes to a stable branch must address
|
||||
identified bugs or CVEs and should be made carefully in order to avoid the
|
||||
risk of introducing new bugs or breaking backwards compatibility. Typically
|
||||
bug fixes must already be accepted into the master branch before they can be
|
||||
backported to a stable branch unless the bug in question does not affect the
|
||||
master branch or the fix on the master branch is unsuitable for backporting.
|
||||
|
||||
The list of stable branches along with the status and maintainer for each
|
||||
branch can be obtained from the :yocto_home:`Releases </development/releases/>`
|
||||
page.
|
||||
|
||||
.. note::
|
||||
|
||||
Changes will not typically be accepted for branches which are marked as
|
||||
End-Of-Life (EOL).
|
||||
|
||||
With this in mind, the steps to submit a change for a stable branch are as
|
||||
follows:
|
||||
|
||||
#. *Identify the bug or CVE to be fixed:* This information should be
|
||||
collected so that it can be included in your submission.
|
||||
|
||||
See :ref:`dev-manual/vulnerabilities:checking for vulnerabilities`
|
||||
for details about CVE tracking.
|
||||
|
||||
#. *Check if the fix is already present in the master branch:* This will
|
||||
result in the most straightforward path into the stable branch for the
|
||||
fix.
|
||||
|
||||
#. *If the fix is present in the master branch --- submit a backport request
|
||||
by email:* You should send an email to the relevant stable branch
|
||||
maintainer and the mailing list with details of the bug or CVE to be
|
||||
fixed, the commit hash on the master branch that fixes the issue and
|
||||
the stable branches which you would like this fix to be backported to.
|
||||
|
||||
#. *If the fix is not present in the master branch --- submit the fix to the
|
||||
master branch first:* This will ensure that the fix passes through the
|
||||
project's usual patch review and test processes before being accepted.
|
||||
It will also ensure that bugs are not left unresolved in the master
|
||||
branch itself. Once the fix is accepted in the master branch a backport
|
||||
request can be submitted as above.
|
||||
|
||||
#. *If the fix is unsuitable for the master branch --- submit a patch
|
||||
directly for the stable branch:* This method should be considered as a
|
||||
last resort. It is typically necessary when the master branch is using
|
||||
a newer version of the software which includes an upstream fix for the
|
||||
issue or when the issue has been fixed on the master branch in a way
|
||||
that introduces backwards incompatible changes. In this case follow the
|
||||
steps in ":ref:`contributor-guide/submit-changes:preparing changes for submission`"
|
||||
and in the following sections but modify the subject header of your patch
|
||||
email to include the name of the stable branch which you are
|
||||
   targeting. This can be done using the ``--subject-prefix`` argument to
``git format-patch``, for example to submit a patch to the
|
||||
"&DISTRO_NAME_NO_CAP_MINUS_ONE;" branch use::
|
||||
|
||||
git format-patch --subject-prefix='&DISTRO_NAME_NO_CAP_MINUS_ONE;][PATCH' ...
|
||||
|
||||
Taking Patch Review into Account
|
||||
================================
|
||||
|
||||
You may get feedback on your submitted patches from other community members
|
||||
or from the automated patchtest service. If issues are identified in your
|
||||
patches then it is usually necessary to address these before the patches are
|
||||
accepted into the project. In this case you should rework your commits according
to the feedback and submit an updated version to the relevant mailing list.
|
||||
|
||||
In any case, never fix reported issues by fixing them in new commits
|
||||
on the tip of your branch. Always come up with a new series of commits
|
||||
without the reported issues.
|
||||
|
||||
.. note::
|
||||
|
||||
It is a good idea to send a copy to the reviewers who provided feedback
|
||||
to the previous version of the patch. You can make sure this happens
|
||||
by adding a ``CC`` tag to the commit description::
|
||||
|
||||
CC: William Shakespeare <bill@yoctoproject.org>
|
||||
|
||||
A single patch can be amended using ``git commit --amend``, and multiple
|
||||
patches can be easily reworked and reordered through an interactive Git rebase::
|
||||
|
||||
git rebase -i <ref-branch>
|
||||
|
||||
See `this tutorial <https://hackernoon.com/beginners-guide-to-interactive-rebasing-346a3f9c3a6d>`__
|
||||
for practical guidance about using Git interactive rebasing.
|
||||
|
||||
You should also modify the ``[PATCH]`` tag in the email subject line when
|
||||
sending the revised patch to mark the new iteration as ``[PATCH v2]``,
|
||||
``[PATCH v3]``, etc as appropriate. This can be done by passing the ``-v``
|
||||
argument to ``git format-patch`` with a version number::
|
||||
|
||||
git format-patch -v2 <ref-branch>
|
||||
|
||||
|
||||
After generating updated patches (v2, v3, and so on) via ``git
|
||||
format-patch``, ideally developers will add a patch version changelog
|
||||
to each patch that describes what has changed between each revision of
|
||||
the patch. Add patch version changelogs after the ``---`` marker in the
|
||||
patch, indicating that this information is part of this patch, but is not
|
||||
suitable for inclusion in the commit message (i.e. the git history) itself.
|
||||
Providing a patch version changelog makes it easier for maintainers and
|
||||
reviewers to succinctly understand what changed in all versions of the
|
||||
patch, without having to consult alternate sources of information, such as
|
||||
searching through messages on a mailing list. For example::
|
||||
|
||||
<patch title>
|
||||
|
||||
<commit message>
|
||||
|
||||
<Signed-off-by/other trailers>
|
||||
---
|
||||
changes in v4:
|
||||
- provide a clearer commit message
|
||||
- fix spelling mistakes
|
||||
|
||||
changes in v3:
|
||||
- replace func() to use other_func() instead
|
||||
|
||||
changes in v2:
|
||||
- this patch was added in v2
|
||||
---
|
||||
<diffstat output>
|
||||
|
||||
<unified diff>
|
||||
|
||||
Lastly please ensure that you also test your revised changes. In particular
|
||||
please don't just edit the patch file written out by ``git format-patch`` and
|
||||
resend it.
|
||||
|
||||
Tracking the Status of Patches
|
||||
==============================
|
||||
|
||||
The Yocto Project uses a `Patchwork instance <https://patchwork.yoctoproject.org/>`__
|
||||
to track the status of patches submitted to the various mailing lists and to
|
||||
support automated patch testing. Each submitted patch is checked for common
|
||||
mistakes and deviations from the expected patch format and submitters are
|
||||
notified by ``patchtest`` if such mistakes are found. This process helps to
|
||||
reduce the burden of patch review on maintainers.
|
||||
|
||||
.. note::
|
||||
|
||||
This system is imperfect and changes can sometimes get lost in the flow.
|
||||
Asking about the status of a patch or change is reasonable if the change
|
||||
has been idle for a while with no feedback.
|
||||
|
||||
If your patches have not had any feedback in a few days, they may have already
|
||||
been merged. You can run ``git pull`` on your branch to check this. Note that many if
not most layer maintainers do not send out acknowledgement emails when they
|
||||
accept patches. Alternatively, if there is no response or merge after a few days
|
||||
the patch may have been missed or the appropriate reviewers may not currently be
|
||||
around. It is then perfectly fine to reply to it yourself with a reminder asking
|
||||
for feedback.
|
||||
|
||||
.. note::
|
||||
|
||||
   Patch reviews for feature and recipe upgrade patches are likely to be delayed
   during a feature freeze because these types of patches aren't merged
   at that time --- you may have to wait until after the freeze is lifted.
|
||||
Maintainers also commonly use ``-next`` branches to test submissions prior to
|
||||
merging patches. Thus, you can get an idea of the status of a patch based on
|
||||
whether the patch has been merged into one of these branches. The commonly
|
||||
used testing branches for OpenEmbedded-Core are as follows:
|
||||
|
||||
- *openembedded-core "master-next" branch:* This branch is part of the
|
||||
:oe_git:`openembedded-core </openembedded-core/>` repository and contains
|
||||
proposed changes to the core metadata.
|
||||
|
||||
- *poky "master-next" branch:* This branch is part of the
|
||||
:yocto_git:`poky </poky/>` repository and combines proposed
|
||||
changes to BitBake, the core metadata and the poky distro.
|
||||
|
||||
Similarly, stable branches maintained by the project may have corresponding
|
||||
``-next`` branches which collect proposed changes. For example,
|
||||
``&DISTRO_NAME_NO_CAP;-next`` and ``&DISTRO_NAME_NO_CAP_MINUS_ONE;-next``
|
||||
branches in both the "openembedded-core" and "poky" repositories.
|
||||
Other layers may have similar testing branches but there is no formal
|
||||
requirement or standard for these so please check the documentation for the
|
||||
layers you are contributing to.
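For example, one way to check whether a change of yours has been picked up is
to search the relevant branches directly. This is only a sketch: it assumes
your remote is named ``origin`` and that you search for part of your patch's
subject line::

   $ git fetch origin
   $ git log --oneline origin/master --grep="part of your patch subject"
   $ git log --oneline origin/master-next --grep="part of your patch subject"

If the commit shows up in ``master-next`` but not yet in ``master``, it is
still being tested prior to merging.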
|
||||
|
||||
Acceptance of AI Generated Code
|
||||
===============================
|
||||
|
||||
The Yocto Project and OpenEmbedded follow the guidance of the Linux Foundation
|
||||
in regards to the use of generative AI tools. See:
|
||||
https://www.linuxfoundation.org/legal/generative-ai.
|
||||
|
||||
All of the existing guidelines in this document are expected to be followed,
|
||||
including those in the :doc:`recipe-style-guide`, and the changes must be
contributed with the additional requirements below on top of the items in section
|
||||
:ref:`contributor-guide/submit-changes:Implement and commit changes`.
|
||||
|
||||
All AI Generated Code must be labeled as such in the commit message,
|
||||
prior to your ``Signed-off-by`` line. It is also strongly recommended
|
||||
that any patches or code within the commit also have a comment or other
|
||||
indication that this code was AI generated.
|
||||
|
||||
For example, here is a properly formatted commit message::
|
||||
|
||||
component: Add the ability to ...
|
||||
|
||||
AI-Generated: Uses GitHub Copilot
|
||||
|
||||
Signed-off-by: Your Name <your.name@domain>
|
||||
|
||||
The ``Signed-off-by`` line must be written by you, and not the AI helper.
|
||||
As a reminder, when contributing a change, your ``Signed-off-by`` line is
|
||||
required and the stipulations in the `Developer's Statement of Origin
|
||||
1.1 <https://developercertificate.org/>`__ still apply.
|
||||
|
||||
Additionally, you must stipulate AI contributions conform to the Linux
|
||||
Foundation policy, specifically:
|
||||
|
||||
#. Contributors should ensure that the terms and conditions of the generative AI
|
||||
tool do not place any contractual restrictions on how the tool's output can
|
||||
be used that are inconsistent with the project's open source software
|
||||
license, the project's intellectual property policies, or the Open Source
|
||||
Definition.
|
||||
|
||||
#. If any pre-existing copyrighted materials (including pre-existing open
|
||||
source code) authored or owned by third parties are included in the AI tool's
|
||||
output, prior to contributing such output to the project, the Contributor
|
||||
should confirm that they have permission from the third party
|
||||
owners -- such as in the form of an open source license or public domain
|
||||
declaration that complies with the project's licensing policies -- to use and
|
||||
modify such pre-existing materials and contribute them to the project.
|
||||
Additionally, the contributor should provide notice and attribution of such
|
||||
third party rights, along with information about the applicable license
|
||||
terms, with their contribution.
|
||||
@@ -1,59 +0,0 @@
|
||||
.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
|
||||
|
||||
Flashing Images Using ``bmaptool``
|
||||
**********************************
|
||||
|
||||
A fast and easy way to flash an image to a bootable device is to use
|
||||
Bmaptool, which is integrated into the OpenEmbedded build system.
|
||||
Bmaptool is a generic tool that creates a file's block map (bmap) and
|
||||
then uses that map to copy the file. As compared to traditional tools
|
||||
such as dd or cp, Bmaptool can copy (or flash) large files like raw
|
||||
system image files much faster.
|
||||
|
||||
.. note::
|
||||
|
||||
- If you are using Ubuntu or Debian distributions, you can install
|
||||
the ``bmap-tools`` package using the following command and then
|
||||
use the tool without specifying ``PATH`` even from the root
|
||||
account::
|
||||
|
||||
$ sudo apt install bmap-tools
|
||||
|
||||
- If you are unable to install the ``bmap-tools`` package, you will
|
||||
need to build Bmaptool before using it. Use the following command::
|
||||
|
||||
$ bitbake bmap-tools-native
|
||||
|
||||
Following is an example that shows how to flash a Wic image. Realize
|
||||
that while this example uses a Wic image, you can use Bmaptool to flash
|
||||
any type of image. Use these steps to flash an image using Bmaptool:
|
||||
|
||||
#. *Update your local.conf File:* You need to have the following set
|
||||
in your ``local.conf`` file before building your image::
|
||||
|
||||
IMAGE_FSTYPES += "wic wic.bmap"
|
||||
|
||||
#. *Get Your Image:* Either have your image ready (pre-built with the
|
||||
:term:`IMAGE_FSTYPES`
|
||||
setting previously mentioned) or take the step to build the image::
|
||||
|
||||
$ bitbake image
|
||||
|
||||
#. *Flash the Device:* Flash the device with the image by using Bmaptool
|
||||
depending on your particular setup. The following commands assume the
|
||||
image resides in the :term:`Build Directory`'s ``deploy/images/`` area:
|
||||
|
||||
- If you have write access to the media, use this command form::
|
||||
|
||||
$ oe-run-native bmap-tools-native bmaptool copy build-directory/tmp/deploy/images/machine/image.wic /dev/sdX
|
||||
|
||||
- If you do not have write access to the media, set your permissions
|
||||
first and then use the same command form::
|
||||
|
||||
$ sudo chmod 666 /dev/sdX
|
||||
$ oe-run-native bmap-tools-native bmaptool copy build-directory/tmp/deploy/images/machine/image.wic /dev/sdX
|
||||
|
||||
For help on the ``bmaptool`` command, use the following command::
|
||||
|
||||
$ bmaptool --help
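Flashing writes directly to the device, so double-check that ``/dev/sdX``
really is your removable media before running the copy. For example, you can
list the attached block devices and their sizes with ``lsblk`` and pick the
device whose size matches your SD card or USB stick::

   $ lsblk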
|
||||
|
||||
@@ -1,409 +0,0 @@
|
||||
.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
|
||||
|
||||
Maintaining Build Output Quality
|
||||
********************************
|
||||
|
||||
Many factors can influence the quality of a build. For example, if you
|
||||
upgrade a recipe to use a new version of an upstream software package or
|
||||
you experiment with some new configuration options, subtle changes can
|
||||
occur that you might not detect until later. Consider the case where
|
||||
your recipe is using a newer version of an upstream package. In this
|
||||
case, a new version of a piece of software might introduce an optional
|
||||
dependency on another library, which is auto-detected. If that library
|
||||
has already been built when the software is building, the software will
|
||||
link to the built library and that library will be pulled into your
|
||||
image along with the new software even if you did not want the library.
|
||||
|
||||
The :ref:`ref-classes-buildhistory` class helps you maintain the quality of
|
||||
your build output. You can use the class to highlight unexpected and possibly
|
||||
unwanted changes in the build output. When you enable build history, it records
|
||||
information about the contents of each package and image and then commits that
|
||||
information to a local Git repository where you can examine the information.
|
||||
|
||||
The remainder of this section describes the following:
|
||||
|
||||
- :ref:`How you can enable and disable build history <dev-manual/build-quality:enabling and disabling build history>`
|
||||
|
||||
- :ref:`How to understand what the build history contains <dev-manual/build-quality:understanding what the build history contains>`
|
||||
|
||||
- :ref:`How to limit the information used for build history <dev-manual/build-quality:using build history to gather image information only>`
|
||||
|
||||
- :ref:`How to examine the build history from both a command-line and web interface <dev-manual/build-quality:examining build history information>`
|
||||
|
||||
Enabling and Disabling Build History
|
||||
====================================
|
||||
|
||||
Build history is disabled by default. To enable it, add the following
|
||||
:term:`INHERIT` statement and set the :term:`BUILDHISTORY_COMMIT` variable to
|
||||
"1" at the end of your ``conf/local.conf`` file found in the
|
||||
:term:`Build Directory`::
|
||||
|
||||
INHERIT += "buildhistory"
|
||||
BUILDHISTORY_COMMIT = "1"
|
||||
|
||||
Enabling build history as
|
||||
previously described causes the OpenEmbedded build system to collect
|
||||
build output information and commit it as a single commit to a local
|
||||
:ref:`overview-manual/development-environment:git` repository.
|
||||
|
||||
.. note::
|
||||
|
||||
Enabling build history increases your build times slightly,
|
||||
particularly for images, and increases the amount of disk space used
|
||||
during the build.
|
||||
|
||||
You can disable build history by removing the previous statements from
|
||||
your ``conf/local.conf`` file.
|
||||
|
||||
Understanding What the Build History Contains
|
||||
=============================================
|
||||
|
||||
Build history information is kept in ``${``\ :term:`TOPDIR`\ ``}/buildhistory``
|
||||
in the :term:`Build Directory` as defined by the :term:`BUILDHISTORY_DIR`
|
||||
variable. Here is an example abbreviated listing:
|
||||
|
||||
.. image:: figures/buildhistory.png
|
||||
:align: center
|
||||
:width: 50%
|
||||
|
||||
At the top level, there is a ``metadata-revs`` file that lists the
|
||||
revisions of the repositories for the enabled layers when the build was
|
||||
produced. The rest of the data splits into separate ``packages``,
|
||||
``images`` and ``sdk`` directories, the contents of which are described
|
||||
as follows.
|
||||
|
||||
Build History Package Information
|
||||
---------------------------------
|
||||
|
||||
The history for each package contains a text file that has name-value
|
||||
pairs with information about the package. For example,
|
||||
``buildhistory/packages/i586-poky-linux/busybox/busybox/latest``
|
||||
contains the following:
|
||||
|
||||
.. code-block:: none
|
||||
|
||||
PV = 1.22.1
|
||||
PR = r32
|
||||
RPROVIDES =
|
||||
RDEPENDS = glibc (>= 2.20) update-alternatives-opkg
|
||||
RRECOMMENDS = busybox-syslog busybox-udhcpc update-rc.d
|
||||
PKGSIZE = 540168
|
||||
FILES = /usr/bin/* /usr/sbin/* /usr/lib/busybox/* /usr/lib/lib*.so.* \
|
||||
/etc /com /var /bin/* /sbin/* /lib/*.so.* /lib/udev/rules.d \
|
||||
/usr/lib/udev/rules.d /usr/share/busybox /usr/lib/busybox/* \
|
||||
/usr/share/pixmaps /usr/share/applications /usr/share/idl \
|
||||
/usr/share/omf /usr/share/sounds /usr/lib/bonobo/servers
|
||||
FILELIST = /bin/busybox /bin/busybox.nosuid /bin/busybox.suid /bin/sh \
|
||||
/etc/busybox.links.nosuid /etc/busybox.links.suid
|
||||
|
||||
Most of these
|
||||
name-value pairs correspond to variables used to produce the package.
|
||||
The exceptions are ``FILELIST``, which is the actual list of files in
|
||||
the package, and ``PKGSIZE``, which is the total size of files in the
|
||||
package in bytes.
|
||||
|
||||
There is also a file that corresponds to the recipe from which the package
|
||||
came (e.g. ``buildhistory/packages/i586-poky-linux/busybox/latest``):
|
||||
|
||||
.. code-block:: none
|
||||
|
||||
PV = 1.22.1
|
||||
PR = r32
|
||||
DEPENDS = initscripts kern-tools-native update-rc.d-native \
|
||||
virtual/i586-poky-linux-compilerlibs virtual/i586-poky-linux-gcc \
|
||||
virtual/libc virtual/update-alternatives
|
||||
PACKAGES = busybox-ptest busybox-httpd busybox-udhcpd busybox-udhcpc \
|
||||
busybox-syslog busybox-mdev busybox-hwclock busybox-dbg \
|
||||
busybox-staticdev busybox-dev busybox-doc busybox-locale busybox
|
||||
|
||||
Finally, for those recipes fetched from a version control system (e.g.,
|
||||
Git), there is a file that lists source revisions that are specified in
|
||||
the recipe and the actual revisions used during the build. Listed
|
||||
and actual revisions might differ when
|
||||
:term:`SRCREV` is set to
|
||||
${:term:`AUTOREV`}. Here is an
|
||||
example assuming
|
||||
``buildhistory/packages/qemux86-poky-linux/linux-yocto/latest_srcrev``::
|
||||
|
||||
# SRCREV_machine = "38cd560d5022ed2dbd1ab0dca9642e47c98a0aa1"
|
||||
SRCREV_machine = "38cd560d5022ed2dbd1ab0dca9642e47c98a0aa1"
|
||||
# SRCREV_meta = "a227f20eff056e511d504b2e490f3774ab260d6f"
|
||||
SRCREV_meta = "a227f20eff056e511d504b2e490f3774ab260d6f"
|
||||
|
||||
You can use the
|
||||
``buildhistory-collect-srcrevs`` command with the ``-a`` option to
|
||||
collect the stored :term:`SRCREV` values from build history and report them
|
||||
in a format suitable for use in global configuration (e.g.,
|
||||
``local.conf`` or a distro include file) to override floating
|
||||
:term:`AUTOREV` values to a fixed set of revisions. Here is some example
|
||||
output from this command::
|
||||
|
||||
$ buildhistory-collect-srcrevs -a
|
||||
# all-poky-linux
|
||||
SRCREV:pn-ca-certificates = "07de54fdcc5806bde549e1edf60738c6bccf50e8"
|
||||
SRCREV:pn-update-rc.d = "8636cf478d426b568c1be11dbd9346f67e03adac"
|
||||
# core2-64-poky-linux
|
||||
SRCREV:pn-binutils = "87d4632d36323091e731eb07b8aa65f90293da66"
|
||||
SRCREV:pn-btrfs-tools = "8ad326b2f28c044cb6ed9016d7c3285e23b673c8"
|
||||
SRCREV_bzip2-tests:pn-bzip2 = "f9061c030a25de5b6829e1abf373057309c734c0"
|
||||
SRCREV:pn-e2fsprogs = "02540dedd3ddc52c6ae8aaa8a95ce75c3f8be1c0"
|
||||
SRCREV:pn-file = "504206e53a89fd6eed71aeaf878aa3512418eab1"
|
||||
SRCREV_glibc:pn-glibc = "24962427071fa532c3c48c918e9d64d719cc8a6c"
|
||||
SRCREV:pn-gnome-desktop-testing = "e346cd4ed2e2102c9b195b614f3c642d23f5f6e7"
|
||||
SRCREV:pn-init-system-helpers = "dbd9197569c0935029acd5c9b02b84c68fd937ee"
|
||||
SRCREV:pn-kmod = "b6ecfc916a17eab8f93be5b09f4e4f845aabd3d1"
|
||||
SRCREV:pn-libnsl2 = "82245c0c58add79a8e34ab0917358217a70e5100"
|
||||
SRCREV:pn-libseccomp = "57357d2741a3b3d3e8425889a6b79a130e0fa2f3"
|
||||
SRCREV:pn-libxcrypt = "50cf2b6dd4fdf04309445f2eec8de7051d953abf"
|
||||
SRCREV:pn-ncurses = "51d0fd9cc3edb975f04224f29f777f8f448e8ced"
|
||||
SRCREV:pn-procps = "19a508ea121c0c4ac6d0224575a036de745eaaf8"
|
||||
SRCREV:pn-psmisc = "5fab6b7ab385080f1db725d6803136ec1841a15f"
|
||||
SRCREV:pn-ptest-runner = "bcb82804daa8f725b6add259dcef2067e61a75aa"
|
||||
SRCREV:pn-shared-mime-info = "18e558fa1c8b90b86757ade09a4ba4d6a6cf8f70"
|
||||
SRCREV:pn-zstd = "e47e674cd09583ff0503f0f6defd6d23d8b718d3"
|
||||
# qemux86_64-poky-linux
|
||||
SRCREV_machine:pn-linux-yocto = "20301aeb1a64164b72bc72af58802b315e025c9c"
|
||||
SRCREV_meta:pn-linux-yocto = "2d38a472b21ae343707c8bd64ac68a9eaca066a0"
|
||||
# x86_64-linux
|
||||
SRCREV:pn-binutils-cross-x86_64 = "87d4632d36323091e731eb07b8aa65f90293da66"
|
||||
SRCREV_glibc:pn-cross-localedef-native = "24962427071fa532c3c48c918e9d64d719cc8a6c"
|
||||
SRCREV_localedef:pn-cross-localedef-native = "794da69788cbf9bf57b59a852f9f11307663fa87"
|
||||
SRCREV:pn-debianutils-native = "de14223e5bffe15e374a441302c528ffc1cbed57"
|
||||
SRCREV:pn-libmodulemd-native = "ee80309bc766d781a144e6879419b29f444d94eb"
|
||||
SRCREV:pn-virglrenderer-native = "363915595e05fb252e70d6514be2f0c0b5ca312b"
|
||||
SRCREV:pn-zstd-native = "e47e674cd09583ff0503f0f6defd6d23d8b718d3"
|
||||
|
||||
.. note::
|
||||
|
||||
Here are some notes on using the ``buildhistory-collect-srcrevs`` command:
|
||||
|
||||
- By default, only values where the :term:`SRCREV` was not hardcoded
|
||||
(usually when :term:`AUTOREV` is used) are reported. Use the ``-a``
|
||||
option to see all :term:`SRCREV` values.
|
||||
|
||||
- The output statements might not have any effect if overrides are
|
||||
applied elsewhere in the build system configuration. Use the
|
||||
``-f`` option to add the ``forcevariable`` override to each output
|
||||
line if you need to work around this restriction.
|
||||
|
||||
- The script does apply special handling when building for multiple
|
||||
machines. However, the script does place a comment before each set
|
||||
of values that specifies the triplet to which they belong, as
|
||||
previously shown (e.g., ``i586-poky-linux``).
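As a sketch of how this output can be used, you could capture it into an
include file (the file name below is just an example) and pull that file into
your configuration::

   $ buildhistory-collect-srcrevs -a > conf/fixed-srcrevs.inc

Then, in your ``local.conf``::

   require conf/fixed-srcrevs.inc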
|
||||
|
||||
Build History Image Information
|
||||
-------------------------------
|
||||
|
||||
The files produced for each image are as follows:
|
||||
|
||||
- ``image-files:`` A directory containing selected files from the root
|
||||
filesystem. The files are defined by
|
||||
:term:`BUILDHISTORY_IMAGE_FILES`.
|
||||
|
||||
- ``build-id.txt:`` Human-readable information about the build
|
||||
configuration and metadata source revisions. This file contains the
|
||||
full build header as printed by BitBake.
|
||||
|
||||
- ``*.dot:`` Dependency graphs for the image that are compatible with
|
||||
``graphviz``.
|
||||
|
||||
- ``files-in-image.txt:`` A list of files in the image with
|
||||
permissions, owner, group, size, and symlink information.
|
||||
|
||||
- ``image-info.txt:`` A text file containing name-value pairs with
|
||||
information about the image. See the following listing example for
|
||||
more information.
|
||||
|
||||
- ``installed-package-names.txt:`` A list of installed packages by name
|
||||
only.
|
||||
|
||||
- ``installed-package-sizes.txt:`` A list of installed packages ordered
|
||||
by size.
|
||||
|
||||
- ``installed-packages.txt:`` A list of installed packages with full
|
||||
package filenames.
|
||||
|
||||
.. note::
|
||||
|
||||
Installed package information can be gathered and produced
|
||||
even if package management is disabled for the final image.
|
||||
|
||||
Here is an example of ``image-info.txt``:
|
||||
|
||||
.. code-block:: none
|
||||
|
||||
DISTRO = poky
|
||||
DISTRO_VERSION = 3.4+snapshot-a0245d7be08f3d24ea1875e9f8872aa6bbff93be
|
||||
USER_CLASSES = buildstats
|
||||
IMAGE_CLASSES = qemuboot qemuboot license_image
|
||||
IMAGE_FEATURES = debug-tweaks
|
||||
IMAGE_LINGUAS =
|
||||
IMAGE_INSTALL = packagegroup-core-boot speex speexdsp
|
||||
BAD_RECOMMENDATIONS =
|
||||
NO_RECOMMENDATIONS =
|
||||
PACKAGE_EXCLUDE =
|
||||
ROOTFS_POSTPROCESS_COMMAND = write_package_manifest; license_create_manifest; cve_check_write_rootfs_manifest; ssh_allow_empty_password; ssh_allow_root_login; postinst_enable_logging; rootfs_update_timestamp; write_image_test_data; empty_var_volatile; sort_passwd; rootfs_reproducible;
|
||||
IMAGE_POSTPROCESS_COMMAND = buildhistory_get_imageinfo ;
|
||||
IMAGESIZE = 9265
|
||||
|
||||
Other than ``IMAGESIZE``,
|
||||
which is the total size of the files in the image in Kbytes, the
|
||||
name-value pairs are variables that may have influenced the content of
|
||||
the image. This information is often useful when you are trying to
|
||||
determine why a change in the package or file listings has occurred.
|
||||
|
||||
Using Build History to Gather Image Information Only
|
||||
----------------------------------------------------
|
||||
|
||||
As you can see, build history produces image information, including
|
||||
dependency graphs, so you can see why something was pulled into the
|
||||
image. If you are just interested in this information and not interested
|
||||
in collecting specific package or SDK information, you can enable
|
||||
writing only image information without any history by adding the
|
||||
following to your ``conf/local.conf`` file found in the
|
||||
:term:`Build Directory`::
|
||||
|
||||
INHERIT += "buildhistory"
|
||||
BUILDHISTORY_COMMIT = "0"
|
||||
BUILDHISTORY_FEATURES = "image"
|
||||
|
||||
Here, you set the
|
||||
:term:`BUILDHISTORY_FEATURES`
|
||||
variable to use the image feature only.
|
||||
|
||||
Build History SDK Information
|
||||
-----------------------------
|
||||
|
||||
Build history collects similar information on the contents of SDKs (e.g.
|
||||
``bitbake -c populate_sdk imagename``) as compared to information it
|
||||
collects for images. Furthermore, this information differs depending on
|
||||
whether an extensible or standard SDK is being produced.
|
||||
|
||||
The following list shows the files produced for SDKs:
|
||||
|
||||
- ``files-in-sdk.txt:`` A list of files in the SDK with permissions,
|
||||
owner, group, size, and symlink information. This list includes both
|
||||
the host and target parts of the SDK.
|
||||
|
||||
- ``sdk-info.txt:`` A text file containing name-value pairs with
|
||||
information about the SDK. See the following listing example for more
|
||||
information.
|
||||
|
||||
- ``sstate-task-sizes.txt:`` A text file containing name-value pairs
|
||||
with information about task group sizes (e.g. :ref:`ref-tasks-populate_sysroot`
|
||||
tasks have a total size). The ``sstate-task-sizes.txt`` file exists
|
||||
only when an extensible SDK is created.
|
||||
|
||||
- ``sstate-package-sizes.txt:`` A text file containing name-value pairs
|
||||
with information for the shared-state packages and sizes in the SDK.
|
||||
The ``sstate-package-sizes.txt`` file exists only when an extensible
|
||||
SDK is created.
|
||||
|
||||
- ``sdk-files:`` A folder that contains copies of the files mentioned
|
||||
in ``BUILDHISTORY_SDK_FILES`` if the files are present in the output.
|
||||
Additionally, the default value of ``BUILDHISTORY_SDK_FILES`` is
|
||||
specific to the extensible SDK although you can set it differently if
|
||||
you would like to pull in specific files from the standard SDK.
|
||||
|
||||
The default files are ``conf/local.conf``, ``conf/bblayers.conf``,
|
||||
``conf/auto.conf``, ``conf/locked-sigs.inc``, and
|
||||
``conf/devtool.conf``. Thus, for an extensible SDK, these files get
|
||||
copied into the ``sdk-files`` directory.
|
||||
|
||||
- The following information appears under each of the ``host`` and
|
||||
``target`` directories for the portions of the SDK that run on the
|
||||
host and on the target, respectively:
|
||||
|
||||
.. note::
|
||||
|
||||
The following files for the most part are empty when producing an
|
||||
extensible SDK because this type of SDK is not constructed from
|
||||
packages as is the standard SDK.
|
||||
|
||||
- ``depends.dot:`` Dependency graph for the SDK that is compatible
|
||||
with ``graphviz``.
|
||||
|
||||
- ``installed-package-names.txt:`` A list of installed packages by
|
||||
name only.
|
||||
|
||||
- ``installed-package-sizes.txt:`` A list of installed packages
|
||||
ordered by size.
|
||||
|
||||
- ``installed-packages.txt:`` A list of installed packages with full
|
||||
package filenames.
|
||||
|
||||
Here is an example of ``sdk-info.txt``:
|
||||
|
||||
.. code-block:: none
|
||||
|
||||
DISTRO = poky
|
||||
DISTRO_VERSION = 1.3+snapshot-20130327
|
||||
SDK_NAME = poky-glibc-i686-arm
|
||||
SDK_VERSION = 1.3+snapshot
|
||||
SDKMACHINE =
|
||||
SDKIMAGE_FEATURES = dev-pkgs dbg-pkgs
|
||||
BAD_RECOMMENDATIONS =
|
||||
SDKSIZE = 352712
|
||||
|
||||
Other than ``SDKSIZE``, which is
|
||||
the total size of the files in the SDK in Kbytes, the name-value pairs
|
||||
are variables that might have influenced the content of the SDK. This
|
||||
information is often useful when you are trying to determine why a
|
||||
change in the package or file listings has occurred.
|
||||
|
||||
Examining Build History Information
|
||||
-----------------------------------
|
||||
|
||||
You can examine build history output from the command line or from a web
|
||||
interface.
|
||||
|
||||
To see any changes that have occurred (assuming you have
|
||||
:term:`BUILDHISTORY_COMMIT` = "1"),
|
||||
you can simply use any Git command that allows you to view the history
|
||||
of a repository. Here is one method::
|
||||
|
||||
$ git log -p
|
||||
|
||||
You need to realize,
|
||||
however, that this method does show changes that are not significant
|
||||
(e.g. a package's size changing by a few bytes).
|
||||
|
||||
There is a command-line tool called ``buildhistory-diff``, though,
|
||||
that queries the Git repository and prints just the differences that
|
||||
might be significant in human-readable form. Here is an example::
|
||||
|
||||
$ poky/poky/scripts/buildhistory-diff . HEAD^
|
||||
Changes to images/qemux86_64/glibc/core-image-minimal (files-in-image.txt):
|
||||
/etc/anotherpkg.conf was added
|
||||
/sbin/anotherpkg was added
|
||||
* (installed-package-names.txt):
|
||||
* anotherpkg was added
|
||||
Changes to images/qemux86_64/glibc/core-image-minimal (installed-package-names.txt):
|
||||
anotherpkg was added
|
||||
packages/qemux86_64-poky-linux/v86d: PACKAGES: added "v86d-extras"
|
||||
* PR changed from "r0" to "r1"
|
||||
* PV changed from "0.1.10" to "0.1.12"
|
||||
packages/qemux86_64-poky-linux/v86d/v86d: PKGSIZE changed from 110579 to 144381 (+30%)
|
||||
* PR changed from "r0" to "r1"
|
||||
* PV changed from "0.1.10" to "0.1.12"
|
||||
|
||||
.. note::
|
||||
|
||||
The ``buildhistory-diff`` tool requires the ``GitPython``
|
||||
package. Be sure to install it using ``pip3`` as follows::
|
||||
|
||||
$ pip3 install GitPython --user
|
||||
|
||||
|
||||
Alternatively, you can install ``python3-git`` using the appropriate
|
||||
distribution package manager (e.g. ``apt``, ``dnf``, or ``zypper``).
|
||||
|
||||
To see changes to the build history using a web interface, follow the
|
||||
instructions in the ``README`` file
|
||||
:yocto_git:`here </buildhistory-web/>`.
|
||||
|
||||
Here is a sample screenshot of the interface:
|
||||
|
||||
.. image:: figures/buildhistory-web.png
|
||||
:width: 100%
|
||||
|
||||
11783
documentation/dev-manual/common-tasks.rst
@@ -1,135 +0,0 @@
|
||||
.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
|
||||
|
||||
Creating Your Own Distribution
|
||||
******************************
|
||||
|
||||
When you build an image using the Yocto Project and do not alter any
|
||||
distribution :term:`Metadata`, you are using the Poky distribution.
|
||||
Poky is explicitly a *reference* distribution for testing and
|
||||
development purposes. It enables most hardware and software features
|
||||
so that they can be tested, but this also means that from a security
|
||||
point of view the attack surface is very large. Additionally, at some
|
||||
point it is likely that you will want to gain more control over package
|
||||
alternative selections, compile-time options, and other low-level
|
||||
configurations. For both of these reasons, if you are using the Yocto
|
||||
Project for production use then you are strongly encouraged to create
|
||||
your own distribution.
|
||||
|
||||
To create your own distribution, the basic steps consist of creating
|
||||
your own distribution layer, creating your own distribution
|
||||
configuration file, and then adding any needed code and Metadata to the
|
||||
layer. The following steps provide some more detail:
|
||||
|
||||
- *Create a layer for your new distro:* Create your distribution layer
|
||||
so that you can keep your Metadata and code for the distribution
|
||||
separate. It is strongly recommended that you create and use your own
|
||||
layer for configuration and code. Using your own layer as compared to
|
||||
just placing configurations in a ``local.conf`` configuration file
|
||||
makes it easier to reproduce the same build configuration when using
|
||||
multiple build machines. See the
|
||||
":ref:`dev-manual/layers:creating a general layer using the \`\`bitbake-layers\`\` script`"
|
||||
section for information on how to quickly set up a layer.
|
||||
|
||||
- *Create the distribution configuration file:* The distribution
|
||||
configuration file needs to be created in the ``conf/distro``
|
||||
directory of your layer. You need to name it using your distribution
|
||||
name (e.g. ``mydistro.conf``). A minimal example file is sketched after this list.
|
||||
|
||||
.. note::
|
||||
|
||||
The :term:`DISTRO` variable in your ``local.conf`` file determines the
|
||||
name of your distribution.
|
||||
|
||||
You can split out parts of your configuration file into include files
|
||||
and then "require" them from within your distribution configuration
|
||||
file. Be sure to place the include files in the
|
||||
``conf/distro/include`` directory of your layer. A common example
|
||||
usage of include files would be to separate out the selection of
|
||||
desired version and revisions for individual recipes.
|
||||
|
||||
Your configuration file needs to set the following required
|
||||
variables:
|
||||
|
||||
- :term:`DISTRO_NAME`
|
||||
|
||||
- :term:`DISTRO_VERSION`
|
||||
|
||||
The following variables are optional and you typically set them
|
||||
from the distribution configuration file:
|
||||
|
||||
- :term:`DISTRO_FEATURES`
|
||||
|
||||
- :term:`DISTRO_EXTRA_RDEPENDS`
|
||||
|
||||
- :term:`DISTRO_EXTRA_RRECOMMENDS`
|
||||
|
||||
- :term:`TCLIBC`
|
||||
|
||||
.. tip::
|
||||
|
||||
If you want to base your distribution configuration file on the
|
||||
very basic configuration from OE-Core, you can use
|
||||
``conf/distro/defaultsetup.conf`` as a reference and just include
|
||||
variables that differ as compared to ``defaultsetup.conf``.
|
||||
Alternatively, you can create a distribution configuration file
|
||||
from scratch using the ``defaultsetup.conf`` file or configuration files
|
||||
from another distribution such as Poky as a reference.
|
||||
|
||||
- *Provide miscellaneous variables:* Be sure to define any other
|
||||
variables for which you want to create a default or enforce as part
|
||||
of the distribution configuration. You can include nearly any
|
||||
variable from the ``local.conf`` file. The variables you use are not
|
||||
limited to the list in the previous bulleted item.
|
||||
|
||||
- *Point to Your distribution configuration file:* In your ``local.conf``
|
||||
file in the :term:`Build Directory`, set your :term:`DISTRO` variable to
|
||||
point to your distribution's configuration file. For example, if your
|
||||
distribution's configuration file is named ``mydistro.conf``, then
|
||||
you point to it as follows::
|
||||
|
||||
DISTRO = "mydistro"
|
||||
|
||||
- *Add more to the layer if necessary:* Use your layer to hold other
|
||||
information needed for the distribution:
|
||||
|
||||
- Add recipes for installing distro-specific configuration files
|
||||
that are not already installed by another recipe. If you have
|
||||
distro-specific configuration files that are included by an
|
||||
existing recipe, you should add an append file (``.bbappend``) for
|
||||
those. For general information and recommendations on how to add
|
||||
recipes to your layer, see the
|
||||
":ref:`dev-manual/layers:creating your own layer`" and
|
||||
":ref:`dev-manual/layers:following best practices when creating layers`"
|
||||
sections.
|
||||
|
||||
- Add any image recipes that are specific to your distribution.
|
||||
|
||||
- Add a ``psplash`` append file for a branded splash screen, using
|
||||
the :term:`SPLASH_IMAGES` variable.
|
||||
|
||||
- Add any other append files to make custom changes that are
|
||||
specific to individual recipes.
|
||||
|
||||
For information on append files, see the
|
||||
":ref:`dev-manual/layers:appending other layers metadata with your layer`"
|
||||
section.
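To tie the above steps together, here is a minimal, hypothetical
``conf/distro/mydistro.conf``. All names and values are placeholders to adapt
to your own distribution::

   DISTRO = "mydistro"
   DISTRO_NAME = "My Distribution"
   DISTRO_VERSION = "1.0"

   # Optional settings; start from the OE-Core defaults and adjust as needed
   DISTRO_FEATURES ?= "${DISTRO_FEATURES_DEFAULT}"
   TCLIBC = "glibc"

With this file in place and ``DISTRO = "mydistro"`` set in your ``local.conf``,
the build uses your distribution configuration instead of Poky's.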
|
||||
|
||||
Copying and modifying the Poky distribution
|
||||
===========================================
|
||||
|
||||
Instead of creating a custom distribution from scratch as per above, you may
|
||||
wish to start your custom distribution configuration by copying the Poky
|
||||
distribution provided within the ``meta-poky`` layer and then modifying it.
|
||||
This is fine; however, if you do this you should keep the following in mind:
|
||||
|
||||
- Every reference to Poky needs to be updated in your copy so that it
|
||||
will still apply. This includes override usage within files (e.g. ``:poky``)
|
||||
and in directory names. This is a good opportunity to evaluate each one of
|
||||
these customizations to see if they are needed for your use case.
|
||||
|
||||
- Unless you also intend to use them, the ``poky-tiny``, ``poky-altcfg`` and
|
||||
``poky-bleeding`` variants and any references to them can be removed.
|
||||
|
||||
- More generally, the Poky distribution configuration enables a lot more
|
||||
than you likely need for your production use case. You should evaluate *every*
|
||||
configuration choice made in your copy to determine if it is needed.
|
||||
@@ -1,72 +0,0 @@
|
||||
.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
|
||||
|
||||
Creating a Custom Template Configuration Directory
|
||||
**************************************************
|
||||
|
||||
If you are producing your own customized version of the build system for
|
||||
use by other users, you might want to customize the message shown by the
|
||||
setup script or you might want to change the template configuration
|
||||
files (i.e. ``local.conf`` and ``bblayers.conf``) that are created in a
|
||||
new build directory.
|
||||
|
||||
The OpenEmbedded build system uses the environment variable
|
||||
``TEMPLATECONF`` to locate the directory from which it gathers
|
||||
configuration information that ultimately ends up in the
|
||||
:term:`Build Directory` ``conf`` directory.
|
||||
By default, ``TEMPLATECONF`` is set as follows in the ``poky``
|
||||
repository::
|
||||
|
||||
TEMPLATECONF=${TEMPLATECONF:-meta-poky/conf}
|
||||
|
||||
This is the
|
||||
directory used by the build system to find templates from which to build
|
||||
some key configuration files. If you look at this directory, you will
|
||||
see the ``bblayers.conf.sample``, ``local.conf.sample``, and
|
||||
``conf-notes.txt`` files. The build system uses these files to form the
|
||||
respective ``bblayers.conf`` and ``local.conf`` files, and to display the
|
||||
list of BitBake targets when running the setup script.
|
||||
|
||||
To override these default configuration files with configurations you
|
||||
want used within every new Build Directory, simply set the
|
||||
``TEMPLATECONF`` variable to your directory. The ``TEMPLATECONF``
|
||||
variable is set in the ``.templateconf`` file, which is in the top-level
|
||||
:term:`Source Directory` folder
|
||||
(e.g. ``poky``). Edit the ``.templateconf`` so that it can locate your
|
||||
directory.
|
||||
|
||||
Best practices dictate that you should keep your template configuration
|
||||
directory in your custom distribution layer. For example, suppose you
|
||||
have a layer named ``meta-mylayer`` located in your home directory and
|
||||
you want your template configuration directory named ``myconf``.
|
||||
Changing the ``.templateconf`` as follows causes the OpenEmbedded build
|
||||
system to look in your directory and base its configuration files on the
|
||||
``*.sample`` configuration files it finds. The final configuration files
|
||||
(i.e. ``local.conf`` and ``bblayers.conf``) ultimately still end up in
|
||||
your Build Directory, but they are based on your ``*.sample`` files.
|
||||
::
|
||||
|
||||
TEMPLATECONF=${TEMPLATECONF:-meta-mylayer/myconf}
|
||||
|
||||
Aside from the ``*.sample`` configuration files, the ``conf-notes.txt``
|
||||
also resides in the default ``meta-poky/conf`` directory. The script
|
||||
that sets up the build environment (i.e.
|
||||
:ref:`structure-core-script`) uses this file to
|
||||
display BitBake targets as part of the script output. Customizing this
|
||||
``conf-notes.txt`` file is a good way to make sure your list of custom
|
||||
targets appears as part of the script's output.
|
||||
|
||||
Here is the default list of targets displayed as a result of running
|
||||
either of the setup scripts::
|
||||
|
||||
You can now run 'bitbake <target>'
|
||||
|
||||
Common targets are:
|
||||
core-image-minimal
|
||||
core-image-sato
|
||||
meta-toolchain
|
||||
meta-ide-support
|
||||
|
||||
Changing the listed common targets is as easy as editing your version of
|
||||
``conf-notes.txt`` in your custom template configuration directory and
|
||||
making sure you have ``TEMPLATECONF`` set to your directory.
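For example, a customized ``conf-notes.txt`` in your template configuration
directory might simply list your own image targets (the names below are
placeholders)::

   You can now run 'bitbake <target>'

   Common targets are:
       my-custom-image
       my-custom-image-dev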
|
||||
|
||||
@@ -1,222 +0,0 @@
|
||||
.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
|
||||
|
||||
Customizing Images
|
||||
******************
|
||||
|
||||
You can customize images to satisfy particular requirements. This
|
||||
section describes several methods and provides guidelines for each.
|
||||
|
||||
Customizing Images Using ``local.conf``
|
||||
=======================================
|
||||
|
||||
Probably the easiest way to customize an image is to add a package by
|
||||
way of the ``local.conf`` configuration file. Because it is limited to
|
||||
local use, this method generally only allows you to add packages and is
|
||||
not as flexible as creating your own customized image. When you add
|
||||
packages using local variables this way, you need to realize that these
|
||||
variable changes are in effect for every build and consequently affect
|
||||
all images, which might not be what you require.
|
||||
|
||||
To add a package to your image using the local configuration file, use
|
||||
the :term:`IMAGE_INSTALL` variable with the ``:append`` operator::
|
||||
|
||||
IMAGE_INSTALL:append = " strace"
|
||||
|
||||
Use of the syntax is important; specifically, the leading space
|
||||
after the opening quote and before the package name, which is
|
||||
``strace`` in this example. This space is required since the ``:append``
|
||||
operator does not add the space.
|
||||
|
||||
Furthermore, you must use ``:append`` instead of the ``+=`` operator if
|
||||
you want to avoid ordering issues. This is because ``:append`` unconditionally
appends to the variable, avoiding problems
|
||||
due to the variable being set in image recipes and ``.bbclass`` files
|
||||
with operators like ``?=``. Using ``:append`` ensures the operation
|
||||
takes effect.
|
||||
|
||||
As shown in its simplest use, ``IMAGE_INSTALL:append`` affects all
|
||||
images. It is possible to extend the syntax so that the variable applies
|
||||
to a specific image only. Here is an example::
|
||||
|
||||
IMAGE_INSTALL:append:pn-core-image-minimal = " strace"
|
||||
|
||||
This example adds ``strace`` to the ``core-image-minimal`` image only.
|
||||
|
||||
You can add packages using a similar approach through the
|
||||
:term:`CORE_IMAGE_EXTRA_INSTALL` variable. If you use this variable, only
|
||||
``core-image-*`` images are affected.
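For example, adding the following to your ``local.conf`` pulls ``strace`` into
every ``core-image-*`` image you build::

   CORE_IMAGE_EXTRA_INSTALL += "strace"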
|
||||
|
||||
Customizing Images Using Custom ``IMAGE_FEATURES`` and ``EXTRA_IMAGE_FEATURES``
|
||||
===============================================================================
|
||||
|
||||
Another method for customizing your image is to enable or disable
|
||||
high-level image features by using the
|
||||
:term:`IMAGE_FEATURES` and
|
||||
:term:`EXTRA_IMAGE_FEATURES`
|
||||
variables. Although the functions for both variables are nearly
|
||||
equivalent, best practices dictate using :term:`IMAGE_FEATURES` from within
|
||||
a recipe and using :term:`EXTRA_IMAGE_FEATURES` from within your
|
||||
``local.conf`` file, which is found in the :term:`Build Directory`.
|
||||
|
||||
To understand how these features work, the best reference is
|
||||
:ref:`meta/classes/image.bbclass <ref-classes-image>`.
|
||||
This class lists out the available
|
||||
:term:`IMAGE_FEATURES` of which most map to package groups while some, such
|
||||
as ``debug-tweaks`` and ``read-only-rootfs``, resolve as general
|
||||
configuration settings.
|
||||
|
||||
In summary, the file looks at the contents of the :term:`IMAGE_FEATURES`
|
||||
variable and then maps or configures the feature accordingly. Based on
|
||||
this information, the build system automatically adds the appropriate
|
||||
packages or configurations to the
|
||||
:term:`IMAGE_INSTALL` variable.
|
||||
Effectively, you are enabling extra features by extending the class or
|
||||
creating a custom class for use with specialized image ``.bb`` files.
|
||||
|
||||
Use the :term:`EXTRA_IMAGE_FEATURES` variable from within your local
|
||||
configuration file. Using a separate area from which to enable features
|
||||
with this variable helps you avoid overwriting the features in the image
|
||||
recipe that are enabled with :term:`IMAGE_FEATURES`. The value of
|
||||
:term:`EXTRA_IMAGE_FEATURES` is added to :term:`IMAGE_FEATURES` within
|
||||
``meta/conf/bitbake.conf``.
|
||||
|
||||
To illustrate how you can use these variables to modify your image, consider an
|
||||
example that selects the SSH server. The Yocto Project ships with two SSH
|
||||
servers you can use with your images: Dropbear and OpenSSH. Dropbear is a
|
||||
minimal SSH server appropriate for resource-constrained environments, while
|
||||
OpenSSH is a well-known standard SSH server implementation. By default, the
|
||||
``core-image-sato`` image is configured to use Dropbear. The
|
||||
``core-image-full-cmdline`` image includes OpenSSH. The ``core-image-minimal``
|
||||
image does not contain an SSH server.
|
||||
|
||||
You can customize your image and change these defaults. Edit the
|
||||
:term:`IMAGE_FEATURES` variable in your recipe or use the
|
||||
:term:`EXTRA_IMAGE_FEATURES` in your ``local.conf`` file so that it
|
||||
configures the image you are working with to include
|
||||
``ssh-server-dropbear`` or ``ssh-server-openssh``.
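For example, to include the OpenSSH server in the images you build, you could
add the following to your ``local.conf``::

   EXTRA_IMAGE_FEATURES += "ssh-server-openssh"

Use ``ssh-server-dropbear`` instead if you prefer the smaller server.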
|
||||
|
||||
.. note::
|
||||
|
||||
See the ":ref:`ref-manual/features:image features`" section in the Yocto
|
||||
Project Reference Manual for a complete list of image features that ship
|
||||
with the Yocto Project.
|
||||
|
||||
Customizing Images Using Custom .bb Files
|
||||
=========================================
|
||||
|
||||
You can also customize an image by creating a custom recipe that defines
|
||||
additional software as part of the image. The following example shows
|
||||
the form for the two lines you need::
|
||||
|
||||
IMAGE_INSTALL = "packagegroup-core-x11-base package1 package2"
|
||||
inherit core-image
|
||||
|
||||
Defining the software using a custom recipe gives you total control over
|
||||
the contents of the image. It is important to use the correct names of
|
||||
packages in the :term:`IMAGE_INSTALL` variable. You must use the
|
||||
OpenEmbedded notation and not the Debian notation for the names (e.g.
|
||||
``glibc-dev`` instead of ``libc6-dev``).
|
||||
|
||||
The other method for creating a custom image is to base it on an
|
||||
existing image. For example, if you want to create an image based on
|
||||
``core-image-sato`` but add the additional package ``strace`` to the
|
||||
image, copy the ``meta/recipes-sato/images/core-image-sato.bb`` to a new
|
||||
``.bb`` and add the following line to the end of the copy::
|
||||
|
||||
IMAGE_INSTALL += "strace"
|
||||
|
||||
Customizing Images Using Custom Package Groups
|
||||
==============================================
|
||||
|
||||
For complex custom images, the best approach for customizing an image is
|
||||
to create a custom package group recipe that is used to build the image
|
||||
or images. A good example of a package group recipe is
|
||||
``meta/recipes-core/packagegroups/packagegroup-base.bb``.
|
||||
|
||||
If you examine that recipe, you see that the :term:`PACKAGES` variable lists
|
||||
the package group packages to produce. The ``inherit packagegroup``
|
||||
statement sets appropriate default values and automatically adds
|
||||
``-dev``, ``-dbg``, and ``-ptest`` complementary packages for each
|
||||
package specified in the :term:`PACKAGES` statement.
|
||||
|
||||
.. note::
|
||||
|
||||
The ``inherit packagegroup`` line should be located near the top of the
|
||||
recipe, certainly before the :term:`PACKAGES` statement.
|
||||
|
||||
For each package you specify in :term:`PACKAGES`, you can use :term:`RDEPENDS`
|
||||
and :term:`RRECOMMENDS` entries to provide a list of packages the parent
|
||||
task package should contain. You can see examples of these further down
|
||||
in the ``packagegroup-base.bb`` recipe.
|
||||
|
||||
Here is a short, fabricated example showing the same basic pieces for a
|
||||
hypothetical packagegroup defined in ``packagegroup-custom.bb``, where
|
||||
the variable :term:`PN` is the standard way to abbreviate the reference to
|
||||
the full packagegroup name ``packagegroup-custom``::
|
||||
|
||||
DESCRIPTION = "My Custom Package Groups"
|
||||
|
||||
inherit packagegroup
|
||||
|
||||
PACKAGES = "\
|
||||
${PN}-apps \
|
||||
${PN}-tools \
|
||||
"
|
||||
|
||||
RDEPENDS:${PN}-apps = "\
|
||||
dropbear \
|
||||
portmap \
|
||||
psplash"
|
||||
|
||||
RDEPENDS:${PN}-tools = "\
|
||||
oprofile \
|
||||
oprofileui-server \
|
||||
lttng-tools"
|
||||
|
||||
RRECOMMENDS:${PN}-tools = "\
|
||||
kernel-module-oprofile"
|
||||
|
||||
In the previous example, two package group packages are created with
|
||||
their dependencies and their recommended package dependencies listed:
|
||||
``packagegroup-custom-apps``, and ``packagegroup-custom-tools``. To
|
||||
build an image using these package group packages, you need to add
|
||||
``packagegroup-custom-apps`` and/or ``packagegroup-custom-tools`` to
|
||||
:term:`IMAGE_INSTALL`. For other forms of image dependencies see the other
|
||||
areas of this section.
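For example, continuing with the hypothetical ``packagegroup-custom`` recipe
above, an image recipe (or an append file for one) could pull in both groups
like this::

   IMAGE_INSTALL:append = " packagegroup-custom-apps packagegroup-custom-tools"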
|
||||
|
||||
Customizing an Image Hostname
|
||||
=============================
|
||||
|
||||
By default, the configured hostname (i.e. ``/etc/hostname``) in an image
|
||||
is the same as the machine name. For example, if
|
||||
:term:`MACHINE` equals "qemux86", the
|
||||
configured hostname written to ``/etc/hostname`` is "qemux86".
|
||||
|
||||
You can customize this name by altering the value of the "hostname"
|
||||
variable in the ``base-files`` recipe using either an append file or a
|
||||
configuration file. Use the following in an append file::
|
||||
|
||||
hostname = "myhostname"
|
||||
|
||||
Use the following in a configuration file::
|
||||
|
||||
hostname:pn-base-files = "myhostname"
|
||||
|
||||
Changing the default value of the variable "hostname" can be useful in
|
||||
certain situations. For example, suppose you need to do extensive
|
||||
testing on an image and you would like to easily identify the image
|
||||
under test from existing images with typical default hostnames. In this
|
||||
situation, you could change the default hostname to "testme", which
|
||||
results in all the images using the name "testme". Once testing is
|
||||
complete and you do not need to rebuild the image for test any longer,
|
||||
you can easily reset the default hostname.
|
||||
|
||||
Another point of interest is that if you unset the variable, the image
|
||||
will have no default hostname in the filesystem. Here is an example that
|
||||
unsets the variable in a configuration file::
|
||||
|
||||
hostname:pn-base-files = ""
|
||||
|
||||
Having no default hostname in the filesystem is suitable for
|
||||
environments that use dynamic hostnames such as virtual machines.
|
||||
|
||||
@@ -1,82 +0,0 @@
|
||||
.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
|
||||
|
||||
Using a Development Shell
|
||||
*************************
|
||||
|
||||
When debugging certain commands or even when just editing packages,
|
||||
``devshell`` can be a useful tool. When you invoke ``devshell``, all
|
||||
tasks up to and including
|
||||
:ref:`ref-tasks-patch` are run for the
|
||||
specified target. Then, a new terminal is opened and you are placed in
|
||||
``${``\ :term:`S`\ ``}``, the source
|
||||
directory. In the new terminal, all the OpenEmbedded build-related
|
||||
environment variables are still defined so you can use commands such as
|
||||
``configure`` and ``make``. The commands execute just as if the
|
||||
OpenEmbedded build system were executing them. Consequently, working
|
||||
this way can be helpful when debugging a build or preparing software to
|
||||
be used with the OpenEmbedded build system.
|
||||
|
||||
Here is an example that uses ``devshell`` on a target named
|
||||
``matchbox-desktop``::
|
||||
|
||||
$ bitbake matchbox-desktop -c devshell
|
||||
|
||||
This command spawns a terminal with a shell prompt within the
|
||||
OpenEmbedded build environment. The
|
||||
:term:`OE_TERMINAL` variable
|
||||
controls what type of shell is opened.
|
||||
|
||||
For spawned terminals, the following occurs:
|
||||
|
||||
- The ``PATH`` variable includes the cross-toolchain.
|
||||
|
||||
- The ``pkgconfig`` variables find the correct ``.pc`` files.
|
||||
|
||||
- The ``configure`` command finds the Yocto Project site files as well
|
||||
as any other necessary files.
|
||||
|
||||
Within this environment, you can run configure or compile commands as if
|
||||
they were being run by the OpenEmbedded build system itself. As noted
|
||||
earlier, the working directory also automatically changes to the Source
|
||||
Directory (:term:`S`).
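For instance, once the ``devshell`` terminal opens you can confirm which
compiler the environment points at before driving the build tools by hand
(remember to pass any cross-compilation options the recipe would normally
pass)::

   $ echo $CC
   $ $CC --version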
|
||||
|
||||
To manually run a specific task using ``devshell``, run the
|
||||
corresponding ``run.*`` script in the
|
||||
``${``\ :term:`WORKDIR`\ ``}/temp``
|
||||
directory (e.g., ``run.do_configure.``\ `pid`). If a task's script does
|
||||
not exist, which would be the case if the task was skipped by way of the
|
||||
sstate cache, you can create the task by first running it outside of the
|
||||
``devshell``::
|
||||
|
||||
$ bitbake -c task
|
||||
|
||||
.. note::
|
||||
|
||||
- Execution of a task's ``run.*`` script and BitBake's execution of
|
||||
a task are identical. In other words, running the script re-runs
|
||||
the task just as it would be run using the ``bitbake -c`` command.
|
||||
|
||||
- Any ``run.*`` file that does not have a ``.pid`` extension is a
|
||||
symbolic link (symlink) to the most recent version of that file.
|
||||
|
||||
Remember that the ``devshell`` is a mechanism that allows you to get
|
||||
into the BitBake task execution environment. And as such, all commands
|
||||
must be called just as BitBake would call them. That means you need to
|
||||
provide the appropriate options for cross-compilation and so forth as
|
||||
applicable.
|
||||
|
||||
When you are finished using ``devshell``, exit the shell or close the
|
||||
terminal window.
|
||||
|
||||
.. note::
|
||||
|
||||
- It is worth remembering that when using ``devshell`` you need to
|
||||
use the full compiler name such as ``arm-poky-linux-gnueabi-gcc``
|
||||
instead of just using ``gcc``. The same applies to other
|
||||
applications such as ``binutils``, ``libtool`` and so forth.
|
||||
BitBake sets up environment variables such as :term:`CC` to assist
|
||||
applications, such as ``make`` to find the correct tools.
|
||||
|
||||
- It is also worth noting that ``devshell`` still works over X11
|
||||
forwarding and similar situations.
|
||||
|
||||
@@ -1,74 +0,0 @@
|
||||
.. SPDX-License-Identifier: CC-BY-SA-2.0-UK
|
||||
|
||||
.. _device-manager:
|
||||
|
||||
Selecting a Device Manager
|
||||
**************************
|
||||
|
||||
The Yocto Project provides multiple ways to manage the device manager
|
||||
(``/dev``):
|
||||
|
||||
- Persistent and Pre-Populated ``/dev``: For this case, the ``/dev``
|
||||
directory is persistent and the required device nodes are created
|
||||
during the build.
|
||||
|
||||
- Use ``devtmpfs`` with a Device Manager: For this case, the ``/dev``
|
||||
directory is provided by the kernel as an in-memory file system and
|
||||
is automatically populated by the kernel at runtime. Additional
|
||||
configuration of device nodes is done in user space by a device
|
||||
manager like ``udev`` or ``busybox-mdev``.
|
||||
|
||||
Using Persistent and Pre-Populated ``/dev``
|
||||
===========================================
|
||||
|
||||
To use the static method for device population, you need to set the
|
||||
:term:`USE_DEVFS` variable to "0"
|
||||
as follows::
|
||||
|
||||
USE_DEVFS = "0"
|
||||
|
||||
The content of the resulting ``/dev`` directory is defined in a Device
|
||||
Table file. The
|
||||
:term:`IMAGE_DEVICE_TABLES`
|
||||
variable defines the Device Table to use and should be set in the
|
||||
machine or distro configuration file. Alternatively, you can set this
|
||||
variable in your ``local.conf`` configuration file.
|
||||
|
||||
If you do not define the :term:`IMAGE_DEVICE_TABLES` variable, the default
|
||||
``device_table-minimal.txt`` is used::
|
||||
|
||||
IMAGE_DEVICE_TABLES = "device_table-mymachine.txt"
|
||||
|
||||
The population is handled by the ``makedevs`` utility during image
|
||||
creation.
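Each line in such a device table describes one node to create. The format
accepted by ``makedevs`` is, roughly,
``<path> <type> <mode> <uid> <gid> <major> <minor> <start> <inc> <count>``.
A hypothetical entry for a single character device might look like this::

   # path        type mode uid gid major minor start inc count
   /dev/console  c    0600 0   0   5     1     -     -   -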
|
||||
|
||||
Using ``devtmpfs`` and a Device Manager
|
||||
=======================================
|
||||
|
||||
To use the dynamic method for device population, you need to use (or be
|
||||
sure to set) the :term:`USE_DEVFS`
|
||||
variable to "1", which is the default::
|
||||
|
||||
USE_DEVFS = "1"
|
||||
|
||||
With this
|
||||
setting, the resulting ``/dev`` directory is populated by the kernel
|
||||
using ``devtmpfs``. Make sure the corresponding kernel configuration
|
||||
variable ``CONFIG_DEVTMPFS`` is set when you build a Linux
|
||||
kernel.
|
||||
|
||||
All devices created by ``devtmpfs`` will be owned by ``root`` and have
|
||||
permissions ``0600``.
|
||||
|
||||
To have more control over the device nodes, you can use a device manager like
|
||||
``udev`` or ``busybox-mdev``. You choose the device manager by defining the
|
||||
:term:`VIRTUAL-RUNTIME_dev_manager <VIRTUAL-RUNTIME>` variable in your machine
|
||||
or distro configuration file. Alternatively, you can set this variable in
|
||||
your ``local.conf`` configuration file::
|
||||
|
||||
VIRTUAL-RUNTIME_dev_manager = "udev"
|
||||
|
||||
# Some alternative values
|
||||
# VIRTUAL-RUNTIME_dev_manager = "busybox-mdev"
|
||||
# VIRTUAL-RUNTIME_dev_manager = "systemd"
|
||||
|
||||