Mirror of https://git.yoctoproject.org/poky (synced 2026-02-21 17:09:42 +01:00)

Compare commits: yocto-2.6 ... rocko-18.0 (137 commits)
SHA1s of the 137 commits in this comparison:

c2b641c8a0 ab4310e7b8 551d18e4b8 7030d5b4f9 f88c841a2d
1c61ba0a3f babf923312 a1bff37c3f 738fc234fa f79c0d45fa
a5e95c2a85 4b2d0192b2 fd93e26f0d 15542ff2b3 b472addc93
2f07e71a9e 66a0b5b550 adaefc1880 1a2fb23f56 937beb5d94
45139bd079 8c56b0b2f4 f04d6842d3 c889bffda2 1655dfeffc
ebf2523922 7de56ebc2a d0640da88e 776fb31844 80ed9207a7
1e4d4762b1 968145b24e 3c28d31fed 65d09a7d1e 311245d89f
0845fa12b8 2d9aecf044 c32f44ebf5 c6d473f460 7104d48590
d164445477 7e0b00fd12 25c0d7d891 7df22af792 351192c314
fe51ddba06 c8730962a4 1e944f79b4 994e3674a8 69490b4280
e27fd333df 216a839e1b bd884dd998 e36cf9e621 611e4b43d8
d4e3893e2d 99c18e36e2 fbddd3917f 5481891748 18b51a13af
e46fa69897 7d934ff315 2f6cffd605 ed4708db31 8d53ceebaf
f97450203f 572b9c54a1 3f6fbed1e1 292c2ae888 539a852504
10d0ace274 852c71956b 254013ce5f de78322f16 61f319db67
88d92fb301 ef6babd638 f8c7eff81d 8d3edc9821 8d706de096
59cbf69299 02f64e5db8 c76a25b6ac 1002359e5e 3eca58ca70
aa6e825bf0 2ef0fd2364 ee7f665f0a ab31d76bc8 dd03b7399b
55d21c7fb6 4e28c8d6b7 18941419c8 da8f32a3bb e6fe54ce38
23ee931b9d e4f256000f fbc12e0794 c45bdab6b9 a97fecb3bd
87577b8a53 5599639b65 c388d72c60 d40531211c 8eeed4220e
0fbaee9077 33a48469bd e52027eef7 9d5296bba5 8754f4779c
e0bfc22475 f165c52e57 e6c74f7ac9 ce3bbc6972 f483bdf9c4
24c9708492 221c4877f1 e303b3cadc 500ce8d139 f7e10b532c
0fc114ba76 b21f8e361b f515778225 40ed9adb53 96d525dc03
d61b65f35c 3190ba0b38 a027091807 9779fc2bdd cb258fef83
0d84cdfaac f609c3f755 9217de77b9 87f8aafd53 6709452171
f7b90ab3ea 7226a1c600
.gitignore
@@ -1,7 +1,6 @@
*.pyc
*.pyo
/*.patch
/.repo/
/build*/
pyshtables.py
pstage/
@@ -19,13 +18,9 @@ hob-image-*.bb
!meta-yocto
!meta-yocto-bsp
!meta-yocto-imported
/documentation/*/eclipse/
/documentation/*/*.html
/documentation/*/*.pdf
/documentation/*/*.tgz
/bitbake/doc/bitbake-user-manual/bitbake-user-manual.html
/bitbake/doc/bitbake-user-manual/bitbake-user-manual.pdf
/bitbake/doc/bitbake-user-manual/bitbake-user-manual.tgz
documentation/user-manual/user-manual.html
documentation/user-manual/user-manual.pdf
documentation/user-manual/user-manual.tgz
pull-*/
bitbake/lib/toaster/contrib/tts/backlog.txt
bitbake/lib/toaster/contrib/tts/log/*

@@ -23,4 +23,3 @@ therefore provides compatibility with the following caveats:
* libpng 1.2 isn't provided; oe-core includes the latest release of libpng
  instead.

* pax (POSIX standard archive) tool is not provided.

@@ -38,7 +38,7 @@ from bb.main import bitbake_main, BitBakeConfigParameters, BBMainException
if sys.getfilesystemencoding() != "utf-8":
    sys.exit("Please use a locale setting which supports UTF-8 (such as LANG=en_US.UTF-8).\nPython can't change the filesystem locale after loading so we need a UTF-8 when Python starts or things won't work.")

__version__ = "1.40.0"
__version__ = "1.36.0"

if __name__ == "__main__":
    if __version__ != bb.__version__:

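The hunk above also shows the UTF-8 filesystem-encoding check the bitbake launcher performs at startup. A minimal sketch of satisfying it before invoking a build (the image name is only an illustrative placeholder, not taken from this diff):

```sh
# Export a UTF-8 locale so Python starts with a UTF-8 filesystem encoding;
# otherwise the sys.exit() check shown above aborts the launcher.
export LANG=en_US.UTF-8
bitbake core-image-minimal   # example target; any recipe or image name works
```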
@@ -1,7 +1,7 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
# bitbake-diffsigs / bitbake-dumpsig
|
||||
# BitBake task signature data dump and comparison utility
|
||||
# bitbake-diffsigs
|
||||
# BitBake task signature data comparison utility
|
||||
#
|
||||
# Copyright (C) 2012-2013, 2017 Intel Corporation
|
||||
#
|
||||
@@ -21,6 +21,7 @@
|
||||
import os
|
||||
import sys
|
||||
import warnings
|
||||
import fnmatch
|
||||
import argparse
|
||||
import logging
|
||||
import pickle
|
||||
@@ -31,10 +32,7 @@ import bb.tinfoil
|
||||
import bb.siggen
|
||||
import bb.msg
|
||||
|
||||
myname = os.path.basename(sys.argv[0])
|
||||
logger = bb.msg.logger_create(myname)
|
||||
|
||||
is_dump = myname == 'bitbake-dumpsig'
|
||||
logger = bb.msg.logger_create('bitbake-diffsigs')
|
||||
|
||||
def find_siginfo(tinfoil, pn, taskname, sigs=None):
|
||||
result = None
|
||||
@@ -61,8 +59,8 @@ def find_siginfo(tinfoil, pn, taskname, sigs=None):
|
||||
sys.exit(2)
|
||||
return result
|
||||
|
||||
def find_siginfo_task(bbhandler, pn, taskname, sig1=None, sig2=None):
|
||||
""" Find the most recent signature files for the specified PN/task """
|
||||
def find_compare_task(bbhandler, pn, taskname, sig1=None, sig2=None, color=False):
|
||||
""" Find the most recent signature files for the specified PN/task and compare them """
|
||||
|
||||
if not taskname.startswith('do_'):
|
||||
taskname = 'do_%s' % taskname
|
||||
@@ -81,81 +79,73 @@ def find_siginfo_task(bbhandler, pn, taskname, sig1=None, sig2=None):
|
||||
latestfiles = [sigfiles[sig1], sigfiles[sig2]]
|
||||
else:
|
||||
filedates = find_siginfo(bbhandler, pn, taskname)
|
||||
latestfiles = sorted(filedates.keys(), key=lambda f: filedates[f])[-2:]
|
||||
latestfiles = sorted(filedates.keys(), key=lambda f: filedates[f])[-3:]
|
||||
if not latestfiles:
|
||||
logger.error('No sigdata files found matching %s %s' % (pn, taskname))
|
||||
sys.exit(1)
|
||||
elif len(latestfiles) < 2:
|
||||
logger.error('Only one matching sigdata file found for the specified task (%s %s)' % (pn, taskname))
|
||||
sys.exit(1)
|
||||
|
||||
return latestfiles
|
||||
# Define recursion callback
|
||||
def recursecb(key, hash1, hash2):
|
||||
hashes = [hash1, hash2]
|
||||
hashfiles = find_siginfo(bbhandler, key, None, hashes)
|
||||
|
||||
recout = []
|
||||
if len(hashfiles) == 0:
|
||||
recout.append("Unable to find matching sigdata for %s with hashes %s or %s" % (key, hash1, hash2))
|
||||
elif not hash1 in hashfiles:
|
||||
recout.append("Unable to find matching sigdata for %s with hash %s" % (key, hash1))
|
||||
elif not hash2 in hashfiles:
|
||||
recout.append("Unable to find matching sigdata for %s with hash %s" % (key, hash2))
|
||||
else:
|
||||
out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb, color=color)
|
||||
for change in out2:
|
||||
for line in change.splitlines():
|
||||
recout.append(' ' + line)
|
||||
|
||||
# Define recursion callback
|
||||
def recursecb(key, hash1, hash2):
|
||||
hashes = [hash1, hash2]
|
||||
hashfiles = find_siginfo(tinfoil, key, None, hashes)
|
||||
return recout
|
||||
|
||||
recout = []
|
||||
if len(hashfiles) == 0:
|
||||
recout.append("Unable to find matching sigdata for %s with hashes %s or %s" % (key, hash1, hash2))
|
||||
elif not hash1 in hashfiles:
|
||||
recout.append("Unable to find matching sigdata for %s with hash %s" % (key, hash1))
|
||||
elif not hash2 in hashfiles:
|
||||
recout.append("Unable to find matching sigdata for %s with hash %s" % (key, hash2))
|
||||
else:
|
||||
out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb, color=color)
|
||||
for change in out2:
|
||||
for line in change.splitlines():
|
||||
recout.append(' ' + line)
|
||||
# Recurse into signature comparison
|
||||
logger.debug("Signature file (previous): %s" % latestfiles[-2])
|
||||
logger.debug("Signature file (latest): %s" % latestfiles[-1])
|
||||
output = bb.siggen.compare_sigfiles(latestfiles[-2], latestfiles[-1], recursecb, color=color)
|
||||
if output:
|
||||
print('\n'.join(output))
|
||||
sys.exit(0)
|
||||
|
||||
return recout
|
||||
|
||||
|
||||
parser = argparse.ArgumentParser(
|
||||
description=("Dumps" if is_dump else "Compares") + " siginfo/sigdata files written out by BitBake")
|
||||
description="Compares siginfo/sigdata files written out by BitBake")
|
||||
|
||||
parser.add_argument('-D', '--debug',
|
||||
parser.add_argument('-d', '--debug',
|
||||
help='Enable debug output',
|
||||
action='store_true')
|
||||
|
||||
if is_dump:
|
||||
parser.add_argument("-t", "--task",
|
||||
help="find the signature data file for the last run of the specified task",
|
||||
action="store", dest="taskargs", nargs=2, metavar=('recipename', 'taskname'))
|
||||
parser.add_argument('--color',
|
||||
help='Colorize output (where %(metavar)s is %(choices)s)',
|
||||
choices=['auto', 'always', 'never'], default='auto', metavar='color')
|
||||
|
||||
parser.add_argument("sigdatafile1",
|
||||
help="Signature file to dump. Not used when using -t/--task.",
|
||||
action="store", nargs='?', metavar="sigdatafile")
|
||||
else:
|
||||
parser.add_argument('-c', '--color',
|
||||
help='Colorize the output (where %(metavar)s is %(choices)s)',
|
||||
choices=['auto', 'always', 'never'], default='auto', metavar='color')
|
||||
parser.add_argument("-t", "--task",
|
||||
help="find the signature data files for last two runs of the specified task and compare them",
|
||||
action="store", dest="taskargs", nargs=2, metavar=('recipename', 'taskname'))
|
||||
|
||||
parser.add_argument('-d', '--dump',
|
||||
help='Dump the last signature data instead of comparing (equivalent to using bitbake-dumpsig)',
|
||||
action='store_true')
|
||||
parser.add_argument("-s", "--signature",
|
||||
help="With -t/--task, specify the signatures to look for instead of taking the last two",
|
||||
action="store", dest="sigargs", nargs=2, metavar=('fromsig', 'tosig'))
|
||||
|
||||
parser.add_argument("-t", "--task",
|
||||
help="find the signature data files for the last two runs of the specified task and compare them",
|
||||
action="store", dest="taskargs", nargs=2, metavar=('recipename', 'taskname'))
|
||||
parser.add_argument("sigdatafile1",
|
||||
help="First signature file to compare (or signature file to dump, if second not specified). Not used when using -t/--task.",
|
||||
action="store", nargs='?')
|
||||
|
||||
parser.add_argument("-s", "--signature",
|
||||
help="With -t/--task, specify the signatures to look for instead of taking the last two",
|
||||
action="store", dest="sigargs", nargs=2, metavar=('fromsig', 'tosig'))
|
||||
parser.add_argument("sigdatafile2",
|
||||
help="Second signature file to compare",
|
||||
action="store", nargs='?')
|
||||
|
||||
parser.add_argument("sigdatafile1",
|
||||
help="First signature file to compare (or signature file to dump, if second not specified). Not used when using -t/--task.",
|
||||
action="store", nargs='?')
|
||||
|
||||
parser.add_argument("sigdatafile2",
|
||||
help="Second signature file to compare",
|
||||
action="store", nargs='?')
|
||||
|
||||
options = parser.parse_args()
|
||||
if is_dump:
|
||||
options.color = 'never'
|
||||
options.dump = True
|
||||
options.sigdatafile2 = None
|
||||
options.sigargs = None
|
||||
|
||||
if options.debug:
|
||||
logger.setLevel(logging.DEBUG)
|
||||
@@ -165,32 +155,17 @@ color = (options.color == 'always' or (options.color == 'auto' and sys.stdout.is
|
||||
if options.taskargs:
|
||||
with bb.tinfoil.Tinfoil() as tinfoil:
|
||||
tinfoil.prepare(config_only=True)
|
||||
if not options.dump and options.sigargs:
|
||||
files = find_siginfo_task(tinfoil, options.taskargs[0], options.taskargs[1], options.sigargs[0], options.sigargs[1])
|
||||
if options.sigargs:
|
||||
find_compare_task(tinfoil, options.taskargs[0], options.taskargs[1], options.sigargs[0], options.sigargs[1], color=color)
|
||||
else:
|
||||
files = find_siginfo_task(tinfoil, options.taskargs[0], options.taskargs[1])
|
||||
|
||||
if options.dump:
|
||||
logger.debug("Signature file: %s" % files[-1])
|
||||
output = bb.siggen.dump_sigfile(files[-1])
|
||||
else:
|
||||
if len(files) < 2:
|
||||
logger.error('Only one matching sigdata file found for the specified task (%s %s)' % (options.taskargs[0], options.taskargs[1]))
|
||||
sys.exit(1)
|
||||
|
||||
# Recurse into signature comparison
|
||||
logger.debug("Signature file (previous): %s" % files[-2])
|
||||
logger.debug("Signature file (latest): %s" % files[-1])
|
||||
output = bb.siggen.compare_sigfiles(files[-2], files[-1], recursecb, color=color)
|
||||
find_compare_task(tinfoil, options.taskargs[0], options.taskargs[1], color=color)
|
||||
else:
|
||||
if options.sigargs:
|
||||
logger.error('-s/--signature can only be used together with -t/--task')
|
||||
sys.exit(1)
|
||||
try:
|
||||
if not options.dump and options.sigdatafile1 and options.sigdatafile2:
|
||||
with bb.tinfoil.Tinfoil() as tinfoil:
|
||||
tinfoil.prepare(config_only=True)
|
||||
output = bb.siggen.compare_sigfiles(options.sigdatafile1, options.sigdatafile2, recursecb, color=color)
|
||||
if options.sigdatafile1 and options.sigdatafile2:
|
||||
output = bb.siggen.compare_sigfiles(options.sigdatafile1, options.sigdatafile2, color=color)
|
||||
elif options.sigdatafile1:
|
||||
output = bb.siggen.dump_sigfile(options.sigdatafile1)
|
||||
else:
|
||||
@@ -204,5 +179,5 @@ else:
|
||||
logger.error('Invalid signature data - ensure you are specifying sigdata/siginfo files')
|
||||
sys.exit(1)
|
||||
|
||||
if output:
|
||||
print('\n'.join(output))
|
||||
if output:
|
||||
print('\n'.join(output))
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
bitbake-diffsigs
|
||||
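For orientation, here is a minimal usage sketch of bitbake-diffsigs assembled only from the options visible in the hunks above (-t/--task, -s/--signature, --dump, --color, and the positional sigdata files); the recipe, task, and file names are illustrative placeholders:

```sh
# Compare the signature data from the last two runs of a task
bitbake-diffsigs -t busybox do_compile

# Compare two specific signatures of that task instead of the last two
bitbake-diffsigs -t busybox do_compile -s fromsig tosig

# Compare two sigdata/siginfo files directly, or dump a single one
bitbake-diffsigs sigdata-file-1 sigdata-file-2
bitbake-diffsigs --dump sigdata-file-1
```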
bitbake/bin/bitbake-dumpsig (new executable file, 94 lines)
@@ -0,0 +1,94 @@
#!/usr/bin/env python3

# bitbake-dumpsig
# BitBake task signature dump utility
#
# Copyright (C) 2013 Intel Corporation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import os
import sys
import warnings
import optparse
import logging
import pickle

sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), 'lib'))

import bb.tinfoil
import bb.siggen
import bb.msg

logger = bb.msg.logger_create('bitbake-dumpsig')

def find_siginfo_task(bbhandler, pn, taskname):
    """ Find the most recent signature file for the specified PN/task """

    if not hasattr(bb.siggen, 'find_siginfo'):
        logger.error('Metadata does not support finding signature data files')
        sys.exit(1)

    if not taskname.startswith('do_'):
        taskname = 'do_%s' % taskname

    filedates = bb.siggen.find_siginfo(pn, taskname, None, bbhandler.config_data)
    latestfiles = sorted(filedates.keys(), key=lambda f: filedates[f])[-1:]
    if not latestfiles:
        logger.error('No sigdata files found matching %s %s' % (pn, taskname))
        sys.exit(1)

    return latestfiles[0]

parser = optparse.OptionParser(
    description = "Dumps siginfo/sigdata files written out by BitBake",
    usage = """
  %prog -t recipename taskname
  %prog sigdatafile""")

parser.add_option("-D", "--debug",
        help = "enable debug",
        action = "store_true", dest="debug", default = False)

parser.add_option("-t", "--task",
        help = "find the signature data file for the specified task",
        action="store", dest="taskargs", nargs=2, metavar='recipename taskname')

options, args = parser.parse_args(sys.argv)

if options.debug:
    logger.setLevel(logging.DEBUG)

if options.taskargs:
    tinfoil = bb.tinfoil.Tinfoil()
    tinfoil.prepare(config_only = True)
    file = find_siginfo_task(tinfoil, options.taskargs[0], options.taskargs[1])
    logger.debug("Signature file: %s" % file)
elif len(args) == 1:
    parser.print_help()
    sys.exit(0)
else:
    file = args[1]

try:
    output = bb.siggen.dump_sigfile(file)
except IOError as e:
    logger.error(str(e))
    sys.exit(1)
except (pickle.UnpicklingError, EOFError):
    logger.error('Invalid signature data - ensure you are specifying a sigdata/siginfo file')
    sys.exit(1)

if output:
    print('\n'.join(output))

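The optparse usage string in the file above implies the following invocation patterns; a short sketch with placeholder names:

```sh
# Dump the most recent signature data file for a recipe/task pair
bitbake-dumpsig -t busybox do_compile

# Or dump a specific siginfo/sigdata file directly
bitbake-dumpsig path/to/sigdata-or-siginfo-file
```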
@@ -22,21 +22,16 @@ sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)), 'lib
import unittest
try:
    import bb
    import layerindexlib
except RuntimeError as exc:
    sys.exit(str(exc))

tests = ["bb.tests.codeparser",
         "bb.tests.cooker",
         "bb.tests.cow",
         "bb.tests.data",
         "bb.tests.event",
         "bb.tests.fetch",
         "bb.tests.parse",
         "bb.tests.utils",
         "layerindexlib.tests.layerindexobj",
         "layerindexlib.tests.restapi",
         "layerindexlib.tests.cooker"]
         "bb.tests.utils"]

for t in tests:
    t = '.'.join(t.split('.')[:3])

@@ -18,12 +18,10 @@
|
||||
# along with this program. If not, see http://www.gnu.org/licenses/.
|
||||
|
||||
HELP="
|
||||
Usage: source toaster start|stop [webport=<address:port>] [noweb] [nobuild] [toasterdir]
|
||||
Usage: source toaster start|stop [webport=<address:port>] [noweb]
|
||||
Optional arguments:
|
||||
[nobuild] Setup the environment for capturing builds with toaster but disable managed builds
|
||||
[noweb] Setup the environment for capturing builds with toaster but don't start the web server
|
||||
[noweb] Setup the environment for building with toaster but don't start the development server
|
||||
[webport] Set the development server (default: localhost:8000)
|
||||
[toasterdir] Set absolute path to be used as TOASTER_DIR (default: BUILDDIR/../)
|
||||
"
|
||||
|
||||
custom_extention()
|
||||
@@ -69,7 +67,7 @@ webserverKillAll()
|
||||
if [ -f ${pidfile} ]; then
|
||||
pid=`cat ${pidfile}`
|
||||
while kill -0 $pid 2>/dev/null; do
|
||||
kill -SIGTERM $pid 2>/dev/null
|
||||
kill -SIGTERM -$pid 2>/dev/null
|
||||
sleep 1
|
||||
done
|
||||
rm ${pidfile}
|
||||
@@ -92,7 +90,7 @@ webserverStartAll()
|
||||
|
||||
echo "Starting webserver..."
|
||||
|
||||
$MANAGE runserver --noreload "$ADDR_PORT" \
|
||||
$MANAGE runserver "$ADDR_PORT" \
|
||||
</dev/null >>${BUILDDIR}/toaster_web.log 2>&1 \
|
||||
& echo $! >${BUILDDIR}/.toastermain.pid
|
||||
|
||||
@@ -161,9 +159,7 @@ fi
|
||||
|
||||
export BBBASEDIR=`dirname $TOASTER`/..
|
||||
MANAGE="python3 $BBBASEDIR/lib/toaster/manage.py"
|
||||
if [ -z "$OE_ROOT" ]; then
|
||||
OE_ROOT=`dirname $TOASTER`/../..
|
||||
fi
|
||||
OE_ROOT=`dirname $TOASTER`/../..
|
||||
|
||||
# this is the configuraton file we are using for toaster
|
||||
# we are using the same logic that oe-setup-builddir uses
|
||||
@@ -187,18 +183,13 @@ unset OE_ROOT
|
||||
|
||||
|
||||
WEBSERVER=1
|
||||
export TOASTER_BUILDSERVER=1
|
||||
ADDR_PORT="localhost:8000"
|
||||
TOASTERDIR=`dirname $BUILDDIR`
|
||||
unset CMD
|
||||
for param in $*; do
|
||||
case $param in
|
||||
noweb )
|
||||
WEBSERVER=0
|
||||
;;
|
||||
nobuild )
|
||||
TOASTER_BUILDSERVER=0
|
||||
;;
|
||||
start )
|
||||
CMD=$param
|
||||
;;
|
||||
@@ -215,9 +206,6 @@ for param in $*; do
|
||||
ADDR_PORT="localhost:$PORT"
|
||||
fi
|
||||
;;
|
||||
toasterdir=*)
|
||||
TOASTERDIR="${param#*=}"
|
||||
;;
|
||||
--help)
|
||||
echo "$HELP"
|
||||
return 0
|
||||
@@ -248,7 +236,7 @@ fi
|
||||
# 2) the build dir (in build)
|
||||
# 3) the sqlite db if that is being used.
|
||||
# 4) pid's we need to clean up on exit/shutdown
|
||||
export TOASTER_DIR=$TOASTERDIR
|
||||
export TOASTER_DIR=`dirname $BUILDDIR`
|
||||
export BB_ENV_EXTRAWHITE="$BB_ENV_EXTRAWHITE TOASTER_DIR"
|
||||
|
||||
# Determine the action. If specified by arguments, fine, if not, toggle it
|
||||
@@ -298,13 +286,9 @@ case $CMD in
|
||||
return 4
|
||||
fi
|
||||
export BITBAKE_UI='toasterui'
|
||||
if [ $TOASTER_BUILDSERVER -eq 1 ] ; then
|
||||
$MANAGE runbuilds \
|
||||
</dev/null >>${BUILDDIR}/toaster_runbuilds.log 2>&1 \
|
||||
& echo $! >${BUILDDIR}/.runbuilds.pid
|
||||
else
|
||||
echo "Toaster build server not started."
|
||||
fi
|
||||
$MANAGE runbuilds \
|
||||
</dev/null >>${BUILDDIR}/toaster_runbuilds.log 2>&1 \
|
||||
& echo $! >${BUILDDIR}/.runbuilds.pid
|
||||
|
||||
# set fail safe stop system on terminal exit
|
||||
trap stop_system SIGHUP
|
||||
|
||||
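Pulling the toaster options from the hunks above into one place, a hedged example of driving the script from an initialized build environment (port and directory values are illustrative):

```sh
# Start Toaster with the web interface on a chosen address:port and a custom state directory
source toaster start webport=0.0.0.0:8000 toasterdir=/srv/toaster

# Capture builds only: skip the web server and the managed-build (runbuilds) server
source toaster start noweb nobuild

# Shut everything down again
source toaster stop
```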
@@ -2,7 +2,7 @@
|
||||
# ex:ts=4:sw=4:sts=4:et
|
||||
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
|
||||
#
|
||||
# Copyright (C) 2012, 2018 Wind River Systems, Inc.
|
||||
# Copyright (C) 2012 Wind River Systems, Inc.
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License version 2 as
|
||||
@@ -18,68 +18,51 @@
|
||||
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
#
|
||||
# Used for dumping the bb_cache.dat
|
||||
# This is used for dumping the bb_cache.dat, the output format is:
|
||||
# recipe_path PN PV PACKAGES
|
||||
#
|
||||
import os
|
||||
import sys
|
||||
import argparse
|
||||
import warnings
|
||||
|
||||
# For importing bb.cache
|
||||
sys.path.insert(0, os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), '../lib'))
|
||||
from bb.cache import CoreRecipeInfo
|
||||
|
||||
import pickle
|
||||
import pickle as pickle
|
||||
|
||||
class DumpCache(object):
|
||||
def __init__(self):
|
||||
parser = argparse.ArgumentParser(
|
||||
description="bb_cache.dat's dumper",
|
||||
epilog="Use %(prog)s --help to get help")
|
||||
parser.add_argument("-r", "--recipe",
|
||||
help="specify the recipe, default: all recipes", action="store")
|
||||
parser.add_argument("-m", "--members",
|
||||
help = "specify the member, use comma as separator for multiple ones, default: all members", action="store", default="")
|
||||
parser.add_argument("-s", "--skip",
|
||||
help = "skip skipped recipes", action="store_true")
|
||||
parser.add_argument("cachefile",
|
||||
help = "specify bb_cache.dat", nargs = 1, action="store", default="")
|
||||
def main(argv=None):
|
||||
"""
|
||||
Get the mapping for the target recipe.
|
||||
"""
|
||||
if len(argv) != 1:
|
||||
print("Error, need one argument!", file=sys.stderr)
|
||||
return 2
|
||||
|
||||
self.args = parser.parse_args()
|
||||
cachefile = argv[0]
|
||||
|
||||
def main(self):
|
||||
with open(self.args.cachefile[0], "rb") as cachefile:
|
||||
pickled = pickle.Unpickler(cachefile)
|
||||
while True:
|
||||
try:
|
||||
key = pickled.load()
|
||||
val = pickled.load()
|
||||
except Exception:
|
||||
break
|
||||
if isinstance(val, CoreRecipeInfo):
|
||||
pn = val.pn
|
||||
with open(cachefile, "rb") as cachefile:
|
||||
pickled = pickle.Unpickler(cachefile)
|
||||
while cachefile:
|
||||
try:
|
||||
key = pickled.load()
|
||||
val = pickled.load()
|
||||
except Exception:
|
||||
break
|
||||
if isinstance(val, CoreRecipeInfo) and (not val.skipped):
|
||||
pn = val.pn
|
||||
# Filter out the native recipes.
|
||||
if key.startswith('virtual:native:') or pn.endswith("-native"):
|
||||
continue
|
||||
|
||||
if self.args.recipe and self.args.recipe != pn:
|
||||
continue
|
||||
# 1.0 is the default version for a no PV recipe.
|
||||
if "pv" in val.__dict__:
|
||||
pv = val.pv
|
||||
else:
|
||||
pv = "1.0"
|
||||
|
||||
if self.args.skip and val.skipped:
|
||||
continue
|
||||
|
||||
if self.args.members:
|
||||
out = key
|
||||
for member in self.args.members.split(','):
|
||||
out += ": %s" % val.__dict__.get(member)
|
||||
print("%s" % out)
|
||||
else:
|
||||
print("%s: %s" % (key, val.__dict__))
|
||||
elif not self.args.recipe:
|
||||
print("%s %s" % (key, val))
|
||||
print("%s %s %s %s" % (key, pn, pv, ' '.join(val.packages)))
|
||||
|
||||
if __name__ == "__main__":
|
||||
try:
|
||||
dump = DumpCache()
|
||||
ret = dump.main()
|
||||
except Exception as esc:
|
||||
ret = 1
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
sys.exit(ret)
|
||||
sys.exit(main(sys.argv[1:]))
|
||||
|
||||
|
||||
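As a rough usage sketch of the argparse-based cache dumper shown above: the script's path is not visible in this capture, so dump_cache.py is assumed here, and the recipe name and cache location are placeholders:

```sh
# Print every cached recipe entry from a bb_cache.dat file
./dump_cache.py tmp/cache/bb_cache.dat

# Restrict the output to one recipe, show selected members only, and skip skipped recipes
./dump_cache.py -s -r busybox -m pn,pv,packages tmp/cache/bb_cache.dat
```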
@@ -781,7 +781,7 @@
|
||||
The code in <filename>meta/lib/oe/sstatesig.py</filename> shows two examples
|
||||
of this and also illustrates how you can insert your own policy into the system
|
||||
if so desired.
|
||||
This file defines the two basic signature generators OpenEmbedded-Core
|
||||
This file defines the two basic signature generators OpenEmbedded Core
|
||||
uses: "OEBasic" and "OEBasicHash".
|
||||
By default, there is a dummy "noop" signature handler enabled in BitBake.
|
||||
This means that behavior is unchanged from previous versions.
|
||||
|
||||
@@ -588,14 +588,6 @@
|
||||
The name of the path in which to place the checkout.
|
||||
By default, the path is <filename>git/</filename>.
|
||||
</para></listitem>
|
||||
<listitem><para><emphasis>"usehead":</emphasis>
|
||||
Enables local <filename>git://</filename> URLs to use the
|
||||
current branch HEAD as the revision for use with
|
||||
<filename>AUTOREV</filename>.
|
||||
The "usehead" parameter implies no branch and only works
|
||||
when the transfer protocol is
|
||||
<filename>file://</filename>.
|
||||
</para></listitem>
|
||||
</itemizedlist>
|
||||
Here are some example URLs:
|
||||
<literallayout class='monospaced'>
|
||||
@@ -785,43 +777,6 @@
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section id='repo-fetcher'>
|
||||
<title>Repo Fetcher (<filename>repo://</filename>)</title>
|
||||
|
||||
<para>
|
||||
This fetcher submodule fetches code from
|
||||
<filename>google-repo</filename> source control system.
|
||||
The fetcher works by initiating and syncing sources of the
|
||||
repository into
|
||||
<link linkend='var-REPODIR'><filename>REPODIR</filename></link>,
|
||||
which is usually
|
||||
<link linkend='var-DL_DIR'><filename>DL_DIR</filename></link><filename>/repo</filename>.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
This fetcher supports the following parameters:
|
||||
<itemizedlist>
|
||||
<listitem><para>
|
||||
<emphasis>"protocol":</emphasis>
|
||||
Protocol to fetch the repository manifest (default: git).
|
||||
</para></listitem>
|
||||
<listitem><para>
|
||||
<emphasis>"branch":</emphasis>
|
||||
Branch or tag of repository to get (default: master).
|
||||
</para></listitem>
|
||||
<listitem><para>
|
||||
<emphasis>"manifest":</emphasis>
|
||||
Name of the manifest file (default: <filename>default.xml</filename>).
|
||||
</para></listitem>
|
||||
</itemizedlist>
|
||||
Here are some example URLs:
|
||||
<literallayout class='monospaced'>
|
||||
SRC_URI = "repo://REPOROOT;protocol=git;branch=some_branch;manifest=my_manifest.xml"
|
||||
SRC_URI = "repo://REPOROOT;protocol=file;branch=some_branch;manifest=my_manifest.xml"
|
||||
</literallayout>
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section id='other-fetchers'>
|
||||
<title>Other Fetchers</title>
|
||||
|
||||
@@ -840,6 +795,9 @@
|
||||
<listitem><para>
|
||||
Secure Shell (<filename>ssh://</filename>)
|
||||
</para></listitem>
|
||||
<listitem><para>
|
||||
Repo (<filename>repo://</filename>)
|
||||
</para></listitem>
|
||||
<listitem><para>
|
||||
OSC (<filename>osc://</filename>)
|
||||
</para></listitem>
|
||||
|
||||
@@ -260,7 +260,7 @@
|
||||
files.
|
||||
For this example, you need to create the file in your project directory
|
||||
and define some key BitBake variables.
|
||||
For more information on the <filename>bitbake.conf</filename> file,
|
||||
For more information on the <filename>bitbake.conf</filename>,
|
||||
see
|
||||
<ulink url='http://git.openembedded.org/bitbake/tree/conf/bitbake.conf'></ulink>.
|
||||
</para>
|
||||
@@ -273,32 +273,14 @@
|
||||
some editor to create the <filename>bitbake.conf</filename>
|
||||
so that it contains the following:
|
||||
<literallayout class='monospaced'>
|
||||
<link linkend='var-PN'>PN</link> = "${@bb.parse.BBHandler.vars_from_file(d.getVar('FILE', False),d)[0] or 'defaultpkgname'}"
|
||||
</literallayout>
|
||||
<literallayout class='monospaced'>
|
||||
TMPDIR = "${<link linkend='var-TOPDIR'>TOPDIR</link>}/tmp"
|
||||
<link linkend='var-CACHE'>CACHE</link> = "${TMPDIR}/cache"
|
||||
<link linkend='var-STAMP'>STAMP</link> = "${TMPDIR}/${PN}/stamps"
|
||||
<link linkend='var-T'>T</link> = "${TMPDIR}/${PN}/work"
|
||||
<link linkend='var-B'>B</link> = "${TMPDIR}/${PN}"
|
||||
<link linkend='var-STAMP'>STAMP</link> = "${TMPDIR}/stamps"
|
||||
<link linkend='var-T'>T</link> = "${TMPDIR}/work"
|
||||
<link linkend='var-B'>B</link> = "${TMPDIR}"
|
||||
</literallayout>
|
||||
<note>
|
||||
Without a value for <filename>PN</filename>, the
|
||||
variables <filename>STAMP</filename>,
|
||||
<filename>T</filename>, and <filename>B</filename>,
|
||||
prevent more than one recipe from working. You can fix
|
||||
this by either setting <filename>PN</filename> to have
|
||||
a value similar to what OpenEmbedded and BitBake use
|
||||
in the default <filename>bitbake.conf</filename> file
|
||||
(see previous example). Or, by manually updating each
|
||||
recipe to set <filename>PN</filename>. You will also
|
||||
need to include <filename>PN</filename> as part of the
|
||||
<filename>STAMP</filename>, <filename>T</filename>, and
|
||||
<filename>B</filename> variable definitions in the
|
||||
<filename>local.conf</filename> file.
|
||||
</note>
|
||||
The <filename>TMPDIR</filename> variable establishes a directory
|
||||
that BitBake uses for build output and intermediate files other
|
||||
that BitBake uses for build output and intermediate files (other
|
||||
than the cached information used by the
|
||||
<link linkend='setscene'>Setscene</link> process.
|
||||
Here, the <filename>TMPDIR</filename> directory is set to
|
||||
@@ -318,19 +300,19 @@
|
||||
file exists, you can run the <filename>bitbake</filename>
|
||||
command again:
|
||||
<literallayout class='monospaced'>
|
||||
$ bitbake
|
||||
ERROR: Traceback (most recent call last):
|
||||
File "/home/scott-lenovo/bitbake/lib/bb/cookerdata.py", line 163, in wrapped
|
||||
return func(fn, *args)
|
||||
File "/home/scott-lenovo/bitbake/lib/bb/cookerdata.py", line 177, in _inherit
|
||||
bb.parse.BBHandler.inherit(bbclass, "configuration INHERITs", 0, data)
|
||||
File "/home/scott-lenovo/bitbake/lib/bb/parse/parse_py/BBHandler.py", line 92, in inherit
|
||||
include(fn, file, lineno, d, "inherit")
|
||||
File "/home/scott-lenovo/bitbake/lib/bb/parse/parse_py/ConfHandler.py", line 100, in include
|
||||
raise ParseError("Could not %(error_out)s file %(fn)s" % vars(), oldfn, lineno)
|
||||
ParseError: ParseError in configuration INHERITs: Could not inherit file classes/base.bbclass
|
||||
$ bitbake
|
||||
ERROR: Traceback (most recent call last):
|
||||
File "/home/scott-lenovo/bitbake/lib/bb/cookerdata.py", line 163, in wrapped
|
||||
return func(fn, *args)
|
||||
File "/home/scott-lenovo/bitbake/lib/bb/cookerdata.py", line 177, in _inherit
|
||||
bb.parse.BBHandler.inherit(bbclass, "configuration INHERITs", 0, data)
|
||||
File "/home/scott-lenovo/bitbake/lib/bb/parse/parse_py/BBHandler.py", line 92, in inherit
|
||||
include(fn, file, lineno, d, "inherit")
|
||||
File "/home/scott-lenovo/bitbake/lib/bb/parse/parse_py/ConfHandler.py", line 100, in include
|
||||
raise ParseError("Could not %(error_out)s file %(fn)s" % vars(), oldfn, lineno)
|
||||
ParseError: ParseError in configuration INHERITs: Could not inherit file classes/base.bbclass
|
||||
|
||||
ERROR: Unable to parse base: ParseError in configuration INHERITs: Could not inherit file classes/base.bbclass
|
||||
ERROR: Unable to parse base: ParseError in configuration INHERITs: Could not inherit file classes/base.bbclass
|
||||
</literallayout>
|
||||
In the sample output, BitBake could not find the
|
||||
<filename>classes/base.bbclass</filename> file.
|
||||
@@ -383,10 +365,10 @@
|
||||
code separate from the general metadata used by BitBake.
|
||||
Thus, this example creates and uses a layer called "mylayer".
|
||||
<note>
|
||||
You can find additional information on layers in the
|
||||
"<link linkend='layers'>Layers</link>" section.
|
||||
</note></para>
|
||||
|
||||
You can find additional information on layers at
|
||||
<ulink url='http://www.yoctoproject.org/docs/2.3/bitbake-user-manual/bitbake-user-manual.html#layers'></ulink>.
|
||||
</note>
|
||||
</para>
|
||||
<para>Minimally, you need a recipe file and a layer configuration
|
||||
file in your layer.
|
||||
The configuration file needs to be in the <filename>conf</filename>
|
||||
|
||||
@@ -342,14 +342,13 @@
|
||||
|
||||
<para>
|
||||
When you name an append file, you can use the
|
||||
"<filename>%</filename>" wildcard character to allow for matching
|
||||
recipe names.
|
||||
wildcard character (%) to allow for matching recipe names.
|
||||
For example, suppose you have an append file named
|
||||
as follows:
|
||||
<literallayout class='monospaced'>
|
||||
busybox_1.21.%.bbappend
|
||||
</literallayout>
|
||||
That append file would match any <filename>busybox_1.21.</filename><replaceable>x</replaceable><filename>.bb</filename>
|
||||
That append file would match any <filename>busybox_1.21.x.bb</filename>
|
||||
version of the recipe.
|
||||
So, the append file would match the following recipe names:
|
||||
<literallayout class='monospaced'>
|
||||
@@ -357,14 +356,6 @@
|
||||
busybox_1.21.2.bb
|
||||
busybox_1.21.3.bb
|
||||
</literallayout>
|
||||
<note><title>Important</title>
|
||||
The use of the "<filename>%</filename>" character
|
||||
is limited in that it only works directly in front of the
|
||||
<filename>.bbappend</filename> portion of the append file's
|
||||
name.
|
||||
You cannot use the wildcard character in any other
|
||||
location of the name.
|
||||
</note>
|
||||
If the <filename>busybox</filename> recipe was updated to
|
||||
<filename>busybox_1.3.0.bb</filename>, the append name would not
|
||||
match.
|
||||
@@ -497,6 +488,8 @@
|
||||
target that failed and anything depending on it cannot
|
||||
be built, as much as possible will be built before
|
||||
stopping.
|
||||
-a, --tryaltconfigs Continue with builds by trying to use alternative
|
||||
providers where possible.
|
||||
-f, --force Force the specified targets/task to run (invalidating
|
||||
any existing stamp file).
|
||||
-c CMD, --cmd=CMD Specify the task to execute. The exact options
|
||||
@@ -511,20 +504,19 @@
|
||||
Read the specified file before bitbake.conf.
|
||||
-R POSTFILE, --postread=POSTFILE
|
||||
Read the specified file after bitbake.conf.
|
||||
-v, --verbose Enable tracing of shell tasks (with 'set -x'). Also
|
||||
print bb.note(...) messages to stdout (in addition to
|
||||
writing them to ${T}/log.do_<task>).
|
||||
-D, --debug Increase the debug level. You can specify this more
|
||||
than once. -D sets the debug level to 1, where only
|
||||
bb.debug(1, ...) messages are printed to stdout; -DD
|
||||
sets the debug level to 2, where both bb.debug(1, ...)
|
||||
and bb.debug(2, ...) messages are printed; etc.
|
||||
Without -D, no debug messages are printed. Note that
|
||||
-D only affects output to stdout. All debug messages
|
||||
are written to ${T}/log.do_taskname, regardless of the
|
||||
debug level.
|
||||
-q, --quiet Output less log message data to the terminal. You can
|
||||
specify this more than once.
|
||||
-v, --verbose Enable tracing of shell tasks (with 'set -x').
|
||||
Also print bb.note(...) messages to stdout (in
|
||||
addition to writing them to ${T}/log.do_<task>).
|
||||
-D, --debug Increase the debug level. You can specify this
|
||||
more than once. -D sets the debug level to 1,
|
||||
where only bb.debug(1, ...) messages are printed
|
||||
to stdout; -DD sets the debug level to 2, where
|
||||
both bb.debug(1, ...) and bb.debug(2, ...)
|
||||
messages are printed; etc. Without -D, no debug
|
||||
messages are printed. Note that -D only affects
|
||||
output to stdout. All debug messages are written
|
||||
to ${T}/log.do_taskname, regardless of the debug
|
||||
level.
|
||||
-n, --dry-run Don't execute, just go through the motions.
|
||||
-S SIGNATURE_HANDLER, --dump-signatures=SIGNATURE_HANDLER
|
||||
Dump out the signature construction information, with
|
||||
@@ -547,38 +539,30 @@
|
||||
-l DEBUG_DOMAINS, --log-domains=DEBUG_DOMAINS
|
||||
Show debug logging for the specified logging domains
|
||||
-P, --profile Profile the command and save reports.
|
||||
-u UI, --ui=UI The user interface to use (knotty, ncurses or taskexp
|
||||
- default knotty).
|
||||
-u UI, --ui=UI The user interface to use (taskexp, knotty or
|
||||
ncurses - default knotty).
|
||||
-t SERVERTYPE, --servertype=SERVERTYPE
|
||||
Choose which server type to use (process or xmlrpc -
|
||||
default process).
|
||||
--token=XMLRPCTOKEN Specify the connection token to be used when
|
||||
connecting to a remote server.
|
||||
--revisions-changed Set the exit code depending on whether upstream
|
||||
floating revisions have changed or not.
|
||||
--server-only Run bitbake without a UI, only starting a server
|
||||
(cooker) process.
|
||||
-B BIND, --bind=BIND The name/address for the bitbake xmlrpc server to bind
|
||||
to.
|
||||
-T SERVER_TIMEOUT, --idle-timeout=SERVER_TIMEOUT
|
||||
Set timeout to unload bitbake server due to
|
||||
inactivity, set to -1 means no unload, default:
|
||||
Environment variable BB_SERVER_TIMEOUT.
|
||||
-B BIND, --bind=BIND The name/address for the bitbake server to bind to.
|
||||
--no-setscene Do not run any setscene tasks. sstate will be ignored
|
||||
and everything needed, built.
|
||||
--setscene-only Only run setscene tasks, don't run any real tasks.
|
||||
--remote-server=REMOTE_SERVER
|
||||
Connect to the specified server.
|
||||
-m, --kill-server Terminate any running bitbake server.
|
||||
-m, --kill-server Terminate the remote server.
|
||||
--observe-only Connect to a server as an observing-only client.
|
||||
--status-only Check the status of the remote bitbake server.
|
||||
-w WRITEEVENTLOG, --write-log=WRITEEVENTLOG
|
||||
Writes the event log of the build to a bitbake event
|
||||
json file. Use '' (empty string) to assign the name
|
||||
automatically.
|
||||
--runall=RUNALL Run the specified task for any recipe in the taskgraph
|
||||
of the specified target (even if it wouldn't otherwise
|
||||
have run).
|
||||
--runonly=RUNONLY Run only the specified task within the taskgraph of
|
||||
the specified targets (and any task dependencies those
|
||||
tasks may have).
|
||||
</literallayout>
|
||||
</para>
|
||||
</section>
|
||||
@@ -732,163 +716,6 @@
|
||||
</literallayout>
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section id='executing-a-multiple-configuration-build'>
|
||||
<title>Executing a Multiple Configuration Build</title>
|
||||
|
||||
<para>
|
||||
BitBake is able to build multiple images or packages
|
||||
using a single command where the different targets
|
||||
require different configurations (multiple configuration
|
||||
builds).
|
||||
Each target, in this scenario, is referred to as a
|
||||
"multiconfig".
|
||||
</para>
|
||||
|
||||
<para>
|
||||
To accomplish a multiple configuration build, you must
|
||||
define each target's configuration separately using
|
||||
a parallel configuration file in the build directory.
|
||||
The location for these multiconfig configuration files
|
||||
is specific.
|
||||
They must reside in the current build directory in
|
||||
a sub-directory of <filename>conf</filename> named
|
||||
<filename>multiconfig</filename>.
|
||||
Following is an example for two separate targets:
|
||||
<imagedata fileref="figures/bb_multiconfig_files.png" align="center" width="4in" depth="3in" />
|
||||
</para>
|
||||
|
||||
<para>
|
||||
The reason for this required file hierarchy
|
||||
is because the <filename>BBPATH</filename> variable
|
||||
is not constructed until the layers are parsed.
|
||||
Consequently, using the configuration file as a
|
||||
pre-configuration file is not possible unless it is
|
||||
located in the current working directory.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Minimally, each configuration file must define the
|
||||
machine and the temporary directory BitBake uses
|
||||
for the build.
|
||||
Suggested practice dictates that you do not
|
||||
overlap the temporary directories used during the
|
||||
builds.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Aside from separate configuration files for each
|
||||
target, you must also enable BitBake to perform multiple
|
||||
configuration builds.
|
||||
Enabling is accomplished by setting the
|
||||
<link linkend='var-BBMULTICONFIG'><filename>BBMULTICONFIG</filename></link>
|
||||
variable in the <filename>local.conf</filename>
|
||||
configuration file.
|
||||
As an example, suppose you had configuration files
|
||||
for <filename>target1</filename> and
|
||||
<filename>target2</filename> defined in the build
|
||||
directory.
|
||||
The following statement in the
|
||||
<filename>local.conf</filename> file both enables
|
||||
BitBake to perform multiple configuration builds and
|
||||
specifies the two multiconfigs:
|
||||
<literallayout class='monospaced'>
|
||||
BBMULTICONFIG = "target1 target2"
|
||||
</literallayout>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Once the target configuration files are in place and
|
||||
BitBake has been enabled to perform multiple configuration
|
||||
builds, use the following command form to start the
|
||||
builds:
|
||||
<literallayout class='monospaced'>
|
||||
$ bitbake [multiconfig:<replaceable>multiconfigname</replaceable>:]<replaceable>target</replaceable> [[[multiconfig:<replaceable>multiconfigname</replaceable>:]<replaceable>target</replaceable>] ... ]
|
||||
</literallayout>
|
||||
Here is an example for two multiconfigs:
|
||||
<filename>target1</filename> and
|
||||
<filename>target2</filename>:
|
||||
<literallayout class='monospaced'>
|
||||
$ bitbake multiconfig:target1:<replaceable>target</replaceable> multiconfig:target2:<replaceable>target</replaceable>
|
||||
</literallayout>
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section id='bb-enabling-multiple-configuration-build-dependencies'>
|
||||
<title>Enabling Multiple Configuration Build Dependencies</title>
|
||||
|
||||
<para>
|
||||
Sometimes dependencies can exist between targets
|
||||
(multiconfigs) in a multiple configuration build.
|
||||
For example, suppose that in order to build an image
|
||||
for a particular architecture, the root filesystem of
|
||||
another build for a different architecture needs to
|
||||
exist.
|
||||
In other words, the image for the first multiconfig depends
|
||||
on the root filesystem of the second multiconfig.
|
||||
This dependency is essentially that the task in the recipe
|
||||
that builds one multiconfig is dependent on the
|
||||
completion of the task in the recipe that builds
|
||||
another multiconfig.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
To enable dependencies in a multiple configuration
|
||||
build, you must declare the dependencies in the recipe
|
||||
using the following statement form:
|
||||
<literallayout class='monospaced'>
|
||||
<replaceable>task_or_package</replaceable>[mcdepends] = "multiconfig:<replaceable>from_multiconfig</replaceable>:<replaceable>to_multiconfig</replaceable>:<replaceable>recipe_name</replaceable>:<replaceable>task_on_which_to_depend</replaceable>"
|
||||
</literallayout>
|
||||
To better show how to use this statement, consider an
|
||||
example with two multiconfigs: <filename>target1</filename>
|
||||
and <filename>target2</filename>:
|
||||
<literallayout class='monospaced'>
|
||||
<replaceable>image_task</replaceable>[mcdepends] = "multiconfig:target1:target2:<replaceable>image2</replaceable>:<replaceable>rootfs_task</replaceable>"
|
||||
</literallayout>
|
||||
In this example, the
|
||||
<replaceable>from_multiconfig</replaceable> is "target1" and
|
||||
the <replaceable>to_multiconfig</replaceable> is "target2".
|
||||
The task on which the image whose recipe contains
|
||||
<replaceable>image_task</replaceable> depends on the
|
||||
completion of the <replaceable>rootfs_task</replaceable>
|
||||
used to build out <replaceable>image2</replaceable>, which
|
||||
is associated with the "target2" multiconfig.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Once you set up this dependency, you can build the
|
||||
"target1" multiconfig using a BitBake command as follows:
|
||||
<literallayout class='monospaced'>
|
||||
$ bitbake multiconfig:target1:<replaceable>image1</replaceable>
|
||||
</literallayout>
|
||||
This command executes all the tasks needed to create
|
||||
<replaceable>image1</replaceable> for the "target1"
|
||||
multiconfig.
|
||||
Because of the dependency, BitBake also executes through
|
||||
the <replaceable>rootfs_task</replaceable> for the "target2"
|
||||
multiconfig build.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Having a recipe depend on the root filesystem of another
|
||||
build might not seem that useful.
|
||||
Consider this change to the statement in the
|
||||
<replaceable>image1</replaceable> recipe:
|
||||
<literallayout class='monospaced'>
|
||||
<replaceable>image_task</replaceable>[mcdepends] = "multiconfig:target1:target2:<replaceable>image2</replaceable>:<replaceable>image_task</replaceable>"
|
||||
</literallayout>
|
||||
In this case, BitBake must create
|
||||
<replaceable>image2</replaceable> for the "target2"
|
||||
build since the "target1" build depends on it.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Because "target1" and "target2" are enabled for multiple
|
||||
configuration builds and have separate configuration
|
||||
files, BitBake places the artifacts for each build in the
|
||||
respective temporary build directories.
|
||||
</para>
|
||||
</section>
|
||||
</section>
|
||||
</section>
|
||||
</chapter>
|
||||
|
||||
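The multiconfig material in the hunk above condenses into a short, hedged walk-through; the configuration and target names below are illustrative placeholders:

```sh
# One configuration file per multiconfig, under conf/multiconfig/ in the build directory;
# each file minimally sets the machine and a distinct TMPDIR.
ls conf/multiconfig/
# target1.conf  target2.conf

# Enable the multiconfigs in conf/local.conf
echo 'BBMULTICONFIG = "target1 target2"' >> conf/local.conf

# Build a target for each configuration in a single invocation
bitbake multiconfig:target1:core-image-minimal multiconfig:target2:core-image-minimal

# A recipe task can also depend on a task from another multiconfig, e.g.:
#   do_image[mcdepends] = "multiconfig:target1:target2:image2:do_rootfs"
```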
@@ -342,7 +342,7 @@
|
||||
|
||||
<para>
|
||||
When you use this syntax, BitBake expects one or more strings.
|
||||
Surrounding spaces and spacing are preserved.
|
||||
Surrounding spaces are removed as well.
|
||||
Here is an example:
|
||||
<literallayout class='monospaced'>
|
||||
FOO = "123 456 789 123456 123 456 123 456"
|
||||
@@ -352,9 +352,8 @@
|
||||
FOO2_remove = "abc def"
|
||||
</literallayout>
|
||||
The variable <filename>FOO</filename> becomes
|
||||
" 789 123456 "
|
||||
and <filename>FOO2</filename> becomes
|
||||
" ghi abcdef ".
|
||||
"789 123456" and <filename>FOO2</filename> becomes
|
||||
"ghi abcdef".
|
||||
</para>
|
||||
|
||||
<para>
|
||||
@@ -503,7 +502,7 @@
|
||||
</section>
|
||||
|
||||
<section id='unsetting-variables'>
|
||||
<title>Unsetting variables</title>
|
||||
<title>Unseting variables</title>
|
||||
|
||||
<para>
|
||||
It is possible to completely remove a variable or a variable flag
|
||||
@@ -1930,38 +1929,6 @@
|
||||
not careful.
|
||||
</note>
|
||||
</para></listitem>
|
||||
<listitem><para><emphasis><filename>[number_threads]</filename>:</emphasis>
|
||||
Limits tasks to a specific number of simultaneous threads
|
||||
during execution.
|
||||
This varflag is useful when your build host has a large number
|
||||
of cores but certain tasks need to be rate-limited due to various
|
||||
kinds of resource constraints (e.g. to avoid network throttling).
|
||||
<filename>number_threads</filename> works similarly to the
|
||||
<link linkend='var-BB_NUMBER_THREADS'><filename>BB_NUMBER_THREADS</filename></link>
|
||||
variable but is task-specific.</para>
|
||||
|
||||
<para>Set the value globally.
|
||||
For example, the following makes sure the
|
||||
<filename>do_fetch</filename> task uses no more than two
|
||||
simultaneous execution threads:
|
||||
<literallayout class='monospaced'>
|
||||
do_fetch[number_threads] = "2"
|
||||
</literallayout>
|
||||
<note><title>Warnings</title>
|
||||
<itemizedlist>
|
||||
<listitem><para>
|
||||
Setting the varflag in individual recipes rather
|
||||
than globally can result in unpredictable behavior.
|
||||
</para></listitem>
|
||||
<listitem><para>
|
||||
Setting the varflag to a value greater than the
|
||||
value used in the <filename>BB_NUMBER_THREADS</filename>
|
||||
variable causes <filename>number_threads</filename>
|
||||
to have no effect.
|
||||
</para></listitem>
|
||||
</itemizedlist>
|
||||
</note>
|
||||
</para></listitem>
|
||||
<listitem><para><emphasis><filename>[postfuncs]</filename>:</emphasis>
|
||||
List of functions to call after the completion of the task.
|
||||
</para></listitem>
|
||||
@@ -2165,8 +2132,6 @@
|
||||
<listitem><para>
|
||||
<filename>bb.event.BuildStarted()</filename>:
|
||||
Fired when a new build starts.
|
||||
BitBake fires multiple "BuildStarted" events (one per configuration)
|
||||
when multiple configuration (multiconfig) is enabled.
|
||||
</para></listitem>
|
||||
<listitem><para>
|
||||
<filename>bb.build.TaskStarted()</filename>:
|
||||
@@ -2685,97 +2650,48 @@
|
||||
</para>
|
||||
|
||||
<para>
|
||||
These checksums are stored in
|
||||
<link linkend='var-STAMP'><filename>STAMP</filename></link>.
|
||||
You can examine the checksums using the following BitBake command:
|
||||
<literallayout class='monospaced'>
|
||||
$ bitbake-dumpsigs
|
||||
</literallayout>
|
||||
This command returns the signature data in a readable format
|
||||
that allows you to examine the inputs used when the
|
||||
OpenEmbedded build system generates signatures.
|
||||
For example, using <filename>bitbake-dumpsigs</filename>
|
||||
allows you to examine the <filename>do_compile</filename>
|
||||
task's “sigdata” for a C application (e.g.
|
||||
<filename>bash</filename>).
|
||||
Running the command also reveals that the “CC” variable is part of
|
||||
the inputs that are hashed.
|
||||
Any changes to this variable would invalidate the stamp and
|
||||
cause the <filename>do_compile</filename> task to run.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
The following list describes related variables:
|
||||
This list is a place holder of content existed from previous work
|
||||
on the manual.
|
||||
Some or all of it probably needs integrated into the subsections
|
||||
that make up this section.
|
||||
For now, I have just provided a short glossary-like description
|
||||
for each variable.
|
||||
Ultimately, this list goes away.
|
||||
<itemizedlist>
|
||||
<listitem><para>
|
||||
<link linkend='var-BB_HASHCHECK_FUNCTION'><filename>BB_HASHCHECK_FUNCTION</filename></link>:
|
||||
<listitem><para><filename>STAMP</filename>:
|
||||
The base path to create stamp files.</para></listitem>
|
||||
<listitem><para><filename>STAMPCLEAN</filename>
|
||||
Again, the base path to create stamp files but can use wildcards
|
||||
for matching a range of files for clean operations.
|
||||
</para></listitem>
|
||||
<listitem><para><filename>BB_STAMP_WHITELIST</filename>
|
||||
Lists stamp files that are looked at when the stamp policy
|
||||
is "whitelist".
|
||||
</para></listitem>
|
||||
<listitem><para><filename>BB_STAMP_POLICY</filename>
|
||||
Defines the mode for comparing timestamps of stamp files.
|
||||
</para></listitem>
|
||||
<listitem><para><filename>BB_HASHCHECK_FUNCTION</filename>
|
||||
Specifies the name of the function to call during
|
||||
the "setscene" part of the task's execution in order
|
||||
to validate the list of task hashes.
|
||||
</para></listitem>
|
||||
<listitem><para>
|
||||
<link linkend='var-BB_SETSCENE_DEPVALID'><filename>BB_SETSCENE_DEPVALID</filename></link>:
|
||||
Specifies a function BitBake calls that determines
|
||||
whether BitBake requires a setscene dependency to
|
||||
be met.
|
||||
</para></listitem>
|
||||
<listitem><para>
|
||||
<link linkend='var-BB_SETSCENE_VERIFY_FUNCTION2'><filename>BB_SETSCENE_VERIFY_FUNCTION2</filename></link>:
|
||||
<listitem><para><filename>BB_SETSCENE_VERIFY_FUNCTION2</filename>
|
||||
Specifies a function to call that verifies the list of
|
||||
planned task execution before the main task execution
|
||||
happens.
|
||||
</para></listitem>
|
||||
<listitem><para>
|
||||
<link linkend='var-BB_STAMP_POLICY'><filename>BB_STAMP_POLICY</filename></link>:
|
||||
Defines the mode for comparing timestamps of stamp files.
|
||||
<listitem><para><filename>BB_SETSCENE_DEPVALID</filename>
|
||||
Specifies a function BitBake calls that determines
|
||||
whether BitBake requires a setscene dependency to
|
||||
be met.
|
||||
</para></listitem>
|
||||
<listitem><para>
|
||||
<link linkend='var-BB_STAMP_WHITELIST'><filename>BB_STAMP_WHITELIST</filename></link>:
|
||||
Lists stamp files that are looked at when the stamp policy
|
||||
is "whitelist".
|
||||
</para></listitem>
|
||||
<listitem><para>
|
||||
<link linkend='var-BB_TASKHASH'><filename>BB_TASKHASH</filename></link>:
|
||||
<listitem><para><filename>BB_TASKHASH</filename>
|
||||
Within an executing task, this variable holds the hash
|
||||
of the task as returned by the currently enabled
|
||||
signature generator.
|
||||
</para></listitem>
|
||||
<listitem><para>
|
||||
<link linkend='var-STAMP'><filename>STAMP</filename></link>:
|
||||
The base path to create stamp files.
|
||||
</para></listitem>
|
||||
<listitem><para>
|
||||
<link linkend='var-STAMPCLEAN'><filename>STAMPCLEAN</filename></link>:
|
||||
Again, the base path to create stamp files but can use wildcards
|
||||
for matching a range of files for clean operations.
|
||||
</para></listitem>
|
||||
</itemizedlist>
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section id='wildcard-support-in-variables'>
|
||||
<title>Wildcard Support in Variables</title>
|
||||
|
||||
<para>
|
||||
Support for wildcard use in variables varies depending on the
|
||||
context in which it is used.
|
||||
For example, some variables and file names allow limited use of
|
||||
wildcards through the "<filename>%</filename>" and
|
||||
"<filename>*</filename>" characters.
|
||||
Other variables or names support Python's
|
||||
<ulink url='https://docs.python.org/3/library/glob.html'><filename>glob</filename></ulink>
|
||||
syntax,
|
||||
<ulink url='https://docs.python.org/3/library/fnmatch.html#module-fnmatch'><filename>fnmatch</filename></ulink>
|
||||
syntax, or
|
||||
<ulink url='https://docs.python.org/3/library/re.html#re'><filename>Regular Expression (re)</filename></ulink>
|
||||
syntax.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
For variables that have wildcard suport, the
|
||||
documentation describes which form of wildcard, its
|
||||
use, and its limitations.
|
||||
</para>
|
||||
</section>
|
||||
|
||||
</chapter>
|
||||
|
||||
@@ -78,7 +78,7 @@
|
||||
</para>
|
||||
|
||||
<para>
|
||||
In OpenEmbedded-Core, <filename>ASSUME_PROVIDED</filename>
|
||||
In OpenEmbedded Core, <filename>ASSUME_PROVIDED</filename>
|
||||
mostly specifies native tools that should not be built.
|
||||
An example is <filename>git-native</filename>, which
|
||||
when specified allows for the Git binary from the host to
|
||||
@@ -115,8 +115,7 @@
|
||||
is either not set or set to "0".
|
||||
</para></listitem>
|
||||
<listitem><para>
|
||||
Limited support for the "<filename>*</filename>"
|
||||
wildcard character for matching against the
|
||||
Limited support for wildcard matching against the
|
||||
beginning of host names exists.
|
||||
For example, the following setting matches
|
||||
<filename>git.gnu.org</filename>,
|
||||
@@ -125,20 +124,6 @@
|
||||
<literallayout class='monospaced'>
|
||||
BB_ALLOWED_NETWORKS = "*.gnu.org"
|
||||
</literallayout>
|
||||
<note><title>Important</title>
|
||||
<para>The use of the "<filename>*</filename>"
|
||||
character only works at the beginning of
|
||||
a host name and it must be isolated from
|
||||
the remainder of the host name.
|
||||
You cannot use the wildcard character in any
|
||||
other location of the name or combined with
|
||||
the front part of the name.</para>
|
||||
|
||||
<para>For example,
|
||||
<filename>*.foo.bar</filename> is supported,
|
||||
while <filename>*aa.foo.bar</filename> is not.
|
||||
</para>
|
||||
</note>
|
||||
</para></listitem>
|
||||
<listitem><para>
|
||||
Mirrors not in the host list are skipped and
|
||||
@@ -661,10 +646,10 @@
|
||||
<glossdef>
|
||||
<para>
|
||||
Contains the name of the currently executing task.
|
||||
The value includes the "do_" prefix.
|
||||
The value does not include the "do_" prefix.
|
||||
For example, if the currently executing task is
|
||||
<filename>do_config</filename>, the value is
|
||||
"do_config".
|
||||
"config".
|
||||
</para>
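<para>
As a minimal, purely illustrative sketch (the task name and message are
made up for this example), a recipe could report the value of
<filename>BB_CURRENTTASK</filename> from within a task:
<literallayout class='monospaced'>
     python do_show_task () {
         bb.plain("BB_CURRENTTASK = %s" % d.getVar("BB_CURRENTTASK"))
     }
     addtask show_task after do_configure before do_compile
</literallayout>
</para>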
|
||||
</glossdef>
|
||||
</glossentry>
|
||||
@@ -979,7 +964,7 @@
|
||||
Allows you to extend a recipe so that it builds variants
|
||||
of the software.
|
||||
Some examples of these variants for recipes from the
|
||||
OpenEmbedded-Core metadata are "natives" such as
|
||||
OpenEmbedded Core metadata are "natives" such as
|
||||
<filename>quilt-native</filename>, which is a copy of
|
||||
Quilt built to run on the build system; "crosses" such
|
||||
as <filename>gcc-cross</filename>, which is a compiler
|
||||
@@ -995,7 +980,7 @@
|
||||
amount of code, it usually is as simple as adding the
|
||||
variable to your recipe.
|
||||
Here are two examples.
|
||||
The "native" variants are from the OpenEmbedded-Core
|
||||
The "native" variants are from the OpenEmbedded Core
|
||||
metadata:
|
||||
<literallayout class='monospaced'>
|
||||
BBCLASSEXTEND =+ "native nativesdk"
|
||||
@@ -1097,19 +1082,7 @@
|
||||
|
||||
<glossentry id='var-BBFILES'><glossterm>BBFILES</glossterm>
<glossdef>
<para>
A space-separated list of recipe files BitBake uses to
build software.
</para>

<para>
When specifying recipe files, you can pattern match using
Python's
<ulink url='https://docs.python.org/3/library/glob.html'><filename>glob</filename></ulink>
syntax.
For details on the syntax, see the documentation by
following the previous link.
</para>
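<para>
As a hypothetical example (the layer layout is made up), a layer's
<filename>conf/layer.conf</filename> file might use glob patterns such
as the following:
<literallayout class='monospaced'>
     BBFILES += "${LAYERDIR}/recipes-*/*/*.bb ${LAYERDIR}/recipes-*/*/*.bbappend"
</literallayout>
Every <filename>.bb</filename> and <filename>.bbappend</filename> file
under the layer's <filename>recipes-*</filename> directories then
becomes visible to BitBake.
</para>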
<para>List of recipe files BitBake uses to build software.</para>
</glossdef>
</glossentry>
|
||||
|
||||
@@ -1193,19 +1166,15 @@
|
||||
match any of the expressions.
|
||||
It is as if BitBake does not see them at all.
|
||||
Consequently, matching files are not parsed or otherwise
|
||||
used by BitBake.
|
||||
</para>
|
||||
|
||||
used by BitBake.</para>
|
||||
<para>
|
||||
The values you provide are passed to Python's regular
|
||||
expression compiler.
|
||||
Consequently, the syntax follows Python's Regular
|
||||
Expression (re) syntax.
|
||||
The expressions are compared against the full paths to
|
||||
the files.
|
||||
For complete syntax information, see Python's
|
||||
documentation at
|
||||
<ulink url='http://docs.python.org/3/library/re.html#re'></ulink>.
|
||||
<ulink url='http://docs.python.org/release/2.3/lib/re-syntax.html'></ulink>.
|
||||
</para>
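<para>
As a hypothetical sketch only (the layer and path do not exist), such
an expression might look like the following:
<literallayout class='monospaced'>
     BBMASK = "meta-examplelayer/recipes-extra/"
</literallayout>
Any recipe or append file whose full path matches the expression is
then ignored during parsing.
</para>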
|
||||
|
||||
<para>
|
||||
@@ -1236,45 +1205,6 @@
|
||||
</glossdef>
|
||||
</glossentry>
|
||||
|
||||
<glossentry id='var-BBMULTICONFIG'><glossterm>BBMULTICONFIG</glossterm>
<info>
BBMULTICONFIG[doc] = "Enables BitBake to perform multiple configuration builds and lists each separate configuration (multiconfig)."
</info>
<glossdef>
<para role="glossdeffirst">
<!-- <para role="glossdeffirst"><imagedata fileref="figures/define-generic.png" /> -->
Enables BitBake to perform multiple configuration builds
and lists each separate configuration (multiconfig).
You can use this variable to cause BitBake to build
multiple targets where each target has a separate
configuration.
Define <filename>BBMULTICONFIG</filename> in your
<filename>conf/local.conf</filename> configuration file.
</para>

<para>
As an example, the following line specifies three
multiconfigs, each having a separate configuration file:
<literallayout class='monospaced'>
BBMULTICONFIG = "configA configB configC"
</literallayout>
Each configuration file you use must reside in the
build directory within a directory named
<filename>conf/multiconfig</filename> (e.g.
<replaceable>build_directory</replaceable><filename>/conf/multiconfig/configA.conf</filename>).
</para>
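<para>
To make the example concrete (the configuration names and contents are
hypothetical), the build directory could contain
<filename>conf/multiconfig/configA.conf</filename>,
<filename>conf/multiconfig/configB.conf</filename>, and
<filename>conf/multiconfig/configC.conf</filename>, with each file
setting, for example, a different <filename>MACHINE</filename> so that
a single BitBake invocation builds targets for several machines.
</para>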
<para>
For information on how to use
<filename>BBMULTICONFIG</filename> in an environment that
supports building targets with multiple configurations,
see the
"<link linkend='executing-a-multiple-configuration-build'>Executing a Multiple Configuration Build</link>"
section.
</para>
</glossdef>
</glossentry>
|
||||
|
||||
<glossentry id='var-BBPATH'><glossterm>BBPATH</glossterm>
|
||||
<glossdef>
|
||||
<para>
|
||||
@@ -1964,27 +1894,15 @@
|
||||
you want to select, and you should set
|
||||
<link linkend='var-PV'><filename>PV</filename></link>
|
||||
accordingly for precedence.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
The <filename>PREFERRED_VERSION</filename> variable
|
||||
supports limited wildcard use through the
|
||||
"<filename>%</filename>" character.
|
||||
You can use the character to match any number of
|
||||
characters, which can be useful when specifying versions
|
||||
that contain long revision numbers that potentially change.
|
||||
You can use the "<filename>%</filename>" character as a
|
||||
wildcard to match any number of characters, which can be
|
||||
useful when specifying versions that contain long revision
|
||||
numbers that could potentially change.
|
||||
Here are two examples:
|
||||
<literallayout class='monospaced'>
|
||||
PREFERRED_VERSION_python = "2.7.3"
|
||||
PREFERRED_VERSION_linux-yocto = "4.12%"
|
||||
</literallayout>
|
||||
<note><title>Important</title>
|
||||
The use of the "<filename>%</filename>" character
|
||||
is limited in that it only works at the end of the
|
||||
string.
|
||||
You cannot use the wildcard character in any other
|
||||
location of the string.
|
||||
</note>
|
||||
</para>
|
||||
</glossdef>
|
||||
</glossentry>
|
||||
@@ -2171,16 +2089,6 @@
|
||||
</glossdef>
|
||||
</glossentry>
|
||||
|
||||
<glossentry id='var-REPODIR'><glossterm>REPODIR</glossterm>
|
||||
<glossdef>
|
||||
<para>
|
||||
The directory in which a local copy of a
|
||||
<filename>google-repo</filename> directory is stored
|
||||
when it is synced.
|
||||
</para>
|
||||
</glossdef>
|
||||
</glossentry>
|
||||
|
||||
<glossentry id='var-RPROVIDES'><glossterm>RPROVIDES</glossterm>
|
||||
<glossdef>
|
||||
<para>
|
||||
|
||||
@@ -56,7 +56,7 @@
|
||||
-->
|
||||
|
||||
<copyright>
|
||||
<year>2004-2018</year>
|
||||
<year>2004-2016</year>
|
||||
<holder>Richard Purdie</holder>
|
||||
<holder>Chris Larson</holder>
|
||||
<holder>and Phil Blundell</holder>
|
||||
|
||||
Binary file not shown.
@@ -150,7 +150,7 @@ class COWDictMeta(COWMeta):
|
||||
yield value
|
||||
if type == "items":
|
||||
yield (key, value)
|
||||
return
|
||||
raise StopIteration()
|
||||
|
||||
def iterkeys(cls):
|
||||
return cls.iter("keys")
|
||||
|
||||
@@ -21,7 +21,7 @@
|
||||
# with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
__version__ = "1.40.0"
|
||||
__version__ = "1.36.0"
|
||||
|
||||
import sys
|
||||
if sys.version_info < (3, 4, 0):
|
||||
@@ -63,10 +63,6 @@ class BBLogger(Logger):
|
||||
def verbose(self, msg, *args, **kwargs):
|
||||
return self.log(logging.INFO - 1, msg, *args, **kwargs)
|
||||
|
||||
def verbnote(self, msg, *args, **kwargs):
|
||||
return self.log(logging.INFO + 2, msg, *args, **kwargs)
|
||||
|
||||
|
||||
logging.raiseExceptions = False
|
||||
logging.setLoggerClass(BBLogger)
|
||||
|
||||
@@ -97,18 +93,6 @@ def debug(lvl, *args):
|
||||
def note(*args):
|
||||
mainlogger.info(''.join(args))
|
||||
|
||||
#
|
||||
# A higher priority note which will show on the console but isn't a warning
|
||||
#
|
||||
# Something is happening the user should be aware of but they probably did
|
||||
# something to make it happen
|
||||
#
|
||||
def verbnote(*args):
|
||||
mainlogger.verbnote(''.join(args))
|
||||
|
||||
#
|
||||
# Warnings - things the user likely needs to pay attention to and fix
|
||||
#
|
||||
def warn(*args):
|
||||
mainlogger.warning(''.join(args))
|
||||
|
||||
|
||||
@@ -41,6 +41,8 @@ from bb import data, event, utils
|
||||
bblogger = logging.getLogger('BitBake')
|
||||
logger = logging.getLogger('BitBake.Build')
|
||||
|
||||
NULL = open(os.devnull, 'r+')
|
||||
|
||||
__mtime_cache = {}
|
||||
|
||||
def cached_mtime_noerror(f):
|
||||
@@ -531,6 +533,7 @@ def _exec_task(fn, task, d, quieterr):
|
||||
self.triggered = True
|
||||
|
||||
# Handle logfiles
|
||||
si = open('/dev/null', 'r')
|
||||
try:
|
||||
bb.utils.mkdirhier(os.path.dirname(logfn))
|
||||
logfile = open(logfn, 'w')
|
||||
@@ -544,8 +547,7 @@ def _exec_task(fn, task, d, quieterr):
|
||||
ose = [os.dup(sys.stderr.fileno()), sys.stderr.fileno()]
|
||||
|
||||
# Replace those fds with our own
|
||||
with open('/dev/null', 'r') as si:
|
||||
os.dup2(si.fileno(), osi[1])
|
||||
os.dup2(si.fileno(), osi[1])
|
||||
os.dup2(logfile.fileno(), oso[1])
|
||||
os.dup2(logfile.fileno(), ose[1])
|
||||
|
||||
@@ -606,6 +608,7 @@ def _exec_task(fn, task, d, quieterr):
|
||||
os.close(osi[0])
|
||||
os.close(oso[0])
|
||||
os.close(ose[0])
|
||||
si.close()
|
||||
|
||||
logfile.close()
|
||||
if os.path.exists(logfn) and os.path.getsize(logfn) == 0:
|
||||
@@ -800,7 +803,6 @@ def add_tasks(tasklist, d):
|
||||
if name in flags:
|
||||
deptask = d.expand(flags[name])
|
||||
task_deps[name][task] = deptask
|
||||
getTask('mcdepends')
|
||||
getTask('depends')
|
||||
getTask('rdepends')
|
||||
getTask('deptask')
|
||||
@@ -870,12 +872,6 @@ def preceedtask(task, with_recrdeptasks, d):
|
||||
that this may lead to the task itself being listed.
|
||||
"""
|
||||
preceed = set()
|
||||
|
||||
# Ignore tasks which don't exist
|
||||
tasks = d.getVar('__BBTASKS', False)
|
||||
if task not in tasks:
|
||||
return preceed
|
||||
|
||||
preceed.update(d.getVarFlag(task, 'deps') or [])
|
||||
if with_recrdeptasks:
|
||||
recrdeptask = d.getVarFlag(task, 'recrdeptask')
|
||||
|
||||
@@ -37,7 +37,7 @@ import bb.utils
|
||||
|
||||
logger = logging.getLogger("BitBake.Cache")
|
||||
|
||||
__cache_version__ = "152"
|
||||
__cache_version__ = "151"
|
||||
|
||||
def getCacheFile(path, filename, data_hash):
|
||||
return os.path.join(path, filename + "." + data_hash)
|
||||
@@ -395,7 +395,7 @@ class Cache(NoCache):
|
||||
self.has_cache = True
|
||||
self.cachefile = getCacheFile(self.cachedir, "bb_cache.dat", self.data_hash)
|
||||
|
||||
logger.debug(1, "Cache dir: %s", self.cachedir)
|
||||
logger.debug(1, "Using cache in '%s'", self.cachedir)
|
||||
bb.utils.mkdirhier(self.cachedir)
|
||||
|
||||
cache_ok = True
|
||||
@@ -408,8 +408,6 @@ class Cache(NoCache):
|
||||
self.load_cachefile()
|
||||
elif os.path.isfile(self.cachefile):
|
||||
logger.info("Out of date cache found, rebuilding...")
|
||||
else:
|
||||
logger.debug(1, "Cache file %s not found, building..." % self.cachefile)
|
||||
|
||||
def load_cachefile(self):
|
||||
cachesize = 0
|
||||
@@ -426,7 +424,6 @@ class Cache(NoCache):
|
||||
|
||||
for cache_class in self.caches_array:
|
||||
cachefile = getCacheFile(self.cachedir, cache_class.cachefile, self.data_hash)
|
||||
logger.debug(1, 'Loading cache file: %s' % cachefile)
|
||||
with open(cachefile, "rb") as cachefile:
|
||||
pickled = pickle.Unpickler(cachefile)
|
||||
# Check cache version information
|
||||
|
||||
@@ -97,8 +97,6 @@ class FileChecksumCache(MultiProcessCache):
|
||||
|
||||
def checksum_dir(pth):
|
||||
# Handle directories recursively
|
||||
if pth == "/":
|
||||
bb.fatal("Refusing to checksum /")
|
||||
dirchecksums = []
|
||||
for root, dirs, files in os.walk(pth):
|
||||
for name in files:
|
||||
|
||||
@@ -140,7 +140,7 @@ class CodeParserCache(MultiProcessCache):
|
||||
# so that an existing cache gets invalidated. Additionally you'll need
|
||||
# to increment __cache_version__ in cache.py in order to ensure that old
|
||||
# recipe caches don't trigger "Taskhash mismatch" errors.
|
||||
CACHE_VERSION = 10
|
||||
CACHE_VERSION = 9
|
||||
|
||||
def __init__(self):
|
||||
MultiProcessCache.__init__(self)
|
||||
@@ -214,7 +214,7 @@ class BufferedLogger(Logger):
|
||||
self.buffer = []
|
||||
|
||||
class PythonParser():
|
||||
getvars = (".getVar", ".appendVar", ".prependVar", "oe.utils.conditional")
|
||||
getvars = (".getVar", ".appendVar", ".prependVar")
|
||||
getvarflags = (".getVarFlag", ".appendVarFlag", ".prependVarFlag")
|
||||
containsfuncs = ("bb.utils.contains", "base_contains")
|
||||
containsanyfuncs = ("bb.utils.contains_any", "bb.utils.filter")
|
||||
|
||||
@@ -175,31 +175,18 @@ class BBCooker:
|
||||
|
||||
self.configuration = configuration
|
||||
|
||||
bb.debug(1, "BBCooker starting %s" % time.time())
|
||||
sys.stdout.flush()
|
||||
|
||||
self.configwatcher = pyinotify.WatchManager()
|
||||
bb.debug(1, "BBCooker pyinotify1 %s" % time.time())
|
||||
sys.stdout.flush()
|
||||
|
||||
self.configwatcher.bbseen = []
|
||||
self.configwatcher.bbwatchedfiles = []
|
||||
self.confignotifier = pyinotify.Notifier(self.configwatcher, self.config_notifications)
|
||||
bb.debug(1, "BBCooker pyinotify2 %s" % time.time())
|
||||
sys.stdout.flush()
|
||||
self.watchmask = pyinotify.IN_CLOSE_WRITE | pyinotify.IN_CREATE | pyinotify.IN_DELETE | \
|
||||
pyinotify.IN_DELETE_SELF | pyinotify.IN_MODIFY | pyinotify.IN_MOVE_SELF | \
|
||||
pyinotify.IN_MOVED_FROM | pyinotify.IN_MOVED_TO
|
||||
self.watcher = pyinotify.WatchManager()
|
||||
bb.debug(1, "BBCooker pyinotify3 %s" % time.time())
|
||||
sys.stdout.flush()
|
||||
self.watcher.bbseen = []
|
||||
self.watcher.bbwatchedfiles = []
|
||||
self.notifier = pyinotify.Notifier(self.watcher, self.notifications)
|
||||
|
||||
bb.debug(1, "BBCooker pyinotify complete %s" % time.time())
|
||||
sys.stdout.flush()
|
||||
|
||||
# If being called by something like tinfoil, we need to clean cached data
|
||||
# which may now be invalid
|
||||
bb.parse.clear_cache()
|
||||
@@ -209,9 +196,6 @@ class BBCooker:
|
||||
|
||||
self.initConfigurationData()
|
||||
|
||||
bb.debug(1, "BBCooker parsed base configuration %s" % time.time())
|
||||
sys.stdout.flush()
|
||||
|
||||
# we log all events to a file if so directed
|
||||
if self.configuration.writeeventlog:
|
||||
# register the log file writer as UI Handler
|
||||
@@ -249,9 +233,6 @@ class BBCooker:
|
||||
# Let SIGHUP exit as SIGTERM
|
||||
signal.signal(signal.SIGHUP, self.sigterm_exception)
|
||||
|
||||
bb.debug(1, "BBCooker startup complete %s" % time.time())
|
||||
sys.stdout.flush()
|
||||
|
||||
def process_inotify_updates(self):
|
||||
for n in [self.confignotifier, self.notifier]:
|
||||
if n.check_events(timeout=0):
|
||||
@@ -391,9 +372,8 @@ class BBCooker:
|
||||
if CookerFeatures.BASEDATASTORE_TRACKING in self.featureset:
|
||||
self.disableDataTracking()
|
||||
|
||||
for mc in self.databuilder.mcdata.values():
|
||||
mc.renameVar("__depends", "__base_depends")
|
||||
self.add_filewatch(mc.getVar("__base_depends", False), self.configwatcher)
|
||||
self.data.renameVar("__depends", "__base_depends")
|
||||
self.add_filewatch(self.data.getVar("__base_depends", False), self.configwatcher)
|
||||
|
||||
self.baseconfig_valid = True
|
||||
self.parsecache_valid = False
|
||||
@@ -536,8 +516,6 @@ class BBCooker:
|
||||
fn = runlist[0][3]
|
||||
else:
|
||||
envdata = self.data
|
||||
data.expandKeys(envdata)
|
||||
parse.ast.runAnonFuncs(envdata)
|
||||
|
||||
if fn:
|
||||
try:
|
||||
@@ -558,6 +536,7 @@ class BBCooker:
|
||||
logger.plain(env.getvalue())
|
||||
|
||||
# emit the metadata which isn't valid shell
|
||||
data.expandKeys(envdata)
|
||||
for e in sorted(envdata.keys()):
|
||||
if envdata.getVarFlag(e, 'func', False) and envdata.getVarFlag(e, 'python', False):
|
||||
logger.plain("\npython %s () {\n%s}\n", e, envdata.getVar(e, False))
|
||||
@@ -640,33 +619,6 @@ class BBCooker:
|
||||
runlist.append([mc, k, ktask, fn])
|
||||
bb.event.fire(bb.event.TreeDataPreparationProgress(current, len(fulltargetlist)), self.data)
|
||||
|
||||
|
||||
# No need to do check providers if there are no mcdeps or not an mc build
|
||||
if len(self.multiconfigs) > 1:
|
||||
seen = set()
|
||||
new = True
|
||||
# Make sure we can provide the multiconfig dependency
|
||||
while new:
|
||||
mcdeps = set()
|
||||
# Add unresolved first, so we can get multiconfig indirect dependencies on time
|
||||
for mc in self.multiconfigs:
|
||||
taskdata[mc].add_unresolved(localdata[mc], self.recipecaches[mc])
|
||||
mcdeps |= set(taskdata[mc].get_mcdepends())
|
||||
new = False
|
||||
for mc in self.multiconfigs:
|
||||
for k in mcdeps:
|
||||
if k in seen:
|
||||
continue
|
||||
l = k.split(':')
|
||||
depmc = l[2]
|
||||
if depmc not in self.multiconfigs:
|
||||
bb.fatal("Multiconfig dependency %s depends on nonexistent mc configuration %s" % (k,depmc))
|
||||
else:
|
||||
logger.debug(1, "Adding providers for multiconfig dependency %s" % l[3])
|
||||
taskdata[depmc].add_provider(localdata[depmc], self.recipecaches[depmc], l[3])
|
||||
seen.add(k)
|
||||
new = True
|
||||
|
||||
for mc in self.multiconfigs:
|
||||
taskdata[mc].add_unresolved(localdata[mc], self.recipecaches[mc])
|
||||
|
||||
@@ -753,8 +705,8 @@ class BBCooker:
|
||||
if not dotname in depend_tree["tdepends"]:
|
||||
depend_tree["tdepends"][dotname] = []
|
||||
for dep in rq.rqdata.runtaskentries[tid].depends:
|
||||
(depmc, depfn, _, deptaskfn) = bb.runqueue.split_tid_mcfn(dep)
|
||||
deppn = self.recipecaches[depmc].pkg_fn[deptaskfn]
|
||||
(depmc, depfn, deptaskname, deptaskfn) = bb.runqueue.split_tid_mcfn(dep)
|
||||
deppn = self.recipecaches[mc].pkg_fn[deptaskfn]
|
||||
depend_tree["tdepends"][dotname].append("%s.%s" % (deppn, bb.runqueue.taskname_from_tid(dep)))
|
||||
if taskfn not in seen_fns:
|
||||
seen_fns.append(taskfn)
|
||||
@@ -904,12 +856,12 @@ class BBCooker:
|
||||
|
||||
with open('task-depends.dot', 'w') as f:
|
||||
f.write("digraph depends {\n")
|
||||
for task in sorted(depgraph["tdepends"]):
|
||||
for task in depgraph["tdepends"]:
|
||||
(pn, taskname) = task.rsplit(".", 1)
|
||||
fn = depgraph["pn"][pn]["filename"]
|
||||
version = depgraph["pn"][pn]["version"]
|
||||
f.write('"%s.%s" [label="%s %s\\n%s\\n%s"]\n' % (pn, taskname, pn, taskname, version, fn))
|
||||
for dep in sorted(depgraph["tdepends"][task]):
|
||||
for dep in depgraph["tdepends"][task]:
|
||||
f.write('"%s" -> "%s"\n' % (task, dep))
|
||||
f.write("}\n")
|
||||
logger.info("Task dependencies saved to 'task-depends.dot'")
|
||||
@@ -917,23 +869,23 @@ class BBCooker:
|
||||
with open('recipe-depends.dot', 'w') as f:
|
||||
f.write("digraph depends {\n")
|
||||
pndeps = {}
|
||||
for task in sorted(depgraph["tdepends"]):
|
||||
for task in depgraph["tdepends"]:
|
||||
(pn, taskname) = task.rsplit(".", 1)
|
||||
if pn not in pndeps:
|
||||
pndeps[pn] = set()
|
||||
for dep in sorted(depgraph["tdepends"][task]):
|
||||
for dep in depgraph["tdepends"][task]:
|
||||
(deppn, deptaskname) = dep.rsplit(".", 1)
|
||||
pndeps[pn].add(deppn)
|
||||
for pn in sorted(pndeps):
|
||||
for pn in pndeps:
|
||||
fn = depgraph["pn"][pn]["filename"]
|
||||
version = depgraph["pn"][pn]["version"]
|
||||
f.write('"%s" [label="%s\\n%s\\n%s"]\n' % (pn, pn, version, fn))
|
||||
for dep in sorted(pndeps[pn]):
|
||||
for dep in pndeps[pn]:
|
||||
if dep == pn:
|
||||
continue
|
||||
f.write('"%s" -> "%s"\n' % (pn, dep))
|
||||
f.write("}\n")
|
||||
logger.info("Flattened recipe dependencies saved to 'recipe-depends.dot'")
|
||||
logger.info("Flatened recipe dependencies saved to 'recipe-depends.dot'")
|
||||
|
||||
def show_appends_with_no_recipes(self):
|
||||
# Determine which bbappends haven't been applied
|
||||
@@ -1218,7 +1170,6 @@ class BBCooker:
|
||||
elif regex == "":
|
||||
parselog.debug(1, "BBFILE_PATTERN_%s is empty" % c)
|
||||
errors = False
|
||||
continue
|
||||
else:
|
||||
try:
|
||||
cre = re.compile(regex)
|
||||
@@ -1613,7 +1564,7 @@ class BBCooker:
|
||||
pkgs_to_build.append(t)
|
||||
|
||||
if 'universe' in pkgs_to_build:
|
||||
parselog.verbnote("The \"universe\" target is only intended for testing and may produce errors.")
|
||||
parselog.warning("The \"universe\" target is only intended for testing and may produce errors.")
|
||||
parselog.debug(1, "collating packages for \"universe\"")
|
||||
pkgs_to_build.remove('universe')
|
||||
for mc in self.multiconfigs:
|
||||
@@ -1652,6 +1603,8 @@ class BBCooker:
|
||||
|
||||
if self.parser:
|
||||
self.parser.shutdown(clean=not force, force=force)
|
||||
self.notifier.stop()
|
||||
self.confignotifier.stop()
|
||||
|
||||
def finishcommand(self):
|
||||
self.state = state.initial
|
||||
@@ -1680,10 +1633,7 @@ class CookerExit(bb.event.Event):
|
||||
class CookerCollectFiles(object):
|
||||
def __init__(self, priorities):
|
||||
self.bbappends = []
|
||||
# Priorities is a list of tuples, with the second element as the pattern.
|
||||
# We need to sort the list with the longest pattern first, and so on to
|
||||
# the shortest. This allows nested layers to be properly evaluated.
|
||||
self.bbfile_config_priorities = sorted(priorities, key=lambda tup: tup[1], reverse=True)
|
||||
self.bbfile_config_priorities = priorities
|
||||
|
||||
def calc_bbfile_priority( self, filename, matched = None ):
|
||||
for _, _, regex, pri in self.bbfile_config_priorities:
|
||||
@@ -1857,25 +1807,21 @@ class CookerCollectFiles(object):
|
||||
realfn, cls, mc = bb.cache.virtualfn2realfn(p)
|
||||
priorities[p] = self.calc_bbfile_priority(realfn, matched)
|
||||
|
||||
# Don't show the warning if the BBFILE_PATTERN did match .bbappend files
|
||||
unmatched = set()
|
||||
for _, _, regex, pri in self.bbfile_config_priorities:
|
||||
if not regex in matched:
|
||||
unmatched.add(regex)
|
||||
|
||||
# Don't show the warning if the BBFILE_PATTERN did match .bbappend files
|
||||
def find_bbappend_match(regex):
|
||||
def findmatch(regex):
|
||||
for b in self.bbappends:
|
||||
(bbfile, append) = b
|
||||
if regex.match(append):
|
||||
# If the bbappend is matched by already "matched set", return False
|
||||
for matched_regex in matched:
|
||||
if matched_regex.match(append):
|
||||
return False
|
||||
return True
|
||||
return False
|
||||
|
||||
for unmatch in unmatched.copy():
|
||||
if find_bbappend_match(unmatch):
|
||||
if findmatch(unmatch):
|
||||
unmatched.remove(unmatch)
|
||||
|
||||
for collection, pattern, regex, _ in self.bbfile_config_priorities:
|
||||
|
||||
@@ -26,7 +26,6 @@ import logging
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import hashlib
|
||||
from functools import wraps
|
||||
import bb
|
||||
from bb import data
|
||||
@@ -144,8 +143,7 @@ class CookerConfiguration(object):
|
||||
self.writeeventlog = False
|
||||
self.server_only = False
|
||||
self.limited_deps = False
|
||||
self.runall = []
|
||||
self.runonly = []
|
||||
self.runall = None
|
||||
|
||||
self.env = {}
|
||||
|
||||
@@ -280,7 +278,6 @@ class CookerDataBuilder(object):
|
||||
self.mcdata = {}
|
||||
|
||||
def parseBaseConfiguration(self):
|
||||
data_hash = hashlib.sha256()
|
||||
try:
|
||||
bb.parse.init_parser(self.basedata)
|
||||
self.data = self.parseConfigurationFiles(self.prefiles, self.postfiles)
|
||||
@@ -304,7 +301,7 @@ class CookerDataBuilder(object):
|
||||
bb.event.fire(bb.event.ConfigParsed(), self.data)
|
||||
|
||||
bb.parse.init_parser(self.data)
|
||||
data_hash.update(self.data.get_hash().encode('utf-8'))
|
||||
self.data_hash = self.data.get_hash()
|
||||
self.mcdata[''] = self.data
|
||||
|
||||
multiconfig = (self.data.getVar("BBMULTICONFIG") or "").split()
|
||||
@@ -312,11 +309,9 @@ class CookerDataBuilder(object):
|
||||
mcdata = self.parseConfigurationFiles(self.prefiles, self.postfiles, config)
|
||||
bb.event.fire(bb.event.ConfigParsed(), mcdata)
|
||||
self.mcdata[config] = mcdata
|
||||
data_hash.update(mcdata.get_hash().encode('utf-8'))
|
||||
if multiconfig:
|
||||
bb.event.fire(bb.event.MultiConfigParsed(self.mcdata), self.data)
|
||||
|
||||
self.data_hash = data_hash.hexdigest()
|
||||
except (SyntaxError, bb.BBHandledException):
|
||||
raise bb.BBHandledException
|
||||
except bb.data_smart.ExpansionError as e:
|
||||
@@ -400,8 +395,6 @@ class CookerDataBuilder(object):
|
||||
if compat and not (compat & layerseries):
|
||||
bb.fatal("Layer %s is not compatible with the core layer which only supports these series: %s (layer is compatible with %s)"
|
||||
% (c, " ".join(layerseries), " ".join(compat)))
|
||||
elif not compat and not data.getVar("BB_WORKERCONTEXT"):
|
||||
bb.warn("Layer %s should set LAYERSERIES_COMPAT_%s in its conf/layer.conf file to list the core layer names it is compatible with." % (c, c))
|
||||
|
||||
if not data.getVar("BBPATH"):
|
||||
msg = "The BBPATH variable is not set"
|
||||
|
||||
@@ -16,10 +16,6 @@ def createDaemon(function, logfile):
|
||||
background as a daemon, returning control to the caller.
|
||||
"""
|
||||
|
||||
# Ensure stdout/stderror are flushed before forking to avoid duplicate output
|
||||
sys.stdout.flush()
|
||||
sys.stderr.flush()
|
||||
|
||||
try:
|
||||
# Fork a child process so the parent can exit. This returns control to
|
||||
# the command-line or shell. It also guarantees that the child will not
|
||||
@@ -53,8 +49,8 @@ def createDaemon(function, logfile):
|
||||
# exit() or _exit()?
|
||||
# _exit is like exit(), but it doesn't call any functions registered
|
||||
# with atexit (and on_exit) or any registered signal handlers. It also
|
||||
# closes any open file descriptors, but doesn't flush any buffered output.
|
||||
# Using exit() may cause all any temporary files to be unexpectedly
|
||||
# closes any open file descriptors. Using exit() may cause all stdio
|
||||
# streams to be flushed twice and any temporary files may be unexpectedly
|
||||
# removed. It's therefore recommended that child branches of a fork()
|
||||
# and the parent branch(es) of a daemon use _exit().
|
||||
os._exit(0)
|
||||
@@ -65,19 +61,17 @@ def createDaemon(function, logfile):
|
||||
# The second child.
|
||||
|
||||
# Replace standard fds with our own
|
||||
with open('/dev/null', 'r') as si:
|
||||
os.dup2(si.fileno(), sys.stdin.fileno())
|
||||
si = open('/dev/null', 'r')
|
||||
os.dup2(si.fileno(), sys.stdin.fileno())
|
||||
|
||||
try:
|
||||
so = open(logfile, 'a+')
|
||||
se = so
|
||||
os.dup2(so.fileno(), sys.stdout.fileno())
|
||||
os.dup2(so.fileno(), sys.stderr.fileno())
|
||||
os.dup2(se.fileno(), sys.stderr.fileno())
|
||||
except io.UnsupportedOperation:
|
||||
sys.stdout = open(logfile, 'a+')
|
||||
|
||||
# Have stdout and stderr be the same so log output matches chronologically
|
||||
# and there aren't two separate buffers
|
||||
sys.stderr = sys.stdout
|
||||
sys.stderr = sys.stdout
|
||||
|
||||
try:
|
||||
function()
|
||||
@@ -85,9 +79,4 @@ def createDaemon(function, logfile):
|
||||
traceback.print_exc()
|
||||
finally:
|
||||
bb.event.print_ui_queue()
|
||||
# os._exit() doesn't flush open files like os.exit() does. Manually flush
|
||||
# stdout and stderr so that any logging output will be seen, particularly
|
||||
# exception tracebacks.
|
||||
sys.stdout.flush()
|
||||
sys.stderr.flush()
|
||||
os._exit(0)
|
||||
|
||||
@@ -38,7 +38,6 @@ the speed is more critical here.
|
||||
# Based on functions from the base bb module, Copyright 2003 Holger Schurig
|
||||
|
||||
import sys, os, re
|
||||
import hashlib
|
||||
if sys.argv[0][-5:] == "pydoc":
|
||||
path = os.path.dirname(os.path.dirname(sys.argv[1]))
|
||||
else:
|
||||
@@ -284,12 +283,14 @@ def build_dependencies(key, keys, shelldeps, varflagsexcl, d):
|
||||
try:
|
||||
if key[-1] == ']':
|
||||
vf = key[:-1].split('[')
|
||||
value, parser = d.getVarFlag(vf[0], vf[1], False, retparser=True)
|
||||
value = d.getVarFlag(vf[0], vf[1], False)
|
||||
parser = d.expandWithRefs(value, key)
|
||||
deps |= parser.references
|
||||
deps = deps | (keys & parser.execs)
|
||||
return deps, value
|
||||
varflags = d.getVarFlags(key, ["vardeps", "vardepvalue", "vardepsexclude", "exports", "postfuncs", "prefuncs", "lineno", "filename"]) or {}
|
||||
vardeps = varflags.get("vardeps")
|
||||
value = d.getVarFlag(key, "_content", False)
|
||||
|
||||
def handle_contains(value, contains, d):
|
||||
newvalue = ""
|
||||
@@ -308,19 +309,10 @@ def build_dependencies(key, keys, shelldeps, varflagsexcl, d):
|
||||
return newvalue
|
||||
return value + newvalue
|
||||
|
||||
def handle_remove(value, deps, removes, d):
|
||||
for r in sorted(removes):
|
||||
r2 = d.expandWithRefs(r, None)
|
||||
value += "\n_remove of %s" % r
|
||||
deps |= r2.references
|
||||
deps = deps | (keys & r2.execs)
|
||||
return value
|
||||
|
||||
if "vardepvalue" in varflags:
|
||||
value = varflags.get("vardepvalue")
|
||||
value = varflags.get("vardepvalue")
|
||||
elif varflags.get("func"):
|
||||
if varflags.get("python"):
|
||||
value = d.getVarFlag(key, "_content", False)
|
||||
parser = bb.codeparser.PythonParser(key, logger)
|
||||
if value and "\t" in value:
|
||||
logger.warning("Variable %s contains tabs, please remove these (%s)" % (key, d.getVar("FILE")))
|
||||
@@ -329,15 +321,13 @@ def build_dependencies(key, keys, shelldeps, varflagsexcl, d):
|
||||
deps = deps | (keys & parser.execs)
|
||||
value = handle_contains(value, parser.contains, d)
|
||||
else:
|
||||
value, parsedvar = d.getVarFlag(key, "_content", False, retparser=True)
|
||||
parsedvar = d.expandWithRefs(value, key)
|
||||
parser = bb.codeparser.ShellParser(key, logger)
|
||||
parser.parse_shell(parsedvar.value)
|
||||
deps = deps | shelldeps
|
||||
deps = deps | parsedvar.references
|
||||
deps = deps | (keys & parser.execs) | (keys & parsedvar.execs)
|
||||
value = handle_contains(value, parsedvar.contains, d)
|
||||
if hasattr(parsedvar, "removes"):
|
||||
value = handle_remove(value, deps, parsedvar.removes, d)
|
||||
if vardeps is None:
|
||||
parser.log.flush()
|
||||
if "prefuncs" in varflags:
|
||||
@@ -347,12 +337,10 @@ def build_dependencies(key, keys, shelldeps, varflagsexcl, d):
|
||||
if "exports" in varflags:
|
||||
deps = deps | set(varflags["exports"].split())
|
||||
else:
|
||||
value, parser = d.getVarFlag(key, "_content", False, retparser=True)
|
||||
parser = d.expandWithRefs(value, key)
|
||||
deps |= parser.references
|
||||
deps = deps | (keys & parser.execs)
|
||||
value = handle_contains(value, parser.contains, d)
|
||||
if hasattr(parser, "removes"):
|
||||
value = handle_remove(value, deps, parser.removes, d)
|
||||
|
||||
if "vardepvalueexclude" in varflags:
|
||||
exclude = varflags.get("vardepvalueexclude")
|
||||
@@ -406,43 +394,6 @@ def generate_dependencies(d):
|
||||
#print "For %s: %s" % (task, str(deps[task]))
|
||||
return tasklist, deps, values
|
||||
|
||||
def generate_dependency_hash(tasklist, gendeps, lookupcache, whitelist, fn):
|
||||
taskdeps = {}
|
||||
basehash = {}
|
||||
|
||||
for task in tasklist:
|
||||
data = lookupcache[task]
|
||||
|
||||
if data is None:
|
||||
bb.error("Task %s from %s seems to be empty?!" % (task, fn))
|
||||
data = ''
|
||||
|
||||
gendeps[task] -= whitelist
|
||||
newdeps = gendeps[task]
|
||||
seen = set()
|
||||
while newdeps:
|
||||
nextdeps = newdeps
|
||||
seen |= nextdeps
|
||||
newdeps = set()
|
||||
for dep in nextdeps:
|
||||
if dep in whitelist:
|
||||
continue
|
||||
gendeps[dep] -= whitelist
|
||||
newdeps |= gendeps[dep]
|
||||
newdeps -= seen
|
||||
|
||||
alldeps = sorted(seen)
|
||||
for dep in alldeps:
|
||||
data = data + dep
|
||||
var = lookupcache[dep]
|
||||
if var is not None:
|
||||
data = data + str(var)
|
||||
k = fn + "." + task
|
||||
basehash[k] = hashlib.md5(data.encode("utf-8")).hexdigest()
|
||||
taskdeps[task] = alldeps
|
||||
|
||||
return taskdeps, basehash
|
||||
|
||||
def inherits_class(klass, d):
|
||||
val = d.getVar('__inherit_cache', False) or []
|
||||
needle = os.path.join('classes', '%s.bbclass' % klass)
|
||||
|
||||
@@ -42,7 +42,6 @@ __setvar_keyword__ = ["_append", "_prepend", "_remove"]
|
||||
__setvar_regexp__ = re.compile('(?P<base>.*?)(?P<keyword>_append|_prepend|_remove)(_(?P<add>[^A-Z]*))?$')
|
||||
__expand_var_regexp__ = re.compile(r"\${[^{}@\n\t :]+}")
|
||||
__expand_python_regexp__ = re.compile(r"\${@.+?}")
|
||||
__whitespace_split__ = re.compile('(\s)')
|
||||
|
||||
def infer_caller_details(loginfo, parent = False, varval = True):
|
||||
"""Save the caller the trouble of specifying everything."""
|
||||
@@ -105,7 +104,11 @@ class VariableParse:
|
||||
if self.varname and key:
|
||||
if self.varname == key:
|
||||
raise Exception("variable %s references itself!" % self.varname)
|
||||
var = self.d.getVarFlag(key, "_content")
|
||||
if key in self.d.expand_cache:
|
||||
varparse = self.d.expand_cache[key]
|
||||
var = varparse.value
|
||||
else:
|
||||
var = self.d.getVarFlag(key, "_content")
|
||||
self.references.add(key)
|
||||
if var is not None:
|
||||
return var
|
||||
@@ -264,16 +267,6 @@ class VariableHistory(object):
|
||||
return
|
||||
self.variables[var].append(loginfo.copy())
|
||||
|
||||
def rename_variable_hist(self, oldvar, newvar):
|
||||
if not self.dataroot._tracking:
|
||||
return
|
||||
if oldvar not in self.variables:
|
||||
return
|
||||
if newvar not in self.variables:
|
||||
self.variables[newvar] = []
|
||||
for i in self.variables[oldvar]:
|
||||
self.variables[newvar].append(i.copy())
|
||||
|
||||
def variable(self, var):
|
||||
remote_connector = self.dataroot.getVar('_remote_data', False)
|
||||
if remote_connector:
|
||||
@@ -408,6 +401,9 @@ class DataSmart(MutableMapping):
|
||||
if not isinstance(s, str): # sanity check
|
||||
return VariableParse(varname, self, s)
|
||||
|
||||
if varname and varname in self.expand_cache:
|
||||
return self.expand_cache[varname]
|
||||
|
||||
varparse = VariableParse(varname, self)
|
||||
|
||||
while s.find('${') != -1:
|
||||
@@ -431,6 +427,9 @@ class DataSmart(MutableMapping):
|
||||
|
||||
varparse.value = s
|
||||
|
||||
if varname:
|
||||
self.expand_cache[varname] = varparse
|
||||
|
||||
return varparse
|
||||
|
||||
def expand(self, s, varname = None):
|
||||
@@ -499,7 +498,6 @@ class DataSmart(MutableMapping):
|
||||
|
||||
def setVar(self, var, value, **loginfo):
|
||||
#print("var=" + str(var) + " val=" + str(value))
|
||||
self.expand_cache = {}
|
||||
parsing=False
|
||||
if 'parsing' in loginfo:
|
||||
parsing=True
|
||||
@@ -512,7 +510,7 @@ class DataSmart(MutableMapping):
|
||||
|
||||
if 'op' not in loginfo:
|
||||
loginfo['op'] = "set"
|
||||
|
||||
self.expand_cache = {}
|
||||
match = __setvar_regexp__.match(var)
|
||||
if match and match.group("keyword") in __setvar_keyword__:
|
||||
base = match.group('base')
|
||||
@@ -621,7 +619,6 @@ class DataSmart(MutableMapping):
|
||||
|
||||
val = self.getVar(key, 0, parsing=True)
|
||||
if val is not None:
|
||||
self.varhistory.rename_variable_hist(key, newkey)
|
||||
loginfo['variable'] = newkey
|
||||
loginfo['op'] = 'rename from %s' % key
|
||||
loginfo['detail'] = val
|
||||
@@ -663,7 +660,6 @@ class DataSmart(MutableMapping):
|
||||
self.setVar(var + "_prepend", value, ignore=True, parsing=True)
|
||||
|
||||
def delVar(self, var, **loginfo):
|
||||
self.expand_cache = {}
|
||||
if '_remote_data' in self.dict:
|
||||
connector = self.dict["_remote_data"]["_content"]
|
||||
res = connector.delVar(var)
|
||||
@@ -673,6 +669,7 @@ class DataSmart(MutableMapping):
|
||||
loginfo['detail'] = ""
|
||||
loginfo['op'] = 'del'
|
||||
self.varhistory.record(**loginfo)
|
||||
self.expand_cache = {}
|
||||
self.dict[var] = {}
|
||||
if var in self.overridedata:
|
||||
del self.overridedata[var]
|
||||
@@ -695,13 +692,13 @@ class DataSmart(MutableMapping):
|
||||
override = None
|
||||
|
||||
def setVarFlag(self, var, flag, value, **loginfo):
|
||||
self.expand_cache = {}
|
||||
if '_remote_data' in self.dict:
|
||||
connector = self.dict["_remote_data"]["_content"]
|
||||
res = connector.setVarFlag(var, flag, value)
|
||||
if not res:
|
||||
return
|
||||
|
||||
self.expand_cache = {}
|
||||
if 'op' not in loginfo:
|
||||
loginfo['op'] = "set"
|
||||
loginfo['flag'] = flag
|
||||
@@ -722,21 +719,9 @@ class DataSmart(MutableMapping):
|
||||
self.dict["__exportlist"]["_content"] = set()
|
||||
self.dict["__exportlist"]["_content"].add(var)
|
||||
|
||||
def getVarFlag(self, var, flag, expand=True, noweakdefault=False, parsing=False, retparser=False):
|
||||
if flag == "_content":
|
||||
cachename = var
|
||||
else:
|
||||
if not flag:
|
||||
bb.warn("Calling getVarFlag with flag unset is invalid")
|
||||
return None
|
||||
cachename = var + "[" + flag + "]"
|
||||
|
||||
if expand and cachename in self.expand_cache:
|
||||
return self.expand_cache[cachename].value
|
||||
|
||||
def getVarFlag(self, var, flag, expand=True, noweakdefault=False, parsing=False):
|
||||
local_var, overridedata = self._findVar(var)
|
||||
value = None
|
||||
removes = set()
|
||||
if flag == "_content" and overridedata is not None and not parsing:
|
||||
match = False
|
||||
active = {}
|
||||
@@ -763,11 +748,7 @@ class DataSmart(MutableMapping):
|
||||
match = active[a]
|
||||
del active[a]
|
||||
if match:
|
||||
value, subparser = self.getVarFlag(match, "_content", False, retparser=True)
|
||||
if hasattr(subparser, "removes"):
|
||||
# We have to carry the removes from the overridden variable to apply at the
|
||||
# end of processing
|
||||
removes = subparser.removes
|
||||
value = self.getVar(match, False)
|
||||
|
||||
if local_var is not None and value is None:
|
||||
if flag in local_var:
|
||||
@@ -803,13 +784,17 @@ class DataSmart(MutableMapping):
|
||||
if match:
|
||||
value = r + value
|
||||
|
||||
parser = None
|
||||
if expand or retparser:
|
||||
parser = self.expandWithRefs(value, cachename)
|
||||
if expand:
|
||||
value = parser.value
|
||||
if expand and value:
|
||||
# Only getvar (flag == _content) hits the expand cache
|
||||
cachename = None
|
||||
if flag == "_content":
|
||||
cachename = var
|
||||
else:
|
||||
cachename = var + "[" + flag + "]"
|
||||
value = self.expand(value, cachename)
|
||||
|
||||
if value and flag == "_content" and local_var is not None and "_remove" in local_var and not parsing:
|
||||
if value and flag == "_content" and local_var is not None and "_remove" in local_var:
|
||||
removes = []
|
||||
self.need_overrides()
|
||||
for (r, o) in local_var["_remove"]:
|
||||
match = True
|
||||
@@ -818,45 +803,26 @@ class DataSmart(MutableMapping):
|
||||
if not o2 in self.overrides:
|
||||
match = False
|
||||
if match:
|
||||
removes.add(r)
|
||||
|
||||
if value and flag == "_content" and not parsing:
|
||||
if removes and parser:
|
||||
expanded_removes = {}
|
||||
for r in removes:
|
||||
expanded_removes[r] = self.expand(r).split()
|
||||
|
||||
parser.removes = set()
|
||||
val = ""
|
||||
for v in __whitespace_split__.split(parser.value):
|
||||
skip = False
|
||||
for r in removes:
|
||||
if v in expanded_removes[r]:
|
||||
parser.removes.add(r)
|
||||
skip = True
|
||||
if skip:
|
||||
continue
|
||||
val = val + v
|
||||
parser.value = val
|
||||
if expand:
|
||||
value = parser.value
|
||||
|
||||
if parser:
|
||||
self.expand_cache[cachename] = parser
|
||||
|
||||
if retparser:
|
||||
return value, parser
|
||||
removes.extend(self.expand(r).split())
|
||||
|
||||
if removes:
|
||||
filtered = filter(lambda v: v not in removes,
|
||||
value.split())
|
||||
value = " ".join(filtered)
|
||||
if expand and var in self.expand_cache:
|
||||
# We need to ensure the expand cache has the correct value
|
||||
# flag == "_content" here
|
||||
self.expand_cache[var].value = value
|
||||
return value
|
||||
|
||||
def delVarFlag(self, var, flag, **loginfo):
|
||||
self.expand_cache = {}
|
||||
if '_remote_data' in self.dict:
|
||||
connector = self.dict["_remote_data"]["_content"]
|
||||
res = connector.delVarFlag(var, flag)
|
||||
if not res:
|
||||
return
|
||||
|
||||
self.expand_cache = {}
|
||||
local_var, _ = self._findVar(var)
|
||||
if not local_var:
|
||||
return
|
||||
|
||||
@@ -141,9 +141,6 @@ def print_ui_queue():
|
||||
logger = logging.getLogger("BitBake")
|
||||
if not _uiready:
|
||||
from bb.msg import BBLogFormatter
|
||||
# Flush any existing buffered content
|
||||
sys.stdout.flush()
|
||||
sys.stderr.flush()
|
||||
stdout = logging.StreamHandler(sys.stdout)
|
||||
stderr = logging.StreamHandler(sys.stderr)
|
||||
formatter = BBLogFormatter("%(levelname)s: %(message)s")
|
||||
@@ -398,7 +395,7 @@ class RecipeEvent(Event):
|
||||
Event.__init__(self)
|
||||
|
||||
class RecipePreFinalise(RecipeEvent):
|
||||
""" Recipe Parsing Complete but not yet finalised"""
|
||||
""" Recipe Parsing Complete but not yet finialised"""
|
||||
|
||||
class RecipeTaskPreProcess(RecipeEvent):
|
||||
"""
|
||||
@@ -452,6 +449,12 @@ class BuildBase(Event):
|
||||
def setName(self, name):
|
||||
self._name = name
|
||||
|
||||
def getCfg(self):
|
||||
return self.data
|
||||
|
||||
def setCfg(self, cfg):
|
||||
self.data = cfg
|
||||
|
||||
def getFailures(self):
|
||||
"""
|
||||
Return the number of failed packages
|
||||
@@ -460,6 +463,9 @@ class BuildBase(Event):
|
||||
|
||||
pkgs = property(getPkgs, setPkgs, None, "pkgs property")
|
||||
name = property(getName, setName, None, "name property")
|
||||
cfg = property(getCfg, setCfg, None, "cfg property")
|
||||
|
||||
|
||||
|
||||
class BuildInit(BuildBase):
|
||||
"""buildFile or buildTargets was invoked"""
|
||||
|
||||
@@ -256,7 +256,7 @@ class URI(object):
|
||||
|
||||
# Identify if the URI is relative or not
|
||||
if urlp.scheme in self._relative_schemes and \
|
||||
re.compile(r"^\w+:(?!//)").match(uri):
|
||||
re.compile("^\w+:(?!//)").match(uri):
|
||||
self.relative = True
|
||||
|
||||
if not self.relative:
|
||||
@@ -383,7 +383,7 @@ def decodeurl(url):
|
||||
path = location
|
||||
else:
|
||||
host = location
|
||||
path = "/"
|
||||
path = ""
|
||||
if user:
|
||||
m = re.compile('(?P<user>[^:]+)(:?(?P<pswd>.*))').match(user)
|
||||
if m:
|
||||
@@ -452,8 +452,8 @@ def uri_replace(ud, uri_find, uri_replace, replacements, d, mirrortarball=None):
|
||||
# Handle URL parameters
|
||||
if i:
|
||||
# Any specified URL parameters must match
|
||||
for k in uri_find_decoded[loc]:
|
||||
if uri_decoded[loc][k] != uri_find_decoded[loc][k]:
|
||||
for k in uri_replace_decoded[loc]:
|
||||
if uri_decoded[loc][k] != uri_replace_decoded[loc][k]:
|
||||
return None
|
||||
# Overwrite any specified replacement parameters
|
||||
for k in uri_replace_decoded[loc]:
|
||||
@@ -643,25 +643,26 @@ def verify_donestamp(ud, d, origud=None):
|
||||
if not ud.needdonestamp or (origud and not origud.needdonestamp):
|
||||
return True
|
||||
|
||||
if not os.path.exists(ud.localpath):
|
||||
# local path does not exist
|
||||
if os.path.exists(ud.donestamp):
|
||||
# done stamp exists, but the downloaded file does not; the done stamp
|
||||
# must be incorrect, re-trigger the download
|
||||
bb.utils.remove(ud.donestamp)
|
||||
if not os.path.exists(ud.donestamp):
|
||||
return False
|
||||
|
||||
if (not ud.method.supports_checksum(ud) or
|
||||
(origud and not origud.method.supports_checksum(origud))):
|
||||
# if done stamp exists and checksums not supported; assume the local
|
||||
# file is current
|
||||
return os.path.exists(ud.donestamp)
|
||||
# done stamp exists, checksums not supported; assume the local file is
|
||||
# current
|
||||
return True
|
||||
|
||||
if not os.path.exists(ud.localpath):
|
||||
# done stamp exists, but the downloaded file does not; the done stamp
|
||||
# must be incorrect, re-trigger the download
|
||||
bb.utils.remove(ud.donestamp)
|
||||
return False
|
||||
|
||||
precomputed_checksums = {}
|
||||
# Only re-use the precomputed checksums if the donestamp is newer than the
|
||||
# file. Do not rely on the mtime of directories, though. If ud.localpath is
|
||||
# a directory, there will probably not be any checksums anyway.
|
||||
if os.path.exists(ud.donestamp) and (os.path.isdir(ud.localpath) or
|
||||
if (os.path.isdir(ud.localpath) or
|
||||
os.path.getmtime(ud.localpath) < os.path.getmtime(ud.donestamp)):
|
||||
try:
|
||||
with open(ud.donestamp, "rb") as cachefile:
|
||||
@@ -827,7 +828,6 @@ def runfetchcmd(cmd, d, quiet=False, cleanup=None, log=None, workdir=None):
|
||||
'NO_PROXY', 'no_proxy',
|
||||
'ALL_PROXY', 'all_proxy',
|
||||
'GIT_PROXY_COMMAND',
|
||||
'GIT_SSH',
|
||||
'GIT_SSL_CAINFO',
|
||||
'GIT_SMART_HTTP',
|
||||
'SSH_AUTH_SOCK', 'SSH_AGENT_PID',
|
||||
@@ -838,16 +838,14 @@ def runfetchcmd(cmd, d, quiet=False, cleanup=None, log=None, workdir=None):
|
||||
if not cleanup:
|
||||
cleanup = []
|
||||
|
||||
# If PATH contains WORKDIR which contains PV-PR which contains SRCPV we
|
||||
# If PATH contains WORKDIR which contains PV which contains SRCPV we
|
||||
# can end up in circular recursion here so give the option of breaking it
|
||||
# in a data store copy.
|
||||
try:
|
||||
d.getVar("PV")
|
||||
d.getVar("PR")
|
||||
except bb.data_smart.ExpansionError:
|
||||
d = bb.data.createCopy(d)
|
||||
d.setVar("PV", "fetcheravoidrecurse")
|
||||
d.setVar("PR", "fetcheravoidrecurse")
|
||||
|
||||
origenv = d.getVar("BB_ORIGENV", False)
|
||||
for var in exportvars:
|
||||
@@ -855,9 +853,6 @@ def runfetchcmd(cmd, d, quiet=False, cleanup=None, log=None, workdir=None):
|
||||
if val:
|
||||
cmd = 'export ' + var + '=\"%s\"; %s' % (val, cmd)
|
||||
|
||||
# Disable pseudo as it may affect ssh, potentially causing it to hang.
|
||||
cmd = 'export PSEUDO_DISABLED=1; ' + cmd
|
||||
|
||||
logger.debug(1, "Running %s", cmd)
|
||||
|
||||
success = False
|
||||
@@ -966,8 +961,7 @@ def rename_bad_checksum(ud, suffix):
|
||||
|
||||
new_localpath = "%s_bad-checksum_%s" % (ud.localpath, suffix)
|
||||
bb.warn("Renaming %s to %s" % (ud.localpath, new_localpath))
|
||||
if not bb.utils.movefile(ud.localpath, new_localpath):
|
||||
bb.warn("Renaming %s to %s failed, grep movefile in log.do_fetch to see why" % (ud.localpath, new_localpath))
|
||||
bb.utils.movefile(ud.localpath, new_localpath)
|
||||
|
||||
|
||||
def try_mirror_url(fetch, origud, ud, ld, check = False):
|
||||
@@ -1020,7 +1014,16 @@ def try_mirror_url(fetch, origud, ud, ld, check = False):
|
||||
origud.method.build_mirror_data(origud, ld)
|
||||
return origud.localpath
|
||||
# Otherwise the result is a local file:// and we symlink to it
|
||||
ensure_symlink(ud.localpath, origud.localpath)
|
||||
if not os.path.exists(origud.localpath):
|
||||
if os.path.islink(origud.localpath):
|
||||
# Broken symbolic link
|
||||
os.unlink(origud.localpath)
|
||||
|
||||
# As per above, in case two tasks end up here simultaneously.
|
||||
try:
|
||||
os.symlink(ud.localpath, origud.localpath)
|
||||
except FileExistsError:
|
||||
pass
|
||||
update_stamp(origud, ld)
|
||||
return ud.localpath
|
||||
|
||||
@@ -1054,22 +1057,6 @@ def try_mirror_url(fetch, origud, ud, ld, check = False):
|
||||
bb.utils.unlockfile(lf)
|
||||
|
||||
|
||||
def ensure_symlink(target, link_name):
|
||||
if not os.path.exists(link_name):
|
||||
if os.path.islink(link_name):
|
||||
# Broken symbolic link
|
||||
os.unlink(link_name)
|
||||
|
||||
# In case this is executing without any file locks held (as is
|
||||
# the case for file:// URLs), two tasks may end up here at the
|
||||
# same time, in which case we do not want the second task to
|
||||
# fail when the link has already been created by the first task.
|
||||
try:
|
||||
os.symlink(target, link_name)
|
||||
except FileExistsError:
|
||||
pass
|
||||
|
||||
|
||||
def try_mirrors(fetch, d, origud, mirrors, check = False):
|
||||
"""
|
||||
Try to use a mirrored version of the sources.
|
||||
@@ -1099,9 +1086,7 @@ def trusted_network(d, url):
|
||||
return True
|
||||
|
||||
pkgname = d.expand(d.getVar('PN', False))
|
||||
trusted_hosts = None
|
||||
if pkgname:
|
||||
trusted_hosts = d.getVarFlag('BB_ALLOWED_NETWORKS', pkgname, False)
|
||||
trusted_hosts = d.getVarFlag('BB_ALLOWED_NETWORKS', pkgname, False)
|
||||
|
||||
if not trusted_hosts:
|
||||
trusted_hosts = d.getVar('BB_ALLOWED_NETWORKS')
|
||||
@@ -1439,7 +1424,7 @@ class FetchMethod(object):
|
||||
cmd = 'gzip -dc %s > %s' % (file, efile)
|
||||
elif file.endswith('.bz2'):
|
||||
cmd = 'bzip2 -dc %s > %s' % (file, efile)
|
||||
elif file.endswith('.txz') or file.endswith('.tar.xz'):
|
||||
elif file.endswith('.tar.xz'):
|
||||
cmd = 'xz -dc %s | tar x --no-same-owner -f -' % file
|
||||
elif file.endswith('.xz'):
|
||||
cmd = 'xz -dc %s > %s' % (file, efile)
|
||||
@@ -1470,7 +1455,7 @@ class FetchMethod(object):
|
||||
else:
|
||||
cmd = 'rpm2cpio.sh %s | cpio -id' % (file)
|
||||
elif file.endswith('.deb') or file.endswith('.ipk'):
|
||||
output = subprocess.check_output(['ar', '-t', file], preexec_fn=subprocess_setup)
|
||||
output = subprocess.check_output('ar -t %s' % file, preexec_fn=subprocess_setup, shell=True)
|
||||
datafile = None
|
||||
if output:
|
||||
for line in output.decode().splitlines():
|
||||
|
||||
@@ -41,9 +41,8 @@ class Bzr(FetchMethod):
|
||||
init bzr specific variable within url data
|
||||
"""
|
||||
# Create paths to bzr checkouts
|
||||
bzrdir = d.getVar("BZRDIR") or (d.getVar("DL_DIR") + "/bzr")
|
||||
relpath = self._strip_leading_slashes(ud.path)
|
||||
ud.pkgdir = os.path.join(bzrdir, ud.host, relpath)
|
||||
ud.pkgdir = os.path.join(d.expand('${BZRDIR}'), ud.host, relpath)
|
||||
|
||||
ud.setup_revisions(d)
|
||||
|
||||
@@ -58,7 +57,7 @@ class Bzr(FetchMethod):
|
||||
command is "fetch", "update", "revno"
|
||||
"""
|
||||
|
||||
basecmd = d.getVar("FETCHCMD_bzr") or "/usr/bin/env bzr"
|
||||
basecmd = d.expand('${FETCHCMD_bzr}')
|
||||
|
||||
proto = ud.parm.get('protocol', 'http')
|
||||
|
||||
|
||||
@@ -69,6 +69,7 @@ from bb.fetch2 import FetchMethod
|
||||
from bb.fetch2 import FetchError
|
||||
from bb.fetch2 import runfetchcmd
|
||||
from bb.fetch2 import logger
|
||||
from distutils import spawn
|
||||
|
||||
class ClearCase(FetchMethod):
|
||||
"""Class to fetch urls via 'clearcase'"""
|
||||
@@ -106,7 +107,7 @@ class ClearCase(FetchMethod):
|
||||
else:
|
||||
ud.module = ""
|
||||
|
||||
ud.basecmd = d.getVar("FETCHCMD_ccrc") or "/usr/bin/env cleartool || rcleartool"
|
||||
ud.basecmd = d.getVar("FETCHCMD_ccrc") or spawn.find_executable("cleartool") or spawn.find_executable("rcleartool")
|
||||
|
||||
if d.getVar("SRCREV") == "INVALID":
|
||||
raise FetchError("Set a valid SRCREV for the clearcase fetcher in your recipe, e.g. SRCREV = \"/main/LATEST\" or any other label of your choice.")
|
||||
|
||||
@@ -110,7 +110,7 @@ class Cvs(FetchMethod):
|
||||
if ud.tag:
|
||||
options.append("-r %s" % ud.tag)
|
||||
|
||||
cvsbasecmd = d.getVar("FETCHCMD_cvs") or "/usr/bin/env cvs"
|
||||
cvsbasecmd = d.getVar("FETCHCMD_cvs")
|
||||
cvscmd = cvsbasecmd + " '-d" + cvsroot + "' co " + " ".join(options) + " " + ud.module
|
||||
cvsupdatecmd = cvsbasecmd + " '-d" + cvsroot + "' update -d -P " + " ".join(options)
|
||||
|
||||
@@ -121,8 +121,7 @@ class Cvs(FetchMethod):
|
||||
# create module directory
|
||||
logger.debug(2, "Fetch: checking for module directory")
|
||||
pkg = d.getVar('PN')
|
||||
cvsdir = d.getVar("CVSDIR") or (d.getVar("DL_DIR") + "/cvs")
|
||||
pkgdir = os.path.join(cvsdir, pkg)
|
||||
pkgdir = os.path.join(d.getVar('CVSDIR'), pkg)
|
||||
moddir = os.path.join(pkgdir, localdir)
|
||||
workdir = None
|
||||
if os.access(os.path.join(moddir, 'CVS'), os.R_OK):
|
||||
|
||||
@@ -125,9 +125,6 @@ class GitProgressHandler(bb.progress.LineFilterProgressHandler):
|
||||
|
||||
|
||||
class Git(FetchMethod):
|
||||
bitbake_dir = os.path.abspath(os.path.join(os.path.dirname(os.path.join(os.path.abspath(__file__))), '..', '..', '..'))
|
||||
make_shallow_path = os.path.join(bitbake_dir, 'bin', 'git-make-shallow')
|
||||
|
||||
"""Class to fetch a module or modules from git repositories"""
|
||||
def init(self, d):
|
||||
pass
|
||||
@@ -261,7 +258,7 @@ class Git(FetchMethod):
|
||||
gitsrcname = gitsrcname + '_' + ud.revisions[name]
|
||||
|
||||
dl_dir = d.getVar("DL_DIR")
|
||||
gitdir = d.getVar("GITDIR") or (dl_dir + "/git2")
|
||||
gitdir = d.getVar("GITDIR") or (dl_dir + "/git2/")
|
||||
ud.clonedir = os.path.join(gitdir, gitsrcname)
|
||||
ud.localfile = ud.clonedir
|
||||
|
||||
@@ -299,22 +296,17 @@ class Git(FetchMethod):
|
||||
return ud.clonedir
|
||||
|
||||
def need_update(self, ud, d):
|
||||
return self.clonedir_need_update(ud, d) or self.shallow_tarball_need_update(ud) or self.tarball_need_update(ud)
|
||||
|
||||
def clonedir_need_update(self, ud, d):
|
||||
if not os.path.exists(ud.clonedir):
|
||||
return True
|
||||
for name in ud.names:
|
||||
if not self._contains_ref(ud, d, name, ud.clonedir):
return True
if ud.shallow and ud.write_shallow_tarballs and not os.path.exists(ud.fullshallow):
return True
if ud.write_tarballs and not os.path.exists(ud.fullmirror):
return True
return False

def shallow_tarball_need_update(self, ud):
return ud.shallow and ud.write_shallow_tarballs and not os.path.exists(ud.fullshallow)

def tarball_need_update(self, ud):
return ud.write_tarballs and not os.path.exists(ud.fullmirror)

def try_premirror(self, ud, d):
# If we don't do this, updating an existing checkout with only premirrors
# is not possible
@@ -327,13 +319,16 @@ class Git(FetchMethod):
def download(self, ud, d):
"""Fetch url"""

no_clone = not os.path.exists(ud.clonedir)
need_update = no_clone or self.need_update(ud, d)

# A current clone is preferred to either tarball, a shallow tarball is
# preferred to an out of date clone, and a missing clone will use
# either tarball.
if ud.shallow and os.path.exists(ud.fullshallow) and self.need_update(ud, d):
if ud.shallow and os.path.exists(ud.fullshallow) and need_update:
ud.localpath = ud.fullshallow
return
elif os.path.exists(ud.fullmirror) and not os.path.exists(ud.clonedir):
elif os.path.exists(ud.fullmirror) and no_clone:
bb.utils.mkdirhier(ud.clonedir)
runfetchcmd("tar -xzf %s" % ud.fullmirror, d, workdir=ud.clonedir)

@@ -355,12 +350,11 @@ class Git(FetchMethod):
for name in ud.names:
if not self._contains_ref(ud, d, name, ud.clonedir):
needupdate = True
break

if needupdate:
output = runfetchcmd("%s remote" % ud.basecmd, d, quiet=True, workdir=ud.clonedir)
if "origin" in output:
runfetchcmd("%s remote rm origin" % ud.basecmd, d, workdir=ud.clonedir)
try:
runfetchcmd("%s remote rm origin" % ud.basecmd, d, workdir=ud.clonedir)
except bb.fetch2.FetchError:
logger.debug(1, "No Origin")

runfetchcmd("%s remote add --mirror=fetch origin %s" % (ud.basecmd, repourl), d, workdir=ud.clonedir)
fetch_cmd = "LANG=C %s fetch -f --prune --progress %s refs/*:refs/*" % (ud.basecmd, repourl)
@@ -369,14 +363,12 @@ class Git(FetchMethod):
progresshandler = GitProgressHandler(d)
runfetchcmd(fetch_cmd, d, log=progresshandler, workdir=ud.clonedir)
runfetchcmd("%s prune-packed" % ud.basecmd, d, workdir=ud.clonedir)
runfetchcmd("%s pack-refs --all" % ud.basecmd, d, workdir=ud.clonedir)
runfetchcmd("%s pack-redundant --all | xargs -r rm" % ud.basecmd, d, workdir=ud.clonedir)
try:
os.unlink(ud.fullmirror)
except OSError as exc:
if exc.errno != errno.ENOENT:
raise

for name in ud.names:
if not self._contains_ref(ud, d, name, ud.clonedir):
raise bb.fetch2.FetchError("Unable to find revision %s in branch %s even from upstream" % (ud.revisions[name], ud.branches[name]))
@@ -453,7 +445,7 @@ class Git(FetchMethod):
shallow_branches.append(r)

# Make the repository shallow
shallow_cmd = [self.make_shallow_path, '-s']
shallow_cmd = ['git', 'make-shallow', '-s']
for b in shallow_branches:
shallow_cmd.append('-r')
shallow_cmd.append(b)
@@ -476,30 +468,11 @@ class Git(FetchMethod):
if os.path.exists(destdir):
bb.utils.prunedir(destdir)

source_found = False
source_error = []

if not source_found:
clonedir_is_up_to_date = not self.clonedir_need_update(ud, d)
if clonedir_is_up_to_date:
runfetchcmd("%s clone %s %s/ %s" % (ud.basecmd, ud.cloneflags, ud.clonedir, destdir), d)
source_found = True
else:
source_error.append("clone directory not available or not up to date: " + ud.clonedir)

if not source_found:
if ud.shallow:
if os.path.exists(ud.fullshallow):
bb.utils.mkdirhier(destdir)
runfetchcmd("tar -xzf %s" % ud.fullshallow, d, workdir=destdir)
source_found = True
else:
source_error.append("shallow clone not available: " + ud.fullshallow)
else:
source_error.append("shallow clone not enabled")

if not source_found:
raise bb.fetch2.UnpackError("No up to date source found: " + "; ".join(source_error), ud.url)
if ud.shallow and (not os.path.exists(ud.clonedir) or self.need_update(ud, d)):
bb.utils.mkdirhier(destdir)
runfetchcmd("tar -xzf %s" % ud.fullshallow, d, workdir=destdir)
else:
runfetchcmd("%s clone %s %s/ %s" % (ud.basecmd, ud.cloneflags, ud.clonedir, destdir), d)

repourl = self._get_repo_url(ud)
runfetchcmd("%s remote set-url origin %s" % (ud.basecmd, repourl), d, workdir=destdir)
@@ -618,8 +591,7 @@ class Git(FetchMethod):
tagregex = re.compile(d.getVar('UPSTREAM_CHECK_GITTAGREGEX') or "(?P<pver>([0-9][\.|_]?)+)")
try:
output = self._lsremote(ud, d, "refs/tags/*")
except (bb.fetch2.FetchError, bb.fetch2.NetworkAccess) as e:
bb.note("Could not list remote: %s" % str(e))
except bb.fetch2.FetchError or bb.fetch2.NetworkAccess:
return pupver

verstring = ""

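The download() hunk above encodes a preference order between an existing clone, a shallow mirror tarball and a full mirror tarball. A rough standalone sketch of that decision follows; the function and parameter names are hypothetical illustrations, not the fetcher's actual API.

# Illustrative only: mirrors the comment "a current clone is preferred to either
# tarball, a shallow tarball is preferred to an out of date clone".
import os

def pick_source(clonedir, clone_current, shallow_tarball, full_mirror):
    # 1. an up to date clone wins outright
    if os.path.exists(clonedir) and clone_current:
        return "clone"
    # 2. a shallow tarball beats an out of date or missing clone
    if shallow_tarball and os.path.exists(shallow_tarball):
        return "shallow-tarball"
    # 3. a full mirror tarball can seed a missing clone
    if full_mirror and os.path.exists(full_mirror) and not os.path.exists(clonedir):
        return "full-mirror"
    # 4. otherwise fetch from the upstream repository
    return "upstream"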
@@ -31,12 +31,9 @@ NOTE: Switching a SRC_URI from "git://" to "gitsm://" requires a clean of your r

import os
import bb
import copy
from bb.fetch2.git import Git
from bb.fetch2 import runfetchcmd
from bb.fetch2 import logger
from bb.fetch2 import Fetch
from bb.fetch2 import BBFetchException

class GitSM(Git):
def supports(self, ud, d):
@@ -45,183 +42,94 @@ class GitSM(Git):
"""
return ud.type in ['gitsm']

def process_submodules(self, ud, workdir, function, d):
"""
Iterate over all of the submodules in this repository and execute
the 'function' for each of them.
"""

submodules = []
paths = {}
revision = {}
uris = {}
subrevision = {}

def parse_gitmodules(gitmodules):
modules = {}
module = ""
for line in gitmodules.splitlines():
if line.startswith('[submodule'):
module = line.split('"')[1]
modules[module] = {}
elif module and line.strip().startswith('path'):
path = line.split('=')[1].strip()
modules[module]['path'] = path
elif module and line.strip().startswith('url'):
url = line.split('=')[1].strip()
modules[module]['url'] = url
return modules

# Collect the defined submodules, and their attributes
def uses_submodules(self, ud, d, wd):
for name in ud.names:
try:
gitmodules = runfetchcmd("%s show %s:.gitmodules" % (ud.basecmd, ud.revisions[name]), d, quiet=True, workdir=workdir)
except:
# No submodules to update
continue

for m, md in parse_gitmodules(gitmodules).items():
try:
module_hash = runfetchcmd("%s ls-tree -z -d %s %s" % (ud.basecmd, ud.revisions[name], md['path']), d, quiet=True, workdir=workdir)
except:
# If the command fails, we don't have a valid file to check. If it doesn't
# fail -- it still might be a failure, see next check...
module_hash = ""

if not module_hash:
logger.debug(1, "submodule %s is defined, but is not initialized in the repository. Skipping", m)
continue

submodules.append(m)
paths[m] = md['path']
revision[m] = ud.revisions[name]
uris[m] = md['url']
subrevision[m] = module_hash.split()[2]

# Convert relative to absolute uri based on parent uri
if uris[m].startswith('..'):
newud = copy.copy(ud)
newud.path = os.path.realpath(os.path.join(newud.path, uris[m]))
uris[m] = Git._get_repo_url(self, newud)

for module in submodules:
# Translate the module url into a SRC_URI

if "://" in uris[module]:
# Properly formated URL already
proto = uris[module].split(':', 1)[0]
url = uris[module].replace('%s:' % proto, 'gitsm:', 1)
else:
if ":" in uris[module]:
# Most likely an SSH style reference
proto = "ssh"
if ":/" in uris[module]:
# Absolute reference, easy to convert..
url = "gitsm://" + uris[module].replace(':/', '/', 1)
else:
# Relative reference, no way to know if this is right!
logger.warning("Submodule included by %s refers to relative ssh reference %s. References may fail if not absolute." % (ud.url, uris[module]))
url = "gitsm://" + uris[module].replace(':', '/', 1)
else:
# This has to be a file reference
proto = "file"
url = "gitsm://" + uris[module]

url += ';protocol=%s' % proto
url += ";name=%s" % module
url += ";subpath=%s" % module

ld = d.createCopy()
# Not necessary to set SRC_URI, since we're passing the URI to
# Fetch.
#ld.setVar('SRC_URI', url)
ld.setVar('SRCREV_%s' % module, subrevision[module])

# Workaround for issues with SRCPV/SRCREV_FORMAT errors
# error refer to 'multiple' repositories. Only the repository
# in the original SRC_URI actually matters...
ld.setVar('SRCPV', d.getVar('SRCPV'))
ld.setVar('SRCREV_FORMAT', module)

function(ud, url, module, paths[module], ld)

return submodules != []

def need_update(self, ud, d):
if Git.need_update(self, ud, d):
return True

try:
# Check for the nugget dropped by the download operation
known_srcrevs = runfetchcmd("%s config --get-all bitbake.srcrev" % \
(ud.basecmd), d, workdir=ud.clonedir)

if ud.revisions[ud.names[0]] not in known_srcrevs.split():
runfetchcmd("%s show %s:.gitmodules" % (ud.basecmd, ud.revisions[name]), d, quiet=True, workdir=wd)
return True
except bb.fetch2.FetchError:
# No srcrev nuggets, so this is new and needs to be updated
return True

except bb.fetch.FetchError:
pass
return False

def _set_relative_paths(self, repopath):
"""
Fix submodule paths to be relative instead of absolute,
so that when we move the repo it doesn't break
(In Git 1.7.10+ this is done automatically)
"""
submodules = []
with open(os.path.join(repopath, '.gitmodules'), 'r') as f:
for line in f.readlines():
if line.startswith('[submodule'):
submodules.append(line.split('"')[1])

for module in submodules:
repo_conf = os.path.join(repopath, module, '.git')
if os.path.exists(repo_conf):
with open(repo_conf, 'r') as f:
lines = f.readlines()
newpath = ''
for i, line in enumerate(lines):
if line.startswith('gitdir:'):
oldpath = line.split(': ')[-1].rstrip()
if oldpath.startswith('/'):
newpath = '../' * (module.count('/') + 1) + '.git/modules/' + module
lines[i] = 'gitdir: %s\n' % newpath
break
if newpath:
with open(repo_conf, 'w') as f:
for line in lines:
f.write(line)

repo_conf2 = os.path.join(repopath, '.git', 'modules', module, 'config')
if os.path.exists(repo_conf2):
with open(repo_conf2, 'r') as f:
lines = f.readlines()
newpath = ''
for i, line in enumerate(lines):
if line.lstrip().startswith('worktree = '):
oldpath = line.split(' = ')[-1].rstrip()
if oldpath.startswith('/'):
newpath = '../' * (module.count('/') + 3) + module
lines[i] = '\tworktree = %s\n' % newpath
break
if newpath:
with open(repo_conf2, 'w') as f:
for line in lines:
f.write(line)

def update_submodules(self, ud, d):
# We have to convert bare -> full repo, do the submodule bit, then convert back
tmpclonedir = ud.clonedir + ".tmp"
gitdir = tmpclonedir + os.sep + ".git"
bb.utils.remove(tmpclonedir, True)
os.mkdir(tmpclonedir)
os.rename(ud.clonedir, gitdir)
runfetchcmd("sed " + gitdir + "/config -i -e 's/bare.*=.*true/bare = false/'", d)
runfetchcmd(ud.basecmd + " reset --hard", d, workdir=tmpclonedir)
runfetchcmd(ud.basecmd + " checkout -f " + ud.revisions[ud.names[0]], d, workdir=tmpclonedir)
runfetchcmd(ud.basecmd + " submodule update --init --recursive", d, workdir=tmpclonedir)
self._set_relative_paths(tmpclonedir)
runfetchcmd("sed " + gitdir + "/config -i -e 's/bare.*=.*false/bare = true/'", d, workdir=tmpclonedir)
os.rename(gitdir, ud.clonedir,)
bb.utils.remove(tmpclonedir, True)

def download(self, ud, d):
def download_submodule(ud, url, module, modpath, d):
url += ";bareclone=1;nobranch=1"

# Is the following still needed?
#url += ";nocheckout=1"

try:
newfetch = Fetch([url], d, cache=False)
newfetch.download()
# Drop a nugget to add each of the srcrevs we've fetched (used by need_update)
runfetchcmd("%s config --add bitbake.srcrev %s" % \
(ud.basecmd, ud.revisions[ud.names[0]]), d, workdir=ud.clonedir)
except Exception as e:
logger.error('gitsm: submodule download failed: %s %s' % (type(e).__name__, str(e)))
raise

Git.download(self, ud, d)
self.process_submodules(ud, ud.clonedir, download_submodule, d)

if not ud.shallow or ud.localpath != ud.fullshallow:
submodules = self.uses_submodules(ud, d, ud.clonedir)
if submodules:
self.update_submodules(ud, d)

def clone_shallow_local(self, ud, dest, d):
super(GitSM, self).clone_shallow_local(ud, dest, d)

runfetchcmd('cp -fpPRH "%s/modules" "%s/"' % (ud.clonedir, os.path.join(dest, '.git')), d)

def unpack(self, ud, destdir, d):
def unpack_submodules(ud, url, module, modpath, d):
url += ";bareclone=1;nobranch=1"

# Figure out where we clone over the bare submodules...
if ud.bareclone:
repo_conf = ud.destdir
else:
repo_conf = os.path.join(ud.destdir, '.git')

try:
newfetch = Fetch([url], d, cache=False)
newfetch.unpack(root=os.path.dirname(os.path.join(repo_conf, 'modules', module)))
except Exception as e:
logger.error('gitsm: submodule unpack failed: %s %s' % (type(e).__name__, str(e)))
raise

local_path = newfetch.localpath(url)

# Correct the submodule references to the local download version...
runfetchcmd("%(basecmd)s config submodule.%(module)s.url %(url)s" % {'basecmd': ud.basecmd, 'module': module, 'url' : local_path}, d, workdir=ud.destdir)

if ud.shallow:
runfetchcmd("%(basecmd)s config submodule.%(module)s.shallow true" % {'basecmd': ud.basecmd, 'module': module}, d, workdir=ud.destdir)

# Ensure the submodule repository is NOT set to bare, since we're checking it out...
try:
runfetchcmd("%s config core.bare false" % (ud.basecmd), d, quiet=True, workdir=os.path.join(repo_conf, 'modules', module))
except:
logger.error("Unable to set git config core.bare to false for %s" % os.path.join(repo_conf, 'modules', module))
raise

Git.unpack(self, ud, destdir, d)

ret = self.process_submodules(ud, ud.destdir, unpack_submodules, d)

if not ud.bareclone and ret:
# All submodules should already be downloaded and configured in the tree. This simply sets
# up the configuration and checks out the files. The main project config should remain
# unmodified, and no download from the internet should occur.
runfetchcmd("%s submodule update --recursive --no-fetch" % (ud.basecmd), d, quiet=True, workdir=ud.destdir)
if self.uses_submodules(ud, d, ud.destdir):
runfetchcmd(ud.basecmd + " checkout " + ud.revisions[ud.names[0]], d, workdir=ud.destdir)
runfetchcmd(ud.basecmd + " submodule update --init --recursive", d, workdir=ud.destdir)

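parse_gitmodules() above is a plain line-oriented parser over the .gitmodules text. The following is a standalone restatement of that idea run against a made-up sample file, purely for illustration; it is not the gitsm module itself.

# Illustration only: what the .gitmodules parsing used by process_submodules()
# produces for a typical entry (note the relative url, which the code above
# later converts to an absolute one).
sample = '''[submodule "libfoo"]
    path = ext/libfoo
    url = ../libfoo.git
'''

def parse_gitmodules(text):
    modules, module = {}, ""
    for line in text.splitlines():
        if line.startswith('[submodule'):
            module = line.split('"')[1]
            modules[module] = {}
        elif module and line.strip().startswith('path'):
            modules[module]['path'] = line.split('=')[1].strip()
        elif module and line.strip().startswith('url'):
            modules[module]['url'] = line.split('=')[1].strip()
    return modules

print(parse_gitmodules(sample))
# {'libfoo': {'path': 'ext/libfoo', 'url': '../libfoo.git'}}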
@@ -80,7 +80,7 @@ class Hg(FetchMethod):
ud.fullmirror = os.path.join(d.getVar("DL_DIR"), mirrortarball)
ud.mirrortarballs = [mirrortarball]

hgdir = d.getVar("HGDIR") or (d.getVar("DL_DIR") + "/hg")
hgdir = d.getVar("HGDIR") or (d.getVar("DL_DIR") + "/hg/")
ud.pkgdir = os.path.join(hgdir, hgsrcname)
ud.moddir = os.path.join(ud.pkgdir, ud.module)
ud.localfile = ud.moddir

@@ -32,6 +32,7 @@ from bb.fetch2 import runfetchcmd
from bb.fetch2 import logger
from bb.fetch2 import UnpackError
from bb.fetch2 import ParameterError
from distutils import spawn

def subprocess_setup():
# Python installs a SIGPIPE handler by default. This is usually not what
@@ -194,11 +195,9 @@ class Npm(FetchMethod):
outputurl = pdata['dist']['tarball']
data[pkg] = {}
data[pkg]['tgz'] = os.path.basename(outputurl)
if outputurl in fetchedlist:
return

self._runwget(ud, d, "%s --directory-prefix=%s %s" % (self.basecmd, ud.prefixdir, outputurl), False)
fetchedlist.append(outputurl)
if not outputurl in fetchedlist:
self._runwget(ud, d, "%s --directory-prefix=%s %s" % (self.basecmd, ud.prefixdir, outputurl), False)
fetchedlist.append(outputurl)

dependencies = pdata.get('dependencies', {})
optionalDependencies = pdata.get('optionalDependencies', {})

@@ -32,9 +32,8 @@ class Osc(FetchMethod):
ud.module = ud.parm["module"]

# Create paths to osc checkouts
oscdir = d.getVar("OSCDIR") or (d.getVar("DL_DIR") + "/osc")
relpath = self._strip_leading_slashes(ud.path)
ud.pkgdir = os.path.join(oscdir, ud.host)
ud.pkgdir = os.path.join(d.getVar('OSCDIR'), ud.host)
ud.moddir = os.path.join(ud.pkgdir, relpath, ud.module)

if 'rev' in ud.parm:
@@ -55,7 +54,7 @@ class Osc(FetchMethod):
command is "fetch", "update", "info"
"""

basecmd = d.getVar("FETCHCMD_osc") or "/usr/bin/env osc"
basecmd = d.expand('${FETCHCMD_osc}')

proto = ud.parm.get('protocol', 'ocs')


@@ -43,9 +43,13 @@ class Perforce(FetchMethod):
provided by the env, use it. If P4PORT is specified by the recipe, use
its values, which may override the settings in P4CONFIG.
"""
ud.basecmd = d.getVar("FETCHCMD_p4") or "/usr/bin/env p4"
ud.basecmd = d.getVar('FETCHCMD_p4')
if not ud.basecmd:
ud.basecmd = "/usr/bin/env p4"

ud.dldir = d.getVar("P4DIR") or (d.getVar("DL_DIR") + "/p4")
ud.dldir = d.getVar('P4DIR')
if not ud.dldir:
ud.dldir = '%s/%s' % (d.getVar('DL_DIR'), 'p4')

path = ud.url.split('://')[1]
path = path.split(';')[0]

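Several of these hunks swap an explicit "if not value: value = default" fallback for the shorter "getVar(...) or default" idiom. A minimal sketch of the two equivalent shapes, using a stand-in dictionary-backed datastore rather than the real bitbake one:

# Stand-in for the bitbake datastore; only .getVar() is assumed here.
class FakeData:
    def __init__(self, values):
        self._values = values
    def getVar(self, name):
        return self._values.get(name)

d = FakeData({"DL_DIR": "/srv/downloads"})

# Explicit fallback (the longer form on one side of the diff)
basecmd = d.getVar("FETCHCMD_p4")
if not basecmd:
    basecmd = "/usr/bin/env p4"

# Same result with the "or" idiom used on the other side
basecmd2 = d.getVar("FETCHCMD_p4") or "/usr/bin/env p4"
dldir = d.getVar("P4DIR") or (d.getVar("DL_DIR") + "/p4")

assert basecmd == basecmd2 == "/usr/bin/env p4"
assert dldir == "/srv/downloads/p4"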
@@ -45,8 +45,6 @@ class Repo(FetchMethod):
"master".
"""

ud.basecmd = d.getVar("FETCHCMD_repo") or "/usr/bin/env repo"

ud.proto = ud.parm.get('protocol', 'git')
ud.branch = ud.parm.get('branch', 'master')
ud.manifest = ud.parm.get('manifest', 'default.xml')
@@ -62,8 +60,8 @@ class Repo(FetchMethod):
logger.debug(1, "%s already exists (or was stashed). Skipping repo init / sync.", ud.localpath)
return

repodir = d.getVar("REPODIR") or (d.getVar("DL_DIR") + "/repo")
gitsrcname = "%s%s" % (ud.host, ud.path.replace("/", "."))
repodir = d.getVar("REPODIR") or os.path.join(d.getVar("DL_DIR"), "repo")
codir = os.path.join(repodir, gitsrcname, ud.manifest)

if ud.user:
@@ -74,11 +72,11 @@ class Repo(FetchMethod):
repodir = os.path.join(codir, "repo")
bb.utils.mkdirhier(repodir)
if not os.path.exists(os.path.join(repodir, ".repo")):
bb.fetch2.check_network_access(d, "%s init -m %s -b %s -u %s://%s%s%s" % (ud.basecmd, ud.manifest, ud.branch, ud.proto, username, ud.host, ud.path), ud.url)
runfetchcmd("%s init -m %s -b %s -u %s://%s%s%s" % (ud.basecmd, ud.manifest, ud.branch, ud.proto, username, ud.host, ud.path), d, workdir=repodir)
bb.fetch2.check_network_access(d, "repo init -m %s -b %s -u %s://%s%s%s" % (ud.manifest, ud.branch, ud.proto, username, ud.host, ud.path), ud.url)
runfetchcmd("repo init -m %s -b %s -u %s://%s%s%s" % (ud.manifest, ud.branch, ud.proto, username, ud.host, ud.path), d, workdir=repodir)

bb.fetch2.check_network_access(d, "%s sync %s" % (ud.basecmd, ud.url), ud.url)
runfetchcmd("%s sync" % ud.basecmd, d, workdir=repodir)
bb.fetch2.check_network_access(d, "repo sync %s" % ud.url, ud.url)
runfetchcmd("repo sync", d, workdir=repodir)

scmdata = ud.parm.get("scmdata", "")
if scmdata == "keep":

@@ -49,7 +49,7 @@ class Svn(FetchMethod):
if not "module" in ud.parm:
raise MissingParameterError('module', ud.url)

ud.basecmd = d.getVar("FETCHCMD_svn") or "/usr/bin/env svn --non-interactive --trust-server-cert"
ud.basecmd = d.getVar('FETCHCMD_svn')

ud.module = ud.parm["module"]

@@ -59,13 +59,9 @@ class Svn(FetchMethod):
ud.path_spec = ud.parm["path_spec"]

# Create paths to svn checkouts
svndir = d.getVar("SVNDIR") or (d.getVar("DL_DIR") + "/svn")
relpath = self._strip_leading_slashes(ud.path)
ud.pkgdir = os.path.join(svndir, ud.host, relpath)
ud.pkgdir = os.path.join(d.expand('${SVNDIR}'), ud.host, relpath)
ud.moddir = os.path.join(ud.pkgdir, ud.module)
# Protects the repository from concurrent updates, e.g. from two
# recipes fetching different revisions at the same time
ud.svnlock = os.path.join(ud.pkgdir, "svn.lock")

ud.setup_revisions(d)

@@ -126,40 +122,35 @@ class Svn(FetchMethod):

logger.debug(2, "Fetch: checking for module directory '" + ud.moddir + "'")

lf = bb.utils.lockfile(ud.svnlock)
if os.access(os.path.join(ud.moddir, '.svn'), os.R_OK):
svnupdatecmd = self._buildsvncommand(ud, d, "update")
logger.info("Update " + ud.url)
# We need to attempt to run svn upgrade first in case its an older working format
try:
runfetchcmd(ud.basecmd + " upgrade", d, workdir=ud.moddir)
except FetchError:
pass
logger.debug(1, "Running %s", svnupdatecmd)
bb.fetch2.check_network_access(d, svnupdatecmd, ud.url)
runfetchcmd(svnupdatecmd, d, workdir=ud.moddir)
else:
svnfetchcmd = self._buildsvncommand(ud, d, "fetch")
logger.info("Fetch " + ud.url)
# check out sources there
bb.utils.mkdirhier(ud.pkgdir)
logger.debug(1, "Running %s", svnfetchcmd)
bb.fetch2.check_network_access(d, svnfetchcmd, ud.url)
runfetchcmd(svnfetchcmd, d, workdir=ud.pkgdir)

try:
if os.access(os.path.join(ud.moddir, '.svn'), os.R_OK):
svnupdatecmd = self._buildsvncommand(ud, d, "update")
logger.info("Update " + ud.url)
# We need to attempt to run svn upgrade first in case its an older working format
try:
runfetchcmd(ud.basecmd + " upgrade", d, workdir=ud.moddir)
except FetchError:
pass
logger.debug(1, "Running %s", svnupdatecmd)
bb.fetch2.check_network_access(d, svnupdatecmd, ud.url)
runfetchcmd(svnupdatecmd, d, workdir=ud.moddir)
else:
svnfetchcmd = self._buildsvncommand(ud, d, "fetch")
logger.info("Fetch " + ud.url)
# check out sources there
bb.utils.mkdirhier(ud.pkgdir)
logger.debug(1, "Running %s", svnfetchcmd)
bb.fetch2.check_network_access(d, svnfetchcmd, ud.url)
runfetchcmd(svnfetchcmd, d, workdir=ud.pkgdir)
scmdata = ud.parm.get("scmdata", "")
if scmdata == "keep":
tar_flags = ""
else:
tar_flags = "--exclude='.svn'"

scmdata = ud.parm.get("scmdata", "")
if scmdata == "keep":
tar_flags = ""
else:
tar_flags = "--exclude='.svn'"

# tar them up to a defined filename
runfetchcmd("tar %s -czf %s %s" % (tar_flags, ud.localpath, ud.path_spec), d,
cleanup=[ud.localpath], workdir=ud.pkgdir)
finally:
bb.utils.unlockfile(lf)
# tar them up to a defined filename
runfetchcmd("tar %s -czf %s %s" % (tar_flags, ud.localpath, ud.path_spec), d,
cleanup=[ud.localpath], workdir=ud.pkgdir)

def clean(self, ud, d):
""" Clean SVN specific files and dirs """

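The svn.py hunk wraps the whole update/checkout/tar sequence in a per-checkout lockfile held inside try/finally, so two recipes cannot update the same working copy at once. A rough generic sketch of that shape using fcntl is below; the real code uses bb.utils.lockfile/unlockfile, and the path and helper names here are illustrative only.

import fcntl, os

def with_repo_lock(lockpath, work):
    # Hold an exclusive lock for the duration of 'work', releasing it even on error.
    os.makedirs(os.path.dirname(lockpath), exist_ok=True)
    lf = open(lockpath, "w")
    fcntl.flock(lf, fcntl.LOCK_EX)
    try:
        return work()
    finally:
        fcntl.flock(lf, fcntl.LOCK_UN)
        lf.close()

# Usage sketch: update-or-checkout then tar, all under the lock.
# with_repo_lock("/srv/downloads/svn/example/svn.lock", lambda: do_update_and_pack())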
@@ -250,7 +250,6 @@ class Wget(FetchMethod):
return ""
def close(self):
pass
closed = False

resp = addinfourl(fp_dummy(), r.msg, req.get_full_url())
resp.code = r.status
@@ -333,8 +332,7 @@ class Wget(FetchMethod):
except (TypeError, ImportError, IOError, netrc.NetrcParseError):
pass

with opener.open(r) as response:
pass
opener.open(r)
except urllib.error.URLError as e:
if try_again:
logger.debug(2, "checkstatus: trying again")

@@ -292,12 +292,8 @@ class BitBakeConfigParameters(cookerdata.ConfigParameters):
help="Writes the event log of the build to a bitbake event json file. "
"Use '' (empty string) to assign the name automatically.")

parser.add_option("", "--runall", action="append", dest="runall",
help="Run the specified task for any recipe in the taskgraph of the specified target (even if it wouldn't otherwise have run).")

parser.add_option("", "--runonly", action="append", dest="runonly",
help="Run only the specified task within the taskgraph of the specified targets (and any task dependencies those tasks may have).")

parser.add_option("", "--runall", action="store", dest="runall",
help="Run the specified task for all build targets and their dependencies.")

options, targets = parser.parse_args(argv)

@@ -405,6 +401,9 @@ def setup_bitbake(configParams, configuration, extrafeatures=None):
# In status only mode there are no logs and no UI
logger.addHandler(handler)

# Clear away any spurious environment variables while we stoke up the cooker
cleanedvars = bb.utils.clean_environment()

if configParams.server_only:
featureset = []
ui_module = None
@@ -420,10 +419,6 @@ def setup_bitbake(configParams, configuration, extrafeatures=None):

server_connection = None

# Clear away any spurious environment variables while we stoke up the cooker
# (done after import_extension_module() above since for example import gi triggers env var usage)
cleanedvars = bb.utils.clean_environment()

if configParams.remote_server:
# Connect to a remote XMLRPC server
server_connection = bb.server.xmlrpcclient.connectXMLRPC(configParams.remote_server, featureset,
@@ -448,7 +443,7 @@ def setup_bitbake(configParams, configuration, extrafeatures=None):
else:
logger.info("Reconnecting to bitbake server...")
if not os.path.exists(sockname):
logger.info("Previous bitbake instance shutting down?, waiting to retry...")
print("Previous bitbake instance shutting down?, waiting to retry...")
i = 0
lock = None
# Wait for 5s or until we can get the lock

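The --runall/--runonly hunk above is the difference between an option that may be given once (action="store") and one that accumulates every occurrence into a list (action="append"). A tiny optparse illustration, not bitbake's actual parser setup:

# Small standalone demo of store vs append behaviour.
from optparse import OptionParser

parser = OptionParser()
parser.add_option("", "--runall", action="append", dest="runall",
                  help="may be passed several times; values collect into a list")
parser.add_option("", "--mode", action="store", dest="mode",
                  help="last value wins")

opts, args = parser.parse_args(
    ["--runall", "fetch", "--runall", "checkuri", "--mode", "a", "--mode", "b"])
print(opts.runall)  # ['fetch', 'checkuri']
print(opts.mode)    # 'b'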
@@ -40,7 +40,6 @@ class BBLogFormatter(logging.Formatter):
VERBOSE = logging.INFO - 1
NOTE = logging.INFO
PLAIN = logging.INFO + 1
VERBNOTE = logging.INFO + 2
ERROR = logging.ERROR
WARNING = logging.WARNING
CRITICAL = logging.CRITICAL
@@ -52,7 +51,6 @@ class BBLogFormatter(logging.Formatter):
VERBOSE: 'NOTE',
NOTE : 'NOTE',
PLAIN : '',
VERBNOTE: 'NOTE',
WARNING : 'WARNING',
ERROR : 'ERROR',
CRITICAL: 'ERROR',
@@ -68,7 +66,6 @@ class BBLogFormatter(logging.Formatter):
VERBOSE : BASECOLOR,
NOTE : BASECOLOR,
PLAIN : BASECOLOR,
VERBNOTE: BASECOLOR,
WARNING : YELLOW,
ERROR : RED,
CRITICAL: RED,

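The msg.py hunks concern a VERBNOTE level slotted between the stock logging levels. A small sketch of registering such an in-between level with the standard logging module (illustration only, not the BBLogFormatter implementation):

# How an extra level like VERBNOTE can sit just above INFO.
import logging

VERBNOTE = logging.INFO + 2
logging.addLevelName(VERBNOTE, "NOTE")

logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
logger = logging.getLogger("demo")
logger.log(VERBNOTE, "recommendation the user should probably see")
# prints: NOTE: recommendation the user should probably see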
@@ -134,9 +134,8 @@ def resolve_file(fn, d):
|
||||
if not newfn:
|
||||
raise IOError(errno.ENOENT, "file %s not found in %s" % (fn, bbpath))
|
||||
fn = newfn
|
||||
else:
|
||||
mark_dependency(d, fn)
|
||||
|
||||
mark_dependency(d, fn)
|
||||
if not os.path.isfile(fn):
|
||||
raise IOError(errno.ENOENT, "file %s not found" % fn)
|
||||
|
||||
|
||||
@@ -335,39 +335,35 @@ def handleInherit(statements, filename, lineno, m):
|
||||
classes = m.group(1)
|
||||
statements.append(InheritNode(filename, lineno, classes))
|
||||
|
||||
def runAnonFuncs(d):
|
||||
def finalize(fn, d, variant = None):
|
||||
saved_handlers = bb.event.get_handlers().copy()
|
||||
|
||||
for var in d.getVar('__BBHANDLERS', False) or []:
|
||||
# try to add the handler
|
||||
handlerfn = d.getVarFlag(var, "filename", False)
|
||||
if not handlerfn:
|
||||
bb.fatal("Undefined event handler function '%s'" % var)
|
||||
handlerln = int(d.getVarFlag(var, "lineno", False))
|
||||
bb.event.register(var, d.getVar(var, False), (d.getVarFlag(var, "eventmask") or "").split(), handlerfn, handlerln)
|
||||
|
||||
bb.event.fire(bb.event.RecipePreFinalise(fn), d)
|
||||
|
||||
bb.data.expandKeys(d)
|
||||
code = []
|
||||
for funcname in d.getVar("__BBANONFUNCS", False) or []:
|
||||
code.append("%s(d)" % funcname)
|
||||
bb.utils.better_exec("\n".join(code), {"d": d})
|
||||
|
||||
def finalize(fn, d, variant = None):
|
||||
saved_handlers = bb.event.get_handlers().copy()
|
||||
try:
|
||||
for var in d.getVar('__BBHANDLERS', False) or []:
|
||||
# try to add the handler
|
||||
handlerfn = d.getVarFlag(var, "filename", False)
|
||||
if not handlerfn:
|
||||
bb.fatal("Undefined event handler function '%s'" % var)
|
||||
handlerln = int(d.getVarFlag(var, "lineno", False))
|
||||
bb.event.register(var, d.getVar(var, False), (d.getVarFlag(var, "eventmask") or "").split(), handlerfn, handlerln)
|
||||
tasklist = d.getVar('__BBTASKS', False) or []
|
||||
bb.event.fire(bb.event.RecipeTaskPreProcess(fn, list(tasklist)), d)
|
||||
bb.build.add_tasks(tasklist, d)
|
||||
|
||||
bb.event.fire(bb.event.RecipePreFinalise(fn), d)
|
||||
bb.parse.siggen.finalise(fn, d, variant)
|
||||
|
||||
bb.data.expandKeys(d)
|
||||
runAnonFuncs(d)
|
||||
d.setVar('BBINCLUDED', bb.parse.get_file_depends(d))
|
||||
|
||||
tasklist = d.getVar('__BBTASKS', False) or []
|
||||
bb.event.fire(bb.event.RecipeTaskPreProcess(fn, list(tasklist)), d)
|
||||
bb.build.add_tasks(tasklist, d)
|
||||
|
||||
bb.parse.siggen.finalise(fn, d, variant)
|
||||
|
||||
d.setVar('BBINCLUDED', bb.parse.get_file_depends(d))
|
||||
|
||||
bb.event.fire(bb.event.RecipeParsed(fn), d)
|
||||
finally:
|
||||
bb.event.set_handlers(saved_handlers)
|
||||
bb.event.fire(bb.event.RecipeParsed(fn), d)
|
||||
bb.event.set_handlers(saved_handlers)
|
||||
|
||||
def _create_variants(datastores, names, function, onlyfinalise):
|
||||
def create_variant(name, orig_d, arg = None):
|
||||
|
||||
@@ -131,6 +131,9 @@ def handle(fn, d, include):
|
||||
|
||||
abs_fn = resolve_file(fn, d)
|
||||
|
||||
if include:
|
||||
bb.parse.mark_dependency(d, abs_fn)
|
||||
|
||||
# actual loading
|
||||
statements = get_statements(fn, abs_fn, base_name)
|
||||
|
||||
|
||||
@@ -134,6 +134,9 @@ def handle(fn, data, include):
|
||||
abs_fn = resolve_file(fn, data)
|
||||
f = open(abs_fn, 'r')
|
||||
|
||||
if include:
|
||||
bb.parse.mark_dependency(data, abs_fn)
|
||||
|
||||
statements = ast.StatementGroup()
|
||||
lineno = 0
|
||||
while True:
|
||||
|
||||
@@ -244,17 +244,17 @@ def _filterProviders(providers, item, cfgData, dataCache):
|
||||
pkg_pn[pn] = []
|
||||
pkg_pn[pn].append(p)
|
||||
|
||||
logger.debug(1, "providers for %s are: %s", item, list(sorted(pkg_pn.keys())))
|
||||
logger.debug(1, "providers for %s are: %s", item, list(pkg_pn.keys()))
|
||||
|
||||
# First add PREFERRED_VERSIONS
|
||||
for pn in sorted(pkg_pn):
|
||||
for pn in pkg_pn:
|
||||
sortpkg_pn[pn] = sortPriorities(pn, dataCache, pkg_pn)
|
||||
preferred_versions[pn] = findPreferredProvider(pn, cfgData, dataCache, sortpkg_pn[pn], item)
|
||||
if preferred_versions[pn][1]:
|
||||
eligible.append(preferred_versions[pn][1])
|
||||
|
||||
# Now add latest versions
|
||||
for pn in sorted(sortpkg_pn):
|
||||
for pn in sortpkg_pn:
|
||||
if pn in preferred_versions and preferred_versions[pn][1]:
|
||||
continue
|
||||
preferred_versions[pn] = findLatestProvider(pn, cfgData, dataCache, sortpkg_pn[pn][0])
|
||||
|
||||
@@ -49,11 +49,6 @@ def fn_from_tid(tid):
|
||||
def taskname_from_tid(tid):
|
||||
return tid.rsplit(":", 1)[1]
|
||||
|
||||
def mc_from_tid(tid):
|
||||
if tid.startswith('multiconfig:'):
|
||||
return tid.split(':')[1]
|
||||
return ""
|
||||
|
||||
def split_tid(tid):
|
||||
(mc, fn, taskname, _) = split_tid_mcfn(tid)
|
||||
return (mc, fn, taskname)
|
||||
@@ -99,13 +94,13 @@ class RunQueueStats:
|
||||
self.active = self.active - 1
|
||||
self.failed = self.failed + 1
|
||||
|
||||
def taskCompleted(self):
|
||||
self.active = self.active - 1
|
||||
self.completed = self.completed + 1
|
||||
def taskCompleted(self, number = 1):
|
||||
self.active = self.active - number
|
||||
self.completed = self.completed + number
|
||||
|
||||
def taskSkipped(self):
|
||||
self.active = self.active + 1
|
||||
self.skipped = self.skipped + 1
|
||||
def taskSkipped(self, number = 1):
|
||||
self.active = self.active + number
|
||||
self.skipped = self.skipped + number
|
||||
|
||||
def taskActive(self):
|
||||
self.active = self.active + 1
|
||||
@@ -139,7 +134,6 @@ class RunQueueScheduler(object):
|
||||
self.prio_map = [self.rqdata.runtaskentries.keys()]
|
||||
|
||||
self.buildable = []
|
||||
self.skip_maxthread = {}
|
||||
self.stamps = {}
|
||||
for tid in self.rqdata.runtaskentries:
|
||||
(mc, fn, taskname, taskfn) = split_tid_mcfn(tid)
|
||||
@@ -156,25 +150,8 @@ class RunQueueScheduler(object):
|
||||
self.buildable = [x for x in self.buildable if x not in self.rq.runq_running]
|
||||
if not self.buildable:
|
||||
return None
|
||||
|
||||
# Filter out tasks that have a max number of threads that have been exceeded
|
||||
skip_buildable = {}
|
||||
for running in self.rq.runq_running.difference(self.rq.runq_complete):
|
||||
rtaskname = taskname_from_tid(running)
|
||||
if rtaskname not in self.skip_maxthread:
|
||||
self.skip_maxthread[rtaskname] = self.rq.cfgData.getVarFlag(rtaskname, "number_threads")
|
||||
if not self.skip_maxthread[rtaskname]:
|
||||
continue
|
||||
if rtaskname in skip_buildable:
|
||||
skip_buildable[rtaskname] += 1
|
||||
else:
|
||||
skip_buildable[rtaskname] = 1
|
||||
|
||||
if len(self.buildable) == 1:
|
||||
tid = self.buildable[0]
|
||||
taskname = taskname_from_tid(tid)
|
||||
if taskname in skip_buildable and skip_buildable[taskname] >= int(self.skip_maxthread[taskname]):
|
||||
return None
|
||||
stamp = self.stamps[tid]
|
||||
if stamp not in self.rq.build_stamps.values():
|
||||
return tid
|
||||
@@ -187,9 +164,6 @@ class RunQueueScheduler(object):
|
||||
best = None
|
||||
bestprio = None
|
||||
for tid in self.buildable:
|
||||
taskname = taskname_from_tid(tid)
|
||||
if taskname in skip_buildable and skip_buildable[taskname] >= int(self.skip_maxthread[taskname]):
|
||||
continue
|
||||
prio = self.rev_prio_map[tid]
|
||||
if bestprio is None or bestprio > prio:
|
||||
stamp = self.stamps[tid]
|
||||
@@ -204,10 +178,10 @@ class RunQueueScheduler(object):
|
||||
"""
|
||||
Return the id of the task we should build next
|
||||
"""
|
||||
if self.rq.can_start_task():
|
||||
if self.rq.stats.active < self.rq.number_tasks:
|
||||
return self.next_buildable_task()
|
||||
|
||||
def newbuildable(self, task):
|
||||
def newbuilable(self, task):
|
||||
self.buildable.append(task)
|
||||
|
||||
def describe_task(self, taskid):
|
||||
@@ -410,9 +384,6 @@ class RunQueueData:
|
||||
explored_deps = {}
|
||||
msgs = []
|
||||
|
||||
class TooManyLoops(Exception):
|
||||
pass
|
||||
|
||||
def chain_reorder(chain):
|
||||
"""
|
||||
Reorder a dependency chain so the lowest task id is first
|
||||
@@ -465,7 +436,7 @@ class RunQueueData:
|
||||
msgs.append("\n")
|
||||
if len(valid_chains) > 10:
|
||||
msgs.append("Aborted dependency loops search after 10 matches.\n")
|
||||
raise TooManyLoops
|
||||
return msgs
|
||||
continue
|
||||
scan = False
|
||||
if revdep not in explored_deps:
|
||||
@@ -484,11 +455,8 @@ class RunQueueData:
|
||||
|
||||
explored_deps[tid] = total_deps
|
||||
|
||||
try:
|
||||
for task in tasks:
|
||||
find_chains(task, [])
|
||||
except TooManyLoops:
|
||||
pass
|
||||
for task in tasks:
|
||||
find_chains(task, [])
|
||||
|
||||
return msgs
|
||||
|
||||
@@ -613,18 +581,11 @@ class RunQueueData:
|
||||
if t in taskData[mc].taskentries:
|
||||
depends.add(t)
|
||||
|
||||
def add_mc_dependencies(mc, tid):
|
||||
mcdeps = taskData[mc].get_mcdepends()
|
||||
for dep in mcdeps:
|
||||
mcdependency = dep.split(':')
|
||||
pn = mcdependency[3]
|
||||
frommc = mcdependency[1]
|
||||
mcdep = mcdependency[2]
|
||||
deptask = mcdependency[4]
|
||||
if mc == frommc:
|
||||
fn = taskData[mcdep].build_targets[pn][0]
|
||||
newdep = '%s:%s' % (fn,deptask)
|
||||
taskData[mc].taskentries[tid].tdepends.append(newdep)
|
||||
def add_resolved_dependencies(mc, fn, tasknames, depends):
|
||||
for taskname in tasknames:
|
||||
tid = build_tid(mc, fn, taskname)
|
||||
if tid in self.runtaskentries:
|
||||
depends.add(tid)
|
||||
|
||||
for mc in taskData:
|
||||
for tid in taskData[mc].taskentries:
|
||||
@@ -642,16 +603,12 @@ class RunQueueData:
|
||||
if fn in taskData[mc].failed_fns:
|
||||
continue
|
||||
|
||||
# We add multiconfig dependencies before processing internal task deps (tdepends)
|
||||
if 'mcdepends' in task_deps and taskname in task_deps['mcdepends']:
|
||||
add_mc_dependencies(mc, tid)
|
||||
|
||||
# Resolve task internal dependencies
|
||||
#
|
||||
# e.g. addtask before X after Y
|
||||
for t in taskData[mc].taskentries[tid].tdepends:
|
||||
(depmc, depfn, deptaskname, _) = split_tid_mcfn(t)
|
||||
depends.add(build_tid(depmc, depfn, deptaskname))
|
||||
(_, depfn, deptaskname, _) = split_tid_mcfn(t)
|
||||
depends.add(build_tid(mc, depfn, deptaskname))
|
||||
|
||||
# Resolve 'deptask' dependencies
|
||||
#
|
||||
@@ -716,106 +673,57 @@ class RunQueueData:
|
||||
recursiveitasks[tid].append(newdep)
|
||||
|
||||
self.runtaskentries[tid].depends = depends
|
||||
# Remove all self references
|
||||
self.runtaskentries[tid].depends.discard(tid)
|
||||
|
||||
#self.dump_data()
|
||||
|
||||
self.init_progress_reporter.next_stage()
|
||||
|
||||
# Resolve recursive 'recrdeptask' dependencies (Part B)
|
||||
#
|
||||
# e.g. do_sometask[recrdeptask] = "do_someothertask"
|
||||
# (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
|
||||
# We need to do this separately since we need all of runtaskentries[*].depends to be complete before this is processed
|
||||
self.init_progress_reporter.next_stage(len(recursivetasks))
|
||||
extradeps = {}
|
||||
for taskcounter, tid in enumerate(recursivetasks):
|
||||
extradeps[tid] = set(self.runtaskentries[tid].depends)
|
||||
|
||||
# Generating/interating recursive lists of dependencies is painful and potentially slow
|
||||
# Precompute recursive task dependencies here by:
|
||||
# a) create a temp list of reverse dependencies (revdeps)
|
||||
# b) walk up the ends of the chains (when a given task no longer has dependencies i.e. len(deps) == 0)
|
||||
# c) combine the total list of dependencies in cumulativedeps
|
||||
# d) optimise by pre-truncating 'task' off the items in cumulativedeps (keeps items in sets lower)
|
||||
tasknames = recursivetasks[tid]
|
||||
seendeps = set()
|
||||
|
||||
def generate_recdeps(t):
|
||||
newdeps = set()
|
||||
(mc, fn, taskname, _) = split_tid_mcfn(t)
|
||||
add_resolved_dependencies(mc, fn, tasknames, newdeps)
|
||||
extradeps[tid].update(newdeps)
|
||||
seendeps.add(t)
|
||||
newdeps.add(t)
|
||||
for i in newdeps:
|
||||
if i not in self.runtaskentries:
|
||||
# Not all recipes might have the recrdeptask task as a task
|
||||
continue
|
||||
task = self.runtaskentries[i].task
|
||||
for n in self.runtaskentries[i].depends:
|
||||
if n not in seendeps:
|
||||
generate_recdeps(n)
|
||||
generate_recdeps(tid)
|
||||
|
||||
if tid in recursiveitasks:
|
||||
for dep in recursiveitasks[tid]:
|
||||
generate_recdeps(dep)
|
||||
self.init_progress_reporter.update(taskcounter)
|
||||
|
||||
# Remove circular references so that do_a[recrdeptask] = "do_a do_b" can work
|
||||
for tid in recursivetasks:
|
||||
extradeps[tid].difference_update(recursivetasksselfref)
|
||||
|
||||
revdeps = {}
|
||||
deps = {}
|
||||
cumulativedeps = {}
|
||||
for tid in self.runtaskentries:
|
||||
deps[tid] = set(self.runtaskentries[tid].depends)
|
||||
revdeps[tid] = set()
|
||||
cumulativedeps[tid] = set()
|
||||
# Generate a temp list of reverse dependencies
|
||||
for tid in self.runtaskentries:
|
||||
for dep in self.runtaskentries[tid].depends:
|
||||
revdeps[dep].add(tid)
|
||||
# Find the dependency chain endpoints
|
||||
endpoints = set()
|
||||
for tid in self.runtaskentries:
|
||||
if len(deps[tid]) == 0:
|
||||
endpoints.add(tid)
|
||||
# Iterate the chains collating dependencies
|
||||
while endpoints:
|
||||
next = set()
|
||||
for tid in endpoints:
|
||||
for dep in revdeps[tid]:
|
||||
cumulativedeps[dep].add(fn_from_tid(tid))
|
||||
cumulativedeps[dep].update(cumulativedeps[tid])
|
||||
if tid in deps[dep]:
|
||||
deps[dep].remove(tid)
|
||||
if len(deps[dep]) == 0:
|
||||
next.add(dep)
|
||||
endpoints = next
|
||||
#for tid in deps:
|
||||
# if len(deps[tid]) != 0:
|
||||
# bb.warn("Sanity test failure, dependencies left for %s (%s)" % (tid, deps[tid]))
|
||||
|
||||
# Loop here since recrdeptasks can depend upon other recrdeptasks and we have to
|
||||
# resolve these recursively until we aren't adding any further extra dependencies
|
||||
extradeps = True
|
||||
while extradeps:
|
||||
extradeps = 0
|
||||
for tid in recursivetasks:
|
||||
tasknames = recursivetasks[tid]
|
||||
|
||||
totaldeps = set(self.runtaskentries[tid].depends)
|
||||
if tid in recursiveitasks:
|
||||
totaldeps.update(recursiveitasks[tid])
|
||||
for dep in recursiveitasks[tid]:
|
||||
if dep not in self.runtaskentries:
|
||||
continue
|
||||
totaldeps.update(self.runtaskentries[dep].depends)
|
||||
|
||||
deps = set()
|
||||
for dep in totaldeps:
|
||||
if dep in cumulativedeps:
|
||||
deps.update(cumulativedeps[dep])
|
||||
|
||||
for t in deps:
|
||||
for taskname in tasknames:
|
||||
newtid = t + ":" + taskname
|
||||
if newtid == tid:
|
||||
continue
|
||||
if newtid in self.runtaskentries and newtid not in self.runtaskentries[tid].depends:
|
||||
extradeps += 1
|
||||
self.runtaskentries[tid].depends.add(newtid)
|
||||
|
||||
# Handle recursive tasks which depend upon other recursive tasks
|
||||
deps = set()
|
||||
for dep in self.runtaskentries[tid].depends.intersection(recursivetasks):
|
||||
deps.update(self.runtaskentries[dep].depends.difference(self.runtaskentries[tid].depends))
|
||||
for newtid in deps:
|
||||
for taskname in tasknames:
|
||||
if not newtid.endswith(":" + taskname):
|
||||
continue
|
||||
if newtid in self.runtaskentries:
|
||||
extradeps += 1
|
||||
self.runtaskentries[tid].depends.add(newtid)
|
||||
|
||||
bb.debug(1, "Added %s recursive dependencies in this loop" % extradeps)
|
||||
|
||||
# Remove recrdeptask circular references so that do_a[recrdeptask] = "do_a do_b" can work
|
||||
for tid in recursivetasksselfref:
|
||||
self.runtaskentries[tid].depends.difference_update(recursivetasksselfref)
|
||||
task = self.runtaskentries[tid].task
|
||||
# Add in extra dependencies
|
||||
if tid in extradeps:
|
||||
self.runtaskentries[tid].depends = extradeps[tid]
|
||||
# Remove all self references
|
||||
if tid in self.runtaskentries[tid].depends:
|
||||
logger.debug(2, "Task %s contains self reference!", tid)
|
||||
self.runtaskentries[tid].depends.remove(tid)
|
||||
|
||||
self.init_progress_reporter.next_stage()
|
||||
|
||||
@@ -890,57 +798,30 @@ class RunQueueData:
|
||||
#
|
||||
# Once all active tasks are marked, prune the ones we don't need.
|
||||
|
||||
delcount = {}
|
||||
delcount = 0
|
||||
for tid in list(self.runtaskentries.keys()):
|
||||
if tid not in runq_build:
|
||||
delcount[tid] = self.runtaskentries[tid]
|
||||
del self.runtaskentries[tid]
|
||||
|
||||
# Handle --runall
|
||||
if self.cooker.configuration.runall:
|
||||
# re-run the mark_active and then drop unused tasks from new list
|
||||
runq_build = {}
|
||||
|
||||
for task in self.cooker.configuration.runall:
|
||||
runall_tids = set()
|
||||
for tid in list(self.runtaskentries):
|
||||
wanttid = fn_from_tid(tid) + ":do_%s" % task
|
||||
if wanttid in delcount:
|
||||
self.runtaskentries[wanttid] = delcount[wanttid]
|
||||
if wanttid in self.runtaskentries:
|
||||
runall_tids.add(wanttid)
|
||||
|
||||
for tid in list(runall_tids):
|
||||
mark_active(tid,1)
|
||||
|
||||
for tid in list(self.runtaskentries.keys()):
|
||||
if tid not in runq_build:
|
||||
delcount[tid] = self.runtaskentries[tid]
|
||||
del self.runtaskentries[tid]
|
||||
|
||||
if len(self.runtaskentries) == 0:
|
||||
bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the recipes of the taskgraphs of the targets %s" % (str(self.cooker.configuration.runall), str(self.targets)))
|
||||
delcount += 1
|
||||
|
||||
self.init_progress_reporter.next_stage()
|
||||
|
||||
# Handle runonly
|
||||
if self.cooker.configuration.runonly:
|
||||
if self.cooker.configuration.runall is not None:
|
||||
runall = "do_%s" % self.cooker.configuration.runall
|
||||
runall_tids = { k: v for k, v in self.runtaskentries.items() if taskname_from_tid(k) == runall }
|
||||
|
||||
# re-run the mark_active and then drop unused tasks from new list
|
||||
runq_build = {}
|
||||
|
||||
for task in self.cooker.configuration.runonly:
|
||||
runonly_tids = { k: v for k, v in self.runtaskentries.items() if taskname_from_tid(k) == "do_%s" % task }
|
||||
|
||||
for tid in list(runonly_tids):
|
||||
mark_active(tid,1)
|
||||
for tid in list(runall_tids):
|
||||
mark_active(tid,1)
|
||||
|
||||
for tid in list(self.runtaskentries.keys()):
|
||||
if tid not in runq_build:
|
||||
delcount[tid] = self.runtaskentries[tid]
|
||||
del self.runtaskentries[tid]
|
||||
delcount += 1
|
||||
|
||||
if len(self.runtaskentries) == 0:
|
||||
bb.msg.fatal("RunQueue", "Could not find any tasks with the tasknames %s to run within the taskgraphs of the targets %s" % (str(self.cooker.configuration.runonly), str(self.targets)))
|
||||
bb.msg.fatal("RunQueue", "No remaining tasks to run for build target %s with runall %s" % (target, runall))
|
||||
|
||||
#
|
||||
# Step D - Sanity checks and computation
|
||||
@@ -953,7 +834,7 @@ class RunQueueData:
|
||||
else:
|
||||
bb.msg.fatal("RunQueue", "No active tasks and not in --continue mode?! Please report this bug.")
|
||||
|
||||
logger.verbose("Pruned %s inactive tasks, %s left", len(delcount), len(self.runtaskentries))
|
||||
logger.verbose("Pruned %s inactive tasks, %s left", delcount, len(self.runtaskentries))
|
||||
|
||||
logger.verbose("Assign Weightings")
|
||||
|
||||
@@ -1081,7 +962,7 @@ class RunQueueData:
|
||||
msg += "\n%s has unique rprovides:\n %s" % (provfn, "\n ".join(rprovide_results[provfn] - commonrprovs))
|
||||
|
||||
if self.warn_multi_bb:
|
||||
logger.verbnote(msg)
|
||||
logger.warning(msg)
|
||||
else:
|
||||
logger.error(msg)
|
||||
|
||||
@@ -1089,7 +970,7 @@ class RunQueueData:
|
||||
|
||||
# Create a whitelist usable by the stamp checks
|
||||
self.stampfnwhitelist = {}
|
||||
for mc in self.taskData:
|
||||
for mc in self.taskData:
|
||||
self.stampfnwhitelist[mc] = []
|
||||
for entry in self.stampwhitelist.split():
|
||||
if entry not in self.taskData[mc].build_targets:
|
||||
@@ -1121,7 +1002,7 @@ class RunQueueData:
|
||||
bb.debug(1, "Task %s is marked nostamp, cannot invalidate this task" % taskname)
|
||||
else:
|
||||
logger.verbose("Invalidate task %s, %s", taskname, fn)
|
||||
bb.parse.siggen.invalidate_task(taskname, self.dataCaches[mc], taskfn)
|
||||
bb.parse.siggen.invalidate_task(taskname, self.dataCaches[mc], fn)
|
||||
|
||||
self.init_progress_reporter.next_stage()
|
||||
|
||||
@@ -1420,12 +1301,6 @@ class RunQueue:
|
||||
bb.event.fire(bb.event.DepTreeGenerated(depgraph), self.cooker.data)
|
||||
|
||||
if self.state is runQueueSceneInit:
|
||||
if not self.dm_event_handler_registered:
|
||||
res = bb.event.register(self.dm_event_handler_name,
|
||||
lambda x: self.dm.check(self) if self.state in [runQueueSceneRun, runQueueRunning, runQueueCleanUp] else False,
|
||||
('bb.event.HeartbeatEvent',))
|
||||
self.dm_event_handler_registered = True
|
||||
|
||||
dump = self.cooker.configuration.dump_signatures
|
||||
if dump:
|
||||
self.rqdata.init_progress_reporter.finish()
|
||||
@@ -1442,6 +1317,11 @@ class RunQueue:
|
||||
self.rqexe = RunQueueExecuteScenequeue(self)
|
||||
|
||||
if self.state is runQueueSceneRun:
|
||||
if not self.dm_event_handler_registered:
|
||||
res = bb.event.register(self.dm_event_handler_name,
|
||||
lambda x: self.dm.check(self) if self.state in [runQueueSceneRun, runQueueRunning, runQueueCleanUp] else False,
|
||||
('bb.event.HeartbeatEvent',))
|
||||
self.dm_event_handler_registered = True
|
||||
retval = self.rqexe.execute()
|
||||
|
||||
if self.state is runQueueRunInit:
|
||||
@@ -1766,10 +1646,6 @@ class RunQueueExecute:
|
||||
valid = bb.utils.better_eval(call, locs)
|
||||
return valid
|
||||
|
||||
def can_start_task(self):
|
||||
can_start = self.stats.active < self.number_tasks
|
||||
return can_start
|
||||
|
||||
class RunQueueExecuteDummy(RunQueueExecute):
|
||||
def __init__(self, rq):
|
||||
self.rq = rq
|
||||
@@ -1843,14 +1719,13 @@ class RunQueueExecuteTasks(RunQueueExecute):
|
||||
bb.build.del_stamp(taskname, self.rqdata.dataCaches[mc], taskfn)
|
||||
self.rq.scenequeue_covered.remove(tid)
|
||||
|
||||
toremove = covered_remove | self.rq.scenequeue_notcovered
|
||||
toremove = covered_remove
|
||||
for task in toremove:
|
||||
logger.debug(1, 'Not skipping task %s due to setsceneverify', task)
|
||||
while toremove:
|
||||
covered_remove = []
|
||||
for task in toremove:
|
||||
if task in self.rq.scenequeue_covered:
|
||||
removecoveredtask(task)
|
||||
removecoveredtask(task)
|
||||
for deptask in self.rqdata.runtaskentries[task].depends:
|
||||
if deptask not in self.rq.scenequeue_covered:
|
||||
continue
|
||||
@@ -1906,7 +1781,7 @@ class RunQueueExecuteTasks(RunQueueExecute):
|
||||
|
||||
def setbuildable(self, task):
|
||||
self.runq_buildable.add(task)
|
||||
self.sched.newbuildable(task)
|
||||
self.sched.newbuilable(task)
|
||||
|
||||
def task_completeoutright(self, task):
|
||||
"""
|
||||
@@ -1920,13 +1795,14 @@ class RunQueueExecuteTasks(RunQueueExecute):
|
||||
continue
|
||||
if revdep in self.runq_buildable:
|
||||
continue
|
||||
alldeps = True
|
||||
alldeps = 1
|
||||
for dep in self.rqdata.runtaskentries[revdep].depends:
|
||||
if dep not in self.runq_complete:
|
||||
alldeps = False
|
||||
break
|
||||
if alldeps:
|
||||
alldeps = 0
|
||||
if alldeps == 1:
|
||||
self.setbuildable(revdep)
|
||||
fn = fn_from_tid(revdep)
|
||||
taskname = taskname_from_tid(revdep)
|
||||
logger.debug(1, "Marking task %s as buildable", revdep)
|
||||
|
||||
def task_complete(self, task):
|
||||
@@ -1950,8 +1826,8 @@ class RunQueueExecuteTasks(RunQueueExecute):
|
||||
self.setbuildable(task)
|
||||
bb.event.fire(runQueueTaskSkipped(task, self.stats, self.rq, reason), self.cfgData)
|
||||
self.task_completeoutright(task)
|
||||
self.stats.taskSkipped()
|
||||
self.stats.taskCompleted()
|
||||
self.stats.taskSkipped()
|
||||
|
||||
def execute(self):
|
||||
"""
|
||||
@@ -2061,7 +1937,7 @@ class RunQueueExecuteTasks(RunQueueExecute):
|
||||
self.build_stamps2.append(self.build_stamps[task])
|
||||
self.runq_running.add(task)
|
||||
self.stats.taskActive()
|
||||
if self.can_start_task():
|
||||
if self.stats.active < self.number_tasks:
|
||||
return True
|
||||
|
||||
if self.stats.active > 0:
|
||||
@@ -2084,23 +1960,10 @@ class RunQueueExecuteTasks(RunQueueExecute):
|
||||
|
||||
return True
|
||||
|
||||
def filtermcdeps(self, task, deps):
|
||||
ret = set()
|
||||
mainmc = mc_from_tid(task)
|
||||
for dep in deps:
|
||||
mc = mc_from_tid(dep)
|
||||
if mc != mainmc:
|
||||
continue
|
||||
ret.add(dep)
|
||||
return ret
|
||||
|
||||
# We filter out multiconfig dependencies from taskdepdata we pass to the tasks
|
||||
# as most code can't handle them
|
||||
def build_taskdepdata(self, task):
|
||||
taskdepdata = {}
|
||||
next = self.rqdata.runtaskentries[task].depends
|
||||
next.add(task)
|
||||
next = self.filtermcdeps(task, next)
|
||||
while next:
|
||||
additional = []
|
||||
for revdep in next:
|
||||
@@ -2110,7 +1973,6 @@ class RunQueueExecuteTasks(RunQueueExecute):
|
||||
provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
|
||||
taskhash = self.rqdata.runtaskentries[revdep].hash
|
||||
taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash]
|
||||
deps = self.filtermcdeps(task, deps)
|
||||
for revdep2 in deps:
|
||||
if revdep2 not in taskdepdata:
|
||||
additional.append(revdep2)
|
||||
@@ -2130,7 +1992,6 @@ class RunQueueExecuteScenequeue(RunQueueExecute):
|
||||
# If we don't have any setscene functions, skip this step
|
||||
if len(self.rqdata.runq_setscene_tids) == 0:
|
||||
rq.scenequeue_covered = set()
|
||||
rq.scenequeue_notcovered = set()
|
||||
rq.state = runQueueRunInit
|
||||
return
|
||||
|
||||
@@ -2346,15 +2207,10 @@ class RunQueueExecuteScenequeue(RunQueueExecute):
|
||||
sq_hash.append(self.rqdata.runtaskentries[tid].hash)
|
||||
sq_taskname.append(taskname)
|
||||
sq_task.append(tid)
|
||||
|
||||
self.cooker.data.setVar("BB_SETSCENE_STAMPCURRENT_COUNT", len(stamppresent))
|
||||
|
||||
call = self.rq.hashvalidate + "(sq_fn, sq_task, sq_hash, sq_hashfn, d)"
|
||||
locs = { "sq_fn" : sq_fn, "sq_task" : sq_taskname, "sq_hash" : sq_hash, "sq_hashfn" : sq_hashfn, "d" : self.cooker.data }
|
||||
valid = bb.utils.better_eval(call, locs)
|
||||
|
||||
self.cooker.data.delVar("BB_SETSCENE_STAMPCURRENT_COUNT")
|
||||
|
||||
valid_new = stamppresent
|
||||
for v in valid:
|
||||
valid_new.append(sq_task[v])
|
||||
@@ -2416,8 +2272,8 @@ class RunQueueExecuteScenequeue(RunQueueExecute):
|
||||
def task_failoutright(self, task):
|
||||
self.runq_running.add(task)
|
||||
self.runq_buildable.add(task)
|
||||
self.stats.taskSkipped()
|
||||
self.stats.taskCompleted()
|
||||
self.stats.taskSkipped()
|
||||
self.scenequeue_notcovered.add(task)
|
||||
self.scenequeue_updatecounters(task, True)
|
||||
|
||||
@@ -2425,8 +2281,8 @@ class RunQueueExecuteScenequeue(RunQueueExecute):
|
||||
self.runq_running.add(task)
|
||||
self.runq_buildable.add(task)
|
||||
self.task_completeoutright(task)
|
||||
self.stats.taskSkipped()
|
||||
self.stats.taskCompleted()
|
||||
self.stats.taskSkipped()
|
||||
|
||||
def execute(self):
|
||||
"""
|
||||
@@ -2436,7 +2292,7 @@ class RunQueueExecuteScenequeue(RunQueueExecute):
|
||||
self.rq.read_workers()
|
||||
|
||||
task = None
|
||||
if self.can_start_task():
|
||||
if self.stats.active < self.number_tasks:
|
||||
# Find the next setscene to run
|
||||
for nexttask in self.rqdata.runq_setscene_tids:
|
||||
if nexttask in self.runq_buildable and nexttask not in self.runq_running and self.stamps[nexttask] not in self.build_stamps.values():
|
||||
@@ -2495,7 +2351,7 @@ class RunQueueExecuteScenequeue(RunQueueExecute):
|
||||
self.build_stamps2.append(self.build_stamps[task])
|
||||
self.runq_running.add(task)
|
||||
self.stats.taskActive()
|
||||
if self.can_start_task():
|
||||
if self.stats.active < self.number_tasks:
|
||||
return True
|
||||
|
||||
if self.stats.active > 0:
|
||||
|
||||
@@ -130,7 +130,6 @@ class ProcessServer(multiprocessing.Process):
|
||||
bb.utils.set_process_name("Cooker")
|
||||
|
||||
ready = []
|
||||
newconnections = []
|
||||
|
||||
self.controllersock = False
|
||||
fds = [self.sock]
|
||||
@@ -139,48 +138,37 @@ class ProcessServer(multiprocessing.Process):
|
||||
print("Entering server connection loop")
|
||||
|
||||
def disconnect_client(self, fds):
|
||||
if not self.haveui:
|
||||
return
|
||||
print("Disconnecting Client")
|
||||
if self.controllersock:
|
||||
fds.remove(self.controllersock)
|
||||
self.controllersock.close()
|
||||
self.controllersock = False
|
||||
if self.haveui:
|
||||
fds.remove(self.command_channel)
|
||||
bb.event.unregister_UIHhandler(self.event_handle, True)
|
||||
self.command_channel_reply.writer.close()
|
||||
self.event_writer.writer.close()
|
||||
self.command_channel.close()
|
||||
self.command_channel = False
|
||||
del self.event_writer
|
||||
self.lastui = time.time()
|
||||
self.cooker.clientComplete()
|
||||
self.haveui = False
|
||||
ready = select.select(fds,[],[],0)[0]
|
||||
if newconnections:
|
||||
print("Starting new client")
|
||||
conn = newconnections.pop(-1)
|
||||
fds.append(conn)
|
||||
self.controllersock = conn
|
||||
elif self.timeout is None and not ready:
|
||||
fds.remove(self.controllersock)
|
||||
fds.remove(self.command_channel)
|
||||
bb.event.unregister_UIHhandler(self.event_handle, True)
|
||||
self.command_channel_reply.writer.close()
|
||||
self.event_writer.writer.close()
|
||||
del self.event_writer
|
||||
self.controllersock.close()
|
||||
self.controllersock = False
|
||||
self.haveui = False
|
||||
self.lastui = time.time()
|
||||
self.cooker.clientComplete()
|
||||
if self.timeout is None:
|
||||
print("No timeout, exiting.")
|
||||
self.quit = True
|
||||
|
||||
while not self.quit:
|
||||
if self.sock in ready:
|
||||
while select.select([self.sock],[],[],0)[0]:
|
||||
controllersock, address = self.sock.accept()
|
||||
if self.controllersock:
|
||||
print("Queuing %s (%s)" % (str(ready), str(newconnections)))
|
||||
newconnections.append(controllersock)
|
||||
else:
|
||||
print("Accepting %s (%s)" % (str(ready), str(newconnections)))
|
||||
self.controllersock = controllersock
|
||||
fds.append(controllersock)
|
||||
self.controllersock, address = self.sock.accept()
|
||||
if self.haveui:
|
||||
print("Dropping connection attempt as we have a UI %s" % (str(ready)))
|
||||
self.controllersock.close()
|
||||
else:
|
||||
print("Accepting %s" % (str(ready)))
|
||||
fds.append(self.controllersock)
|
||||
if self.controllersock in ready:
|
||||
try:
|
||||
print("Processing Client")
|
||||
ui_fds = recvfds(self.controllersock, 3)
|
||||
print("Connecting Client")
|
||||
ui_fds = recvfds(self.controllersock, 3)
|
||||
|
||||
# Where to write events to
|
||||
writer = ConnectionWriter(ui_fds[0])
|
||||
@@ -235,8 +223,6 @@ class ProcessServer(multiprocessing.Process):
|
||||
|
||||
try:
|
||||
self.cooker.shutdown(True)
|
||||
self.cooker.notifier.stop()
|
||||
self.cooker.confignotifier.stop()
|
||||
except:
|
||||
pass
|
||||
|
||||
@@ -251,12 +237,6 @@ class ProcessServer(multiprocessing.Process):
|
||||
while not lock:
|
||||
with bb.utils.timeout(3):
|
||||
lock = bb.utils.lockfile(lockfile, shared=False, retry=False, block=True)
|
||||
if lock:
|
||||
# We hold the lock so we can remove the file (hide stale pid data)
|
||||
bb.utils.remove(lockfile)
|
||||
bb.utils.unlockfile(lock)
|
||||
return
|
||||
|
||||
if not lock:
|
||||
# Some systems may not have lsof available
|
||||
procs = None
|
||||
@@ -277,6 +257,10 @@ class ProcessServer(multiprocessing.Process):
|
||||
if procs:
|
||||
msg += ":\n%s" % str(procs)
|
||||
print(msg)
|
||||
return
|
||||
# We hold the lock so we can remove the file (hide stale pid data)
|
||||
bb.utils.remove(lockfile)
|
||||
bb.utils.unlockfile(lock)
|
||||
|
||||
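The shutdown path above keeps retrying the cooker lockfile with a three second timeout per attempt so a stale or busy server can be reported instead of blocking forever. A rough sketch of the same idea with plain fcntl and SIGALRM, not the bb.utils helpers used above (the function name is illustrative):

import fcntl, signal

def try_lock(path, attempts=5, per_attempt=3):
    def _alarm(signum, frame):
        raise InterruptedError
    old = signal.signal(signal.SIGALRM, _alarm)
    try:
        for _ in range(attempts):
            fd = open(path, "a+")
            signal.alarm(per_attempt)        # bound each blocking attempt
            try:
                fcntl.flock(fd, fcntl.LOCK_EX)
                return fd                    # caller unlocks and removes the file
            except (InterruptedError, OSError):
                fd.close()
            finally:
                signal.alarm(0)
    finally:
        signal.signal(signal.SIGALRM, old)
    return None                              # could not take the lock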
def idle_commands(self, delay, fds=None):
|
||||
nextsleep = delay
|
||||
@@ -391,12 +375,11 @@ class BitBakeServer(object):
|
||||
if os.path.exists(sockname):
|
||||
os.unlink(sockname)
|
||||
|
||||
# Place the log in the builddirectory alongside the lock file
|
||||
logfile = os.path.join(os.path.dirname(self.bitbake_lock.name), "bitbake-cookerdaemon.log")
|
||||
|
||||
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
|
||||
# AF_UNIX has path length issues so chdir here to workaround
|
||||
cwd = os.getcwd()
|
||||
logfile = os.path.join(cwd, "bitbake-cookerdaemon.log")
|
||||
|
||||
try:
|
||||
os.chdir(os.path.dirname(sockname))
|
||||
self.sock.bind(os.path.basename(sockname))
|
||||
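As the comment above notes, sun_path for AF_UNIX sockets is limited (typically around 108 bytes), which is why the server changes into the socket's directory and binds to just the basename. A self contained sketch of that workaround (function name is illustrative):

import os, socket

def bind_unix_socket(sockname):
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    cwd = os.getcwd()
    try:
        os.chdir(os.path.dirname(sockname) or ".")
        sock.bind(os.path.basename(sockname))    # short, relative sun_path
    finally:
        os.chdir(cwd)                            # always restore the old cwd
    return sock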
@@ -409,72 +392,55 @@ class BitBakeServer(object):
|
||||
bb.daemonize.createDaemon(self._startServer, logfile)
|
||||
self.sock.close()
|
||||
self.bitbake_lock.close()
|
||||
os.close(self.readypipein)
|
||||
|
||||
ready = ConnectionReader(self.readypipe)
|
||||
r = ready.poll(5)
|
||||
if not r:
|
||||
bb.note("Bitbake server didn't start within 5 seconds, waiting for 90")
|
||||
r = ready.poll(90)
|
||||
r = ready.poll(30)
|
||||
if r:
|
||||
try:
|
||||
r = ready.get()
|
||||
except EOFError:
|
||||
# Trap the child exitting/closing the pipe and error out
|
||||
r = None
|
||||
if not r or r[0] != "r":
|
||||
r = ready.get()
|
||||
if not r or r != "ready":
|
||||
ready.close()
|
||||
bb.error("Unable to start bitbake server (%s)" % str(r))
|
||||
bb.error("Unable to start bitbake server")
|
||||
if os.path.exists(logfile):
|
||||
logstart_re = re.compile(self.start_log_format % ('([0-9]+)', '([0-9-]+ [0-9:.]+)'))
|
||||
started = False
|
||||
lines = []
|
||||
lastlines = []
|
||||
with open(logfile, "r") as f:
|
||||
for line in f:
|
||||
if started:
|
||||
lines.append(line)
|
||||
else:
|
||||
lastlines.append(line)
|
||||
res = logstart_re.match(line.rstrip())
|
||||
if res:
|
||||
ldatetime = datetime.datetime.strptime(res.group(2), self.start_log_datetime_format)
|
||||
if ldatetime >= startdatetime:
|
||||
started = True
|
||||
lines.append(line)
|
||||
if len(lastlines) > 60:
|
||||
lastlines = lastlines[-60:]
|
||||
if lines:
|
||||
if len(lines) > 60:
|
||||
bb.error("Last 60 lines of server log for this session (%s):\n%s" % (logfile, "".join(lines[-60:])))
|
||||
if len(lines) > 10:
|
||||
bb.error("Last 10 lines of server log for this session (%s):\n%s" % (logfile, "".join(lines[-10:])))
|
||||
else:
|
||||
bb.error("Server log for this session (%s):\n%s" % (logfile, "".join(lines)))
|
||||
elif lastlines:
|
||||
bb.error("Server didn't start, last 60 loglines (%s):\n%s" % (logfile, "".join(lastlines)))
|
||||
else:
|
||||
bb.error("%s doesn't exist" % logfile)
|
||||
|
||||
raise SystemExit(1)
|
||||
|
||||
ready.close()
|
||||
os.close(self.readypipein)
|
||||
|
||||
def _startServer(self):
|
||||
print(self.start_log_format % (os.getpid(), datetime.datetime.now().strftime(self.start_log_datetime_format)))
|
||||
sys.stdout.flush()
|
||||
|
||||
server = ProcessServer(self.bitbake_lock, self.sock, self.sockname)
|
||||
self.configuration.setServerRegIdleCallback(server.register_idle_function)
|
||||
os.close(self.readypipe)
|
||||
writer = ConnectionWriter(self.readypipein)
|
||||
self.cooker = bb.cooker.BBCooker(self.configuration, self.featureset)
|
||||
writer.send("r")
|
||||
writer.close()
|
||||
try:
|
||||
self.cooker = bb.cooker.BBCooker(self.configuration, self.featureset)
|
||||
writer.send("ready")
|
||||
except:
|
||||
writer.send("fail")
|
||||
raise
|
||||
finally:
|
||||
os.close(self.readypipein)
|
||||
server.cooker = self.cooker
|
||||
server.server_timeout = self.configuration.server_timeout
|
||||
server.xmlrpcinterface = self.configuration.xmlrpcinterface
|
||||
print("Started bitbake server pid %d" % os.getpid())
|
||||
sys.stdout.flush()
|
||||
|
||||
server.start()
|
||||
|
||||
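The startup sequence above replaces the old single character handshake with an explicit "ready"/"fail" token written down a pipe by the daemonized child, so the parent can tell a slow start from a failed one. A rough sketch of the same handshake with plain os.pipe and fork, not BitBake's ConnectionReader/ConnectionWriter (helper names are illustrative):

import os, select

def start_with_handshake(init_server, run_server):
    r, w = os.pipe()
    pid = os.fork()
    if pid == 0:                          # child: becomes the server
        os.close(r)
        try:
            init_server()                 # anything that can fail before serving
            os.write(w, b"ready")
        except Exception:
            os.write(w, b"fail")
            os.close(w)
            os._exit(1)
        os.close(w)
        run_server()                      # only reached on success
        os._exit(0)
    os.close(w)                           # parent: wait up to 30s for the token
    token = b""
    if select.select([r], [], [], 30)[0]:
        token = os.read(r, 16)
    os.close(r)
    if token != b"ready":
        raise RuntimeError("server did not start: %r" % token)
    return pid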
def connectProcessServer(sockname, featureset):
|
||||
@@ -483,25 +449,16 @@ def connectProcessServer(sockname, featureset):
|
||||
# AF_UNIX has path length issues so chdir here to workaround
|
||||
cwd = os.getcwd()
|
||||
|
||||
try:
|
||||
os.chdir(os.path.dirname(sockname))
|
||||
sock.connect(os.path.basename(sockname))
|
||||
finally:
|
||||
os.chdir(cwd)
|
||||
|
||||
readfd = writefd = readfd1 = writefd1 = readfd2 = writefd2 = None
|
||||
eq = command_chan_recv = command_chan = None
|
||||
|
||||
sock.settimeout(10)
|
||||
|
||||
try:
|
||||
try:
|
||||
os.chdir(os.path.dirname(sockname))
|
||||
finished = False
|
||||
while not finished:
|
||||
try:
|
||||
sock.connect(os.path.basename(sockname))
|
||||
finished = True
|
||||
except IOError as e:
|
||||
if e.errno == errno.EWOULDBLOCK:
|
||||
pass
|
||||
raise
|
||||
finally:
|
||||
os.chdir(cwd)
|
||||
|
||||
# Send an fd for the remote to write events to
|
||||
readfd, writefd = os.pipe()
|
||||
@@ -530,8 +487,7 @@ def connectProcessServer(sockname, featureset):
|
||||
command_chan.close()
|
||||
for i in [writefd, readfd1, writefd2]:
|
||||
try:
|
||||
if i:
|
||||
os.close(i)
|
||||
os.close(i)
|
||||
except OSError:
|
||||
pass
|
||||
sock.close()
|
||||
|
||||
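connectProcessServer() creates its pipes locally and hands three descriptors to the server through the already connected UNIX socket; the recvfds() call seen earlier relies on SCM_RIGHTS ancillary data for this. A minimal sketch of that mechanism with sendmsg/recvmsg (these helpers are illustrative, not the ones used above):

import array, socket

def send_fds(sock, fds):
    fda = array.array("i", fds)
    # one byte of ordinary data is required to carry the ancillary message
    sock.sendmsg([b"x"], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fda)])

def recv_fds(sock, maxfds):
    fda = array.array("i")
    msg, ancdata, flags, addr = sock.recvmsg(1, socket.CMSG_LEN(maxfds * fda.itemsize))
    for level, ctype, cdata in ancdata:
        if level == socket.SOL_SOCKET and ctype == socket.SCM_RIGHTS:
            # keep only whole descriptors from the ancillary payload
            fda.frombytes(cdata[:len(cdata) - (len(cdata) % fda.itemsize)])
    return list(fda)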
820
bitbake/lib/bb/shell.py
Normal file
@@ -0,0 +1,820 @@
|
||||
# ex:ts=4:sw=4:sts=4:et
|
||||
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
|
||||
##########################################################################
|
||||
#
|
||||
# Copyright (C) 2005-2006 Michael 'Mickey' Lauer <mickey@Vanille.de>
|
||||
# Copyright (C) 2005-2006 Vanille Media
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License version 2 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License along
|
||||
# with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
#
|
||||
##########################################################################
|
||||
#
|
||||
# Thanks to:
|
||||
# * Holger Freyther <zecke@handhelds.org>
|
||||
# * Justin Patrin <papercrane@reversefold.com>
|
||||
#
|
||||
##########################################################################
|
||||
|
||||
"""
|
||||
BitBake Shell
|
||||
|
||||
IDEAS:
|
||||
* list defined tasks per package
|
||||
* list classes
|
||||
* toggle force
|
||||
* command to reparse just one (or more) bbfile(s)
|
||||
* automatic check if reparsing is necessary (inotify?)
|
||||
* frontend for bb file manipulation
|
||||
* more shell-like features:
|
||||
- output control, i.e. pipe output into grep, sort, etc.
|
||||
- job control, i.e. bring running commands into background and foreground
|
||||
* start parsing in background right after startup
|
||||
* ncurses interface
|
||||
|
||||
PROBLEMS:
|
||||
* force doesn't always work
|
||||
* readline completion for commands with more than one parameter
|
||||
|
||||
"""
|
||||
|
||||
##########################################################################
|
||||
# Import and setup global variables
|
||||
##########################################################################
|
||||
|
||||
from __future__ import print_function
|
||||
from functools import reduce
|
||||
try:
|
||||
set
|
||||
except NameError:
|
||||
from sets import Set as set
|
||||
import sys, os, readline, socket, httplib, urllib, commands, popen2, shlex, Queue, fnmatch
|
||||
from bb import data, parse, build, cache, taskdata, runqueue, providers as Providers
|
||||
|
||||
__version__ = "0.5.3.1"
|
||||
__credits__ = """BitBake Shell Version %s (C) 2005 Michael 'Mickey' Lauer <mickey@Vanille.de>
|
||||
Type 'help' for more information, press CTRL-D to exit.""" % __version__
|
||||
|
||||
cmds = {}
|
||||
leave_mainloop = False
|
||||
last_exception = None
|
||||
cooker = None
|
||||
parsed = False
|
||||
debug = os.environ.get( "BBSHELL_DEBUG", "" )
|
||||
|
||||
##########################################################################
|
||||
# Class BitBakeShellCommands
|
||||
##########################################################################
|
||||
|
||||
class BitBakeShellCommands:
|
||||
"""This class contains the valid commands for the shell"""
|
||||
|
||||
def __init__( self, shell ):
|
||||
"""Register all the commands"""
|
||||
self._shell = shell
|
||||
for attr in BitBakeShellCommands.__dict__:
|
||||
if not attr.startswith( "_" ):
|
||||
if attr.endswith( "_" ):
|
||||
command = attr[:-1].lower()
|
||||
else:
|
||||
command = attr[:].lower()
|
||||
method = getattr( BitBakeShellCommands, attr )
|
||||
debugOut( "registering command '%s'" % command )
|
||||
# scan number of arguments
|
||||
usage = getattr( method, "usage", "" )
|
||||
if usage != "<...>":
|
||||
numArgs = len( usage.split() )
|
||||
else:
|
||||
numArgs = -1
|
||||
shell.registerCommand( command, method, numArgs, "%s %s" % ( command, usage ), method.__doc__ )
|
||||
|
||||
def _checkParsed( self ):
|
||||
if not parsed:
|
||||
print("SHELL: This command needs to parse bbfiles...")
|
||||
self.parse( None )
|
||||
|
||||
def _findProvider( self, item ):
|
||||
self._checkParsed()
|
||||
# Need to use taskData for this information
|
||||
preferred = data.getVar( "PREFERRED_PROVIDER_%s" % item, cooker.configuration.data, 1 )
|
||||
if not preferred: preferred = item
|
||||
try:
|
||||
lv, lf, pv, pf = Providers.findBestProvider(preferred, cooker.configuration.data, cooker.status)
|
||||
except KeyError:
|
||||
if item in cooker.status.providers:
|
||||
pf = cooker.status.providers[item][0]
|
||||
else:
|
||||
pf = None
|
||||
return pf
|
||||
|
||||
def alias( self, params ):
|
||||
"""Register a new name for a command"""
|
||||
new, old = params
|
||||
if not old in cmds:
|
||||
print("ERROR: Command '%s' not known" % old)
|
||||
else:
|
||||
cmds[new] = cmds[old]
|
||||
print("OK")
|
||||
alias.usage = "<alias> <command>"
|
||||
|
||||
def buffer( self, params ):
|
||||
"""Dump specified output buffer"""
|
||||
index = params[0]
|
||||
print(self._shell.myout.buffer( int( index ) ))
|
||||
buffer.usage = "<index>"
|
||||
|
||||
def buffers( self, params ):
|
||||
"""Show the available output buffers"""
|
||||
commands = self._shell.myout.bufferedCommands()
|
||||
if not commands:
|
||||
print("SHELL: No buffered commands available yet. Start doing something.")
|
||||
else:
|
||||
print("="*35, "Available Output Buffers", "="*27)
|
||||
for index, cmd in enumerate( commands ):
|
||||
print("| %s %s" % ( str( index ).ljust( 3 ), cmd ))
|
||||
print("="*88)
|
||||
|
||||
def build( self, params, cmd = "build" ):
|
||||
"""Build a providee"""
|
||||
global last_exception
|
||||
globexpr = params[0]
|
||||
self._checkParsed()
|
||||
names = globfilter( cooker.status.pkg_pn, globexpr )
|
||||
if len( names ) == 0: names = [ globexpr ]
|
||||
print("SHELL: Building %s" % ' '.join( names ))
|
||||
|
||||
td = taskdata.TaskData(cooker.configuration.abort)
|
||||
localdata = data.createCopy(cooker.configuration.data)
|
||||
data.update_data(localdata)
|
||||
data.expandKeys(localdata)
|
||||
|
||||
try:
|
||||
tasks = []
|
||||
for name in names:
|
||||
td.add_provider(localdata, cooker.status, name)
|
||||
providers = td.get_provider(name)
|
||||
|
||||
if len(providers) == 0:
|
||||
raise Providers.NoProvider
|
||||
|
||||
tasks.append([name, "do_%s" % cmd])
|
||||
|
||||
td.add_unresolved(localdata, cooker.status)
|
||||
|
||||
rq = runqueue.RunQueue(cooker, localdata, cooker.status, td, tasks)
|
||||
rq.prepare_runqueue()
|
||||
rq.execute_runqueue()
|
||||
|
||||
except Providers.NoProvider:
|
||||
print("ERROR: No Provider")
|
||||
last_exception = Providers.NoProvider
|
||||
|
||||
except runqueue.TaskFailure as fnids:
|
||||
last_exception = runqueue.TaskFailure
|
||||
|
||||
except build.FuncFailed as e:
|
||||
print("ERROR: Couldn't build '%s'" % names)
|
||||
last_exception = e
|
||||
|
||||
|
||||
build.usage = "<providee>"
|
||||
|
||||
def clean( self, params ):
|
||||
"""Clean a providee"""
|
||||
self.build( params, "clean" )
|
||||
clean.usage = "<providee>"
|
||||
|
||||
def compile( self, params ):
|
||||
"""Execute 'compile' on a providee"""
|
||||
self.build( params, "compile" )
|
||||
compile.usage = "<providee>"
|
||||
|
||||
def configure( self, params ):
|
||||
"""Execute 'configure' on a providee"""
|
||||
self.build( params, "configure" )
|
||||
configure.usage = "<providee>"
|
||||
|
||||
def install( self, params ):
|
||||
"""Execute 'install' on a providee"""
|
||||
self.build( params, "install" )
|
||||
install.usage = "<providee>"
|
||||
|
||||
def edit( self, params ):
|
||||
"""Call $EDITOR on a providee"""
|
||||
name = params[0]
|
||||
bbfile = self._findProvider( name )
|
||||
if bbfile is not None:
|
||||
os.system( "%s %s" % ( os.environ.get( "EDITOR", "vi" ), bbfile ) )
|
||||
else:
|
||||
print("ERROR: Nothing provides '%s'" % name)
|
||||
edit.usage = "<providee>"
|
||||
|
||||
def environment( self, params ):
|
||||
"""Dump out the outer BitBake environment"""
|
||||
cooker.showEnvironment()
|
||||
|
||||
def exit_( self, params ):
|
||||
"""Leave the BitBake Shell"""
|
||||
debugOut( "setting leave_mainloop to true" )
|
||||
global leave_mainloop
|
||||
leave_mainloop = True
|
||||
|
||||
def fetch( self, params ):
|
||||
"""Fetch a providee"""
|
||||
self.build( params, "fetch" )
|
||||
fetch.usage = "<providee>"
|
||||
|
||||
def fileBuild( self, params, cmd = "build" ):
|
||||
"""Parse and build a .bb file"""
|
||||
global last_exception
|
||||
name = params[0]
|
||||
bf = completeFilePath( name )
|
||||
print("SHELL: Calling '%s' on '%s'" % ( cmd, bf ))
|
||||
|
||||
try:
|
||||
cooker.buildFile(bf, cmd)
|
||||
except parse.ParseError:
|
||||
print("ERROR: Unable to open or parse '%s'" % bf)
|
||||
except build.FuncFailed as e:
|
||||
print("ERROR: Couldn't build '%s'" % name)
|
||||
last_exception = e
|
||||
|
||||
fileBuild.usage = "<bbfile>"
|
||||
|
||||
def fileClean( self, params ):
|
||||
"""Clean a .bb file"""
|
||||
self.fileBuild( params, "clean" )
|
||||
fileClean.usage = "<bbfile>"
|
||||
|
||||
def fileEdit( self, params ):
|
||||
"""Call $EDITOR on a .bb file"""
|
||||
name = params[0]
|
||||
os.system( "%s %s" % ( os.environ.get( "EDITOR", "vi" ), completeFilePath( name ) ) )
|
||||
fileEdit.usage = "<bbfile>"
|
||||
|
||||
def fileRebuild( self, params ):
|
||||
"""Rebuild (clean & build) a .bb file"""
|
||||
self.fileBuild( params, "rebuild" )
|
||||
fileRebuild.usage = "<bbfile>"
|
||||
|
||||
def fileReparse( self, params ):
|
||||
"""(re)Parse a bb file"""
|
||||
bbfile = params[0]
|
||||
print("SHELL: Parsing '%s'" % bbfile)
|
||||
parse.update_mtime( bbfile )
|
||||
cooker.parser.reparse(bbfile)
|
||||
if False: #fromCache:
|
||||
print("SHELL: File has not been updated, not reparsing")
|
||||
else:
|
||||
print("SHELL: Parsed")
|
||||
fileReparse.usage = "<bbfile>"
|
||||
|
||||
def abort( self, params ):
|
||||
"""Toggle abort task execution flag (see bitbake -k)"""
|
||||
cooker.configuration.abort = not cooker.configuration.abort
|
||||
print("SHELL: Abort Flag is now '%s'" % repr( cooker.configuration.abort ))
|
||||
|
||||
def force( self, params ):
|
||||
"""Toggle force task execution flag (see bitbake -f)"""
|
||||
cooker.configuration.force = not cooker.configuration.force
|
||||
print("SHELL: Force Flag is now '%s'" % repr( cooker.configuration.force ))
|
||||
|
||||
def help( self, params ):
|
||||
"""Show a comprehensive list of commands and their purpose"""
|
||||
print("="*30, "Available Commands", "="*30)
|
||||
for cmd in sorted(cmds):
|
||||
function, numparams, usage, helptext = cmds[cmd]
|
||||
print("| %s | %s" % (usage.ljust(30), helptext))
|
||||
print("="*78)
|
||||
|
||||
def lastError( self, params ):
|
||||
"""Show the reason or log that was produced by the last BitBake event exception"""
|
||||
if last_exception is None:
|
||||
print("SHELL: No Errors yet (Phew)...")
|
||||
else:
|
||||
reason, event = last_exception.args
|
||||
print("SHELL: Reason for the last error: '%s'" % reason)
|
||||
if ':' in reason:
|
||||
msg, filename = reason.split( ':' )
|
||||
filename = filename.strip()
|
||||
print("SHELL: Dumping log file for last error:")
|
||||
try:
|
||||
print(open( filename ).read())
|
||||
except IOError:
|
||||
print("ERROR: Couldn't open '%s'" % filename)
|
||||
|
||||
def match( self, params ):
|
||||
"""Dump all files or providers matching a glob expression"""
|
||||
what, globexpr = params
|
||||
if what == "files":
|
||||
self._checkParsed()
|
||||
for key in globfilter( cooker.status.pkg_fn, globexpr ): print(key)
|
||||
elif what == "providers":
|
||||
self._checkParsed()
|
||||
for key in globfilter( cooker.status.pkg_pn, globexpr ): print(key)
|
||||
else:
|
||||
print("Usage: match %s" % self.print_.usage)
|
||||
match.usage = "<files|providers> <glob>"
|
||||
|
||||
def new( self, params ):
|
||||
"""Create a new .bb file and open the editor"""
|
||||
dirname, filename = params
|
||||
packages = '/'.join( data.getVar( "BBFILES", cooker.configuration.data, 1 ).split('/')[:-2] )
|
||||
fulldirname = "%s/%s" % ( packages, dirname )
|
||||
|
||||
if not os.path.exists( fulldirname ):
|
||||
print("SHELL: Creating '%s'" % fulldirname)
|
||||
os.mkdir( fulldirname )
|
||||
if os.path.exists( fulldirname ) and os.path.isdir( fulldirname ):
|
||||
if os.path.exists( "%s/%s" % ( fulldirname, filename ) ):
|
||||
print("SHELL: ERROR: %s/%s already exists" % ( fulldirname, filename ))
|
||||
return False
|
||||
print("SHELL: Creating '%s/%s'" % ( fulldirname, filename ))
|
||||
newpackage = open( "%s/%s" % ( fulldirname, filename ), "w" )
|
||||
print("""DESCRIPTION = ""
|
||||
SECTION = ""
|
||||
AUTHOR = ""
|
||||
HOMEPAGE = ""
|
||||
MAINTAINER = ""
|
||||
LICENSE = "GPL"
|
||||
PR = "r0"
|
||||
|
||||
SRC_URI = ""
|
||||
|
||||
#inherit base
|
||||
|
||||
#do_configure() {
|
||||
#
|
||||
#}
|
||||
|
||||
#do_compile() {
|
||||
#
|
||||
#}
|
||||
|
||||
#do_stage() {
|
||||
#
|
||||
#}
|
||||
|
||||
#do_install() {
|
||||
#
|
||||
#}
|
||||
""", file=newpackage)
|
||||
newpackage.close()
|
||||
os.system( "%s %s/%s" % ( os.environ.get( "EDITOR" ), fulldirname, filename ) )
|
||||
new.usage = "<directory> <filename>"
|
||||
|
||||
def package( self, params ):
|
||||
"""Execute 'package' on a providee"""
|
||||
self.build( params, "package" )
|
||||
package.usage = "<providee>"
|
||||
|
||||
def pasteBin( self, params ):
|
||||
"""Send a command + output buffer to the pastebin at http://rafb.net/paste"""
|
||||
index = params[0]
|
||||
contents = self._shell.myout.buffer( int( index ) )
|
||||
sendToPastebin( "output of " + params[0], contents )
|
||||
pasteBin.usage = "<index>"
|
||||
|
||||
def pasteLog( self, params ):
|
||||
"""Send the last event exception error log (if there is one) to http://rafb.net/paste"""
|
||||
if last_exception is None:
|
||||
print("SHELL: No Errors yet (Phew)...")
|
||||
else:
|
||||
reason, event = last_exception.args
|
||||
print("SHELL: Reason for the last error: '%s'" % reason)
|
||||
if ':' in reason:
|
||||
msg, filename = reason.split( ':' )
|
||||
filename = filename.strip()
|
||||
print("SHELL: Pasting log file to pastebin...")
|
||||
|
||||
file = open( filename ).read()
|
||||
sendToPastebin( "contents of " + filename, file )
|
||||
|
||||
def patch( self, params ):
|
||||
"""Execute 'patch' command on a providee"""
|
||||
self.build( params, "patch" )
|
||||
patch.usage = "<providee>"
|
||||
|
||||
def parse( self, params ):
|
||||
"""(Re-)parse .bb files and calculate the dependency graph"""
|
||||
cooker.status = cache.CacheData(cooker.caches_array)
|
||||
ignore = data.getVar("ASSUME_PROVIDED", cooker.configuration.data, 1) or ""
|
||||
cooker.status.ignored_dependencies = set( ignore.split() )
|
||||
cooker.handleCollections( data.getVar("BBFILE_COLLECTIONS", cooker.configuration.data, 1) )
|
||||
|
||||
(filelist, masked) = cooker.collect_bbfiles()
|
||||
cooker.parse_bbfiles(filelist, masked, cooker.myProgressCallback)
|
||||
cooker.buildDepgraph()
|
||||
global parsed
|
||||
parsed = True
|
||||
print()
|
||||
|
||||
def reparse( self, params ):
|
||||
"""(re)Parse a providee's bb file"""
|
||||
bbfile = self._findProvider( params[0] )
|
||||
if bbfile is not None:
|
||||
print("SHELL: Found bbfile '%s' for '%s'" % ( bbfile, params[0] ))
|
||||
self.fileReparse( [ bbfile ] )
|
||||
else:
|
||||
print("ERROR: Nothing provides '%s'" % params[0])
|
||||
reparse.usage = "<providee>"
|
||||
|
||||
def getvar( self, params ):
|
||||
"""Dump the contents of an outer BitBake environment variable"""
|
||||
var = params[0]
|
||||
value = data.getVar( var, cooker.configuration.data, 1 )
|
||||
print(value)
|
||||
getvar.usage = "<variable>"
|
||||
|
||||
def peek( self, params ):
|
||||
"""Dump contents of variable defined in providee's metadata"""
|
||||
name, var = params
|
||||
bbfile = self._findProvider( name )
|
||||
if bbfile is not None:
|
||||
the_data = cache.Cache.loadDataFull(bbfile, cooker.configuration.data)
|
||||
value = the_data.getVar( var, 1 )
|
||||
print(value)
|
||||
else:
|
||||
print("ERROR: Nothing provides '%s'" % name)
|
||||
peek.usage = "<providee> <variable>"
|
||||
|
||||
def poke( self, params ):
|
||||
"""Set contents of variable defined in providee's metadata"""
|
||||
name, var, value = params
|
||||
bbfile = self._findProvider( name )
|
||||
if bbfile is not None:
|
||||
print("ERROR: Sorry, this functionality is currently broken")
|
||||
#d = cooker.pkgdata[bbfile]
|
||||
#data.setVar( var, value, d )
|
||||
|
||||
# mark the change semi-persistent
|
||||
#cooker.pkgdata.setDirty(bbfile, d)
|
||||
#print "OK"
|
||||
else:
|
||||
print("ERROR: Nothing provides '%s'" % name)
|
||||
poke.usage = "<providee> <variable> <value>"
|
||||
|
||||
def print_( self, params ):
|
||||
"""Dump all files or providers"""
|
||||
what = params[0]
|
||||
if what == "files":
|
||||
self._checkParsed()
|
||||
for key in cooker.status.pkg_fn: print(key)
|
||||
elif what == "providers":
|
||||
self._checkParsed()
|
||||
for key in cooker.status.providers: print(key)
|
||||
else:
|
||||
print("Usage: print %s" % self.print_.usage)
|
||||
print_.usage = "<files|providers>"
|
||||
|
||||
def python( self, params ):
|
||||
"""Enter the expert mode - an interactive BitBake Python Interpreter"""
|
||||
sys.ps1 = "EXPERT BB>>> "
|
||||
sys.ps2 = "EXPERT BB... "
|
||||
import code
|
||||
interpreter = code.InteractiveConsole( dict( globals() ) )
|
||||
interpreter.interact( "SHELL: Expert Mode - BitBake Python %s\nType 'help' for more information, press CTRL-D to switch back to BBSHELL." % sys.version )
|
||||
|
||||
def showdata( self, params ):
|
||||
"""Execute 'showdata' on a providee"""
|
||||
cooker.showEnvironment(None, params)
|
||||
showdata.usage = "<providee>"
|
||||
|
||||
def setVar( self, params ):
|
||||
"""Set an outer BitBake environment variable"""
|
||||
var, value = params
|
||||
data.setVar( var, value, cooker.configuration.data )
|
||||
print("OK")
|
||||
setVar.usage = "<variable> <value>"
|
||||
|
||||
def rebuild( self, params ):
|
||||
"""Clean and rebuild a .bb file or a providee"""
|
||||
self.build( params, "clean" )
|
||||
self.build( params, "build" )
|
||||
rebuild.usage = "<providee>"
|
||||
|
||||
def shell( self, params ):
|
||||
"""Execute a shell command and dump the output"""
|
||||
if params != "":
|
||||
print(commands.getoutput( " ".join( params ) ))
|
||||
shell.usage = "<...>"
|
||||
|
||||
def stage( self, params ):
|
||||
"""Execute 'stage' on a providee"""
|
||||
self.build( params, "populate_staging" )
|
||||
stage.usage = "<providee>"
|
||||
|
||||
def status( self, params ):
|
||||
"""<just for testing>"""
|
||||
print("-" * 78)
|
||||
print("building list = '%s'" % cooker.building_list)
|
||||
print("build path = '%s'" % cooker.build_path)
|
||||
print("consider_msgs_cache = '%s'" % cooker.consider_msgs_cache)
|
||||
print("build stats = '%s'" % cooker.stats)
|
||||
if last_exception is not None: print("last_exception = '%s'" % repr( last_exception.args ))
|
||||
print("memory output contents = '%s'" % self._shell.myout._buffer)
|
||||
|
||||
def test( self, params ):
|
||||
"""<just for testing>"""
|
||||
print("testCommand called with '%s'" % params)
|
||||
|
||||
def unpack( self, params ):
|
||||
"""Execute 'unpack' on a providee"""
|
||||
self.build( params, "unpack" )
|
||||
unpack.usage = "<providee>"
|
||||
|
||||
def which( self, params ):
|
||||
"""Computes the providers for a given providee"""
|
||||
# Need to use taskData for this information
|
||||
item = params[0]
|
||||
|
||||
self._checkParsed()
|
||||
|
||||
preferred = data.getVar( "PREFERRED_PROVIDER_%s" % item, cooker.configuration.data, 1 )
|
||||
if not preferred: preferred = item
|
||||
|
||||
try:
|
||||
lv, lf, pv, pf = Providers.findBestProvider(preferred, cooker.configuration.data, cooker.status)
|
||||
except KeyError:
|
||||
lv, lf, pv, pf = (None,)*4
|
||||
|
||||
try:
|
||||
providers = cooker.status.providers[item]
|
||||
except KeyError:
|
||||
print("SHELL: ERROR: Nothing provides", preferred)
|
||||
else:
|
||||
for provider in providers:
|
||||
if provider == pf: provider = " (***) %s" % provider
|
||||
else: provider = " %s" % provider
|
||||
print(provider)
|
||||
which.usage = "<providee>"
|
||||
|
||||
##########################################################################
|
||||
# Common helper functions
|
||||
##########################################################################
|
||||
|
||||
def completeFilePath( bbfile ):
|
||||
"""Get the complete bbfile path"""
|
||||
if not cooker.status: return bbfile
|
||||
if not cooker.status.pkg_fn: return bbfile
|
||||
for key in cooker.status.pkg_fn:
|
||||
if key.endswith( bbfile ):
|
||||
return key
|
||||
return bbfile
|
||||
|
||||
def sendToPastebin( desc, content ):
|
||||
"""Send content to http://oe.pastebin.com"""
|
||||
mydata = {}
|
||||
mydata["lang"] = "Plain Text"
|
||||
mydata["desc"] = desc
|
||||
mydata["cvt_tabs"] = "No"
|
||||
mydata["nick"] = "%s@%s" % ( os.environ.get( "USER", "unknown" ), socket.gethostname() or "unknown" )
|
||||
mydata["text"] = content
|
||||
params = urllib.urlencode( mydata )
|
||||
headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
|
||||
|
||||
host = "rafb.net"
|
||||
conn = httplib.HTTPConnection( "%s:80" % host )
|
||||
conn.request("POST", "/paste/paste.php", params, headers )
|
||||
|
||||
response = conn.getresponse()
|
||||
conn.close()
|
||||
|
||||
if response.status == 302:
|
||||
location = response.getheader( "location" ) or "unknown"
|
||||
print("SHELL: Pasted to http://%s%s" % ( host, location ))
|
||||
else:
|
||||
print("ERROR: %s %s" % ( response.status, response.reason ))
|
||||
|
||||
def completer( text, state ):
|
||||
"""Return a possible readline completion"""
|
||||
debugOut( "completer called with text='%s', state='%d'" % ( text, state ) )
|
||||
|
||||
if state == 0:
|
||||
line = readline.get_line_buffer()
|
||||
if " " in line:
|
||||
line = line.split()
|
||||
# we are in second (or more) argument
|
||||
if line[0] in cmds and hasattr( cmds[line[0]][0], "usage" ): # known command and usage
|
||||
u = getattr( cmds[line[0]][0], "usage" ).split()[0]
|
||||
if u == "<variable>":
|
||||
allmatches = cooker.configuration.data.keys()
|
||||
elif u == "<bbfile>":
|
||||
if cooker.status.pkg_fn is None: allmatches = [ "(No Matches Available. Parsed yet?)" ]
|
||||
else: allmatches = [ x.split("/")[-1] for x in cooker.status.pkg_fn ]
|
||||
elif u == "<providee>":
|
||||
if cooker.status.pkg_fn is None: allmatches = [ "(No Matches Available. Parsed yet?)" ]
|
||||
else: allmatches = cooker.status.providers.iterkeys()
|
||||
else: allmatches = [ "(No tab completion available for this command)" ]
|
||||
else: allmatches = [ "(No tab completion available for this command)" ]
|
||||
else:
|
||||
# we are in first argument
|
||||
allmatches = cmds.iterkeys()
|
||||
|
||||
completer.matches = [ x for x in allmatches if x[:len(text)] == text ]
|
||||
#print "completer.matches = '%s'" % completer.matches
|
||||
if len( completer.matches ) > state:
|
||||
return completer.matches[state]
|
||||
else:
|
||||
return None
|
||||
|
||||
def debugOut( text ):
|
||||
if debug:
|
||||
sys.stderr.write( "( %s )\n" % text )
|
||||
|
||||
def columnize( alist, width = 80 ):
|
||||
"""
|
||||
A word-wrap function that preserves existing line breaks
|
||||
and most spaces in the text. Expects that existing line
|
||||
breaks are posix newlines (\n).
|
||||
"""
|
||||
return reduce(lambda line, word, width=width: '%s%s%s' %
|
||||
(line,
|
||||
' \n'[(len(line[line.rfind('\n')+1:])
|
||||
+ len(word.split('\n', 1)[0]
|
||||
) >= width)],
|
||||
word),
|
||||
alist
|
||||
)
|
||||
|
||||
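A short usage sketch for columnize(): given a flat list of words it folds them into lines no wider than the requested width while keeping any newlines already present (the sample text is illustrative):

words = "this is a short demonstration of word wrapping for the shell help output".split()
print(columnize(words, 30))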
def globfilter( names, pattern ):
|
||||
return fnmatch.filter( names, pattern )
|
||||
|
||||
##########################################################################
|
||||
# Class MemoryOutput
|
||||
##########################################################################
|
||||
|
||||
class MemoryOutput:
|
||||
"""File-like output class buffering the output of the last 10 commands"""
|
||||
def __init__( self, delegate ):
|
||||
self.delegate = delegate
|
||||
self._buffer = []
|
||||
self.text = []
|
||||
self._command = None
|
||||
|
||||
def startCommand( self, command ):
|
||||
self._command = command
|
||||
self.text = []
|
||||
def endCommand( self ):
|
||||
if self._command is not None:
|
||||
if len( self._buffer ) == 10: del self._buffer[0]
|
||||
self._buffer.append( ( self._command, self.text ) )
|
||||
def removeLast( self ):
|
||||
if self._buffer:
|
||||
del self._buffer[ len( self._buffer ) - 1 ]
|
||||
self.text = []
|
||||
self._command = None
|
||||
def lastBuffer( self ):
|
||||
if self._buffer:
|
||||
return self._buffer[ len( self._buffer ) -1 ][1]
|
||||
def bufferedCommands( self ):
|
||||
return [ cmd for cmd, output in self._buffer ]
|
||||
def buffer( self, i ):
|
||||
if i < len( self._buffer ):
|
||||
return "BB>> %s\n%s" % ( self._buffer[i][0], "".join( self._buffer[i][1] ) )
|
||||
else: return "ERROR: Invalid buffer number. Buffer needs to be in (0, %d)" % ( len( self._buffer ) - 1 )
|
||||
def write( self, text ):
|
||||
if self._command is not None and text != "BB>> ": self.text.append( text )
|
||||
if self.delegate is not None: self.delegate.write( text )
|
||||
def flush( self ):
|
||||
return self.delegate.flush()
|
||||
def fileno( self ):
|
||||
return self.delegate.fileno()
|
||||
def isatty( self ):
|
||||
return self.delegate.isatty()
|
||||
|
||||
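A brief usage sketch for MemoryOutput as the shell uses it: sys.stdout is swapped for the wrapper while a command runs, and the captured text can be replayed later through the buffer command (the command string below is illustrative):

import sys

myout = MemoryOutput(sys.stdout)       # delegate keeps echoing to the terminal
sys.stdout = myout
myout.startCommand("build busybox")
print("NOTE: building busybox ...")    # captured into the buffer and echoed
myout.endCommand()
sys.stdout = myout.delegate
print(myout.buffer(0))                 # replay what the command printed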
##########################################################################
|
||||
# Class BitBakeShell
|
||||
##########################################################################
|
||||
|
||||
class BitBakeShell:
|
||||
|
||||
def __init__( self ):
|
||||
"""Register commands and set up readline"""
|
||||
self.commandQ = Queue.Queue()
|
||||
self.commands = BitBakeShellCommands( self )
|
||||
self.myout = MemoryOutput( sys.stdout )
|
||||
self.historyfilename = os.path.expanduser( "~/.bbsh_history" )
|
||||
self.startupfilename = os.path.expanduser( "~/.bbsh_startup" )
|
||||
|
||||
readline.set_completer( completer )
|
||||
readline.set_completer_delims( " " )
|
||||
readline.parse_and_bind("tab: complete")
|
||||
|
||||
try:
|
||||
readline.read_history_file( self.historyfilename )
|
||||
except IOError:
|
||||
pass # It doesn't exist yet.
|
||||
|
||||
print(__credits__)
|
||||
|
||||
def cleanup( self ):
|
||||
"""Write readline history and clean up resources"""
|
||||
debugOut( "writing command history" )
|
||||
try:
|
||||
readline.write_history_file( self.historyfilename )
|
||||
except:
|
||||
print("SHELL: Unable to save command history")
|
||||
|
||||
def registerCommand( self, command, function, numparams = 0, usage = "", helptext = "" ):
|
||||
"""Register a command"""
|
||||
if usage == "": usage = command
|
||||
if helptext == "": helptext = function.__doc__ or "<not yet documented>"
|
||||
cmds[command] = ( function, numparams, usage, helptext )
|
||||
|
||||
def processCommand( self, command, params ):
|
||||
"""Process a command. Check number of params and print a usage string, if appropriate"""
|
||||
debugOut( "processing command '%s'..." % command )
|
||||
try:
|
||||
function, numparams, usage, helptext = cmds[command]
|
||||
except KeyError:
|
||||
print("SHELL: ERROR: '%s' command is not a valid command." % command)
|
||||
self.myout.removeLast()
|
||||
else:
|
||||
if (numparams != -1) and (not len( params ) == numparams):
|
||||
print("Usage: '%s'" % usage)
|
||||
return
|
||||
|
||||
result = function( self.commands, params )
|
||||
debugOut( "result was '%s'" % result )
|
||||
|
||||
def processStartupFile( self ):
|
||||
"""Read and execute all commands found in $HOME/.bbsh_startup"""
|
||||
if os.path.exists( self.startupfilename ):
|
||||
startupfile = open( self.startupfilename, "r" )
|
||||
for cmdline in startupfile:
|
||||
debugOut( "processing startup line '%s'" % cmdline )
|
||||
if not cmdline:
|
||||
continue
|
||||
if "|" in cmdline:
|
||||
print("ERROR: '|' in startup file is not allowed. Ignoring line")
|
||||
continue
|
||||
self.commandQ.put( cmdline.strip() )
|
||||
|
||||
def main( self ):
|
||||
"""The main command loop"""
|
||||
while not leave_mainloop:
|
||||
try:
|
||||
if self.commandQ.empty():
|
||||
sys.stdout = self.myout.delegate
|
||||
cmdline = raw_input( "BB>> " )
|
||||
sys.stdout = self.myout
|
||||
else:
|
||||
cmdline = self.commandQ.get()
|
||||
if cmdline:
|
||||
allCommands = cmdline.split( ';' )
|
||||
for command in allCommands:
|
||||
pipecmd = None
|
||||
#
|
||||
# special case for expert mode
|
||||
if command == 'python':
|
||||
sys.stdout = self.myout.delegate
|
||||
self.processCommand( command, "" )
|
||||
sys.stdout = self.myout
|
||||
else:
|
||||
self.myout.startCommand( command )
|
||||
if '|' in command: # disable output
|
||||
command, pipecmd = command.split( '|' )
|
||||
delegate = self.myout.delegate
|
||||
self.myout.delegate = None
|
||||
tokens = shlex.split( command, True )
|
||||
self.processCommand( tokens[0], tokens[1:] or "" )
|
||||
self.myout.endCommand()
|
||||
if pipecmd is not None: # restore output
|
||||
self.myout.delegate = delegate
|
||||
|
||||
pipe = popen2.Popen4( pipecmd )
|
||||
pipe.tochild.write( "\n".join( self.myout.lastBuffer() ) )
|
||||
pipe.tochild.close()
|
||||
sys.stdout.write( pipe.fromchild.read() )
|
||||
#
|
||||
except EOFError:
|
||||
print()
|
||||
return
|
||||
except KeyboardInterrupt:
|
||||
print()
|
||||
|
||||
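The pipe handling in main() writes the buffered output of the previous command into a popen2.Popen4 pipeline; popen2 only exists on Python 2, so an equivalent sketch with subprocess would look roughly like this (helper name is illustrative):

import subprocess, sys

def pipe_buffer(lines, pipecmd):
    proc = subprocess.Popen(pipecmd, shell=True, stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE, universal_newlines=True)
    out, _ = proc.communicate("\n".join(lines))   # feed the captured output through
    sys.stdout.write(out)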
##########################################################################
|
||||
# Start function - called from the BitBake command line utility
|
||||
##########################################################################
|
||||
|
||||
def start( aCooker ):
|
||||
global cooker
|
||||
cooker = aCooker
|
||||
bbshell = BitBakeShell()
|
||||
bbshell.processStartupFile()
|
||||
bbshell.main()
|
||||
bbshell.cleanup()
|
||||
|
||||
if __name__ == "__main__":
|
||||
print("SHELL: Sorry, this program should only be called by BitBake.")
|
||||
@@ -110,13 +110,42 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
||||
ignore_mismatch = ((d.getVar("BB_HASH_IGNORE_MISMATCH") or '') == '1')
|
||||
tasklist, gendeps, lookupcache = bb.data.generate_dependencies(d)
|
||||
|
||||
taskdeps, basehash = bb.data.generate_dependency_hash(tasklist, gendeps, lookupcache, self.basewhitelist, fn)
|
||||
taskdeps = {}
|
||||
basehash = {}
|
||||
|
||||
for task in tasklist:
|
||||
data = lookupcache[task]
|
||||
|
||||
if data is None:
|
||||
bb.error("Task %s from %s seems to be empty?!" % (task, fn))
|
||||
data = ''
|
||||
|
||||
gendeps[task] -= self.basewhitelist
|
||||
newdeps = gendeps[task]
|
||||
seen = set()
|
||||
while newdeps:
|
||||
nextdeps = newdeps
|
||||
seen |= nextdeps
|
||||
newdeps = set()
|
||||
for dep in nextdeps:
|
||||
if dep in self.basewhitelist:
|
||||
continue
|
||||
gendeps[dep] -= self.basewhitelist
|
||||
newdeps |= gendeps[dep]
|
||||
newdeps -= seen
|
||||
|
||||
alldeps = sorted(seen)
|
||||
for dep in alldeps:
|
||||
data = data + dep
|
||||
var = lookupcache[dep]
|
||||
if var is not None:
|
||||
data = data + str(var)
|
||||
datahash = hashlib.md5(data.encode("utf-8")).hexdigest()
|
||||
k = fn + "." + task
|
||||
if not ignore_mismatch and k in self.basehash and self.basehash[k] != basehash[k]:
|
||||
bb.error("When reparsing %s, the basehash value changed from %s to %s. The metadata is not deterministic and this needs to be fixed." % (k, self.basehash[k], basehash[k]))
|
||||
self.basehash[k] = basehash[k]
|
||||
if not ignore_mismatch and k in self.basehash and self.basehash[k] != datahash:
|
||||
bb.error("When reparsing %s, the basehash value changed from %s to %s. The metadata is not deterministic and this needs to be fixed." % (k, self.basehash[k], datahash))
|
||||
self.basehash[k] = datahash
|
||||
taskdeps[task] = alldeps
|
||||
|
||||
self.taskdeps[fn] = taskdeps
|
||||
self.gendeps[fn] = gendeps
|
||||
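The inlined code above is the core of the base hash calculation: expand each task's variable dependencies into a transitive closure (minus the whitelist) and hash the task body together with every dependency name and value. A compact sketch of that calculation for a single task, a simplification of the loop above rather than the bb.data helper it replaces (whitelist is assumed to be a set):

import hashlib

def base_hash(task, gendeps, lookupcache, whitelist):
    deps = set(gendeps[task]) - whitelist
    seen = set()
    while deps:                                  # transitive closure over gendeps
        nextdeps = deps
        seen |= nextdeps
        deps = set()
        for dep in nextdeps:
            deps |= set(gendeps.get(dep, ())) - whitelist
        deps -= seen
    data = lookupcache[task] or ""
    for dep in sorted(seen):                     # stable order keeps the hash stable
        data = data + dep
        if lookupcache.get(dep) is not None:
            data = data + str(lookupcache[dep])
    return hashlib.md5(data.encode("utf-8")).hexdigest()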
@@ -164,30 +193,19 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
||||
return taint
|
||||
|
||||
def get_taskhash(self, fn, task, deps, dataCache):
|
||||
|
||||
mc = ''
|
||||
if fn.startswith('multiconfig:'):
|
||||
mc = fn.split(':')[1]
|
||||
k = fn + "." + task
|
||||
|
||||
data = dataCache.basetaskhash[k]
|
||||
self.basehash[k] = data
|
||||
self.runtaskdeps[k] = []
|
||||
self.file_checksum_values[k] = []
|
||||
recipename = dataCache.pkg_fn[fn]
|
||||
|
||||
for dep in sorted(deps, key=clean_basepath):
|
||||
pkgname = self.pkgnameextract.search(dep).group('fn')
|
||||
if mc:
|
||||
depmc = pkgname.split(':')[1]
|
||||
if mc != depmc:
|
||||
continue
|
||||
if dep.startswith("multiconfig:") and not mc:
|
||||
continue
|
||||
depname = dataCache.pkg_fn[pkgname]
|
||||
depname = dataCache.pkg_fn[self.pkgnameextract.search(dep).group('fn')]
|
||||
if not self.rundep_check(fn, recipename, task, dep, depname, dataCache):
|
||||
continue
|
||||
if dep not in self.taskhash:
|
||||
bb.fatal("%s is not in taskhash, caller isn't calling in dependency order?" % dep)
|
||||
bb.fatal("%s is not in taskhash, caller isn't calling in dependency order?", dep)
|
||||
data = data + self.taskhash[dep]
|
||||
self.runtaskdeps[k].append(dep)
|
||||
|
||||
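get_taskhash() then folds the task hashes of all run-time dependencies into the task's own base hash in a stable sorted order, so a change anywhere in the dependency chain changes the final hash. A simplified sketch of that chaining (the real code also sorts by clean_basepath and mixes in file checksums and taints):

import hashlib

def task_hash(basehash, dep_taskhashes):
    data = basehash                              # this task's own base hash
    for dep in sorted(dep_taskhashes):           # stable order over dependencies
        data += dep_taskhashes[dep]
    return hashlib.md5(data.encode("utf-8")).hexdigest()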
@@ -329,7 +347,7 @@ class SignatureGeneratorBasicHash(SignatureGeneratorBasic):
|
||||
|
||||
def stampcleanmask(self, stampbase, fn, taskname, extrainfo):
|
||||
return self.stampfile(stampbase, fn, taskname, extrainfo, clean=True)
|
||||
|
||||
|
||||
def invalidate_task(self, task, d, fn):
|
||||
bb.note("Tainting hash to force rebuild of task %s, %s" % (fn, task))
|
||||
bb.build.write_taint(task, d, fn)
|
||||
@@ -344,10 +362,10 @@ def dump_this_task(outfile, d):
|
||||
def init_colors(enable_color):
|
||||
"""Initialise colour dict for passing to compare_sigfiles()"""
|
||||
# First set up the colours
|
||||
colors = {'color_title': '\033[1m',
|
||||
'color_default': '\033[0m',
|
||||
'color_add': '\033[0;32m',
|
||||
'color_remove': '\033[0;31m',
|
||||
colors = {'color_title': '\033[1;37;40m',
|
||||
'color_default': '\033[0;37;40m',
|
||||
'color_add': '\033[1;32;40m',
|
||||
'color_remove': '\033[1;31;40m',
|
||||
}
|
||||
# Leave all keys present but clear the values
|
||||
if not enable_color:
|
||||
@@ -618,7 +636,7 @@ def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
|
||||
if collapsed:
|
||||
output.extend(recout)
|
||||
else:
|
||||
# If a dependent hash changed, might as well print the line above and then defer to the changes in
|
||||
# If a dependent hash changed, might as well print the line above and then defer to the changes in
|
||||
# that hash since in all likelyhood, they're the same changes this task also saw.
|
||||
output = [output[-1]] + recout
|
||||
|
||||
|
||||
@@ -70,8 +70,6 @@ class TaskData:
|
||||
|
||||
self.skiplist = skiplist
|
||||
|
||||
self.mcdepends = []
|
||||
|
||||
def add_tasks(self, fn, dataCache):
|
||||
"""
|
||||
Add tasks for a given fn to the database
|
||||
@@ -90,13 +88,6 @@ class TaskData:
|
||||
|
||||
self.add_extra_deps(fn, dataCache)
|
||||
|
||||
def add_mcdepends(task):
|
||||
for dep in task_deps['mcdepends'][task].split():
|
||||
if len(dep.split(':')) != 5:
|
||||
bb.msg.fatal("TaskData", "Error for %s:%s[%s], multiconfig dependency %s does not contain exactly four ':' characters.\n Task '%s' should be specified in the form 'multiconfig:fromMC:toMC:packagename:task'" % (fn, task, 'mcdepends', dep, 'mcdepends'))
|
||||
if dep not in self.mcdepends:
|
||||
self.mcdepends.append(dep)
|
||||
|
||||
# Common code for dep_name/depends = 'depends'/idepends and 'rdepends'/irdepends
|
||||
def handle_deps(task, dep_name, depends, seen):
|
||||
if dep_name in task_deps and task in task_deps[dep_name]:
|
||||
@@ -119,20 +110,16 @@ class TaskData:
|
||||
parentids = []
|
||||
for dep in task_deps['parents'][task]:
|
||||
if dep not in task_deps['tasks']:
|
||||
bb.debug(2, "Not adding dependency of %s on %s since %s does not exist" % (task, dep, dep))
|
||||
bb.debug(2, "Not adding dependeny of %s on %s since %s does not exist" % (task, dep, dep))
|
||||
continue
|
||||
parentid = "%s:%s" % (fn, dep)
|
||||
parentids.append(parentid)
|
||||
self.taskentries[tid].tdepends.extend(parentids)
|
||||
|
||||
|
||||
# Touch all intertask dependencies
|
||||
handle_deps(task, 'depends', self.taskentries[tid].idepends, self.seen_build_target)
|
||||
handle_deps(task, 'rdepends', self.taskentries[tid].irdepends, self.seen_run_target)
|
||||
|
||||
if 'mcdepends' in task_deps and task in task_deps['mcdepends']:
|
||||
add_mcdepends(task)
|
||||
|
||||
# Work out build dependencies
|
||||
if not fn in self.depids:
|
||||
dependids = set()
|
||||
@@ -550,9 +537,6 @@ class TaskData:
|
||||
provmap[name] = provider[0]
|
||||
return provmap
|
||||
|
||||
def get_mcdepends(self):
|
||||
return self.mcdepends
|
||||
|
||||
def dump_data(self):
|
||||
"""
|
||||
Dump some debug information on the internal data structures
|
||||
|
||||
@@ -1,83 +0,0 @@
|
||||
# ex:ts=4:sw=4:sts=4:et
|
||||
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
|
||||
#
|
||||
# BitBake Tests for cooker.py
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License version 2 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License along
|
||||
# with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
#
|
||||
|
||||
import unittest
|
||||
import tempfile
|
||||
import os
|
||||
import bb, bb.cooker
|
||||
import re
|
||||
import logging
|
||||
|
||||
# Cooker tests
|
||||
class CookerTest(unittest.TestCase):
|
||||
def setUp(self):
|
||||
# At least one variable needs to be set
|
||||
self.d = bb.data.init()
|
||||
topdir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "testdata/cooker")
|
||||
self.d.setVar('TOPDIR', topdir)
|
||||
|
||||
def test_CookerCollectFiles_sublayers(self):
|
||||
'''Test that a sublayer of an existing layer does not trigger
|
||||
No bb files matched ...'''
|
||||
|
||||
def append_collection(topdir, path, d):
|
||||
collection = path.split('/')[-1]
|
||||
pattern = "^" + topdir + "/" + path + "/"
|
||||
regex = re.compile(pattern)
|
||||
priority = 5
|
||||
|
||||
d.setVar('BBFILE_COLLECTIONS', (d.getVar('BBFILE_COLLECTIONS') or "") + " " + collection)
|
||||
d.setVar('BBFILE_PATTERN_%s' % (collection), pattern)
|
||||
d.setVar('BBFILE_PRIORITY_%s' % (collection), priority)
|
||||
|
||||
return (collection, pattern, regex, priority)
|
||||
|
||||
topdir = self.d.getVar("TOPDIR")
|
||||
|
||||
# Priorities: list of (collection, pattern, regex, priority)
|
||||
bbfile_config_priorities = []
|
||||
# Order is important for this test, shortest to longest is typical failure case
|
||||
bbfile_config_priorities.append( append_collection(topdir, 'first', self.d) )
|
||||
bbfile_config_priorities.append( append_collection(topdir, 'second', self.d) )
|
||||
bbfile_config_priorities.append( append_collection(topdir, 'second/third', self.d) )
|
||||
|
||||
pkgfns = [ topdir + '/first/recipes/sample1_1.0.bb',
|
||||
topdir + '/second/recipes/sample2_1.0.bb',
|
||||
topdir + '/second/third/recipes/sample3_1.0.bb' ]
|
||||
|
||||
class LogHandler(logging.Handler):
|
||||
def __init__(self):
|
||||
logging.Handler.__init__(self)
|
||||
self.logdata = []
|
||||
|
||||
def emit(self, record):
|
||||
self.logdata.append(record.getMessage())
|
||||
|
||||
# Move cooker to use my special logging
|
||||
logger = bb.cooker.logger
|
||||
log_handler = LogHandler()
|
||||
logger.addHandler(log_handler)
|
||||
collection = bb.cooker.CookerCollectFiles(bbfile_config_priorities)
|
||||
collection.collection_priorities(pkgfns, self.d)
|
||||
logger.removeHandler(log_handler)
|
||||
|
||||
# Should be empty (no generated messages)
|
||||
expected = []
|
||||
|
||||
self.assertEqual(log_handler.logdata, expected)
|
||||
@@ -281,7 +281,7 @@ class TestConcatOverride(unittest.TestCase):
|
||||
def test_remove(self):
|
||||
self.d.setVar("TEST", "${VAL} ${BAR}")
|
||||
self.d.setVar("TEST_remove", "val")
|
||||
self.assertEqual(self.d.getVar("TEST"), " bar")
|
||||
self.assertEqual(self.d.getVar("TEST"), "bar")
|
||||
|
||||
def test_remove_cleared(self):
|
||||
self.d.setVar("TEST", "${VAL} ${BAR}")
|
||||
@@ -300,7 +300,7 @@ class TestConcatOverride(unittest.TestCase):
|
||||
self.d.setVar("TEST", "${VAL} ${BAR}")
|
||||
self.d.setVar("TEST_remove", "val")
|
||||
self.d.setVar("TEST_TEST", "${TEST} ${TEST}")
|
||||
self.assertEqual(self.d.getVar("TEST_TEST"), " bar bar")
|
||||
self.assertEqual(self.d.getVar("TEST_TEST"), "bar bar")
|
||||
|
||||
def test_empty_remove(self):
|
||||
self.d.setVar("TEST", "")
|
||||
@@ -311,25 +311,13 @@ class TestConcatOverride(unittest.TestCase):
|
||||
self.d.setVar("BAR", "Z")
|
||||
self.d.setVar("TEST", "${BAR}/X Y")
|
||||
self.d.setVar("TEST_remove", "${BAR}/X")
|
||||
self.assertEqual(self.d.getVar("TEST"), " Y")
|
||||
self.assertEqual(self.d.getVar("TEST"), "Y")
|
||||
|
||||
def test_remove_expansion_items(self):
|
||||
self.d.setVar("TEST", "A B C D")
|
||||
self.d.setVar("BAR", "B D")
|
||||
self.d.setVar("TEST_remove", "${BAR}")
|
||||
self.assertEqual(self.d.getVar("TEST"), "A C ")
|
||||
|
||||
def test_remove_preserve_whitespace(self):
|
||||
# When the removal isn't active, the original value should be preserved
|
||||
self.d.setVar("TEST", " A B")
|
||||
self.d.setVar("TEST_remove", "C")
|
||||
self.assertEqual(self.d.getVar("TEST"), " A B")
|
||||
|
||||
def test_remove_preserve_whitespace2(self):
|
||||
# When the removal is active preserve the whitespace
|
||||
self.d.setVar("TEST", " A B")
|
||||
self.d.setVar("TEST_remove", "B")
|
||||
self.assertEqual(self.d.getVar("TEST"), " A ")
|
||||
self.assertEqual(self.d.getVar("TEST"), "A C")
|
||||
|
||||
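These tests exercise the _remove override: matching items are filtered out of the expanded value when the variable is read. A small sketch using the same bb.data API as the tests above (the exact whitespace of the result is precisely what this change adjusts, so it is not asserted here):

import bb.data

d = bb.data.init()
d.setVar("TEST", "A B C D")
d.setVar("TEST_remove", "B D")
print(d.getVar("TEST"))    # B and D are dropped from the expanded value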
class TestOverrides(unittest.TestCase):
|
||||
def setUp(self):
|
||||
@@ -386,15 +374,6 @@ class TestOverrides(unittest.TestCase):
|
||||
self.d.setVar("OVERRIDES", "foo:bar:some_val")
|
||||
self.assertEqual(self.d.getVar("TEST"), "testvalue3")
|
||||
|
||||
def test_remove_with_override(self):
|
||||
self.d.setVar("TEST_bar", "testvalue2")
|
||||
self.d.setVar("TEST_some_val", "testvalue3 testvalue5")
|
||||
self.d.setVar("TEST_some_val_remove", "testvalue3")
|
||||
self.d.setVar("TEST_foo", "testvalue4")
|
||||
self.d.setVar("OVERRIDES", "foo:bar:some_val")
|
||||
self.assertEqual(self.d.getVar("TEST"), " testvalue5")
|
||||
|
||||
|
||||
class TestKeyExpansion(unittest.TestCase):
|
||||
def setUp(self):
|
||||
self.d = bb.data.init()
|
||||
@@ -464,54 +443,6 @@ class Contains(unittest.TestCase):
|
||||
self.assertFalse(bb.utils.contains_any("SOMEFLAG", "x y z", True, False, self.d))
|
||||
|
||||
|
||||
class TaskHash(unittest.TestCase):
|
||||
def test_taskhashes(self):
|
||||
def gettask_bashhash(taskname, d):
|
||||
tasklist, gendeps, lookupcache = bb.data.generate_dependencies(d)
|
||||
taskdeps, basehash = bb.data.generate_dependency_hash(tasklist, gendeps, lookupcache, set(), "somefile")
|
||||
bb.warn(str(lookupcache))
|
||||
return basehash["somefile." + taskname]
|
||||
|
||||
d = bb.data.init()
|
||||
d.setVar("__BBTASKS", ["mytask"])
|
||||
d.setVar("__exportlist", [])
|
||||
d.setVar("mytask", "${MYCOMMAND}")
|
||||
d.setVar("MYCOMMAND", "${VAR}; foo; bar; exit 0")
|
||||
d.setVar("VAR", "val")
|
||||
orighash = gettask_bashhash("mytask", d)
|
||||
|
||||
# Changing a variable should change the hash
|
||||
d.setVar("VAR", "val2")
|
||||
nexthash = gettask_bashhash("mytask", d)
|
||||
self.assertNotEqual(orighash, nexthash)
|
||||
|
||||
d.setVar("VAR", "val")
|
||||
# Adding an inactive removal shouldn't change the hash
|
||||
d.setVar("BAR", "notbar")
|
||||
d.setVar("MYCOMMAND_remove", "${BAR}")
|
||||
nexthash = gettask_bashhash("mytask", d)
|
||||
self.assertEqual(orighash, nexthash)
|
||||
|
||||
# Adding an active removal should change the hash
|
||||
d.setVar("BAR", "bar;")
|
||||
nexthash = gettask_bashhash("mytask", d)
|
||||
self.assertNotEqual(orighash, nexthash)
|
||||
|
||||
# Setup an inactive contains()
|
||||
d.setVar("VAR", "${@bb.utils.contains('VAR2', 'A', 'val', '', d)}")
|
||||
orighash = gettask_bashhash("mytask", d)
|
||||
|
||||
# Activate the contains() and the hash should change
|
||||
d.setVar("VAR2", "A")
|
||||
nexthash = gettask_bashhash("mytask", d)
|
||||
self.assertNotEqual(orighash, nexthash)
|
||||
|
||||
# The contains should be inactive but even though VAR2 has a
|
||||
# different value the hash should match the original
|
||||
d.setVar("VAR2", "B")
|
||||
nexthash = gettask_bashhash("mytask", d)
|
||||
self.assertEqual(orighash, nexthash)
|
||||
|
||||
class Serialize(unittest.TestCase):
|
||||
|
||||
def test_serialize(self):
|
||||
|
||||
@@ -30,45 +30,28 @@ import time
|
||||
import pickle
|
||||
from unittest.mock import Mock
|
||||
from unittest.mock import call
|
||||
from bb.msg import BBLogFormatter
|
||||
|
||||
|
||||
class EventQueueStubBase(object):
|
||||
""" Base class for EventQueueStub classes """
|
||||
def __init__(self):
|
||||
self.event_calls = []
|
||||
return
|
||||
|
||||
def _store_event_data_string(self, event):
|
||||
if isinstance(event, logging.LogRecord):
|
||||
formatter = BBLogFormatter("%(levelname)s: %(message)s")
|
||||
self.event_calls.append(formatter.format(event))
|
||||
else:
|
||||
self.event_calls.append(bb.event.getName(event))
|
||||
return
|
||||
|
||||
|
||||
class EventQueueStub(EventQueueStubBase):
|
||||
class EventQueueStub():
|
||||
""" Class used as specification for UI event handler queue stub objects """
|
||||
def __init__(self):
|
||||
super(EventQueueStub, self).__init__()
|
||||
return
|
||||
|
||||
def send(self, event):
|
||||
super(EventQueueStub, self)._store_event_data_string(event)
|
||||
return
|
||||
|
||||
|
||||
class PickleEventQueueStub(EventQueueStubBase):
|
||||
class PickleEventQueueStub():
|
||||
""" Class used as specification for UI event handler queue stub objects
|
||||
with sendpickle method """
|
||||
def __init__(self):
|
||||
super(PickleEventQueueStub, self).__init__()
|
||||
return
|
||||
|
||||
def sendpickle(self, pickled_event):
|
||||
event = pickle.loads(pickled_event)
|
||||
super(PickleEventQueueStub, self)._store_event_data_string(event)
|
||||
return
|
||||
|
||||
|
||||
class UIClientStub(object):
|
||||
class UIClientStub():
|
||||
""" Class used as specification for UI event handler stub objects """
|
||||
def __init__(self):
|
||||
self.event = None
|
||||
@@ -76,7 +59,7 @@ class UIClientStub(object):
|
||||
|
||||
class EventHandlingTest(unittest.TestCase):
|
||||
""" Event handling test class """
|
||||
|
||||
_threadlock_test_calls = []
|
||||
|
||||
def setUp(self):
|
||||
self._test_process = Mock()
|
||||
@@ -196,33 +179,6 @@ class EventHandlingTest(unittest.TestCase):
|
||||
self.assertEqual(self._test_process.event_handler2.call_args_list,
|
||||
expected_event_handler2)
|
||||
|
||||
def test_class_handler_filters(self):
|
||||
""" Test filters for class handlers """
|
||||
mask = ["bb.event.OperationStarted"]
|
||||
result = bb.event.register("event_handler1",
|
||||
self._test_process.event_handler1,
|
||||
mask)
|
||||
self.assertEqual(result, bb.event.Registered)
|
||||
result = bb.event.register("event_handler2",
|
||||
self._test_process.event_handler2,
|
||||
"*")
|
||||
self.assertEqual(result, bb.event.Registered)
|
||||
bb.event.set_eventfilter(
|
||||
lambda name, handler, event, d :
|
||||
name == 'event_handler2' and
|
||||
bb.event.getName(event) == "OperationStarted")
|
||||
event1 = bb.event.OperationStarted()
|
||||
event2 = bb.event.OperationCompleted(total=123)
|
||||
bb.event.fire_class_handlers(event1, None)
|
||||
bb.event.fire_class_handlers(event2, None)
|
||||
bb.event.fire_class_handlers(event2, None)
|
||||
expected_event_handler1 = []
|
||||
expected_event_handler2 = [call(event1)]
|
||||
self.assertEqual(self._test_process.event_handler1.call_args_list,
|
||||
expected_event_handler1)
|
||||
self.assertEqual(self._test_process.event_handler2.call_args_list,
|
||||
expected_event_handler2)
|
||||
|
||||
def test_change_handler_event_mapping(self):
|
||||
""" Test changing the event mapping for class handlers """
|
||||
event1 = bb.event.OperationStarted()
|
||||
@@ -240,8 +196,8 @@ class EventHandlingTest(unittest.TestCase):
|
||||
expected)
|
||||
|
||||
# unregister handler and register it only for OperationStarted
|
||||
bb.event.remove("event_handler1",
|
||||
self._test_process.event_handler1)
|
||||
result = bb.event.remove("event_handler1",
|
||||
self._test_process.event_handler1)
|
||||
mask = ["bb.event.OperationStarted"]
|
||||
result = bb.event.register("event_handler1",
|
||||
self._test_process.event_handler1,
|
||||
@@ -254,8 +210,8 @@ class EventHandlingTest(unittest.TestCase):
|
||||
expected)
|
||||
|
||||
# unregister handler and register it only for OperationCompleted
|
||||
bb.event.remove("event_handler1",
|
||||
self._test_process.event_handler1)
|
||||
result = bb.event.remove("event_handler1",
|
||||
self._test_process.event_handler1)
|
||||
mask = ["bb.event.OperationCompleted"]
|
||||
result = bb.event.register("event_handler1",
|
||||
self._test_process.event_handler1,
|
||||
@@ -303,61 +259,6 @@ class EventHandlingTest(unittest.TestCase):
|
||||
self.assertEqual(self._test_ui2.event.sendpickle.call_args_list,
|
||||
expected)
|
||||
|
||||
def test_ui_handler_mask_filter(self):
|
||||
""" Test filters for UI handlers """
|
||||
mask = ["bb.event.OperationStarted"]
|
||||
debug_domains = {}
|
||||
self._test_ui1.event = Mock(spec_set=EventQueueStub)
|
||||
result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
|
||||
bb.event.set_UIHmask(result, logging.INFO, debug_domains, mask)
|
||||
self._test_ui2.event = Mock(spec_set=PickleEventQueueStub)
|
||||
result = bb.event.register_UIHhandler(self._test_ui2, mainui=True)
|
||||
bb.event.set_UIHmask(result, logging.INFO, debug_domains, mask)
|
||||
|
||||
event1 = bb.event.OperationStarted()
|
||||
event2 = bb.event.OperationCompleted(total=1)
|
||||
|
||||
bb.event.fire_ui_handlers(event1, None)
|
||||
bb.event.fire_ui_handlers(event2, None)
|
||||
expected = [call(event1)]
|
||||
self.assertEqual(self._test_ui1.event.send.call_args_list,
|
||||
expected)
|
||||
expected = [call(pickle.dumps(event1))]
|
||||
self.assertEqual(self._test_ui2.event.sendpickle.call_args_list,
|
||||
expected)
|
||||
|
||||
def test_ui_handler_log_filter(self):
|
||||
""" Test log filters for UI handlers """
|
||||
mask = ["*"]
|
||||
debug_domains = {'BitBake.Foo': logging.WARNING}
|
||||
|
||||
self._test_ui1.event = EventQueueStub()
|
||||
result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
|
||||
bb.event.set_UIHmask(result, logging.ERROR, debug_domains, mask)
|
||||
self._test_ui2.event = PickleEventQueueStub()
|
||||
result = bb.event.register_UIHhandler(self._test_ui2, mainui=True)
|
||||
bb.event.set_UIHmask(result, logging.ERROR, debug_domains, mask)
|
||||
|
||||
event1 = bb.event.OperationStarted()
|
||||
bb.event.fire_ui_handlers(event1, None) # All events match
|
||||
|
||||
event_log_handler = bb.event.LogHandler()
|
||||
logger = logging.getLogger("BitBake")
|
||||
logger.addHandler(event_log_handler)
|
||||
logger1 = logging.getLogger("BitBake.Foo")
|
||||
logger1.warning("Test warning LogRecord1") # Matches debug_domains level
|
||||
logger1.info("Test info LogRecord") # Filtered out
|
||||
logger2 = logging.getLogger("BitBake.Bar")
|
||||
logger2.error("Test error LogRecord") # Matches filter base level
|
||||
logger2.warning("Test warning LogRecord2") # Filtered out
|
||||
logger.removeHandler(event_log_handler)
|
||||
|
||||
expected = ['OperationStarted',
|
||||
'WARNING: Test warning LogRecord1',
|
||||
'ERROR: Test error LogRecord']
|
||||
self.assertEqual(self._test_ui1.event.event_calls, expected)
|
||||
self.assertEqual(self._test_ui2.event.event_calls, expected)
|
||||
|
||||
def test_fire(self):
|
||||
""" Test fire method used to trigger class and ui event handlers """
|
||||
mask = ["bb.event.ConfigParsed"]
|
||||
@@ -388,28 +289,18 @@ class EventHandlingTest(unittest.TestCase):
|
||||
self.assertEqual(self._test_ui1.event.send.call_args_list,
|
||||
expected)
|
||||
|
||||
def test_worker_fire(self):
|
||||
""" Test the triggering of bb.event.worker_fire callback """
|
||||
bb.event.worker_fire = Mock()
|
||||
event = bb.event.Event()
|
||||
bb.event.fire(event, None)
|
||||
expected = [call(event, None)]
|
||||
self.assertEqual(bb.event.worker_fire.call_args_list, expected)
|
||||
|
||||
def test_print_ui_queue(self):
|
||||
""" Test print_ui_queue method """
|
||||
event1 = bb.event.OperationStarted()
|
||||
event2 = bb.event.OperationCompleted(total=123)
|
||||
bb.event.fire(event1, None)
|
||||
bb.event.fire(event2, None)
|
||||
event_log_handler = bb.event.LogHandler()
|
||||
logger = logging.getLogger("BitBake")
|
||||
logger.addHandler(event_log_handler)
|
||||
logger.addHandler(bb.event.LogHandler())
|
||||
logger.info("Test info LogRecord")
|
||||
logger.warning("Test warning LogRecord")
|
||||
with self.assertLogs("BitBake", level="INFO") as cm:
|
||||
bb.event.print_ui_queue()
|
||||
logger.removeHandler(event_log_handler)
|
||||
self.assertEqual(cm.output,
|
||||
["INFO:BitBake:Test info LogRecord",
|
||||
"WARNING:BitBake:Test warning LogRecord"])
|
||||
@@ -473,7 +364,6 @@ class EventHandlingTest(unittest.TestCase):
|
||||
self.assertEqual(self._threadlock_test_calls,
|
||||
["w1_ui1", "w1_ui2", "w2_ui1", "w2_ui2"])
|
||||
|
||||
|
||||
def test_disable_threadlock(self):
|
||||
""" Test disable_threadlock method """
|
||||
self._set_threadlock_test_mockups()
|
||||
@@ -485,502 +375,3 @@ class EventHandlingTest(unittest.TestCase):
|
||||
# processed before finishing handling the first worker event.
|
||||
self.assertEqual(self._threadlock_test_calls,
|
||||
["w1_ui1", "w2_ui1", "w1_ui2", "w2_ui2"])
|
||||
|
||||
|
||||
class EventClassesTest(unittest.TestCase):
    """ Event classes test class """

    _worker_pid = 54321

    def setUp(self):
        bb.event.worker_pid = EventClassesTest._worker_pid

    def test_Event(self):
        """ Test the Event base class """
        event = bb.event.Event()
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_HeartbeatEvent(self):
        """ Test the HeartbeatEvent class """
        time = 10
        event = bb.event.HeartbeatEvent(time)
        self.assertEqual(event.time, time)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_OperationStarted(self):
        """ Test OperationStarted event class """
        msg = "Foo Bar"
        event = bb.event.OperationStarted(msg)
        self.assertEqual(event.msg, msg)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_OperationCompleted(self):
        """ Test OperationCompleted event class """
        msg = "Foo Bar"
        total = 123
        event = bb.event.OperationCompleted(total, msg)
        self.assertEqual(event.msg, msg)
        self.assertEqual(event.total, total)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_OperationProgress(self):
        """ Test OperationProgress event class """
        msg = "Foo Bar"
        total = 123
        current = 111
        event = bb.event.OperationProgress(current, total, msg)
        self.assertEqual(event.msg, msg + ": %s/%s" % (current, total))
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_ConfigParsed(self):
        """ Test the ConfigParsed class """
        event = bb.event.ConfigParsed()
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_MultiConfigParsed(self):
        """ Test MultiConfigParsed event class """
        mcdata = {"foobar": "Foo Bar"}
        event = bb.event.MultiConfigParsed(mcdata)
        self.assertEqual(event.mcdata, mcdata)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_RecipeEvent(self):
        """ Test RecipeEvent event base class """
        callback = lambda a: 2 * a
        event = bb.event.RecipeEvent(callback)
        self.assertEqual(event.fn(1), callback(1))
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_RecipePreFinalise(self):
        """ Test RecipePreFinalise event class """
        callback = lambda a: 2 * a
        event = bb.event.RecipePreFinalise(callback)
        self.assertEqual(event.fn(1), callback(1))
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_RecipeTaskPreProcess(self):
        """ Test RecipeTaskPreProcess event class """
        callback = lambda a: 2 * a
        tasklist = [("foobar", callback)]
        event = bb.event.RecipeTaskPreProcess(callback, tasklist)
        self.assertEqual(event.fn(1), callback(1))
        self.assertEqual(event.tasklist, tasklist)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_RecipeParsed(self):
        """ Test RecipeParsed event base class """
        callback = lambda a: 2 * a
        event = bb.event.RecipeParsed(callback)
        self.assertEqual(event.fn(1), callback(1))
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_StampUpdate(self):
        targets = ["foo", "bar"]
        stampfns = [lambda:"foobar"]
        event = bb.event.StampUpdate(targets, stampfns)
        self.assertEqual(event.targets, targets)
        self.assertEqual(event.stampPrefix, stampfns)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

def test_BuildBase(self):
|
||||
""" Test base class for bitbake build events """
|
||||
name = "foo"
|
||||
pkgs = ["bar"]
|
||||
failures = 123
|
||||
event = bb.event.BuildBase(name, pkgs, failures)
|
||||
self.assertEqual(event.name, name)
|
||||
self.assertEqual(event.pkgs, pkgs)
|
||||
self.assertEqual(event.getFailures(), failures)
|
||||
name = event.name = "bar"
|
||||
pkgs = event.pkgs = ["foo"]
|
||||
self.assertEqual(event.name, name)
|
||||
self.assertEqual(event.pkgs, pkgs)
|
||||
self.assertEqual(event.getFailures(), failures)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_BuildInit(self):
|
||||
""" Test class for bitbake build invocation events """
|
||||
event = bb.event.BuildInit()
|
||||
self.assertEqual(event.name, None)
|
||||
self.assertEqual(event.pkgs, [])
|
||||
self.assertEqual(event.getFailures(), 0)
|
||||
name = event.name = "bar"
|
||||
pkgs = event.pkgs = ["foo"]
|
||||
self.assertEqual(event.name, name)
|
||||
self.assertEqual(event.pkgs, pkgs)
|
||||
self.assertEqual(event.getFailures(), 0)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_BuildStarted(self):
|
||||
""" Test class for build started events """
|
||||
name = "foo"
|
||||
pkgs = ["bar"]
|
||||
failures = 123
|
||||
event = bb.event.BuildStarted(name, pkgs, failures)
|
||||
self.assertEqual(event.name, name)
|
||||
self.assertEqual(event.pkgs, pkgs)
|
||||
self.assertEqual(event.getFailures(), failures)
|
||||
self.assertEqual(event.msg, "Building Started")
|
||||
name = event.name = "bar"
|
||||
pkgs = event.pkgs = ["foo"]
|
||||
msg = event.msg = "foobar"
|
||||
self.assertEqual(event.name, name)
|
||||
self.assertEqual(event.pkgs, pkgs)
|
||||
self.assertEqual(event.getFailures(), failures)
|
||||
self.assertEqual(event.msg, msg)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_BuildCompleted(self):
|
||||
""" Test class for build completed events """
|
||||
total = 1000
|
||||
name = "foo"
|
||||
pkgs = ["bar"]
|
||||
failures = 123
|
||||
interrupted = 1
|
||||
event = bb.event.BuildCompleted(total, name, pkgs, failures,
|
||||
interrupted)
|
||||
self.assertEqual(event.name, name)
|
||||
self.assertEqual(event.pkgs, pkgs)
|
||||
self.assertEqual(event.getFailures(), failures)
|
||||
self.assertEqual(event.msg, "Building Failed")
|
||||
event2 = bb.event.BuildCompleted(total, name, pkgs)
|
||||
self.assertEqual(event2.name, name)
|
||||
self.assertEqual(event2.pkgs, pkgs)
|
||||
self.assertEqual(event2.getFailures(), 0)
|
||||
self.assertEqual(event2.msg, "Building Succeeded")
|
||||
self.assertEqual(event2.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_DiskFull(self):
|
||||
""" Test DiskFull event class """
|
||||
dev = "/dev/foo"
|
||||
type = "ext4"
|
||||
freespace = "104M"
|
||||
mountpoint = "/"
|
||||
event = bb.event.DiskFull(dev, type, freespace, mountpoint)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_MonitorDiskEvent(self):
|
||||
""" Test MonitorDiskEvent class """
|
||||
available_bytes = 10000000
|
||||
free_bytes = 90000000
|
||||
total_bytes = 1000000000
|
||||
du = bb.event.DiskUsageSample(available_bytes, free_bytes,
|
||||
total_bytes)
|
||||
event = bb.event.MonitorDiskEvent(du)
|
||||
self.assertEqual(event.disk_usage.available_bytes, available_bytes)
|
||||
self.assertEqual(event.disk_usage.free_bytes, free_bytes)
|
||||
self.assertEqual(event.disk_usage.total_bytes, total_bytes)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_NoProvider(self):
|
||||
""" Test NoProvider event class """
|
||||
item = "foobar"
|
||||
event1 = bb.event.NoProvider(item)
|
||||
self.assertEqual(event1.getItem(), item)
|
||||
self.assertEqual(event1.isRuntime(), False)
|
||||
self.assertEqual(str(event1), "Nothing PROVIDES 'foobar'")
|
||||
runtime = True
|
||||
dependees = ["foo", "bar"]
|
||||
reasons = None
|
||||
close_matches = ["foibar", "footbar"]
|
||||
event2 = bb.event.NoProvider(item, runtime, dependees, reasons,
|
||||
close_matches)
|
||||
self.assertEqual(event2.isRuntime(), True)
|
||||
expected = ("Nothing RPROVIDES 'foobar' (but foo, bar RDEPENDS"
|
||||
" on or otherwise requires it). Close matches:\n"
|
||||
" foibar\n"
|
||||
" footbar")
|
||||
self.assertEqual(str(event2), expected)
|
||||
reasons = ["Item does not exist on database"]
|
||||
close_matches = ["foibar", "footbar"]
|
||||
event3 = bb.event.NoProvider(item, runtime, dependees, reasons,
|
||||
close_matches)
|
||||
expected = ("Nothing RPROVIDES 'foobar' (but foo, bar RDEPENDS"
|
||||
" on or otherwise requires it)\n"
|
||||
"Item does not exist on database")
|
||||
self.assertEqual(str(event3), expected)
|
||||
self.assertEqual(event3.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_MultipleProviders(self):
|
||||
""" Test MultipleProviders event class """
|
||||
item = "foobar"
|
||||
candidates = ["foobarv1", "foobars"]
|
||||
event1 = bb.event.MultipleProviders(item, candidates)
|
||||
self.assertEqual(event1.isRuntime(), False)
|
||||
self.assertEqual(event1.getItem(), item)
|
||||
self.assertEqual(event1.getCandidates(), candidates)
|
||||
expected = ("Multiple providers are available for foobar (foobarv1,"
|
||||
" foobars)\n"
|
||||
"Consider defining a PREFERRED_PROVIDER entry to match "
|
||||
"foobar")
|
||||
self.assertEqual(str(event1), expected)
|
||||
runtime = True
|
||||
event2 = bb.event.MultipleProviders(item, candidates, runtime)
|
||||
self.assertEqual(event2.isRuntime(), runtime)
|
||||
expected = ("Multiple providers are available for runtime foobar "
|
||||
"(foobarv1, foobars)\n"
|
||||
"Consider defining a PREFERRED_RPROVIDER entry to match "
|
||||
"foobar")
|
||||
self.assertEqual(str(event2), expected)
|
||||
self.assertEqual(event2.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_ParseStarted(self):
|
||||
""" Test ParseStarted event class """
|
||||
total = 123
|
||||
event = bb.event.ParseStarted(total)
|
||||
self.assertEqual(event.msg, "Recipe parsing Started")
|
||||
self.assertEqual(event.total, total)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_ParseCompleted(self):
|
||||
""" Test ParseCompleted event class """
|
||||
cached = 10
|
||||
parsed = 13
|
||||
skipped = 7
|
||||
virtuals = 2
|
||||
masked = 1
|
||||
errors = 0
|
||||
total = 23
|
||||
event = bb.event.ParseCompleted(cached, parsed, skipped, masked,
|
||||
virtuals, errors, total)
|
||||
self.assertEqual(event.msg, "Recipe parsing Completed")
|
||||
expected = [cached, parsed, skipped, virtuals, masked, errors,
|
||||
cached + parsed, total]
|
||||
actual = [event.cached, event.parsed, event.skipped, event.virtuals,
|
||||
event.masked, event.errors, event.sofar, event.total]
|
||||
self.assertEqual(str(actual), str(expected))
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_ParseProgress(self):
|
||||
""" Test ParseProgress event class """
|
||||
current = 10
|
||||
total = 100
|
||||
event = bb.event.ParseProgress(current, total)
|
||||
self.assertEqual(event.msg,
|
||||
"Recipe parsing" + ": %s/%s" % (current, total))
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_CacheLoadStarted(self):
|
||||
""" Test CacheLoadStarted event class """
|
||||
total = 123
|
||||
event = bb.event.CacheLoadStarted(total)
|
||||
self.assertEqual(event.msg, "Loading cache Started")
|
||||
self.assertEqual(event.total, total)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_CacheLoadProgress(self):
|
||||
""" Test CacheLoadProgress event class """
|
||||
current = 10
|
||||
total = 100
|
||||
event = bb.event.CacheLoadProgress(current, total)
|
||||
self.assertEqual(event.msg,
|
||||
"Loading cache" + ": %s/%s" % (current, total))
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_CacheLoadCompleted(self):
|
||||
""" Test CacheLoadCompleted event class """
|
||||
total = 23
|
||||
num_entries = 12
|
||||
event = bb.event.CacheLoadCompleted(total, num_entries)
|
||||
self.assertEqual(event.msg, "Loading cache Completed")
|
||||
expected = [total, num_entries]
|
||||
actual = [event.total, event.num_entries]
|
||||
self.assertEqual(str(actual), str(expected))
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_TreeDataPreparationStarted(self):
|
||||
""" Test TreeDataPreparationStarted event class """
|
||||
event = bb.event.TreeDataPreparationStarted()
|
||||
self.assertEqual(event.msg, "Preparing tree data Started")
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_TreeDataPreparationProgress(self):
|
||||
""" Test TreeDataPreparationProgress event class """
|
||||
current = 10
|
||||
total = 100
|
||||
event = bb.event.TreeDataPreparationProgress(current, total)
|
||||
self.assertEqual(event.msg,
|
||||
"Preparing tree data" + ": %s/%s" % (current, total))
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_TreeDataPreparationCompleted(self):
|
||||
""" Test TreeDataPreparationCompleted event class """
|
||||
total = 23
|
||||
event = bb.event.TreeDataPreparationCompleted(total)
|
||||
self.assertEqual(event.msg, "Preparing tree data Completed")
|
||||
self.assertEqual(event.total, total)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_DepTreeGenerated(self):
|
||||
""" Test DepTreeGenerated event class """
|
||||
depgraph = Mock()
|
||||
event = bb.event.DepTreeGenerated(depgraph)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_TargetsTreeGenerated(self):
|
||||
""" Test TargetsTreeGenerated event class """
|
||||
model = Mock()
|
||||
event = bb.event.TargetsTreeGenerated(model)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_ReachableStamps(self):
|
||||
""" Test ReachableStamps event class """
|
||||
stamps = [Mock(), Mock()]
|
||||
event = bb.event.ReachableStamps(stamps)
|
||||
self.assertEqual(event.stamps, stamps)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_FilesMatchingFound(self):
|
||||
""" Test FilesMatchingFound event class """
|
||||
pattern = "foo.*bar"
|
||||
matches = ["foobar"]
|
||||
event = bb.event.FilesMatchingFound(pattern, matches)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_ConfigFilesFound(self):
|
||||
""" Test ConfigFilesFound event class """
|
||||
variable = "FOO_BAR"
|
||||
values = ["foo", "bar"]
|
||||
event = bb.event.ConfigFilesFound(variable, values)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_ConfigFilePathFound(self):
|
||||
""" Test ConfigFilePathFound event class """
|
||||
path = "/foo/bar"
|
||||
event = bb.event.ConfigFilePathFound(path)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_message_classes(self):
|
||||
""" Test message event classes """
|
||||
msg = "foobar foo bar"
|
||||
event = bb.event.MsgBase(msg)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
event = bb.event.MsgDebug(msg)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
event = bb.event.MsgNote(msg)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
event = bb.event.MsgWarn(msg)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
event = bb.event.MsgError(msg)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
event = bb.event.MsgFatal(msg)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
event = bb.event.MsgPlain(msg)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_LogExecTTY(self):
|
||||
""" Test LogExecTTY event class """
|
||||
msg = "foo bar"
|
||||
prog = "foo.sh"
|
||||
sleep_delay = 10
|
||||
retries = 3
|
||||
event = bb.event.LogExecTTY(msg, prog, sleep_delay, retries)
|
||||
self.assertEqual(event.msg, msg)
|
||||
self.assertEqual(event.prog, prog)
|
||||
self.assertEqual(event.sleep_delay, sleep_delay)
|
||||
self.assertEqual(event.retries, retries)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def _throw_zero_division_exception(self):
|
||||
a = 1 / 0
|
||||
return
|
||||
|
||||
def _worker_handler(self, event, d):
|
||||
self._returned_event = event
|
||||
return
|
||||
|
||||
def test_LogHandler(self):
|
||||
""" Test LogHandler class """
|
||||
logger = logging.getLogger("TestEventClasses")
|
||||
logger.propagate = False
|
||||
handler = bb.event.LogHandler(logging.INFO)
|
||||
logger.addHandler(handler)
|
||||
bb.event.worker_fire = self._worker_handler
|
||||
try:
|
||||
self._throw_zero_division_exception()
|
||||
except ZeroDivisionError as ex:
|
||||
logger.exception(ex)
|
||||
event = self._returned_event
|
||||
try:
|
||||
pe = pickle.dumps(event)
|
||||
newevent = pickle.loads(pe)
|
||||
except:
|
||||
self.fail('Logged event is not serializable')
|
||||
self.assertEqual(event.taskpid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_MetadataEvent(self):
|
||||
""" Test MetadataEvent class """
|
||||
eventtype = "footype"
|
||||
eventdata = {"foo": "bar"}
|
||||
event = bb.event.MetadataEvent(eventtype, eventdata)
|
||||
self.assertEqual(event.type, eventtype)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_ProcessStarted(self):
|
||||
""" Test ProcessStarted class """
|
||||
processname = "foo"
|
||||
total = 9783128974
|
||||
event = bb.event.ProcessStarted(processname, total)
|
||||
self.assertEqual(event.processname, processname)
|
||||
self.assertEqual(event.total, total)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_ProcessProgress(self):
|
||||
""" Test ProcessProgress class """
|
||||
processname = "foo"
|
||||
progress = 243224
|
||||
event = bb.event.ProcessProgress(processname, progress)
|
||||
self.assertEqual(event.processname, processname)
|
||||
self.assertEqual(event.progress, progress)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_ProcessFinished(self):
|
||||
""" Test ProcessFinished class """
|
||||
processname = "foo"
|
||||
total = 1242342344
|
||||
event = bb.event.ProcessFinished(processname)
|
||||
self.assertEqual(event.processname, processname)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_SanityCheck(self):
|
||||
""" Test SanityCheck class """
|
||||
event1 = bb.event.SanityCheck()
|
||||
self.assertEqual(event1.generateevents, True)
|
||||
self.assertEqual(event1.pid, EventClassesTest._worker_pid)
|
||||
generateevents = False
|
||||
event2 = bb.event.SanityCheck(generateevents)
|
||||
self.assertEqual(event2.generateevents, generateevents)
|
||||
self.assertEqual(event2.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_SanityCheckPassed(self):
|
||||
""" Test SanityCheckPassed class """
|
||||
event = bb.event.SanityCheckPassed()
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_SanityCheckFailed(self):
|
||||
""" Test SanityCheckFailed class """
|
||||
msg = "The sanity test failed."
|
||||
event1 = bb.event.SanityCheckFailed(msg)
|
||||
self.assertEqual(event1.pid, EventClassesTest._worker_pid)
|
||||
network_error = True
|
||||
event2 = bb.event.SanityCheckFailed(msg, network_error)
|
||||
self.assertEqual(event2.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_network_event_classes(self):
|
||||
""" Test network event classes """
|
||||
event1 = bb.event.NetworkTest()
|
||||
generateevents = False
|
||||
self.assertEqual(event1.pid, EventClassesTest._worker_pid)
|
||||
event2 = bb.event.NetworkTest(generateevents)
|
||||
self.assertEqual(event2.pid, EventClassesTest._worker_pid)
|
||||
event3 = bb.event.NetworkTestPassed()
|
||||
self.assertEqual(event3.pid, EventClassesTest._worker_pid)
|
||||
event4 = bb.event.NetworkTestFailed()
|
||||
self.assertEqual(event4.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_FindSigInfoResult(self):
|
||||
""" Test FindSigInfoResult event class """
|
||||
result = [Mock()]
|
||||
event = bb.event.FindSigInfoResult(result)
|
||||
self.assertEqual(event.result, result)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
@@ -20,7 +20,6 @@
|
||||
#
|
||||
|
||||
import unittest
|
||||
import hashlib
|
||||
import tempfile
|
||||
import subprocess
|
||||
import collections
|
||||
@@ -402,12 +401,6 @@ class MirrorUriTest(FetcherTest):
|
||||
: "git://somewhere.org/somedir/mtd-utils.git;tag=1234567890123456789012345678901234567890;protocol=http",
|
||||
("git://git.invalid.infradead.org/foo/mtd-utils.git;tag=1234567890123456789012345678901234567890", "git://.*/.*", "git://somewhere.org/somedir/MIRRORNAME;protocol=http")
|
||||
: "git://somewhere.org/somedir/git.invalid.infradead.org.foo.mtd-utils.git;tag=1234567890123456789012345678901234567890;protocol=http",
|
||||
("http://somewhere.org/somedir1/somedir2/somefile_1.2.3.tar.gz", "http://.*/.*", "http://somewhere2.org")
|
||||
: "http://somewhere2.org/somefile_1.2.3.tar.gz",
|
||||
("http://somewhere.org/somedir1/somedir2/somefile_1.2.3.tar.gz", "http://.*/.*", "http://somewhere2.org/")
|
||||
: "http://somewhere2.org/somefile_1.2.3.tar.gz",
|
||||
("git://someserver.org/bitbake;tag=1234567890123456789012345678901234567890;branch=master", "git://someserver.org/bitbake;branch=master", "git://git.openembedded.org/bitbake;protocol=http")
|
||||
: "git://git.openembedded.org/bitbake;tag=1234567890123456789012345678901234567890;branch=master;protocol=http",
|
||||
|
||||
#Renaming files doesn't work
|
||||
#("http://somewhere.org/somedir1/somefile_1.2.3.tar.gz", "http://somewhere.org/somedir1/somefile_1.2.3.tar.gz", "http://somewhere2.org/somedir3/somefile_2.3.4.tar.gz") : "http://somewhere2.org/somedir3/somefile_2.3.4.tar.gz"
|
||||
@@ -463,124 +456,6 @@ class MirrorUriTest(FetcherTest):
|
||||
'https://BBBB/B/B/B/bitbake/bitbake-1.0.tar.gz',
|
||||
'http://AAAA/A/A/A/B/B/bitbake/bitbake-1.0.tar.gz'])
|
||||
|
||||
|
||||
class GitDownloadDirectoryNamingTest(FetcherTest):
|
||||
def setUp(self):
|
||||
super(GitDownloadDirectoryNamingTest, self).setUp()
|
||||
self.recipe_url = "git://git.openembedded.org/bitbake"
|
||||
self.recipe_dir = "git.openembedded.org.bitbake"
|
||||
self.mirror_url = "git://github.com/openembedded/bitbake.git"
|
||||
self.mirror_dir = "github.com.openembedded.bitbake.git"
|
||||
|
||||
self.d.setVar('SRCREV', '82ea737a0b42a8b53e11c9cde141e9e9c0bd8c40')
|
||||
|
||||
def setup_mirror_rewrite(self):
|
||||
self.d.setVar("PREMIRRORS", self.recipe_url + " " + self.mirror_url + " \n")
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_that_directory_is_named_after_recipe_url_when_no_mirroring_is_used(self):
|
||||
self.setup_mirror_rewrite()
|
||||
fetcher = bb.fetch.Fetch([self.recipe_url], self.d)
|
||||
|
||||
fetcher.download()
|
||||
|
||||
dir = os.listdir(self.dldir + "/git2")
|
||||
self.assertIn(self.recipe_dir, dir)
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_that_directory_exists_for_mirrored_url_and_recipe_url_when_mirroring_is_used(self):
|
||||
self.setup_mirror_rewrite()
|
||||
fetcher = bb.fetch.Fetch([self.recipe_url], self.d)
|
||||
|
||||
fetcher.download()
|
||||
|
||||
dir = os.listdir(self.dldir + "/git2")
|
||||
self.assertIn(self.mirror_dir, dir)
|
||||
self.assertIn(self.recipe_dir, dir)
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_that_recipe_directory_and_mirrored_directory_exists_when_mirroring_is_used_and_the_mirrored_directory_already_exists(self):
|
||||
self.setup_mirror_rewrite()
|
||||
fetcher = bb.fetch.Fetch([self.mirror_url], self.d)
|
||||
fetcher.download()
|
||||
fetcher = bb.fetch.Fetch([self.recipe_url], self.d)
|
||||
|
||||
fetcher.download()
|
||||
|
||||
dir = os.listdir(self.dldir + "/git2")
|
||||
self.assertIn(self.mirror_dir, dir)
|
||||
self.assertIn(self.recipe_dir, dir)
|
||||
|
||||
|
||||
class TarballNamingTest(FetcherTest):
|
||||
def setUp(self):
|
||||
super(TarballNamingTest, self).setUp()
|
||||
self.recipe_url = "git://git.openembedded.org/bitbake"
|
||||
self.recipe_tarball = "git2_git.openembedded.org.bitbake.tar.gz"
|
||||
self.mirror_url = "git://github.com/openembedded/bitbake.git"
|
||||
self.mirror_tarball = "git2_github.com.openembedded.bitbake.git.tar.gz"
|
||||
|
||||
self.d.setVar('BB_GENERATE_MIRROR_TARBALLS', '1')
|
||||
self.d.setVar('SRCREV', '82ea737a0b42a8b53e11c9cde141e9e9c0bd8c40')
|
||||
|
||||
def setup_mirror_rewrite(self):
|
||||
self.d.setVar("PREMIRRORS", self.recipe_url + " " + self.mirror_url + " \n")
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_that_the_recipe_tarball_is_created_when_no_mirroring_is_used(self):
|
||||
fetcher = bb.fetch.Fetch([self.recipe_url], self.d)
|
||||
|
||||
fetcher.download()
|
||||
|
||||
dir = os.listdir(self.dldir)
|
||||
self.assertIn(self.recipe_tarball, dir)
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_that_the_mirror_tarball_is_created_when_mirroring_is_used(self):
|
||||
self.setup_mirror_rewrite()
|
||||
fetcher = bb.fetch.Fetch([self.recipe_url], self.d)
|
||||
|
||||
fetcher.download()
|
||||
|
||||
dir = os.listdir(self.dldir)
|
||||
self.assertIn(self.mirror_tarball, dir)
|
||||
|
||||
|
||||
class GitShallowTarballNamingTest(FetcherTest):
|
||||
def setUp(self):
|
||||
super(GitShallowTarballNamingTest, self).setUp()
|
||||
self.recipe_url = "git://git.openembedded.org/bitbake"
|
||||
self.recipe_tarball = "gitshallow_git.openembedded.org.bitbake_82ea737-1_master.tar.gz"
|
||||
self.mirror_url = "git://github.com/openembedded/bitbake.git"
|
||||
self.mirror_tarball = "gitshallow_github.com.openembedded.bitbake.git_82ea737-1_master.tar.gz"
|
||||
|
||||
self.d.setVar('BB_GIT_SHALLOW', '1')
|
||||
self.d.setVar('BB_GENERATE_SHALLOW_TARBALLS', '1')
|
||||
self.d.setVar('SRCREV', '82ea737a0b42a8b53e11c9cde141e9e9c0bd8c40')
|
||||
|
||||
def setup_mirror_rewrite(self):
|
||||
self.d.setVar("PREMIRRORS", self.recipe_url + " " + self.mirror_url + " \n")
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_that_the_tarball_is_named_after_recipe_url_when_no_mirroring_is_used(self):
|
||||
fetcher = bb.fetch.Fetch([self.recipe_url], self.d)
|
||||
|
||||
fetcher.download()
|
||||
|
||||
dir = os.listdir(self.dldir)
|
||||
self.assertIn(self.recipe_tarball, dir)
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_that_the_mirror_tarball_is_created_when_mirroring_is_used(self):
|
||||
self.setup_mirror_rewrite()
|
||||
fetcher = bb.fetch.Fetch([self.recipe_url], self.d)
|
||||
|
||||
fetcher.download()
|
||||
|
||||
dir = os.listdir(self.dldir)
|
||||
self.assertIn(self.mirror_tarball, dir)
|
||||
|
||||
|
||||
class FetcherLocalTest(FetcherTest):
|
||||
def setUp(self):
|
||||
def touch(fn):
|
||||
@@ -647,109 +522,6 @@ class FetcherLocalTest(FetcherTest):
|
||||
with self.assertRaises(bb.fetch2.UnpackError):
|
||||
self.fetchUnpack(['file://a;subdir=/bin/sh'])
|
||||
|
||||
class FetcherNoNetworkTest(FetcherTest):
|
||||
def setUp(self):
|
||||
super().setUp()
|
||||
# all test cases are based on not having network
|
||||
self.d.setVar("BB_NO_NETWORK", "1")
|
||||
|
||||
def test_missing(self):
|
||||
string = "this is a test file\n".encode("utf-8")
|
||||
self.d.setVarFlag("SRC_URI", "md5sum", hashlib.md5(string).hexdigest())
|
||||
self.d.setVarFlag("SRC_URI", "sha256sum", hashlib.sha256(string).hexdigest())
|
||||
|
||||
self.assertFalse(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz")))
|
||||
self.assertFalse(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz.done")))
|
||||
fetcher = bb.fetch.Fetch(["http://invalid.yoctoproject.org/test-file.tar.gz"], self.d)
|
||||
with self.assertRaises(bb.fetch2.NetworkAccess):
|
||||
fetcher.download()
|
||||
|
||||
def test_valid_missing_donestamp(self):
|
||||
# create the file in the download directory with correct hash
|
||||
string = "this is a test file\n".encode("utf-8")
|
||||
with open(os.path.join(self.dldir, "test-file.tar.gz"), "wb") as f:
|
||||
f.write(string)
|
||||
|
||||
self.d.setVarFlag("SRC_URI", "md5sum", hashlib.md5(string).hexdigest())
|
||||
self.d.setVarFlag("SRC_URI", "sha256sum", hashlib.sha256(string).hexdigest())
|
||||
|
||||
self.assertTrue(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz")))
|
||||
self.assertFalse(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz.done")))
|
||||
fetcher = bb.fetch.Fetch(["http://invalid.yoctoproject.org/test-file.tar.gz"], self.d)
|
||||
fetcher.download()
|
||||
self.assertTrue(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz.done")))
|
||||
|
||||
def test_invalid_missing_donestamp(self):
|
||||
# create an invalid file in the download directory with incorrect hash
|
||||
string = "this is a test file\n".encode("utf-8")
|
||||
with open(os.path.join(self.dldir, "test-file.tar.gz"), "wb"):
|
||||
pass
|
||||
|
||||
self.d.setVarFlag("SRC_URI", "md5sum", hashlib.md5(string).hexdigest())
|
||||
self.d.setVarFlag("SRC_URI", "sha256sum", hashlib.sha256(string).hexdigest())
|
||||
|
||||
self.assertTrue(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz")))
|
||||
self.assertFalse(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz.done")))
|
||||
fetcher = bb.fetch.Fetch(["http://invalid.yoctoproject.org/test-file.tar.gz"], self.d)
|
||||
with self.assertRaises(bb.fetch2.NetworkAccess):
|
||||
fetcher.download()
|
||||
# the existing file should not exist or should have been moved to "bad-checksum"
|
||||
self.assertFalse(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz")))
|
||||
|
||||
def test_nochecksums_missing(self):
|
||||
self.assertFalse(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz")))
|
||||
self.assertFalse(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz.done")))
|
||||
# ssh fetch does not support checksums
|
||||
fetcher = bb.fetch.Fetch(["ssh://invalid@invalid.yoctoproject.org/test-file.tar.gz"], self.d)
|
||||
# attempts to download with missing donestamp
|
||||
with self.assertRaises(bb.fetch2.NetworkAccess):
|
||||
fetcher.download()
|
||||
|
||||
def test_nochecksums_missing_donestamp(self):
|
||||
# create a file in the download directory
|
||||
with open(os.path.join(self.dldir, "test-file.tar.gz"), "wb"):
|
||||
pass
|
||||
|
||||
self.assertTrue(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz")))
|
||||
self.assertFalse(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz.done")))
|
||||
# ssh fetch does not support checksums
|
||||
fetcher = bb.fetch.Fetch(["ssh://invalid@invalid.yoctoproject.org/test-file.tar.gz"], self.d)
|
||||
# attempts to download with missing donestamp
|
||||
with self.assertRaises(bb.fetch2.NetworkAccess):
|
||||
fetcher.download()
|
||||
|
||||
def test_nochecksums_has_donestamp(self):
|
||||
# create a file in the download directory with the donestamp
|
||||
with open(os.path.join(self.dldir, "test-file.tar.gz"), "wb"):
|
||||
pass
|
||||
with open(os.path.join(self.dldir, "test-file.tar.gz.done"), "wb"):
|
||||
pass
|
||||
|
||||
self.assertTrue(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz")))
|
||||
self.assertTrue(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz.done")))
|
||||
# ssh fetch does not support checksums
|
||||
fetcher = bb.fetch.Fetch(["ssh://invalid@invalid.yoctoproject.org/test-file.tar.gz"], self.d)
|
||||
# should not fetch
|
||||
fetcher.download()
|
||||
# both files should still exist
|
||||
self.assertTrue(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz")))
|
||||
self.assertTrue(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz.done")))
|
||||
|
||||
def test_nochecksums_missing_has_donestamp(self):
|
||||
# create a file in the download directory with the donestamp
|
||||
with open(os.path.join(self.dldir, "test-file.tar.gz.done"), "wb"):
|
||||
pass
|
||||
|
||||
self.assertFalse(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz")))
|
||||
self.assertTrue(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz.done")))
|
||||
# ssh fetch does not support checksums
|
||||
fetcher = bb.fetch.Fetch(["ssh://invalid@invalid.yoctoproject.org/test-file.tar.gz"], self.d)
|
||||
with self.assertRaises(bb.fetch2.NetworkAccess):
|
||||
fetcher.download()
|
||||
# both files should still exist
|
||||
self.assertFalse(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz")))
|
||||
self.assertFalse(os.path.exists(os.path.join(self.dldir, "test-file.tar.gz.done")))
|
||||
|
||||
class FetcherNetworkTest(FetcherTest):
|
||||
@skipIfNoNetwork()
|
||||
def test_fetch(self):
|
||||
@@ -869,140 +641,36 @@ class FetcherNetworkTest(FetcherTest):
|
||||
self.assertRaises(bb.fetch.ParameterError, self.gitfetcher, url, url)
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_gitfetch_finds_local_tarball_for_mirrored_url_when_previous_downloaded_by_the_recipe_url(self):
|
||||
recipeurl = "git://git.openembedded.org/bitbake"
|
||||
mirrorurl = "git://someserver.org/bitbake"
|
||||
def test_gitfetch_premirror(self):
|
||||
url1 = "git://git.openembedded.org/bitbake"
|
||||
url2 = "git://someserver.org/bitbake"
|
||||
self.d.setVar("PREMIRRORS", "git://someserver.org/bitbake git://git.openembedded.org/bitbake \n")
|
||||
self.gitfetcher(recipeurl, mirrorurl)
|
||||
self.gitfetcher(url1, url2)
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_gitfetch_finds_local_tarball_when_previous_downloaded_from_a_premirror(self):
|
||||
recipeurl = "git://someserver.org/bitbake"
|
||||
def test_gitfetch_premirror2(self):
|
||||
url1 = url2 = "git://someserver.org/bitbake"
|
||||
self.d.setVar("PREMIRRORS", "git://someserver.org/bitbake git://git.openembedded.org/bitbake \n")
|
||||
self.gitfetcher(recipeurl, recipeurl)
|
||||
self.gitfetcher(url1, url2)
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_gitfetch_finds_local_repository_when_premirror_rewrites_the_recipe_url(self):
|
||||
def test_gitfetch_premirror3(self):
|
||||
realurl = "git://git.openembedded.org/bitbake"
|
||||
recipeurl = "git://someserver.org/bitbake"
|
||||
dummyurl = "git://someserver.org/bitbake"
|
||||
self.sourcedir = self.unpackdir.replace("unpacked", "sourcemirror.git")
|
||||
os.chdir(self.tempdir)
|
||||
bb.process.run("git clone %s %s 2> /dev/null" % (realurl, self.sourcedir), shell=True)
|
||||
self.d.setVar("PREMIRRORS", "%s git://%s;protocol=file \n" % (recipeurl, self.sourcedir))
|
||||
self.gitfetcher(recipeurl, recipeurl)
|
||||
self.d.setVar("PREMIRRORS", "%s git://%s;protocol=file \n" % (dummyurl, self.sourcedir))
|
||||
self.gitfetcher(dummyurl, dummyurl)
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_git_submodule(self):
|
||||
# URL with ssh submodules
|
||||
url = "gitsm://git.yoctoproject.org/git-submodule-test;branch=ssh-gitsm-tests;rev=049da4a6cb198d7c0302e9e8b243a1443cb809a7"
|
||||
# Original URL (comment this if you have ssh access to git.yoctoproject.org)
|
||||
url = "gitsm://git.yoctoproject.org/git-submodule-test;branch=master;rev=a2885dd7d25380d23627e7544b7bbb55014b16ee"
|
||||
fetcher = bb.fetch.Fetch([url], self.d)
|
||||
fetcher = bb.fetch.Fetch(["gitsm://git.yoctoproject.org/git-submodule-test;rev=f12e57f2edf0aa534cf1616fa983d165a92b0842"], self.d)
|
||||
fetcher.download()
|
||||
# Previous cwd has been deleted
|
||||
os.chdir(os.path.dirname(self.unpackdir))
|
||||
fetcher.unpack(self.unpackdir)
|
||||
|
||||
repo_path = os.path.join(self.tempdir, 'unpacked', 'git')
|
||||
self.assertTrue(os.path.exists(repo_path), msg='Unpacked repository missing')
|
||||
self.assertTrue(os.path.exists(os.path.join(repo_path, 'bitbake')), msg='bitbake submodule missing')
|
||||
self.assertFalse(os.path.exists(os.path.join(repo_path, 'na')), msg='uninitialized submodule present')
|
||||
|
||||
# Only when we're running the extended test with a submodule's submodule, can we check this.
|
||||
if os.path.exists(os.path.join(repo_path, 'bitbake-gitsm-test1')):
|
||||
self.assertTrue(os.path.exists(os.path.join(repo_path, 'bitbake-gitsm-test1', 'bitbake')), msg='submodule of submodule missing')
|
||||
|
||||
def test_git_submodule_dbus_broker(self):
|
||||
# The following external repositories have show failures in fetch and unpack operations
|
||||
# We want to avoid regressions!
|
||||
url = "gitsm://github.com/bus1/dbus-broker;protocol=git;rev=fc874afa0992d0c75ec25acb43d344679f0ee7d2"
|
||||
fetcher = bb.fetch.Fetch([url], self.d)
|
||||
fetcher.download()
|
||||
# Previous cwd has been deleted
|
||||
os.chdir(os.path.dirname(self.unpackdir))
|
||||
fetcher.unpack(self.unpackdir)
|
||||
|
||||
repo_path = os.path.join(self.tempdir, 'unpacked', 'git')
|
||||
self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/subprojects/c-dvar/config')), msg='Missing submodule config "subprojects/c-dvar"')
|
||||
self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/subprojects/c-list/config')), msg='Missing submodule config "subprojects/c-list"')
|
||||
self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/subprojects/c-rbtree/config')), msg='Missing submodule config "subprojects/c-rbtree"')
|
||||
self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/subprojects/c-sundry/config')), msg='Missing submodule config "subprojects/c-sundry"')
|
||||
self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/subprojects/c-utf8/config')), msg='Missing submodule config "subprojects/c-utf8"')
|
||||
|
||||
def test_git_submodule_CLI11(self):
|
||||
url = "gitsm://github.com/CLIUtils/CLI11;protocol=git;rev=bd4dc911847d0cde7a6b41dfa626a85aab213baf"
|
||||
fetcher = bb.fetch.Fetch([url], self.d)
|
||||
fetcher.download()
|
||||
# Previous cwd has been deleted
|
||||
os.chdir(os.path.dirname(self.unpackdir))
|
||||
fetcher.unpack(self.unpackdir)
|
||||
|
||||
repo_path = os.path.join(self.tempdir, 'unpacked', 'git')
|
||||
self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/extern/googletest/config')), msg='Missing submodule config "extern/googletest"')
|
||||
self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/extern/json/config')), msg='Missing submodule config "extern/json"')
|
||||
self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/extern/sanitizers/config')), msg='Missing submodule config "extern/sanitizers"')
|
||||
|
||||
def test_git_submodule_update_CLI11(self):
|
||||
""" Prevent regression on update detection not finding missing submodule, or modules without needed commits """
|
||||
url = "gitsm://github.com/CLIUtils/CLI11;protocol=git;rev=cf6a99fa69aaefe477cc52e3ef4a7d2d7fa40714"
|
||||
fetcher = bb.fetch.Fetch([url], self.d)
|
||||
fetcher.download()
|
||||
|
||||
# CLI11 that pulls in a newer nlohmann-json
|
||||
url = "gitsm://github.com/CLIUtils/CLI11;protocol=git;rev=49ac989a9527ee9bb496de9ded7b4872c2e0e5ca"
|
||||
fetcher = bb.fetch.Fetch([url], self.d)
|
||||
fetcher.download()
|
||||
# Previous cwd has been deleted
|
||||
os.chdir(os.path.dirname(self.unpackdir))
|
||||
fetcher.unpack(self.unpackdir)
|
||||
|
||||
repo_path = os.path.join(self.tempdir, 'unpacked', 'git')
|
||||
self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/extern/googletest/config')), msg='Missing submodule config "extern/googletest"')
|
||||
self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/extern/json/config')), msg='Missing submodule config "extern/json"')
|
||||
self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/extern/sanitizers/config')), msg='Missing submodule config "extern/sanitizers"')
|
||||
|
||||
def test_git_submodule_aktualizr(self):
|
||||
url = "gitsm://github.com/advancedtelematic/aktualizr;branch=master;protocol=git;rev=d00d1a04cc2366d1a5f143b84b9f507f8bd32c44"
|
||||
fetcher = bb.fetch.Fetch([url], self.d)
|
||||
fetcher.download()
|
||||
# Previous cwd has been deleted
|
||||
os.chdir(os.path.dirname(self.unpackdir))
|
||||
fetcher.unpack(self.unpackdir)
|
||||
|
||||
repo_path = os.path.join(self.tempdir, 'unpacked', 'git')
|
||||
self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/partial/extern/isotp-c/config')), msg='Missing submodule config "partial/extern/isotp-c/config"')
|
||||
self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/partial/extern/isotp-c/modules/deps/bitfield-c/config')), msg='Missing submodule config "partial/extern/isotp-c/modules/deps/bitfield-c/config"')
|
||||
self.assertTrue(os.path.exists(os.path.join(repo_path, 'partial/extern/isotp-c/deps/bitfield-c/.git')), msg="Submodule of submodule isotp-c did not unpack properly")
|
||||
self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/tests/tuf-test-vectors/config')), msg='Missing submodule config "tests/tuf-test-vectors/config"')
|
||||
self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/third_party/googletest/config')), msg='Missing submodule config "third_party/googletest/config"')
|
||||
self.assertTrue(os.path.exists(os.path.join(repo_path, '.git/modules/third_party/HdrHistogram_c/config')), msg='Missing submodule config "third_party/HdrHistogram_c/config"')
|
||||
|
||||
def test_git_submodule_iotedge(self):
|
||||
""" Prevent regression on deeply nested submodules not being checked out properly, even though they were fetched. """
|
||||
|
||||
# This repository also has submodules where the module (name), path and url do not align
|
||||
url = "gitsm://github.com/azure/iotedge.git;protocol=git;rev=d76e0316c6f324345d77c48a83ce836d09392699"
|
||||
fetcher = bb.fetch.Fetch([url], self.d)
|
||||
fetcher.download()
|
||||
# Previous cwd has been deleted
|
||||
os.chdir(os.path.dirname(self.unpackdir))
|
||||
fetcher.unpack(self.unpackdir)
|
||||
|
||||
repo_path = os.path.join(self.tempdir, 'unpacked', 'git')
|
||||
|
||||
self.assertTrue(os.path.exists(os.path.join(repo_path, 'edgelet/hsm-sys/azure-iot-hsm-c/deps/c-shared/README.md')), msg='Missing submodule checkout')
|
||||
self.assertTrue(os.path.exists(os.path.join(repo_path, 'edgelet/hsm-sys/azure-iot-hsm-c/deps/c-shared/testtools/ctest/README.md')), msg='Missing submodule checkout')
|
||||
self.assertTrue(os.path.exists(os.path.join(repo_path, 'edgelet/hsm-sys/azure-iot-hsm-c/deps/c-shared/testtools/testrunner/readme.md')), msg='Missing submodule checkout')
|
||||
self.assertTrue(os.path.exists(os.path.join(repo_path, 'edgelet/hsm-sys/azure-iot-hsm-c/deps/c-shared/testtools/umock-c/readme.md')), msg='Missing submodule checkout')
|
||||
self.assertTrue(os.path.exists(os.path.join(repo_path, 'edgelet/hsm-sys/azure-iot-hsm-c/deps/c-shared/testtools/umock-c/deps/ctest/README.md')), msg='Missing submodule checkout')
|
||||
self.assertTrue(os.path.exists(os.path.join(repo_path, 'edgelet/hsm-sys/azure-iot-hsm-c/deps/c-shared/testtools/umock-c/deps/testrunner/readme.md')), msg='Missing submodule checkout')
|
||||
self.assertTrue(os.path.exists(os.path.join(repo_path, 'edgelet/hsm-sys/azure-iot-hsm-c/deps/utpm/README.md')), msg='Missing submodule checkout')
|
||||
self.assertTrue(os.path.exists(os.path.join(repo_path, 'edgelet/hsm-sys/azure-iot-hsm-c/deps/utpm/deps/c-utility/README.md')), msg='Missing submodule checkout')
|
||||
self.assertTrue(os.path.exists(os.path.join(repo_path, 'edgelet/hsm-sys/azure-iot-hsm-c/deps/utpm/deps/c-utility/testtools/ctest/README.md')), msg='Missing submodule checkout')
|
||||
self.assertTrue(os.path.exists(os.path.join(repo_path, 'edgelet/hsm-sys/azure-iot-hsm-c/deps/utpm/deps/c-utility/testtools/testrunner/readme.md')), msg='Missing submodule checkout')
|
||||
self.assertTrue(os.path.exists(os.path.join(repo_path, 'edgelet/hsm-sys/azure-iot-hsm-c/deps/utpm/deps/c-utility/testtools/umock-c/readme.md')), msg='Missing submodule checkout')
|
||||
self.assertTrue(os.path.exists(os.path.join(repo_path, 'edgelet/hsm-sys/azure-iot-hsm-c/deps/utpm/deps/c-utility/testtools/umock-c/deps/ctest/README.md')), msg='Missing submodule checkout')
|
||||
self.assertTrue(os.path.exists(os.path.join(repo_path, 'edgelet/hsm-sys/azure-iot-hsm-c/deps/utpm/deps/c-utility/testtools/umock-c/deps/testrunner/readme.md')), msg='Missing submodule checkout')
|
||||
|
||||
class TrustedNetworksTest(FetcherTest):
|
||||
def test_trusted_network(self):
|
||||
@@ -1060,7 +728,7 @@ class URLHandle(unittest.TestCase):
|
||||
# decodeurl and we need to handle them
|
||||
decodedata = datatable.copy()
|
||||
decodedata.update({
|
||||
"http://somesite.net;someparam=1": ('http', 'somesite.net', '/', '', '', {'someparam': '1'}),
|
||||
"http://somesite.net;someparam=1": ('http', 'somesite.net', '', '', '', {'someparam': '1'}),
|
||||
})
|
||||
|
||||
def test_decodeurl(self):
|
||||
@@ -1089,12 +757,12 @@ class FetchLatestVersionTest(FetcherTest):
|
||||
("dtc", "git://git.qemu.org/dtc.git", "65cc4d2748a2c2e6f27f1cf39e07a5dbabd80ebf", "")
|
||||
: "1.4.0",
|
||||
# combination version pattern
|
||||
("sysprof", "git://gitlab.gnome.org/GNOME/sysprof.git;protocol=https", "cd44ee6644c3641507fb53b8a2a69137f2971219", "")
|
||||
("sysprof", "git://git.gnome.org/sysprof", "cd44ee6644c3641507fb53b8a2a69137f2971219", "")
|
||||
: "1.2.0",
|
||||
("u-boot-mkimage", "git://git.denx.de/u-boot.git;branch=master;protocol=git", "62c175fbb8a0f9a926c88294ea9f7e88eb898f6c", "")
|
||||
: "2014.01",
|
||||
# version pattern "yyyymmdd"
|
||||
("mobile-broadband-provider-info", "git://gitlab.gnome.org/GNOME/mobile-broadband-provider-info.git;protocol=https", "4ed19e11c2975105b71b956440acdb25d46a347d", "")
|
||||
("mobile-broadband-provider-info", "git://git.gnome.org/mobile-broadband-provider-info", "4ed19e11c2975105b71b956440acdb25d46a347d", "")
|
||||
: "20120614",
|
||||
# packages with a valid UPSTREAM_CHECK_GITTAGREGEX
|
||||
("xf86-video-omap", "git://anongit.freedesktop.org/xorg/driver/xf86-video-omap", "ae0394e687f1a77e966cf72f895da91840dffb8f", "(?P<pver>(\d+\.(\d\.?)*))")
|
||||
@@ -1128,8 +796,8 @@ class FetchLatestVersionTest(FetcherTest):
|
||||
# packages with valid UPSTREAM_CHECK_URI and UPSTREAM_CHECK_REGEX
|
||||
("cups", "http://www.cups.org/software/1.7.2/cups-1.7.2-source.tar.bz2", "https://github.com/apple/cups/releases", "(?P<name>cups\-)(?P<pver>((\d+[\.\-_]*)+))\-source\.tar\.gz")
|
||||
: "2.0.0",
|
||||
("db", "http://download.oracle.com/berkeley-db/db-5.3.21.tar.gz", "http://ftp.debian.org/debian/pool/main/d/db5.3/", "(?P<name>db5\.3_)(?P<pver>\d+(\.\d+)+).+\.orig\.tar\.xz")
|
||||
: "5.3.10",
|
||||
("db", "http://download.oracle.com/berkeley-db/db-5.3.21.tar.gz", "http://www.oracle.com/technetwork/products/berkeleydb/downloads/index-082944.html", "http://download.oracle.com/otn/berkeley-db/(?P<name>db-)(?P<pver>((\d+[\.\-_]*)+))\.tar\.gz")
|
||||
: "6.1.19",
|
||||
}
|
||||
|
||||
@skipIfNoNetwork()
|
||||
@@ -1141,7 +809,7 @@ class FetchLatestVersionTest(FetcherTest):
|
||||
ud = bb.fetch2.FetchData(k[1], self.d)
|
||||
pupver= ud.method.latest_versionstring(ud, self.d)
|
||||
verstring = pupver[0]
|
||||
self.assertTrue(verstring, msg="Could not find upstream version for %s" % k[0])
|
||||
self.assertTrue(verstring, msg="Could not find upstream version")
|
||||
r = bb.utils.vercmp_string(v, verstring)
|
||||
self.assertTrue(r == -1 or r == 0, msg="Package %s, version: %s <= %s" % (k[0], v, verstring))
|
||||
|
||||
@@ -1154,7 +822,7 @@ class FetchLatestVersionTest(FetcherTest):
|
||||
ud = bb.fetch2.FetchData(k[1], self.d)
|
||||
pupver = ud.method.latest_versionstring(ud, self.d)
|
||||
verstring = pupver[0]
|
||||
self.assertTrue(verstring, msg="Could not find upstream version for %s" % k[0])
|
||||
self.assertTrue(verstring, msg="Could not find upstream version")
|
||||
r = bb.utils.vercmp_string(v, verstring)
|
||||
self.assertTrue(r == -1 or r == 0, msg="Package %s, version: %s <= %s" % (k[0], v, verstring))
|
||||
|
||||
@@ -1206,6 +874,9 @@ class FetchCheckStatusTest(FetcherTest):
|
||||
|
||||
|
||||
class GitMakeShallowTest(FetcherTest):
|
||||
bitbake_dir = os.path.join(os.path.dirname(os.path.join(__file__)), '..', '..', '..')
|
||||
make_shallow_path = os.path.join(bitbake_dir, 'bin', 'git-make-shallow')
|
||||
|
||||
def setUp(self):
|
||||
FetcherTest.setUp(self)
|
||||
self.gitdir = os.path.join(self.tempdir, 'gitshallow')
|
||||
@@ -1234,7 +905,7 @@ class GitMakeShallowTest(FetcherTest):
|
||||
def make_shallow(self, args=None):
|
||||
if args is None:
|
||||
args = ['HEAD']
|
||||
return bb.process.run([bb.fetch2.git.Git.make_shallow_path] + args, cwd=self.gitdir)
|
||||
return bb.process.run([self.make_shallow_path] + args, cwd=self.gitdir)
|
||||
|
||||
def add_empty_file(self, path, msg=None):
|
||||
if msg is None:
|
||||
@@ -1416,7 +1087,6 @@ class GitShallowTest(FetcherTest):
|
||||
# fetch and unpack, from the shallow tarball
|
||||
bb.utils.remove(self.gitdir, recurse=True)
|
||||
bb.utils.remove(ud.clonedir, recurse=True)
|
||||
bb.utils.remove(ud.clonedir.replace('gitsource', 'gitsubmodule'), recurse=True)
|
||||
|
||||
# confirm that the unpacked repo is used when no git clone or git
|
||||
# mirror tarball is available
|
||||
@@ -1567,11 +1237,7 @@ class GitShallowTest(FetcherTest):
|
||||
smdir = os.path.join(self.tempdir, 'gitsubmodule')
|
||||
bb.utils.mkdirhier(smdir)
|
||||
self.git('init', cwd=smdir)
|
||||
# Make this look like it was cloned from a remote...
|
||||
self.git('config --add remote.origin.url "%s"' % smdir, cwd=smdir)
|
||||
self.git('config --add remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*"', cwd=smdir)
|
||||
self.add_empty_file('asub', cwd=smdir)
|
||||
self.add_empty_file('bsub', cwd=smdir)
|
||||
|
||||
self.git('submodule init', cwd=self.srcdir)
|
||||
self.git('submodule add file://%s' % smdir, cwd=self.srcdir)
|
||||
@@ -1581,16 +1247,10 @@ class GitShallowTest(FetcherTest):
|
||||
uri = 'gitsm://%s;protocol=file;subdir=${S}' % self.srcdir
|
||||
fetcher, ud = self.fetch_shallow(uri)
|
||||
|
||||
# Verify the main repository is shallow
|
||||
self.assertRevCount(1)
|
||||
|
||||
# Verify the gitsubmodule directory is present
|
||||
assert './.git/modules/' in bb.process.run('tar -tzf %s' % os.path.join(self.dldir, ud.mirrortarballs[0]))[0]
|
||||
assert os.listdir(os.path.join(self.gitdir, 'gitsubmodule'))
|
||||
|
||||
# Verify the submodule is also shallow
|
||||
self.assertRevCount(1, cwd=os.path.join(self.gitdir, 'gitsubmodule'))
|
||||
|
||||
|
||||
if any(os.path.exists(os.path.join(p, 'git-annex')) for p in os.environ.get('PATH').split(':')):
|
||||
def test_shallow_annex(self):
|
||||
self.add_empty_file('a')
|
||||
@@ -1810,29 +1470,3 @@ class GitShallowTest(FetcherTest):
|
||||
self.assertNotEqual(orig_revs, revs)
|
||||
self.assertRefs(['master', 'origin/master'])
|
||||
self.assertRevCount(orig_revs - 1758)
|
||||
|
||||
def test_that_unpack_throws_an_error_when_the_git_clone_nor_shallow_tarball_exist(self):
|
||||
self.add_empty_file('a')
|
||||
fetcher, ud = self.fetch()
|
||||
bb.utils.remove(self.gitdir, recurse=True)
|
||||
bb.utils.remove(self.dldir, recurse=True)
|
||||
|
||||
with self.assertRaises(bb.fetch2.UnpackError) as context:
|
||||
fetcher.unpack(self.d.getVar('WORKDIR'))
|
||||
|
||||
self.assertIn("No up to date source found", context.exception.msg)
|
||||
self.assertIn("clone directory not available or not up to date", context.exception.msg)
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_that_unpack_does_work_when_using_git_shallow_tarball_but_tarball_is_not_available(self):
|
||||
self.d.setVar('SRCREV', 'e5939ff608b95cdd4d0ab0e1935781ab9a276ac0')
|
||||
self.d.setVar('BB_GIT_SHALLOW', '1')
|
||||
self.d.setVar('BB_GENERATE_SHALLOW_TARBALLS', '1')
|
||||
fetcher = bb.fetch.Fetch(["git://git.yoctoproject.org/fstests"], self.d)
|
||||
fetcher.download()
|
||||
|
||||
bb.utils.remove(self.dldir + "/*.tar.gz")
|
||||
fetcher.unpack(self.unpackdir)
|
||||
|
||||
dir = os.listdir(self.unpackdir + "/git/")
|
||||
self.assertIn("fstests.doap", dir)
|
||||
|
||||
@@ -44,13 +44,9 @@ C = "3"
"""

    def setUp(self):
        self.origdir = os.getcwd()
        self.d = bb.data.init()
        bb.parse.siggen = bb.siggen.init(self.d)

    def tearDown(self):
        os.chdir(self.origdir)

    def parsehelper(self, content, suffix = ".bb"):

        f = tempfile.NamedTemporaryFile(suffix = suffix)

@@ -42,10 +42,6 @@ class VerCmpString(unittest.TestCase):
        self.assertTrue(result < 0)
        result = bb.utils.vercmp_string('1.1', '1.0+1.1-beta1')
        self.assertTrue(result > 0)
        result = bb.utils.vercmp_string('1.', '1.1')
        self.assertTrue(result < 0)
        result = bb.utils.vercmp_string('1.1', '1.')
        self.assertTrue(result > 0)

    def test_explode_dep_versions(self):
        correctresult = {"foo" : ["= 1.10"]}

@@ -604,16 +604,13 @@ class Tinfoil:
        recipecache = self.cooker.recipecaches[mc]
        prov = self.find_best_provider(pn)
        fn = prov[3]
        if fn:
            actual_pn = recipecache.pkg_fn[fn]
            recipe = TinfoilRecipeInfo(recipecache,
                    self.config_data,
                    pn=actual_pn,
                    fn=fn,
                    fns=recipecache.pkg_pn[actual_pn])
            return recipe
        else:
            return None
        actual_pn = recipecache.pkg_fn[fn]
        recipe = TinfoilRecipeInfo(recipecache,
                self.config_data,
                pn=actual_pn,
                fn=fn,
                fns=recipecache.pkg_pn[actual_pn])
        return recipe

    def parse_recipe(self, pn):
        """

@@ -719,11 +719,7 @@ class ORMWrapper(object):

    def save_build_package_information(self, build_obj, package_info, recipes,
            built_package):
        # assert isinstance(build_obj, Build)

        if not 'PN' in package_info.keys():
            # no package data to save (e.g. 'OPKGN'="lib64-*"|"lib32-*")
            return None
        # assert isinstance(build_obj, Build)

        # create and save the object
        pname = package_info['PKG']
@@ -1603,14 +1599,14 @@ class BuildInfoHelper(object):
        mockevent.lineno = -1
        self.store_log_event(mockevent)

    def store_log_event(self, event,cli_backlog=True):
    def store_log_event(self, event):
        self._ensure_build()

        if event.levelno < formatter.WARNING:
            return

        # early return for CLI builds
        if cli_backlog and self.brbe is None:
        if self.brbe is None:
            if not 'backlog' in self.internal_state:
                self.internal_state['backlog'] = []
            self.internal_state['backlog'].append(event)
@@ -1622,7 +1618,7 @@ class BuildInfoHelper(object):
                tempevent = self.internal_state['backlog'].pop()
                logger.debug(1, "buildinfohelper: Saving stored event %s "
                        % tempevent)
                self.store_log_event(tempevent,cli_backlog)
                self.store_log_event(tempevent)
            else:
                logger.info("buildinfohelper: All events saved")
                del self.internal_state['backlog']
@@ -1987,8 +1983,7 @@ class BuildInfoHelper(object):
        if 'backlog' in self.internal_state:
            # we save missed events in the database for the current build
            tempevent = self.internal_state['backlog'].pop()
            # Do not skip command line build events
            self.store_log_event(tempevent,False)
            self.store_log_event(tempevent)

        if not connection.features.autocommits_when_autocommit_is_off:
            transaction.set_autocommit(True)

@@ -103,16 +103,9 @@ class DepExplorer(Gtk.Window):
        self.pkg_treeview.get_selection().connect("changed", self.on_cursor_changed)
        column = Gtk.TreeViewColumn("Package", Gtk.CellRendererText(), text=COL_PKG_NAME)
        self.pkg_treeview.append_column(column)
        pane.add1(scrolled)
        scrolled.add(self.pkg_treeview)

        self.search_entry = Gtk.SearchEntry.new()
        self.pkg_treeview.set_search_entry(self.search_entry)

        left_panel = Gtk.VPaned()
        left_panel.add(self.search_entry)
        left_panel.add(scrolled)
        pane.add1(left_panel)

        box = Gtk.VBox(homogeneous=True, spacing=4)

        # Task Depends
@@ -136,7 +129,6 @@ class DepExplorer(Gtk.Window):
        pane.add2(box)

        self.show_all()
        self.search_entry.grab_focus()

    def on_package_activated(self, treeview, path, column, data_col):
        model = treeview.get_model()

@@ -120,10 +120,6 @@ def vercmp_part(a, b):
            return -1
        elif oa > ob:
            return 1
        elif ca is None:
            return -1
        elif cb is None:
            return 1
        elif ca < cb:
            return -1
        elif ca > cb:
@@ -191,7 +187,7 @@ def explode_deps(s):
        #r[-1] += ' ' + ' '.join(j)
    return r

def explode_dep_versions2(s, *, sort=True):
def explode_dep_versions2(s):
    """
    Take an RDEPENDS style string of format:
    "DEPEND1 (optional version) DEPEND2 (optional version) ..."
@@ -254,8 +250,7 @@ def explode_dep_versions2(s, *, sort=True):
        if not (i in r and r[i]):
            r[lastdep] = []

    if sort:
        r = collections.OrderedDict(sorted(r.items(), key=lambda x: x[0]))
|
||||
r = collections.OrderedDict(sorted(r.items(), key=lambda x: x[0]))
|
||||
return r
|
||||
|
||||
def explode_dep_versions(s):
|
||||
@@ -501,11 +496,7 @@ def lockfile(name, shared=False, retry=True, block=False):
|
||||
if statinfo.st_ino == statinfo2.st_ino:
|
||||
return lf
|
||||
lf.close()
|
||||
except OSError as e:
|
||||
if e.errno == errno.EACCES:
|
||||
logger.error("Unable to acquire lock '%s', %s",
|
||||
e.strerror, name)
|
||||
sys.exit(1)
|
||||
except Exception:
|
||||
try:
|
||||
lf.close()
|
||||
except Exception:
|
||||
@@ -532,17 +523,12 @@ def md5_file(filename):
|
||||
"""
|
||||
Return the hex string representation of the MD5 checksum of filename.
|
||||
"""
|
||||
import hashlib, mmap
|
||||
import hashlib
|
||||
m = hashlib.md5()
|
||||
|
||||
with open(filename, "rb") as f:
|
||||
m = hashlib.md5()
|
||||
try:
|
||||
with mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) as mm:
|
||||
for chunk in iter(lambda: mm.read(8192), b''):
|
||||
m.update(chunk)
|
||||
except ValueError:
|
||||
# You can't mmap() an empty file so silence this exception
|
||||
pass
|
||||
for line in f:
|
||||
m.update(line)
|
||||
return m.hexdigest()
|
||||
|
||||
def sha256_file(filename):
|
||||
@@ -796,7 +782,7 @@ def movefile(src, dest, newmtime = None, sstat = None):
|
||||
os.rename(src, destpath)
|
||||
renamefailed = 0
|
||||
except Exception as e:
|
||||
if e.errno != errno.EXDEV:
|
||||
if e[0] != errno.EXDEV:
|
||||
# Some random error.
|
||||
print("movefile: Failed to move", src, "to", dest, e)
|
||||
return None
|
||||
@@ -820,8 +806,8 @@ def movefile(src, dest, newmtime = None, sstat = None):
|
||||
return None # failure
|
||||
try:
|
||||
if didcopy:
|
||||
os.lchown(destpath, sstat[stat.ST_UID], sstat[stat.ST_GID])
|
||||
os.chmod(destpath, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
|
||||
os.lchown(dest, sstat[stat.ST_UID], sstat[stat.ST_GID])
|
||||
os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
|
||||
os.unlink(src)
|
||||
except Exception as e:
|
||||
print("movefile: Failed to chown/chmod/unlink", dest, e)
|
||||
@@ -914,23 +900,6 @@ def copyfile(src, dest, newmtime = None, sstat = None):
|
||||
newmtime = sstat[stat.ST_MTIME]
|
||||
return newmtime
|
||||
|
||||
def break_hardlinks(src, sstat = None):
|
||||
"""
|
||||
Ensures src is the only hardlink to this file. Other hardlinks,
|
||||
if any, are not affected (other than in their st_nlink value, of
|
||||
course). Returns true on success and false on failure.
|
||||
|
||||
"""
|
||||
try:
|
||||
if not sstat:
|
||||
sstat = os.lstat(src)
|
||||
except Exception as e:
|
||||
logger.warning("break_hardlinks: stat of %s failed (%s)" % (src, e))
|
||||
return False
|
||||
if sstat[stat.ST_NLINK] == 1:
|
||||
return True
|
||||
return copyfile(src, src, sstat=sstat)
|
||||
|
||||
def which(path, item, direction = 0, history = False, executable=False):
|
||||
"""
|
||||
Locate `item` in the list of paths `path` (colon separated string like $PATH).
|
||||
@@ -1315,7 +1284,7 @@ def edit_metadata_file(meta_file, variables, varfunc):
|
||||
return updated
|
||||
|
||||
|
||||
def edit_bblayers_conf(bblayers_conf, add, remove, edit_cb=None):
|
||||
def edit_bblayers_conf(bblayers_conf, add, remove):
|
||||
"""Edit bblayers.conf, adding and/or removing layers
|
||||
Parameters:
|
||||
bblayers_conf: path to bblayers.conf file to edit
|
||||
@@ -1323,8 +1292,6 @@ def edit_bblayers_conf(bblayers_conf, add, remove, edit_cb=None):
|
||||
list to add nothing
|
||||
remove: layer path (or list of layer paths) to remove; None or
|
||||
empty list to remove nothing
|
||||
edit_cb: optional callback function that will be called after
|
||||
processing adds/removes once per existing entry.
|
||||
Returns a tuple:
|
||||
notadded: list of layers specified to be added but weren't
|
||||
(because they were already in the list)
|
||||
@@ -1388,17 +1355,6 @@ def edit_bblayers_conf(bblayers_conf, add, remove, edit_cb=None):
|
||||
bblayers.append(addlayer)
|
||||
del addlayers[:]
|
||||
|
||||
if edit_cb:
|
||||
newlist = []
|
||||
for layer in bblayers:
|
||||
res = edit_cb(layer, canonicalise_path(layer))
|
||||
if res != layer:
|
||||
newlist.append(res)
|
||||
updated = True
|
||||
else:
|
||||
newlist.append(layer)
|
||||
bblayers = newlist
|
||||
|
||||
if updated:
|
||||
if op == '+=' and not bblayers:
|
||||
bblayers = None
|
||||
|
||||
@@ -18,18 +18,16 @@ def plugin_init(plugins):
|
||||
|
||||
class ActionPlugin(LayerPlugin):
|
||||
def do_add_layer(self, args):
|
||||
"""Add one or more layers to bblayers.conf."""
|
||||
layerdirs = [os.path.abspath(ldir) for ldir in args.layerdir]
|
||||
"""Add a layer to bblayers.conf."""
|
||||
layerdir = os.path.abspath(args.layerdir)
|
||||
if not os.path.exists(layerdir):
|
||||
sys.stderr.write("Specified layer directory doesn't exist\n")
|
||||
return 1
|
||||
|
||||
for layerdir in layerdirs:
|
||||
if not os.path.exists(layerdir):
|
||||
sys.stderr.write("Specified layer directory %s doesn't exist\n" % layerdir)
|
||||
return 1
|
||||
|
||||
layer_conf = os.path.join(layerdir, 'conf', 'layer.conf')
|
||||
if not os.path.exists(layer_conf):
|
||||
sys.stderr.write("Specified layer directory %s doesn't contain a conf/layer.conf file\n" % layerdir)
|
||||
return 1
|
||||
layer_conf = os.path.join(layerdir, 'conf', 'layer.conf')
|
||||
if not os.path.exists(layer_conf):
|
||||
sys.stderr.write("Specified layer directory doesn't contain a conf/layer.conf file\n")
|
||||
return 1
|
||||
|
||||
bblayers_conf = os.path.join('conf', 'bblayers.conf')
|
||||
if not os.path.exists(bblayers_conf):
|
||||
@@ -42,10 +40,10 @@ class ActionPlugin(LayerPlugin):
|
||||
shutil.copy2(bblayers_conf, backup)
|
||||
|
||||
try:
|
||||
notadded, _ = bb.utils.edit_bblayers_conf(bblayers_conf, layerdirs, None)
|
||||
notadded, _ = bb.utils.edit_bblayers_conf(bblayers_conf, layerdir, None)
|
||||
if not (args.force or notadded):
|
||||
try:
|
||||
self.tinfoil.run_command('parseConfiguration')
|
||||
self.tinfoil.parseRecipes()
|
||||
except bb.tinfoil.TinfoilUIException:
|
||||
# Restore the back up copy of bblayers.conf
|
||||
shutil.copy2(backup, bblayers_conf)
|
||||
@@ -58,22 +56,19 @@ class ActionPlugin(LayerPlugin):
|
||||
shutil.rmtree(tempdir)
|
||||
|
||||
def do_remove_layer(self, args):
|
||||
"""Remove one or more layers from bblayers.conf."""
|
||||
"""Remove a layer from bblayers.conf."""
|
||||
bblayers_conf = os.path.join('conf', 'bblayers.conf')
|
||||
if not os.path.exists(bblayers_conf):
|
||||
sys.stderr.write("Unable to find bblayers.conf\n")
|
||||
return 1
|
||||
|
||||
layerdirs = []
|
||||
for item in args.layerdir:
|
||||
if item.startswith('*'):
|
||||
layerdir = item
|
||||
elif not '/' in item:
|
||||
layerdir = '*/%s' % item
|
||||
else:
|
||||
layerdir = os.path.abspath(item)
|
||||
layerdirs.append(layerdir)
|
||||
(_, notremoved) = bb.utils.edit_bblayers_conf(bblayers_conf, None, layerdirs)
|
||||
if args.layerdir.startswith('*'):
|
||||
layerdir = args.layerdir
|
||||
elif not '/' in args.layerdir:
|
||||
layerdir = '*/%s' % args.layerdir
|
||||
else:
|
||||
layerdir = os.path.abspath(args.layerdir)
|
||||
(_, notremoved) = bb.utils.edit_bblayers_conf(bblayers_conf, None, layerdir)
|
||||
if notremoved:
|
||||
for item in notremoved:
|
||||
sys.stderr.write("No layers matching %s found in BBLAYERS\n" % item)
|
||||
@@ -245,10 +240,10 @@ build results (as the layer priority order has effectively changed).
|
||||
|
||||
def register_commands(self, sp):
|
||||
parser_add_layer = self.add_command(sp, 'add-layer', self.do_add_layer, parserecipes=False)
|
||||
parser_add_layer.add_argument('layerdir', nargs='+', help='Layer directory/directories to add')
|
||||
parser_add_layer.add_argument('layerdir', help='Layer directory to add')
|
||||
|
||||
parser_remove_layer = self.add_command(sp, 'remove-layer', self.do_remove_layer, parserecipes=False)
|
||||
parser_remove_layer.add_argument('layerdir', nargs='+', help='Layer directory/directories to remove (wildcards allowed, enclose in quotes to avoid shell expansion)')
|
||||
parser_remove_layer.add_argument('layerdir', help='Layer directory to remove (wildcards allowed, enclose in quotes to avoid shell expansion)')
|
||||
parser_remove_layer.set_defaults(func=self.do_remove_layer)
|
||||
|
||||
parser_flatten = self.add_command(sp, 'flatten', self.do_flatten)
|
||||
|
||||
@@ -1,9 +1,10 @@
|
||||
import layerindexlib
|
||||
|
||||
import argparse
|
||||
import http.client
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import subprocess
|
||||
import urllib.parse
|
||||
|
||||
from bblayers.action import ActionPlugin
|
||||
|
||||
@@ -20,6 +21,110 @@ class LayerIndexPlugin(ActionPlugin):
|
||||
This class inherits ActionPlugin to get do_add_layer.
|
||||
"""
|
||||
|
||||
def get_json_data(self, apiurl):
|
||||
proxy_settings = os.environ.get("http_proxy", None)
|
||||
conn = None
|
||||
_parsedurl = urllib.parse.urlparse(apiurl)
|
||||
path = _parsedurl.path
|
||||
query = _parsedurl.query
|
||||
|
||||
def parse_url(url):
|
||||
parsedurl = urllib.parse.urlparse(url)
|
||||
if parsedurl.netloc[0] == '[':
|
||||
host, port = parsedurl.netloc[1:].split(']', 1)
|
||||
if ':' in port:
|
||||
port = port.rsplit(':', 1)[1]
|
||||
else:
|
||||
port = None
|
||||
else:
|
||||
if parsedurl.netloc.count(':') == 1:
|
||||
(host, port) = parsedurl.netloc.split(":")
|
||||
else:
|
||||
host = parsedurl.netloc
|
||||
port = None
|
||||
return (host, 80 if port is None else int(port))
|
||||
|
||||
if proxy_settings is None:
|
||||
host, port = parse_url(apiurl)
|
||||
conn = http.client.HTTPConnection(host, port)
|
||||
conn.request("GET", path + "?" + query)
|
||||
else:
|
||||
host, port = parse_url(proxy_settings)
|
||||
conn = http.client.HTTPConnection(host, port)
|
||||
conn.request("GET", apiurl)
|
||||
|
||||
r = conn.getresponse()
|
||||
if r.status != 200:
|
||||
raise Exception("Failed to read " + path + ": %d %s" % (r.status, r.reason))
|
||||
return json.loads(r.read().decode())
|
||||
|
||||
def get_layer_deps(self, layername, layeritems, layerbranches, layerdependencies, branchnum, selfname=False):
|
||||
def layeritems_info_id(items_name, layeritems):
|
||||
litems_id = None
|
||||
for li in layeritems:
|
||||
if li['name'] == items_name:
|
||||
litems_id = li['id']
|
||||
break
|
||||
return litems_id
|
||||
|
||||
def layerbranches_info(items_id, layerbranches):
|
||||
lbranch = {}
|
||||
for lb in layerbranches:
|
||||
if lb['layer'] == items_id and lb['branch'] == branchnum:
|
||||
lbranch['id'] = lb['id']
|
||||
lbranch['vcs_subdir'] = lb['vcs_subdir']
|
||||
break
|
||||
return lbranch
|
||||
|
||||
def layerdependencies_info(lb_id, layerdependencies):
|
||||
ld_deps = []
|
||||
for ld in layerdependencies:
|
||||
if ld['layerbranch'] == lb_id and not ld['dependency'] in ld_deps:
|
||||
ld_deps.append(ld['dependency'])
|
||||
if not ld_deps:
|
||||
logger.error("The dependency of layerDependencies is not found.")
|
||||
return ld_deps
|
||||
|
||||
def layeritems_info_name_subdir(items_id, layeritems):
|
||||
litems = {}
|
||||
for li in layeritems:
|
||||
if li['id'] == items_id:
|
||||
litems['vcs_url'] = li['vcs_url']
|
||||
litems['name'] = li['name']
|
||||
break
|
||||
return litems
|
||||
|
||||
if selfname:
|
||||
selfid = layeritems_info_id(layername, layeritems)
|
||||
lbinfo = layerbranches_info(selfid, layerbranches)
|
||||
if lbinfo:
|
||||
selfsubdir = lbinfo['vcs_subdir']
|
||||
else:
|
||||
logger.error("%s is not found in the specified branch" % layername)
|
||||
return
|
||||
selfurl = layeritems_info_name_subdir(selfid, layeritems)['vcs_url']
|
||||
if selfurl:
|
||||
return selfurl, selfsubdir
|
||||
else:
|
||||
logger.error("Cannot get layer %s git repo and subdir" % layername)
|
||||
return
|
||||
ldict = {}
|
||||
itemsid = layeritems_info_id(layername, layeritems)
|
||||
if not itemsid:
|
||||
return layername, None
|
||||
lbid = layerbranches_info(itemsid, layerbranches)
|
||||
if lbid:
|
||||
lbid = layerbranches_info(itemsid, layerbranches)['id']
|
||||
else:
|
||||
logger.error("%s is not found in the specified branch" % layername)
|
||||
return None, None
|
||||
for dependency in layerdependencies_info(lbid, layerdependencies):
|
||||
lname = layeritems_info_name_subdir(dependency, layeritems)['name']
|
||||
lurl = layeritems_info_name_subdir(dependency, layeritems)['vcs_url']
|
||||
lsubdir = layerbranches_info(dependency, layerbranches)['vcs_subdir']
|
||||
ldict[lname] = lurl, lsubdir
|
||||
return None, ldict
|
||||
|
||||
def get_fetch_layer(self, fetchdir, url, subdir, fetch_layer):
|
||||
layername = self.get_layer_name(url)
|
||||
if os.path.splitext(layername)[1] == '.git':
|
||||
@@ -28,127 +133,98 @@ class LayerIndexPlugin(ActionPlugin):
|
||||
layerdir = os.path.join(repodir, subdir)
|
||||
if not os.path.exists(repodir):
|
||||
if fetch_layer:
|
||||
result = subprocess.call(['git', 'clone', url, repodir])
|
||||
result = subprocess.call('git clone %s %s' % (url, repodir), shell = True)
|
||||
if result:
|
||||
logger.error("Failed to download %s" % url)
|
||||
return None, None, None
|
||||
return None, None
|
||||
else:
|
||||
return subdir, layername, layerdir
|
||||
return layername, layerdir
|
||||
else:
|
||||
logger.plain("Repository %s needs to be fetched" % url)
|
||||
return subdir, layername, layerdir
|
||||
return layername, layerdir
|
||||
elif os.path.exists(layerdir):
|
||||
return subdir, layername, layerdir
|
||||
return layername, layerdir
|
||||
else:
|
||||
logger.error("%s is not in %s" % (url, subdir))
|
||||
return None, None, None
|
||||
return None, None
|
||||
|
||||
def do_layerindex_fetch(self, args):
|
||||
"""Fetches a layer from a layer index along with its dependent layers, and adds them to conf/bblayers.conf.
|
||||
"""
|
||||
|
||||
def _construct_url(baseurls, branches):
|
||||
urls = []
|
||||
for baseurl in baseurls:
|
||||
if baseurl[-1] != '/':
|
||||
baseurl += '/'
|
||||
|
||||
if not baseurl.startswith('cooker'):
|
||||
baseurl += "api/"
|
||||
|
||||
if branches:
|
||||
baseurl += ";branch=%s" % ','.join(branches)
|
||||
|
||||
urls.append(baseurl)
|
||||
|
||||
return urls
|
||||
|
||||
|
||||
# Set the default...
|
||||
if args.branch:
|
||||
branches = [args.branch]
|
||||
apiurl = self.tinfoil.config_data.getVar('BBLAYERS_LAYERINDEX_URL')
|
||||
if not apiurl:
|
||||
logger.error("Cannot get BBLAYERS_LAYERINDEX_URL")
|
||||
return 1
|
||||
else:
|
||||
branches = (self.tinfoil.config_data.getVar('LAYERSERIES_CORENAMES') or 'master').split()
|
||||
logger.debug(1, 'Trying branches: %s' % branches)
|
||||
if apiurl[-1] != '/':
|
||||
apiurl += '/'
|
||||
apiurl += "api/"
|
||||
apilinks = self.get_json_data(apiurl)
|
||||
branches = self.get_json_data(apilinks['branches'])
|
||||
|
||||
branchnum = 0
|
||||
for branch in branches:
|
||||
if branch['name'] == args.branch:
|
||||
branchnum = branch['id']
|
||||
break
|
||||
if branchnum == 0:
|
||||
validbranches = ', '.join([branch['name'] for branch in branches])
|
||||
logger.error('Invalid layer branch name "%s". Valid branches: %s' % (args.branch, validbranches))
|
||||
return 1
|
||||
|
||||
ignore_layers = []
|
||||
for collection in self.tinfoil.config_data.getVar('BBFILE_COLLECTIONS').split():
|
||||
lname = self.tinfoil.config_data.getVar('BBLAYERS_LAYERINDEX_NAME_%s' % collection)
|
||||
if lname:
|
||||
ignore_layers.append(lname)
|
||||
|
||||
if args.ignore:
|
||||
ignore_layers.extend(args.ignore.split(','))
|
||||
|
||||
# Load the cooker DB
|
||||
cookerIndex = layerindexlib.LayerIndex(self.tinfoil.config_data)
|
||||
cookerIndex.load_layerindex('cooker://', load='layerDependencies')
|
||||
layeritems = self.get_json_data(apilinks['layerItems'])
|
||||
layerbranches = self.get_json_data(apilinks['layerBranches'])
|
||||
layerdependencies = self.get_json_data(apilinks['layerDependencies'])
|
||||
invaluenames = []
|
||||
repourls = {}
|
||||
printlayers = []
|
||||
|
||||
# Fast path, check if we already have what has been requested!
|
||||
(dependencies, invalidnames) = cookerIndex.find_dependencies(names=args.layername, ignores=ignore_layers)
|
||||
if not args.show_only and not invalidnames:
|
||||
logger.plain("You already have the requested layer(s): %s" % args.layername)
|
||||
return 0
|
||||
def query_dependencies(layers, layeritems, layerbranches, layerdependencies, branchnum):
|
||||
depslayer = []
|
||||
for layername in layers:
|
||||
invaluename, layerdict = self.get_layer_deps(layername, layeritems, layerbranches, layerdependencies, branchnum)
|
||||
if layerdict:
|
||||
repourls[layername] = self.get_layer_deps(layername, layeritems, layerbranches, layerdependencies, branchnum, selfname=True)
|
||||
for layer in layerdict:
|
||||
if not layer in ignore_layers:
|
||||
depslayer.append(layer)
|
||||
printlayers.append((layername, layer, layerdict[layer][0], layerdict[layer][1]))
|
||||
if not layer in ignore_layers and not layer in repourls:
|
||||
repourls[layer] = (layerdict[layer][0], layerdict[layer][1])
|
||||
if invaluename and not invaluename in invaluenames:
|
||||
invaluenames.append(invaluename)
|
||||
return depslayer
|
||||
|
||||
# The information to show is already in the cookerIndex
|
||||
if invalidnames:
|
||||
# General URL to use to access the layer index
|
||||
# While there is ONE right now, we expect users may enter several
|
||||
apiurl = self.tinfoil.config_data.getVar('BBLAYERS_LAYERINDEX_URL').split()
|
||||
if not apiurl:
|
||||
logger.error("Cannot get BBLAYERS_LAYERINDEX_URL")
|
||||
return 1
|
||||
depslayers = query_dependencies(args.layername, layeritems, layerbranches, layerdependencies, branchnum)
|
||||
while depslayers:
|
||||
depslayer = query_dependencies(depslayers, layeritems, layerbranches, layerdependencies, branchnum)
|
||||
depslayers = depslayer
|
||||
if invaluenames:
|
||||
for invaluename in invaluenames:
|
||||
logger.error('Layer "%s" not found in layer index' % invaluename)
|
||||
return 1
|
||||
logger.plain("%s %s %s %s" % ("Layer".ljust(19), "Required by".ljust(19), "Git repository".ljust(54), "Subdirectory"))
|
||||
logger.plain('=' * 115)
|
||||
for layername in args.layername:
|
||||
layerurl = repourls[layername]
|
||||
logger.plain("%s %s %s %s" % (layername.ljust(20), '-'.ljust(20), layerurl[0].ljust(55), layerurl[1]))
|
||||
printedlayers = []
|
||||
for layer, dependency, gitrepo, subdirectory in printlayers:
|
||||
if dependency in printedlayers:
|
||||
continue
|
||||
logger.plain("%s %s %s %s" % (dependency.ljust(20), layer.ljust(20), gitrepo.ljust(55), subdirectory))
|
||||
printedlayers.append(dependency)
|
||||
|
||||
remoteIndex = layerindexlib.LayerIndex(self.tinfoil.config_data)
|
||||
|
||||
for remoteurl in _construct_url(apiurl, branches):
|
||||
logger.plain("Loading %s..." % remoteurl)
|
||||
remoteIndex.load_layerindex(remoteurl)
|
||||
|
||||
if remoteIndex.is_empty():
|
||||
logger.error("Remote layer index %s is empty for branches %s" % (apiurl, branches))
|
||||
return 1
|
||||
|
||||
lIndex = cookerIndex + remoteIndex
|
||||
|
||||
(dependencies, invalidnames) = lIndex.find_dependencies(names=args.layername, ignores=ignore_layers)
|
||||
|
||||
if invalidnames:
|
||||
for invaluename in invalidnames:
|
||||
logger.error('Layer "%s" not found in layer index' % invaluename)
|
||||
return 1
|
||||
|
||||
logger.plain("%s %s %s" % ("Layer".ljust(49), "Git repository (branch)".ljust(54), "Subdirectory"))
|
||||
logger.plain('=' * 125)
|
||||
|
||||
for deplayerbranch in dependencies:
|
||||
layerBranch = dependencies[deplayerbranch][0]
|
||||
|
||||
# TODO: Determine display behavior
|
||||
# This is the local content, uncomment to hide local
|
||||
# layers from the display.
|
||||
#if layerBranch.index.config['TYPE'] == 'cooker':
|
||||
# continue
|
||||
|
||||
layerDeps = dependencies[deplayerbranch][1:]
|
||||
|
||||
requiredby = []
|
||||
recommendedby = []
|
||||
for dep in layerDeps:
|
||||
if dep.required:
|
||||
requiredby.append(dep.layer.name)
|
||||
else:
|
||||
recommendedby.append(dep.layer.name)
|
||||
|
||||
logger.plain('%s %s %s' % (("%s:%s:%s" %
|
||||
(layerBranch.index.config['DESCRIPTION'],
|
||||
layerBranch.branch.name,
|
||||
layerBranch.layer.name)).ljust(50),
|
||||
("%s (%s)" % (layerBranch.layer.vcs_url,
|
||||
layerBranch.actual_branch)).ljust(55),
|
||||
layerBranch.vcs_subdir
|
||||
))
|
||||
if requiredby:
|
||||
logger.plain(' required by: %s' % ' '.join(requiredby))
|
||||
if recommendedby:
|
||||
logger.plain(' recommended by: %s' % ' '.join(recommendedby))
|
||||
|
||||
if dependencies:
|
||||
if repourls:
|
||||
fetchdir = self.tinfoil.config_data.getVar('BBLAYERS_FETCH_DIR')
|
||||
if not fetchdir:
|
||||
logger.error("Cannot get BBLAYERS_FETCH_DIR")
|
||||
@@ -156,39 +232,26 @@ class LayerIndexPlugin(ActionPlugin):
|
||||
if not os.path.exists(fetchdir):
|
||||
os.makedirs(fetchdir)
|
||||
addlayers = []
|
||||
|
||||
for deplayerbranch in dependencies:
|
||||
layerBranch = dependencies[deplayerbranch][0]
|
||||
|
||||
if layerBranch.index.config['TYPE'] == 'cooker':
|
||||
# Anything loaded via cooker is already local, skip it
|
||||
continue
|
||||
|
||||
subdir, name, layerdir = self.get_fetch_layer(fetchdir,
|
||||
layerBranch.layer.vcs_url,
|
||||
layerBranch.vcs_subdir,
|
||||
not args.show_only)
|
||||
for repourl, subdir in repourls.values():
|
||||
name, layerdir = self.get_fetch_layer(fetchdir, repourl, subdir, not args.show_only)
|
||||
if not name:
|
||||
# Error already shown
|
||||
return 1
|
||||
addlayers.append((subdir, name, layerdir))
|
||||
if not args.show_only:
|
||||
localargs = argparse.Namespace()
|
||||
localargs.layerdir = []
|
||||
localargs.force = args.force
|
||||
for subdir, name, layerdir in addlayers:
|
||||
for subdir, name, layerdir in set(addlayers):
|
||||
if os.path.exists(layerdir):
|
||||
if subdir:
|
||||
logger.plain("Adding layer \"%s\" (%s) to conf/bblayers.conf" % (subdir, layerdir))
|
||||
logger.plain("Adding layer \"%s\" to conf/bblayers.conf" % subdir)
|
||||
else:
|
||||
logger.plain("Adding layer \"%s\" (%s) to conf/bblayers.conf" % (name, layerdir))
|
||||
localargs.layerdir.append(layerdir)
|
||||
logger.plain("Adding layer \"%s\" to conf/bblayers.conf" % name)
|
||||
localargs = argparse.Namespace()
|
||||
localargs.layerdir = layerdir
|
||||
localargs.force = args.force
|
||||
self.do_add_layer(localargs)
|
||||
else:
|
||||
break
|
||||
|
||||
if localargs.layerdir:
|
||||
self.do_add_layer(localargs)
|
||||
|
||||
def do_layerindex_show_depends(self, args):
|
||||
"""Find layer dependencies from layer index.
|
||||
"""
|
||||
@@ -197,12 +260,12 @@ class LayerIndexPlugin(ActionPlugin):
|
||||
self.do_layerindex_fetch(args)
|
||||
|
||||
def register_commands(self, sp):
|
||||
parser_layerindex_fetch = self.add_command(sp, 'layerindex-fetch', self.do_layerindex_fetch, parserecipes=False)
|
||||
parser_layerindex_fetch = self.add_command(sp, 'layerindex-fetch', self.do_layerindex_fetch)
|
||||
parser_layerindex_fetch.add_argument('-n', '--show-only', help='show dependencies and do nothing else', action='store_true')
|
||||
parser_layerindex_fetch.add_argument('-b', '--branch', help='branch name to fetch')
|
||||
parser_layerindex_fetch.add_argument('-b', '--branch', help='branch name to fetch (default %(default)s)', default='master')
|
||||
parser_layerindex_fetch.add_argument('-i', '--ignore', help='assume the specified layers do not need to be fetched/added (separate multiple layers with commas, no spaces)', metavar='LAYER')
|
||||
parser_layerindex_fetch.add_argument('layername', nargs='+', help='layer to fetch')
|
||||
|
||||
parser_layerindex_show_depends = self.add_command(sp, 'layerindex-show-depends', self.do_layerindex_show_depends, parserecipes=False)
|
||||
parser_layerindex_show_depends.add_argument('-b', '--branch', help='branch name to fetch')
|
||||
parser_layerindex_show_depends = self.add_command(sp, 'layerindex-show-depends', self.do_layerindex_show_depends)
|
||||
parser_layerindex_show_depends.add_argument('-b', '--branch', help='branch name to fetch (default %(default)s)', default='master')
|
||||
parser_layerindex_show_depends.add_argument('layername', nargs='+', help='layer to query')
|
||||
|
||||
@@ -161,12 +161,7 @@ skipped recipes will also be listed, with a " (skipped)" suffix.
|
||||
items_listed = False
|
||||
for p in sorted(pkg_pn):
|
||||
if pnspec:
|
||||
found=False
|
||||
for pnm in pnspec:
|
||||
if fnmatch.fnmatch(p, pnm):
|
||||
found=True
|
||||
break
|
||||
if not found:
|
||||
if not fnmatch.fnmatch(p, pnspec):
|
||||
continue
|
||||
|
||||
if len(allproviders[p]) > 1 or not show_multi_provider_only:
|
||||
@@ -256,14 +251,8 @@ Lists recipes with the bbappends that apply to them as subitems.
|
||||
pnlist.sort()
|
||||
appends = False
|
||||
for pn in pnlist:
|
||||
if args.pnspec:
|
||||
found=False
|
||||
for pnm in args.pnspec:
|
||||
if fnmatch.fnmatch(pn, pnm):
|
||||
found=True
|
||||
break
|
||||
if not found:
|
||||
continue
|
||||
if args.pnspec and pn != args.pnspec:
|
||||
continue
|
||||
|
||||
if self.show_appends_for_pn(pn):
|
||||
appends = True
|
||||
@@ -490,11 +479,11 @@ NOTE: .bbappend files can impact the dependencies.
|
||||
parser_show_recipes = self.add_command(sp, 'show-recipes', self.do_show_recipes)
|
||||
parser_show_recipes.add_argument('-f', '--filenames', help='instead of the default formatting, list filenames of higher priority recipes with the ones they overlay indented underneath', action='store_true')
|
||||
parser_show_recipes.add_argument('-m', '--multiple', help='only list where multiple recipes (in the same layer or different layers) exist for the same recipe name', action='store_true')
|
||||
parser_show_recipes.add_argument('-i', '--inherits', help='only list recipes that inherit the named class(es) - separate multiple classes using , (without spaces)', metavar='CLASS', default='')
|
||||
parser_show_recipes.add_argument('pnspec', nargs='*', help='optional recipe name specification (wildcards allowed, enclose in quotes to avoid shell expansion)')
|
||||
parser_show_recipes.add_argument('-i', '--inherits', help='only list recipes that inherit the named class', metavar='CLASS', default='')
|
||||
parser_show_recipes.add_argument('pnspec', nargs='?', help='optional recipe name specification (wildcards allowed, enclose in quotes to avoid shell expansion)')
|
||||
|
||||
parser_show_appends = self.add_command(sp, 'show-appends', self.do_show_appends)
|
||||
parser_show_appends.add_argument('pnspec', nargs='*', help='optional recipe name specification (wildcards allowed, enclose in quotes to avoid shell expansion)')
|
||||
parser_show_appends.add_argument('pnspec', nargs='?', help='optional recipe name specification (wildcards allowed, enclose in quotes to avoid shell expansion)')
|
||||
|
||||
parser_show_cross_depends = self.add_command(sp, 'show-cross-depends', self.do_show_cross_depends)
|
||||
parser_show_cross_depends.add_argument('-f', '--filenames', help='show full file path', action='store_true')
|
||||
|
||||
@@ -1,28 +0,0 @@
|
||||
The layerindexlib module is designed to permit programs to work directly
with layer index information. (See layers.openembedded.org...)

The layerindexlib module includes a plugin interface that is used to extend
the basic functionality. There are two primary plugins available: restapi
and cooker.

The restapi plugin works with a web based REST Api compatible with the
layerindex-web project, as well as the ability to store and retrieve
the information for one or more files on the disk.

The cooker plugin works by reading the information from the current build
project and processing it as if it were a layer index.
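
For orientation, a rough sketch of how the module is typically driven. It mirrors
the calls made by the layerindex-fetch command implementation elsewhere in this
change set; the datastore variable `d` and the layer name are only illustrative:

    import layerindexlib

    # Load the current build configuration as if it were a layer index.
    cooker_index = layerindexlib.LayerIndex(d)   # d: a BitBake datastore
    cooker_index.load_layerindex('cooker://', load='layerDependencies')

    # Load a remote REST API index (layerindex-web style); ;branch= narrows the query.
    remote_index = layerindexlib.LayerIndex(d)
    remote_index.load_layerindex('http://layers.openembedded.org/layerindex/api/;branch=master')

    # Indexes can be combined and then queried for layer dependencies.
    combined = cooker_index + remote_index
    dependencies, invalidnames = combined.find_dependencies(names=['meta-example'], ignores=[])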

TODO:

__init__.py:
Implement local on-disk caching (using the rest api store/load)
Implement layer index style query operations on a combined index

common.py:
Stop network access if BB_NO_NETWORK or allowed hosts is restricted

cooker.py:
Cooker - Implement recipe parsing

File diff suppressed because it is too large
@@ -1,344 +0,0 @@
|
||||
# Copyright (C) 2016-2018 Wind River Systems, Inc.
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License version 2 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
|
||||
# See the GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
|
||||
import logging
|
||||
import json
|
||||
|
||||
from collections import OrderedDict, defaultdict
|
||||
|
||||
from urllib.parse import unquote, urlparse
|
||||
|
||||
import layerindexlib
|
||||
|
||||
import layerindexlib.plugin
|
||||
|
||||
logger = logging.getLogger('BitBake.layerindexlib.cooker')
|
||||
|
||||
import bb.utils
|
||||
|
||||
def plugin_init(plugins):
|
||||
return CookerPlugin()
|
||||
|
||||
class CookerPlugin(layerindexlib.plugin.IndexPlugin):
|
||||
def __init__(self):
|
||||
self.type = "cooker"
|
||||
|
||||
self.server_connection = None
|
||||
self.ui_module = None
|
||||
self.server = None
|
||||
|
||||
def _run_command(self, command, path, default=None):
|
||||
try:
|
||||
result, _ = bb.process.run(command, cwd=path)
|
||||
result = result.strip()
|
||||
except bb.process.ExecutionError:
|
||||
result = default
|
||||
return result
|
||||
|
||||
def _handle_git_remote(self, remote):
|
||||
if "://" not in remote:
|
||||
if ':' in remote:
|
||||
# This is assumed to be ssh
|
||||
remote = "ssh://" + remote
|
||||
else:
|
||||
# This is assumed to be a file path
|
||||
remote = "file://" + remote
|
||||
return remote
|
||||
|
||||
def _get_bitbake_info(self):
|
||||
"""Return a tuple of bitbake information"""
|
||||
|
||||
# Our path SHOULD be .../bitbake/lib/layerindex/cooker.py
|
||||
bb_path = os.path.dirname(__file__) # .../bitbake/lib/layerindex/cooker.py
|
||||
bb_path = os.path.dirname(bb_path) # .../bitbake/lib/layerindex
|
||||
bb_path = os.path.dirname(bb_path) # .../bitbake/lib
|
||||
bb_path = os.path.dirname(bb_path) # .../bitbake
|
||||
bb_path = self._run_command('git rev-parse --show-toplevel', os.path.dirname(__file__), default=bb_path)
|
||||
bb_branch = self._run_command('git rev-parse --abbrev-ref HEAD', bb_path, default="<unknown>")
|
||||
bb_rev = self._run_command('git rev-parse HEAD', bb_path, default="<unknown>")
|
||||
for remotes in self._run_command('git remote -v', bb_path, default="").split("\n"):
|
||||
remote = remotes.split("\t")[1].split(" ")[0]
|
||||
if "(fetch)" == remotes.split("\t")[1].split(" ")[1]:
|
||||
bb_remote = self._handle_git_remote(remote)
|
||||
break
|
||||
else:
|
||||
bb_remote = self._handle_git_remote(bb_path)
|
||||
|
||||
return (bb_remote, bb_branch, bb_rev, bb_path)
|
||||
|
||||
def _load_bblayers(self, branches=None):
|
||||
"""Load the BBLAYERS and related collection information"""
|
||||
|
||||
d = self.layerindex.data
|
||||
|
||||
if not branches:
|
||||
raise LayerIndexFetchError("No branches specified for _load_bblayers!")
|
||||
|
||||
index = layerindexlib.LayerIndexObj()
|
||||
|
||||
branchId = 0
|
||||
index.branches = {}
|
||||
|
||||
layerItemId = 0
|
||||
index.layerItems = {}
|
||||
|
||||
layerBranchId = 0
|
||||
index.layerBranches = {}
|
||||
|
||||
bblayers = d.getVar('BBLAYERS').split()
|
||||
|
||||
if not bblayers:
|
||||
# It's blank! Nothing to process...
|
||||
return index
|
||||
|
||||
collections = d.getVar('BBFILE_COLLECTIONS')
|
||||
layerconfs = d.varhistory.get_variable_items_files('BBFILE_COLLECTIONS', d)
|
||||
bbfile_collections = {layer: os.path.dirname(os.path.dirname(path)) for layer, path in layerconfs.items()}
|
||||
|
||||
(_, bb_branch, _, _) = self._get_bitbake_info()
|
||||
|
||||
for branch in branches:
|
||||
branchId += 1
|
||||
index.branches[branchId] = layerindexlib.Branch(index, None)
|
||||
index.branches[branchId].define_data(branchId, branch, bb_branch)
|
||||
|
||||
for entry in collections.split():
|
||||
layerpath = entry
|
||||
if entry in bbfile_collections:
|
||||
layerpath = bbfile_collections[entry]
|
||||
|
||||
layername = d.getVar('BBLAYERS_LAYERINDEX_NAME_%s' % entry) or os.path.basename(layerpath)
|
||||
layerversion = d.getVar('LAYERVERSION_%s' % entry) or ""
|
||||
layerurl = self._handle_git_remote(layerpath)
|
||||
|
||||
layersubdir = ""
|
||||
layerrev = "<unknown>"
|
||||
layerbranch = "<unknown>"
|
||||
|
||||
if os.path.isdir(layerpath):
|
||||
layerbasepath = self._run_command('git rev-parse --show-toplevel', layerpath, default=layerpath)
|
||||
if os.path.abspath(layerpath) != os.path.abspath(layerbasepath):
|
||||
layersubdir = os.path.abspath(layerpath)[len(layerbasepath) + 1:]
|
||||
|
||||
layerbranch = self._run_command('git rev-parse --abbrev-ref HEAD', layerpath, default="<unknown>")
|
||||
layerrev = self._run_command('git rev-parse HEAD', layerpath, default="<unknown>")
|
||||
|
||||
for remotes in self._run_command('git remote -v', layerpath, default="").split("\n"):
|
||||
if not remotes:
|
||||
layerurl = self._handle_git_remote(layerpath)
|
||||
else:
|
||||
remote = remotes.split("\t")[1].split(" ")[0]
|
||||
if "(fetch)" == remotes.split("\t")[1].split(" ")[1]:
|
||||
layerurl = self._handle_git_remote(remote)
|
||||
break
|
||||
|
||||
layerItemId += 1
|
||||
index.layerItems[layerItemId] = layerindexlib.LayerItem(index, None)
|
||||
index.layerItems[layerItemId].define_data(layerItemId, layername, description=layerpath, vcs_url=layerurl)
|
||||
|
||||
for branchId in index.branches:
|
||||
layerBranchId += 1
|
||||
index.layerBranches[layerBranchId] = layerindexlib.LayerBranch(index, None)
|
||||
index.layerBranches[layerBranchId].define_data(layerBranchId, entry, layerversion, layerItemId, branchId,
|
||||
vcs_subdir=layersubdir, vcs_last_rev=layerrev, actual_branch=layerbranch)
|
||||
|
||||
return index
|
||||
|
||||
|
||||
def load_index(self, url, load):
|
||||
"""
|
||||
Fetches layer information from a build configuration.
|
||||
|
||||
The return value is a dictionary containing API,
|
||||
layer, branch, dependency, recipe, machine, distro, information.
|
||||
|
||||
url type should be 'cooker'.
|
||||
url path is ignored
|
||||
"""
|
||||
|
||||
up = urlparse(url)
|
||||
|
||||
if up.scheme != 'cooker':
|
||||
raise layerindexlib.plugin.LayerIndexPluginUrlError(self.type, url)
|
||||
|
||||
d = self.layerindex.data
|
||||
|
||||
params = self.layerindex._parse_params(up.params)
|
||||
|
||||
# Only reason to pass a branch is to emulate them...
|
||||
if 'branch' in params:
|
||||
branches = params['branch'].split(',')
|
||||
else:
|
||||
branches = ['HEAD']
|
||||
|
||||
logger.debug(1, "Loading cooker data branches %s" % branches)
|
||||
|
||||
index = self._load_bblayers(branches=branches)
|
||||
|
||||
index.config = {}
|
||||
index.config['TYPE'] = self.type
|
||||
index.config['URL'] = url
|
||||
|
||||
if 'desc' in params:
|
||||
index.config['DESCRIPTION'] = unquote(params['desc'])
|
||||
else:
|
||||
index.config['DESCRIPTION'] = 'local'
|
||||
|
||||
if 'cache' in params:
|
||||
index.config['CACHE'] = params['cache']
|
||||
|
||||
index.config['BRANCH'] = branches
|
||||
|
||||
# ("layerDependencies", layerindexlib.LayerDependency)
|
||||
layerDependencyId = 0
|
||||
if "layerDependencies" in load:
|
||||
index.layerDependencies = {}
|
||||
for layerBranchId in index.layerBranches:
|
||||
branchName = index.layerBranches[layerBranchId].branch.name
|
||||
collection = index.layerBranches[layerBranchId].collection
|
||||
|
||||
def add_dependency(layerDependencyId, index, deps, required):
|
||||
try:
|
||||
depDict = bb.utils.explode_dep_versions2(deps)
|
||||
except bb.utils.VersionStringException as vse:
|
||||
bb.fatal('Error parsing LAYERDEPENDS_%s: %s' % (c, str(vse)))
|
||||
|
||||
for dep, oplist in list(depDict.items()):
|
||||
# We need to search ourselves, so use the _ version...
|
||||
depLayerBranch = index.find_collection(dep, branches=[branchName])
|
||||
if not depLayerBranch:
|
||||
# Missing dependency?!
|
||||
logger.error('Missing dependency %s (%s)' % (dep, branchName))
|
||||
continue
|
||||
|
||||
# We assume that the oplist matches...
|
||||
layerDependencyId += 1
|
||||
layerDependency = layerindexlib.LayerDependency(index, None)
|
||||
layerDependency.define_data(id=layerDependencyId,
|
||||
required=required, layerbranch=layerBranchId,
|
||||
dependency=depLayerBranch.layer_id)
|
||||
|
||||
logger.debug(1, '%s requires %s' % (layerDependency.layer.name, layerDependency.dependency.name))
|
||||
index.add_element("layerDependencies", [layerDependency])
|
||||
|
||||
return layerDependencyId
|
||||
|
||||
deps = d.getVar("LAYERDEPENDS_%s" % collection)
|
||||
if deps:
|
||||
layerDependencyId = add_dependency(layerDependencyId, index, deps, True)
|
||||
|
||||
deps = d.getVar("LAYERRECOMMENDS_%s" % collection)
|
||||
if deps:
|
||||
layerDependencyId = add_dependency(layerDependencyId, index, deps, False)
|
||||
|
||||
# Need to load recipes here (requires cooker access)
|
||||
recipeId = 0
|
||||
## TODO: NOT IMPLEMENTED
|
||||
# The code following this is an example of what needs to be
|
||||
# implemented. However, it does not work as-is.
|
||||
if False and 'recipes' in load:
|
||||
index.recipes = {}
|
||||
|
||||
ret = self.ui_module.main(self.server_connection.connection, self.server_connection.events, config_params)
|
||||
|
||||
all_versions = self._run_command('allProviders')
|
||||
|
||||
all_versions_list = defaultdict(list, all_versions)
|
||||
for pn in all_versions_list:
|
||||
for ((pe, pv, pr), fpath) in all_versions_list[pn]:
|
||||
realfn = bb.cache.virtualfn2realfn(fpath)
|
||||
|
||||
filepath = os.path.dirname(realfn[0])
|
||||
filename = os.path.basename(realfn[0])
|
||||
|
||||
# This is all HORRIBLY slow, and likely unnecessary
|
||||
#dscon = self._run_command('parseRecipeFile', fpath, False, [])
|
||||
#connector = myDataStoreConnector(self, dscon.dsindex)
|
||||
#recipe_data = bb.data.init()
|
||||
#recipe_data.setVar('_remote_data', connector)
|
||||
|
||||
#summary = recipe_data.getVar('SUMMARY')
|
||||
#description = recipe_data.getVar('DESCRIPTION')
|
||||
#section = recipe_data.getVar('SECTION')
|
||||
#license = recipe_data.getVar('LICENSE')
|
||||
#homepage = recipe_data.getVar('HOMEPAGE')
|
||||
#bugtracker = recipe_data.getVar('BUGTRACKER')
|
||||
#provides = recipe_data.getVar('PROVIDES')
|
||||
|
||||
layer = bb.utils.get_file_layer(realfn[0], self.config_data)
|
||||
|
||||
depBranchId = collection_layerbranch[layer]
|
||||
|
||||
recipeId += 1
|
||||
recipe = layerindexlib.Recipe(index, None)
|
||||
recipe.define_data(id=recipeId,
|
||||
filename=filename, filepath=filepath,
|
||||
pn=pn, pv=pv,
|
||||
summary=pn, description=pn, section='?',
|
||||
license='?', homepage='?', bugtracker='?',
|
||||
provides='?', bbclassextend='?', inherits='?',
|
||||
blacklisted='?', layerbranch=depBranchId)
|
||||
|
||||
index = addElement("recipes", [recipe], index)
|
||||
|
||||
# ("machines", layerindexlib.Machine)
|
||||
machineId = 0
|
||||
if 'machines' in load:
|
||||
index.machines = {}
|
||||
|
||||
for layerBranchId in index.layerBranches:
|
||||
# load_bblayers uses the description to cache the actual path...
|
||||
machine_path = index.layerBranches[layerBranchId].layer.description
|
||||
machine_path = os.path.join(machine_path, 'conf/machine')
|
||||
if os.path.isdir(machine_path):
|
||||
for (dirpath, _, filenames) in os.walk(machine_path):
|
||||
# Ignore subdirs...
|
||||
if not dirpath.endswith('conf/machine'):
|
||||
continue
|
||||
for fname in filenames:
|
||||
if fname.endswith('.conf'):
|
||||
machineId += 1
|
||||
machine = layerindexlib.Machine(index, None)
|
||||
machine.define_data(id=machineId, name=fname[:-5],
|
||||
description=fname[:-5],
|
||||
layerbranch=index.layerBranches[layerBranchId])
|
||||
|
||||
index.add_element("machines", [machine])
|
||||
|
||||
# ("distros", layerindexlib.Distro)
|
||||
distroId = 0
|
||||
if 'distros' in load:
|
||||
index.distros = {}
|
||||
|
||||
for layerBranchId in index.layerBranches:
|
||||
# load_bblayers uses the description to cache the actual path...
|
||||
distro_path = index.layerBranches[layerBranchId].layer.description
|
||||
distro_path = os.path.join(distro_path, 'conf/distro')
|
||||
if os.path.isdir(distro_path):
|
||||
for (dirpath, _, filenames) in os.walk(distro_path):
|
||||
# Ignore subdirs...
|
||||
if not dirpath.endswith('conf/distro'):
|
||||
continue
|
||||
for fname in filenames:
|
||||
if fname.endswith('.conf'):
|
||||
distroId += 1
|
||||
distro = layerindexlib.Distro(index, None)
|
||||
distro.define_data(id=distroId, name=fname[:-5],
|
||||
description=fname[:-5],
|
||||
layerbranch=index.layerBranches[layerBranchId])
|
||||
|
||||
index.add_element("distros", [distro])
|
||||
|
||||
return index
|
||||
@@ -1,60 +0,0 @@
|
||||
# Copyright (C) 2016-2018 Wind River Systems, Inc.
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License version 2 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
|
||||
# See the GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
|
||||
# The file contains:
|
||||
# LayerIndex exceptions
|
||||
# Plugin base class
|
||||
# Utility Functions for working on layerindex data
|
||||
|
||||
import argparse
|
||||
import logging
|
||||
import os
|
||||
import bb.msg
|
||||
|
||||
logger = logging.getLogger('BitBake.layerindexlib.plugin')
|
||||
|
||||
class LayerIndexPluginException(Exception):
|
||||
"""LayerIndex Generic Exception"""
|
||||
def __init__(self, message):
|
||||
self.msg = message
|
||||
Exception.__init__(self, message)
|
||||
|
||||
def __str__(self):
|
||||
return self.msg
|
||||
|
||||
class LayerIndexPluginUrlError(LayerIndexPluginException):
|
||||
"""Exception raised when a plugin does not support a given URL type"""
|
||||
def __init__(self, plugin, url):
|
||||
msg = "%s does not support %s:" % (plugin, url)
|
||||
self.plugin = plugin
|
||||
self.url = url
|
||||
LayerIndexPluginException.__init__(self, msg)
|
||||
|
||||
class IndexPlugin():
|
||||
def __init__(self):
|
||||
self.type = None
|
||||
|
||||
def init(self, layerindex):
|
||||
self.layerindex = layerindex
|
||||
|
||||
def plugin_type(self):
|
||||
return self.type
|
||||
|
||||
def load_index(self, uri):
|
||||
raise NotImplementedError('load_index is not implemented')
|
||||
|
||||
def store_index(self, uri, index):
|
||||
raise NotImplementedError('store_index is not implemented')
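
As a quick illustration (a sketch, not part of the original file), a third-party
index plugin would follow the same pattern as the cooker and restapi plugins in
this change set: expose a plugin_init() hook and subclass IndexPlugin. The
"example" scheme and class name below are hypothetical:

    import layerindexlib
    import layerindexlib.plugin

    def plugin_init(plugins):
        # The plugin loader calls this and expects a plugin instance back.
        return ExamplePlugin()

    class ExamplePlugin(layerindexlib.plugin.IndexPlugin):
        def __init__(self):
            self.type = "example"

        def load_index(self, url, load):
            # Only handle example:// URLs; reject anything else.
            if not url.startswith("example://"):
                raise layerindexlib.plugin.LayerIndexPluginUrlError(self.type, url)
            # Return an (empty) index object describing this source.
            index = layerindexlib.LayerIndexObj()
            index.config = {'TYPE': self.type, 'URL': url}
            return index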
@@ -1,398 +0,0 @@
|
||||
# Copyright (C) 2016-2018 Wind River Systems, Inc.
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License version 2 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
|
||||
# See the GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
|
||||
import logging
|
||||
import json
|
||||
from urllib.parse import unquote
|
||||
from urllib.parse import urlparse
|
||||
|
||||
import layerindexlib
|
||||
import layerindexlib.plugin
|
||||
|
||||
logger = logging.getLogger('BitBake.layerindexlib.restapi')
|
||||
|
||||
def plugin_init(plugins):
|
||||
return RestApiPlugin()
|
||||
|
||||
class RestApiPlugin(layerindexlib.plugin.IndexPlugin):
|
||||
def __init__(self):
|
||||
self.type = "restapi"
|
||||
|
||||
def load_index(self, url, load):
|
||||
"""
|
||||
Fetches layer information from a local or remote layer index.
|
||||
|
||||
The return value is a LayerIndexObj.
|
||||
|
||||
url is the url to the rest api of the layer index, such as:
|
||||
http://layers.openembedded.org/layerindex/api/
|
||||
|
||||
Or a local file...
|
||||
"""
|
||||
|
||||
up = urlparse(url)
|
||||
|
||||
if up.scheme == 'file':
|
||||
return self.load_index_file(up, url, load)
|
||||
|
||||
if up.scheme == 'http' or up.scheme == 'https':
|
||||
return self.load_index_web(up, url, load)
|
||||
|
||||
raise layerindexlib.plugin.LayerIndexPluginUrlError(self.type, url)
|
||||
|
||||
|
||||
def load_index_file(self, up, url, load):
|
||||
"""
|
||||
Fetches layer information from a local file or directory.
|
||||
|
||||
The return value is a LayerIndexObj.
|
||||
|
||||
ud is the parsed url to the local file or directory.
|
||||
"""
|
||||
if not os.path.exists(up.path):
|
||||
raise FileNotFoundError(up.path)
|
||||
|
||||
index = layerindexlib.LayerIndexObj()
|
||||
|
||||
index.config = {}
|
||||
index.config['TYPE'] = self.type
|
||||
index.config['URL'] = url
|
||||
|
||||
params = self.layerindex._parse_params(up.params)
|
||||
|
||||
if 'desc' in params:
|
||||
index.config['DESCRIPTION'] = unquote(params['desc'])
|
||||
else:
|
||||
index.config['DESCRIPTION'] = up.path
|
||||
|
||||
if 'cache' in params:
|
||||
index.config['CACHE'] = params['cache']
|
||||
|
||||
if 'branch' in params:
|
||||
branches = params['branch'].split(',')
|
||||
index.config['BRANCH'] = branches
|
||||
else:
|
||||
branches = ['*']
|
||||
|
||||
|
||||
def load_cache(path, index, branches=[]):
|
||||
logger.debug(1, 'Loading json file %s' % path)
|
||||
with open(path, 'rt', encoding='utf-8') as f:
|
||||
pindex = json.load(f)
|
||||
|
||||
# Filter the branches on loaded files...
|
||||
newpBranch = []
|
||||
for branch in branches:
|
||||
if branch != '*':
|
||||
if 'branches' in pindex:
|
||||
for br in pindex['branches']:
|
||||
if br['name'] == branch:
|
||||
newpBranch.append(br)
|
||||
else:
|
||||
if 'branches' in pindex:
|
||||
for br in pindex['branches']:
|
||||
newpBranch.append(br)
|
||||
|
||||
if newpBranch:
|
||||
index.add_raw_element('branches', layerindexlib.Branch, newpBranch)
|
||||
else:
|
||||
logger.debug(1, 'No matching branches (%s) in index file(s)' % branches)
|
||||
# No matching branches.. return nothing...
|
||||
return
|
||||
|
||||
for (lName, lType) in [("layerItems", layerindexlib.LayerItem),
|
||||
("layerBranches", layerindexlib.LayerBranch),
|
||||
("layerDependencies", layerindexlib.LayerDependency),
|
||||
("recipes", layerindexlib.Recipe),
|
||||
("machines", layerindexlib.Machine),
|
||||
("distros", layerindexlib.Distro)]:
|
||||
if lName in pindex:
|
||||
index.add_raw_element(lName, lType, pindex[lName])
|
||||
|
||||
|
||||
if not os.path.isdir(up.path):
|
||||
load_cache(up.path, index, branches)
|
||||
return index
|
||||
|
||||
logger.debug(1, 'Loading from dir %s...' % (up.path))
|
||||
for (dirpath, _, filenames) in os.walk(up.path):
|
||||
for filename in filenames:
|
||||
if not filename.endswith('.json'):
|
||||
continue
|
||||
fpath = os.path.join(dirpath, filename)
|
||||
load_cache(fpath, index, branches)
|
||||
|
||||
return index
|
||||
|
||||
|
||||
def load_index_web(self, up, url, load):
|
||||
"""
|
||||
Fetches layer information from a remote layer index.
|
||||
|
||||
The return value is a LayerIndexObj.
|
||||
|
||||
ud is the parsed url to the rest api of the layer index, such as:
|
||||
http://layers.openembedded.org/layerindex/api/
|
||||
"""
|
||||
|
||||
def _get_json_response(apiurl=None, username=None, password=None, retry=True):
|
||||
assert apiurl is not None
|
||||
|
||||
logger.debug(1, "fetching %s" % apiurl)
|
||||
|
||||
up = urlparse(apiurl)
|
||||
|
||||
username=up.username
|
||||
password=up.password
|
||||
|
||||
# Strip username/password and params
|
||||
if up.port:
|
||||
up_stripped = up._replace(params="", netloc="%s:%s" % (up.hostname, up.port))
|
||||
else:
|
||||
up_stripped = up._replace(params="", netloc=up.hostname)
|
||||
|
||||
res = self.layerindex._fetch_url(up_stripped.geturl(), username=username, password=password)
|
||||
|
||||
try:
|
||||
parsed = json.loads(res.read().decode('utf-8'))
|
||||
except ConnectionResetError:
|
||||
if retry:
|
||||
logger.debug(1, "%s: Connection reset by peer. Retrying..." % url)
|
||||
parsed = _get_json_response(apiurl=up_stripped.geturl(), username=username, password=password, retry=False)
|
||||
logger.debug(1, "%s: retry successful." % url)
|
||||
else:
|
||||
raise LayerIndexFetchError('%s: Connection reset by peer. Is there a firewall blocking your connection?' % apiurl)
|
||||
|
||||
return parsed
|
||||
|
||||
index = layerindexlib.LayerIndexObj()
|
||||
|
||||
index.config = {}
|
||||
index.config['TYPE'] = self.type
|
||||
index.config['URL'] = url
|
||||
|
||||
params = self.layerindex._parse_params(up.params)
|
||||
|
||||
if 'desc' in params:
|
||||
index.config['DESCRIPTION'] = unquote(params['desc'])
|
||||
else:
|
||||
index.config['DESCRIPTION'] = up.hostname
|
||||
|
||||
if 'cache' in params:
|
||||
index.config['CACHE'] = params['cache']
|
||||
|
||||
if 'branch' in params:
|
||||
branches = params['branch'].split(',')
|
||||
index.config['BRANCH'] = branches
|
||||
else:
|
||||
branches = ['*']
|
||||
|
||||
try:
|
||||
index.apilinks = _get_json_response(apiurl=url, username=up.username, password=up.password)
|
||||
except Exception as e:
|
||||
raise layerindexlib.LayerIndexFetchError(url, e)
|
||||
|
||||
# Local raw index set...
|
||||
pindex = {}
|
||||
|
||||
# Load all the requested branches at the same time,
|
||||
# a special branch of '*' means load all branches
|
||||
filter = ""
|
||||
if "*" not in branches:
|
||||
filter = "?filter=name:%s" % "OR".join(branches)
|
||||
|
||||
logger.debug(1, "Loading %s from %s" % (branches, index.apilinks['branches']))
|
||||
|
||||
# The link won't include username/password, so pull it from the original url
|
||||
pindex['branches'] = _get_json_response(index.apilinks['branches'] + filter,
|
||||
username=up.username, password=up.password)
|
||||
if not pindex['branches']:
|
||||
logger.debug(1, "No valid branches (%s) found at url %s." % (branch, url))
|
||||
return index
|
||||
index.add_raw_element("branches", layerindexlib.Branch, pindex['branches'])
|
||||
|
||||
# Load all of the layerItems (these can not be easily filtered)
|
||||
logger.debug(1, "Loading %s from %s" % ('layerItems', index.apilinks['layerItems']))
|
||||
|
||||
|
||||
# The link won't include username/password, so pull it from the original url
|
||||
pindex['layerItems'] = _get_json_response(index.apilinks['layerItems'],
|
||||
username=up.username, password=up.password)
|
||||
if not pindex['layerItems']:
|
||||
logger.debug(1, "No layers were found at url %s." % (url))
|
||||
return index
|
||||
index.add_raw_element("layerItems", layerindexlib.LayerItem, pindex['layerItems'])
|
||||
|
||||
|
||||
# From this point on load the contents for each branch. Otherwise we
|
||||
# could run into a timeout.
|
||||
for branch in index.branches:
|
||||
filter = "?filter=branch__name:%s" % index.branches[branch].name
|
||||
|
||||
logger.debug(1, "Loading %s from %s" % ('layerBranches', index.apilinks['layerBranches']))
|
||||
|
||||
# The link won't include username/password, so pull it from the original url
|
||||
pindex['layerBranches'] = _get_json_response(index.apilinks['layerBranches'] + filter,
|
||||
username=up.username, password=up.password)
|
||||
if not pindex['layerBranches']:
|
||||
logger.debug(1, "No valid layer branches (%s) found at url %s." % (branches or "*", url))
|
||||
return index
|
||||
index.add_raw_element("layerBranches", layerindexlib.LayerBranch, pindex['layerBranches'])
|
||||
|
||||
|
||||
# Load the rest, they all have a similar format
|
||||
# Note: the layer index has a few more items, we can add them if necessary
|
||||
# in the future.
|
||||
filter = "?filter=layerbranch__branch__name:%s" % index.branches[branch].name
|
||||
for (lName, lType) in [("layerDependencies", layerindexlib.LayerDependency),
|
||||
("recipes", layerindexlib.Recipe),
|
||||
("machines", layerindexlib.Machine),
|
||||
("distros", layerindexlib.Distro)]:
|
||||
if lName not in load:
|
||||
continue
|
||||
logger.debug(1, "Loading %s from %s" % (lName, index.apilinks[lName]))
|
||||
|
||||
# The link won't include username/password, so pull it from the original url
|
||||
pindex[lName] = _get_json_response(index.apilinks[lName] + filter,
|
||||
username=up.username, password=up.password)
|
||||
index.add_raw_element(lName, lType, pindex[lName])
|
||||
|
||||
return index
|
||||
|
||||
def store_index(self, url, index):
|
||||
"""
|
||||
Store layer information into a local file/dir.
|
||||
|
||||
The return value is a dictionary containing API,
|
||||
layer, branch, dependency, recipe, machine, distro, information.
|
||||
|
||||
ud is a parsed url to a directory or file. If the path is a
|
||||
directory, we will split the files into one file per layer.
|
||||
If the path is to a file (exists or not) the entire DB will be
|
||||
dumped into that one file.
|
||||
"""
|
||||
|
||||
up = urlparse(url)
|
||||
|
||||
if up.scheme != 'file':
|
||||
raise layerindexlib.plugin.LayerIndexPluginUrlError(self.type, url)
|
||||
|
||||
logger.debug(1, "Storing to %s..." % up.path)
|
||||
|
||||
try:
|
||||
layerbranches = index.layerBranches
|
||||
except KeyError:
|
||||
logger.error('No layerBranches to write.')
|
||||
return
|
||||
|
||||
|
||||
def filter_item(layerbranchid, objects):
|
||||
filtered = []
|
||||
for obj in getattr(index, objects, {}):
|
||||
try:
|
||||
if getattr(index, objects)[obj].layerbranch_id == layerbranchid:
|
||||
filtered.append(getattr(index, objects)[obj]._data)
|
||||
except AttributeError:
|
||||
logger.debug(1, 'No obj.layerbranch_id: %s' % objects)
|
||||
# No simple filter method, just include it...
|
||||
try:
|
||||
filtered.append(getattr(index, objects)[obj]._data)
|
||||
except AttributeError:
|
||||
logger.debug(1, 'No obj._data: %s %s' % (objects, type(obj)))
|
||||
filtered.append(obj)
|
||||
return filtered
|
||||
|
||||
|
||||
# Write out to a single file.
|
||||
# Filter out unnecessary items, then sort as we write for determinism
|
||||
if not os.path.isdir(up.path):
|
||||
pindex = {}
|
||||
|
||||
pindex['branches'] = []
|
||||
pindex['layerItems'] = []
|
||||
pindex['layerBranches'] = []
|
||||
|
||||
for layerbranchid in layerbranches:
|
||||
if layerbranches[layerbranchid].branch._data not in pindex['branches']:
|
||||
pindex['branches'].append(layerbranches[layerbranchid].branch._data)
|
||||
|
||||
if layerbranches[layerbranchid].layer._data not in pindex['layerItems']:
|
||||
pindex['layerItems'].append(layerbranches[layerbranchid].layer._data)
|
||||
|
||||
if layerbranches[layerbranchid]._data not in pindex['layerBranches']:
|
||||
pindex['layerBranches'].append(layerbranches[layerbranchid]._data)
|
||||
|
||||
for entry in index._index:
|
||||
# Skip local items, apilinks and items already processed
|
||||
if entry in index.config['local'] or \
|
||||
entry == 'apilinks' or \
|
||||
entry == 'branches' or \
|
||||
entry == 'layerBranches' or \
|
||||
entry == 'layerItems':
|
||||
continue
|
||||
if entry not in pindex:
|
||||
pindex[entry] = []
|
||||
pindex[entry].extend(filter_item(layerbranchid, entry))
|
||||
|
||||
bb.debug(1, 'Writing index to %s' % up.path)
|
||||
with open(up.path, 'wt') as f:
|
||||
json.dump(layerindexlib.sort_entry(pindex), f, indent=4)
|
||||
return
|
||||
|
||||
|
||||
# Write out to a directory one file per layerBranch
|
||||
# Prepare all layer related items, to create a minimal file.
|
||||
# We have to sort the entries as we write so they are deterministic
|
||||
for layerbranchid in layerbranches:
|
||||
pindex = {}
|
||||
|
||||
for entry in index._index:
|
||||
# Skip local items, apilinks and items already processed
|
||||
if entry in index.config['local'] or \
|
||||
entry == 'apilinks' or \
|
||||
entry == 'branches' or \
|
||||
entry == 'layerBranches' or \
|
||||
entry == 'layerItems':
|
||||
continue
|
||||
pindex[entry] = filter_item(layerbranchid, entry)
|
||||
|
||||
# Add the layer we're processing as the first one...
|
||||
pindex['branches'] = [layerbranches[layerbranchid].branch._data]
|
||||
pindex['layerItems'] = [layerbranches[layerbranchid].layer._data]
|
||||
pindex['layerBranches'] = [layerbranches[layerbranchid]._data]
|
||||
|
||||
# We also need to include the layerbranch for any dependencies...
|
||||
for layerdep in pindex['layerDependencies']:
|
||||
layerdependency = layerindexlib.LayerDependency(index, layerdep)
|
||||
|
||||
layeritem = layerdependency.dependency
|
||||
layerbranch = layerdependency.dependency_layerBranch
|
||||
|
||||
# We need to avoid duplicates...
|
||||
if layeritem._data not in pindex['layerItems']:
|
||||
pindex['layerItems'].append(layeritem._data)
|
||||
|
||||
if layerbranch._data not in pindex['layerBranches']:
|
||||
pindex['layerBranches'].append(layerbranch._data)
|
||||
|
||||
# apply mirroring adjustments here....
|
||||
|
||||
fname = index.config['DESCRIPTION'] + '__' + pindex['branches'][0]['name'] + '__' + pindex['layerItems'][0]['name']
|
||||
fname = fname.translate(str.maketrans('/ ', '__'))
|
||||
fpath = os.path.join(up.path, fname)
|
||||
|
||||
bb.debug(1, 'Writing index to %s' % fpath + '.json')
|
||||
with open(fpath + '.json', 'wt') as f:
|
||||
json.dump(layerindexlib.sort_entry(pindex), f, indent=4)
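The single-file versus per-layer split behaviour implemented above can be driven
through the public LayerIndex API, as the restapi tests further down do. A minimal
sketch, assuming a populated index and writable paths (the paths themselves are
made up for illustration):

import layerindexlib

index = layerindexlib.LayerIndex(d)   # 'd' is a BitBake datastore
index.load_layerindex('http://layers.openembedded.org/layerindex/api/;branch=sumo')

# A plain file path: the whole index is dumped into one JSON file.
index.store_layerindex('file:///tmp/layerindex/full.json', index.indexes[0])

# An existing directory: one JSON file is written per layerBranch.
index.store_layerindex('file:///tmp/layerindex/split/', index.indexes[0])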
|
||||
@@ -1,43 +0,0 @@
|
||||
# Copyright (C) 2017-2018 Wind River Systems, Inc.
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License version 2 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
|
||||
# See the GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
|
||||
import unittest
|
||||
import tempfile
|
||||
import os
|
||||
import bb
|
||||
|
||||
import logging
|
||||
|
||||
class LayersTest(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
self.origdir = os.getcwd()
|
||||
self.d = bb.data.init()
|
||||
# At least one variable needs to be set
|
||||
self.d.setVar('DL_DIR', os.getcwd())
|
||||
|
||||
if os.environ.get("BB_SKIP_NETTESTS") == "yes":
|
||||
self.d.setVar('BB_NO_NETWORK', '1')
|
||||
|
||||
self.tempdir = tempfile.mkdtemp()
|
||||
self.logger = logging.getLogger("BitBake")
|
||||
|
||||
def tearDown(self):
|
||||
os.chdir(self.origdir)
|
||||
if os.environ.get("BB_TMPDIR_NOCLEAN") == "yes":
|
||||
print("Not cleaning up %s. Please remove manually." % self.tempdir)
|
||||
else:
|
||||
bb.utils.prunedir(self.tempdir)
|
||||
|
||||
@@ -1,123 +0,0 @@
|
||||
# Copyright (C) 2018 Wind River Systems, Inc.
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License version 2 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
|
||||
# See the GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
|
||||
import unittest
|
||||
import tempfile
|
||||
import os
|
||||
import bb
|
||||
|
||||
import layerindexlib
|
||||
from layerindexlib.tests.common import LayersTest
|
||||
|
||||
import logging
|
||||
|
||||
class LayerIndexCookerTest(LayersTest):
|
||||
|
||||
def setUp(self):
|
||||
LayersTest.setUp(self)
|
||||
|
||||
# Note this is NOT a comprehensive test of cooker, as we can't easily
|
||||
# configure the test data. But we can emulate the basics of the layer.conf
|
||||
# files, so that is what we will do.
|
||||
|
||||
new_topdir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "testdata")
|
||||
new_bbpath = os.path.join(new_topdir, "build")
|
||||
|
||||
self.d.setVar('TOPDIR', new_topdir)
|
||||
self.d.setVar('BBPATH', new_bbpath)
|
||||
|
||||
self.d = bb.parse.handle("%s/conf/bblayers.conf" % new_bbpath, self.d, True)
|
||||
for layer in self.d.getVar('BBLAYERS').split():
|
||||
self.d = bb.parse.handle("%s/conf/layer.conf" % layer, self.d, True)
|
||||
|
||||
self.layerindex = layerindexlib.LayerIndex(self.d)
|
||||
self.layerindex.load_layerindex('cooker://', load=['layerDependencies'])
|
||||
|
||||
def test_layerindex_is_empty(self):
|
||||
self.assertFalse(self.layerindex.is_empty(), msg="Layerindex is not empty!")
|
||||
|
||||
def test_dependency_resolution(self):
|
||||
# Verify depth first searching...
|
||||
(dependencies, invalidnames) = self.layerindex.find_dependencies(names=['meta-python'])
|
||||
|
||||
first = True
|
||||
for deplayerbranch in dependencies:
|
||||
layerBranch = dependencies[deplayerbranch][0]
|
||||
layerDeps = dependencies[deplayerbranch][1:]
|
||||
|
||||
if not first:
|
||||
continue
|
||||
|
||||
first = False
|
||||
|
||||
# Top of the deps should be openembedded-core, since everything depends on it.
|
||||
self.assertEqual(layerBranch.layer.name, "openembedded-core", msg='Top dependency not openembedded-core')
|
||||
|
||||
# meta-python should cause an openembedded-core dependency, if not assert!
|
||||
for dep in layerDeps:
|
||||
if dep.layer.name == 'meta-python':
|
||||
break
|
||||
else:
|
||||
self.assertTrue(False, msg='meta-python was not found')
|
||||
|
||||
# Only check the first element...
|
||||
break
|
||||
else:
|
||||
if first:
|
||||
# Empty list, this is bad.
|
||||
self.assertTrue(False, msg='Empty list of dependencies')
|
||||
|
||||
# Last dep should be the requested item
|
||||
layerBranch = dependencies[deplayerbranch][0]
|
||||
self.assertEqual(layerBranch.layer.name, "meta-python", msg='Last dependency not meta-python')
|
||||
|
||||
def test_find_collection(self):
|
||||
def _check(collection, expected):
|
||||
self.logger.debug(1, "Looking for collection %s..." % collection)
|
||||
result = self.layerindex.find_collection(collection)
|
||||
if expected:
|
||||
self.assertIsNotNone(result, msg="Did not find %s when it shouldn't be there" % collection)
|
||||
else:
|
||||
self.assertIsNone(result, msg="Found %s when it should be there" % collection)
|
||||
|
||||
tests = [ ('core', True),
|
||||
('openembedded-core', False),
|
||||
('networking-layer', True),
|
||||
('meta-python', True),
|
||||
('openembedded-layer', True),
|
||||
('notpresent', False) ]
|
||||
|
||||
for collection,result in tests:
|
||||
_check(collection, result)
|
||||
|
||||
def test_find_layerbranch(self):
|
||||
def _check(name, expected):
|
||||
self.logger.debug(1, "Looking for layerbranch %s..." % name)
|
||||
result = self.layerindex.find_layerbranch(name)
|
||||
if expected:
|
||||
self.assertIsNotNone(result, msg="Did not find %s when it shouldn't be there" % collection)
|
||||
else:
|
||||
self.assertIsNone(result, msg="Found %s when it should be there" % collection)
|
||||
|
||||
tests = [ ('openembedded-core', True),
|
||||
('core', False),
|
||||
('networking-layer', True),
|
||||
('meta-python', True),
|
||||
('openembedded-layer', True),
|
||||
('notpresent', False) ]
|
||||
|
||||
for name,result in tests:
_check(name, result)
|
||||
|
||||
@@ -1,226 +0,0 @@
|
||||
# Copyright (C) 2017-2018 Wind River Systems, Inc.
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License version 2 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
|
||||
# See the GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
|
||||
import unittest
|
||||
import tempfile
|
||||
import os
|
||||
import bb
|
||||
|
||||
from layerindexlib.tests.common import LayersTest
|
||||
|
||||
import logging
|
||||
|
||||
class LayerIndexObjectsTest(LayersTest):
|
||||
def setUp(self):
|
||||
from layerindexlib import LayerIndexObj, Branch, LayerItem, LayerBranch, LayerDependency, Recipe, Machine, Distro
|
||||
|
||||
LayersTest.setUp(self)
|
||||
|
||||
self.index = LayerIndexObj()
|
||||
|
||||
branchId = 0
|
||||
layerItemId = 0
|
||||
layerBranchId = 0
|
||||
layerDependencyId = 0
|
||||
recipeId = 0
|
||||
machineId = 0
|
||||
distroId = 0
|
||||
|
||||
self.index.branches = {}
|
||||
self.index.layerItems = {}
|
||||
self.index.layerBranches = {}
|
||||
self.index.layerDependencies = {}
|
||||
self.index.recipes = {}
|
||||
self.index.machines = {}
|
||||
self.index.distros = {}
|
||||
|
||||
branchId += 1
|
||||
self.index.branches[branchId] = Branch(self.index)
|
||||
self.index.branches[branchId].define_data(branchId,
|
||||
'test_branch', 'bb_test_branch')
|
||||
self.index.branches[branchId].lockData()
|
||||
|
||||
layerItemId +=1
|
||||
self.index.layerItems[layerItemId] = LayerItem(self.index)
|
||||
self.index.layerItems[layerItemId].define_data(layerItemId,
|
||||
'test_layerItem', vcs_url='git://git_test_url/test_layerItem')
|
||||
self.index.layerItems[layerItemId].lockData()
|
||||
|
||||
layerBranchId +=1
|
||||
self.index.layerBranches[layerBranchId] = LayerBranch(self.index)
|
||||
self.index.layerBranches[layerBranchId].define_data(layerBranchId,
|
||||
'test_collection', '99', layerItemId,
|
||||
branchId)
|
||||
|
||||
recipeId += 1
|
||||
self.index.recipes[recipeId] = Recipe(self.index)
|
||||
self.index.recipes[recipeId].define_data(recipeId, 'test_git.bb',
|
||||
'recipes-test', 'test', 'git',
|
||||
layerBranchId)
|
||||
|
||||
machineId += 1
|
||||
self.index.machines[machineId] = Machine(self.index)
|
||||
self.index.machines[machineId].define_data(machineId,
|
||||
'test_machine', 'test_machine',
|
||||
layerBranchId)
|
||||
|
||||
distroId += 1
|
||||
self.index.distros[distroId] = Distro(self.index)
|
||||
self.index.distros[distroId].define_data(distroId,
|
||||
'test_distro', 'test_distro',
|
||||
layerBranchId)
|
||||
|
||||
layerItemId +=1
|
||||
self.index.layerItems[layerItemId] = LayerItem(self.index)
|
||||
self.index.layerItems[layerItemId].define_data(layerItemId, 'test_layerItem 2',
|
||||
vcs_url='git://git_test_url/test_layerItem')
|
||||
|
||||
layerBranchId +=1
|
||||
self.index.layerBranches[layerBranchId] = LayerBranch(self.index)
|
||||
self.index.layerBranches[layerBranchId].define_data(layerBranchId,
|
||||
'test_collection_2', '72', layerItemId,
|
||||
branchId, actual_branch='some_other_branch')
|
||||
|
||||
layerDependencyId += 1
|
||||
self.index.layerDependencies[layerDependencyId] = LayerDependency(self.index)
|
||||
self.index.layerDependencies[layerDependencyId].define_data(layerDependencyId,
|
||||
layerBranchId, 1)
|
||||
|
||||
layerDependencyId += 1
|
||||
self.index.layerDependencies[layerDependencyId] = LayerDependency(self.index)
|
||||
self.index.layerDependencies[layerDependencyId].define_data(layerDependencyId,
|
||||
layerBranchId, 1, required=False)
|
||||
|
||||
def test_branch(self):
|
||||
branch = self.index.branches[1]
|
||||
self.assertEqual(branch.id, 1)
|
||||
self.assertEqual(branch.name, 'test_branch')
|
||||
self.assertEqual(branch.short_description, 'test_branch')
|
||||
self.assertEqual(branch.bitbake_branch, 'bb_test_branch')
|
||||
|
||||
def test_layerItem(self):
|
||||
layerItem = self.index.layerItems[1]
|
||||
self.assertEqual(layerItem.id, 1)
|
||||
self.assertEqual(layerItem.name, 'test_layerItem')
|
||||
self.assertEqual(layerItem.summary, 'test_layerItem')
|
||||
self.assertEqual(layerItem.description, 'test_layerItem')
|
||||
self.assertEqual(layerItem.vcs_url, 'git://git_test_url/test_layerItem')
|
||||
self.assertEqual(layerItem.vcs_web_url, None)
|
||||
self.assertIsNone(layerItem.vcs_web_tree_base_url)
|
||||
self.assertIsNone(layerItem.vcs_web_file_base_url)
|
||||
self.assertIsNotNone(layerItem.updated)
|
||||
|
||||
layerItem = self.index.layerItems[2]
|
||||
self.assertEqual(layerItem.id, 2)
|
||||
self.assertEqual(layerItem.name, 'test_layerItem 2')
|
||||
self.assertEqual(layerItem.summary, 'test_layerItem 2')
|
||||
self.assertEqual(layerItem.description, 'test_layerItem 2')
|
||||
self.assertEqual(layerItem.vcs_url, 'git://git_test_url/test_layerItem')
|
||||
self.assertIsNone(layerItem.vcs_web_url)
|
||||
self.assertIsNone(layerItem.vcs_web_tree_base_url)
|
||||
self.assertIsNone(layerItem.vcs_web_file_base_url)
|
||||
self.assertIsNotNone(layerItem.updated)
|
||||
|
||||
def test_layerBranch(self):
|
||||
layerBranch = self.index.layerBranches[1]
|
||||
self.assertEqual(layerBranch.id, 1)
|
||||
self.assertEqual(layerBranch.collection, 'test_collection')
|
||||
self.assertEqual(layerBranch.version, '99')
|
||||
self.assertEqual(layerBranch.vcs_subdir, '')
|
||||
self.assertEqual(layerBranch.actual_branch, 'test_branch')
|
||||
self.assertIsNotNone(layerBranch.updated)
|
||||
self.assertEqual(layerBranch.layer_id, 1)
|
||||
self.assertEqual(layerBranch.branch_id, 1)
|
||||
self.assertEqual(layerBranch.layer, self.index.layerItems[1])
|
||||
self.assertEqual(layerBranch.branch, self.index.branches[1])
|
||||
|
||||
layerBranch = self.index.layerBranches[2]
|
||||
self.assertEqual(layerBranch.id, 2)
|
||||
self.assertEqual(layerBranch.collection, 'test_collection_2')
|
||||
self.assertEqual(layerBranch.version, '72')
|
||||
self.assertEqual(layerBranch.vcs_subdir, '')
|
||||
self.assertEqual(layerBranch.actual_branch, 'some_other_branch')
|
||||
self.assertIsNotNone(layerBranch.updated)
|
||||
self.assertEqual(layerBranch.layer_id, 2)
|
||||
self.assertEqual(layerBranch.branch_id, 1)
|
||||
self.assertEqual(layerBranch.layer, self.index.layerItems[2])
|
||||
self.assertEqual(layerBranch.branch, self.index.branches[1])
|
||||
|
||||
def test_layerDependency(self):
|
||||
layerDependency = self.index.layerDependencies[1]
|
||||
self.assertEqual(layerDependency.id, 1)
|
||||
self.assertEqual(layerDependency.layerbranch_id, 2)
|
||||
self.assertEqual(layerDependency.layerbranch, self.index.layerBranches[2])
|
||||
self.assertEqual(layerDependency.layer_id, 2)
|
||||
self.assertEqual(layerDependency.layer, self.index.layerItems[2])
|
||||
self.assertTrue(layerDependency.required)
|
||||
self.assertEqual(layerDependency.dependency_id, 1)
|
||||
self.assertEqual(layerDependency.dependency, self.index.layerItems[1])
|
||||
self.assertEqual(layerDependency.dependency_layerBranch, self.index.layerBranches[1])
|
||||
|
||||
layerDependency = self.index.layerDependencies[2]
|
||||
self.assertEqual(layerDependency.id, 2)
|
||||
self.assertEqual(layerDependency.layerbranch_id, 2)
|
||||
self.assertEqual(layerDependency.layerbranch, self.index.layerBranches[2])
|
||||
self.assertEqual(layerDependency.layer_id, 2)
|
||||
self.assertEqual(layerDependency.layer, self.index.layerItems[2])
|
||||
self.assertFalse(layerDependency.required)
|
||||
self.assertEqual(layerDependency.dependency_id, 1)
|
||||
self.assertEqual(layerDependency.dependency, self.index.layerItems[1])
|
||||
self.assertEqual(layerDependency.dependency_layerBranch, self.index.layerBranches[1])
|
||||
|
||||
def test_recipe(self):
|
||||
recipe = self.index.recipes[1]
|
||||
self.assertEqual(recipe.id, 1)
|
||||
self.assertEqual(recipe.layerbranch_id, 1)
|
||||
self.assertEqual(recipe.layerbranch, self.index.layerBranches[1])
|
||||
self.assertEqual(recipe.layer_id, 1)
|
||||
self.assertEqual(recipe.layer, self.index.layerItems[1])
|
||||
self.assertEqual(recipe.filename, 'test_git.bb')
|
||||
self.assertEqual(recipe.filepath, 'recipes-test')
|
||||
self.assertEqual(recipe.fullpath, 'recipes-test/test_git.bb')
|
||||
self.assertEqual(recipe.summary, "")
|
||||
self.assertEqual(recipe.description, "")
|
||||
self.assertEqual(recipe.section, "")
|
||||
self.assertEqual(recipe.pn, 'test')
|
||||
self.assertEqual(recipe.pv, 'git')
|
||||
self.assertEqual(recipe.license, "")
|
||||
self.assertEqual(recipe.homepage, "")
|
||||
self.assertEqual(recipe.bugtracker, "")
|
||||
self.assertEqual(recipe.provides, "")
|
||||
self.assertIsNotNone(recipe.updated)
|
||||
self.assertEqual(recipe.inherits, "")
|
||||
|
||||
def test_machine(self):
|
||||
machine = self.index.machines[1]
|
||||
self.assertEqual(machine.id, 1)
|
||||
self.assertEqual(machine.layerbranch_id, 1)
|
||||
self.assertEqual(machine.layerbranch, self.index.layerBranches[1])
|
||||
self.assertEqual(machine.layer_id, 1)
|
||||
self.assertEqual(machine.layer, self.index.layerItems[1])
|
||||
self.assertEqual(machine.name, 'test_machine')
|
||||
self.assertEqual(machine.description, 'test_machine')
|
||||
self.assertIsNotNone(machine.updated)
|
||||
|
||||
def test_distro(self):
|
||||
distro = self.index.distros[1]
|
||||
self.assertEqual(distro.id, 1)
|
||||
self.assertEqual(distro.layerbranch_id, 1)
|
||||
self.assertEqual(distro.layerbranch, self.index.layerBranches[1])
|
||||
self.assertEqual(distro.layer_id, 1)
|
||||
self.assertEqual(distro.layer, self.index.layerItems[1])
|
||||
self.assertEqual(distro.name, 'test_distro')
|
||||
self.assertEqual(distro.description, 'test_distro')
|
||||
self.assertIsNotNone(distro.updated)
|
||||
@@ -1,184 +0,0 @@
|
||||
# Copyright (C) 2017-2018 Wind River Systems, Inc.
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License version 2 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
|
||||
# See the GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
|
||||
import unittest
|
||||
import tempfile
|
||||
import os
|
||||
import bb
|
||||
|
||||
import layerindexlib
|
||||
from layerindexlib.tests.common import LayersTest
|
||||
|
||||
import logging
|
||||
|
||||
def skipIfNoNetwork():
|
||||
if os.environ.get("BB_SKIP_NETTESTS") == "yes":
|
||||
return unittest.skip("Network tests being skipped")
|
||||
return lambda f: f
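Note that skipIfNoNetwork is a decorator factory: it is invoked with parentheses and
returns either unittest.skip(...) (when BB_SKIP_NETTESTS=yes) or an identity function,
so the decorated test runs unchanged when network tests are allowed. Hypothetical
usage, mirroring the test methods below:

@skipIfNoNetwork()
def test_requires_network(self):
    ...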
|
||||
|
||||
class LayerIndexWebRestApiTest(LayersTest):
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def setUp(self):
|
||||
self.assertFalse(os.environ.get("BB_SKIP_NETTESTS") == "yes", msg="BB_SKIP_NETTESTS set, but we tried to test anyway")
|
||||
LayersTest.setUp(self)
|
||||
self.layerindex = layerindexlib.LayerIndex(self.d)
|
||||
self.layerindex.load_layerindex('http://layers.openembedded.org/layerindex/api/;branch=sumo', load=['layerDependencies'])
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_layerindex_is_empty(self):
|
||||
self.assertFalse(self.layerindex.is_empty(), msg="Layerindex is empty")
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_layerindex_store_file(self):
|
||||
self.layerindex.store_layerindex('file://%s/file.json' % self.tempdir, self.layerindex.indexes[0])
|
||||
|
||||
self.assertTrue(os.path.isfile('%s/file.json' % self.tempdir), msg="Temporary file was not created by store_layerindex")
|
||||
|
||||
reload = layerindexlib.LayerIndex(self.d)
|
||||
reload.load_layerindex('file://%s/file.json' % self.tempdir)
|
||||
|
||||
self.assertFalse(reload.is_empty(), msg="Layerindex is empty")
|
||||
|
||||
# Calculate layerItems in original index that should NOT be in reload
|
||||
layerItemNames = []
|
||||
for itemId in self.layerindex.indexes[0].layerItems:
|
||||
layerItemNames.append(self.layerindex.indexes[0].layerItems[itemId].name)
|
||||
|
||||
for layerBranchId in self.layerindex.indexes[0].layerBranches:
|
||||
layerItemNames.remove(self.layerindex.indexes[0].layerBranches[layerBranchId].layer.name)
|
||||
|
||||
for itemId in reload.indexes[0].layerItems:
|
||||
self.assertFalse(reload.indexes[0].layerItems[itemId].name in layerItemNames, msg="Item reloaded when it shouldn't have been")
|
||||
|
||||
# Compare the original to what we wrote...
|
||||
for type in self.layerindex.indexes[0]._index:
|
||||
if type == 'apilinks' or \
|
||||
type == 'layerItems' or \
|
||||
type in self.layerindex.indexes[0].config['local']:
|
||||
continue
|
||||
for id in getattr(self.layerindex.indexes[0], type):
|
||||
self.logger.debug(1, "type %s" % (type))
|
||||
|
||||
self.assertTrue(id in getattr(reload.indexes[0], type), msg="Id number not in reloaded index")
|
||||
|
||||
self.logger.debug(1, "%s ? %s" % (getattr(self.layerindex.indexes[0], type)[id], getattr(reload.indexes[0], type)[id]))
|
||||
|
||||
self.assertEqual(getattr(self.layerindex.indexes[0], type)[id], getattr(reload.indexes[0], type)[id], msg="Reloaded contents different")
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_layerindex_store_split(self):
|
||||
self.layerindex.store_layerindex('file://%s' % self.tempdir, self.layerindex.indexes[0])
|
||||
|
||||
reload = layerindexlib.LayerIndex(self.d)
|
||||
reload.load_layerindex('file://%s' % self.tempdir)
|
||||
|
||||
self.assertFalse(reload.is_empty(), msg="Layer index is empty")
|
||||
|
||||
for type in self.layerindex.indexes[0]._index:
|
||||
if type == 'apilinks' or \
|
||||
type == 'layerItems' or \
|
||||
type in self.layerindex.indexes[0].config['local']:
|
||||
continue
|
||||
for id in getattr(self.layerindex.indexes[0] ,type):
|
||||
self.logger.debug(1, "type %s" % (type))
|
||||
|
||||
self.assertTrue(id in getattr(reload.indexes[0], type), msg="Id number missing from reloaded data")
|
||||
|
||||
self.logger.debug(1, "%s ? %s" % (getattr(self.layerindex.indexes[0] ,type)[id], getattr(reload.indexes[0], type)[id]))
|
||||
|
||||
self.assertEqual(getattr(self.layerindex.indexes[0] ,type)[id], getattr(reload.indexes[0], type)[id], msg="reloaded data does not match original")
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_dependency_resolution(self):
|
||||
# Verify depth first searching...
|
||||
(dependencies, invalidnames) = self.layerindex.find_dependencies(names=['meta-python'])
|
||||
|
||||
first = True
|
||||
for deplayerbranch in dependencies:
|
||||
layerBranch = dependencies[deplayerbranch][0]
|
||||
layerDeps = dependencies[deplayerbranch][1:]
|
||||
|
||||
if not first:
|
||||
continue
|
||||
|
||||
first = False
|
||||
|
||||
# Top of the deps should be openembedded-core, since everything depends on it.
|
||||
self.assertEqual(layerBranch.layer.name, "openembedded-core", msg='OpenEmbedded-Core is no the first dependency')
|
||||
|
||||
# meta-python should cause an openembedded-core dependency, if not assert!
|
||||
for dep in layerDeps:
|
||||
if dep.layer.name == 'meta-python':
|
||||
break
|
||||
else:
|
||||
self.logger.debug(1, "meta-python was not found")
|
||||
self.assetTrue(False)
|
||||
|
||||
# Only check the first element...
|
||||
break
|
||||
else:
|
||||
# Empty list, this is bad.
|
||||
self.logger.debug(1, "Empty list of dependencies")
|
||||
self.assertFalse(first, msg="Empty list of dependencies")
|
||||
|
||||
# Last dep should be the requested item
|
||||
layerBranch = dependencies[deplayerbranch][0]
|
||||
self.assertEqual(layerBranch.layer.name, "meta-python", msg="Last dependency not meta-python")
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_find_collection(self):
|
||||
def _check(collection, expected):
|
||||
self.logger.debug(1, "Looking for collection %s..." % collection)
|
||||
result = self.layerindex.find_collection(collection)
|
||||
if expected:
|
||||
self.assertIsNotNone(result, msg="Did not find %s when it should be there" % collection)
|
||||
else:
|
||||
self.assertIsNone(result, msg="Found %s when it shouldn't be there" % collection)
|
||||
|
||||
tests = [ ('core', True),
|
||||
('openembedded-core', False),
|
||||
('networking-layer', True),
|
||||
('meta-python', True),
|
||||
('openembedded-layer', True),
|
||||
('notpresent', False) ]
|
||||
|
||||
for collection,result in tests:
|
||||
_check(collection, result)
|
||||
|
||||
@skipIfNoNetwork()
|
||||
def test_find_layerbranch(self):
|
||||
def _check(name, expected):
|
||||
self.logger.debug(1, "Looking for layerbranch %s..." % name)
|
||||
|
||||
for index in self.layerindex.indexes:
|
||||
for layerbranchid in index.layerBranches:
|
||||
self.logger.debug(1, "Present: %s" % index.layerBranches[layerbranchid].layer.name)
|
||||
result = self.layerindex.find_layerbranch(name)
|
||||
if expected:
|
||||
self.assertIsNotNone(result, msg="Did not find %s when it should be there" % collection)
|
||||
else:
|
||||
self.assertIsNone(result, msg="Found %s when it shouldn't be there" % collection)
|
||||
|
||||
tests = [ ('openembedded-core', True),
|
||||
('core', False),
|
||||
('meta-networking', True),
|
||||
('meta-python', True),
|
||||
('meta-oe', True),
|
||||
('notpresent', False) ]
|
||||
|
||||
for collection,result in tests:
|
||||
_check(collection, result)
|
||||
|
||||
11
bitbake/lib/layerindexlib/tests/testdata/README
vendored
@@ -1,11 +0,0 @@
|
||||
This test data is used to verify the 'cooker' module of the layerindex.

The module consists of a faux project bblayers.conf with four layers defined.

layer1 - openembedded-core
layer2 - networking-layer
layer3 - meta-python
layer4 - openembedded-layer (meta-oe)

Since we do not have a fully populated cooker, we use this to test the
basic index generation, and not any deep recipe based contents.
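The cooker plugin is exercised against this faux project by pointing TOPDIR/BBPATH at
the testdata directory and parsing each layer.conf, exactly as the cooker test above
does. A condensed sketch (the absolute path is hypothetical):

import bb
import layerindexlib

topdir = "/path/to/layerindexlib/tests/testdata"   # hypothetical checkout location
d = bb.data.init()
d.setVar('TOPDIR', topdir)
d.setVar('BBPATH', topdir + "/build")

d = bb.parse.handle("%s/conf/bblayers.conf" % d.getVar('BBPATH'), d, True)
for layer in d.getVar('BBLAYERS').split():
    d = bb.parse.handle("%s/conf/layer.conf" % layer, d, True)

index = layerindexlib.LayerIndex(d)
index.load_layerindex('cooker://', load=['layerDependencies'])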
@@ -1,15 +0,0 @@
|
||||
LAYERSERIES_CORENAMES = "sumo"
|
||||
|
||||
# LAYER_CONF_VERSION is increased each time build/conf/bblayers.conf
|
||||
# changes incompatibly
|
||||
LCONF_VERSION = "7"
|
||||
|
||||
BBPATH = "${TOPDIR}"
|
||||
BBFILES ?= ""
|
||||
|
||||
BBLAYERS ?= " \
|
||||
${TOPDIR}/layer1 \
|
||||
${TOPDIR}/layer2 \
|
||||
${TOPDIR}/layer3 \
|
||||
${TOPDIR}/layer4 \
|
||||
"
|
||||
@@ -1,17 +0,0 @@
|
||||
# We have a conf and classes directory, add to BBPATH
|
||||
BBPATH .= ":${LAYERDIR}"
|
||||
# We have recipes-* directories, add to BBFILES
|
||||
BBFILES += "${LAYERDIR}/recipes-*/*/*.bb"
|
||||
|
||||
BBFILE_COLLECTIONS += "core"
|
||||
BBFILE_PATTERN_core = "^${LAYERDIR}/"
|
||||
BBFILE_PRIORITY_core = "5"
|
||||
|
||||
LAYERSERIES_CORENAMES = "sumo"
|
||||
|
||||
# This should only be incremented on significant changes that will
|
||||
# cause compatibility issues with other layers
|
||||
LAYERVERSION_core = "11"
|
||||
LAYERSERIES_COMPAT_core = "sumo"
|
||||
|
||||
BBLAYERS_LAYERINDEX_NAME_core = "openembedded-core"
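The "recipes-*/*/*.bb" value in BBFILES above is an ordinary filesystem glob, so each
'*' matches within a single path component. A quick Python check of what such a
pattern picks up (the layer path and recipe name are hypothetical):

import glob
import os

layerdir = "/path/to/layer1"                       # hypothetical layer checkout
pattern = os.path.join(layerdir, "recipes-*", "*", "*.bb")
print(glob.glob(pattern))                          # e.g. .../recipes-core/pkg/pkg_1.0.bb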
|
||||
@@ -1,20 +0,0 @@
|
||||
# We have a conf and classes directory, add to BBPATH
|
||||
BBPATH .= ":${LAYERDIR}"
|
||||
|
||||
# We have a packages directory, add to BBFILES
|
||||
BBFILES += "${LAYERDIR}/recipes-*/*/*.bb \
|
||||
${LAYERDIR}/recipes-*/*/*.bbappend"
|
||||
|
||||
BBFILE_COLLECTIONS += "networking-layer"
|
||||
BBFILE_PATTERN_networking-layer := "^${LAYERDIR}/"
|
||||
BBFILE_PRIORITY_networking-layer = "5"
|
||||
|
||||
# This should only be incremented on significant changes that will
|
||||
# cause compatibility issues with other layers
|
||||
LAYERVERSION_networking-layer = "1"
|
||||
|
||||
LAYERDEPENDS_networking-layer = "core"
|
||||
LAYERDEPENDS_networking-layer += "openembedded-layer"
|
||||
LAYERDEPENDS_networking-layer += "meta-python"
|
||||
|
||||
LAYERSERIES_COMPAT_networking-layer = "sumo"
|
||||
@@ -1,19 +0,0 @@
|
||||
# We might have a conf and classes directory, append to BBPATH
|
||||
BBPATH .= ":${LAYERDIR}"
|
||||
|
||||
# We have recipes directories, add to BBFILES
|
||||
BBFILES += "${LAYERDIR}/recipes*/*/*.bb ${LAYERDIR}/recipes*/*/*.bbappend"
|
||||
|
||||
BBFILE_COLLECTIONS += "meta-python"
|
||||
BBFILE_PATTERN_meta-python := "^${LAYERDIR}/"
|
||||
BBFILE_PRIORITY_meta-python = "7"
|
||||
|
||||
# This should only be incremented on significant changes that will
|
||||
# cause compatibility issues with other layers
|
||||
LAYERVERSION_meta-python = "1"
|
||||
|
||||
LAYERDEPENDS_meta-python = "core openembedded-layer"
|
||||
|
||||
LAYERSERIES_COMPAT_meta-python = "sumo"
|
||||
|
||||
LICENSE_PATH += "${LAYERDIR}/licenses"
|
||||
@@ -1,22 +0,0 @@
|
||||
# We have a conf and classes directory, append to BBPATH
|
||||
BBPATH .= ":${LAYERDIR}"
|
||||
|
||||
# We have a recipes directory, add to BBFILES
|
||||
BBFILES += "${LAYERDIR}/recipes-*/*/*.bb ${LAYERDIR}/recipes-*/*/*.bbappend"
|
||||
|
||||
BBFILE_COLLECTIONS += "openembedded-layer"
|
||||
BBFILE_PATTERN_openembedded-layer := "^${LAYERDIR}/"
|
||||
|
||||
# Define the priority for recipes (.bb files) from this layer,
|
||||
# choosing carefully how this layer interacts with all of the
|
||||
# other layers.
|
||||
|
||||
BBFILE_PRIORITY_openembedded-layer = "6"
|
||||
|
||||
# This should only be incremented on significant changes that will
|
||||
# cause compatibility issues with other layers
|
||||
LAYERVERSION_openembedded-layer = "1"
|
||||
|
||||
LAYERDEPENDS_openembedded-layer = "core"
|
||||
|
||||
LAYERSERIES_COMPAT_openembedded-layer = "sumo"
|
||||
@@ -17,7 +17,7 @@
|
||||
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
|
||||
from django.conf.urls import include, url
|
||||
from django.conf.urls import patterns, include, url
|
||||
|
||||
import bldcollector.views
|
||||
|
||||
|
||||
@@ -27,9 +27,8 @@ import shutil
|
||||
import time
|
||||
from django.db import transaction
|
||||
from django.db.models import Q
|
||||
from bldcontrol.models import BuildEnvironment, BuildRequest, BRLayer, BRVariable, BRTarget, BRBitbake, Build
|
||||
from orm.models import CustomImageRecipe, Layer, Layer_Version, Project, ProjectLayer, ToasterSetting
|
||||
from orm.models import signal_runbuilds
|
||||
from bldcontrol.models import BuildEnvironment, BRLayer, BRVariable, BRTarget, BRBitbake
|
||||
from orm.models import CustomImageRecipe, Layer, Layer_Version, ProjectLayer, ToasterSetting
|
||||
import subprocess
|
||||
|
||||
from toastermain import settings
|
||||
@@ -39,8 +38,6 @@ from bldcontrol.bbcontroller import BuildEnvironmentController, ShellCmdExceptio
|
||||
import logging
|
||||
logger = logging.getLogger("toaster")
|
||||
|
||||
install_dir = os.environ.get('TOASTER_DIR')
|
||||
|
||||
from pprint import pprint, pformat
|
||||
|
||||
class LocalhostBEController(BuildEnvironmentController):
|
||||
@@ -55,14 +52,12 @@ class LocalhostBEController(BuildEnvironmentController):
|
||||
self.pokydirname = None
|
||||
self.islayerset = False
|
||||
|
||||
def _shellcmd(self, command, cwd=None, nowait=False,env=None):
|
||||
def _shellcmd(self, command, cwd=None, nowait=False):
|
||||
if cwd is None:
|
||||
cwd = self.be.sourcedir
|
||||
if env is None:
|
||||
env=os.environ.copy()
|
||||
|
||||
logger.debug("lbc_shellcmd: (%s) %s" % (cwd, command))
|
||||
p = subprocess.Popen(command, cwd = cwd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
|
||||
logger.debug("lbc_shellcmmd: (%s) %s" % (cwd, command))
|
||||
p = subprocess.Popen(command, cwd = cwd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
if nowait:
|
||||
return
|
||||
(out,err) = p.communicate()
|
||||
@@ -90,10 +85,10 @@ class LocalhostBEController(BuildEnvironmentController):
|
||||
#logger.debug("localhostbecontroller: using HEAD checkout in %s" % local_checkout_path)
|
||||
return local_checkout_path
|
||||
|
||||
def setCloneStatus(self,bitbake,status,total,current,repo_name):
|
||||
|
||||
def setCloneStatus(self,bitbake,status,total,current):
|
||||
bitbake.req.build.repos_cloned=current
|
||||
bitbake.req.build.repos_to_clone=total
|
||||
bitbake.req.build.progress_item=repo_name
|
||||
bitbake.req.build.save()
|
||||
|
||||
def setLayers(self, bitbake, layers, targets):
|
||||
@@ -103,9 +98,6 @@ class LocalhostBEController(BuildEnvironmentController):
|
||||
|
||||
layerlist = []
|
||||
nongitlayerlist = []
|
||||
layer_index = 0
|
||||
git_env = os.environ.copy()
|
||||
# (note: add custom environment settings here)
|
||||
|
||||
# set layers in the layersource
|
||||
|
||||
@@ -117,7 +109,7 @@ class LocalhostBEController(BuildEnvironmentController):
|
||||
if bitbake.giturl and bitbake.commit:
|
||||
gitrepos[(bitbake.giturl, bitbake.commit)] = []
|
||||
gitrepos[(bitbake.giturl, bitbake.commit)].append(
|
||||
("bitbake", bitbake.dirpath, 0))
|
||||
("bitbake", bitbake.dirpath))
|
||||
|
||||
for layer in layers:
|
||||
# We don't need to git clone the layer for the CustomImageRecipe
|
||||
@@ -128,13 +120,12 @@ class LocalhostBEController(BuildEnvironmentController):
|
||||
# If we have local layers then we don't need clone them
|
||||
# For local layers giturl will be empty
|
||||
if not layer.giturl:
|
||||
nongitlayerlist.append( "%03d:%s" % (layer_index,layer.local_source_dir) )
|
||||
nongitlayerlist.append(layer.layer_version.layer.local_source_dir)
|
||||
continue
|
||||
|
||||
if not (layer.giturl, layer.commit) in gitrepos:
|
||||
gitrepos[(layer.giturl, layer.commit)] = []
|
||||
gitrepos[(layer.giturl, layer.commit)].append( (layer.name,layer.dirpath,layer_index) )
|
||||
layer_index += 1
|
||||
gitrepos[(layer.giturl, layer.commit)].append( (layer.name, layer.dirpath) )
|
||||
|
||||
|
||||
logger.debug("localhostbecontroller, our git repos are %s" % pformat(gitrepos))
|
||||
@@ -147,7 +138,7 @@ class LocalhostBEController(BuildEnvironmentController):
|
||||
cached_layers = {}
|
||||
|
||||
try:
|
||||
for remotes in self._shellcmd("git remote -v", self.be.sourcedir,env=git_env).split("\n"):
|
||||
for remotes in self._shellcmd("git remote -v", self.be.sourcedir).split("\n"):
|
||||
try:
|
||||
remote = remotes.split("\t")[1].split(" ")[0]
|
||||
if remote not in cached_layers:
|
||||
@@ -164,9 +155,9 @@ class LocalhostBEController(BuildEnvironmentController):
|
||||
# 3. checkout the repositories
|
||||
clone_count=0
|
||||
clone_total=len(gitrepos.keys())
|
||||
self.setCloneStatus(bitbake,'Started',clone_total,clone_count,'')
|
||||
self.setCloneStatus(bitbake,'Started',clone_total,clone_count)
|
||||
for giturl, commit in gitrepos.keys():
|
||||
self.setCloneStatus(bitbake,'progress',clone_total,clone_count,gitrepos[(giturl, commit)][0][0])
|
||||
self.setCloneStatus(bitbake,'progress',clone_total,clone_count)
|
||||
clone_count += 1
|
||||
|
||||
localdirname = os.path.join(self.be.sourcedir, self.getGitCloneDirectory(giturl, commit))
|
||||
@@ -176,12 +167,9 @@ class LocalhostBEController(BuildEnvironmentController):
|
||||
if os.path.exists(localdirname):
|
||||
try:
|
||||
localremotes = self._shellcmd("git remote -v",
|
||||
localdirname,env=git_env)
|
||||
# NOTE: this nice-to-have check breaks when using git remapping to get past a firewall
|
||||
# Re-enable later with .gitconfig remapping checks
|
||||
#if not giturl in localremotes and commit != 'HEAD':
|
||||
# raise BuildSetupException("Existing git repository at %s, but with different remotes ('%s', expected '%s'). Toaster will not continue out of fear of damaging something." % (localdirname, ", ".join(localremotes.split("\n")), giturl))
|
||||
pass
|
||||
localdirname)
|
||||
if not giturl in localremotes and commit != 'HEAD':
|
||||
raise BuildSetupException("Existing git repository at %s, but with different remotes ('%s', expected '%s'). Toaster will not continue out of fear of damaging something." % (localdirname, ", ".join(localremotes.split("\n")), giturl))
|
||||
except ShellCmdException:
|
||||
# our localdirname might not be a git repository
|
||||
#- that's fine
|
||||
@@ -189,18 +177,18 @@ class LocalhostBEController(BuildEnvironmentController):
|
||||
else:
|
||||
if giturl in cached_layers:
|
||||
logger.debug("localhostbecontroller git-copying %s to %s" % (cached_layers[giturl], localdirname))
|
||||
self._shellcmd("git clone \"%s\" \"%s\"" % (cached_layers[giturl], localdirname),env=git_env)
|
||||
self._shellcmd("git remote remove origin", localdirname,env=git_env)
|
||||
self._shellcmd("git remote add origin \"%s\"" % giturl, localdirname,env=git_env)
|
||||
self._shellcmd("git clone \"%s\" \"%s\"" % (cached_layers[giturl], localdirname))
|
||||
self._shellcmd("git remote remove origin", localdirname)
|
||||
self._shellcmd("git remote add origin \"%s\"" % giturl, localdirname)
|
||||
else:
|
||||
logger.debug("localhostbecontroller: cloning %s in %s" % (giturl, localdirname))
|
||||
self._shellcmd('git clone "%s" "%s"' % (giturl, localdirname),env=git_env)
|
||||
self._shellcmd('git clone "%s" "%s"' % (giturl, localdirname))
|
||||
|
||||
# branch magic name "HEAD" will inhibit checkout
|
||||
if commit != "HEAD":
|
||||
logger.debug("localhostbecontroller: checking out commit %s to %s " % (commit, localdirname))
|
||||
ref = commit if re.match('^[a-fA-F0-9]+$', commit) else 'origin/%s' % commit
|
||||
self._shellcmd('git fetch && git reset --hard "%s"' % ref, localdirname,env=git_env)
|
||||
self._shellcmd('git fetch --all && git reset --hard "%s"' % ref, localdirname)
|
||||
|
||||
# take the localdirname as poky dir if we can find the oe-init-build-env
|
||||
if self.pokydirname is None and os.path.exists(os.path.join(localdirname, "oe-init-build-env")):
|
||||
@@ -210,36 +198,24 @@ class LocalhostBEController(BuildEnvironmentController):
|
||||
# make sure we have a working bitbake
|
||||
if not os.path.exists(os.path.join(self.pokydirname, 'bitbake')):
|
||||
logger.debug("localhostbecontroller: checking bitbake into the poky dirname %s " % self.pokydirname)
|
||||
self._shellcmd("git clone -b \"%s\" \"%s\" \"%s\" " % (bitbake.commit, bitbake.giturl, os.path.join(self.pokydirname, 'bitbake')),env=git_env)
|
||||
self._shellcmd("git clone -b \"%s\" \"%s\" \"%s\" " % (bitbake.commit, bitbake.giturl, os.path.join(self.pokydirname, 'bitbake')))
|
||||
|
||||
# verify our repositories
|
||||
for name, dirpath, index in gitrepos[(giturl, commit)]:
|
||||
for name, dirpath in gitrepos[(giturl, commit)]:
|
||||
localdirpath = os.path.join(localdirname, dirpath)
|
||||
logger.debug("localhostbecontroller: localdirpath expects '%s'" % localdirpath)
|
||||
logger.debug("localhostbecontroller: localdirpath expected '%s'" % localdirpath)
|
||||
if not os.path.exists(localdirpath):
|
||||
raise BuildSetupException("Cannot find layer git path '%s' in checked out repository '%s:%s'. Aborting." % (localdirpath, giturl, commit))
|
||||
|
||||
if name != "bitbake":
|
||||
layerlist.append("%03d:%s" % (index,localdirpath.rstrip("/")))
|
||||
layerlist.append(localdirpath.rstrip("/"))
|
||||
|
||||
self.setCloneStatus(bitbake,'complete',clone_total,clone_count,'')
|
||||
self.setCloneStatus(bitbake,'complete',clone_total,clone_count)
|
||||
logger.debug("localhostbecontroller: current layer list %s " % pformat(layerlist))
|
||||
|
||||
# Resolve self.pokydirname if not resolved yet, consider the scenario
|
||||
# where all layers are local, that's the else clause
|
||||
if self.pokydirname is None:
|
||||
if os.path.exists(os.path.join(self.be.sourcedir, "oe-init-build-env")):
|
||||
logger.debug("localhostbecontroller: selected poky dir name %s" % self.be.sourcedir)
|
||||
self.pokydirname = self.be.sourcedir
|
||||
else:
|
||||
# Alternatively, scan local layers for relative "oe-init-build-env" location
|
||||
for layer in layers:
|
||||
if os.path.exists(os.path.join(layer.layer_version.layer.local_source_dir,"..","oe-init-build-env")):
|
||||
logger.debug("localhostbecontroller, setting pokydirname to %s" % (layer.layer_version.layer.local_source_dir))
|
||||
self.pokydirname = os.path.join(layer.layer_version.layer.local_source_dir,"..")
|
||||
break
|
||||
else:
|
||||
logger.error("pokydirname is not set, you will run into trouble!")
|
||||
if self.pokydirname is None and os.path.exists(os.path.join(self.be.sourcedir, "oe-init-build-env")):
|
||||
logger.debug("localhostbecontroller: selected poky dir name %s" % self.be.sourcedir)
|
||||
self.pokydirname = self.be.sourcedir
|
||||
|
||||
# 5. create custom layer and add custom recipes to it
|
||||
for target in targets:
|
||||
@@ -252,7 +228,7 @@ class LocalhostBEController(BuildEnvironmentController):
|
||||
customrecipe, layers)
|
||||
|
||||
if os.path.isdir(custom_layer_path):
|
||||
layerlist.append("%03d:%s" % (layer_index,custom_layer_path))
|
||||
layerlist.append(custom_layer_path)
|
||||
|
||||
except CustomImageRecipe.DoesNotExist:
|
||||
continue # not a custom recipe, skip
|
||||
@@ -260,11 +236,7 @@ class LocalhostBEController(BuildEnvironmentController):
|
||||
layerlist.extend(nongitlayerlist)
|
||||
logger.debug("\n\nset layers gives this list %s" % pformat(layerlist))
|
||||
self.islayerset = True
|
||||
|
||||
# restore the order of layer list for bblayers.conf
|
||||
layerlist.sort()
|
||||
sorted_layerlist = [l[4:] for l in layerlist]
|
||||
return sorted_layerlist
|
||||
return layerlist
|
||||
|
||||
def setup_custom_image_recipe(self, customrecipe, layers):
|
||||
""" Set up toaster-custom-images layer and recipe files """
|
||||
@@ -334,144 +306,41 @@ class LocalhostBEController(BuildEnvironmentController):
|
||||
|
||||
def triggerBuild(self, bitbake, layers, variables, targets, brbe):
|
||||
layers = self.setLayers(bitbake, layers, targets)
|
||||
is_merged_attr = bitbake.req.project.merged_attr
|
||||
|
||||
git_env = os.environ.copy()
|
||||
# (note: add custom environment settings here)
|
||||
try:
|
||||
# ensure that the project init/build uses the selected bitbake, and not Toaster's
|
||||
del git_env['TEMPLATECONF']
|
||||
del git_env['BBBASEDIR']
|
||||
del git_env['BUILDDIR']
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
# init build environment from the clone
|
||||
if bitbake.req.project.builddir:
|
||||
builddir = bitbake.req.project.builddir
|
||||
else:
|
||||
builddir = '%s-toaster-%d' % (self.be.builddir, bitbake.req.project.id)
|
||||
builddir = '%s-toaster-%d' % (self.be.builddir, bitbake.req.project.id)
|
||||
oe_init = os.path.join(self.pokydirname, 'oe-init-build-env')
|
||||
# init build environment
|
||||
try:
|
||||
custom_script = ToasterSetting.objects.get(name="CUSTOM_BUILD_INIT_SCRIPT").value
|
||||
custom_script = custom_script.replace("%BUILDDIR%" ,builddir)
|
||||
self._shellcmd("bash -c 'source %s'" % (custom_script),env=git_env)
|
||||
self._shellcmd("bash -c 'source %s'" % (custom_script))
|
||||
except ToasterSetting.DoesNotExist:
|
||||
self._shellcmd("bash -c 'source %s %s'" % (oe_init, builddir),
|
||||
self.be.sourcedir,env=git_env)
|
||||
self.be.sourcedir)
|
||||
|
||||
# update bblayers.conf
|
||||
if not is_merged_attr:
|
||||
bblconfpath = os.path.join(builddir, "conf/toaster-bblayers.conf")
|
||||
with open(bblconfpath, 'w') as bblayers:
|
||||
bblayers.write('# line added by toaster build control\n'
|
||||
'BBLAYERS = "%s"' % ' '.join(layers))
|
||||
bblconfpath = os.path.join(builddir, "conf/toaster-bblayers.conf")
|
||||
with open(bblconfpath, 'w') as bblayers:
|
||||
bblayers.write('# line added by toaster build control\n'
|
||||
'BBLAYERS = "%s"' % ' '.join(layers))
|
||||
|
||||
# write configuration file
|
||||
confpath = os.path.join(builddir, 'conf/toaster.conf')
|
||||
with open(confpath, 'w') as conf:
|
||||
for var in variables:
|
||||
conf.write('%s="%s"\n' % (var.name, var.value))
|
||||
conf.write('INHERIT+="toaster buildhistory"')
|
||||
else:
|
||||
# Append the Toaster-specific values directly to the bblayers.conf
|
||||
bblconfpath = os.path.join(builddir, "conf/bblayers.conf")
|
||||
bblconfpath_save = os.path.join(builddir, "conf/bblayers.conf.save")
|
||||
shutil.copyfile(bblconfpath, bblconfpath_save)
|
||||
with open(bblconfpath) as bblayers:
|
||||
content = bblayers.readlines()
|
||||
do_write = True
|
||||
was_toaster = False
|
||||
with open(bblconfpath,'w') as bblayers:
|
||||
for line in content:
|
||||
#line = line.strip('\n')
|
||||
if 'TOASTER_CONFIG_PROLOG' in line:
|
||||
do_write = False
|
||||
was_toaster = True
|
||||
elif 'TOASTER_CONFIG_EPILOG' in line:
|
||||
do_write = True
|
||||
elif do_write:
|
||||
bblayers.write(line)
|
||||
if not was_toaster:
|
||||
bblayers.write('\n')
|
||||
bblayers.write('#=== TOASTER_CONFIG_PROLOG ===\n')
|
||||
bblayers.write('BBLAYERS = "\\\n')
|
||||
for layer in layers:
|
||||
bblayers.write(' %s \\\n' % layer)
|
||||
bblayers.write(' "\n')
|
||||
bblayers.write('#=== TOASTER_CONFIG_EPILOG ===\n')
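# Example of the section the loop above appends to conf/bblayers.conf when the
# markers are not already present (the layer paths are hypothetical):
#
#   #=== TOASTER_CONFIG_PROLOG ===
#   BBLAYERS = "\
#    /home/user/poky/meta \
#    /home/user/poky/meta-poky \
#    "
#   #=== TOASTER_CONFIG_EPILOG ===
#
# On a later run the TOASTER_CONFIG_PROLOG/EPILOG markers let the code above strip
# and rewrite only this block, leaving the user's own bblayers.conf content intact.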
|
||||
# Append the Toaster-specific values directly to the local.conf
|
||||
bbconfpath = os.path.join(builddir, "conf/local.conf")
|
||||
bbconfpath_save = os.path.join(builddir, "conf/local.conf.save")
|
||||
shutil.copyfile(bbconfpath, bbconfpath_save)
|
||||
with open(bbconfpath) as f:
|
||||
content = f.readlines()
|
||||
do_write = True
|
||||
was_toaster = False
|
||||
with open(bbconfpath,'w') as conf:
|
||||
for line in content:
|
||||
#line = line.strip('\n')
|
||||
if 'TOASTER_CONFIG_PROLOG' in line:
|
||||
do_write = False
|
||||
was_toaster = True
|
||||
elif 'TOASTER_CONFIG_EPILOG' in line:
|
||||
do_write = True
|
||||
elif do_write:
|
||||
conf.write(line)
|
||||
if not was_toaster:
|
||||
conf.write('\n')
|
||||
conf.write('#=== TOASTER_CONFIG_PROLOG ===\n')
|
||||
for var in variables:
|
||||
if (not var.name.startswith("INTERNAL_")) and (not var.name == "BBLAYERS"):
|
||||
conf.write('%s="%s"\n' % (var.name, var.value))
|
||||
conf.write('#=== TOASTER_CONFIG_EPILOG ===\n')
|
||||
# write configuration file
|
||||
confpath = os.path.join(builddir, 'conf/toaster.conf')
|
||||
with open(confpath, 'w') as conf:
|
||||
for var in variables:
|
||||
conf.write('%s="%s"\n' % (var.name, var.value))
|
||||
conf.write('INHERIT+="toaster buildhistory"')
|
||||
|
||||
# If 'target' is just the project preparation target, then we are done
|
||||
for target in targets:
|
||||
if "_PROJECT_PREPARE_" == target.target:
|
||||
logger.debug('localhostbecontroller: Project has been prepared. Done.')
|
||||
# Update the Build Request and release the build environment
|
||||
bitbake.req.state = BuildRequest.REQ_COMPLETED
|
||||
bitbake.req.save()
|
||||
self.be.lock = BuildEnvironment.LOCK_FREE
|
||||
self.be.save()
|
||||
# Close the project build and progress bar
|
||||
bitbake.req.build.outcome = Build.SUCCEEDED
|
||||
bitbake.req.build.save()
|
||||
# Update the project status
|
||||
bitbake.req.project.set_variable(Project.PROJECT_SPECIFIC_STATUS,Project.PROJECT_SPECIFIC_CLONING_SUCCESS)
|
||||
signal_runbuilds()
|
||||
return
|
||||
|
||||
# clean the Toaster to build environment
|
||||
env_clean = 'unset BBPATH;' # clean BBPATH for <= YP-2.4.0
|
||||
|
||||
# run bitbake server from the clone if available
|
||||
# otherwise pick it from the PATH
|
||||
# run bitbake server from the clone
|
||||
bitbake = os.path.join(self.pokydirname, 'bitbake', 'bin', 'bitbake')
|
||||
if not os.path.exists(bitbake):
|
||||
logger.info("Bitbake not available under %s, will try to use it from PATH" %
|
||||
self.pokydirname)
|
||||
for path in os.environ["PATH"].split(os.pathsep):
|
||||
if os.path.exists(os.path.join(path, 'bitbake')):
|
||||
bitbake = os.path.join(path, 'bitbake')
|
||||
break
|
||||
else:
|
||||
logger.error("Looks like Bitbake is not available, please fix your environment")
|
||||
|
||||
toasterlayers = os.path.join(builddir,"conf/toaster-bblayers.conf")
|
||||
if not is_merged_attr:
|
||||
self._shellcmd('%s bash -c \"source %s %s; BITBAKE_UI="knotty" %s --read %s --read %s '
|
||||
'--server-only -B 0.0.0.0:0\"' % (env_clean, oe_init,
|
||||
builddir, bitbake, confpath, toasterlayers), self.be.sourcedir)
|
||||
else:
|
||||
self._shellcmd('%s bash -c \"source %s %s; BITBAKE_UI="knotty" %s '
|
||||
'--server-only -B 0.0.0.0:0\"' % (env_clean, oe_init,
|
||||
builddir, bitbake), self.be.sourcedir)
|
||||
self._shellcmd('bash -c \"source %s %s; BITBAKE_UI="knotty" %s --read %s --read %s '
|
||||
'--server-only -B 0.0.0.0:0\"' % (oe_init,
|
||||
builddir, bitbake, confpath, toasterlayers), self.be.sourcedir)
|
||||
|
||||
# read port number from bitbake.lock
|
||||
self.be.bbport = -1
|
||||
self.be.bbport = ""
|
||||
bblock = os.path.join(builddir, 'bitbake.lock')
|
||||
# allow 10 seconds for bb lock file to appear but also be populated
|
||||
for lock_check in range(10):
|
||||
@@ -483,9 +352,6 @@ class LocalhostBEController(BuildEnvironmentController):
|
||||
break
|
||||
logger.debug("localhostbecontroller: waiting for bblock content to appear")
|
||||
time.sleep(1)
|
||||
else:
|
||||
raise BuildSetupException("Cannot find bitbake server lock file '%s'. Aborting." % bblock)
|
||||
|
||||
with open(bblock) as fplock:
|
||||
for line in fplock:
|
||||
if ":" in line:
|
||||
@@ -493,7 +359,7 @@ class LocalhostBEController(BuildEnvironmentController):
|
||||
logger.debug("localhostbecontroller: bitbake port %s", self.be.bbport)
|
||||
break
|
||||
|
||||
if -1 == self.be.bbport:
|
||||
if not self.be.bbport:
|
||||
raise BuildSetupException("localhostbecontroller: can't read bitbake port from %s" % bblock)
|
||||
|
||||
self.be.bbaddress = "localhost"
|
||||
@@ -514,18 +380,10 @@ class LocalhostBEController(BuildEnvironmentController):
|
||||
log = os.path.join(builddir, 'toaster_ui.log')
|
||||
local_bitbake = os.path.join(os.path.dirname(os.getenv('BBBASEDIR')),
|
||||
'bitbake')
|
||||
if not is_merged_attr:
|
||||
self._shellcmd(['%s bash -c \"(TOASTER_BRBE="%s" BBSERVER="0.0.0.0:%s" '
|
||||
self._shellcmd(['bash -c \"(TOASTER_BRBE="%s" BBSERVER="0.0.0.0:%s" '
|
||||
'%s %s -u toasterui --read %s --read %s --token="" >>%s 2>&1;'
|
||||
'BITBAKE_UI="knotty" BBSERVER=0.0.0.0:%s %s -m)&\"' \
|
||||
% (env_clean, brbe, self.be.bbport, local_bitbake, bbtargets, confpath, toasterlayers, log,
|
||||
self.be.bbport, bitbake,)],
|
||||
builddir, nowait=True)
|
||||
else:
|
||||
self._shellcmd(['%s bash -c \"(TOASTER_BRBE="%s" BBSERVER="0.0.0.0:%s" '
|
||||
'%s %s -u toasterui --token="" >>%s 2>&1;'
|
||||
'BITBAKE_UI="knotty" BBSERVER=0.0.0.0:%s %s -m)&\"' \
|
||||
% (env_clean, brbe, self.be.bbport, local_bitbake, bbtargets, log,
|
||||
% (brbe, self.be.bbport, local_bitbake, bbtargets, confpath, toasterlayers, log,
|
||||
self.be.bbport, bitbake,)],
|
||||
builddir, nowait=True)
|
||||
|
||||
|
||||
@@ -74,9 +74,8 @@ class Command(BaseCommand):
|
||||
print("Loading default settings")
|
||||
call_command("loaddata", "settings")
|
||||
template_conf = os.environ.get("TEMPLATECONF", "")
|
||||
custom_xml_only = os.environ.get("CUSTOM_XML_ONLY")
|
||||
|
||||
if ToasterSetting.objects.filter(name='CUSTOM_XML_ONLY').count() > 0 or (not custom_xml_only == None):
|
||||
if ToasterSetting.objects.filter(name='CUSTOM_XML_ONLY').count() > 0:
|
||||
# only use the custom settings
|
||||
pass
|
||||
elif "poky" in template_conf:
|
||||
@@ -108,10 +107,7 @@ class Command(BaseCommand):
|
||||
action="ignore",
|
||||
message="^.*No fixture named.*$")
|
||||
print("Importing custom settings if present")
|
||||
try:
|
||||
call_command("loaddata", "custom")
|
||||
except:
|
||||
print("NOTE: optional fixture 'custom' not found")
|
||||
call_command("loaddata", "custom")
|
||||
|
||||
# we run lsupdates after config update
|
||||
print("\nFetching information from the layer index, "
|
||||
|
||||
@@ -49,7 +49,7 @@ class Command(BaseCommand):
|
||||
# we could not find a BEC; postpone the BR
|
||||
br.state = BuildRequest.REQ_QUEUED
|
||||
br.save()
|
||||
logger.debug("runbuilds: No build env (%s)" % e)
|
||||
logger.debug("runbuilds: No build env")
|
||||
return
|
||||
|
||||
logger.info("runbuilds: starting build %s, environment %s" %
|
||||
|
||||
@@ -8,9 +8,9 @@

<!-- Bitbake versions which correspond to the metadata release -->
<object model="orm.bitbakeversion" pk="1">
<field type="CharField" name="name">sumo</field>
<field type="CharField" name="name">rocko</field>
<field type="CharField" name="giturl">git://git.openembedded.org/bitbake</field>
<field type="CharField" name="branch">1.38</field>
<field type="CharField" name="branch">1.36</field>
</object>
<object model="orm.bitbakeversion" pk="2">
<field type="CharField" name="name">HEAD</field>
@@ -22,19 +22,14 @@
<field type="CharField" name="giturl">git://git.openembedded.org/bitbake</field>
<field type="CharField" name="branch">master</field>
</object>
<object model="orm.bitbakeversion" pk="4">
<field type="CharField" name="name">thud</field>
<field type="CharField" name="giturl">git://git.openembedded.org/bitbake</field>
<field type="CharField" name="branch">1.40</field>
</object>

<!-- Releases available -->
<object model="orm.release" pk="1">
<field type="CharField" name="name">sumo</field>
<field type="CharField" name="description">Openembedded Sumo</field>
<field type="CharField" name="name">rocko</field>
<field type="CharField" name="description">Openembedded Rocko</field>
<field rel="ManyToOneRel" to="orm.bitbakeversion" name="bitbake_version">1</field>
<field type="CharField" name="branch_name">sumo</field>
<field type="TextField" name="helptext">Toaster will run your builds using the tip of the <a href=\"http://cgit.openembedded.org/openembedded-core/log/?h=sumo\">OpenEmbedded Sumo</a> branch.</field>
<field type="CharField" name="branch_name">rocko</field>
<field type="TextField" name="helptext">Toaster will run your builds using the tip of the <a href=\"http://cgit.openembedded.org/openembedded-core/log/?h=rocko\">OpenEmbedded Rocko</a> branch.</field>
</object>
<object model="orm.release" pk="2">
<field type="CharField" name="name">local</field>
@@ -50,13 +45,6 @@
<field type="CharField" name="branch_name">master</field>
<field type="TextField" name="helptext">Toaster will run your builds using the tip of the <a href=\"http://cgit.openembedded.org/openembedded-core/log/\">OpenEmbedded master</a> branch.</field>
</object>
<object model="orm.release" pk="4">
<field type="CharField" name="name">thud</field>
<field type="CharField" name="description">Openembedded Thud</field>
<field rel="ManyToOneRel" to="orm.bitbakeversion" name="bitbake_version">4</field>
<field type="CharField" name="branch_name">thud</field>
<field type="TextField" name="helptext">Toaster will run your builds using the tip of the <a href=\"http://cgit.openembedded.org/openembedded-core/log/?h=thud\">OpenEmbedded Thud</a> branch.</field>
</object>

<!-- Default layers for each release -->
<object model="orm.releasedefaultlayer" pk="1">
@@ -71,10 +59,6 @@
<field rel="ManyToOneRel" to="orm.release" name="release">3</field>
<field type="CharField" name="layer_name">openembedded-core</field>
</object>
<object model="orm.releasedefaultlayer" pk="4">
<field rel="ManyToOneRel" to="orm.release" name="release">4</field>
<field type="CharField" name="layer_name">openembedded-core</field>
</object>

<!-- Layer for the Local release -->
@@ -8,9 +8,9 @@
|
||||
|
||||
<!-- Bitbake versions which correspond to the metadata release -->
|
||||
<object model="orm.bitbakeversion" pk="1">
|
||||
<field type="CharField" name="name">sumo</field>
|
||||
<field type="CharField" name="name">rocko</field>
|
||||
<field type="CharField" name="giturl">git://git.yoctoproject.org/poky</field>
|
||||
<field type="CharField" name="branch">sumo</field>
|
||||
<field type="CharField" name="branch">rocko</field>
|
||||
<field type="CharField" name="dirpath">bitbake</field>
|
||||
</object>
|
||||
<object model="orm.bitbakeversion" pk="2">
|
||||
@@ -25,21 +25,15 @@
|
||||
<field type="CharField" name="branch">master</field>
|
||||
<field type="CharField" name="dirpath">bitbake</field>
|
||||
</object>
|
||||
<object model="orm.bitbakeversion" pk="4">
|
||||
<field type="CharField" name="name">thud</field>
|
||||
<field type="CharField" name="giturl">git://git.yoctoproject.org/poky</field>
|
||||
<field type="CharField" name="branch">thud</field>
|
||||
<field type="CharField" name="dirpath">bitbake</field>
|
||||
</object>
|
||||
|
||||
|
||||
<!-- Releases available -->
|
||||
<object model="orm.release" pk="1">
|
||||
<field type="CharField" name="name">sumo</field>
|
||||
<field type="CharField" name="description">Yocto Project 2.5 "Sumo"</field>
|
||||
<field type="CharField" name="name">rocko</field>
|
||||
<field type="CharField" name="description">Yocto Project 2.4 "Rocko"</field>
|
||||
<field rel="ManyToOneRel" to="orm.bitbakeversion" name="bitbake_version">1</field>
|
||||
<field type="CharField" name="branch_name">sumo</field>
|
||||
<field type="TextField" name="helptext">Toaster will run your builds using the tip of the <a href="http://git.yoctoproject.org/cgit/cgit.cgi/poky/log/?h=sumo">Yocto Project Sumo branch</a>.</field>
|
||||
<field type="CharField" name="branch_name">rocko</field>
|
||||
<field type="TextField" name="helptext">Toaster will run your builds using the tip of the <a href="http://git.yoctoproject.org/cgit/cgit.cgi/poky/log/?h=rocko">Yocto Project Rocko branch</a>.</field>
|
||||
</object>
|
||||
<object model="orm.release" pk="2">
|
||||
<field type="CharField" name="name">local</field>
|
||||
@@ -55,13 +49,6 @@
|
||||
<field type="CharField" name="branch_name">master</field>
|
||||
<field type="TextField" name="helptext">Toaster will run your builds using the tip of the <a href="http://git.yoctoproject.org/cgit/cgit.cgi/poky/log/">Yocto Project Master branch</a>.</field>
|
||||
</object>
|
||||
<object model="orm.release" pk="4">
|
||||
<field type="CharField" name="name">thud</field>
|
||||
<field type="CharField" name="description">Yocto Project 2.6 "Thud"</field>
|
||||
<field rel="ManyToOneRel" to="orm.bitbakeversion" name="bitbake_version">4</field>
|
||||
<field type="CharField" name="branch_name">thud</field>
|
||||
<field type="TextField" name="helptext">Toaster will run your builds using the tip of the <a href="http://git.yoctoproject.org/cgit/cgit.cgi/poky/log/?h=thud">Yocto Project Thud branch</a>.</field>
|
||||
</object>
|
||||
|
||||
<!-- Default project layers for each release -->
|
||||
<object model="orm.releasedefaultlayer" pk="1">
|
||||
@@ -100,18 +87,6 @@
|
||||
<field rel="ManyToOneRel" to="orm.release" name="release">3</field>
|
||||
<field type="CharField" name="layer_name">meta-yocto-bsp</field>
|
||||
</object>
|
||||
<object model="orm.releasedefaultlayer" pk="10">
|
||||
<field rel="ManyToOneRel" to="orm.release" name="release">4</field>
|
||||
<field type="CharField" name="layer_name">openembedded-core</field>
|
||||
</object>
|
||||
<object model="orm.releasedefaultlayer" pk="11">
|
||||
<field rel="ManyToOneRel" to="orm.release" name="release">4</field>
|
||||
<field type="CharField" name="layer_name">meta-poky</field>
|
||||
</object>
|
||||
<object model="orm.releasedefaultlayer" pk="12">
|
||||
<field rel="ManyToOneRel" to="orm.release" name="release">4</field>
|
||||
<field type="CharField" name="layer_name">meta-yocto-bsp</field>
|
||||
</object>
|
||||
|
||||
<!-- Default layers provided by poky
|
||||
openembedded-core
|
||||
@@ -130,7 +105,7 @@
|
||||
<field rel="ManyToOneRel" to="orm.layer" name="layer">1</field>
|
||||
<field type="IntegerField" name="layer_source">0</field>
|
||||
<field rel="ManyToOneRel" to="orm.release" name="release">1</field>
|
||||
<field type="CharField" name="branch">sumo</field>
|
||||
<field type="CharField" name="branch">rocko</field>
|
||||
<field type="CharField" name="dirpath">meta</field>
|
||||
</object>
|
||||
<object model="orm.layer_version" pk="2">
|
||||
@@ -148,13 +123,6 @@
|
||||
<field type="CharField" name="branch">master</field>
|
||||
<field type="CharField" name="dirpath">meta</field>
|
||||
</object>
|
||||
<object model="orm.layer_version" pk="4">
|
||||
<field rel="ManyToOneRel" to="orm.layer" name="layer">1</field>
|
||||
<field type="IntegerField" name="layer_source">0</field>
|
||||
<field rel="ManyToOneRel" to="orm.release" name="release">4</field>
|
||||
<field type="CharField" name="branch">thud</field>
|
||||
<field type="CharField" name="dirpath">meta</field>
|
||||
</object>
|
||||
|
||||
<object model="orm.layer" pk="2">
|
||||
<field type="CharField" name="name">meta-poky</field>
|
||||
@@ -164,14 +132,14 @@
|
||||
<field type="CharField" name="vcs_web_tree_base_url">http://git.yoctoproject.org/cgit/cgit.cgi/poky/tree/%path%?h=%branch%</field>
|
||||
<field type="CharField" name="vcs_web_file_base_url">http://git.yoctoproject.org/cgit/cgit.cgi/poky/tree/%path%?h=%branch%</field>
|
||||
</object>
|
||||
<object model="orm.layer_version" pk="5">
|
||||
<object model="orm.layer_version" pk="4">
|
||||
<field rel="ManyToOneRel" to="orm.layer" name="layer">2</field>
|
||||
<field type="IntegerField" name="layer_source">0</field>
|
||||
<field rel="ManyToOneRel" to="orm.release" name="release">1</field>
|
||||
<field type="CharField" name="branch">sumo</field>
|
||||
<field type="CharField" name="branch">rocko</field>
|
||||
<field type="CharField" name="dirpath">meta-poky</field>
|
||||
</object>
|
||||
<object model="orm.layer_version" pk="6">
|
||||
<object model="orm.layer_version" pk="5">
|
||||
<field rel="ManyToOneRel" to="orm.layer" name="layer">2</field>
|
||||
<field type="IntegerField" name="layer_source">0</field>
|
||||
<field rel="ManyToOneRel" to="orm.release" name="release">2</field>
|
||||
@@ -179,20 +147,13 @@
|
||||
<field type="CharField" name="commit">HEAD</field>
|
||||
<field type="CharField" name="dirpath">meta-poky</field>
|
||||
</object>
|
||||
<object model="orm.layer_version" pk="7">
|
||||
<object model="orm.layer_version" pk="6">
|
||||
<field rel="ManyToOneRel" to="orm.layer" name="layer">2</field>
|
||||
<field type="IntegerField" name="layer_source">0</field>
|
||||
<field rel="ManyToOneRel" to="orm.release" name="release">3</field>
|
||||
<field type="CharField" name="branch">master</field>
|
||||
<field type="CharField" name="dirpath">meta-poky</field>
|
||||
</object>
|
||||
<object model="orm.layer_version" pk="8">
|
||||
<field rel="ManyToOneRel" to="orm.layer" name="layer">2</field>
|
||||
<field type="IntegerField" name="layer_source">0</field>
|
||||
<field rel="ManyToOneRel" to="orm.release" name="release">4</field>
|
||||
<field type="CharField" name="branch">thud</field>
|
||||
<field type="CharField" name="dirpath">meta-poky</field>
|
||||
</object>
|
||||
|
||||
<object model="orm.layer" pk="3">
|
||||
<field type="CharField" name="name">meta-yocto-bsp</field>
|
||||
@@ -202,14 +163,14 @@
|
||||
<field type="CharField" name="vcs_web_tree_base_url">http://git.yoctoproject.org/cgit/cgit.cgi/poky/tree/%path%?h=%branch%</field>
|
||||
<field type="CharField" name="vcs_web_file_base_url">http://git.yoctoproject.org/cgit/cgit.cgi/poky/tree/%path%?h=%branch%</field>
|
||||
</object>
|
||||
<object model="orm.layer_version" pk="9">
|
||||
<object model="orm.layer_version" pk="7">
|
||||
<field rel="ManyToOneRel" to="orm.layer" name="layer">3</field>
|
||||
<field type="IntegerField" name="layer_source">0</field>
|
||||
<field rel="ManyToOneRel" to="orm.release" name="release">1</field>
|
||||
<field type="CharField" name="branch">sumo</field>
|
||||
<field type="CharField" name="branch">rocko</field>
|
||||
<field type="CharField" name="dirpath">meta-yocto-bsp</field>
|
||||
</object>
|
||||
<object model="orm.layer_version" pk="10">
|
||||
<object model="orm.layer_version" pk="8">
|
||||
<field rel="ManyToOneRel" to="orm.layer" name="layer">3</field>
|
||||
<field type="IntegerField" name="layer_source">0</field>
|
||||
<field rel="ManyToOneRel" to="orm.release" name="release">2</field>
|
||||
@@ -217,18 +178,11 @@
|
||||
<field type="CharField" name="commit">HEAD</field>
|
||||
<field type="CharField" name="dirpath">meta-yocto-bsp</field>
|
||||
</object>
|
||||
<object model="orm.layer_version" pk="11">
|
||||
<object model="orm.layer_version" pk="9">
|
||||
<field rel="ManyToOneRel" to="orm.layer" name="layer">3</field>
|
||||
<field type="IntegerField" name="layer_source">0</field>
|
||||
<field rel="ManyToOneRel" to="orm.release" name="release">3</field>
|
||||
<field type="CharField" name="branch">master</field>
|
||||
<field type="CharField" name="dirpath">meta-yocto-bsp</field>
|
||||
</object>
|
||||
<object model="orm.layer_version" pk="12">
|
||||
<field rel="ManyToOneRel" to="orm.layer" name="layer">3</field>
|
||||
<field type="IntegerField" name="layer_source">0</field>
|
||||
<field rel="ManyToOneRel" to="orm.release" name="release">4</field>
|
||||
<field type="CharField" name="branch">thud</field>
|
||||
<field type="CharField" name="dirpath">meta-yocto-bsp</field>
|
||||
</object>
|
||||
</django-objects>
|
||||
|
||||
@@ -29,6 +29,7 @@ from orm.models import ToasterSetting
|
||||
import os
|
||||
import sys
|
||||
|
||||
import json
|
||||
import logging
|
||||
import threading
|
||||
import time
|
||||
@@ -36,18 +37,6 @@ logger = logging.getLogger("toaster")
|
||||
|
||||
DEFAULT_LAYERINDEX_SERVER = "http://layers.openembedded.org/layerindex/api/"
|
||||
|
||||
# Add path to bitbake modules for layerindexlib
|
||||
# lib/toaster/orm/management/commands/lsupdates.py (abspath)
|
||||
# lib/toaster/orm/management/commands (dirname)
|
||||
# lib/toaster/orm/management (dirname)
|
||||
# lib/toaster/orm (dirname)
|
||||
# lib/toaster/ (dirname)
|
||||
# lib/ (dirname)
|
||||
path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))))
|
||||
sys.path.insert(0, path)
|
||||
|
||||
import layerindexlib
|
||||
|
||||
|
||||
class Spinner(threading.Thread):
|
||||
""" A simple progress spinner to indicate download/parsing is happening"""
|
||||
@@ -97,6 +86,45 @@ class Command(BaseCommand):
|
||||
self.apiurl = ToasterSetting.objects.get(name = 'CUSTOM_LAYERINDEX_SERVER').value
|
||||
|
||||
assert self.apiurl is not None
|
||||
try:
|
||||
from urllib.request import urlopen, URLError
|
||||
from urllib.parse import urlparse
|
||||
except ImportError:
|
||||
from urllib2 import urlopen, URLError
|
||||
from urlparse import urlparse
|
||||
|
||||
proxy_settings = os.environ.get("http_proxy", None)
|
||||
|
||||
def _get_json_response(apiurl=None):
|
||||
if None == apiurl:
|
||||
apiurl=self.apiurl
|
||||
http_progress = Spinner()
|
||||
http_progress.start()
|
||||
|
||||
_parsedurl = urlparse(apiurl)
|
||||
path = _parsedurl.path
|
||||
|
||||
# logger.debug("Fetching %s", apiurl)
|
||||
try:
|
||||
res = urlopen(apiurl)
|
||||
except URLError as e:
|
||||
raise Exception("Failed to read %s: %s" % (path, e.reason))
|
||||
|
||||
parsed = json.loads(res.read().decode('utf-8'))
|
||||
|
||||
http_progress.stop()
|
||||
return parsed
|
||||
|
||||
# verify we can get the basic api
|
||||
try:
|
||||
apilinks = _get_json_response()
|
||||
except Exception as e:
|
||||
import traceback
|
||||
if proxy_settings is not None:
|
||||
logger.info("EE: Using proxy %s" % proxy_settings)
|
||||
logger.warning("EE: could not connect to %s, skipping update:"
|
||||
"%s\n%s" % (self.apiurl, e, traceback.format_exc()))
|
||||
return
|
||||
|
||||
# update branches; only those that we already have names listed in the
|
||||
# Releases table
|
||||
@@ -105,118 +133,112 @@ class Command(BaseCommand):
|
||||
if len(whitelist_branch_names) == 0:
|
||||
raise Exception("Failed to make list of branches to fetch")
|
||||
|
||||
logger.info("Fetching metadata for %s",
|
||||
logger.info("Fetching metadata releases for %s",
|
||||
" ".join(whitelist_branch_names))
|
||||
|
||||
# We require a non-empty bb.data, but we can fake it with a dictionary
|
||||
layerindex = layerindexlib.LayerIndex({"DUMMY" : "VALUE"})
|
||||
|
||||
http_progress = Spinner()
|
||||
http_progress.start()
|
||||
|
||||
if whitelist_branch_names:
|
||||
url_branches = ";branch=%s" % ','.join(whitelist_branch_names)
|
||||
else:
|
||||
url_branches = ""
|
||||
layerindex.load_layerindex("%s%s" % (self.apiurl, url_branches))
|
||||
|
||||
http_progress.stop()
|
||||
|
||||
# We know we're only processing one entry, so we reference it here
|
||||
# (this is cheating...)
|
||||
index = layerindex.indexes[0]
|
||||
branches_info = _get_json_response(apilinks['branches'] +
|
||||
"?filter=name:%s"
|
||||
% "OR".join(whitelist_branch_names))
|
||||
|
||||
# Map the layer index branches to toaster releases
|
||||
li_branch_id_to_toaster_release = {}
|
||||
|
||||
logger.info("Processing releases")
|
||||
|
||||
total = len(index.branches)
|
||||
for i, id in enumerate(index.branches):
|
||||
li_branch_id_to_toaster_release[id] = \
|
||||
Release.objects.get(name=index.branches[id].name)
|
||||
total = len(branches_info)
|
||||
for i, branch in enumerate(branches_info):
|
||||
li_branch_id_to_toaster_release[branch['id']] = \
|
||||
Release.objects.get(name=branch['name'])
|
||||
self.mini_progress("Releases", i, total)
|
||||
|
||||
# keep a track of the layerindex (li) id mappings so that
|
||||
# layer_versions can be created for these layers later on
|
||||
li_layer_id_to_toaster_layer_id = {}
|
||||
|
||||
logger.info("Processing layers")
|
||||
logger.info("Fetching layers")
|
||||
|
||||
total = len(index.layerItems)
|
||||
for i, id in enumerate(index.layerItems):
|
||||
layers_info = _get_json_response(apilinks['layerItems'])
|
||||
|
||||
total = len(layers_info)
|
||||
for i, li in enumerate(layers_info):
|
||||
try:
|
||||
l, created = Layer.objects.get_or_create(name=index.layerItems[id].name)
|
||||
l.up_date = index.layerItems[id].updated
|
||||
l.summary = index.layerItems[id].summary
|
||||
l.description = index.layerItems[id].description
|
||||
l, created = Layer.objects.get_or_create(name=li['name'])
|
||||
l.up_date = li['updated']
|
||||
l.summary = li['summary']
|
||||
l.description = li['description']
|
||||
|
||||
if created:
|
||||
# predefined layers in the fixtures (for example poky.xml)
|
||||
# always preempt the Layer Index for these values
|
||||
l.vcs_url = index.layerItems[id].vcs_url
|
||||
l.vcs_web_url = index.layerItems[id].vcs_web_url
|
||||
l.vcs_web_tree_base_url = index.layerItems[id].vcs_web_tree_base_url
|
||||
l.vcs_web_file_base_url = index.layerItems[id].vcs_web_file_base_url
|
||||
l.vcs_url = li['vcs_url']
|
||||
l.vcs_web_url = li['vcs_web_url']
|
||||
l.vcs_web_tree_base_url = li['vcs_web_tree_base_url']
|
||||
l.vcs_web_file_base_url = li['vcs_web_file_base_url']
|
||||
l.save()
|
||||
except Layer.MultipleObjectsReturned:
|
||||
logger.info("Skipped %s as we found multiple layers and "
|
||||
"don't know which to update" %
|
||||
index.layerItems[id].name)
|
||||
li['name'])
|
||||
|
||||
li_layer_id_to_toaster_layer_id[id] = l.pk
|
||||
li_layer_id_to_toaster_layer_id[li['id']] = l.pk
|
||||
|
||||
self.mini_progress("layers", i, total)
|
||||
|
||||
# update layer_versions
|
||||
logger.info("Processing layer versions")
|
||||
logger.info("Fetching layer versions")
|
||||
layerbranches_info = _get_json_response(
|
||||
apilinks['layerBranches'] + "?filter=branch__name:%s" %
|
||||
"OR".join(whitelist_branch_names))
|
||||
|
||||
# Map Layer index layer_branch object id to
|
||||
# layer_version toaster object id
|
||||
li_layer_branch_id_to_toaster_lv_id = {}
|
||||
|
||||
total = len(index.layerBranches)
|
||||
for i, id in enumerate(index.layerBranches):
|
||||
total = len(layerbranches_info)
|
||||
for i, lbi in enumerate(layerbranches_info):
|
||||
# release as defined by toaster map to layerindex branch
|
||||
release = li_branch_id_to_toaster_release[index.layerBranches[id].branch_id]
|
||||
release = li_branch_id_to_toaster_release[lbi['branch']]
|
||||
|
||||
try:
|
||||
lv, created = Layer_Version.objects.get_or_create(
|
||||
layer=Layer.objects.get(
|
||||
pk=li_layer_id_to_toaster_layer_id[index.layerBranches[id].layer_id]),
|
||||
pk=li_layer_id_to_toaster_layer_id[lbi['layer']]),
|
||||
release=release
|
||||
)
|
||||
except KeyError:
|
||||
logger.warning(
|
||||
"No such layerindex layer referenced by layerbranch %d" %
|
||||
index.layerBranches[id].layer_id)
|
||||
lbi['layer'])
|
||||
continue
|
||||
|
||||
if created:
|
||||
lv.release = li_branch_id_to_toaster_release[index.layerBranches[id].branch_id]
|
||||
lv.up_date = index.layerBranches[id].updated
|
||||
lv.commit = index.layerBranches[id].actual_branch
|
||||
lv.dirpath = index.layerBranches[id].vcs_subdir
|
||||
lv.release = li_branch_id_to_toaster_release[lbi['branch']]
|
||||
lv.up_date = lbi['updated']
|
||||
lv.commit = lbi['actual_branch']
|
||||
lv.dirpath = lbi['vcs_subdir']
|
||||
lv.save()
|
||||
|
||||
li_layer_branch_id_to_toaster_lv_id[index.layerBranches[id].id] =\
|
||||
li_layer_branch_id_to_toaster_lv_id[lbi['id']] =\
|
||||
lv.pk
|
||||
self.mini_progress("layer versions", i, total)
|
||||
|
||||
logger.info("Processing layer version dependencies")
|
||||
logger.info("Fetching layer version dependencies")
|
||||
# update layer dependencies
|
||||
layerdependencies_info = _get_json_response(
|
||||
apilinks['layerDependencies'] +
|
||||
"?filter=layerbranch__branch__name:%s" %
|
||||
"OR".join(whitelist_branch_names))
|
||||
|
||||
dependlist = {}
|
||||
for id in index.layerDependencies:
|
||||
for ldi in layerdependencies_info:
|
||||
try:
|
||||
lv = Layer_Version.objects.get(
|
||||
pk=li_layer_branch_id_to_toaster_lv_id[index.layerDependencies[id].layerbranch_id])
|
||||
pk=li_layer_branch_id_to_toaster_lv_id[ldi['layerbranch']])
|
||||
except Layer_Version.DoesNotExist as e:
|
||||
continue
|
||||
|
||||
if lv not in dependlist:
|
||||
dependlist[lv] = []
|
||||
try:
|
||||
layer_id = li_layer_id_to_toaster_layer_id[index.layerDependencies[id].dependency_id]
|
||||
layer_id = li_layer_id_to_toaster_layer_id[ldi['dependency']]
|
||||
|
||||
dependlist[lv].append(
|
||||
Layer_Version.objects.get(layer__pk=layer_id,
|
||||
@@ -225,7 +247,7 @@ class Command(BaseCommand):
|
||||
except Layer_Version.DoesNotExist:
|
||||
logger.warning("Cannot find layer version (ls:%s),"
|
||||
"up_id:%s lv:%s" %
|
||||
(self, index.layerDependencies[id].dependency_id, lv))
|
||||
(self, ldi['dependency'], lv))
|
||||
|
||||
total = len(dependlist)
|
||||
for i, lv in enumerate(dependlist):
|
||||
@@ -236,61 +258,73 @@ class Command(BaseCommand):
|
||||
self.mini_progress("Layer version dependencies", i, total)
|
||||
|
||||
# update Distros
|
||||
logger.info("Processing distro information")
|
||||
logger.info("Fetching distro information")
|
||||
distros_info = _get_json_response(
|
||||
apilinks['distros'] + "?filter=layerbranch__branch__name:%s" %
|
||||
"OR".join(whitelist_branch_names))
|
||||
|
||||
total = len(index.distros)
|
||||
for i, id in enumerate(index.distros):
|
||||
total = len(distros_info)
|
||||
for i, di in enumerate(distros_info):
|
||||
distro, created = Distro.objects.get_or_create(
|
||||
name=index.distros[id].name,
|
||||
name=di['name'],
|
||||
layer_version=Layer_Version.objects.get(
|
||||
pk=li_layer_branch_id_to_toaster_lv_id[index.distros[id].layerbranch_id]))
|
||||
distro.up_date = index.distros[id].updated
|
||||
distro.name = index.distros[id].name
|
||||
distro.description = index.distros[id].description
|
||||
pk=li_layer_branch_id_to_toaster_lv_id[di['layerbranch']]))
|
||||
distro.up_date = di['updated']
|
||||
distro.name = di['name']
|
||||
distro.description = di['description']
|
||||
distro.save()
|
||||
self.mini_progress("distros", i, total)
|
||||
|
||||
# update machines
|
||||
logger.info("Processing machine information")
|
||||
logger.info("Fetching machine information")
|
||||
machines_info = _get_json_response(
|
||||
apilinks['machines'] + "?filter=layerbranch__branch__name:%s" %
|
||||
"OR".join(whitelist_branch_names))
|
||||
|
||||
total = len(index.machines)
|
||||
for i, id in enumerate(index.machines):
|
||||
total = len(machines_info)
|
||||
for i, mi in enumerate(machines_info):
|
||||
mo, created = Machine.objects.get_or_create(
|
||||
name=index.machines[id].name,
|
||||
name=mi['name'],
|
||||
layer_version=Layer_Version.objects.get(
|
||||
pk=li_layer_branch_id_to_toaster_lv_id[index.machines[id].layerbranch_id]))
|
||||
mo.up_date = index.machines[id].updated
|
||||
mo.name = index.machines[id].name
|
||||
mo.description = index.machines[id].description
|
||||
pk=li_layer_branch_id_to_toaster_lv_id[mi['layerbranch']]))
|
||||
mo.up_date = mi['updated']
|
||||
mo.name = mi['name']
|
||||
mo.description = mi['description']
|
||||
mo.save()
|
||||
self.mini_progress("machines", i, total)
|
||||
|
||||
# update recipes; paginate by layer version / layer branch
|
||||
logger.info("Processing recipe information")
|
||||
logger.info("Fetching recipe information")
|
||||
recipes_info = _get_json_response(
|
||||
apilinks['recipes'] + "?filter=layerbranch__branch__name:%s" %
|
||||
"OR".join(whitelist_branch_names))
|
||||
|
||||
total = len(index.recipes)
|
||||
for i, id in enumerate(index.recipes):
|
||||
total = len(recipes_info)
|
||||
for i, ri in enumerate(recipes_info):
|
||||
try:
|
||||
lv_id = li_layer_branch_id_to_toaster_lv_id[index.recipes[id].layerbranch_id]
|
||||
lv_id = li_layer_branch_id_to_toaster_lv_id[ri['layerbranch']]
|
||||
lv = Layer_Version.objects.get(pk=lv_id)
|
||||
|
||||
ro, created = Recipe.objects.get_or_create(
|
||||
layer_version=lv,
|
||||
name=index.recipes[id].pn
|
||||
name=ri['pn']
|
||||
)
|
||||
|
||||
ro.layer_version = lv
|
||||
ro.up_date = index.recipes[id].updated
|
||||
ro.name = index.recipes[id].pn
|
||||
ro.version = index.recipes[id].pv
|
||||
ro.summary = index.recipes[id].summary
|
||||
ro.description = index.recipes[id].description
|
||||
ro.section = index.recipes[id].section
|
||||
ro.license = index.recipes[id].license
|
||||
ro.homepage = index.recipes[id].homepage
|
||||
ro.bugtracker = index.recipes[id].bugtracker
|
||||
ro.file_path = index.recipes[id].fullpath
|
||||
ro.is_image = 'image' in index.recipes[id].inherits.split()
|
||||
ro.up_date = ri['updated']
|
||||
ro.name = ri['pn']
|
||||
ro.version = ri['pv']
|
||||
ro.summary = ri['summary']
|
||||
ro.description = ri['description']
|
||||
ro.section = ri['section']
|
||||
ro.license = ri['license']
|
||||
ro.homepage = ri['homepage']
|
||||
ro.bugtracker = ri['bugtracker']
|
||||
ro.file_path = ri['filepath'] + "/" + ri['filename']
|
||||
if 'inherits' in ri:
|
||||
ro.is_image = 'image' in ri['inherits'].split()
|
||||
else: # workaround for old style layer index
|
||||
ro.is_image = "-image-" in ri['pn']
|
||||
ro.save()
|
||||
except Exception as e:
|
||||
logger.warning("Failed saving recipe %s", e)
|
||||
|
||||
@@ -1,28 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
from __future__ import unicode_literals
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('orm', '0017_distro_clone'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name='Project',
|
||||
name='builddir',
|
||||
field=models.TextField(),
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='Project',
|
||||
name='merged_attr',
|
||||
field=models.BooleanField(default=False)
|
||||
),
|
||||
migrations.AddField(
|
||||
model_name='Build',
|
||||
name='progress_item',
|
||||
field=models.CharField(max_length=40)
|
||||
),
|
||||
]
|
||||
@@ -121,15 +121,8 @@ class ToasterSetting(models.Model):
|
||||
|
||||
|
||||
class ProjectManager(models.Manager):
|
||||
def create_project(self, name, release, existing_project=None):
|
||||
if existing_project and (release is not None):
|
||||
prj = existing_project
|
||||
prj.bitbake_version = release.bitbake_version
|
||||
prj.release = release
|
||||
# Delete the previous ProjectLayer mappings
|
||||
for pl in ProjectLayer.objects.filter(project=prj):
|
||||
pl.delete()
|
||||
elif release is not None:
|
||||
def create_project(self, name, release):
|
||||
if release is not None:
|
||||
prj = self.model(name=name,
|
||||
bitbake_version=release.bitbake_version,
|
||||
release=release)
|
||||
@@ -137,14 +130,15 @@ class ProjectManager(models.Manager):
|
||||
prj = self.model(name=name,
|
||||
bitbake_version=None,
|
||||
release=None)
|
||||
|
||||
prj.save()
|
||||
|
||||
for defaultconf in ToasterSetting.objects.filter(
|
||||
name__startswith="DEFCONF_"):
|
||||
name = defaultconf.name[8:]
|
||||
pv,create = ProjectVariable.objects.get_or_create(project=prj,name=name)
|
||||
pv.value = defaultconf.value
|
||||
pv.save()
|
||||
ProjectVariable.objects.create(project=prj,
|
||||
name=name,
|
||||
value=defaultconf.value)
|
||||
|
||||
if release is None:
|
||||
return prj
|
||||
@@ -203,11 +197,6 @@ class Project(models.Model):
|
||||
user_id = models.IntegerField(null=True)
|
||||
objects = ProjectManager()
|
||||
|
||||
# build directory override (e.g. imported)
|
||||
builddir = models.TextField()
|
||||
# merge the Toaster configure attributes directly into the standard conf files
|
||||
merged_attr = models.BooleanField(default=False)
|
||||
|
||||
# set to True for the project which is the default container
|
||||
# for builds initiated by the command line etc.
|
||||
is_default= models.BooleanField(default=False)
|
||||
@@ -316,15 +305,6 @@ class Project(models.Model):
|
||||
return layer_versions
|
||||
|
||||
|
||||
def get_default_image_recipe(self):
|
||||
try:
|
||||
return self.projectvariable_set.get(name="DEFAULT_IMAGE").value
|
||||
except (ProjectVariable.DoesNotExist,IndexError):
|
||||
return None;
|
||||
|
||||
def get_is_new(self):
|
||||
return self.get_variable(Project.PROJECT_SPECIFIC_ISNEW)
|
||||
|
||||
def get_available_machines(self):
|
||||
""" Returns QuerySet of all Machines which are provided by the
|
||||
Layers currently added to the Project """
|
||||
@@ -373,32 +353,6 @@ class Project(models.Model):
|
||||
|
||||
return queryset
|
||||
|
||||
# Project Specific status management
|
||||
PROJECT_SPECIFIC_STATUS = 'INTERNAL_PROJECT_SPECIFIC_STATUS'
|
||||
PROJECT_SPECIFIC_CALLBACK = 'INTERNAL_PROJECT_SPECIFIC_CALLBACK'
|
||||
PROJECT_SPECIFIC_ISNEW = 'INTERNAL_PROJECT_SPECIFIC_ISNEW'
|
||||
PROJECT_SPECIFIC_DEFAULTIMAGE = 'PROJECT_SPECIFIC_DEFAULTIMAGE'
|
||||
PROJECT_SPECIFIC_NONE = ''
|
||||
PROJECT_SPECIFIC_NEW = '1'
|
||||
PROJECT_SPECIFIC_EDIT = '2'
|
||||
PROJECT_SPECIFIC_CLONING = '3'
|
||||
PROJECT_SPECIFIC_CLONING_SUCCESS = '4'
|
||||
PROJECT_SPECIFIC_CLONING_FAIL = '5'
|
||||
|
||||
def get_variable(self,variable,default_value = ''):
|
||||
try:
|
||||
return self.projectvariable_set.get(name=variable).value
|
||||
except (ProjectVariable.DoesNotExist,IndexError):
|
||||
return default_value
|
||||
|
||||
def set_variable(self,variable,value):
|
||||
pv,create = ProjectVariable.objects.get_or_create(project = self, name = variable)
|
||||
pv.value = value
|
||||
pv.save()
|
||||
|
||||
def get_default_image(self):
|
||||
return self.get_variable(Project.PROJECT_SPECIFIC_DEFAULTIMAGE)
|
||||
|
||||
def schedule_build(self):
|
||||
|
||||
from bldcontrol.models import BuildRequest, BRTarget, BRLayer
|
||||
@@ -505,9 +459,6 @@ class Build(models.Model):
|
||||
# number of repos cloned so far for this build (default off)
|
||||
repos_cloned = models.IntegerField(default=1)
|
||||
|
||||
# Hint on current progress item
|
||||
progress_item = models.CharField(max_length=40)
|
||||
|
||||
@staticmethod
|
||||
def get_recent(project=None):
|
||||
"""
|
||||
@@ -1712,9 +1663,6 @@ class CustomImageRecipe(Recipe):
|
||||
|
||||
path_schema_two = self.base_recipe.file_path
|
||||
|
||||
path_schema_three = "%s/%s" % (self.base_recipe.layer_version.layer.local_source_dir,
|
||||
self.base_recipe.file_path)
|
||||
|
||||
if os.path.exists(path_schema_one):
|
||||
return path_schema_one
|
||||
|
||||
@@ -1722,10 +1670,6 @@ class CustomImageRecipe(Recipe):
|
||||
if os.path.exists(path_schema_two):
|
||||
return path_schema_two
|
||||
|
||||
# Or a local path if all layers are local
|
||||
if os.path.exists(path_schema_three):
|
||||
return path_schema_three
|
||||
|
||||
return None
|
||||
|
||||
def generate_recipe_file_contents(self):
|
||||
@@ -1750,8 +1694,8 @@ class CustomImageRecipe(Recipe):
|
||||
if base_recipe_path:
|
||||
base_recipe = open(base_recipe_path, 'r').read()
|
||||
else:
|
||||
# Pass back None to trigger error message to user
|
||||
return None
|
||||
raise IOError("Based on recipe file not found: %s" %
|
||||
base_recipe_path)
|
||||
|
||||
# Add a special case for when the recipe we have based a custom image
|
||||
# recipe on requires another recipe.
|
||||
@@ -1877,7 +1821,7 @@ class Distro(models.Model):
|
||||
description = models.CharField(max_length=255)
|
||||
|
||||
def get_vcs_distro_file_link_url(self):
|
||||
path = 'conf/distro/%s.conf' % self.name
|
||||
path = self.name+'.conf'
|
||||
return self.layer_version.get_vcs_file_link_url(path)
|
||||
|
||||
def __unicode__(self):
|
||||
|
||||
@@ -22,9 +22,7 @@ import os
|
||||
import re
|
||||
import logging
|
||||
import json
|
||||
import subprocess
|
||||
from collections import Counter
|
||||
from shutil import copyfile
|
||||
|
||||
from orm.models import Project, ProjectTarget, Build, Layer_Version
|
||||
from orm.models import LayerVersionDependency, LayerSource, ProjectLayer
|
||||
@@ -40,18 +38,6 @@ from django.core.urlresolvers import reverse
|
||||
from django.db.models import Q, F
|
||||
from django.db import Error
|
||||
from toastergui.templatetags.projecttags import filtered_filesizeformat
|
||||
from django.utils import timezone
|
||||
import pytz
|
||||
|
||||
# development/debugging support
|
||||
verbose = 2
|
||||
def _log(msg):
|
||||
if 1 == verbose:
|
||||
print(msg)
|
||||
elif 2 == verbose:
|
||||
f1=open('/tmp/toaster.log', 'a')
|
||||
f1.write("|" + msg + "|\n" )
|
||||
f1.close()
|
||||
|
||||
logger = logging.getLogger("toaster")
|
||||
|
||||
@@ -151,130 +137,6 @@ class XhrBuildRequest(View):
|
||||
return response
|
||||
|
||||
|
||||
class XhrProjectUpdate(View):
|
||||
|
||||
def get(self, request, *args, **kwargs):
|
||||
return HttpResponse()
|
||||
|
||||
def post(self, request, *args, **kwargs):
|
||||
"""
|
||||
Project Update
|
||||
|
||||
Entry point: /xhr_projectupdate/<project_id>
|
||||
Method: POST
|
||||
|
||||
Args:
|
||||
pid: pid of project to update
|
||||
|
||||
Returns:
|
||||
{"error": "ok"}
|
||||
or
|
||||
{"error": <error message>}
|
||||
"""
|
||||
|
||||
project = Project.objects.get(pk=kwargs['pid'])
|
||||
logger.debug("ProjectUpdateCallback:project.pk=%d,project.builddir=%s" % (project.pk,project.builddir))
|
||||
|
||||
if 'do_update' in request.POST:
|
||||
|
||||
# Extract any default image recipe
|
||||
if 'default_image' in request.POST:
|
||||
project.set_variable(Project.PROJECT_SPECIFIC_DEFAULTIMAGE,str(request.POST['default_image']))
|
||||
else:
|
||||
project.set_variable(Project.PROJECT_SPECIFIC_DEFAULTIMAGE,'')
|
||||
|
||||
logger.debug("ProjectUpdateCallback:Chain to the build request")
|
||||
|
||||
# Chain to the build request
|
||||
xhrBuildRequest = XhrBuildRequest()
|
||||
return xhrBuildRequest.post(request, *args, **kwargs)
|
||||
|
||||
logger.warning("ERROR:XhrProjectUpdate")
|
||||
response = HttpResponse()
|
||||
response.status_code = 500
|
||||
return response
|
||||
|
||||
class XhrSetDefaultImageUrl(View):
|
||||
|
||||
def get(self, request, *args, **kwargs):
|
||||
return HttpResponse()
|
||||
|
||||
def post(self, request, *args, **kwargs):
|
||||
"""
|
||||
Project Update
|
||||
|
||||
Entry point: /xhr_setdefaultimage/<project_id>
|
||||
Method: POST
|
||||
|
||||
Args:
|
||||
pid: pid of project to update default image
|
||||
|
||||
Returns:
|
||||
{"error": "ok"}
|
||||
or
|
||||
{"error": <error message>}
|
||||
"""
|
||||
|
||||
project = Project.objects.get(pk=kwargs['pid'])
|
||||
logger.debug("XhrSetDefaultImageUrl:project.pk=%d" % (project.pk))
|
||||
|
||||
# set any default image recipe
|
||||
if 'targets' in request.POST:
|
||||
default_target = str(request.POST['targets'])
|
||||
project.set_variable(Project.PROJECT_SPECIFIC_DEFAULTIMAGE,default_target)
|
||||
logger.debug("XhrSetDefaultImageUrl,project.pk=%d,project.builddir=%s" % (project.pk,project.builddir))
|
||||
return error_response('ok')
|
||||
|
||||
logger.warning("ERROR:XhrSetDefaultImageUrl")
|
||||
response = HttpResponse()
|
||||
response.status_code = 500
|
||||
return response
|
||||
|
||||
|
||||
#
|
||||
# Layer Management
|
||||
#
|
||||
# Rules for 'local_source_dir' layers
|
||||
# * Layers must have a unique name in the Layers table
|
||||
# * A 'local_source_dir' layer is supposed to be shared
|
||||
# by all projects that use it, so that it can have the
|
||||
# same logical name
|
||||
# * Each project that uses a layer will have its own
|
||||
# LayerVersion and Project Layer for it
|
||||
# * During the Paroject delete process, when the last
|
||||
# LayerVersion for a 'local_source_dir' layer is deleted
|
||||
# then the Layer record is deleted to remove orphans
|
||||
#
|
||||
|
||||
def scan_layer_content(layer,layer_version):
|
||||
# if this is a local layer directory, we can immediately scan its content
|
||||
if layer.local_source_dir:
|
||||
try:
|
||||
# recipes-*/*/*.bb
|
||||
cmd = '%s %s' % ('ls', os.path.join(layer.local_source_dir,'recipes-*/*/*.bb'))
|
||||
recipes_list = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read()
|
||||
recipes_list = recipes_list.decode("utf-8").strip()
|
||||
if recipes_list and 'No such' not in recipes_list:
|
||||
for recipe in recipes_list.split('\n'):
|
||||
recipe_path = recipe[recipe.rfind('recipes-'):]
|
||||
recipe_name = recipe[recipe.rfind('/')+1:].replace('.bb','')
|
||||
recipe_ver = recipe_name.rfind('_')
|
||||
if recipe_ver > 0:
|
||||
recipe_name = recipe_name[0:recipe_ver]
|
||||
if recipe_name:
|
||||
ro, created = Recipe.objects.get_or_create(
|
||||
layer_version=layer_version,
|
||||
name=recipe_name
|
||||
)
|
||||
if created:
|
||||
ro.file_path = recipe_path
|
||||
ro.summary = 'Recipe %s from layer %s' % (recipe_name,layer.name)
|
||||
ro.description = ro.summary
|
||||
ro.save()
|
||||
|
||||
except Exception as e:
|
||||
logger.warning("ERROR:scan_layer_content: %s" % e)
|
||||
|
||||
class XhrLayer(View):
|
||||
""" Delete, Get, Add and Update Layer information
|
||||
|
||||
@@ -403,7 +265,6 @@ class XhrLayer(View):
|
||||
(csv)]
|
||||
|
||||
"""
|
||||
|
||||
try:
|
||||
project = Project.objects.get(pk=kwargs['pid'])
|
||||
|
||||
@@ -424,13 +285,7 @@ class XhrLayer(View):
|
||||
if layer_data['name'] in existing_layers:
|
||||
return JsonResponse({"error": "layer-name-exists"})
|
||||
|
||||
if ('local_source_dir' in layer_data):
|
||||
# Local layer can be shared across projects. They have no 'release'
|
||||
# and are not included in get_all_compatible_layer_versions() above
|
||||
layer,created = Layer.objects.get_or_create(name=layer_data['name'])
|
||||
_log("Local Layer created=%s" % created)
|
||||
else:
|
||||
layer = Layer.objects.create(name=layer_data['name'])
|
||||
layer = Layer.objects.create(name=layer_data['name'])
|
||||
|
||||
layer_version = Layer_Version.objects.create(
|
||||
layer=layer,
|
||||
@@ -438,7 +293,7 @@ class XhrLayer(View):
|
||||
layer_source=LayerSource.TYPE_IMPORTED)
|
||||
|
||||
# Local layer
|
||||
if ('local_source_dir' in layer_data): ### and layer.local_source_dir:
|
||||
if ('local_source_dir' in layer_data) and layer.local_source_dir:
|
||||
layer.local_source_dir = layer_data['local_source_dir']
|
||||
# git layer
|
||||
elif 'vcs_url' in layer_data:
|
||||
@@ -470,9 +325,6 @@ class XhrLayer(View):
|
||||
'layerdetailurl':
|
||||
layer_dep.get_detailspage_url(project.pk)})
|
||||
|
||||
# Scan the layer's content and update components
|
||||
scan_layer_content(layer,layer_version)
|
||||
|
||||
except Layer_Version.DoesNotExist:
|
||||
return error_response("layer-dep-not-found")
|
||||
except Project.DoesNotExist:
|
||||
@@ -677,13 +529,7 @@ class XhrCustomRecipe(View):
|
||||
recipe_path = os.path.join(layerpath, "recipes", "%s.bb" %
|
||||
recipe.name)
|
||||
with open(recipe_path, "w") as recipef:
|
||||
content = recipe.generate_recipe_file_contents()
|
||||
if not content:
|
||||
# Delete this incomplete image recipe object
|
||||
recipe.delete()
|
||||
return error_response("recipe-parent-not-exist")
|
||||
else:
|
||||
recipef.write(recipe.generate_recipe_file_contents())
|
||||
recipef.write(recipe.generate_recipe_file_contents())
|
||||
|
||||
return JsonResponse(
|
||||
{"error": "ok",
|
||||
@@ -1168,24 +1014,8 @@ class XhrProject(View):
|
||||
state=BuildRequest.REQ_INPROGRESS):
|
||||
XhrBuildRequest.cancel_build(br)
|
||||
|
||||
# gather potential orphaned local layers attached to this project
|
||||
project_local_layer_list = []
|
||||
for pl in ProjectLayer.objects.filter(project=project):
|
||||
if pl.layercommit.layer_source == LayerSource.TYPE_IMPORTED:
|
||||
project_local_layer_list.append(pl.layercommit.layer)
|
||||
|
||||
# deep delete the project and its dependencies
|
||||
project.delete()
|
||||
|
||||
# delete any local layers now orphaned
|
||||
_log("LAYER_ORPHAN_CHECK:Check for orphaned layers")
|
||||
for layer in project_local_layer_list:
|
||||
layer_refs = Layer_Version.objects.filter(layer=layer)
|
||||
_log("LAYER_ORPHAN_CHECK:Ref Count for '%s' = %d" % (layer.name,len(layer_refs)))
|
||||
if 0 == len(layer_refs):
|
||||
_log("LAYER_ORPHAN_CHECK:DELETE orpahned '%s'" % (layer.name))
|
||||
Layer.objects.filter(pk=layer.id).delete()
|
||||
|
||||
except Project.DoesNotExist:
|
||||
return error_response("Project %s does not exist" %
|
||||
kwargs['project_id'])
|
||||
|
||||
@@ -67,18 +67,6 @@ function layerBtnsInit() {
|
||||
});
|
||||
});
|
||||
|
||||
$("td .set-default-recipe-btn").unbind('click');
|
||||
$("td .set-default-recipe-btn").click(function(e){
|
||||
e.preventDefault();
|
||||
var recipe = $(this).data('recipe-name');
|
||||
|
||||
libtoaster.setDefaultImage(null, recipe,
|
||||
function(){
|
||||
/* Success */
|
||||
window.location.replace(libtoaster.ctx.projectSpecificPageUrl);
|
||||
});
|
||||
});
|
||||
|
||||
|
||||
$(".customise-btn").unbind('click');
|
||||
$(".customise-btn").click(function(e){
|
||||
|
||||
@@ -359,8 +359,7 @@ function layerDetailsPageInit (ctx) {
|
||||
if ($(this).is("dt")) {
|
||||
var dd = $(this).next("dd");
|
||||
if (!dd.children("form:visible")|| !dd.find(".current-value").html()){
|
||||
if (ctx.layerVersion.layer_source == ctx.layerSourceTypes.TYPE_IMPORTED ||
|
||||
ctx.layerVersion.layer_source == ctx.layerSourceTypes.TYPE_LOCAL) {
|
||||
if (ctx.layerVersion.layer_source == ctx.layerSourceTypes.TYPE_IMPORTED){
|
||||
/* There's no current value and the layer is editable
|
||||
* so show the "Not set" and hide the delete icon
|
||||
*/
|
||||
|
||||
@@ -275,8 +275,7 @@ var libtoaster = (function () {
|
||||
|
||||
function _addRmLayer(layerObj, add, doneCb){
|
||||
if (layerObj.xhrLayerUrl === undefined){
|
||||
alert("ERROR: missing xhrLayerUrl object. Please file a bug.");
|
||||
return;
|
||||
throw("xhrLayerUrl is undefined")
|
||||
}
|
||||
|
||||
if (add === true) {
|
||||
@@ -466,108 +465,6 @@ var libtoaster = (function () {
|
||||
$.cookie('toaster-notification', JSON.stringify(data), { path: '/'});
|
||||
}
|
||||
|
||||
/* _updateProject:
|
||||
* url: xhrProjectUpdateUrl or null for current project
|
||||
* onsuccess: callback for successful execution
|
||||
* onfail: callback for failed execution
|
||||
*/
|
||||
function _updateProject (url, targets, default_image, onsuccess, onfail) {
|
||||
|
||||
if (!url)
|
||||
url = libtoaster.ctx.xhrProjectUpdateUrl;
|
||||
|
||||
/* Flatten the array of targets into a space spearated list */
|
||||
if (targets instanceof Array){
|
||||
targets = targets.reduce(function(prevV, nextV){
|
||||
return prev + ' ' + next;
|
||||
});
|
||||
}
|
||||
|
||||
$.ajax( {
|
||||
type: "POST",
|
||||
url: url,
|
||||
data: { 'do_update' : 'True' , 'targets' : targets , 'default_image' : default_image , },
|
||||
headers: { 'X-CSRFToken' : $.cookie('csrftoken')},
|
||||
success: function (_data) {
|
||||
if (_data.error !== "ok") {
|
||||
console.warn(_data.error);
|
||||
} else {
|
||||
if (onsuccess !== undefined) onsuccess(_data);
|
||||
}
|
||||
},
|
||||
error: function (_data) {
|
||||
console.warn("Call failed");
|
||||
console.warn(_data);
|
||||
if (onfail) onfail(data);
|
||||
} });
|
||||
}
|
||||
|
||||
/* _cancelProject:
|
||||
* url: xhrProjectUpdateUrl or null for current project
|
||||
* onsuccess: callback for successful execution
|
||||
* onfail: callback for failed execution
|
||||
*/
|
||||
function _cancelProject (url, onsuccess, onfail) {
|
||||
|
||||
if (!url)
|
||||
url = libtoaster.ctx.xhrProjectCancelUrl;
|
||||
|
||||
$.ajax( {
|
||||
type: "POST",
|
||||
url: url,
|
||||
data: { 'do_cancel' : 'True' },
|
||||
headers: { 'X-CSRFToken' : $.cookie('csrftoken')},
|
||||
success: function (_data) {
|
||||
if (_data.error !== "ok") {
|
||||
console.warn(_data.error);
|
||||
} else {
|
||||
if (onsuccess !== undefined) onsuccess(_data);
|
||||
}
|
||||
},
|
||||
error: function (_data) {
|
||||
console.warn("Call failed");
|
||||
console.warn(_data);
|
||||
if (onfail) onfail(data);
|
||||
} });
|
||||
}
|
||||
|
||||
/* _setDefaultImage:
|
||||
* url: xhrSetDefaultImageUrl or null for current project
|
||||
* targets: an array or space separated list of targets to set as default
|
||||
* onsuccess: callback for successful execution
|
||||
* onfail: callback for failed execution
|
||||
*/
|
||||
function _setDefaultImage (url, targets, onsuccess, onfail) {
|
||||
|
||||
if (!url)
|
||||
url = libtoaster.ctx.xhrSetDefaultImageUrl;
|
||||
|
||||
/* Flatten the array of targets into a space spearated list */
|
||||
if (targets instanceof Array){
|
||||
targets = targets.reduce(function(prevV, nextV){
|
||||
return prev + ' ' + next;
|
||||
});
|
||||
}
|
||||
|
||||
$.ajax( {
|
||||
type: "POST",
|
||||
url: url,
|
||||
data: { 'targets' : targets },
|
||||
headers: { 'X-CSRFToken' : $.cookie('csrftoken')},
|
||||
success: function (_data) {
|
||||
if (_data.error !== "ok") {
|
||||
console.warn(_data.error);
|
||||
} else {
|
||||
if (onsuccess !== undefined) onsuccess(_data);
|
||||
}
|
||||
},
|
||||
error: function (_data) {
|
||||
console.warn("Call failed");
|
||||
console.warn(_data);
|
||||
if (onfail) onfail(data);
|
||||
} });
|
||||
}
|
||||
|
||||
return {
|
||||
enableAjaxLoadingTimer: _enableAjaxLoadingTimer,
|
||||
disableAjaxLoadingTimer: _disableAjaxLoadingTimer,
|
||||
@@ -588,9 +485,6 @@ var libtoaster = (function () {
|
||||
createCustomRecipe: _createCustomRecipe,
|
||||
makeProjectNameValidation: _makeProjectNameValidation,
|
||||
setNotification: _setNotification,
|
||||
updateProject : _updateProject,
|
||||
cancelProject : _cancelProject,
|
||||
setDefaultImage : _setDefaultImage,
|
||||
};
|
||||
})();
|
||||
|
||||
|
||||
@@ -86,7 +86,7 @@ function mrbSectionInit(ctx){
if (buildFinished(build)) {
// a build finished: reload the whole page so that the build
// shows up in the builds table
window.location.reload(true);
window.location.reload();
}
else if (stateChanged(build)) {
// update the whole template
@@ -110,8 +110,6 @@ function mrbSectionInit(ctx){
// update the clone progress text
selector = '#repos-cloned-percentage-' + build.id;
$(selector).html(build.repos_cloned_percentage);
selector = '#repos-cloned-progressitem-' + build.id;
$(selector).html('('+build.progress_item+')');

// update the recipe progress bar
selector = '#repos-cloned-percentage-bar-' + build.id;

@@ -25,8 +25,6 @@ function newCustomImageModalInit(){
var duplicateNameMsg = "An image with this name already exists. Image names must be unique.";
var duplicateImageInProjectMsg = "An image with this name already exists in this project."
var invalidBaseRecipeIdMsg = "Please select an image to customise.";
var missingParentRecipe = "The parent recipe file was not found. Cancel this action, build any target (like 'quilt-native') to force all new layers to clone, and try again";
var unknownError = "Unexpected error: ";

// set button to "submit" state and enable text entry so user can
// enter the custom recipe name
@@ -64,7 +62,6 @@ function newCustomImageModalInit(){
if (nameInput.val().length > 0) {
libtoaster.createCustomRecipe(nameInput.val(), baseRecipeId,
function(ret) {
showSubmitState();
if (ret.error !== "ok") {
console.warn(ret.error);
if (ret.error === "invalid-name") {
@@ -76,10 +73,6 @@ function newCustomImageModalInit(){
} else if (ret.error === "image-already-exists") {
showNameError(duplicateImageInProjectMsg);
return;
} else if (ret.error === "recipe-parent-not-exist") {
showNameError(missingParentRecipe);
} else {
showNameError(unknownError + ret.error);
}
} else {
imgCustomModal.modal('hide');

@@ -14,9 +14,6 @@ function projectTopBarInit(ctx) {
var newBuildTargetBuildBtn = $("#build-button");
var selectedTarget;

var updateProjectBtn = $("#update-project-button");
var cancelProjectBtn = $("#cancel-project-button");

/* Project name change functionality */
projectNameFormToggle.click(function(e){
e.preventDefault();
@@ -92,25 +89,6 @@ function projectTopBarInit(ctx) {
}, null);
});

updateProjectBtn.click(function (e) {
e.preventDefault();

selectedTarget = { name: "_PROJECT_PREPARE_" };

/* Save current default build image, fire off the build */
libtoaster.updateProject(null, selectedTarget.name, newBuildTargetInput.val().trim(),
function(){
window.location.replace(libtoaster.ctx.projectSpecificPageUrl);
}, null);
});

cancelProjectBtn.click(function (e) {
e.preventDefault();

/* redirect to 'done/canceled' landing page */
window.location.replace(libtoaster.ctx.landingSpecificCancelURL);
});

/* Call makeProjectNameValidation function */
libtoaster.makeProjectNameValidation($("#project-name-change-input"),
$("#hint-error-project-name"), $("#validate-project-name"),
@@ -35,8 +35,6 @@ from toastergui.tablefilter import TableFilterActionToggle
from toastergui.tablefilter import TableFilterActionDateRange
from toastergui.tablefilter import TableFilterActionDay

import os

class ProjectFilters(object):
@staticmethod
def in_project(project_layers):
@@ -341,8 +339,6 @@ class RecipesTable(ToasterTable):
'filter_name' : "in_current_project",
'static_data_name' : "add-del-layers",
'static_data_template' : '{% include "recipe_btn.html" %}'}
if '1' == os.environ.get('TOASTER_PROJECTSPECIFIC'):
build_col['static_data_template'] = '{% include "recipe_add_btn.html" %}'

def get_context_data(self, **kwargs):
project = Project.objects.get(pk=kwargs['pid'])
@@ -1615,12 +1611,14 @@ class DistrosTable(ToasterTable):
hidden=True,
field_name="layer_version__get_vcs_reference")

distro_file_template = '''<code>conf/distro/{{data.name}}.conf</code>
{% if 'None' not in data.get_vcs_distro_file_link_url %}<a href="{{data.get_vcs_distro_file_link_url}}" target="_blank"><span class="glyphicon glyphicon-new-window"></i></a>{% endif %}'''
wrtemplate_file_template = '''<code>conf/machine/{{data.name}}.conf</code>
<a href="{{data.get_vcs_machine_file_link_url}}" target="_blank"><span class="glyphicon glyphicon-new-window"></i></a>'''

self.add_column(title="Distro file",
hidden=True,
static_data_name="templatefile",
static_data_template=distro_file_template)
static_data_template=wrtemplate_file_template)

self.add_column(title="Select",
help_text="Sets the selected distro to the project",

@@ -110,7 +110,6 @@
All builds
</a>
</li>
{% if project_enable %}
<li id="navbar-all-projects"
{% if request.resolver_match.url_name == 'all-projects' %}
class="active"
@@ -120,7 +119,6 @@
All projects
</a>
</li>
{% endif %}
{% endif %}
<li id="navbar-docs">
<a target="_blank" href="http://www.yoctoproject.org/docs/latest/toaster-manual/toaster-manual.html">
@@ -129,9 +127,7 @@
</a>
</li>
</ul>
{% if project_enable %}
<a class="btn btn-default navbar-btn navbar-right" id="new-project-button" href="{% url 'newproject' %}">New project</a>
{% endif %}
</div>
</div>
</nav>
Some files were not shown because too many files have changed in this diff.