Mirror of https://git.yoctoproject.org/poky (synced 2026-02-16 05:33:03 +01:00)

Comparing yocto-2.6...morty-16.0: 380 commits
.gitignore (vendored, 11 changes)

@@ -1,7 +1,6 @@
*.pyc
*.pyo
/*.patch
/.repo/
/build*/
pyshtables.py
pstage/
@@ -19,13 +18,9 @@ hob-image-*.bb
!meta-yocto
!meta-yocto-bsp
!meta-yocto-imported
/documentation/*/eclipse/
/documentation/*/*.html
/documentation/*/*.pdf
/documentation/*/*.tgz
/bitbake/doc/bitbake-user-manual/bitbake-user-manual.html
/bitbake/doc/bitbake-user-manual/bitbake-user-manual.pdf
/bitbake/doc/bitbake-user-manual/bitbake-user-manual.tgz
documentation/user-manual/user-manual.html
documentation/user-manual/user-manual.pdf
documentation/user-manual/user-manual.tgz
pull-*/
bitbake/lib/toaster/contrib/tts/backlog.txt
bitbake/lib/toaster/contrib/tts/log/*
README (new file, 58 lines)

@@ -0,0 +1,58 @@
Poky
====

Poky is an integration of various components to form a complete prepackaged
build system and development environment. It features support for building
customised embedded device style images. There are reference demo images
featuring an X11/Matchbox/GTK themed UI called Sato. The system supports
cross-architecture application development using QEMU emulation and a
standalone toolchain and SDK with IDE integration.

Additional information on the specifics of hardware that Poky supports
is available in README.hardware. Further hardware support can easily be added
in the form of layers which extend the system's capabilities in a modular way.

As an integration layer, Poky consists of several upstream projects such as
BitBake, OpenEmbedded-Core, Yocto documentation and various sources of
information, e.g. for the hardware support. Poky is in turn a component of the
Yocto Project.

The Yocto Project has extensive documentation about the system, including a
reference manual, which can be found at:
    http://yoctoproject.org/documentation

OpenEmbedded-Core is a layer containing the core metadata for current versions
of OpenEmbedded. It is distro-less (it can build a functional image with
DISTRO = "nodistro") and contains only emulated machine support.

For information about OpenEmbedded, see the OpenEmbedded website:
    http://www.openembedded.org/

Where to Send Patches
=====================

As Poky is an integration repository (built using a tool called combo-layer),
patches against the various components should be sent to their respective
upstreams:

bitbake:
    Git repository: http://git.openembedded.org/bitbake/
    Mailing list: bitbake-devel@lists.openembedded.org

documentation:
    Git repository: http://git.yoctoproject.org/cgit/cgit.cgi/yocto-docs/
    Mailing list: yocto@yoctoproject.org

meta-poky, meta-yocto-bsp:
    Git repository: http://git.yoctoproject.org/cgit/cgit.cgi/meta-yocto(-bsp)
    Mailing list: poky@yoctoproject.org

Everything else should be sent to the OpenEmbedded-Core mailing list. If in
doubt, check the oe-core git repository for the content you intend to modify.
Before sending, be sure the patches apply cleanly to the current oe-core git
repository.

    Git repository: http://git.openembedded.org/openembedded-core/
    Mailing list: openembedded-core@lists.openembedded.org

Note: The scripts directory should be treated with extra care as it is a mix
of oe-core and poky-specific files.
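When it is not obvious where a given file comes from, a quick history check in a local openembedded-core clone usually settles it; a minimal sketch (the clone location and example path are illustrative, not part of the README):

    $ git clone http://git.openembedded.org/openembedded-core
    $ cd openembedded-core
    $ git log --oneline -3 -- scripts/oe-run-native

If the file exists with history there, patches belong on the oe-core list; otherwise poky@yoctoproject.org is the safer destination.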
README.LSB (deleted, 26 lines)

@@ -1,26 +0,0 @@
OE-Core aims to be able to provide basic LSB compatible images. There
are some challenges for OE as LSB isn't always 100% relevant to its
target embedded and IoT audiences.

One challenge is that the LSB spec is no longer being actively
developed [https://github.com/LinuxStandardBase/lsb] and has
components which are end of life or significantly dated. OE
therefore provides compatibility with the following caveats:

* Qt4 is provided by the separate meta-qt4 layer. It's noted that Qt4
  is end of life and this isn't something the core project regularly
  tests any longer. Users are recommended to group together to support
  maintenance of that layer. [http://git.yoctoproject.org/cgit/cgit.cgi/meta-qt4/]

* mailx has been dropped since it's no longer being developed upstream
  and there are better, more modern replacements such as s-nail
  (http://sdaoden.eu/code.html) or mailutils (http://mailutils.org/).

* A few perl modules that were required by LSB 4.x aren't provided:
  libclass-isa, libenv, libdumpvalue, libfile-checktree,
  libi18n-collate, libpod-plainer.

* libpng 1.2 isn't provided; oe-core includes the latest release of libpng
  instead.

* The pax (POSIX standard archive) tool is not provided.
(symlink removed)

@@ -1 +0,0 @@
meta-yocto-bsp/README.hardware
README.hardware (new file, 360 lines)

@@ -0,0 +1,360 @@
Poky Hardware README
====================

This file gives details about using Poky with the reference machines
supported out of the box. A full list of supported reference target machines
can be found by looking in the following directories:

    meta/conf/machine/
    meta-yocto-bsp/conf/machine/

If you are in doubt about using Poky/OpenEmbedded with your hardware, consult
the documentation for your board/device.

Support for additional devices is normally added by creating BSP layers - for
more information please see the Yocto Board Support Package (BSP) Developer's
Guide - documentation source is in documentation/bspguide or download the PDF
from:

    http://yoctoproject.org/documentation

Support for physical reference hardware has now been split out into a
meta-yocto-bsp layer which can be removed separately from other layers if not
needed.


QEMU Emulation Targets
======================

To simplify development, the build system supports building images to
work with the QEMU emulator in system emulation mode. Several architectures
are currently supported:

  * ARM (qemuarm)
  * x86 (qemux86)
  * x86-64 (qemux86-64)
  * PowerPC (qemuppc)
  * MIPS (qemumips)

Use of the QEMU images is covered in the Yocto Project Reference Manual.
The appropriate MACHINE variable value corresponding to the target is given
in brackets.
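As a quick orientation, a typical build-and-boot cycle for one of these QEMU targets looks like the following; this is a minimal sketch assuming the build environment has been initialised with the usual oe-init-build-env script:

    $ source oe-init-build-env
    $ MACHINE=qemux86-64 bitbake core-image-minimal
    $ runqemu qemux86-64

Setting MACHINE in conf/local.conf instead of on the command line works equally well.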
Hardware Reference Boards
=========================

The following boards are supported by the meta-yocto-bsp layer:

  * Texas Instruments Beaglebone (beaglebone)
  * Freescale MPC8315E-RDB (mpc8315e-rdb)

For more information see the board's section below. The appropriate MACHINE
variable value corresponding to the board is given in brackets.

Reference Board Maintenance
===========================

Send pull requests, patches, comments or questions about meta-yocto-bsps to
poky@yoctoproject.org

Maintainers: Kevin Hao <kexin.hao@windriver.com>
             Bruce Ashfield <bruce.ashfield@windriver.com>

Consumer Devices
================

The following consumer devices are supported by the meta-yocto-bsp layer:

  * Intel x86 based PCs and devices (genericx86)
  * Ubiquiti Networks EdgeRouter Lite (edgerouter)

For more information see the device's section below. The appropriate MACHINE
variable value corresponding to the device is given in brackets.
Specific Hardware Documentation
===============================


Intel x86 based PCs and devices (genericx86*)
=============================================

The genericx86 and genericx86-64 MACHINEs are tested on the following platforms:

Intel Xeon/Core i-Series:
  + Intel NUC5 Series - ix-52xx Series SOC (Broadwell)
  + Intel NUC6 Series - ix-62xx Series SOC (Skylake)
  + Intel Shumway Xeon Server

Intel Atom platforms:
  + MinnowBoard MAX - E3825 SOC (Bay Trail)
  + MinnowBoard MAX - Turbot (ADI Engineering) - E3826 SOC (Bay Trail)
    - These boards can run in either 32bit or 64bit modes depending on firmware
    - See minnowboard.org for details
  + Intel Braswell SOC

and are likely to work on many unlisted Atom/Core/Xeon based devices. The
MACHINE type supports ethernet, wifi, sound, and Intel/vesa graphics by
default, in addition to common PC input devices, busses, and so on.

Depending on the device, it can boot from a traditional hard-disk, a USB
device, or over the network. Writing generated images to physical media is
straightforward, with a caveat for USB devices. The following examples assume
the target boot device is /dev/sdb; be sure to verify this and use the correct
device, as the following commands are run as root and are not reversible.
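One way to double-check the device name before running dd (a hedged example; device names will differ per system):

    # lsblk -o NAME,SIZE,MODEL,TRAN
    # umount /dev/sdb* 2>/dev/null

The TRAN column shows which disks are attached over USB, which helps avoid overwriting an internal drive.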
USB Device:
 1. Build a live image. This image type consists of a simple filesystem
    without a partition table, which is suitable for USB keys; with the
    default setup for the genericx86 machine, this image type is built
    automatically for any image you build. For example:

        $ bitbake core-image-minimal

 2. Use the "dd" utility to write the image to the raw block device. For
    example:

        # dd if=core-image-minimal-genericx86.hddimg of=/dev/sdb

    If the device fails to boot with "Boot error" displayed, or apparently
    stops just after the SYSLINUX version banner, it is likely the BIOS cannot
    understand the physical layout of the disk (or rather it expects a
    particular layout and cannot handle anything else). There are two possible
    solutions to this problem:

    1. Change the BIOS USB Device setting to HDD mode. The label will vary by
       device, but the idea is to force BIOS to read the Cylinder/Head/Sector
       geometry from the device.

    2. Use a ".wic" image with an EFI partition:

       a) With the default grub-efi bootloader:

              # dd if=core-image-minimal-genericx86-64.wic of=/dev/sdb

       b) Use systemd-boot instead:
          - Build an image with EFI_PROVIDER="systemd-boot", then use the
            above dd command to write the image to a USB stick.
Texas Instruments Beaglebone (beaglebone)
=========================================

The Beaglebone is an ARM Cortex-A8 development board with USB, Ethernet, 2D/3D
accelerated graphics, audio, serial, JTAG, and SD/MMC. The Black adds a faster
CPU, more RAM, eMMC flash and a micro HDMI port. The beaglebone MACHINE is
tested on the following platforms:

  o Beaglebone Black A6
  o Beaglebone A6 (the original "White" model)

The Beaglebone Black has eMMC, while the White does not. Pressing the
USER/BOOT button when powering on will temporarily change the boot order, but
for the sake of simplicity these instructions assume you have erased the eMMC
on the Black, so that its boot behavior matches that of the White and it boots
off the SD card. To do this, issue the following commands from the u-boot
prompt:

    # mmc dev 1
    # mmc erase 0 512

To further tailor these instructions for your board, please refer to the
documentation at http://www.beagleboard.org/bone and
http://www.beagleboard.org/black

From a Linux system with access to the image files, perform the following
steps:

 1. Build an image. For example:

        $ bitbake core-image-minimal

 2. Use the "dd" utility to write the image to the SD card. For example:

        # dd if=core-image-minimal-beaglebone.wic of=/dev/sdb

 3. Insert the SD card into the Beaglebone and boot the board.
Freescale MPC8315E-RDB (mpc8315e-rdb)
=====================================

The MPC8315 PowerPC reference platform (MPC8315E-RDB) is aimed at hardware and
software development of network attached storage (NAS) and digital media
server applications. The MPC8315E-RDB features the PowerQUICC II Pro
processor, which includes a built-in security accelerator.

(Note: you may find it easier to order MPC8315E-RDBA; this appears to be the
same board in an enclosure with accessories. In any case it is fully
compatible with the instructions given here.)

Setup instructions
------------------

You will need the following:
 * NFS root setup on your workstation
 * TFTP server installed on your workstation
 * Straight-thru 9-conductor serial cable (DB9, M/F) connected from your
   PC to UART1
 * Ethernet connected to the first ethernet port on the board
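The NFS root prerequisite amounts to exporting an extracted target rootfs from the workstation; a minimal sketch (the /srv/nfs path is illustrative, not part of the original instructions):

    $ sudo mkdir -p /srv/nfs/mpc8315e-rdb
    $ sudo tar -xjpf core-image-minimal-mpc8315e-rdb.tar.bz2 -C /srv/nfs/mpc8315e-rdb
    $ echo '/srv/nfs/mpc8315e-rdb *(rw,no_root_squash,sync,no_subtree_check)' | sudo tee -a /etc/exports
    $ sudo exportfs -ra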
--- Preparation ---

Note: if you have altered your board's ethernet MAC address(es) from the
defaults, or you need to do so because you want multiple boards on the same
network, then you will need to change the values in the dts file (patch
linux/arch/powerpc/boot/dts/mpc8315erdb.dts within the kernel source). If
you have left them at the factory default then you shouldn't need to do
anything here.

--- Booting from NFS root ---

Load the kernel and dtb (device tree blob), and boot the system as follows:

 1. Get the kernel (uImage-mpc8315e-rdb.bin) and dtb (uImage-mpc8315e-rdb.dtb)
    files from the tmp/deploy directory, and make them available on your TFTP
    server.

 2. Connect the board's first serial port to your workstation and then start
    up your favourite serial terminal so that you will be able to interact
    with the serial console. If you don't have a favourite, picocom is
    suggested:

        $ picocom /dev/ttyUSB0 -b 115200

 3. Power up or reset the board and press a key on the terminal when prompted
    to get to the U-Boot command line.

 4. Set up the environment in U-Boot:

        => setenv ipaddr <board ip>
        => setenv serverip <tftp server ip>
        => setenv bootargs root=/dev/nfs rw nfsroot=<nfsroot ip>:<rootfs path> ip=<board ip>:<server ip>:<gateway ip>:255.255.255.0:mpc8315e:eth0:off console=ttyS0,115200

 5. Download the kernel and dtb, and boot:

        => tftp 1000000 uImage-mpc8315e-rdb.bin
        => tftp 2000000 uImage-mpc8315e-rdb.dtb
        => bootm 1000000 - 2000000

--- Booting from JFFS2 root ---

 1. First boot the board with NFS root.

 2. Erase the MTD partition which will be used as root:

        $ flash_eraseall /dev/mtd3

 3. Copy the JFFS2 image to the MTD partition:

        $ flashcp core-image-minimal-mpc8315e-rdb.jffs2 /dev/mtd3

 4. Then reboot the board and set up the environment in U-Boot:

        => setenv bootargs root=/dev/mtdblock3 rootfstype=jffs2 console=ttyS0,115200
Ubiquiti Networks EdgeRouter Lite (edgerouter)
==============================================

The EdgeRouter Lite is part of the EdgeMax series. It is a MIPS64 router
(based on the Cavium Octeon processor) with 512MB of RAM, which uses an
internal USB pendrive for storage.

Setup instructions
------------------

You will need the following:
 * RJ45 -> serial ("rollover") cable connected from your PC to the CONSOLE
   port on the device
 * Ethernet connected to the first ethernet port on the board

If using NFS as part of the setup process, you will also need:
 * NFS root setup on your workstation
 * TFTP server installed on your workstation (if fetching the kernel from
   TFTP, see below).

--- Preparation ---

Build an image (e.g. core-image-minimal) using "edgerouter" as the MACHINE.
The following instructions are based on core-image-minimal; other image
targets work in much the same way.

--- Booting from NFS root / kernel via TFTP ---

Load the kernel, and boot the system as follows:

 1. Get the kernel (vmlinux) file from the tmp/deploy/images/edgerouter
    directory, and make it available on your TFTP server.

 2. Connect the board's first serial port to your workstation and then start
    up your favourite serial terminal so that you will be able to interact
    with the serial console. If you don't have a favourite, picocom is
    suggested:

        $ picocom /dev/ttyS0 -b 115200

 3. Power up or reset the board and press a key on the terminal when prompted
    to get to the U-Boot command line.

 4. Set up the environment in U-Boot:

        => setenv ipaddr <board ip>
        => setenv serverip <tftp server ip>

 5. Download the kernel and boot:

        => tftp $loadaddr vmlinux
        => bootoctlinux $loadaddr coremask=0x3 root=/dev/nfs rw nfsroot=<nfsroot ip>:<rootfs path> ip=<board ip>:<server ip>:<gateway ip>:<netmask>:edgerouter:eth0:off mtdparts=phys_mapped_flash:512k(boot0),512k(boot1),64k@3072k(eeprom)

--- Booting from USB disk ---

To boot from the USB disk, you either need to remove it from the edgerouter
box and populate it from another computer, or use a previously booted NFS
image and populate it from the edgerouter itself.

Type 1: Use partitioned image
-----------------------------

Steps:

 1. Remove the USB disk from the edgerouter and insert it into a computer
    that has access to your build artifacts.

 2. Flash the image:

        # dd if=core-image-minimal-edgerouter.wic of=/dev/sdb

 3. Insert the USB disk into the edgerouter and boot it.

Type 2: NFS
-----------

Note: If you place the kernel on the ext3 partition, you must re-create the
ext3 filesystem, since the factory u-boot can only handle 128 byte inodes and
cannot read the partition otherwise.

These boot instructions assume that you have recreated the ext3 filesystem
with 128 byte inodes, that you have an updated u-boot, or that you are running
an image capable of making the filesystem on the board itself.
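Recreating that filesystem with 128 byte inodes can be done from any Linux machine with the stick attached; a hedged sketch (the partition name is an assumption - verify it first with lsblk):

    # mke2fs -t ext3 -I 128 /dev/sdb2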
 1. Boot from NFS root.

 2. Mount USB disk partition 2 and extract the contents of
    tmp/deploy/core-image-XXXX.tar.bz2 into it.

    Before starting, copy core-image-minimal-xxx.tar.bz2 and vmlinux into
    the rootfs path on your workstation. Then:

        # mount /dev/sda2 /media/sda2
        # tar -xvjpf core-image-minimal-XXX.tar.bz2 -C /media/sda2
        # cp vmlinux /media/sda2/boot/vmlinux
        # umount /media/sda2
        # reboot

 3. Reboot the board and press a key on the terminal when prompted to get to
    the U-Boot command line:

        # reboot

 4. Load the kernel and boot:

        => ext2load usb 0:2 $loadaddr boot/vmlinux
        => bootoctlinux $loadaddr coremask=0x3 root=/dev/sda2 rw rootwait mtdparts=phys_mapped_flash:512k(boot0),512k(boot1),64k@3072k(eeprom)
(symlink removed)

@@ -1 +0,0 @@
meta-poky/README.poky
README.qemu (deleted, 15 lines)

@@ -1,15 +0,0 @@
QEMU Emulation Targets
======================

To simplify development, the build system supports building images to
work with the QEMU emulator in system emulation mode. Several architectures
are currently supported in 32 and 64 bit variants:

  * ARM (qemuarm + qemuarm64)
  * x86 (qemux86 + qemux86-64)
  * PowerPC (qemuppc only)
  * MIPS (qemumips + qemumips64)

Use of the QEMU images is covered in the Yocto Project Reference Manual.
The appropriate MACHINE variable value corresponding to the target is given
in brackets.
@@ -15,5 +15,3 @@ Foundation and individual contributors.
* QUnit is redistributed under the MIT license.

* Font Awesome fonts redistributed under the SIL Open Font License 1.1

* simplediff is distributed under the zlib license.
bitbake/README (deleted, 35 lines)

@@ -1,35 +0,0 @@
Bitbake
=======

BitBake is a generic task execution engine that allows shell and Python tasks
to be run efficiently and in parallel while working within complex inter-task
dependency constraints. One of BitBake's main users, OpenEmbedded, takes this
core and builds embedded Linux software stacks using a task-oriented approach.

For information about BitBake, see the OpenEmbedded website:
    http://www.openembedded.org/

BitBake plain documentation can be found under the doc directory or its
integrated html version at the Yocto Project website:
    http://yoctoproject.org/documentation

Contributing
------------

Please refer to
    http://www.openembedded.org/wiki/How_to_submit_a_patch_to_OpenEmbedded
for guidelines on how to submit patches; note that that documentation is
intended for OpenEmbedded (and its core) rather than for BitBake patches
(which go to bitbake-devel@lists.openembedded.org), but in general the same
guidelines apply. Once the commit(s) have been created, the way to send the
patch is through git-send-email. For example, to send the last commit (HEAD)
on the current branch, type:

    git send-email -M -1 --to bitbake-devel@lists.openembedded.org

Mailing list:
    http://lists.openembedded.org/mailman/listinfo/bitbake-devel

Source code:
    http://git.openembedded.org/bitbake/
bitbake/bin/bitbake

@@ -36,9 +36,9 @@ from bb import cookerdata
from bb.main import bitbake_main, BitBakeConfigParameters, BBMainException

if sys.getfilesystemencoding() != "utf-8":
    sys.exit("Please use a locale setting which supports UTF-8 (such as LANG=en_US.UTF-8).\nPython can't change the filesystem locale after loading so we need a UTF-8 when Python starts or things won't work.")
    sys.exit("Please use a locale setting which supports utf-8.\nPython can't change the filesystem locale after loading so we need a utf-8 when python starts or things won't work.")

__version__ = "1.40.0"
__version__ = "1.32.0"

if __name__ == "__main__":
    if __version__ != bb.__version__:
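In practice the check above is satisfied by exporting a UTF-8 locale before invoking bitbake, for example:

    $ export LANG=en_US.UTF-8
    $ bitbake core-image-minimal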
bitbake/bin/bitbake-diffsigs

@@ -1,9 +1,9 @@
#!/usr/bin/env python3

# bitbake-diffsigs / bitbake-dumpsig
# BitBake task signature data dump and comparison utility
# bitbake-diffsigs
# BitBake task signature data comparison utility
#
# Copyright (C) 2012-2013, 2017 Intel Corporation
# Copyright (C) 2012-2013 Intel Corporation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
@@ -21,7 +21,8 @@
import os
import sys
import warnings
import argparse
import fnmatch
import optparse
import logging
import pickle

@@ -29,180 +30,109 @@ sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), '

import bb.tinfoil
import bb.siggen
import bb.msg

myname = os.path.basename(sys.argv[0])
logger = bb.msg.logger_create(myname)
def logger_create(name, output=sys.stderr):
    logger = logging.getLogger(name)
    console = logging.StreamHandler(output)
    format = bb.msg.BBLogFormatter("%(levelname)s: %(message)s")
    if output.isatty():
        format.enable_color()
    console.setFormatter(format)
    logger.addHandler(console)
    logger.setLevel(logging.INFO)
    return logger

is_dump = myname == 'bitbake-dumpsig'
logger = logger_create('bitbake-diffsigs')

def find_siginfo(tinfoil, pn, taskname, sigs=None):
    result = None
    tinfoil.set_event_mask(['bb.event.FindSigInfoResult',
                            'logging.LogRecord',
                            'bb.command.CommandCompleted',
                            'bb.command.CommandFailed'])
    ret = tinfoil.run_command('findSigInfo', pn, taskname, sigs)
    if ret:
        while True:
            event = tinfoil.wait_event(1)
            if event:
                if isinstance(event, bb.command.CommandCompleted):
                    break
                elif isinstance(event, bb.command.CommandFailed):
                    logger.error(str(event))
                    sys.exit(2)
                elif isinstance(event, bb.event.FindSigInfoResult):
                    result = event.result
                elif isinstance(event, logging.LogRecord):
                    logger.handle(event)
    else:
        logger.error('No result returned from findSigInfo command')
        sys.exit(2)
    return result
def find_compare_task(bbhandler, pn, taskname):
    """ Find the most recent signature files for the specified PN/task and compare them """

def find_siginfo_task(bbhandler, pn, taskname, sig1=None, sig2=None):
    """ Find the most recent signature files for the specified PN/task """
    def get_hashval(siginfo):
        if siginfo.endswith('.siginfo'):
            return siginfo.rpartition(':')[2].partition('_')[0]
        else:
            return siginfo.rpartition('.')[2]

    if not hasattr(bb.siggen, 'find_siginfo'):
        logger.error('Metadata does not support finding signature data files')
        sys.exit(1)

    if not taskname.startswith('do_'):
        taskname = 'do_%s' % taskname

    if sig1 and sig2:
        sigfiles = find_siginfo(bbhandler, pn, taskname, [sig1, sig2])
        if len(sigfiles) == 0:
            logger.error('No sigdata files found matching %s %s matching either %s or %s' % (pn, taskname, sig1, sig2))
            sys.exit(1)
        elif not sig1 in sigfiles:
            logger.error('No sigdata files found matching %s %s with signature %s' % (pn, taskname, sig1))
            sys.exit(1)
        elif not sig2 in sigfiles:
            logger.error('No sigdata files found matching %s %s with signature %s' % (pn, taskname, sig2))
            sys.exit(1)
        latestfiles = [sigfiles[sig1], sigfiles[sig2]]
    filedates = bb.siggen.find_siginfo(pn, taskname, None, bbhandler.config_data)
    latestfiles = sorted(filedates.keys(), key=lambda f: filedates[f])[-3:]
    if not latestfiles:
        logger.error('No sigdata files found matching %s %s' % (pn, taskname))
        sys.exit(1)
    elif len(latestfiles) < 2:
        logger.error('Only one matching sigdata file found for the specified task (%s %s)' % (pn, taskname))
        sys.exit(1)
    else:
        filedates = find_siginfo(bbhandler, pn, taskname)
        latestfiles = sorted(filedates.keys(), key=lambda f: filedates[f])[-2:]
        if not latestfiles:
            logger.error('No sigdata files found matching %s %s' % (pn, taskname))
            sys.exit(1)
        # It's possible that latestfiles contain 3 elements and the first two have the same hash value.
        # In this case, we delete the second element.
        # The above case is actually the most common one. Because we may have sigdata file and siginfo
        # file having the same hash value. Comparing such two files makes no sense.
        if len(latestfiles) == 3:
            hash0 = get_hashval(latestfiles[0])
            hash1 = get_hashval(latestfiles[1])
            if hash0 == hash1:
                latestfiles.pop(1)

    return latestfiles
    # Define recursion callback
    def recursecb(key, hash1, hash2):
        hashes = [hash1, hash2]
        hashfiles = bb.siggen.find_siginfo(key, None, hashes, bbhandler.config_data)

        recout = []
        if len(hashfiles) == 2:
            out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb)
            recout.extend(list('  ' + l for l in out2))
        else:
            recout.append("Unable to find matching sigdata for %s with hashes %s or %s" % (key, hash1, hash2))

        return recout

    # Recurse into signature comparison
    output = bb.siggen.compare_sigfiles(latestfiles[0], latestfiles[1], recursecb)
    if output:
        print('\n'.join(output))
    sys.exit(0)


# Define recursion callback
def recursecb(key, hash1, hash2):
    hashes = [hash1, hash2]
    hashfiles = find_siginfo(tinfoil, key, None, hashes)

    recout = []
    if len(hashfiles) == 0:
        recout.append("Unable to find matching sigdata for %s with hashes %s or %s" % (key, hash1, hash2))
    elif not hash1 in hashfiles:
        recout.append("Unable to find matching sigdata for %s with hash %s" % (key, hash1))
    elif not hash2 in hashfiles:
        recout.append("Unable to find matching sigdata for %s with hash %s" % (key, hash2))
    else:
        out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb, color=color)
        for change in out2:
            for line in change.splitlines():
                recout.append('  ' + line)
parser = optparse.OptionParser(
    description = "Compares siginfo/sigdata files written out by BitBake",
    usage = """
  %prog -t recipename taskname
  %prog sigdatafile1 sigdatafile2
  %prog sigdatafile1""")

    return recout
parser.add_option("-t", "--task",
                  help = "find the signature data files for last two runs of the specified task and compare them",
                  action="store", dest="taskargs", nargs=2, metavar='recipename taskname')


parser = argparse.ArgumentParser(
    description=("Dumps" if is_dump else "Compares") + " siginfo/sigdata files written out by BitBake")

parser.add_argument('-D', '--debug',
                    help='Enable debug output',
                    action='store_true')

if is_dump:
    parser.add_argument("-t", "--task",
                        help="find the signature data file for the last run of the specified task",
                        action="store", dest="taskargs", nargs=2, metavar=('recipename', 'taskname'))

    parser.add_argument("sigdatafile1",
                        help="Signature file to dump. Not used when using -t/--task.",
                        action="store", nargs='?', metavar="sigdatafile")
else:
    parser.add_argument('-c', '--color',
                        help='Colorize the output (where %(metavar)s is %(choices)s)',
                        choices=['auto', 'always', 'never'], default='auto', metavar='color')

    parser.add_argument('-d', '--dump',
                        help='Dump the last signature data instead of comparing (equivalent to using bitbake-dumpsig)',
                        action='store_true')

    parser.add_argument("-t", "--task",
                        help="find the signature data files for the last two runs of the specified task and compare them",
                        action="store", dest="taskargs", nargs=2, metavar=('recipename', 'taskname'))

    parser.add_argument("-s", "--signature",
                        help="With -t/--task, specify the signatures to look for instead of taking the last two",
                        action="store", dest="sigargs", nargs=2, metavar=('fromsig', 'tosig'))

    parser.add_argument("sigdatafile1",
                        help="First signature file to compare (or signature file to dump, if second not specified). Not used when using -t/--task.",
                        action="store", nargs='?')

    parser.add_argument("sigdatafile2",
                        help="Second signature file to compare",
                        action="store", nargs='?')

options = parser.parse_args()
if is_dump:
    options.color = 'never'
    options.dump = True
    options.sigdatafile2 = None
    options.sigargs = None

if options.debug:
    logger.setLevel(logging.DEBUG)

color = (options.color == 'always' or (options.color == 'auto' and sys.stdout.isatty()))
options, args = parser.parse_args(sys.argv)

if options.taskargs:
    with bb.tinfoil.Tinfoil() as tinfoil:
        tinfoil.prepare(config_only=True)
        if not options.dump and options.sigargs:
            files = find_siginfo_task(tinfoil, options.taskargs[0], options.taskargs[1], options.sigargs[0], options.sigargs[1])
        else:
            files = find_siginfo_task(tinfoil, options.taskargs[0], options.taskargs[1])

        if options.dump:
            logger.debug("Signature file: %s" % files[-1])
            output = bb.siggen.dump_sigfile(files[-1])
        else:
            if len(files) < 2:
                logger.error('Only one matching sigdata file found for the specified task (%s %s)' % (options.taskargs[0], options.taskargs[1]))
                sys.exit(1)

            # Recurse into signature comparison
            logger.debug("Signature file (previous): %s" % files[-2])
            logger.debug("Signature file (latest): %s" % files[-1])
            output = bb.siggen.compare_sigfiles(files[-2], files[-1], recursecb, color=color)
    find_compare_task(tinfoil, options.taskargs[0], options.taskargs[1])
else:
    if options.sigargs:
        logger.error('-s/--signature can only be used together with -t/--task')
        sys.exit(1)
    try:
        if not options.dump and options.sigdatafile1 and options.sigdatafile2:
            with bb.tinfoil.Tinfoil() as tinfoil:
                tinfoil.prepare(config_only=True)
                output = bb.siggen.compare_sigfiles(options.sigdatafile1, options.sigdatafile2, recursecb, color=color)
        elif options.sigdatafile1:
            output = bb.siggen.dump_sigfile(options.sigdatafile1)
        else:
            logger.error('Must specify signature file(s) or -t/--task')
            parser.print_help()
    if len(args) == 1:
        parser.print_help()
    else:
        try:
            if len(args) == 2:
                output = bb.siggen.dump_sigfile(sys.argv[1])
            else:
                output = bb.siggen.compare_sigfiles(sys.argv[1], sys.argv[2])
        except IOError as e:
            logger.error(str(e))
            sys.exit(1)
    except (pickle.UnpicklingError, EOFError):
        logger.error('Invalid signature data - ensure you are specifying sigdata/siginfo files')
        sys.exit(1)
    except IOError as e:
        logger.error(str(e))
        sys.exit(1)
    except (pickle.UnpicklingError, EOFError):
        logger.error('Invalid signature data - ensure you are specifying sigdata/siginfo files')
        sys.exit(1)

    if output:
        print('\n'.join(output))
if output:
    print('\n'.join(output))
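For reference, typical invocations of the comparison tool shown above (recipe, task, and file names are illustrative, not from the original):

    $ bitbake-diffsigs -t busybox do_compile
    $ bitbake-diffsigs <sigdatafile1> <sigdatafile2>

The -t form locates the most recent signature files for the given task and compares them; the positional form compares (or, with a single file, dumps) explicitly named sigdata/siginfo files.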
(symlink removed)

@@ -1 +0,0 @@
bitbake-diffsigs
bitbake/bin/bitbake-dumpsig (new executable file, 65 lines)

@@ -0,0 +1,65 @@
#!/usr/bin/env python3

# bitbake-dumpsig
# BitBake task signature dump utility
#
# Copyright (C) 2013 Intel Corporation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import os
import sys
import warnings
import optparse
import logging
import pickle

sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), 'lib'))

import bb.siggen

def logger_create(name, output=sys.stderr):
    logger = logging.getLogger(name)
    console = logging.StreamHandler(output)
    format = bb.msg.BBLogFormatter("%(levelname)s: %(message)s")
    if output.isatty():
        format.enable_color()
    console.setFormatter(format)
    logger.addHandler(console)
    logger.setLevel(logging.INFO)
    return logger

logger = logger_create('bitbake-dumpsig')

parser = optparse.OptionParser(
    description = "Dumps siginfo/sigdata files written out by BitBake",
    usage = """
  %prog sigdatafile""")

options, args = parser.parse_args(sys.argv)

if len(args) == 1:
    parser.print_help()
else:
    try:
        output = bb.siggen.dump_sigfile(args[1])
    except IOError as e:
        logger.error(str(e))
        sys.exit(1)
    except (pickle.UnpicklingError, EOFError):
        logger.error('Invalid signature data - ensure you are specifying a sigdata/siginfo file')
        sys.exit(1)

    if output:
        print('\n'.join(output))
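Usage mirrors the option parser above; a single signature file is dumped to stdout (the path is illustrative):

    $ bitbake-dumpsig tmp/stamps/<arch>/busybox/<version>.do_compile.sigdata.<hash>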
bitbake/bin/bitbake-layers

@@ -24,26 +24,49 @@ import logging
import os
import sys
import argparse
import signal

bindir = os.path.dirname(__file__)
topdir = os.path.dirname(bindir)
sys.path[0:0] = [os.path.join(topdir, 'lib')]

import bb.tinfoil
import bb.msg

logger = bb.msg.logger_create('bitbake-layers', sys.stdout)

def tinfoil_init(parserecipes):
    import bb.tinfoil
    tinfoil = bb.tinfoil.Tinfoil(tracking=True)
    tinfoil.prepare(not parserecipes)
    tinfoil.logger.setLevel(logger.getEffectiveLevel())
    return tinfoil


def logger_create(name, output=sys.stderr):
    logger = logging.getLogger(name)
    loggerhandler = logging.StreamHandler(output)
    loggerhandler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
    logger.addHandler(loggerhandler)
    logger.setLevel(logging.INFO)
    return logger

def logger_setup_color(logger, color='auto'):
    from bb.msg import BBLogFormatter
    console = logging.StreamHandler(sys.stdout)
    formatter = BBLogFormatter("%(levelname)s: %(message)s")
    console.setFormatter(formatter)
    logger.handlers = [console]
    if color == 'always' or (color == 'auto' and console.stream.isatty()):
        formatter.enable_color()


logger = logger_create('bitbake-layers', sys.stdout)

def main():
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
    parser = argparse.ArgumentParser(
        description="BitBake layers utility",
        epilog="Use %(prog)s <subcommand> --help to get help on a specific command",
        add_help=False)
    parser.add_argument('-d', '--debug', help='Enable debug output', action='store_true')
    parser.add_argument('-q', '--quiet', help='Print only errors', action='store_true')
    parser.add_argument('-F', '--force', help='Force add without recipe parse verification', action='store_true')
    parser.add_argument('--color', choices=['auto', 'always', 'never'], default='auto', help='Colorize output (where %(metavar)s is %(choices)s)', metavar='COLOR')

    global_args, unparsed_args = parser.parse_known_args()
@@ -60,17 +83,13 @@ def main():
    elif global_args.quiet:
        logger.setLevel(logging.ERROR)

    # Need to re-run logger_create with color argument
    # (will be the same logger since it has the same name)
    bb.msg.logger_create('bitbake-layers', output=sys.stdout, color=global_args.color)
    logger_setup_color(logger, global_args.color)

    plugins = []
    tinfoil = bb.tinfoil.Tinfoil(tracking=True)
    tinfoil.logger.setLevel(logger.getEffectiveLevel())
    tinfoil = tinfoil_init(False)
    try:
        tinfoil.prepare(True)
        for path in ([topdir] +
                     tinfoil.config_data.getVar('BBPATH').split(':')):
                     tinfoil.config_data.getVar('BBPATH', True).split(':')):
            pluginpath = os.path.join(path, 'lib', 'bblayers')
            bb.utils.load_plugins(logger, plugins, pluginpath)

@@ -90,7 +109,7 @@ def main():

    if getattr(args, 'parserecipes', False):
        tinfoil.config_data.disableTracking()
        tinfoil.parse_recipes()
        tinfoil.parseRecipes()
        tinfoil.config_data.enableTracking()

    return args.func(args)
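For orientation, the bitbake-layers utility this hunk modifies is driven through subcommands, for example:

    $ bitbake-layers show-layers
    $ bitbake-layers add-layer ../meta-example

show-layers and add-layer are standard subcommands; the layer path here is illustrative.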
bitbake/bin/bitbake-selftest

@@ -22,21 +22,15 @@ sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)), 'lib
import unittest
try:
    import bb
    import layerindexlib
except RuntimeError as exc:
    sys.exit(str(exc))

tests = ["bb.tests.codeparser",
         "bb.tests.cooker",
         "bb.tests.cow",
         "bb.tests.data",
         "bb.tests.event",
         "bb.tests.fetch",
         "bb.tests.parse",
         "bb.tests.utils",
         "layerindexlib.tests.layerindexobj",
         "layerindexlib.tests.restapi",
         "layerindexlib.tests.cooker"]
         "bb.tests.utils"]

for t in tests:
    t = '.'.join(t.split('.')[:3])
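The list above is what the script runs by default. If the script follows the usual unittest-driver pattern (argument handling may differ between the two versions shown here), an individual suite can be selected by name:

    $ bitbake-selftest bb.tests.data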
@@ -17,7 +17,7 @@ from multiprocessing import Lock
|
||||
from threading import Thread
|
||||
|
||||
if sys.getfilesystemencoding() != "utf-8":
|
||||
sys.exit("Please use a locale setting which supports UTF-8 (such as LANG=en_US.UTF-8).\nPython can't change the filesystem locale after loading so we need a UTF-8 when Python starts or things won't work.")
|
||||
sys.exit("Please use a locale setting which supports utf-8.\nPython can't change the filesystem locale after loading so we need a utf-8 when python starts or things won't work.")
|
||||
|
||||
# Users shouldn't be running this code directly
|
||||
if len(sys.argv) != 2 or not sys.argv[1].startswith("decafbad"):
|
||||
@@ -95,7 +95,6 @@ def worker_flush(worker_queue):
|
||||
pass
|
||||
while (worker_queue_int or not worker_queue.empty()):
|
||||
try:
|
||||
(_, ready, _) = select.select([], [worker_pipe], [], 1)
|
||||
if not worker_queue.empty():
|
||||
worker_queue_int = worker_queue_int + worker_queue.get()
|
||||
written = os.write(worker_pipe, worker_queue_int)
|
||||
@@ -136,7 +135,7 @@ def sigterm_handler(signum, frame):
|
||||
os.killpg(0, signal.SIGTERM)
|
||||
sys.exit()
|
||||
|
||||
def fork_off_task(cfg, data, databuilder, workerdata, fn, task, taskname, appends, taskdepdata, extraconfigdata, quieterrors=False, dry_run_exec=False):
|
||||
def fork_off_task(cfg, data, databuilder, workerdata, fn, task, taskname, appends, taskdepdata, quieterrors=False):
|
||||
# We need to setup the environment BEFORE the fork, since
|
||||
# a fork() or exec*() activates PSEUDO...
|
||||
|
||||
@@ -152,10 +151,8 @@ def fork_off_task(cfg, data, databuilder, workerdata, fn, task, taskname, append
|
||||
except TypeError:
|
||||
umask = taskdep['umask'][taskname]
|
||||
|
||||
dry_run = cfg.dry_run or dry_run_exec
|
||||
|
||||
# We can't use the fakeroot environment in a dry run as it possibly hasn't been built
|
||||
if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not dry_run:
|
||||
if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not cfg.dry_run:
|
||||
envvars = (workerdata["fakerootenv"][fn] or "").split()
|
||||
for key, value in (var.split('=') for var in envvars):
|
||||
envbackup[key] = os.environ.get(key)
|
||||
@@ -222,21 +219,16 @@ def fork_off_task(cfg, data, databuilder, workerdata, fn, task, taskname, append
|
||||
the_data = databuilder.mcdata[mc]
|
||||
the_data.setVar("BB_WORKERCONTEXT", "1")
|
||||
the_data.setVar("BB_TASKDEPDATA", taskdepdata)
|
||||
if cfg.limited_deps:
|
||||
the_data.setVar("BB_LIMITEDDEPS", "1")
|
||||
the_data.setVar("BUILDNAME", workerdata["buildname"])
|
||||
the_data.setVar("DATE", workerdata["date"])
|
||||
the_data.setVar("TIME", workerdata["time"])
|
||||
for varname, value in extraconfigdata.items():
|
||||
the_data.setVar(varname, value)
|
||||
|
||||
bb.parse.siggen.set_taskdata(workerdata["sigdata"])
|
||||
ret = 0
|
||||
|
||||
the_data = bb_cache.loadDataFull(fn, appends)
|
||||
the_data.setVar('BB_TASKHASH', workerdata["runq_hash"][task])
|
||||
|
||||
bb.utils.set_process_name("%s:%s" % (the_data.getVar("PN"), taskname.replace("do_", "")))
|
||||
bb.utils.set_process_name("%s:%s" % (the_data.getVar("PN", True), taskname.replace("do_", "")))
|
||||
|
||||
# exported_vars() returns a generator which *cannot* be passed to os.environ.update()
|
||||
# successfully. We also need to unset anything from the environment which shouldn't be there
|
||||
@@ -251,11 +243,11 @@ def fork_off_task(cfg, data, databuilder, workerdata, fn, task, taskname, append
|
||||
the_data.setVar(e, fakeenv[e])
|
||||
the_data.setVarFlag(e, 'export', "1")
|
||||
|
||||
task_exports = the_data.getVarFlag(taskname, 'exports')
|
||||
task_exports = the_data.getVarFlag(taskname, 'exports', True)
|
||||
if task_exports:
|
||||
            for e in task_exports.split():
                the_data.setVarFlag(e, 'export', '1')
                v = the_data.getVar(e)
                v = the_data.getVar(e, True)
                if v is not None:
                    os.environ[e] = v

@@ -267,7 +259,7 @@ def fork_off_task(cfg, data, databuilder, workerdata, fn, task, taskname, append
        logger.critical(traceback.format_exc())
        os._exit(1)
    try:
        if dry_run:
        if cfg.dry_run:
            return 0
        return bb.build.exec_task(fn, taskname, the_data, cfg.profile)
    except:
@@ -336,7 +328,6 @@ class BitbakeWorker(object):
        self.cookercfg = None
        self.databuilder = None
        self.data = None
        self.extraconfigdata = None
        self.build_pids = {}
        self.build_pipes = {}

@@ -371,7 +362,6 @@ class BitbakeWorker(object):
                pass
            if len(self.queue):
                self.handle_item(b"cookerconfig", self.handle_cookercfg)
                self.handle_item(b"extraconfigdata", self.handle_extraconfigdata)
                self.handle_item(b"workerdata", self.handle_workerdata)
                self.handle_item(b"runtask", self.handle_runtask)
                self.handle_item(b"finishnow", self.handle_finishnow)
@@ -379,11 +369,9 @@ class BitbakeWorker(object):
                self.handle_item(b"quit", self.handle_quit)

        for pipe in self.build_pipes:
            if self.build_pipes[pipe].input in ready:
                self.build_pipes[pipe].read()
            self.build_pipes[pipe].read()
        if len(self.build_pids):
            while self.process_waitpid():
                continue
            self.process_waitpid()


    def handle_item(self, item, func):
@@ -400,9 +388,6 @@ class BitbakeWorker(object):
        self.databuilder.parseBaseConfiguration()
        self.data = self.databuilder.data

    def handle_extraconfigdata(self, data):
        self.extraconfigdata = pickle.loads(data)

    def handle_workerdata(self, data):
        self.workerdata = pickle.loads(data)
        bb.msg.loggerDefaultDebugLevel = self.workerdata["logdefaultdebug"]
@@ -425,10 +410,10 @@ class BitbakeWorker(object):
            sys.exit(0)

    def handle_runtask(self, data):
        fn, task, taskname, quieterrors, appends, taskdepdata, dry_run_exec = pickle.loads(data)
        fn, task, taskname, quieterrors, appends, taskdepdata = pickle.loads(data)
        workerlog_write("Handling runtask %s %s %s\n" % (task, fn, taskname))

        pid, pipein, pipeout = fork_off_task(self.cookercfg, self.data, self.databuilder, self.workerdata, fn, task, taskname, appends, taskdepdata, self.extraconfigdata, quieterrors, dry_run_exec)
        pid, pipein, pipeout = fork_off_task(self.cookercfg, self.data, self.databuilder, self.workerdata, fn, task, taskname, appends, taskdepdata, quieterrors)

        self.build_pids[pid] = task
        self.build_pipes[pid] = runQueueWorkerPipe(pipein, pipeout)
@@ -441,9 +426,9 @@ class BitbakeWorker(object):
        try:
            pid, status = os.waitpid(-1, os.WNOHANG)
            if pid == 0 or os.WIFSTOPPED(status):
                return False
                return None
        except OSError:
            return False
            return None

        workerlog_write("Exit code of %s for pid %s\n" % (status, pid))

@@ -462,8 +447,6 @@ class BitbakeWorker(object):

        worker_fire_prepickled(b"<exitcode>" + pickle.dumps((task, status)) + b"</exitcode>")

        return True

    def handle_finishnow(self, _):
        if self.build_pids:
            logger.info("Sending SIGTERM to remaining %s tasks", len(self.build_pids))
@@ -499,3 +482,4 @@ worker_thread.join()

workerlog_write("exiting")
sys.exit(0)
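
The process_waitpid() hunk above relies on non-blocking child reaping. As a minimal standalone sketch (not part of the diff; the helper name below is ours), the os.waitpid/WNOHANG idiom it uses looks like this:

import os

def reap_one_child():
    # Reap a single exited child without blocking, mirroring process_waitpid()
    # above: returns (pid, status) for a finished child, or None when no child
    # has exited yet (pid == 0), a child is merely stopped, or no children exist.
    try:
        pid, status = os.waitpid(-1, os.WNOHANG)
    except OSError:
        return None
    if pid == 0 or os.WIFSTOPPED(status):
        return None
    return pid, status

Callers can loop on it ("while reap_one_child(): ...") exactly the way the worker's main loop drains finished tasks before going back to its select() wait.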


@@ -1,165 +0,0 @@
#!/usr/bin/env python3
"""git-make-shallow: make the current git repository shallow

Remove the history of the specified revisions, then optionally filter the
available refs to those specified.
"""

import argparse
import collections
import errno
import itertools
import os
import subprocess
import sys

version = 1.0


def main():
    if sys.version_info < (3, 4, 0):
        sys.exit('Python 3.4 or greater is required')

    git_dir = check_output(['git', 'rev-parse', '--git-dir']).rstrip()
    shallow_file = os.path.join(git_dir, 'shallow')
    if os.path.exists(shallow_file):
        try:
            check_output(['git', 'fetch', '--unshallow'])
        except subprocess.CalledProcessError:
            try:
                os.unlink(shallow_file)
            except OSError as exc:
                if exc.errno != errno.ENOENT:
                    raise

    args = process_args()
    revs = check_output(['git', 'rev-list'] + args.revisions).splitlines()

    make_shallow(shallow_file, args.revisions, args.refs)

    ref_revs = check_output(['git', 'rev-list'] + args.refs).splitlines()
    remaining_history = set(revs) & set(ref_revs)
    for rev in remaining_history:
        if check_output(['git', 'rev-parse', '{}^@'.format(rev)]):
            sys.exit('Error: %s was not made shallow' % rev)

    filter_refs(args.refs)

    if args.shrink:
        shrink_repo(git_dir)
        subprocess.check_call(['git', 'fsck', '--unreachable'])


def process_args():
    # TODO: add argument to automatically keep local-only refs, since they
    # can't be easily restored with a git fetch.
    parser = argparse.ArgumentParser(description='Remove the history of the specified revisions, then optionally filter the available refs to those specified.')
    parser.add_argument('--ref', '-r', metavar='REF', action='append', dest='refs', help='remove all but the specified refs (cumulative)')
    parser.add_argument('--shrink', '-s', action='store_true', help='shrink the git repository by repacking and pruning')
    parser.add_argument('revisions', metavar='REVISION', nargs='+', help='a git revision/commit')
    if len(sys.argv) < 2:
        parser.print_help()
        sys.exit(2)

    args = parser.parse_args()

    if args.refs:
        args.refs = check_output(['git', 'rev-parse', '--symbolic-full-name'] + args.refs).splitlines()
    else:
        args.refs = get_all_refs(lambda r, t, tt: t == 'commit' or tt == 'commit')

    args.refs = list(filter(lambda r: not r.endswith('/HEAD'), args.refs))
    args.revisions = check_output(['git', 'rev-parse'] + ['%s^{}' % i for i in args.revisions]).splitlines()
    return args


def check_output(cmd, input=None):
    return subprocess.check_output(cmd, universal_newlines=True, input=input)


def make_shallow(shallow_file, revisions, refs):
    """Remove the history of the specified revisions."""
    for rev in follow_history_intersections(revisions, refs):
        print("Processing %s" % rev)
        with open(shallow_file, 'a') as f:
            f.write(rev + '\n')


def get_all_refs(ref_filter=None):
    """Return all the existing refs in this repository, optionally filtering the refs."""
    ref_output = check_output(['git', 'for-each-ref', '--format=%(refname)\t%(objecttype)\t%(*objecttype)'])
    ref_split = [tuple(iter_extend(l.rsplit('\t'), 3)) for l in ref_output.splitlines()]
    if ref_filter:
        ref_split = (e for e in ref_split if ref_filter(*e))
    refs = [r[0] for r in ref_split]
    return refs


def iter_extend(iterable, length, obj=None):
    """Ensure that iterable is the specified length by extending with obj."""
    return itertools.islice(itertools.chain(iterable, itertools.repeat(obj)), length)


def filter_refs(refs):
    """Remove all but the specified refs from the git repository."""
    all_refs = get_all_refs()
    to_remove = set(all_refs) - set(refs)
    if to_remove:
        check_output(['xargs', '-0', '-n', '1', 'git', 'update-ref', '-d', '--no-deref'],
                     input=''.join(l + '\0' for l in to_remove))


def follow_history_intersections(revisions, refs):
    """Determine all the points where the history of the specified revisions intersects the specified refs."""
    queue = collections.deque(revisions)
    seen = set()

    for rev in iter_except(queue.popleft, IndexError):
        if rev in seen:
            continue

        parents = check_output(['git', 'rev-parse', '%s^@' % rev]).splitlines()

        yield rev
        seen.add(rev)

        if not parents:
            continue

        check_refs = check_output(['git', 'merge-base', '--independent'] + sorted(refs)).splitlines()
        for parent in parents:
            for ref in check_refs:
                print("Checking %s vs %s" % (parent, ref))
                try:
                    merge_base = check_output(['git', 'merge-base', parent, ref]).rstrip()
                except subprocess.CalledProcessError:
                    continue
                else:
                    queue.append(merge_base)


def iter_except(func, exception, start=None):
    """Yield a function repeatedly until it raises an exception."""
    try:
        if start is not None:
            yield start()
        while True:
            yield func()
    except exception:
        pass


def shrink_repo(git_dir):
    """Shrink the newly shallow repository, removing the unreachable objects."""
    subprocess.check_call(['git', 'reflog', 'expire', '--expire-unreachable=now', '--all'])
    subprocess.check_call(['git', 'repack', '-ad'])
    try:
        os.unlink(os.path.join(git_dir, 'objects', 'info', 'alternates'))
    except OSError as exc:
        if exc.errno != errno.ENOENT:
            raise
    subprocess.check_call(['git', 'prune', '--expire', 'now'])


if __name__ == '__main__':
    main()
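
The iter_except() helper above is the script's one piece of generic plumbing, so a quick usage sketch may help (standalone Python, duplicating the helper for self-containment; the sample data is arbitrary):

import collections

def iter_except(func, exception, start=None):
    """Yield a function repeatedly until it raises an exception."""
    try:
        if start is not None:
            yield start()
        while True:
            yield func()
    except exception:
        pass

queue = collections.deque(['rev-a', 'rev-b', 'rev-c'])
for rev in iter_except(queue.popleft, IndexError):
    print(rev)  # visits each queued item, then stops cleanly on IndexError

This is the same pattern follow_history_intersections() uses to drain its work queue while still being free to append newly discovered merge bases to it mid-iteration.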

@@ -18,24 +18,35 @@
# along with this program. If not, see http://www.gnu.org/licenses/.

HELP="
Usage: source toaster start|stop [webport=<address:port>] [noweb] [nobuild] [toasterdir]
Usage: source toaster start|stop [webport=<address:port>] [noweb]
    Optional arguments:
        [nobuild] Setup the environment for capturing builds with toaster but disable managed builds
        [noweb] Setup the environment for capturing builds with toaster but don't start the web server
        [noweb] Setup the environment for building with toaster but don't start the development server
        [webport] Set the development server (default: localhost:8000)
        [toasterdir] Set absolute path to be used as TOASTER_DIR (default: BUILDDIR/../)
"

custom_extention()
webserverKillAll()
{
    custom_extension=$BBBASEDIR/lib/toaster/orm/fixtures/custom_toaster_append.sh
    if [ -f $custom_extension ] ; then
        $custom_extension $*
    fi
    local pidfile
    for pidfile in ${BUILDDIR}/.toastermain.pid ${BUILDDIR}/.runbuilds.pid; do
        if [ -f ${pidfile} ]; then
            pid=`cat ${pidfile}`
            while kill -0 $pid 2>/dev/null; do
                kill -SIGTERM -$pid 2>/dev/null
                sleep 1
            done
            rm ${pidfile}
        fi
    done
}

databaseCheck()
webserverStartAll()
{
    # do not start if toastermain points to a valid process
    if ! cat "${BUILDDIR}/.toastermain.pid" 2>/dev/null | xargs -I{} kill -0 {} ; then
        retval=1
        rm "${BUILDDIR}/.toastermain.pid"
    fi

    retval=0
    # you can always add a superuser later via
    # ../bitbake/lib/toaster/manage.py createsuperuser --username=<ME>
@@ -54,45 +65,9 @@ databaseCheck()
        return $retval
    fi

    return $retval
}

webserverKillAll()
{
    local pidfile
    if [ -f ${BUILDDIR}/.toastermain.pid ] ; then
        custom_extention web_stop_postpend
    else
        custom_extention noweb_stop_postpend
    fi
    for pidfile in ${BUILDDIR}/.toastermain.pid ${BUILDDIR}/.runbuilds.pid; do
        if [ -f ${pidfile} ]; then
            pid=`cat ${pidfile}`
            while kill -0 $pid 2>/dev/null; do
                kill -SIGTERM $pid 2>/dev/null
                sleep 1
            done
            rm ${pidfile}
        fi
    done
}

webserverStartAll()
{
    # do not start if toastermain points to a valid process
    if ! cat "${BUILDDIR}/.toastermain.pid" 2>/dev/null | xargs -I{} kill -0 {} ; then
        retval=1
        rm "${BUILDDIR}/.toastermain.pid"
    fi

    retval=0

    # check the database
    databaseCheck || return 1

    echo "Starting webserver..."

    $MANAGE runserver --noreload "$ADDR_PORT" \
    $MANAGE runserver "$ADDR_PORT" \
        </dev/null >>${BUILDDIR}/toaster_web.log 2>&1 \
        & echo $! >${BUILDDIR}/.toastermain.pid

@@ -104,7 +79,6 @@ webserverStartAll()
    else
        echo "Toaster development webserver started at http://$ADDR_PORT"
        echo -e "\nYou can now run 'bitbake <target>' on the command line and monitor your build in Toaster.\nYou can also use a Toaster project to configure and run a build.\n"
        custom_extention web_start_postpend $ADDR_PORT
    fi

    return $retval
@@ -132,18 +106,12 @@ verify_prereq() {
    # Verify Django version
    reqfile=$(python3 -c "import os; print(os.path.realpath('$BBBASEDIR/toaster-requirements.txt'))")
    exp='s/Django\([><=]\+\)\([^,]\+\),\([><=]\+\)\(.\+\)/'
    # expand version parts to 2 digits to support 1.10.x > 1.8
    # (note:helper functions hard to insert in-line)
    exp=$exp'import sys,django;'
    exp=$exp'version=["%02d" % int(n) for n in django.get_version().split(".")];'
    exp=$exp'vmin=["%02d" % int(n) for n in "\2".split(".")];'
    exp=$exp'vmax=["%02d" % int(n) for n in "\4".split(".")];'
    exp=$exp'sys.exit(not (version \1 vmin and version \3 vmax))'
    exp=$exp'/p'
    exp=$exp'import sys,django;version=django.get_version().split(".");'
    exp=$exp'sys.exit(not (version \1 "\2".split(".") and version \3 "\4".split(".")))/p'
    if ! sed -n "$exp" $reqfile | python3 - ; then
        req=`grep ^Django $reqfile`
        echo "This program needs $req"
        echo "Please install with pip3 install -r $reqfile"
        echo "Please install with pip install -r $reqfile"
        return 2
    fi

@@ -161,9 +129,7 @@ fi

export BBBASEDIR=`dirname $TOASTER`/..
MANAGE="python3 $BBBASEDIR/lib/toaster/manage.py"
if [ -z "$OE_ROOT" ]; then
    OE_ROOT=`dirname $TOASTER`/../..
fi
OE_ROOT=`dirname $TOASTER`/../..

# this is the configuration file we are using for toaster
# we are using the same logic that oe-setup-builddir uses
@@ -185,20 +151,23 @@ fi

unset OE_ROOT

# this defines the dir toaster will use for
# 1) clones of layers (in _toaster_clones )
# 2) the build dir (in build)
# 3) the sqlite db if that is being used.
# 4) pid's we need to clean up on exit/shutdown
# note: for future. in order to make this an arbitrary directory, we need to
# make sure that the toaster.sqlite file doesn't default to `pwd` like it currently does.
export TOASTER_DIR=`pwd`

WEBSERVER=1
export TOASTER_BUILDSERVER=1
ADDR_PORT="localhost:8000"
TOASTERDIR=`dirname $BUILDDIR`
unset CMD
for param in $*; do
    case $param in
    noweb )
        WEBSERVER=0
        ;;
    nobuild )
        TOASTER_BUILDSERVER=0
        ;;
    start )
        CMD=$param
        ;;
@@ -215,9 +184,6 @@ for param in $*; do
            ADDR_PORT="localhost:$PORT"
        fi
        ;;
    toasterdir=*)
        TOASTERDIR="${param#*=}"
        ;;
    --help)
        echo "$HELP"
        return 0
@@ -248,8 +214,10 @@ fi
# 2) the build dir (in build)
# 3) the sqlite db if that is being used.
# 4) pid's we need to clean up on exit/shutdown
export TOASTER_DIR=$TOASTERDIR
export BB_ENV_EXTRAWHITE="$BB_ENV_EXTRAWHITE TOASTER_DIR"
# note: for future. in order to make this an arbitrary directory, we need to
# make sure that the toaster.sqlite file doesn't default to `pwd`
# like it currently does.
export TOASTER_DIR=`dirname $BUILDDIR`

# Determine the action. If specified by arguments, fine, if not, toggle it
if [ "$CMD" = "start" ] ; then
@@ -266,7 +234,6 @@ fi
echo "The system will $CMD."

# Execute the commands
custom_extention toaster_prepend $CMD $ADDR_PORT

case $CMD in
    start )
@@ -282,34 +249,15 @@ case $CMD in
        line='INHERIT+="toaster buildhistory"'
        grep -q "$line" $conf || echo $line >> $conf

        if [ $WEBSERVER -eq 0 ] ; then
            # Do not update the database for "noweb" unless
            # it does not yet exist
            if [ ! -f "$TOASTER_DIR/toaster.sqlite" ] ; then
                if ! databaseCheck; then
                    echo "Failed ${CMD}."
                    return 4
                fi
            fi
            custom_extention noweb_start_postpend $ADDR_PORT
        fi
        if [ $WEBSERVER -gt 0 ] && ! webserverStartAll; then
            echo "Failed ${CMD}."
            return 4
        fi
        export BITBAKE_UI='toasterui'
        if [ $TOASTER_BUILDSERVER -eq 1 ] ; then
            $MANAGE runbuilds \
                </dev/null >>${BUILDDIR}/toaster_runbuilds.log 2>&1 \
                & echo $! >${BUILDDIR}/.runbuilds.pid
        else
            echo "Toaster build server not started."
        fi

        $MANAGE runbuilds & echo $! >${BUILDDIR}/.runbuilds.pid
        # set fail safe stop system on terminal exit
        trap stop_system SIGHUP
        echo "Successful ${CMD}."
        custom_extention toaster_postpend $CMD $ADDR_PORT
        return 0
        ;;
    stop )
@@ -317,5 +265,3 @@ case $CMD in
        echo "Successful ${CMD}."
        ;;
esac
custom_extention toaster_postpend $CMD $ADDR_PORT
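
One side of the verify_prereq() hunk above builds, via sed, a Python one-liner that zero-pads each Django version component to two digits before comparing against the bounds in toaster-requirements.txt. A standalone sketch of that comparison (illustrative names and version bounds only):

def pad(version_string):
    # "1.10.1" -> ["01", "10", "01"], so list comparison orders 1.10 above 1.8
    return ["%02d" % int(n) for n in version_string.split(".")]

version = pad("1.10.1")            # stand-in for django.get_version()
vmin, vmax = pad("1.8"), pad("1.12")
print(vmin <= version and version < vmax)   # True

Without the padding, a plain string comparison of the split components ranks "1.8" above "1.10", which is exactly the ordering bug the two-digit expansion addresses.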


@@ -2,7 +2,7 @@
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (C) 2012, 2018 Wind River Systems, Inc.
# Copyright (C) 2012 Wind River Systems, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
@@ -18,68 +18,51 @@
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

#
# Used for dumping the bb_cache.dat
# This is used for dumping the bb_cache.dat, the output format is:
# recipe_path PN PV PACKAGES
#
import os
import sys
import argparse
import warnings

# For importing bb.cache
sys.path.insert(0, os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), '../lib'))
from bb.cache import CoreRecipeInfo

import pickle
import pickle as pickle

class DumpCache(object):
    def __init__(self):
        parser = argparse.ArgumentParser(
            description="bb_cache.dat's dumper",
            epilog="Use %(prog)s --help to get help")
        parser.add_argument("-r", "--recipe",
            help="specify the recipe, default: all recipes", action="store")
        parser.add_argument("-m", "--members",
            help = "specify the member, use comma as separator for multiple ones, default: all members", action="store", default="")
        parser.add_argument("-s", "--skip",
            help = "skip skipped recipes", action="store_true")
        parser.add_argument("cachefile",
            help = "specify bb_cache.dat", nargs = 1, action="store", default="")
def main(argv=None):
    """
    Get the mapping for the target recipe.
    """
    if len(argv) != 1:
        print("Error, need one argument!", file=sys.stderr)
        return 2

        self.args = parser.parse_args()
    cachefile = argv[0]

    def main(self):
        with open(self.args.cachefile[0], "rb") as cachefile:
            pickled = pickle.Unpickler(cachefile)
            while True:
                try:
                    key = pickled.load()
                    val = pickled.load()
                except Exception:
                    break
                if isinstance(val, CoreRecipeInfo):
                    pn = val.pn
    with open(cachefile, "rb") as cachefile:
        pickled = pickle.Unpickler(cachefile)
        while cachefile:
            try:
                key = pickled.load()
                val = pickled.load()
            except Exception:
                break
            if isinstance(val, CoreRecipeInfo) and (not val.skipped):
                pn = val.pn
                # Filter out the native recipes.
                if key.startswith('virtual:native:') or pn.endswith("-native"):
                    continue

                    if self.args.recipe and self.args.recipe != pn:
                        continue
                # 1.0 is the default version for a no PV recipe.
                if "pv" in val.__dict__:
                    pv = val.pv
                else:
                    pv = "1.0"

                    if self.args.skip and val.skipped:
                        continue

                    if self.args.members:
                        out = key
                        for member in self.args.members.split(','):
                            out += ": %s" % val.__dict__.get(member)
                        print("%s" % out)
                    else:
                        print("%s: %s" % (key, val.__dict__))
                elif not self.args.recipe:
                    print("%s %s" % (key, val))
                print("%s %s %s %s" % (key, pn, pv, ' '.join(val.packages)))

if __name__ == "__main__":
    try:
        dump = DumpCache()
        ret = dump.main()
    except Exception as esc:
        ret = 1
        import traceback
        traceback.print_exc()
    sys.exit(ret)
    sys.exit(main(sys.argv[1:]))
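
Both variants of this script share the same read loop: bb_cache.dat is a stream of alternating key/value pickles, read until the stream is exhausted. A minimal standalone sketch of that loop (the file path below is illustrative only):

import pickle

with open("bb_cache.dat", "rb") as f:
    unpickler = pickle.Unpickler(f)
    while True:
        try:
            key = unpickler.load()
            value = unpickler.load()
        except Exception:   # EOFError (or a truncated stream) ends the loop
            break
        print(key, type(value).__name__)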


@@ -781,7 +781,7 @@
The code in <filename>meta/lib/oe/sstatesig.py</filename> shows two examples
of this and also illustrates how you can insert your own policy into the system
if so desired.
This file defines the two basic signature generators OpenEmbedded-Core
This file defines the two basic signature generators OpenEmbedded Core
uses: "OEBasic" and "OEBasicHash".
By default, there is a dummy "noop" signature handler enabled in BitBake.
This means that behavior is unchanged from previous versions.

@@ -38,7 +38,7 @@
The code to execute the first part of this process, a fetch,
looks something like the following:
<literallayout class='monospaced'>
src_uri = (d.getVar('SRC_URI') or "").split()
src_uri = (d.getVar('SRC_URI', True) or "").split()
fetcher = bb.fetch2.Fetch(src_uri, d)
fetcher.download()
</literallayout>
@@ -52,7 +52,7 @@
<para>
The instantiation of the fetch class is usually followed by:
<literallayout class='monospaced'>
rootdir = l.getVar('WORKDIR')
rootdir = l.getVar('WORKDIR', True)
fetcher.unpack(rootdir)
</literallayout>
This code unpacks the downloaded files to the
@@ -268,6 +268,15 @@
<link linkend='var-FILESPATH'><filename>FILESPATH</filename></link>
variable is used in the same way
<filename>PATH</filename> is used to find executables.
Failing that,
<link linkend='var-FILESDIR'><filename>FILESDIR</filename></link>
is used to find the appropriate relative file.
<note>
<filename>FILESDIR</filename> is deprecated and can
be replaced with <filename>FILESPATH</filename>.
Because <filename>FILESDIR</filename> is likely to be
removed, you should not use this variable in any new code.
</note>
If the file cannot be found, it is assumed that it is available in
<link linkend='var-DL_DIR'><filename>DL_DIR</filename></link>
by the time the <filename>download()</filename> method is called.
@@ -376,8 +385,7 @@
The supported parameters are as follows:
<itemizedlist>
<listitem><para><emphasis>"method":</emphasis>
The protocol over which to communicate with the CVS
server.
The protocol over which to communicate with the CVS server.
By default, this protocol is "pserver".
If "method" is set to "ext", BitBake examines the
"rsh" parameter and sets <filename>CVS_RSH</filename>.
@@ -461,29 +469,25 @@
You can think of this parameter as the top-level
directory of the repository data you want.
</para></listitem>
<listitem><para><emphasis>"path_spec":</emphasis>
A specific directory in which to checkout the
specified svn module.
</para></listitem>
<listitem><para><emphasis>"protocol":</emphasis>
The protocol to use, which defaults to "svn".
If "protocol" is set to "svn+ssh", the "ssh"
parameter is also used.
Other options are "svn+ssh" and "rsh".
For "rsh", the "rsh" parameter is also used.
</para></listitem>
<listitem><para><emphasis>"rev":</emphasis>
The revision of the source code to checkout.
</para></listitem>
<listitem><para><emphasis>"date":</emphasis>
The date of the source code to checkout.
Specific revisions are generally much safer to checkout
rather than by date as they do not involve timezones
(e.g. they are much more deterministic).
</para></listitem>
<listitem><para><emphasis>"scmdata":</emphasis>
Causes the “.svn” directories to be available during
compile-time when set to "keep".
By default, these directories are removed.
</para></listitem>
<listitem><para><emphasis>"ssh":</emphasis>
An optional parameter used when "protocol" is set
to "svn+ssh".
You can use this parameter to specify the ssh
program used by svn.
</para></listitem>
<listitem><para><emphasis>"transportuser":</emphasis>
When required, sets the username for the transport.
By default, this parameter is empty.
@@ -492,11 +496,10 @@
command.
</para></listitem>
</itemizedlist>
Following are three examples using svn:
Following are two examples using svn:
<literallayout class='monospaced'>
SRC_URI = "svn://myrepos/proj1;module=vip;protocol=http;rev=667"
SRC_URI = "svn://myrepos/proj1;module=opie;protocol=svn+ssh"
SRC_URI = "svn://myrepos/proj1;module=trunk;protocol=http;path_spec=${MY_DIR}/proj1"
SRC_URI = "svn://svn.oe.handhelds.org/svn;module=vip;proto=http;rev=667"
SRC_URI = "svn://svn.oe.handhelds.org/svn/;module=opie;proto=svn+ssh;date=20060126"
</literallayout>
</para>
</section>

@@ -588,14 +591,6 @@
The name of the path in which to place the checkout.
By default, the path is <filename>git/</filename>.
</para></listitem>
<listitem><para><emphasis>"usehead":</emphasis>
Enables local <filename>git://</filename> URLs to use the
current branch HEAD as the revision for use with
<filename>AUTOREV</filename>.
The "usehead" parameter implies no branch and only works
when the transfer protocol is
<filename>file://</filename>.
</para></listitem>
</itemizedlist>
Here are some example URLs:
<literallayout class='monospaced'>
@@ -628,9 +623,7 @@
The Git Submodules fetcher is not a complete fetcher
implementation.
The fetcher has known issues where it does not use the
normal source mirroring infrastructure properly. Further,
the submodule sources it fetches are not visible to the
licensing and source archiving infrastructures.
normal source mirroring infrastructure properly.
</para>
</note>
</para>
@@ -785,43 +778,6 @@
</para>
</section>

<section id='repo-fetcher'>
<title>Repo Fetcher (<filename>repo://</filename>)</title>

<para>
This fetcher submodule fetches code from
<filename>google-repo</filename> source control system.
The fetcher works by initiating and syncing sources of the
repository into
<link linkend='var-REPODIR'><filename>REPODIR</filename></link>,
which is usually
<link linkend='var-DL_DIR'><filename>DL_DIR</filename></link><filename>/repo</filename>.
</para>

<para>
This fetcher supports the following parameters:
<itemizedlist>
<listitem><para>
<emphasis>"protocol":</emphasis>
Protocol to fetch the repository manifest (default: git).
</para></listitem>
<listitem><para>
<emphasis>"branch":</emphasis>
Branch or tag of repository to get (default: master).
</para></listitem>
<listitem><para>
<emphasis>"manifest":</emphasis>
Name of the manifest file (default: <filename>default.xml</filename>).
</para></listitem>
</itemizedlist>
Here are some example URLs:
<literallayout class='monospaced'>
SRC_URI = "repo://REPOROOT;protocol=git;branch=some_branch;manifest=my_manifest.xml"
SRC_URI = "repo://REPOROOT;protocol=file;branch=some_branch;manifest=my_manifest.xml"
</literallayout>
</para>
</section>

<section id='other-fetchers'>
<title>Other Fetchers</title>

@@ -840,6 +796,9 @@
<listitem><para>
Secure Shell (<filename>ssh://</filename>)
</para></listitem>
<listitem><para>
Repo (<filename>repo://</filename>)
</para></listitem>
<listitem><para>
OSC (<filename>osc://</filename>)
</para></listitem>

@@ -128,8 +128,15 @@
</para>

<note>
This example was inspired by and drew heavily from
<ulink url="http://www.mail-archive.com/yocto@yoctoproject.org/msg09379.html">Mailing List post - The BitBake equivalent of "Hello, World!"</ulink>.
This example was inspired by and drew heavily from these sources:
<itemizedlist>
<listitem><para>
<ulink url="http://www.mail-archive.com/yocto@yoctoproject.org/msg09379.html">Mailing List post - The BitBake equivalent of "Hello, World!"</ulink>
</para></listitem>
<listitem><para>
<ulink url="http://hambedded.org/blog/2012/11/24/from-bitbake-hello-world-to-an-image/">Hambedded Linux blog post - From Bitbake Hello World to an Image</ulink>
</para></listitem>
</itemizedlist>
</note>

<para>
@@ -260,9 +267,9 @@
files.
For this example, you need to create the file in your project directory
and define some key BitBake variables.
For more information on the <filename>bitbake.conf</filename> file,
For more information on the <filename>bitbake.conf</filename>,
see
<ulink url='http://git.openembedded.org/bitbake/tree/conf/bitbake.conf'></ulink>.
<ulink url='http://hambedded.org/blog/2012/11/24/from-bitbake-hello-world-to-an-image/#an-overview-of-bitbakeconf'></ulink>
</para>
<para>Use the following commands to create the <filename>conf</filename>
directory in the project directory:
@@ -273,32 +280,14 @@
some editor to create the <filename>bitbake.conf</filename>
so that it contains the following:
<literallayout class='monospaced'>
<link linkend='var-PN'>PN</link> = "${@bb.parse.BBHandler.vars_from_file(d.getVar('FILE', False),d)[0] or 'defaultpkgname'}"
</literallayout>
<literallayout class='monospaced'>
TMPDIR = "${<link linkend='var-TOPDIR'>TOPDIR</link>}/tmp"
<link linkend='var-CACHE'>CACHE</link> = "${TMPDIR}/cache"
<link linkend='var-STAMP'>STAMP</link> = "${TMPDIR}/${PN}/stamps"
<link linkend='var-T'>T</link> = "${TMPDIR}/${PN}/work"
<link linkend='var-B'>B</link> = "${TMPDIR}/${PN}"
<link linkend='var-STAMP'>STAMP</link> = "${TMPDIR}/stamps"
<link linkend='var-T'>T</link> = "${TMPDIR}/work"
<link linkend='var-B'>B</link> = "${TMPDIR}"
</literallayout>
<note>
Without a value for <filename>PN</filename>, the
variables <filename>STAMP</filename>,
<filename>T</filename>, and <filename>B</filename>,
prevent more than one recipe from working. You can fix
this by either setting <filename>PN</filename> to have
a value similar to what OpenEmbedded and BitBake use
in the default <filename>bitbake.conf</filename> file
(see previous example). Or, by manually updating each
recipe to set <filename>PN</filename>. You will also
need to include <filename>PN</filename> as part of the
<filename>STAMP</filename>, <filename>T</filename>, and
<filename>B</filename> variable definitions in the
<filename>local.conf</filename> file.
</note>
The <filename>TMPDIR</filename> variable establishes a directory
that BitBake uses for build output and intermediate files other
that BitBake uses for build output and intermediate files (other
than the cached information used by the
<link linkend='setscene'>Setscene</link> process.
Here, the <filename>TMPDIR</filename> directory is set to
@@ -318,19 +307,19 @@
file exists, you can run the <filename>bitbake</filename>
command again:
<literallayout class='monospaced'>
$ bitbake
ERROR: Traceback (most recent call last):
File "/home/scott-lenovo/bitbake/lib/bb/cookerdata.py", line 163, in wrapped
return func(fn, *args)
File "/home/scott-lenovo/bitbake/lib/bb/cookerdata.py", line 177, in _inherit
bb.parse.BBHandler.inherit(bbclass, "configuration INHERITs", 0, data)
File "/home/scott-lenovo/bitbake/lib/bb/parse/parse_py/BBHandler.py", line 92, in inherit
include(fn, file, lineno, d, "inherit")
File "/home/scott-lenovo/bitbake/lib/bb/parse/parse_py/ConfHandler.py", line 100, in include
raise ParseError("Could not %(error_out)s file %(fn)s" % vars(), oldfn, lineno)
ParseError: ParseError in configuration INHERITs: Could not inherit file classes/base.bbclass
$ bitbake
ERROR: Traceback (most recent call last):
File "/home/scott-lenovo/bitbake/lib/bb/cookerdata.py", line 163, in wrapped
return func(fn, *args)
File "/home/scott-lenovo/bitbake/lib/bb/cookerdata.py", line 177, in _inherit
bb.parse.BBHandler.inherit(bbclass, "configuration INHERITs", 0, data)
File "/home/scott-lenovo/bitbake/lib/bb/parse/parse_py/BBHandler.py", line 92, in inherit
include(fn, file, lineno, d, "inherit")
File "/home/scott-lenovo/bitbake/lib/bb/parse/parse_py/ConfHandler.py", line 100, in include
raise ParseError("Could not %(error_out)s file %(fn)s" % vars(), oldfn, lineno)
ParseError: ParseError in configuration INHERITs: Could not inherit file classes/base.bbclass

ERROR: Unable to parse base: ParseError in configuration INHERITs: Could not inherit file classes/base.bbclass
ERROR: Unable to parse base: ParseError in configuration INHERITs: Could not inherit file classes/base.bbclass
</literallayout>
In the sample output, BitBake could not find the
<filename>classes/base.bbclass</filename> file.
@@ -363,6 +352,9 @@
Of course, the <filename>base.bbclass</filename> can have much
more depending on which build environments BitBake is
supporting.
For more information on the <filename>base.bbclass</filename> file,
you can look at
<ulink url='http://hambedded.org/blog/2012/11/24/from-bitbake-hello-world-to-an-image/#tasks'></ulink>.
</para></listitem>
<listitem><para><emphasis>Run Bitbake:</emphasis>
After making sure that the <filename>classes/base.bbclass</filename>
@@ -383,10 +375,10 @@
code separate from the general metadata used by BitBake.
Thus, this example creates and uses a layer called "mylayer".
<note>
You can find additional information on layers in the
"<link linkend='layers'>Layers</link>" section.
</note></para>

You can find additional information on adding a layer at
<ulink url='http://hambedded.org/blog/2012/11/24/from-bitbake-hello-world-to-an-image/#adding-an-example-layer'></ulink>.
</note>
</para>
<para>Minimally, you need a recipe file and a layer configuration
file in your layer.
The configuration file needs to be in the <filename>conf</filename>

@@ -342,14 +342,13 @@

<para>
When you name an append file, you can use the
"<filename>%</filename>" wildcard character to allow for matching
recipe names.
wildcard character (%) to allow for matching recipe names.
For example, suppose you have an append file named
as follows:
<literallayout class='monospaced'>
busybox_1.21.%.bbappend
</literallayout>
That append file would match any <filename>busybox_1.21.</filename><replaceable>x</replaceable><filename>.bb</filename>
That append file would match any <filename>busybox_1.21.x.bb</filename>
version of the recipe.
So, the append file would match the following recipe names:
<literallayout class='monospaced'>
@@ -357,14 +356,6 @@
busybox_1.21.2.bb
busybox_1.21.3.bb
</literallayout>
<note><title>Important</title>
The use of the "<filename>%</filename>" character
is limited in that it only works directly in front of the
<filename>.bbappend</filename> portion of the append file's
name.
You cannot use the wildcard character in any other
location of the name.
</note>
If the <filename>busybox</filename> recipe was updated to
<filename>busybox_1.3.0.bb</filename>, the append name would not
match.
@@ -449,7 +440,7 @@
Build Checkout:</emphasis>
A final possibility for getting a copy of BitBake is that it
already comes with your checkout of a larger Bitbake-based build
system, such as Poky.
system, such as Poky or Yocto Project.
Rather than manually checking out individual layers and
gluing them together yourself, you can check
out an entire build system.
@@ -497,6 +488,8 @@
target that failed and anything depending on it cannot
be built, as much as possible will be built before
stopping.
-a, --tryaltconfigs Continue with builds by trying to use alternative
providers where possible.
-f, --force Force the specified targets/task to run (invalidating
any existing stamp file).
-c CMD, --cmd=CMD Specify the task to execute. The exact options
@@ -511,20 +504,9 @@
Read the specified file before bitbake.conf.
-R POSTFILE, --postread=POSTFILE
Read the specified file after bitbake.conf.
-v, --verbose Enable tracing of shell tasks (with 'set -x'). Also
print bb.note(...) messages to stdout (in addition to
writing them to ${T}/log.do_<task>).
-v, --verbose Output more log message data to the terminal.
-D, --debug Increase the debug level. You can specify this more
than once. -D sets the debug level to 1, where only
bb.debug(1, ...) messages are printed to stdout; -DD
sets the debug level to 2, where both bb.debug(1, ...)
and bb.debug(2, ...) messages are printed; etc.
Without -D, no debug messages are printed. Note that
-D only affects output to stdout. All debug messages
are written to ${T}/log.do_taskname, regardless of the
debug level.
-q, --quiet Output less log message data to the terminal. You can
specify this more than once.
than once.
-n, --dry-run Don't execute, just go through the motions.
-S SIGNATURE_HANDLER, --dump-signatures=SIGNATURE_HANDLER
Dump out the signature construction information, with
@@ -547,38 +529,29 @@
-l DEBUG_DOMAINS, --log-domains=DEBUG_DOMAINS
Show debug logging for the specified logging domains
-P, --profile Profile the command and save reports.
-u UI, --ui=UI The user interface to use (knotty, ncurses or taskexp
- default knotty).
-u UI, --ui=UI The user interface to use (depexp, goggle, hob, knotty
or ncurses - default knotty).
-t SERVERTYPE, --servertype=SERVERTYPE
Choose which server type to use (process or xmlrpc -
default process).
--token=XMLRPCTOKEN Specify the connection token to be used when
connecting to a remote server.
--revisions-changed Set the exit code depending on whether upstream
floating revisions have changed or not.
--server-only Run bitbake without a UI, only starting a server
(cooker) process.
-B BIND, --bind=BIND The name/address for the bitbake xmlrpc server to bind
to.
-T SERVER_TIMEOUT, --idle-timeout=SERVER_TIMEOUT
Set timeout to unload bitbake server due to
inactivity, set to -1 means no unload, default:
Environment variable BB_SERVER_TIMEOUT.
-B BIND, --bind=BIND The name/address for the bitbake server to bind to.
--no-setscene Do not run any setscene tasks. sstate will be ignored
and everything needed, built.
--setscene-only Only run setscene tasks, don't run any real tasks.
--remote-server=REMOTE_SERVER
Connect to the specified server.
-m, --kill-server Terminate any running bitbake server.
-m, --kill-server Terminate the remote server.
--observe-only Connect to a server as an observing-only client.
--status-only Check the status of the remote bitbake server.
-w WRITEEVENTLOG, --write-log=WRITEEVENTLOG
Writes the event log of the build to a bitbake event
json file. Use '' (empty string) to assign the name
automatically.
--runall=RUNALL Run the specified task for any recipe in the taskgraph
of the specified target (even if it wouldn't otherwise
have run).
--runonly=RUNONLY Run only the specified task within the taskgraph of
the specified targets (and any task dependencies those
tasks may have).
</literallayout>
</para>
</section>

@@ -692,21 +665,21 @@
</para>

<para>
When you generate a dependency graph, BitBake writes three files
When you generate a dependency graph, BitBake writes four files
to the current working directory:
<itemizedlist>
<listitem><para>
<emphasis><filename>recipe-depends.dot</filename>:</emphasis>
Shows dependencies between recipes (i.e. a collapsed version of
<filename>task-depends.dot</filename>).
<listitem><para><emphasis><filename>package-depends.dot</filename>:</emphasis>
Shows BitBake's knowledge of dependencies between
runtime targets.
</para></listitem>
<listitem><para>
<emphasis><filename>task-depends.dot</filename>:</emphasis>
<listitem><para><emphasis><filename>pn-depends.dot</filename>:</emphasis>
Shows dependencies between build-time targets
(i.e. recipes).
</para></listitem>
<listitem><para><emphasis><filename>task-depends.dot</filename>:</emphasis>
Shows dependencies between tasks.
These dependencies match BitBake's internal task execution list.
</para></listitem>
<listitem><para>
<emphasis><filename>pn-buildlist</filename>:</emphasis>
<listitem><para><emphasis><filename>pn-buildlist</filename>:</emphasis>
Shows a simple list of targets that are to be built.
</para></listitem>
</itemizedlist>
@@ -732,163 +705,6 @@
</literallayout>
</para>
</section>

<section id='executing-a-multiple-configuration-build'>
<title>Executing a Multiple Configuration Build</title>

<para>
BitBake is able to build multiple images or packages
using a single command where the different targets
require different configurations (multiple configuration
builds).
Each target, in this scenario, is referred to as a
"multiconfig".
</para>

<para>
To accomplish a multiple configuration build, you must
define each target's configuration separately using
a parallel configuration file in the build directory.
The location for these multiconfig configuration files
is specific.
They must reside in the current build directory in
a sub-directory of <filename>conf</filename> named
<filename>multiconfig</filename>.
Following is an example for two separate targets:
<imagedata fileref="figures/bb_multiconfig_files.png" align="center" width="4in" depth="3in" />
</para>

<para>
The reason for this required file hierarchy
is because the <filename>BBPATH</filename> variable
is not constructed until the layers are parsed.
Consequently, using the configuration file as a
pre-configuration file is not possible unless it is
located in the current working directory.
</para>

<para>
Minimally, each configuration file must define the
machine and the temporary directory BitBake uses
for the build.
Suggested practice dictates that you do not
overlap the temporary directories used during the
builds.
</para>

<para>
Aside from separate configuration files for each
target, you must also enable BitBake to perform multiple
configuration builds.
Enabling is accomplished by setting the
<link linkend='var-BBMULTICONFIG'><filename>BBMULTICONFIG</filename></link>
variable in the <filename>local.conf</filename>
configuration file.
As an example, suppose you had configuration files
for <filename>target1</filename> and
<filename>target2</filename> defined in the build
directory.
The following statement in the
<filename>local.conf</filename> file both enables
BitBake to perform multiple configuration builds and
specifies the two multiconfigs:
<literallayout class='monospaced'>
BBMULTICONFIG = "target1 target2"
</literallayout>
</para>

<para>
Once the target configuration files are in place and
BitBake has been enabled to perform multiple configuration
builds, use the following command form to start the
builds:
<literallayout class='monospaced'>
$ bitbake [multiconfig:<replaceable>multiconfigname</replaceable>:]<replaceable>target</replaceable> [[[multiconfig:<replaceable>multiconfigname</replaceable>:]<replaceable>target</replaceable>] ... ]
</literallayout>
Here is an example for two multiconfigs:
<filename>target1</filename> and
<filename>target2</filename>:
<literallayout class='monospaced'>
$ bitbake multiconfig:target1:<replaceable>target</replaceable> multiconfig:target2:<replaceable>target</replaceable>
</literallayout>
</para>
</section>

<section id='bb-enabling-multiple-configuration-build-dependencies'>
<title>Enabling Multiple Configuration Build Dependencies</title>

<para>
Sometimes dependencies can exist between targets
(multiconfigs) in a multiple configuration build.
For example, suppose that in order to build an image
for a particular architecture, the root filesystem of
another build for a different architecture needs to
exist.
In other words, the image for the first multiconfig depends
on the root filesystem of the second multiconfig.
This dependency is essentially that the task in the recipe
that builds one multiconfig is dependent on the
completion of the task in the recipe that builds
another multiconfig.
</para>

<para>
To enable dependencies in a multiple configuration
build, you must declare the dependencies in the recipe
using the following statement form:
<literallayout class='monospaced'>
<replaceable>task_or_package</replaceable>[mcdepends] = "multiconfig:<replaceable>from_multiconfig</replaceable>:<replaceable>to_multiconfig</replaceable>:<replaceable>recipe_name</replaceable>:<replaceable>task_on_which_to_depend</replaceable>"
</literallayout>
To better show how to use this statement, consider an
example with two multiconfigs: <filename>target1</filename>
and <filename>target2</filename>:
<literallayout class='monospaced'>
<replaceable>image_task</replaceable>[mcdepends] = "multiconfig:target1:target2:<replaceable>image2</replaceable>:<replaceable>rootfs_task</replaceable>"
</literallayout>
In this example, the
<replaceable>from_multiconfig</replaceable> is "target1" and
the <replaceable>to_multiconfig</replaceable> is "target2".
The task on which the image whose recipe contains
<replaceable>image_task</replaceable> depends on the
completion of the <replaceable>rootfs_task</replaceable>
used to build out <replaceable>image2</replaceable>, which
is associated with the "target2" multiconfig.
</para>

<para>
Once you set up this dependency, you can build the
"target1" multiconfig using a BitBake command as follows:
<literallayout class='monospaced'>
$ bitbake multiconfig:target1:<replaceable>image1</replaceable>
</literallayout>
This command executes all the tasks needed to create
<replaceable>image1</replaceable> for the "target1"
multiconfig.
Because of the dependency, BitBake also executes through
the <replaceable>rootfs_task</replaceable> for the "target2"
multiconfig build.
</para>

<para>
Having a recipe depend on the root filesystem of another
build might not seem that useful.
Consider this change to the statement in the
<replaceable>image1</replaceable> recipe:
<literallayout class='monospaced'>
<replaceable>image_task</replaceable>[mcdepends] = "multiconfig:target1:target2:<replaceable>image2</replaceable>:<replaceable>image_task</replaceable>"
</literallayout>
In this case, BitBake must create
<replaceable>image2</replaceable> for the "target2"
build since the "target1" build depends on it.
</para>

<para>
Because "target1" and "target2" are enabled for multiple
configuration builds and have separate configuration
files, BitBake places the artifacts for each build in the
respective temporary build directories.
</para>
</section>
</section>
</section>
</chapter>

@@ -61,48 +61,6 @@
</para>
</section>

<section id='line-joining'>
<title>Line Joining</title>

<para>
Outside of
<link linkend='functions'>functions</link>, BitBake joins
any line ending in a backslash character ("\")
with the following line before parsing statements.
The most common use for the "\" character is to split variable
assignments over multiple lines, as in the following example:
<literallayout class='monospaced'>
FOO = "bar \
baz \
qaz"
</literallayout>
Both the "\" character and the newline character
that follow it are removed when joining lines.
Thus, no newline characters end up in the value of
<filename>FOO</filename>.
</para>

<para>
Consider this additional example where the two
assignments both assign "barbaz" to
<filename>FOO</filename>:
<literallayout class='monospaced'>
FOO = "barbaz"

FOO = "bar\
baz"
</literallayout>
<note>
BitBake does not interpret escape sequences like
"\n" in variable values.
For these to have an effect, the value must be passed
to some utility that interprets escape sequences,
such as <filename>printf</filename> or
<filename>echo -n</filename>.
</note>
</para>
</section>

<section id='variable-expansion'>
<title>Variable Expansion</title>

@@ -342,7 +300,7 @@

<para>
When you use this syntax, BitBake expects one or more strings.
Surrounding spaces and spacing are preserved.
Surrounding spaces are removed as well.
Here is an example:
<literallayout class='monospaced'>
FOO = "123 456 789 123456 123 456 123 456"
@@ -352,9 +310,8 @@
FOO2_remove = "abc def"
</literallayout>
The variable <filename>FOO</filename> becomes
" 789 123456 "
and <filename>FOO2</filename> becomes
" ghi abcdef ".
"789 123456" and <filename>FOO2</filename> becomes
"ghi abcdef".
</para>
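
As a rough Python model of the _remove semantics described above (this is not BitBake's implementation, only the token-level behavior): both values are split on whitespace, matching tokens are dropped, and the result is rejoined:

def remove_tokens(value, removals):
    drop = set(removals.split())
    return " ".join(t for t in value.split() if t not in drop)

print(remove_tokens("123 456 789 123456 123 456 123 456", "123 456"))
# prints "789 123456", matching the FOO example above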
|
||||
|
||||
<para>
|
||||
@@ -503,17 +460,17 @@
|
||||
</section>
|
||||
|
||||
<section id='unsetting-variables'>
|
||||
<title>Unsetting variables</title>
|
||||
<title>Unseting variables</title>
|
||||
|
||||
<para>
|
||||
It is possible to completely remove a variable or a variable flag
|
||||
It is possible to completely remove a variable or a variable flag
|
||||
from BitBake's internal data dictionary by using the "unset" keyword.
|
||||
Here is an example:
|
||||
<literallayout class='monospaced'>
|
||||
unset DATE
|
||||
unset do_fetch[noexec]
|
||||
</literallayout>
|
||||
These two statements remove the <filename>DATE</filename> and the
|
||||
These two statements remove the <filename>DATE</filename> and the
|
||||
<filename>do_fetch[noexec]</filename> flag.
|
||||
</para>
|
||||
|
||||
@@ -670,7 +627,7 @@
|
||||
<literallayout class='monospaced'>
|
||||
DEPENDS = "glibc ncurses"
|
||||
OVERRIDES = "machine:local"
|
||||
DEPENDS_append_machine = " libmad"
|
||||
DEPENDS_append_machine = "libmad"
|
||||
</literallayout>
|
||||
In this example, <filename>DEPENDS</filename> becomes
|
||||
"glibc ncurses libmad".
|
||||
@@ -900,12 +857,11 @@
|
||||
|
||||
<para>
|
||||
The <filename>inherit</filename> directive is a rudimentary
|
||||
means of specifying functionality contained in class files
|
||||
that your recipes require.
|
||||
means of specifying what classes of functionality your
|
||||
recipes require.
|
||||
For example, you can easily abstract out the tasks involved in
|
||||
building a package that uses Autoconf and Automake and put
|
||||
those tasks into a class file and then have your recipe
|
||||
inherit that class file.
|
||||
those tasks into a class file that can be used by your recipe.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
@@ -924,24 +880,13 @@
|
||||
inherited class within your recipe by doing so
|
||||
after the "inherit" statement.
|
||||
</note>
|
||||
If you want to use the directive to inherit
|
||||
multiple classes, separate them with spaces.
|
||||
The following example shows how to inherit both the
|
||||
<filename>buildhistory</filename> and <filename>rm_work</filename>
|
||||
classes:
|
||||
<literallayout class='monospaced'>
|
||||
inherit buildhistory rm_work
|
||||
</literallayout>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
An advantage with the inherit directive as compared to both
|
||||
the
|
||||
<link linkend='include-directive'>include</link> and
|
||||
<link linkend='require-inclusion'>require</link> directives
|
||||
is that you can inherit class files conditionally.
|
||||
You can accomplish this by using a variable expression
|
||||
after the <filename>inherit</filename> statement.
|
||||
If necessary, it is possible to inherit a class
|
||||
conditionally by using
|
||||
a variable expression after the <filename>inherit</filename>
|
||||
statement.
|
||||
Here is an example:
|
||||
<literallayout class='monospaced'>
|
||||
inherit ${VARNAME}
|
||||
@@ -997,17 +942,6 @@
|
||||
within <filename>BBPATH</filename>.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
The include directive is a more generic method of including
|
||||
functionality as compared to the
|
||||
<link linkend='inherit-directive'>inherit</link> directive,
|
||||
which is restricted to class (i.e. <filename>.bbclass</filename>)
|
||||
files.
|
||||
The include directive is applicable for any other kind of
|
||||
shared or encapsulated functionality or configuration that
|
||||
does not suit a <filename>.bbclass</filename> file.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
As an example, suppose you needed a recipe to include some
|
||||
self-test definitions:
|
||||
@@ -1041,18 +975,6 @@
|
||||
being parsed at the location of the directive.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
The require directive, like the include directive previously
|
||||
described, is a more generic method of including
|
||||
functionality as compared to the
|
||||
<link linkend='inherit-directive'>inherit</link> directive,
|
||||
which is restricted to class (i.e. <filename>.bbclass</filename>)
|
||||
files.
|
||||
The require directive is applicable for any other kind of
|
||||
shared or encapsulated functionality or configuration that
|
||||
does not suit a <filename>.bbclass</filename> file.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Similar to how BitBake handles
|
||||
<link linkend='include-directive'><filename>include</filename></link>,
|
||||
@@ -1085,9 +1007,8 @@
|
||||
|
||||
<para>
|
||||
When creating a configuration file (<filename>.conf</filename>),
|
||||
you can use the
|
||||
<link linkend='var-INHERIT'><filename>INHERIT</filename></link>
|
||||
configuration directive to inherit a class.
|
||||
you can use the <filename>INHERIT</filename> directive to
|
||||
inherit a class.
|
||||
BitBake only supports this directive when used within
|
||||
a configuration file.
|
||||
</para>
|
||||
@@ -1120,7 +1041,7 @@
|
||||
<filename>autotools</filename> and <filename>pkgconfig</filename>
|
||||
classes:
|
||||
<literallayout class='monospaced'>
|
||||
INHERIT += "autotools pkgconfig"
|
||||
inherit autotools pkgconfig
|
||||
</literallayout>
|
||||
</para>
|
||||
</section>
|
||||
@@ -1244,7 +1165,7 @@
|
||||
<literallayout class='monospaced'>
|
||||
python some_python_function () {
|
||||
d.setVar("TEXT", "Hello World")
|
||||
print d.getVar("TEXT")
|
||||
print d.getVar("TEXT", True)
|
||||
}
|
||||
</literallayout>
|
||||
Because the Python "bb" and "os" modules are already
|
||||
@@ -1259,7 +1180,7 @@
|
||||
to freely set variable values to expandable expressions
|
||||
without having them expanded prematurely.
|
||||
If you do wish to expand a variable within a Python
|
||||
function, use <filename>d.getVar("X")</filename>.
|
||||
function, use <filename>d.getVar("X", True)</filename>.
|
||||
Or, for more complicated expressions, use
|
||||
<filename>d.expand()</filename>.
|
||||
</note>
|
||||
@@ -1311,7 +1232,7 @@
|
||||
Here is an example:
|
||||
<literallayout class='monospaced'>
|
||||
def get_depends(d):
|
||||
if d.getVar('SOMECONDITION'):
|
||||
if d.getVar('SOMECONDITION', True):
|
||||
return "dependencywithcond"
|
||||
else:
|
||||
return "dependency"
@@ -1446,7 +1367,7 @@
based on the value of another variable:
<literallayout class='monospaced'>
python () {
    if d.getVar('SOMEVAR') == 'value':
    if d.getVar('SOMEVAR', True) == 'value':
        d.setVar('ANOTHERVAR', 'value2')
}
</literallayout>
@@ -1930,38 +1851,6 @@
not careful.
</note>
</para></listitem>
<listitem><para><emphasis><filename>[number_threads]</filename>:</emphasis>
Limits tasks to a specific number of simultaneous threads
during execution.
This varflag is useful when your build host has a large number
of cores but certain tasks need to be rate-limited due to various
kinds of resource constraints (e.g. to avoid network throttling).
<filename>number_threads</filename> works similarly to the
<link linkend='var-BB_NUMBER_THREADS'><filename>BB_NUMBER_THREADS</filename></link>
variable but is task-specific.</para>

<para>Set the value globally.
For example, the following makes sure the
<filename>do_fetch</filename> task uses no more than two
simultaneous execution threads:
<literallayout class='monospaced'>
     do_fetch[number_threads] = "2"
</literallayout>
<note><title>Warnings</title>
<itemizedlist>
<listitem><para>
Setting the varflag in individual recipes rather
than globally can result in unpredictable behavior.
</para></listitem>
<listitem><para>
Setting the varflag to a value greater than the
value used in the <filename>BB_NUMBER_THREADS</filename>
variable causes <filename>number_threads</filename>
to have no effect.
</para></listitem>
</itemizedlist>
</note>
</para></listitem>
<listitem><para><emphasis><filename>[postfuncs]</filename>:</emphasis>
List of functions to call after the completion of the task.
</para></listitem>
@@ -2053,196 +1942,128 @@
<title>Events</title>

<para>
BitBake allows installation of event handlers within recipe
and class files.
Events are triggered at certain points during operation, such
as the beginning of operation against a given recipe
(i.e. <filename>*.bb</filename>), the start of a given task,
a task failure, a task success, and so forth.
BitBake allows installation of event handlers within
recipe and class files.
Events are triggered at certain points during operation,
such as the beginning of an operation against a given recipe
(<filename>*.bb</filename> file), the start of a given task,
task failure, task success, and so forth.
The intent is to make it easy to do things like email
notification on build failures.
notification on build failure.
</para>

<para>
Following is an example event handler that prints the name
of the event and the content of the
<filename>FILE</filename> variable:
Following is an example event handler that
prints the name of the event and the content of
the <filename>FILE</filename> variable:
<literallayout class='monospaced'>
addhandler myclass_eventhandler
python myclass_eventhandler() {
    from bb.event import getName
    from bb import data
    print("The name of the Event is %s" % getName(e))
    print("The file we run for is %s" % d.getVar('FILE'))
    print("The file we run for is %s" % data.getVar('FILE', e.data, True))
}
myclass_eventhandler[eventmask] = "bb.event.BuildStarted bb.event.BuildCompleted"
</literallayout>
In the previous example, an eventmask has been set so that
the handler only sees the "BuildStarted" and "BuildCompleted"
events.
This event handler gets called every time an event matching
the eventmask is triggered.
A global variable "e" is defined, which represents the current
event.
With the <filename>getName(e)</filename> method, you can get
This event handler gets called every time an event is
triggered.
A global variable "<filename>e</filename>" is defined and
"<filename>e.data</filename>" contains an instance of
"<filename>bb.data</filename>".
With the <filename>getName(e)</filename> method, one can get
the name of the triggered event.
The global datastore is available as "d".
In legacy code, you might see "e.data" used to get the datastore.
However, realize that "e.data" is deprecated and you should use
"d" going forward.
</para>

<para>
The context of the datastore is appropriate to the event
in question.
For example, "BuildStarted" and "BuildCompleted" events run
before any tasks are executed so would be in the global
configuration datastore namespace.
No recipe-specific metadata exists in that namespace.
The "BuildStarted" and "BuildCompleted" events also run in
the main cooker/server process rather than any worker context.
Thus, any changes made to the datastore would be seen by other
cooker/server events within the current build but not seen
outside of that build or in any worker context.
Task events run in the actual tasks in question and
consequently have recipe-specific and task-specific contents.
These events run in the worker context and are discarded at
the end of task execution.
Because you probably are only interested in a subset of events,
you would likely use the <filename>[eventmask]</filename> flag
for your event handler to be sure that only certain events
trigger the handler.
Given the previous example, suppose you only wanted the
<filename>bb.build.TaskFailed</filename> event to trigger that
event handler.
Use the flag as follows:
<literallayout class='monospaced'>
addhandler myclass_eventhandler
myclass_eventhandler[eventmask] = "bb.build.TaskFailed"
python myclass_eventhandler() {
    from bb.event import getName
    from bb import data
    print("The name of the Event is %s" % getName(e))
    print("The file we run for is %s" % data.getVar('FILE', e.data, True))
}
</literallayout>
</para>

<para>
During a standard build, the following common events might
occur.
The following events are the most common kinds of events that
most metadata might have an interest in viewing:
During a standard build, the following common events might occur:
<itemizedlist>
<listitem><para>
<filename>bb.event.ConfigParsed()</filename>:
Fired when the base configuration (which consists of
<filename>bitbake.conf</filename>,
<filename>base.bbclass</filename> and any global
<filename>INHERIT</filename> statements) has been parsed.
You can see multiple such events when each of the
workers parses the base configuration or if the server
changes configuration and reparses.
Any given datastore only has one such event executed
against it, however.
If
<link linkend='var-BB_INVALIDCONF'><filename>BB_INVALIDCONF</filename></link>
is set in the datastore by the event handler, the
configuration is reparsed and a new event triggered,
allowing the metadata to update configuration.
<filename>bb.event.ConfigParsed()</filename>
</para></listitem>
<listitem><para>
<filename>bb.event.HeartbeatEvent()</filename>:
Fires at regular time intervals of one second.
You can configure the interval time using the
<filename>BB_HEARTBEAT_EVENT</filename> variable
(see the example after this list).
The event's "time" attribute is the
<filename>time.time()</filename> value when the
event is triggered.
This event is useful for activities such as
system state monitoring.
<filename>bb.event.ParseStarted()</filename>
</para></listitem>
<listitem><para>
<filename>bb.event.ParseStarted()</filename>:
Fired when BitBake is about to start parsing recipes.
This event's "total" attribute represents the number of
recipes BitBake plans to parse.
<filename>bb.event.ParseProgress()</filename>
</para></listitem>
<listitem><para>
<filename>bb.event.ParseProgress()</filename>:
Fired as parsing progresses.
This event's "current" attribute is the number of
recipes parsed so far; the event also carries the
"total" attribute.
<filename>bb.event.ParseCompleted()</filename>
</para></listitem>
<listitem><para>
<filename>bb.event.ParseCompleted()</filename>:
Fired when parsing is complete.
This event's "cached", "parsed", "skipped", "virtuals",
"masked", and "errors" attributes provide statistics
for the parsing results.
<filename>bb.event.BuildStarted()</filename>
</para></listitem>
<listitem><para>
<filename>bb.event.BuildStarted()</filename>:
Fired when a new build starts.
BitBake fires multiple "BuildStarted" events (one per configuration)
when multiple configuration (multiconfig) is enabled.
<filename>bb.build.TaskStarted()</filename>
</para></listitem>
<listitem><para>
<filename>bb.build.TaskStarted()</filename>:
Fired when a task starts.
This event's "taskfile" attribute points to the recipe
from which the task originates.
The "taskname" attribute, which is the task's name,
includes the <filename>do_</filename> prefix, and the
"logfile" attribute points to where the task's output is
stored.
Finally, the "time" attribute is the task's execution start
time.
<filename>bb.build.TaskInvalid()</filename>
</para></listitem>
<listitem><para>
<filename>bb.build.TaskInvalid()</filename>:
Fired if BitBake tries to execute a task that does not exist.
<filename>bb.build.TaskFailedSilent()</filename>
</para></listitem>
<listitem><para>
<filename>bb.build.TaskFailedSilent()</filename>:
Fired for setscene tasks that fail and should not be
presented to the user verbosely.
<filename>bb.build.TaskFailed()</filename>
</para></listitem>
<listitem><para>
<filename>bb.build.TaskFailed()</filename>:
Fired for normal tasks that fail.
<filename>bb.build.TaskSucceeded()</filename>
</para></listitem>
<listitem><para>
<filename>bb.build.TaskSucceeded()</filename>:
Fired when a task successfully completes.
<filename>bb.event.BuildCompleted()</filename>
</para></listitem>
<listitem><para>
<filename>bb.event.BuildCompleted()</filename>:
Fired when a build finishes.
</para></listitem>
<listitem><para>
<filename>bb.cooker.CookerExit()</filename>:
Fired when the BitBake server/cooker shuts down.
This event is usually only seen by the UIs as a
sign they should also shutdown.
<filename>bb.cooker.CookerExit()</filename>
</para></listitem>
</itemizedlist>
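A short illustration of the heartbeat interval setting mentioned
above (the value is in seconds and chosen arbitrarily):
<literallayout class='monospaced'>
     BB_HEARTBEAT_EVENT = "10"
</literallayout>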
</para>

<para>
The events on this next list occur based on specific
requests to the server.
These events are often used to communicate larger pieces of
information from the BitBake server to other parts of
BitBake such as user interfaces:
Here is a list of other events that occur based on specific requests
to the server:
<itemizedlist>
<listitem><para>
<filename>bb.event.TreeDataPreparationStarted()</filename>
</para></listitem>
<listitem><para>
<filename>bb.event.TreeDataPreparationProgress()</filename>
<filename>bb.event.TreeDataPreparationProgress</filename>
</para></listitem>
<listitem><para>
<filename>bb.event.TreeDataPreparationCompleted()</filename>
<filename>bb.event.TreeDataPreparationCompleted</filename>
</para></listitem>
<listitem><para>
<filename>bb.event.DepTreeGenerated()</filename>
<filename>bb.event.DepTreeGenerated</filename>
</para></listitem>
<listitem><para>
<filename>bb.event.CoreBaseFilesFound()</filename>
<filename>bb.event.CoreBaseFilesFound</filename>
</para></listitem>
<listitem><para>
<filename>bb.event.ConfigFilePathFound()</filename>
<filename>bb.event.ConfigFilePathFound</filename>
</para></listitem>
<listitem><para>
<filename>bb.event.FilesMatchingFound()</filename>
<filename>bb.event.FilesMatchingFound</filename>
</para></listitem>
<listitem><para>
<filename>bb.event.ConfigFilesFound()</filename>
<filename>bb.event.ConfigFilesFound</filename>
</para></listitem>
<listitem><para>
<filename>bb.event.TargetsTreeGenerated()</filename>
<filename>bb.event.TargetsTreeGenerated</filename>
</para></listitem>
</itemizedlist>
</para>
@@ -2685,97 +2506,48 @@
</para>

<para>
These checksums are stored in
<link linkend='var-STAMP'><filename>STAMP</filename></link>.
You can examine the checksums using the following BitBake command:
<literallayout class='monospaced'>
     $ bitbake-dumpsigs
</literallayout>
This command returns the signature data in a readable format
that allows you to examine the inputs used when the
OpenEmbedded build system generates signatures.
For example, using <filename>bitbake-dumpsigs</filename>
allows you to examine the <filename>do_compile</filename>
task's "sigdata" for a C application (e.g.
<filename>bash</filename>).
Running the command also reveals that the "CC" variable is part of
the inputs that are hashed.
Any changes to this variable would invalidate the stamp and
cause the <filename>do_compile</filename> task to run.
</para>

<para>
The following list describes related variables:
This list is a placeholder for content that existed from previous work
on the manual.
Some or all of it probably needs to be integrated into the subsections
that make up this section.
For now, I have just provided a short glossary-like description
for each variable.
Ultimately, this list goes away.
<itemizedlist>
<listitem><para>
<link linkend='var-BB_HASHCHECK_FUNCTION'><filename>BB_HASHCHECK_FUNCTION</filename></link>:
<listitem><para><filename>STAMP</filename>:
The base path to create stamp files.</para></listitem>
<listitem><para><filename>STAMPCLEAN</filename>
Again, the base path to create stamp files but can use wildcards
for matching a range of files for clean operations.
</para></listitem>
<listitem><para><filename>BB_STAMP_WHITELIST</filename>
Lists stamp files that are looked at when the stamp policy
is "whitelist".
</para></listitem>
<listitem><para><filename>BB_STAMP_POLICY</filename>
Defines the mode for comparing timestamps of stamp files.
</para></listitem>
<listitem><para><filename>BB_HASHCHECK_FUNCTION</filename>
Specifies the name of the function to call during
the "setscene" part of the task's execution in order
to validate the list of task hashes.
</para></listitem>
<listitem><para>
<link linkend='var-BB_SETSCENE_DEPVALID'><filename>BB_SETSCENE_DEPVALID</filename></link>:
Specifies a function BitBake calls that determines
whether BitBake requires a setscene dependency to
be met.
</para></listitem>
<listitem><para>
<link linkend='var-BB_SETSCENE_VERIFY_FUNCTION2'><filename>BB_SETSCENE_VERIFY_FUNCTION2</filename></link>:
<listitem><para><filename>BB_SETSCENE_VERIFY_FUNCTION2</filename>
Specifies a function to call that verifies the list of
planned task execution before the main task execution
happens.
</para></listitem>
<listitem><para>
<link linkend='var-BB_STAMP_POLICY'><filename>BB_STAMP_POLICY</filename></link>:
Defines the mode for comparing timestamps of stamp files.
<listitem><para><filename>BB_SETSCENE_DEPVALID</filename>
Specifies a function BitBake calls that determines
whether BitBake requires a setscene dependency to
be met.
</para></listitem>
<listitem><para>
<link linkend='var-BB_STAMP_WHITELIST'><filename>BB_STAMP_WHITELIST</filename></link>:
Lists stamp files that are looked at when the stamp policy
is "whitelist".
</para></listitem>
<listitem><para>
<link linkend='var-BB_TASKHASH'><filename>BB_TASKHASH</filename></link>:
<listitem><para><filename>BB_TASKHASH</filename>
Within an executing task, this variable holds the hash
of the task as returned by the currently enabled
signature generator.
</para></listitem>
<listitem><para>
<link linkend='var-STAMP'><filename>STAMP</filename></link>:
The base path to create stamp files.
</para></listitem>
<listitem><para>
<link linkend='var-STAMPCLEAN'><filename>STAMPCLEAN</filename></link>:
Again, the base path to create stamp files but can use wildcards
for matching a range of files for clean operations.
</para></listitem>
</itemizedlist>
</para>
</section>

<section id='wildcard-support-in-variables'>
<title>Wildcard Support in Variables</title>

<para>
Support for wildcard use in variables varies depending on the
context in which it is used.
For example, some variables and file names allow limited use of
wildcards through the "<filename>%</filename>" and
"<filename>*</filename>" characters.
Other variables or names support Python's
<ulink url='https://docs.python.org/3/library/glob.html'><filename>glob</filename></ulink>
syntax,
<ulink url='https://docs.python.org/3/library/fnmatch.html#module-fnmatch'><filename>fnmatch</filename></ulink>
syntax, or
<ulink url='https://docs.python.org/3/library/re.html#re'><filename>Regular Expression (re)</filename></ulink>
syntax.
</para>
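<para>
For illustration, two of the limited-wildcard forms, both of which
also appear elsewhere in this manual:
<literallayout class='monospaced'>
     PREFERRED_VERSION_linux-yocto = "4.12%"
     BB_ALLOWED_NETWORKS = "*.gnu.org"
</literallayout>
</para>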

<para>
For variables that have wildcard support, the
documentation describes which form of wildcard, its
use, and its limitations.
</para>
</section>

</chapter>

@@ -78,7 +78,7 @@
</para>

<para>
In OpenEmbedded-Core, <filename>ASSUME_PROVIDED</filename>
In OpenEmbedded Core, <filename>ASSUME_PROVIDED</filename>
mostly specifies native tools that should not be built.
An example is <filename>git-native</filename>, which
when specified allows for the Git binary from the host to
@@ -115,8 +115,7 @@
is either not set or set to "0".
</para></listitem>
<listitem><para>
Limited support for the "<filename>*</filename>"
wildcard character for matching against the
Limited support for wildcard matching against the
beginning of host names exists.
For example, the following setting matches
<filename>git.gnu.org</filename>,
@@ -125,20 +124,6 @@
<literallayout class='monospaced'>
     BB_ALLOWED_NETWORKS = "*.gnu.org"
</literallayout>
<note><title>Important</title>
<para>The use of the "<filename>*</filename>"
character only works at the beginning of
a host name and it must be isolated from
the remainder of the host name.
You cannot use the wildcard character in any
other location of the name or combined with
the front part of the name.</para>

<para>For example,
<filename>*.foo.bar</filename> is supported,
while <filename>*aa.foo.bar</filename> is not.
</para>
</note>
</para></listitem>
<listitem><para>
Mirrors not in the host list are skipped and
@@ -661,10 +646,10 @@
<glossdef>
<para>
Contains the name of the currently executing task.
The value includes the "do_" prefix.
The value does not include the "do_" prefix.
For example, if the currently executing task is
<filename>do_config</filename>, the value is
"do_config".
"config".
</para>
</glossdef>
</glossentry>
@@ -979,7 +964,7 @@
Allows you to extend a recipe so that it builds variants
of the software.
Some examples of these variants for recipes from the
OpenEmbedded-Core metadata are "natives" such as
OpenEmbedded Core metadata are "natives" such as
<filename>quilt-native</filename>, which is a copy of
Quilt built to run on the build system; "crosses" such
as <filename>gcc-cross</filename>, which is a compiler
@@ -995,7 +980,7 @@
amount of code, it usually is as simple as adding the
variable to your recipe.
Here are two examples.
The "native" variants are from the OpenEmbedded-Core
The "native" variants are from the OpenEmbedded Core
metadata:
<literallayout class='monospaced'>
     BBCLASSEXTEND =+ "native nativesdk"
@@ -1097,19 +1082,7 @@

<glossentry id='var-BBFILES'><glossterm>BBFILES</glossterm>
<glossdef>
<para>
A space-separated list of recipe files BitBake uses to
build software.
</para>

<para>
When specifying recipe files, you can pattern match using
Python's
<ulink url='https://docs.python.org/3/library/glob.html'><filename>glob</filename></ulink>
syntax.
For details on the syntax, see the documentation by
following the previous link.
</para>
<para>List of recipe files BitBake uses to build software.</para>
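<para>
A short, hypothetical illustration of glob matching in
<filename>BBFILES</filename>:
<literallayout class='monospaced'>
     BBFILES += "/path/to/layer/recipes-*/*/*.bb"
</literallayout>
</para>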
</glossdef>
</glossentry>

@@ -1170,6 +1143,8 @@
<glossdef>
<para>
Sets the base location where layers are stored.
By default, this location is set to
<filename>${COREBASE}</filename>.
This setting is used in conjunction with
<filename>bitbake-layers layerindex-fetch</filename> and
tells <filename>bitbake-layers</filename> where to place
@@ -1193,19 +1168,15 @@
match any of the expressions.
It is as if BitBake does not see them at all.
Consequently, matching files are not parsed or otherwise
used by BitBake.
</para>

used by BitBake.</para>
<para>
The values you provide are passed to Python's regular
expression compiler.
Consequently, the syntax follows Python's Regular
Expression (re) syntax.
The expressions are compared against the full paths to
the files.
For complete syntax information, see Python's
documentation at
<ulink url='http://docs.python.org/3/library/re.html#re'></ulink>.
<ulink url='http://docs.python.org/release/2.3/lib/re-syntax.html'></ulink>.
</para>

<para>
@@ -1236,45 +1207,6 @@
</glossdef>
</glossentry>

<glossentry id='var-BBMULTICONFIG'><glossterm>BBMULTICONFIG</glossterm>
<info>
BBMULTICONFIG[doc] = "Enables BitBake to perform multiple configuration builds and lists each separate configuration (multiconfig)."
</info>
<glossdef>
<para role="glossdeffirst">
<!-- <para role="glossdeffirst"><imagedata fileref="figures/define-generic.png" /> -->
Enables BitBake to perform multiple configuration builds
and lists each separate configuration (multiconfig).
You can use this variable to cause BitBake to build
multiple targets where each target has a separate
configuration.
Define <filename>BBMULTICONFIG</filename> in your
<filename>conf/local.conf</filename> configuration file.
</para>

<para>
As an example, the following line specifies three
multiconfigs, each having a separate configuration file:
<literallayout class='monospaced'>
     BBMULTICONFIG = "configA configB configC"
</literallayout>
Each configuration file you use must reside in the
build directory within a directory named
<filename>conf/multiconfig</filename> (e.g.
<replaceable>build_directory</replaceable><filename>/conf/multiconfig/configA.conf</filename>).
</para>

<para>
For information on how to use
<filename>BBMULTICONFIG</filename> in an environment that
supports building targets with multiple configurations,
see the
"<link linkend='executing-a-multiple-configuration-build'>Executing a Multiple Configuration Build</link>"
section.
</para>
</glossdef>
</glossentry>

<glossentry id='var-BBPATH'><glossterm>BBPATH</glossterm>
<glossdef>
<para>
@@ -1607,6 +1539,24 @@
</glossdef>
</glossentry>

<glossentry id='var-FILESDIR'><glossterm>FILESDIR</glossterm>
<glossdef>
<para>
Specifies directories BitBake uses when searching for
patches and files.
The "local" fetcher module uses these directories when
handling <filename>file://</filename> URLs if the file
was not found using
<link linkend='var-FILESPATH'><filename>FILESPATH</filename></link>.
<note>
The <filename>FILESDIR</filename> variable is
deprecated and you should use
<filename>FILESPATH</filename> in all new code.
</note>
</para>
</glossdef>
</glossentry>

<glossentry id='var-FILESPATH'><glossterm>FILESPATH</glossterm>
<glossdef>
<para>
@@ -1664,19 +1614,9 @@
<glossentry id='var-INHERIT'><glossterm>INHERIT</glossterm>
<glossdef>
<para>
Causes the named class or classes to be inherited globally.
Anonymous functions in the class or classes
are not executed for the
base configuration and in each individual recipe.
The OpenEmbedded build system ignores changes to
<filename>INHERIT</filename> in individual recipes.
</para>

<para>
For more information on <filename>INHERIT</filename>, see
the
"<link linkend="inherit-configuration-directive"><filename>INHERIT</filename> Configuration Directive</link>"
section.
Causes the named class to be inherited at
this point during parsing.
The variable is only valid in configuration files.
</para>
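<para>
As a minimal sketch, mirroring the directive example earlier in
this manual:
<literallayout class='monospaced'>
     INHERIT += "autotools pkgconfig"
</literallayout>
</para>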
</glossdef>
</glossentry>
@@ -1964,27 +1904,15 @@
you want to select, and you should set
<link linkend='var-PV'><filename>PV</filename></link>
accordingly for precedence.
</para>

<para>
The <filename>PREFERRED_VERSION</filename> variable
supports limited wildcard use through the
"<filename>%</filename>" character.
You can use the character to match any number of
characters, which can be useful when specifying versions
that contain long revision numbers that potentially change.
You can use the "<filename>%</filename>" character as a
wildcard to match any number of characters, which can be
useful when specifying versions that contain long revision
numbers that could potentially change.
Here are two examples:
<literallayout class='monospaced'>
     PREFERRED_VERSION_python = "2.7.3"
     PREFERRED_VERSION_linux-yocto = "4.12%"
     PREFERRED_VERSION_linux-yocto = "3.10%"
</literallayout>
<note><title>Important</title>
The use of the "<filename>%</filename>" character
is limited in that it only works at the end of the
string.
You cannot use the wildcard character in any other
location of the string.
</note>
</para>
</glossdef>
</glossentry>
@@ -2171,16 +2099,6 @@
</glossdef>
</glossentry>

<glossentry id='var-REPODIR'><glossterm>REPODIR</glossterm>
<glossdef>
<para>
The directory in which a local copy of a
<filename>google-repo</filename> directory is stored
when it is synced.
</para>
</glossdef>
</glossentry>

<glossentry id='var-RPROVIDES'><glossterm>RPROVIDES</glossterm>
<glossdef>
<para>

@@ -56,7 +56,7 @@
-->

<copyright>
<year>2004-2018</year>
<year>2004-2016</year>
<holder>Richard Purdie</holder>
<holder>Chris Larson</holder>
<holder>and Phil Blundell</holder>

@@ -105,7 +105,7 @@ Show debug logging for the specified logging domains
profile the command and print a report
.TP
.B \-uUI, \-\-ui=UI
User interface to use. Currently, knotty, taskexp or ncurses can be specified as UI.
User interface to use. Currently, hob, depexp, goggle or ncurses can be specified as UI.
.TP
.B \-tSERVERTYPE, \-\-servertype=SERVERTYPE
Choose which server to use, none, process or xmlrpc.

@@ -3,7 +3,7 @@
#
# This is a copy on write dictionary and set which abuses classes to try and be nice and fast.
#
# Copyright (C) 2006 Tim Ansell
# Copyright (C) 2006 Tim Amsell
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
@@ -150,7 +150,7 @@ class COWDictMeta(COWMeta):
                yield value
            if type == "items":
                yield (key, value)
        return
        raise StopIteration()

    def iterkeys(cls):
        return cls.iter("keys")
@@ -213,11 +213,11 @@ if __name__ == "__main__":
    print()

    print("a", a)
    for x in a.iteritems():
    for x in a.items():
        print(x)
    print("--")
    print("b", b)
    for x in b.iteritems():
    for x in b.items():
        print(x)
    print()

@@ -225,11 +225,11 @@ if __name__ == "__main__":
    b['a'] = 'c'

    print("a", a)
    for x in a.iteritems():
    for x in a.items():
        print(x)
    print("--")
    print("b", b)
    for x in b.iteritems():
    for x in b.items():
        print(x)
    print()

@@ -244,22 +244,22 @@ if __name__ == "__main__":
    a['set'].add("o2")

    print("a", a)
    for x in a['set'].itervalues():
    for x in a['set'].values():
        print(x)
    print("--")
    print("b", b)
    for x in b['set'].itervalues():
    for x in b['set'].values():
        print(x)
    print()

    b['set'].add('o3')

    print("a", a)
    for x in a['set'].itervalues():
    for x in a['set'].values():
        print(x)
    print("--")
    print("b", b)
    for x in b['set'].itervalues():
    for x in b['set'].values():
        print(x)
    print()

@@ -269,7 +269,7 @@ if __name__ == "__main__":
    a['set2'].add("o2")

    print("a", a)
    for x in a.iteritems():
    for x in a.items():
        print(x)
    print("--")
    print("b", b)
@@ -289,7 +289,7 @@ if __name__ == "__main__":
    print("Yay - has_key with delete works!")

    print("a", a)
    for x in a.iteritems():
    for x in a.items():
        print(x)
    print("--")
    print("b", b)
@@ -300,7 +300,7 @@ if __name__ == "__main__":
    b.__revertitem__('b')

    print("a", a)
    for x in a.iteritems():
    for x in a.items():
        print(x)
    print("--")
    print("b", b)
@@ -310,7 +310,7 @@ if __name__ == "__main__":
    b.__revertitem__('dict')
    print("a", a)
    for x in a.iteritems():
    for x in a.items():
        print(x)
    print("--")
    print("b", b)

@@ -21,7 +21,7 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

__version__ = "1.40.0"
__version__ = "1.32.0"

import sys
if sys.version_info < (3, 4, 0):
@@ -63,10 +63,6 @@ class BBLogger(Logger):
    def verbose(self, msg, *args, **kwargs):
        return self.log(logging.INFO - 1, msg, *args, **kwargs)

    def verbnote(self, msg, *args, **kwargs):
        return self.log(logging.INFO + 2, msg, *args, **kwargs)


logging.raiseExceptions = False
logging.setLoggerClass(BBLogger)

@@ -97,18 +93,6 @@ def debug(lvl, *args):
def note(*args):
    mainlogger.info(''.join(args))

#
# A higher priority note which will show on the console but isn't a warning
#
# Something is happening the user should be aware of but they probably did
# something to make it happen
#
def verbnote(*args):
    mainlogger.verbnote(''.join(args))

#
# Warnings - things the user likely needs to pay attention to and fix
#
def warn(*args):
    mainlogger.warning(''.join(args))

@@ -41,6 +41,8 @@ from bb import data, event, utils
bblogger = logging.getLogger('BitBake')
logger = logging.getLogger('BitBake.Build')

NULL = open(os.devnull, 'r+')

__mtime_cache = {}

def cached_mtime_noerror(f):
@@ -89,14 +91,14 @@ class TaskBase(event.Event):

    def __init__(self, t, logfile, d):
        self._task = t
        self._package = d.getVar("PF")
        self._mc = d.getVar("BB_CURRENT_MC")
        self.taskfile = d.getVar("FILE")
        self._package = d.getVar("PF", True)
        self._mc = d.getVar("BB_CURRENT_MC", True)
        self.taskfile = d.getVar("FILE", True)
        self.taskname = self._task
        self.logfile = logfile
        self.time = time.time()
        event.Event.__init__(self)
        self._message = "recipe %s: task %s: %s" % (d.getVar("PF"), t, self.getDisplayName())
        self._message = "recipe %s: task %s: %s" % (d.getVar("PF", True), t, self.getDisplayName())

    def getTask(self):
        return self._task
@@ -193,13 +195,13 @@ def exec_func(func, d, dirs = None, pythonexception=False):
    oldcwd = None

    flags = d.getVarFlags(func)
    cleandirs = flags.get('cleandirs') if flags else None
    cleandirs = flags.get('cleandirs')
    if cleandirs:
        for cdir in d.expand(cleandirs).split():
            bb.utils.remove(cdir, True)
            bb.utils.mkdirhier(cdir)

    if flags and dirs is None:
    if dirs is None:
        dirs = flags.get('dirs')
        if dirs:
            dirs = d.expand(dirs).split()
@@ -225,17 +227,17 @@ def exec_func(func, d, dirs = None, pythonexception=False):
    else:
        lockfiles = None

    tempdir = d.getVar('T')
    tempdir = d.getVar('T', True)

    # or func allows items to be executed outside of the normal
    # task set, such as buildhistory
    task = d.getVar('BB_RUNTASK') or func
    task = d.getVar('BB_RUNTASK', True) or func
    if task == func:
        taskfunc = task
    else:
        taskfunc = "%s.%s" % (task, func)

    runfmt = d.getVar('BB_RUNFMT') or "run.{func}.{pid}"
    runfmt = d.getVar('BB_RUNFMT', True) or "run.{func}.{pid}"
    runfn = runfmt.format(taskfunc=taskfunc, task=task, func=func, pid=os.getpid())
    runfile = os.path.join(tempdir, runfn)
    bb.utils.mkdirhier(os.path.dirname(runfile))
@@ -367,7 +369,7 @@ exit $ret

    cmd = runfile
    if d.getVarFlag(func, 'fakeroot', False):
        fakerootcmd = d.getVar('FAKEROOT')
        fakerootcmd = d.getVar('FAKEROOT', True)
        if fakerootcmd:
            cmd = [fakerootcmd, runfile]

@@ -376,7 +378,7 @@ exit $ret
    else:
        logfile = sys.stdout

    progress = d.getVarFlag(func, 'progress')
    progress = d.getVarFlag(func, 'progress', True)
    if progress:
        if progress == 'percent':
            # Use default regex
@@ -428,7 +430,7 @@ exit $ret
            else:
                break

    tempdir = d.getVar('T')
    tempdir = d.getVar('T', True)
    fifopath = os.path.join(tempdir, 'fifo.%s' % os.getpid())
    if os.path.exists(fifopath):
        os.unlink(fifopath)
@@ -441,7 +443,7 @@ exit $ret
        with open(os.devnull, 'r+') as stdin:
            bb.process.run(cmd, shell=False, stdin=stdin, log=logfile, extrafiles=[(fifo,readfifo)])
    except bb.process.CmdError:
        logfn = d.getVar('BB_LOGFILE')
        logfn = d.getVar('BB_LOGFILE', True)
        raise FuncFailed(func, logfn)
    finally:
        os.unlink(fifopath)
@@ -472,18 +474,18 @@ def _exec_task(fn, task, d, quieterr):
    logger.debug(1, "Executing task %s", task)

    localdata = _task_data(fn, task, d)
    tempdir = localdata.getVar('T')
    tempdir = localdata.getVar('T', True)
    if not tempdir:
        bb.fatal("T variable not set, unable to build")

    # Change nice level if we're asked to
    nice = localdata.getVar("BB_TASK_NICE_LEVEL")
    nice = localdata.getVar("BB_TASK_NICE_LEVEL", True)
    if nice:
        curnice = os.nice(0)
        nice = int(nice) - curnice
        newnice = os.nice(nice)
        logger.debug(1, "Renice to %s " % newnice)
    ionice = localdata.getVar("BB_TASK_IONICE_LEVEL")
    ionice = localdata.getVar("BB_TASK_IONICE_LEVEL", True)
    if ionice:
        try:
            cls, prio = ionice.split(".", 1)
@@ -494,7 +496,7 @@ def _exec_task(fn, task, d, quieterr):
    bb.utils.mkdirhier(tempdir)

    # Determine the logfile to generate
    logfmt = localdata.getVar('BB_LOGFMT') or 'log.{task}.{pid}'
    logfmt = localdata.getVar('BB_LOGFMT', True) or 'log.{task}.{pid}'
    logbase = logfmt.format(task=task, pid=os.getpid())

    # Document the order of the tasks...
@@ -531,6 +533,7 @@ def _exec_task(fn, task, d, quieterr):
            self.triggered = True

    # Handle logfiles
    si = open('/dev/null', 'r')
    try:
        bb.utils.mkdirhier(os.path.dirname(logfn))
        logfile = open(logfn, 'w')
@@ -544,8 +547,7 @@ def _exec_task(fn, task, d, quieterr):
    ose = [os.dup(sys.stderr.fileno()), sys.stderr.fileno()]

    # Replace those fds with our own
    with open('/dev/null', 'r') as si:
        os.dup2(si.fileno(), osi[1])
    os.dup2(si.fileno(), osi[1])
    os.dup2(logfile.fileno(), oso[1])
    os.dup2(logfile.fileno(), ose[1])

@@ -561,7 +563,6 @@ def _exec_task(fn, task, d, quieterr):

    localdata.setVar('BB_LOGFILE', logfn)
    localdata.setVar('BB_RUNTASK', task)
    localdata.setVar('BB_TASK_LOGGER', bblogger)

    flags = localdata.getVarFlags(task)

@@ -606,6 +607,7 @@ def _exec_task(fn, task, d, quieterr):
    os.close(osi[0])
    os.close(oso[0])
    os.close(ose[0])
    si.close()

    logfile.close()
    if os.path.exists(logfn) and os.path.getsize(logfn) == 0:
@@ -626,7 +628,7 @@ def exec_task(fn, task, d, profile = False):
        quieterr = True

    if profile:
        profname = "profile-%s.log" % (d.getVar("PN") + "-" + task)
        profname = "profile-%s.log" % (d.getVar("PN", True) + "-" + task)
        try:
            import cProfile as profile
        except:
@@ -666,9 +668,9 @@ def stamp_internal(taskname, d, file_name, baseonly=False, noextra=False):
        stamp = d.stamp[file_name]
        extrainfo = d.stamp_extrainfo[file_name].get(taskflagname) or ""
    else:
        stamp = d.getVar('STAMP')
        file_name = d.getVar('BB_FILENAME')
        extrainfo = d.getVarFlag(taskflagname, 'stamp-extra-info') or ""
        stamp = d.getVar('STAMP', True)
        file_name = d.getVar('BB_FILENAME', True)
        extrainfo = d.getVarFlag(taskflagname, 'stamp-extra-info', True) or ""

    if baseonly:
        return stamp
@@ -702,9 +704,9 @@ def stamp_cleanmask_internal(taskname, d, file_name):
        stamp = d.stampclean[file_name]
        extrainfo = d.stamp_extrainfo[file_name].get(taskflagname) or ""
    else:
        stamp = d.getVar('STAMPCLEAN')
        file_name = d.getVar('BB_FILENAME')
        extrainfo = d.getVarFlag(taskflagname, 'stamp-extra-info') or ""
        stamp = d.getVar('STAMPCLEAN', True)
        file_name = d.getVar('BB_FILENAME', True)
        extrainfo = d.getVarFlag(taskflagname, 'stamp-extra-info', True) or ""

    if not stamp:
        return []
@@ -740,7 +742,7 @@ def make_stamp(task, d, file_name = None):
    # as it completes
    if not task.endswith("_setscene") and task != "do_setscene" and not file_name:
        stampbase = stamp_internal(task, d, None, True)
        file_name = d.getVar('BB_FILENAME')
        file_name = d.getVar('BB_FILENAME', True)
        bb.parse.siggen.dump_sigtask(file_name, task, stampbase, True)

def del_stamp(task, d, file_name = None):
@@ -762,7 +764,7 @@ def write_taint(task, d, file_name = None):
    if file_name:
        taintfn = d.stamp[file_name] + '.' + task + '.taint'
    else:
        taintfn = d.getVar('STAMP') + '.' + task + '.taint'
        taintfn = d.getVar('STAMP', True) + '.' + task + '.taint'
    bb.utils.mkdirhier(os.path.dirname(taintfn))
    # The specific content of the taint file is not really important,
    # we just need it to be random, so a random UUID is used
@@ -800,7 +802,6 @@ def add_tasks(tasklist, d):
        if name in flags:
            deptask = d.expand(flags[name])
            task_deps[name][task] = deptask
    getTask('mcdepends')
    getTask('depends')
    getTask('rdepends')
    getTask('deptask')
@@ -860,52 +861,3 @@ def deltask(task, d):
        if task in deps:
            deps.remove(task)
            d.setVarFlag(bbtask, 'deps', deps)

def preceedtask(task, with_recrdeptasks, d):
    """
    Returns a set of tasks in the current recipe which were specified as
    precondition by the task itself ("after") or which listed themselves
    as precondition ("before"). Preceeding tasks specified via the
    "recrdeptask" are included in the result only if requested. Beware
    that this may lead to the task itself being listed.
    """
    preceed = set()

    # Ignore tasks which don't exist
    tasks = d.getVar('__BBTASKS', False)
    if task not in tasks:
        return preceed

    preceed.update(d.getVarFlag(task, 'deps') or [])
    if with_recrdeptasks:
        recrdeptask = d.getVarFlag(task, 'recrdeptask')
        if recrdeptask:
            preceed.update(recrdeptask.split())
    return preceed

def tasksbetween(task_start, task_end, d):
    """
    Return the list of tasks between two tasks in the current recipe,
    where task_start is to start at and task_end is the task to end at
    (and task_end has a dependency chain back to task_start).
    """
    outtasks = []
    tasks = list(filter(lambda k: d.getVarFlag(k, "task"), d.keys()))
    def follow_chain(task, endtask, chain=None):
        if not chain:
            chain = []
        chain.append(task)
        for othertask in tasks:
            if othertask == task:
                continue
            if task == endtask:
                for ctask in chain:
                    if ctask not in outtasks:
                        outtasks.append(ctask)
            else:
                deps = d.getVarFlag(othertask, 'deps', False)
                if task in deps:
                    follow_chain(othertask, endtask, chain)
        chain.pop()
    follow_chain(task_start, task_end)
    return outtasks

@@ -37,7 +37,7 @@ import bb.utils

logger = logging.getLogger("BitBake.Cache")

__cache_version__ = "152"
__cache_version__ = "150"

def getCacheFile(path, filename, data_hash):
    return os.path.join(path, filename + "." + data_hash)
@@ -71,7 +71,7 @@ class RecipeInfoCommon(object):

    @classmethod
    def flaglist(cls, flag, varlist, metadata, squash=False):
        out_dict = dict((var, metadata.getVarFlag(var, flag))
        out_dict = dict((var, metadata.getVarFlag(var, flag, True))
                        for var in varlist)
        if squash:
            return dict((k,v) for (k,v) in out_dict.items() if v)
@@ -86,9 +86,9 @@ class RecipeInfoCommon(object):
class CoreRecipeInfo(RecipeInfoCommon):
    __slots__ = ()

    cachefile = "bb_cache.dat"
    cachefile = "bb_cache.dat"

    def __init__(self, filename, metadata):
    def __init__(self, filename, metadata):
        self.file_depends = metadata.getVar('__depends', False)
        self.timestamp = bb.parse.cached_mtime(filename)
        self.variants = self.listvar('__VARIANTS', metadata) + ['']
@@ -107,7 +107,7 @@ class CoreRecipeInfo(RecipeInfoCommon):

        self.pn = self.getvar('PN', metadata)
        self.packages = self.listvar('PACKAGES', metadata)
        if not self.packages:
        if not self.pn in self.packages:
            self.packages.append(self.pn)

        self.basetaskhashes = self.taskvar('BB_BASEHASH', self.tasks, metadata)
@@ -122,7 +122,7 @@ class CoreRecipeInfo(RecipeInfoCommon):
        self.defaultpref = self.intvar('DEFAULT_PREFERENCE', metadata)
        self.not_world = self.getvar('EXCLUDE_FROM_WORLD', metadata)
        self.stamp = self.getvar('STAMP', metadata)
        self.stampclean = self.getvar('STAMPCLEAN', metadata)
        self.stampclean = self.getvar('STAMPCLEAN', metadata)
        self.stamp_extrainfo = self.flaglist('stamp-extra-info', self.tasks, metadata)
        self.file_checksums = self.flaglist('file-checksums', self.tasks, metadata, True)
        self.packages_dynamic = self.listvar('PACKAGES_DYNAMIC', metadata)
@@ -217,7 +217,7 @@ class CoreRecipeInfo(RecipeInfoCommon):
            cachedata.packages_dynamic[package].append(fn)

        # Build hash of runtime depends and recommends
        for package in self.packages:
        for package in self.packages + [self.pn]:
            cachedata.rundeps[fn][package] = list(self.rdepends) + self.rdepends_pkg[package]
            cachedata.runrecs[fn][package] = list(self.rrecommends) + self.rrecommends_pkg[package]

@@ -296,7 +296,7 @@ def parse_recipe(bb_data, bbfile, appends, mc=''):
        bb_data.setVar("__BBMULTICONFIG", mc)

    # expand tmpdir to include this topdir
    bb_data.setVar('TMPDIR', bb_data.getVar('TMPDIR') or "")
    bb_data.setVar('TMPDIR', bb_data.getVar('TMPDIR', True) or "")
    bbfile_loc = os.path.abspath(os.path.dirname(bbfile))
    oldpath = os.path.abspath(os.getcwd())
    bb.parse.cached_mtime_noerror(bbfile_loc)
@@ -375,10 +375,10 @@ class Cache(NoCache):
        data = databuilder.data

        # Pass caches_array information into Cache Constructor
        # It will be used later for deciding whether we
        # need extra cache file dump/load support
        # It will be used later for deciding whether we
        # need extra cache file dump/load support
        self.caches_array = caches_array
        self.cachedir = data.getVar("CACHE")
        self.cachedir = data.getVar("CACHE", True)
        self.clean = set()
        self.checked = set()
        self.depends_cache = {}
@@ -395,7 +395,7 @@ class Cache(NoCache):
        self.has_cache = True
        self.cachefile = getCacheFile(self.cachedir, "bb_cache.dat", self.data_hash)

        logger.debug(1, "Cache dir: %s", self.cachedir)
        logger.debug(1, "Using cache in '%s'", self.cachedir)
        bb.utils.mkdirhier(self.cachedir)

        cache_ok = True
@@ -408,8 +408,6 @@ class Cache(NoCache):
            self.load_cachefile()
        elif os.path.isfile(self.cachefile):
            logger.info("Out of date cache found, rebuilding...")
        else:
            logger.debug(1, "Cache file %s not found, building..." % self.cachefile)

    def load_cachefile(self):
        cachesize = 0
@@ -423,10 +421,9 @@ class Cache(NoCache):
            cachesize += os.fstat(cachefile.fileno()).st_size

        bb.event.fire(bb.event.CacheLoadStarted(cachesize), self.data)


        for cache_class in self.caches_array:
            cachefile = getCacheFile(self.cachedir, cache_class.cachefile, self.data_hash)
            logger.debug(1, 'Loading cache file: %s' % cachefile)
            with open(cachefile, "rb") as cachefile:
                pickled = pickle.Unpickler(cachefile)
                # Check cache version information
@@ -441,8 +438,8 @@ class Cache(NoCache):
                    logger.info('Cache version mismatch, rebuilding...')
                    return
                elif bitbake_ver != bb.__version__:
                    logger.info('Bitbake version mismatch, rebuilding...')
                    return
                    logger.info('Bitbake version mismatch, rebuilding...')
                    return

        # Load the rest of the cache file
        current_progress = 0
@@ -465,10 +462,6 @@ class Cache(NoCache):
                    self.depends_cache[key] = [value]
                # only fire events on even percentage boundaries
                current_progress = cachefile.tell() + previous_progress
                if current_progress > cachesize:
                    # we might have calculated incorrect total size because a file
                    # might've been written out just after we checked its size
                    cachesize = current_progress
                current_percent = 100 * current_progress / cachesize
                if current_percent > previous_percent:
                    previous_percent = current_percent
@@ -619,13 +612,13 @@ class Cache(NoCache):
            a = fl.find(":True")
            b = fl.find(":False")
            if ((a < 0) and b) or ((b > 0) and (b < a)):
                f = fl[:b+6]
                fl = fl[b+7:]
                f = fl[:b+6]
                fl = fl[b+7:]
            elif ((b < 0) and a) or ((a > 0) and (a < b)):
                f = fl[:a+5]
                fl = fl[a+6:]
                f = fl[:a+5]
                fl = fl[a+6:]
            else:
                break
                break
            fl = fl.strip()
            if "*" in f:
                continue
@@ -799,8 +792,8 @@ class MultiProcessCache(object):
        self.cachedata_extras = self.create_cachedata()

    def init_cache(self, d, cache_file_name=None):
        cachedir = (d.getVar("PERSISTENT_DIR") or
                    d.getVar("CACHE"))
        cachedir = (d.getVar("PERSISTENT_DIR", True) or
                    d.getVar("CACHE", True))
        if cachedir in [None, '']:
            return
        bb.utils.mkdirhier(cachedir)
@@ -889,3 +882,4 @@ class MultiProcessCache(object):
            p.dump([data, self.__class__.CACHE_VERSION])

        bb.utils.unlockfile(glf)


@@ -97,8 +97,6 @@ class FileChecksumCache(MultiProcessCache):

def checksum_dir(pth):
    # Handle directories recursively
    if pth == "/":
        bb.fatal("Refusing to checksum /")
    dirchecksums = []
    for root, dirs, files in os.walk(pth):
        for name in files:

@@ -1,22 +1,3 @@
"""
BitBake code parser

Parses actual code (i.e. python and shell) for functions and in-line
expressions. Used mainly to determine dependencies on other functions
and variables within the BitBake metadata. Also provides a cache for
this information in order to speed up processing.

(Not to be confused with the code that parses the metadata itself,
see lib/bb/parse/ for that).

NOTE: if you change how the parsers gather information you will almost
certainly need to increment CodeParserCache.CACHE_VERSION below so that
any existing codeparser cache gets invalidated. Additionally you'll need
to increment __cache_version__ in cache.py in order to ensure that old
recipe caches don't trigger "Taskhash mismatch" errors.

"""

import ast
import sys
import codegen
@@ -136,11 +117,7 @@ class shellCacheLine(object):

class CodeParserCache(MultiProcessCache):
    cache_file_name = "bb_codeparser.dat"
    # NOTE: you must increment this if you change how the parsers gather information,
    # so that an existing cache gets invalidated. Additionally you'll need
    # to increment __cache_version__ in cache.py in order to ensure that old
    # recipe caches don't trigger "Taskhash mismatch" errors.
    CACHE_VERSION = 10
    CACHE_VERSION = 8

    def __init__(self):
        MultiProcessCache.__init__(self)
@@ -209,15 +186,13 @@ class BufferedLogger(Logger):

    def flush(self):
        for record in self.buffer:
            if self.target.isEnabledFor(record.levelno):
                self.target.handle(record)
            self.target.handle(record)
        self.buffer = []

class PythonParser():
    getvars = (".getVar", ".appendVar", ".prependVar", "oe.utils.conditional")
    getvars = (".getVar", ".appendVar", ".prependVar")
    getvarflags = (".getVarFlag", ".appendVarFlag", ".prependVarFlag")
    containsfuncs = ("bb.utils.contains", "base_contains")
    containsanyfuncs = ("bb.utils.contains_any", "bb.utils.filter")
    containsfuncs = ("bb.utils.contains", "base_contains", "bb.utils.contains_any")
    execfuncs = ("bb.build.exec_func", "bb.build.exec_task")

    def warn(self, func, arg):
@@ -236,17 +211,13 @@ class PythonParser():

    def visit_Call(self, node):
        name = self.called_node_name(node.func)
        if name and (name.endswith(self.getvars) or name.endswith(self.getvarflags) or name in self.containsfuncs or name in self.containsanyfuncs):
        if name and (name.endswith(self.getvars) or name.endswith(self.getvarflags) or name in self.containsfuncs):
            if isinstance(node.args[0], ast.Str):
                varname = node.args[0].s
                if name in self.containsfuncs and isinstance(node.args[1], ast.Str):
                    if varname not in self.contains:
                        self.contains[varname] = set()
                    self.contains[varname].add(node.args[1].s)
                elif name in self.containsanyfuncs and isinstance(node.args[1], ast.Str):
                    if varname not in self.contains:
                        self.contains[varname] = set()
                    self.contains[varname].update(node.args[1].s.split())
                elif name.endswith(self.getvarflags):
                    if isinstance(node.args[1], ast.Str):
                        self.references.add('%s[%s]' % (varname, node.args[1].s))

@@ -28,15 +28,8 @@ and must not trigger events, directly or indirectly.
|
||||
Commands are queued in a CommandQueue
|
||||
"""
|
||||
|
||||
from collections import OrderedDict, defaultdict
|
||||
|
||||
import bb.event
|
||||
import bb.cooker
|
||||
import bb.remotedata
|
||||
|
||||
class DataStoreConnectionHandle(object):
|
||||
def __init__(self, dsindex=0):
|
||||
self.dsindex = dsindex
|
||||
|
||||
class CommandCompleted(bb.event.Event):
|
||||
pass
|
||||
@@ -50,8 +43,6 @@ class CommandFailed(CommandExit):
|
||||
def __init__(self, message):
|
||||
self.error = message
|
||||
CommandExit.__init__(self, 1)
|
||||
def __str__(self):
|
||||
return "Command execution failed: %s" % self.error
|
||||
|
||||
class CommandError(Exception):
|
||||
pass
|
||||
@@ -64,7 +55,6 @@ class Command:
|
||||
self.cooker = cooker
|
||||
self.cmds_sync = CommandsSync()
|
||||
self.cmds_async = CommandsAsync()
|
||||
self.remotedatastores = bb.remotedata.RemoteDatastores(cooker)
|
||||
|
||||
# FIXME Add lock for this
|
||||
self.currentAsyncCommand = None
|
||||
@@ -78,8 +68,7 @@ class Command:
|
||||
if not hasattr(command_method, 'readonly') or False == getattr(command_method, 'readonly'):
|
||||
return None, "Not able to execute not readonly commands in readonly mode"
|
||||
try:
|
||||
self.cooker.process_inotify_updates()
|
||||
if getattr(command_method, 'needconfig', True):
|
||||
if getattr(command_method, 'needconfig', False):
|
||||
self.cooker.updateCacheSync()
|
||||
result = command_method(self, commandline)
|
||||
except CommandError as exc:
|
||||
@@ -99,7 +88,6 @@ class Command:
|
||||
|
||||
def runAsyncCommand(self):
|
||||
try:
|
||||
self.cooker.process_inotify_updates()
|
||||
if self.cooker.state in (bb.cooker.state.error, bb.cooker.state.shutdown, bb.cooker.state.forceshutdown):
|
||||
# updateCache will trigger a shutdown of the parser
|
||||
# and then raise BBHandledException triggering an exit
|
||||
@@ -137,23 +125,14 @@ class Command:
|
||||
|
||||
def finishAsyncCommand(self, msg=None, code=None):
|
||||
if msg or msg == "":
|
||||
bb.event.fire(CommandFailed(msg), self.cooker.data)
|
||||
bb.event.fire(CommandFailed(msg), self.cooker.expanded_data)
|
||||
elif code:
|
||||
bb.event.fire(CommandExit(code), self.cooker.data)
|
||||
bb.event.fire(CommandExit(code), self.cooker.expanded_data)
|
||||
else:
|
||||
bb.event.fire(CommandCompleted(), self.cooker.data)
|
||||
bb.event.fire(CommandCompleted(), self.cooker.expanded_data)
|
||||
self.currentAsyncCommand = None
|
||||
self.cooker.finishcommand()
|
||||
|
||||
def reset(self):
|
||||
self.remotedatastores = bb.remotedata.RemoteDatastores(self.cooker)
|
||||
|
||||
def split_mc_pn(pn):
|
||||
if pn.startswith("multiconfig:"):
|
||||
_, mc, pn = pn.split(":", 2)
|
||||
return (mc, pn)
|
||||
return ('', pn)
|
||||
|
||||
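A quick illustrative check of the split_mc_pn() helper above; the recipe names are hypothetical, not taken from this diff:

# Sketch: expected behaviour of split_mc_pn() as defined above.
assert split_mc_pn("multiconfig:musl:bash") == ("musl", "bash")
assert split_mc_pn("bash") == ("", "bash")
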
class CommandsSync:
    """
    A class of synchronous commands
@@ -200,7 +179,6 @@ class CommandsSync:
        """
        varname = params[0]
        value = str(params[1])
        command.cooker.extraconfigdata[varname] = value
        command.cooker.data.setVar(varname, value)

    def getSetVariable(self, command, params):
@@ -240,15 +218,59 @@ class CommandsSync:
        command.cooker.configuration.postfile = postfiles
    setPrePostConfFiles.needconfig = False

    def getCpuCount(self, command, params):
        """
        Get the CPU count on the bitbake server
        """
        return bb.utils.cpu_count()
    getCpuCount.readonly = True
    getCpuCount.needconfig = False

    def matchFile(self, command, params):
        fMatch = params[0]
        return command.cooker.matchFile(fMatch)
    matchFile.needconfig = False

    def getUIHandlerNum(self, command, params):
        return bb.event.get_uihandler()
    getUIHandlerNum.needconfig = False
    getUIHandlerNum.readonly = True
    def generateNewImage(self, command, params):
        image = params[0]
        base_image = params[1]
        package_queue = params[2]
        timestamp = params[3]
        description = params[4]
        return command.cooker.generateNewImage(image, base_image,
                        package_queue, timestamp, description)

    def ensureDir(self, command, params):
        directory = params[0]
        bb.utils.mkdirhier(directory)
    ensureDir.needconfig = False

    def setVarFile(self, command, params):
        """
        Save a variable in a file; used for saving in a configuration file
        """
        var = params[0]
        val = params[1]
        default_file = params[2]
        op = params[3]
        command.cooker.modifyConfigurationVar(var, val, default_file, op)
    setVarFile.needconfig = False

    def removeVarFile(self, command, params):
        """
        Remove a variable declaration from a file
        """
        var = params[0]
        command.cooker.removeConfigurationVar(var)
    removeVarFile.needconfig = False

    def createConfigFile(self, command, params):
        """
        Create an extra configuration file
        """
        name = params[0]
        command.cooker.createConfigFile(name)
    createConfigFile.needconfig = False

    def setEventMask(self, command, params):
        handlerNum = params[0]
@@ -273,307 +295,9 @@ class CommandsSync:
    def updateConfig(self, command, params):
        options = params[0]
        environment = params[1]
        cmdline = params[2]
        command.cooker.updateConfigOpts(options, environment, cmdline)
        command.cooker.updateConfigOpts(options, environment)
    updateConfig.needconfig = False

    def parseConfiguration(self, command, params):
        """Instruct bitbake to parse its configuration
        NOTE: it is only necessary to call this if you aren't calling any normal action
        (otherwise parsing is taken care of automatically)
        """
        command.cooker.parseConfiguration()
    parseConfiguration.needconfig = False

    def getLayerPriorities(self, command, params):
        command.cooker.parseConfiguration()
        ret = []
        # regex objects cannot be marshalled by xmlrpc
        for collection, pattern, regex, pri in command.cooker.bbfile_config_priorities:
            ret.append((collection, pattern, regex.pattern, pri))
        return ret
    getLayerPriorities.readonly = True

    def getRecipes(self, command, params):
        try:
            mc = params[0]
        except IndexError:
            mc = ''
        return list(command.cooker.recipecaches[mc].pkg_pn.items())
    getRecipes.readonly = True

    def getRecipeDepends(self, command, params):
        try:
            mc = params[0]
        except IndexError:
            mc = ''
        return list(command.cooker.recipecaches[mc].deps.items())
    getRecipeDepends.readonly = True

    def getRecipeVersions(self, command, params):
        try:
            mc = params[0]
        except IndexError:
            mc = ''
        return command.cooker.recipecaches[mc].pkg_pepvpr
    getRecipeVersions.readonly = True

    def getRecipeProvides(self, command, params):
        try:
            mc = params[0]
        except IndexError:
            mc = ''
        return command.cooker.recipecaches[mc].fn_provides
    getRecipeProvides.readonly = True

    def getRecipePackages(self, command, params):
        try:
            mc = params[0]
        except IndexError:
            mc = ''
        return command.cooker.recipecaches[mc].packages
    getRecipePackages.readonly = True

    def getRecipePackagesDynamic(self, command, params):
        try:
            mc = params[0]
        except IndexError:
            mc = ''
        return command.cooker.recipecaches[mc].packages_dynamic
    getRecipePackagesDynamic.readonly = True

    def getRProviders(self, command, params):
        try:
            mc = params[0]
        except IndexError:
            mc = ''
        return command.cooker.recipecaches[mc].rproviders
    getRProviders.readonly = True

    def getRuntimeDepends(self, command, params):
        ret = []
        try:
            mc = params[0]
        except IndexError:
            mc = ''
        rundeps = command.cooker.recipecaches[mc].rundeps
        for key, value in rundeps.items():
            if isinstance(value, defaultdict):
                value = dict(value)
            ret.append((key, value))
        return ret
    getRuntimeDepends.readonly = True

    def getRuntimeRecommends(self, command, params):
        ret = []
        try:
            mc = params[0]
        except IndexError:
            mc = ''
        runrecs = command.cooker.recipecaches[mc].runrecs
        for key, value in runrecs.items():
            if isinstance(value, defaultdict):
                value = dict(value)
            ret.append((key, value))
        return ret
    getRuntimeRecommends.readonly = True

    def getRecipeInherits(self, command, params):
        try:
            mc = params[0]
        except IndexError:
            mc = ''
        return command.cooker.recipecaches[mc].inherits
    getRecipeInherits.readonly = True

    def getBbFilePriority(self, command, params):
        try:
            mc = params[0]
        except IndexError:
            mc = ''
        return command.cooker.recipecaches[mc].bbfile_priority
    getBbFilePriority.readonly = True

    def getDefaultPreference(self, command, params):
        try:
            mc = params[0]
        except IndexError:
            mc = ''
        return command.cooker.recipecaches[mc].pkg_dp
    getDefaultPreference.readonly = True

    def getSkippedRecipes(self, command, params):
        # Return list sorted by reverse priority order
        import bb.cache
        skipdict = OrderedDict(sorted(command.cooker.skiplist.items(),
                                      key=lambda x: (-command.cooker.collection.calc_bbfile_priority(bb.cache.virtualfn2realfn(x[0])[0]), x[0])))
        return list(skipdict.items())
    getSkippedRecipes.readonly = True

    def getOverlayedRecipes(self, command, params):
        return list(command.cooker.collection.overlayed.items())
    getOverlayedRecipes.readonly = True

    def getFileAppends(self, command, params):
        fn = params[0]
        return command.cooker.collection.get_file_appends(fn)
    getFileAppends.readonly = True

    def getAllAppends(self, command, params):
        return command.cooker.collection.bbappends
    getAllAppends.readonly = True

    def findProviders(self, command, params):
        return command.cooker.findProviders()
    findProviders.readonly = True

    def findBestProvider(self, command, params):
        (mc, pn) = split_mc_pn(params[0])
        return command.cooker.findBestProvider(pn, mc)
    findBestProvider.readonly = True

    def allProviders(self, command, params):
        try:
            mc = params[0]
        except IndexError:
            mc = ''
        return list(bb.providers.allProviders(command.cooker.recipecaches[mc]).items())
    allProviders.readonly = True

    def getRuntimeProviders(self, command, params):
        rprovide = params[0]
        try:
            mc = params[1]
        except IndexError:
            mc = ''
        all_p = bb.providers.getRuntimeProviders(command.cooker.recipecaches[mc], rprovide)
        if all_p:
            best = bb.providers.filterProvidersRunTime(all_p, rprovide,
                            command.cooker.data,
                            command.cooker.recipecaches[mc])[0][0]
        else:
            best = None
        return all_p, best
    getRuntimeProviders.readonly = True

    def dataStoreConnectorFindVar(self, command, params):
        dsindex = params[0]
        name = params[1]
        datastore = command.remotedatastores[dsindex]
        value, overridedata = datastore._findVar(name)

        if value:
            content = value.get('_content', None)
            if isinstance(content, bb.data_smart.DataSmart):
                # Value is a datastore (e.g. BB_ORIGENV) - need to handle this carefully
                idx = command.remotedatastores.check_store(content, True)
                return {'_content': DataStoreConnectionHandle(idx),
                        '_connector_origtype': 'DataStoreConnectionHandle',
                        '_connector_overrides': overridedata}
            elif isinstance(content, set):
                return {'_content': list(content),
                        '_connector_origtype': 'set',
                        '_connector_overrides': overridedata}
            else:
                value['_connector_overrides'] = overridedata
        else:
            value = {}
            value['_connector_overrides'] = overridedata
        return value
    dataStoreConnectorFindVar.readonly = True

    def dataStoreConnectorGetKeys(self, command, params):
        dsindex = params[0]
        datastore = command.remotedatastores[dsindex]
        return list(datastore.keys())
    dataStoreConnectorGetKeys.readonly = True

    def dataStoreConnectorGetVarHistory(self, command, params):
        dsindex = params[0]
        name = params[1]
        datastore = command.remotedatastores[dsindex]
        return datastore.varhistory.variable(name)
    dataStoreConnectorGetVarHistory.readonly = True

    def dataStoreConnectorExpandPythonRef(self, command, params):
        config_data_dict = params[0]
        varname = params[1]
        expr = params[2]

        config_data = command.remotedatastores.receive_datastore(config_data_dict)

        varparse = bb.data_smart.VariableParse(varname, config_data)
        return varparse.python_sub(expr)

    def dataStoreConnectorRelease(self, command, params):
        dsindex = params[0]
        if dsindex <= 0:
            raise CommandError('dataStoreConnectorRelease: invalid index %d' % dsindex)
        command.remotedatastores.release(dsindex)

    def dataStoreConnectorSetVarFlag(self, command, params):
        dsindex = params[0]
        name = params[1]
        flag = params[2]
        value = params[3]
        datastore = command.remotedatastores[dsindex]
        datastore.setVarFlag(name, flag, value)

    def dataStoreConnectorDelVar(self, command, params):
        dsindex = params[0]
        name = params[1]
        datastore = command.remotedatastores[dsindex]
        if len(params) > 2:
            flag = params[2]
            datastore.delVarFlag(name, flag)
        else:
            datastore.delVar(name)

    def dataStoreConnectorRenameVar(self, command, params):
        dsindex = params[0]
        name = params[1]
        newname = params[2]
        datastore = command.remotedatastores[dsindex]
        datastore.renameVar(name, newname)

    def parseRecipeFile(self, command, params):
        """
        Parse the specified recipe file (with or without bbappends)
        and return a datastore object representing the environment
        for the recipe.
        """
        fn = params[0]
        appends = params[1]
        appendlist = params[2]
        if len(params) > 3:
            config_data_dict = params[3]
            config_data = command.remotedatastores.receive_datastore(config_data_dict)
        else:
            config_data = None

        if appends:
            if appendlist is not None:
                appendfiles = appendlist
            else:
                appendfiles = command.cooker.collection.get_file_appends(fn)
        else:
            appendfiles = []
        # We are calling bb.cache locally here rather than on the server,
        # but that's OK because it doesn't actually need anything from
        # the server barring the global datastore (which we have a remote
        # version of)
        if config_data:
            # We have to use a different function here if we're passing in a datastore
            # NOTE: we took a copy above, so we don't do it here again
            envdata = bb.cache.parse_recipe(config_data, fn, appendfiles)['']
        else:
            # Use the standard path
            parser = bb.cache.NoCache(command.cooker.databuilder)
            envdata = parser.loadDataFull(fn, appendfiles)
        idx = command.remotedatastores.store(envdata)
        return DataStoreConnectionHandle(idx)
    parseRecipeFile.readonly = True

class CommandsAsync:
    """
    A class of asynchronous commands
@@ -587,15 +311,8 @@ class CommandsAsync:
        """
        bfile = params[0]
        task = params[1]
        if len(params) > 2:
            internal = params[2]
        else:
            internal = False

        if internal:
            command.cooker.buildFileInternal(bfile, task, fireevents=False, quietlog=True)
        else:
            command.cooker.buildFile(bfile, task)
        command.cooker.buildFile(bfile, task)
    buildFile.needcache = False

    def buildTargets(self, command, params):
@@ -645,6 +362,17 @@ class CommandsAsync:
        command.finishAsyncCommand()
    generateTargetsTree.needcache = True

    def findCoreBaseFiles(self, command, params):
        """
        Find certain files in the COREBASE directory, i.e. layers
        """
        subdir = params[0]
        filename = params[1]

        command.cooker.findCoreBaseFiles(subdir, filename)
        command.finishAsyncCommand()
    findCoreBaseFiles.needcache = False

    def findConfigFiles(self, command, params):
        """
        Find config files which provide appropriate values
@@ -744,22 +472,3 @@ class CommandsAsync:
        command.finishAsyncCommand()
    resetCooker.needcache = False

    def clientComplete(self, command, params):
        """
        Do the right thing when the controlling client exits
        """
        command.cooker.clientComplete()
        command.finishAsyncCommand()
    clientComplete.needcache = False

    def findSigInfo(self, command, params):
        """
        Find signature info files via the signature generator
        """
        pn = params[0]
        taskname = params[1]
        sigs = params[2]
        res = bb.siggen.find_siginfo(pn, taskname, sigs, command.cooker.data)
        bb.event.fire(bb.event.FindSigInfoResult(res), command.cooker.data)
        command.finishAsyncCommand()
    findSigInfo.needcache = False

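The commands above are dispatched by name over the UI/server channel; a minimal sketch of a caller, assuming a server handle from an existing UI connection (mirroring the server.runCommand([...]) pattern that appears later in this diff):

# Sketch: invoking a read-only synchronous command from a UI.
ret, error = server.runCommand(["getCpuCount"])
if error:
    raise Exception("getCpuCount failed: %s" % error)
# ret now holds bb.utils.cpu_count() as computed on the bitbake server
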
File diff suppressed because it is too large
@@ -41,6 +41,10 @@ class ConfigParameters(object):

        self.options.pkgs_to_build = targets or []

        self.options.tracking = False
        if hasattr(self.options, "show_environment") and self.options.show_environment:
            self.options.tracking = True

        for key, val in self.options.__dict__.items():
            setattr(self, key, val)

@@ -69,15 +73,15 @@ class ConfigParameters(object):

    def updateToServer(self, server, environment):
        options = {}
        for o in ["abort", "force", "invalidate_stamp",
                  "verbose", "debug", "dry_run", "dump_signatures",
        for o in ["abort", "tryaltconfigs", "force", "invalidate_stamp",
                  "verbose", "debug", "dry_run", "dump_signatures",
                  "debug_domains", "extra_assume_provided", "profile",
                  "prefile", "postfile", "server_timeout"]:
                  "prefile", "postfile"]:
            options[o] = getattr(self.options, o)

        ret, error = server.runCommand(["updateConfig", options, environment, sys.argv])
        ret, error = server.runCommand(["updateConfig", options, environment])
        if error:
            raise Exception("Unable to update the server configuration with local parameters: %s" % error)
            raise Exception("Unable to update the server configuration with local parameters: %s" % error)

    def parseActions(self):
        # Parse any commandline into actions
@@ -127,6 +131,8 @@ class CookerConfiguration(object):
        self.extra_assume_provided = []
        self.prefile = []
        self.postfile = []
        self.prefile_server = []
        self.postfile_server = []
        self.debug = 0
        self.cmd = None
        self.abort = True
@@ -138,13 +144,8 @@ class CookerConfiguration(object):
        self.dump_signatures = []
        self.dry_run = False
        self.tracking = False
        self.xmlrpcinterface = []
        self.server_timeout = None
        self.interface = []
        self.writeeventlog = False
        self.server_only = False
        self.limited_deps = False
        self.runall = []
        self.runonly = []

        self.env = {}

@@ -153,6 +154,7 @@ class CookerConfiguration(object):
            if key in parameters.options.__dict__:
                setattr(self, key, parameters.options.__dict__[key])
        self.env = parameters.environment.copy()
        self.tracking = parameters.tracking

    def setServerRegIdleCallback(self, srcb):
        self.server_register_idlecallback = srcb
@@ -168,7 +170,7 @@ class CookerConfiguration(object):

    def __setstate__(self,state):
        for k in state:
            setattr(self, k, state[k])
            setattr(self, k, state[k])


def catch_parse_error(func):
@@ -210,7 +212,7 @@ def _inherit(bbclass, data):

def findConfigFile(configfile, data):
    search = []
    bbpath = data.getVar("BBPATH")
    bbpath = data.getVar("BBPATH", True)
    if bbpath:
        for i in bbpath.split(":"):
            search.append(os.path.join(i, "conf", configfile))
@@ -225,27 +227,6 @@ def findConfigFile(configfile, data):

    return None

#
# We search for a conf/bblayers.conf under an entry in BBPATH or in cwd working
# up to /. If that fails, we search for a conf/bitbake.conf in BBPATH.
#

def findTopdir():
    d = bb.data.init()
    bbpath = None
    if 'BBPATH' in os.environ:
        bbpath = os.environ['BBPATH']
        d.setVar('BBPATH', bbpath)

    layerconf = findConfigFile("bblayers.conf", d)
    if layerconf:
        return os.path.dirname(os.path.dirname(layerconf))
    if bbpath:
        bitbakeconf = bb.utils.which(bbpath, "conf/bitbake.conf")
        if bitbakeconf:
            return os.path.dirname(os.path.dirname(bitbakeconf))
    return None

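As the comment block above describes, findTopdir() looks for conf/bblayers.conf via BBPATH and falls back to conf/bitbake.conf; a minimal usage sketch, with a hypothetical BBPATH value:

# Sketch: locating the top-level build directory (path is hypothetical).
import os
os.environ["BBPATH"] = "/home/user/poky/build"
topdir = findTopdir()
# topdir is "/home/user/poky/build" if conf/bblayers.conf exists under it,
# otherwise the directory holding conf/bitbake.conf, else None
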
class CookerDataBuilder(object):

    def __init__(self, cookercfg, worker = False):
@@ -271,7 +252,7 @@ class CookerDataBuilder(object):
        filtered_keys = bb.utils.approved_variables()
        bb.data.inheritFromOS(self.basedata, self.savedenv, filtered_keys)
        self.basedata.setVar("BB_ORIGENV", self.savedenv)


        if worker:
            self.basedata.setVar("BB_WORKERCONTEXT", "1")

@@ -305,13 +286,11 @@ class CookerDataBuilder(object):
            self.data_hash = self.data.get_hash()
            self.mcdata[''] = self.data

            multiconfig = (self.data.getVar("BBMULTICONFIG") or "").split()
            multiconfig = (self.data.getVar("BBMULTICONFIG", True) or "").split()
            for config in multiconfig:
                mcdata = self.parseConfigurationFiles(self.prefiles, self.postfiles, config)
                bb.event.fire(bb.event.ConfigParsed(), mcdata)
                self.mcdata[config] = mcdata
            if multiconfig:
                bb.event.fire(bb.event.MultiConfigParsed(self.mcdata), self.data)

        except (SyntaxError, bb.BBHandledException):
            raise bb.BBHandledException
@@ -322,18 +301,6 @@ class CookerDataBuilder(object):
            logger.exception("Error parsing configuration files")
            raise bb.BBHandledException

        # Create a copy so we can reset at a later date when UIs disconnect
        self.origdata = self.data
        self.data = bb.data.createCopy(self.origdata)
        self.mcdata[''] = self.data

    def reset(self):
        # We may not have run parseBaseConfiguration() yet
        if not hasattr(self, 'origdata'):
            return
        self.data = bb.data.createCopy(self.origdata)
        self.mcdata[''] = self.data

    def _findLayerConf(self, data):
        return findConfigFile("bblayers.conf", data)

@@ -353,7 +320,7 @@ class CookerDataBuilder(object):
            data.setVar("TOPDIR", os.path.dirname(os.path.dirname(layerconf)))
            data = parse_config_file(layerconf, data)

            layers = (data.getVar('BBLAYERS') or "").split()
            layers = (data.getVar('BBLAYERS', True) or "").split()

            data = bb.data.createCopy(data)
            approved = bb.utils.approved_variables()
@@ -376,30 +343,7 @@ class CookerDataBuilder(object):
                data.delVar('LAYERDIR_RE')
                data.delVar('LAYERDIR')

            bbfiles_dynamic = (data.getVar('BBFILES_DYNAMIC') or "").split()
            collections = (data.getVar('BBFILE_COLLECTIONS') or "").split()
            invalid = []
            for entry in bbfiles_dynamic:
                parts = entry.split(":", 1)
                if len(parts) != 2:
                    invalid.append(entry)
                    continue
                l, f = parts
                if l in collections:
                    data.appendVar("BBFILES", " " + f)
            if invalid:
                bb.fatal("BBFILES_DYNAMIC entries must be of the form <collection name>:<filename pattern>, not:\n %s" % "\n ".join(invalid))

            layerseries = set((data.getVar("LAYERSERIES_CORENAMES") or "").split())
            for c in collections:
                compat = set((data.getVar("LAYERSERIES_COMPAT_%s" % c) or "").split())
                if compat and not (compat & layerseries):
                    bb.fatal("Layer %s is not compatible with the core layer which only supports these series: %s (layer is compatible with %s)"
                             % (c, " ".join(layerseries), " ".join(compat)))
                elif not compat and not data.getVar("BB_WORKERCONTEXT"):
                    bb.warn("Layer %s should set LAYERSERIES_COMPAT_%s in its conf/layer.conf file to list the core layer names it is compatible with." % (c, c))

        if not data.getVar("BBPATH"):
        if not data.getVar("BBPATH", True):
            msg = "The BBPATH variable is not set"
            if not layerconf:
                msg += (" and bitbake did not find a conf/bblayers.conf file in"
@@ -414,7 +358,7 @@ class CookerDataBuilder(object):
            data = parse_config_file(p, data)

        # Handle any INHERITs and inherit the base class
        bbclasses = ["base"] + (data.getVar('INHERIT') or "").split()
        bbclasses = ["base"] + (data.getVar('INHERIT', True) or "").split()
        for bbclass in bbclasses:
            data = _inherit(bbclass, data)

@@ -426,7 +370,7 @@ class CookerDataBuilder(object):
                parselog.critical("Undefined event handler function '%s'" % var)
                sys.exit(1)
            handlerln = int(data.getVarFlag(var, "lineno", False))
            bb.event.register(var, data.getVar(var, False), (data.getVarFlag(var, "eventmask") or "").split(), handlerfn, handlerln)
            bb.event.register(var, data.getVar(var, False), (data.getVarFlag(var, "eventmask", True) or "").split(), handlerfn, handlerln)

        data.setVar('BBINCLUDED',bb.parse.get_file_depends(data))


@@ -1,14 +1,48 @@
"""
Python Daemonizing helper

Originally based on code Copyright (C) 2005 Chad J. Schroeder but now heavily modified
to allow a function to be daemonized and return for bitbake use by Richard Purdie
Configurable daemon behaviors:

    1.) The current working directory set to the "/" directory.
    2.) The current file creation mode mask set to 0.
    3.) Close all open files (1024).
    4.) Redirect standard I/O streams to "/dev/null".

A failed call to fork() now raises an exception.

References:
    1) Advanced Programming in the Unix Environment: W. Richard Stevens
       http://www.apuebook.com/apue3e.html
    2) The Linux Programming Interface: Michael Kerrisk
       http://man7.org/tlpi/index.html
    3) Unix Programming Frequently Asked Questions:
       http://www.faqs.org/faqs/unix-faq/programmer/faq/

Modified to allow a function to be daemonized and return for
bitbake use by Richard Purdie
"""

import os
import sys
import io
import traceback
__author__ = "Chad J. Schroeder"
__copyright__ = "Copyright (C) 2005 Chad J. Schroeder"
__version__ = "0.2"

# Standard Python modules.
import os               # Miscellaneous OS interfaces.
import sys              # System-specific parameters and functions.

# Default daemon parameters.
# File mode creation mask of the daemon.
# For BitBake's children, we do want to inherit the parent umask.
UMASK = None

# Default maximum for the number of available file descriptors.
MAXFD = 1024

# The standard I/O file descriptors are redirected to /dev/null by default.
if (hasattr(os, "devnull")):
    REDIRECT_TO = os.devnull
else:
    REDIRECT_TO = "/dev/null"

def createDaemon(function, logfile):
    """
@@ -16,10 +50,6 @@ def createDaemon(function, logfile):
    background as a daemon, returning control to the caller.
    """

    # Ensure stdout/stderr are flushed before forking to avoid duplicate output
    sys.stdout.flush()
    sys.stderr.flush()

    try:
        # Fork a child process so the parent can exit. This returns control to
        # the command-line or shell. It also guarantees that the child will not
@@ -35,6 +65,36 @@ def createDaemon(function, logfile):
        # leader of the new process group, we call os.setsid(). The process is
        # also guaranteed not to have a controlling terminal.
        os.setsid()

        # Is ignoring SIGHUP necessary?
        #
        # It's often suggested that the SIGHUP signal should be ignored before
        # the second fork to avoid premature termination of the process. The
        # reason is that when the first child terminates, all processes, e.g.
        # the second child, in the orphaned group will be sent a SIGHUP.
        #
        # "However, as part of the session management system, there are exactly
        # two cases where SIGHUP is sent on the death of a process:
        #
        #   1) When the process that dies is the session leader of a session that
        #      is attached to a terminal device, SIGHUP is sent to all processes
        #      in the foreground process group of that terminal device.
        #   2) When the death of a process causes a process group to become
        #      orphaned, and one or more processes in the orphaned group are
        #      stopped, then SIGHUP and SIGCONT are sent to all members of the
        #      orphaned group." [2]
        #
        # The first case can be ignored since the child is guaranteed not to have
        # a controlling terminal. The second case isn't so easy to dismiss.
        # The process group is orphaned when the first child terminates and
        # POSIX.1 requires that every STOPPED process in an orphaned process
        # group be sent a SIGHUP signal followed by a SIGCONT signal. Since the
        # second child is not STOPPED though, we can safely forego ignoring the
        # SIGHUP signal. In any case, there are no ill-effects if it is ignored.
        #
        # import signal           # Set handlers for asynchronous events.
        # signal.signal(signal.SIGHUP, signal.SIG_IGN)

        try:
            # Fork a second child and exit immediately to prevent zombies. This
            # causes the second child process to be orphaned, making the init
@@ -48,46 +108,86 @@ def createDaemon(function, logfile):
        except OSError as e:
            raise Exception("%s [%d]" % (e.strerror, e.errno))

        if (pid != 0):
        if (pid == 0):    # The second child.
            # We probably don't want the file mode creation mask inherited from
            # the parent, so we give the child complete control over permissions.
            if UMASK is not None:
                os.umask(UMASK)
        else:
            # Parent (the first child) of the second child.
            # exit() or _exit()?
            # _exit is like exit(), but it doesn't call any functions registered
            # with atexit (and on_exit) or any registered signal handlers. It also
            # closes any open file descriptors, but doesn't flush any buffered output.
            # Using exit() may cause any temporary files to be unexpectedly
            # removed. It's therefore recommended that child branches of a fork()
            # and the parent branch(es) of a daemon use _exit().
            os._exit(0)
    else:
        os.waitpid(pid, 0)
        # exit() or _exit()?
        # _exit is like exit(), but it doesn't call any functions registered
        # with atexit (and on_exit) or any registered signal handlers. It also
        # closes any open file descriptors. Using exit() may cause all stdio
        # streams to be flushed twice and any temporary files may be unexpectedly
        # removed. It's therefore recommended that child branches of a fork()
        # and the parent branch(es) of a daemon use _exit().
        return

    # The second child.
    # Close all open file descriptors. This prevents the child from keeping
    # open any file descriptors inherited from the parent. There is a variety
    # of methods to accomplish this task. Three are listed below.
    #
    # Try the system configuration variable, SC_OPEN_MAX, to obtain the maximum
    # number of open file descriptors to close. If it doesn't exist, use
    # the default value (configurable).
    #
    # try:
    #     maxfd = os.sysconf("SC_OPEN_MAX")
    # except (AttributeError, ValueError):
    #     maxfd = MAXFD
    #
    # OR
    #
    # if (os.sysconf_names.has_key("SC_OPEN_MAX")):
    #     maxfd = os.sysconf("SC_OPEN_MAX")
    # else:
    #     maxfd = MAXFD
    #
    # OR
    #
    # Use the getrlimit method to retrieve the maximum file descriptor number
    # that can be opened by this process. If there is no limit on the
    # resource, use the default value.
    #
    import resource             # Resource usage information.
    maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    if (maxfd == resource.RLIM_INFINITY):
        maxfd = MAXFD

    # Iterate through and close all file descriptors.
    # for fd in range(0, maxfd):
    #     try:
    #         os.close(fd)
    #     except OSError:        # ERROR, fd wasn't open to begin with (ignored)
    #         pass

    # Replace standard fds with our own
    with open('/dev/null', 'r') as si:
        os.dup2(si.fileno(), sys.stdin.fileno())
    # Redirect the standard I/O file descriptors to the specified file. Since
    # the daemon has no controlling terminal, most daemons redirect stdin,
    # stdout, and stderr to /dev/null. This is done to prevent side-effects
    # from reads and writes to the standard I/O file descriptors.

    try:
        so = open(logfile, 'a+')
        os.dup2(so.fileno(), sys.stdout.fileno())
        os.dup2(so.fileno(), sys.stderr.fileno())
    except io.UnsupportedOperation:
        sys.stdout = open(logfile, 'a+')
    # This call to open is guaranteed to return the lowest file descriptor,
    # which will be 0 (stdin), since it was closed above.
    # os.open(REDIRECT_TO, os.O_RDWR)    # standard input (0)

    # Have stdout and stderr be the same so log output matches chronologically
    # and there aren't two separate buffers
    sys.stderr = sys.stdout
    # Duplicate standard input to standard output and standard error.
    # os.dup2(0, 1)                      # standard output (1)
    # os.dup2(0, 2)                      # standard error (2)

    try:
        function()
    except Exception as e:
        traceback.print_exc()
    finally:
        bb.event.print_ui_queue()
        # os._exit() doesn't flush open files like os.exit() does. Manually flush
        # stdout and stderr so that any logging output will be seen, particularly
        # exception tracebacks.
        sys.stdout.flush()
        sys.stderr.flush()
        os._exit(0)

    si = open('/dev/null', 'r')
    so = open(logfile, 'w')
    se = so


    # Replace those fds with our own
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())

    function()

    os._exit(0)

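A minimal sketch of how createDaemon() is meant to be called; the worker function and log path below are hypothetical, not part of the recorded change:

# Sketch: daemonize a long-running function. The parent returns from
# createDaemon() and continues; the detached child runs the function with
# stdout/stderr redirected to the logfile.
def server_main():
    print("server running")  # ends up in the logfile, not the terminal

createDaemon(server_main, "/tmp/bitbake-server.log")
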
@@ -38,7 +38,6 @@ the speed is more critical here.

# Based on functions from the base bb module, Copyright 2003 Holger Schurig

import sys, os, re
import hashlib
if sys.argv[0][-5:] == "pydoc":
    path = os.path.dirname(os.path.dirname(sys.argv[1]))
else:
@@ -79,6 +78,59 @@ def initVar(var, d):
    """Non-destructive var init for data structure"""
    d.initVar(var)


def setVar(var, value, d):
    """Set a variable to a given value"""
    d.setVar(var, value)


def getVar(var, d, exp = False):
    """Gets the value of a variable"""
    return d.getVar(var, exp)


def renameVar(key, newkey, d):
    """Renames a variable from key to newkey"""
    d.renameVar(key, newkey)

def delVar(var, d):
    """Removes a variable from the data set"""
    d.delVar(var)

def appendVar(var, value, d):
    """Append additional value to a variable"""
    d.appendVar(var, value)

def setVarFlag(var, flag, flagvalue, d):
    """Set a flag for a given variable to a given value"""
    d.setVarFlag(var, flag, flagvalue)

def getVarFlag(var, flag, d):
    """Gets given flag from given var"""
    return d.getVarFlag(var, flag, False)

def delVarFlag(var, flag, d):
    """Removes a given flag from the variable's flags"""
    d.delVarFlag(var, flag)

def setVarFlags(var, flags, d):
    """Set the flags for a given variable

    Note:
        setVarFlags will not clear previous
        flags. Think of this method as
        addVarFlags
    """
    d.setVarFlags(var, flags)

def getVarFlags(var, d):
    """Gets a variable's flags"""
    return d.getVarFlags(var)

def delVarFlags(var, d):
    """Removes a variable's flags"""
    d.delVarFlags(var)

def keys(d):
    """Return a list of keys in d"""
    return d.keys()
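The functions above are thin functional wrappers over the datastore's own methods; a small illustrative equivalence (the variable name is arbitrary):

# Sketch: the module-level helpers delegate to the DataSmart instance.
d = init()
setVar("FOO", "bar", d)                     # same as d.setVar("FOO", "bar")
assert getVar("FOO", d) == d.getVar("FOO", False)
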
@@ -122,7 +174,7 @@ def inheritFromOS(d, savedenv, permitted):
    for s in savedenv.keys():
        if s in permitted:
            try:
                d.setVar(s, savedenv.getVar(s), op = 'from env')
                d.setVar(s, savedenv.getVar(s, True), op = 'from env')
                if s in exportlist:
                    d.setVarFlag(s, "export", True, op = 'auto env export')
            except TypeError:
@@ -142,7 +194,7 @@ def emit_var(var, o=sys.__stdout__, d = init(), all=False):
    try:
        if all:
            oval = d.getVar(var, False)
        val = d.getVar(var)
        val = d.getVar(var, True)
    except (KeyboardInterrupt, bb.build.FuncFailed):
        raise
    except Exception as exc:
@@ -197,7 +249,7 @@ def emit_env(o=sys.__stdout__, d = init(), all=False):
    keys = sorted((key for key in d.keys() if not key.startswith("__")), key=isfunc)
    grouped = groupby(keys, isfunc)
    for isfunc, keys in grouped:
        for key in sorted(keys):
        for key in keys:
            emit_var(key, o, d, all and not isfunc) and o.write('\n')

def exported_keys(d):
@@ -209,9 +261,9 @@ def exported_vars(d):
    k = list(exported_keys(d))
    for key in k:
        try:
            value = d.getVar(key)
            value = d.getVar(key, True)
        except Exception as err:
            bb.warn("%s: Unable to export ${%s}: %s" % (d.getVar("FILE"), key, err))
            bb.warn("%s: Unable to export ${%s}: %s" % (d.getVar("FILE", True), key, err))
            continue

        if value is not None:
@@ -221,13 +273,13 @@ def emit_func(func, o=sys.__stdout__, d = init()):
    """Emits all items in the data store in a format such that it can be sourced by a shell."""

    keys = (key for key in d.keys() if not key.startswith("__") and not d.getVarFlag(key, "func", False))
    for key in sorted(keys):
    for key in keys:
        emit_var(key, o, d, False)

    o.write('\n')
    emit_var(func, o, d, False) and o.write('\n')
    newdeps = bb.codeparser.ShellParser(func, logger).parse_shell(d.getVar(func))
    newdeps |= set((d.getVarFlag(func, "vardeps") or "").split())
    newdeps = bb.codeparser.ShellParser(func, logger).parse_shell(d.getVar(func, True))
    newdeps |= set((d.getVarFlag(func, "vardeps", True) or "").split())
    seen = set()
    while newdeps:
        deps = newdeps
@@ -236,8 +288,8 @@ def emit_func(func, o=sys.__stdout__, d = init()):
        for dep in deps:
            if d.getVarFlag(dep, "func", False) and not d.getVarFlag(dep, "python", False):
                emit_var(dep, o, d, False) and o.write('\n')
                newdeps |= bb.codeparser.ShellParser(dep, logger).parse_shell(d.getVar(dep))
                newdeps |= set((d.getVarFlag(dep, "vardeps") or "").split())
                newdeps |= bb.codeparser.ShellParser(dep, logger).parse_shell(d.getVar(dep, True))
                newdeps |= set((d.getVarFlag(dep, "vardeps", True) or "").split())
        newdeps -= seen

_functionfmt = """
@@ -260,7 +312,7 @@ def emit_func_python(func, o=sys.__stdout__, d = init()):
    pp = bb.codeparser.PythonParser(func, logger)
    pp.parse_python(d.getVar(func, False))
    newdeps = pp.execs
    newdeps |= set((d.getVarFlag(func, "vardeps") or "").split())
    newdeps |= set((d.getVarFlag(func, "vardeps", True) or "").split())
    seen = set()
    while newdeps:
        deps = newdeps
@@ -272,7 +324,7 @@ def emit_func_python(func, o=sys.__stdout__, d = init()):
                pp = bb.codeparser.PythonParser(dep, logger)
                pp.parse_python(d.getVar(dep, False))
                newdeps |= pp.execs
                newdeps |= set((d.getVarFlag(dep, "vardeps") or "").split())
                newdeps |= set((d.getVarFlag(dep, "vardeps", True) or "").split())
        newdeps -= seen

def update_data(d):
@@ -284,60 +336,49 @@ def build_dependencies(key, keys, shelldeps, varflagsexcl, d):
    try:
        if key[-1] == ']':
            vf = key[:-1].split('[')
            value, parser = d.getVarFlag(vf[0], vf[1], False, retparser=True)
            value = d.getVarFlag(vf[0], vf[1], False)
            parser = d.expandWithRefs(value, key)
            deps |= parser.references
            deps = deps | (keys & parser.execs)
            return deps, value
        varflags = d.getVarFlags(key, ["vardeps", "vardepvalue", "vardepsexclude", "exports", "postfuncs", "prefuncs", "lineno", "filename"]) or {}
        vardeps = varflags.get("vardeps")
        value = d.getVar(key, False)

        def handle_contains(value, contains, d):
            newvalue = ""
            for k in sorted(contains):
                l = (d.getVar(k) or "").split()
                for item in sorted(contains[k]):
                    for word in item.split():
                        if not word in l:
                            newvalue += "\n%s{%s} = Unset" % (k, item)
                            break
                l = (d.getVar(k, True) or "").split()
                for word in sorted(contains[k]):
                    if word in l:
                        newvalue += "\n%s{%s} = Set" % (k, word)
                    else:
                        newvalue += "\n%s{%s} = Set" % (k, item)
                        newvalue += "\n%s{%s} = Unset" % (k, word)
            if not newvalue:
                return value
            if not value:
                return newvalue
            return value + newvalue

        def handle_remove(value, deps, removes, d):
            for r in sorted(removes):
                r2 = d.expandWithRefs(r, None)
                value += "\n_remove of %s" % r
                deps |= r2.references
                deps = deps | (keys & r2.execs)
            return value

        if "vardepvalue" in varflags:
            value = varflags.get("vardepvalue")
            value = varflags.get("vardepvalue")
        elif varflags.get("func"):
            if varflags.get("python"):
                value = d.getVarFlag(key, "_content", False)
                parser = bb.codeparser.PythonParser(key, logger)
                if value and "\t" in value:
                    logger.warning("Variable %s contains tabs, please remove these (%s)" % (key, d.getVar("FILE")))
                    logger.warning("Variable %s contains tabs, please remove these (%s)" % (key, d.getVar("FILE", True)))
                parser.parse_python(value, filename=varflags.get("filename"), lineno=varflags.get("lineno"))
                deps = deps | parser.references
                deps = deps | (keys & parser.execs)
                value = handle_contains(value, parser.contains, d)
            else:
                value, parsedvar = d.getVarFlag(key, "_content", False, retparser=True)
                parsedvar = d.expandWithRefs(value, key)
                parser = bb.codeparser.ShellParser(key, logger)
                parser.parse_shell(parsedvar.value)
                deps = deps | shelldeps
                deps = deps | parsedvar.references
                deps = deps | (keys & parser.execs) | (keys & parsedvar.execs)
                value = handle_contains(value, parsedvar.contains, d)
                if hasattr(parsedvar, "removes"):
                    value = handle_remove(value, deps, parsedvar.removes, d)
            if vardeps is None:
                parser.log.flush()
            if "prefuncs" in varflags:
@@ -347,12 +388,10 @@ def build_dependencies(key, keys, shelldeps, varflagsexcl, d):
            if "exports" in varflags:
                deps = deps | set(varflags["exports"].split())
        else:
            value, parser = d.getVarFlag(key, "_content", False, retparser=True)
            parser = d.expandWithRefs(value, key)
            deps |= parser.references
            deps = deps | (keys & parser.execs)
            value = handle_contains(value, parser.contains, d)
            if hasattr(parser, "removes"):
                value = handle_remove(value, deps, parser.removes, d)

        if "vardepvalueexclude" in varflags:
            exclude = varflags.get("vardepvalueexclude")
@@ -371,8 +410,6 @@ def build_dependencies(key, keys, shelldeps, varflagsexcl, d):

        deps |= set((vardeps or "").split())
        deps -= set(varflags.get("vardepsexclude", "").split())
    except bb.parse.SkipRecipe:
        raise
    except Exception as e:
        bb.warn("Exception during build_dependencies for %s" % key)
        raise
@@ -384,7 +421,7 @@ def generate_dependencies(d):

    keys = set(key for key in d if not key.startswith("__"))
    shelldeps = set(key for key in d.getVar("__exportlist", False) if d.getVarFlag(key, "export", False) and not d.getVarFlag(key, "unexport", False))
    varflagsexcl = d.getVar('BB_SIGNATURE_EXCLUDE_FLAGS')
    varflagsexcl = d.getVar('BB_SIGNATURE_EXCLUDE_FLAGS', True)

    deps = {}
    values = {}
@@ -406,43 +443,6 @@ def generate_dependencies(d):
        #print "For %s: %s" % (task, str(deps[task]))
    return tasklist, deps, values

def generate_dependency_hash(tasklist, gendeps, lookupcache, whitelist, fn):
    taskdeps = {}
    basehash = {}

    for task in tasklist:
        data = lookupcache[task]

        if data is None:
            bb.error("Task %s from %s seems to be empty?!" % (task, fn))
            data = ''

        gendeps[task] -= whitelist
        newdeps = gendeps[task]
        seen = set()
        while newdeps:
            nextdeps = newdeps
            seen |= nextdeps
            newdeps = set()
            for dep in nextdeps:
                if dep in whitelist:
                    continue
                gendeps[dep] -= whitelist
                newdeps |= gendeps[dep]
            newdeps -= seen

        alldeps = sorted(seen)
        for dep in alldeps:
            data = data + dep
            var = lookupcache[dep]
            if var is not None:
                data = data + str(var)
        k = fn + "." + task
        basehash[k] = hashlib.md5(data.encode("utf-8")).hexdigest()
        taskdeps[task] = alldeps

    return taskdeps, basehash

def inherits_class(klass, d):
    val = d.getVar('__inherit_cache', False) or []
    needle = os.path.join('classes', '%s.bbclass' % klass)

@@ -39,10 +39,9 @@ from bb.COW import COWDictBase
logger = logging.getLogger("BitBake.Data")

__setvar_keyword__ = ["_append", "_prepend", "_remove"]
__setvar_regexp__ = re.compile('(?P<base>.*?)(?P<keyword>_append|_prepend|_remove)(_(?P<add>[^A-Z]*))?$')
__setvar_regexp__ = re.compile('(?P<base>.*?)(?P<keyword>_append|_prepend|_remove)(_(?P<add>.*))?$')
__expand_var_regexp__ = re.compile(r"\${[^{}@\n\t :]+}")
__expand_python_regexp__ = re.compile(r"\${@.+?}")
__whitespace_split__ = re.compile('(\s)')

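The __setvar_regexp__ above is what lets the datastore recognise override-style assignments when a variable is set; a short illustrative match (the variable name is an example only):

# Sketch: both regexp variants in the hunk above parse an override suffix.
m = __setvar_regexp__.match("SRC_URI_append_qemux86")
# m.group("base")    == "SRC_URI"
# m.group("keyword") == "_append"
# m.group("add")     == "qemux86"
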
def infer_caller_details(loginfo, parent = False, varval = True):
|
||||
"""Save the caller the trouble of specifying everything."""
|
||||
@@ -105,7 +104,11 @@ class VariableParse:
|
||||
if self.varname and key:
|
||||
if self.varname == key:
|
||||
raise Exception("variable %s references itself!" % self.varname)
|
||||
var = self.d.getVarFlag(key, "_content")
|
||||
if key in self.d.expand_cache:
|
||||
varparse = self.d.expand_cache[key]
|
||||
var = varparse.value
|
||||
else:
|
||||
var = self.d.getVarFlag(key, "_content", True)
|
||||
self.references.add(key)
|
||||
if var is not None:
|
||||
return var
|
||||
@@ -113,21 +116,13 @@ class VariableParse:
|
||||
return match.group()
|
||||
|
||||
def python_sub(self, match):
|
||||
if isinstance(match, str):
|
||||
code = match
|
||||
else:
|
||||
code = match.group()[3:-1]
|
||||
|
||||
if "_remote_data" in self.d:
|
||||
connector = self.d["_remote_data"]
|
||||
return connector.expandPythonRef(self.varname, code, self.d)
|
||||
|
||||
code = match.group()[3:-1]
|
||||
codeobj = compile(code.strip(), self.varname or "<expansion>", "eval")
|
||||
|
||||
parser = bb.codeparser.PythonParser(self.varname, logger)
|
||||
parser.parse_python(code)
|
||||
if self.varname:
|
||||
vardeps = self.d.getVarFlag(self.varname, "vardeps")
|
||||
vardeps = self.d.getVarFlag(self.varname, "vardeps", True)
|
||||
if vardeps is None:
|
||||
parser.log.flush()
|
||||
else:
|
||||
@@ -151,7 +146,7 @@ class DataContext(dict):
|
||||
self['d'] = metadata
|
||||
|
||||
def __missing__(self, key):
|
||||
value = self.metadata.getVar(key)
|
||||
value = self.metadata.getVar(key, True)
|
||||
if value is None or self.metadata.getVarFlag(key, 'func', False):
|
||||
raise KeyError(key)
|
||||
else:
|
||||
@@ -227,19 +222,6 @@ class VariableHistory(object):
|
||||
new.variables = self.variables.copy()
|
||||
return new
|
||||
|
||||
def __getstate__(self):
|
||||
vardict = {}
|
||||
for k, v in self.variables.iteritems():
|
||||
vardict[k] = v
|
||||
return {'dataroot': self.dataroot,
|
||||
'variables': vardict}
|
||||
|
||||
def __setstate__(self, state):
|
||||
self.dataroot = state['dataroot']
|
||||
self.variables = COWDictBase.copy()
|
||||
for k, v in state['variables'].items():
|
||||
self.variables[k] = v
|
||||
|
||||
def record(self, *kwonly, **loginfo):
|
||||
if not self.dataroot._tracking:
|
||||
return
|
||||
@@ -264,26 +246,11 @@ class VariableHistory(object):
|
||||
return
|
||||
self.variables[var].append(loginfo.copy())
|
||||
|
||||
def rename_variable_hist(self, oldvar, newvar):
|
||||
if not self.dataroot._tracking:
|
||||
return
|
||||
if oldvar not in self.variables:
|
||||
return
|
||||
if newvar not in self.variables:
|
||||
self.variables[newvar] = []
|
||||
for i in self.variables[oldvar]:
|
||||
self.variables[newvar].append(i.copy())
|
||||
|
||||
def variable(self, var):
|
||||
remote_connector = self.dataroot.getVar('_remote_data', False)
|
||||
if remote_connector:
|
||||
varhistory = remote_connector.getVarHistory(var)
|
||||
else:
|
||||
varhistory = []
|
||||
|
||||
if var in self.variables:
|
||||
varhistory.extend(self.variables[var])
|
||||
return varhistory
|
||||
return self.variables[var]
|
||||
else:
|
||||
return []
|
||||
|
||||
def emit(self, var, oval, val, o, d):
|
||||
history = self.variable(var)
|
||||
@@ -351,7 +318,7 @@ class VariableHistory(object):
|
||||
the files in which they were added.
|
||||
"""
|
||||
history = self.variable(var)
|
||||
finalitems = (d.getVar(var) or '').split()
|
||||
finalitems = (d.getVar(var, True) or '').split()
|
||||
filemap = {}
|
||||
isset = False
|
||||
for event in history:
|
||||
@@ -408,6 +375,9 @@ class DataSmart(MutableMapping):
|
||||
if not isinstance(s, str): # sanity check
|
||||
return VariableParse(varname, self, s)
|
||||
|
||||
if varname and varname in self.expand_cache:
|
||||
return self.expand_cache[varname]
|
||||
|
||||
varparse = VariableParse(varname, self)
|
||||
|
||||
while s.find('${') != -1:
|
||||
@@ -431,6 +401,9 @@ class DataSmart(MutableMapping):
|
||||
|
||||
varparse.value = s
|
||||
|
||||
if varname:
|
||||
self.expand_cache[varname] = varparse
|
||||
|
||||
return varparse
|
||||
|
||||
def expand(self, s, varname = None):
|
||||
@@ -453,11 +426,11 @@ class DataSmart(MutableMapping):
|
||||
# Can end up here recursively so setup dummy values
|
||||
self.overrides = []
|
||||
self.overridesset = set()
|
||||
self.overrides = (self.getVar("OVERRIDES") or "").split(":") or []
|
||||
self.overrides = (self.getVar("OVERRIDES", True) or "").split(":") or []
|
||||
self.overridesset = set(self.overrides)
|
||||
self.inoverride = False
|
||||
self.expand_cache = {}
|
||||
newoverrides = (self.getVar("OVERRIDES") or "").split(":") or []
|
||||
newoverrides = (self.getVar("OVERRIDES", True) or "").split(":") or []
|
||||
if newoverrides == self.overrides:
|
||||
break
|
||||
self.overrides = newoverrides
|
||||
@@ -474,22 +447,17 @@ class DataSmart(MutableMapping):
|
||||
dest = self.dict
|
||||
while dest:
|
||||
if var in dest:
|
||||
return dest[var], self.overridedata.get(var, None)
|
||||
|
||||
if "_remote_data" in dest:
|
||||
connector = dest["_remote_data"]["_content"]
|
||||
return connector.getVar(var)
|
||||
return dest[var]
|
||||
|
||||
if "_data" not in dest:
|
||||
break
|
||||
dest = dest["_data"]
|
||||
return None, self.overridedata.get(var, None)
|
||||
|
||||
def _makeShadowCopy(self, var):
|
||||
if var in self.dict:
|
||||
return
|
||||
|
||||
local_var, _ = self._findVar(var)
|
||||
local_var = self._findVar(var)
|
||||
|
||||
if local_var:
|
||||
self.dict[var] = copy.copy(local_var)
|
||||
@@ -499,20 +467,13 @@ class DataSmart(MutableMapping):
|
||||
|
||||
def setVar(self, var, value, **loginfo):
|
||||
#print("var=" + str(var) + " val=" + str(value))
|
||||
self.expand_cache = {}
|
||||
parsing=False
|
||||
if 'parsing' in loginfo:
|
||||
parsing=True
|
||||
|
||||
if '_remote_data' in self.dict:
|
||||
connector = self.dict["_remote_data"]["_content"]
|
||||
res = connector.setVar(var, value)
|
||||
if not res:
|
||||
return
|
||||
|
||||
if 'op' not in loginfo:
|
||||
loginfo['op'] = "set"
|
||||
|
||||
self.expand_cache = {}
|
||||
match = __setvar_regexp__.match(var)
|
||||
if match and match.group("keyword") in __setvar_keyword__:
|
||||
base = match.group('base')
|
||||
@@ -548,8 +509,6 @@ class DataSmart(MutableMapping):
|
||||
del self.dict[var]["_append"]
|
||||
if "_prepend" in self.dict[var]:
|
||||
del self.dict[var]["_prepend"]
|
||||
if "_remove" in self.dict[var]:
|
||||
del self.dict[var]["_remove"]
|
||||
if var in self.overridedata:
|
||||
active = []
|
||||
self.need_overrides()
|
||||
@@ -582,7 +541,7 @@ class DataSmart(MutableMapping):
|
||||
nextnew = set()
|
||||
self.overridevars.update(new)
|
||||
for i in new:
|
||||
vardata = self.expandWithRefs(self.getVar(i), i)
|
||||
vardata = self.expandWithRefs(self.getVar(i, True), i)
|
||||
nextnew.update(vardata.references)
|
||||
nextnew.update(vardata.contains.keys())
|
||||
new = nextnew
|
||||
@@ -606,22 +565,15 @@ class DataSmart(MutableMapping):
|
||||
if len(shortvar) == 0:
|
||||
override = None
|
||||
|
||||
def getVar(self, var, expand=True, noweakdefault=False, parsing=False):
|
||||
def getVar(self, var, expand, noweakdefault=False, parsing=False):
|
||||
return self.getVarFlag(var, "_content", expand, noweakdefault, parsing)

def renameVar(self, key, newkey, **loginfo):
"""
Rename the variable key to newkey
"""
if '_remote_data' in self.dict:
connector = self.dict["_remote_data"]["_content"]
res = connector.renameVar(key, newkey)
if not res:
return

val = self.getVar(key, 0, parsing=True)
if val is not None:
self.varhistory.rename_variable_hist(key, newkey)
loginfo['variable'] = newkey
loginfo['op'] = 'rename from %s' % key
loginfo['detail'] = val
@@ -663,16 +615,10 @@ class DataSmart(MutableMapping):
self.setVar(var + "_prepend", value, ignore=True, parsing=True)

def delVar(self, var, **loginfo):
self.expand_cache = {}
if '_remote_data' in self.dict:
connector = self.dict["_remote_data"]["_content"]
res = connector.delVar(var)
if not res:
return

loginfo['detail'] = ""
loginfo['op'] = 'del'
self.varhistory.record(**loginfo)
self.expand_cache = {}
self.dict[var] = {}
if var in self.overridedata:
del self.overridedata[var]
@@ -696,12 +642,6 @@ class DataSmart(MutableMapping):

def setVarFlag(self, var, flag, value, **loginfo):
self.expand_cache = {}
if '_remote_data' in self.dict:
connector = self.dict["_remote_data"]["_content"]
res = connector.setVarFlag(var, flag, value)
if not res:
return

if 'op' not in loginfo:
loginfo['op'] = "set"
loginfo['flag'] = flag
@@ -722,26 +662,14 @@ class DataSmart(MutableMapping):
self.dict["__exportlist"]["_content"] = set()
self.dict["__exportlist"]["_content"].add(var)

def getVarFlag(self, var, flag, expand=True, noweakdefault=False, parsing=False, retparser=False):
if flag == "_content":
cachename = var
else:
if not flag:
bb.warn("Calling getVarFlag with flag unset is invalid")
return None
cachename = var + "[" + flag + "]"

if expand and cachename in self.expand_cache:
return self.expand_cache[cachename].value

local_var, overridedata = self._findVar(var)
def getVarFlag(self, var, flag, expand, noweakdefault=False, parsing=False):
local_var = self._findVar(var)
value = None
removes = set()
if flag == "_content" and overridedata is not None and not parsing:
if flag == "_content" and var in self.overridedata and not parsing:
match = False
active = {}
self.need_overrides()
for (r, o) in overridedata:
for (r, o) in self.overridedata[var]:
# What about double overrides both with "_" in the name?
if o in self.overridesset:
active[o] = r
@@ -763,11 +691,7 @@ class DataSmart(MutableMapping):
match = active[a]
del active[a]
if match:
value, subparser = self.getVarFlag(match, "_content", False, retparser=True)
if hasattr(subparser, "removes"):
# We have to carry the removes from the overridden variable to apply at the
# end of processing
removes = subparser.removes
value = self.getVar(match, False)

if local_var is not None and value is None:
if flag in local_var:
@@ -803,13 +727,17 @@ class DataSmart(MutableMapping):
if match:
value = r + value

parser = None
if expand or retparser:
parser = self.expandWithRefs(value, cachename)
if expand:
value = parser.value
if expand and value:
# Only getvar (flag == _content) hits the expand cache
cachename = None
if flag == "_content":
cachename = var
else:
cachename = var + "[" + flag + "]"
value = self.expand(value, cachename)

if value and flag == "_content" and local_var is not None and "_remove" in local_var and not parsing:
if value and flag == "_content" and local_var is not None and "_remove" in local_var:
removes = []
self.need_overrides()
for (r, o) in local_var["_remove"]:
match = True
@@ -818,46 +746,21 @@ class DataSmart(MutableMapping):
if not o2 in self.overrides:
match = False
if match:
removes.add(r)

if value and flag == "_content" and not parsing:
if removes and parser:
expanded_removes = {}
for r in removes:
expanded_removes[r] = self.expand(r).split()

parser.removes = set()
val = ""
for v in __whitespace_split__.split(parser.value):
skip = False
for r in removes:
if v in expanded_removes[r]:
parser.removes.add(r)
skip = True
if skip:
continue
val = val + v
parser.value = val
if expand:
value = parser.value

if parser:
self.expand_cache[cachename] = parser

if retparser:
return value, parser
removes.extend(self.expand(r).split())

if removes:
filtered = filter(lambda v: v not in removes,
value.split())
value = " ".join(filtered)
if expand and var in self.expand_cache:
# We need to ensure the expand cache has the correct value
# flag == "_content" here
self.expand_cache[var].value = value
return value

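The two getVarFlag variants above differ mainly in how "_remove" is applied: the retparser-based variant tracks removes on the parser object so they can be carried across override resolution, while the other filters the split value directly. A minimal standalone sketch of the underlying token filtering, assuming plain strings rather than the DataSmart store (apply_removes is a hypothetical helper, not the DataSmart implementation itself):

    # Hypothetical helper illustrating BitBake-style "_remove" filtering
    # on whitespace-separated values.
    def apply_removes(value, removes):
        # Expand each remove expression into individual tokens first.
        remove_tokens = set()
        for r in removes:
            remove_tokens.update(r.split())
        # Keep only the tokens that are not scheduled for removal.
        return " ".join(v for v in value.split() if v not in remove_tokens)

    print(apply_removes("a b c d", ["b", "c d"]))  # prints: a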
def delVarFlag(self, var, flag, **loginfo):
self.expand_cache = {}
if '_remote_data' in self.dict:
connector = self.dict["_remote_data"]["_content"]
res = connector.delVarFlag(var, flag)
if not res:
return

local_var, _ = self._findVar(var)
local_var = self._findVar(var)
if not local_var:
return
if not var in self.dict:
@@ -900,7 +803,7 @@ class DataSmart(MutableMapping):
self.dict[var][i] = flags[i]

def getVarFlags(self, var, expand = False, internalflags=False):
local_var, _ = self._findVar(var)
local_var = self._findVar(var)
flags = {}

if local_var:
@@ -942,7 +845,7 @@ class DataSmart(MutableMapping):
data = DataSmart()
data.dict["_data"] = self.dict
data.varhistory = self.varhistory.copy()
data.varhistory.dataroot = data
data.varhistory.datasmart = data
data.inchistory = self.inchistory.copy()

data._tracking = self._tracking
@@ -973,7 +876,7 @@ class DataSmart(MutableMapping):

def localkeys(self):
for key in self.dict:
if key not in ['_data', '_remote_data']:
if key != '_data':
yield key

def __iter__(self):
@@ -982,7 +885,7 @@ class DataSmart(MutableMapping):
def keylist(d):
klist = set()
for key in d:
if key in ["_data", "_remote_data"]:
if key == "_data":
continue
if key in deleted:
continue
@@ -996,13 +899,6 @@ class DataSmart(MutableMapping):
if "_data" in d:
klist |= keylist(d["_data"])

if "_remote_data" in d:
connector = d["_remote_data"]["_content"]
for key in connector.getKeys():
if key in deleted:
continue
klist.add(key)

return klist

self.need_overrides()
@@ -1040,8 +936,9 @@ class DataSmart(MutableMapping):
data = {}
d = self.createCopy()
bb.data.expandKeys(d)
bb.data.update_data(d)

config_whitelist = set((d.getVar("BB_HASHCONFIG_WHITELIST") or "").split())
config_whitelist = set((d.getVar("BB_HASHCONFIG_WHITELIST", True) or "").split())
keys = set(key for key in iter(d) if not key.startswith("__"))
for key in keys:
if key in config_whitelist:
@@ -1060,6 +957,7 @@ class DataSmart(MutableMapping):

for key in ["__BBTASKS", "__BBANONFUNCS", "__BBHANDLERS"]:
bb_list = d.getVar(key, False) or []
bb_list.sort()
data.update({key:str(bb_list)})

if key == "__BBANONFUNCS":

@@ -48,16 +48,6 @@ class Event(object):
def __init__(self):
self.pid = worker_pid


class HeartbeatEvent(Event):
"""Triggered at regular time intervals of 10 seconds. Other events can fire much more often
(runQueueTaskStarted when there are many short tasks) or not at all for long periods
of time (again runQueueTaskStarted, when there is just one long-running task), so this
event is more suitable for doing some task-independent work occassionally."""
def __init__(self, time):
Event.__init__(self)
self.time = time

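HeartbeatEvent fires on a fixed interval regardless of task activity, which makes it a convenient hook for periodic housekeeping. A sketch of how metadata might subscribe to it, assuming the standard addhandler/eventmask mechanism (the handler name is illustrative):

    # Hypothetical handler in a .bbclass; "e" is the event object that
    # BitBake passes to python event handlers.
    addhandler my_heartbeat_handler
    my_heartbeat_handler[eventmask] = "bb.event.HeartbeatEvent"
    python my_heartbeat_handler() {
        bb.note("heartbeat at %s" % e.time)
    }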
Registered = 10
AlreadyRegistered = 14

@@ -141,9 +131,6 @@ def print_ui_queue():
logger = logging.getLogger("BitBake")
if not _uiready:
from bb.msg import BBLogFormatter
# Flush any existing buffered content
sys.stdout.flush()
sys.stderr.flush()
stdout = logging.StreamHandler(sys.stdout)
stderr = logging.StreamHandler(sys.stderr)
formatter = BBLogFormatter("%(levelname)s: %(message)s")
@@ -152,34 +139,23 @@ def print_ui_queue():

# First check to see if we have any proper messages
msgprint = False
msgerrs = False

# Should we print to stderr?
for event in ui_queue[:]:
if isinstance(event, logging.LogRecord) and event.levelno >= logging.WARNING:
msgerrs = True
break

if msgerrs:
logger.addHandler(stderr)
else:
logger.addHandler(stdout)

for event in ui_queue[:]:
if isinstance(event, logging.LogRecord):
if event.levelno > logging.DEBUG:
if event.levelno >= logging.WARNING:
logger.addHandler(stderr)
else:
logger.addHandler(stdout)
logger.handle(event)
msgprint = True
if msgprint:
return

# Nope, so just print all of the messages we have (including debug messages)
if not msgprint:
for event in ui_queue[:]:
if isinstance(event, logging.LogRecord):
logger.handle(event)
if msgerrs:
logger.removeHandler(stderr)
else:
logger.removeHandler(stdout)
logger.addHandler(stdout)
for event in ui_queue[:]:
if isinstance(event, logging.LogRecord):
logger.handle(event)

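One variant of print_ui_queue scans the queue once, routes everything to stderr if any record is WARNING or above, and removes the handler it added when done; the other attaches a handler per record. The single-destination routing in isolation, assuming a plain list of logging.LogRecord objects (flush_queue is a hypothetical name):

    import logging
    import sys

    def flush_queue(ui_queue):
        logger = logging.getLogger("demo")
        # one destination for the whole queue: stderr if anything is WARNING+
        errs = any(isinstance(ev, logging.LogRecord) and ev.levelno >= logging.WARNING
                   for ev in ui_queue)
        handler = logging.StreamHandler(sys.stderr if errs else sys.stdout)
        logger.addHandler(handler)
        try:
            for ev in ui_queue:
                if isinstance(ev, logging.LogRecord):
                    logger.handle(ev)
        finally:
            logger.removeHandler(handler)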
def fire_ui_handlers(event, d):
global _thread_lock
@@ -226,12 +202,6 @@ def fire(event, d):
if worker_fire:
worker_fire(event, d)
else:
# If messages have been queued up, clear the queue
global _uiready, ui_queue
if _uiready and ui_queue:
for queue_event in ui_queue:
fire_ui_handlers(queue_event, d)
ui_queue = []
fire_ui_handlers(event, d)

def fire_from_worker(event, d):
@@ -284,11 +254,6 @@ def register(name, handler, mask=None, filename=None, lineno=None):
def remove(name, handler):
"""Remove an Event handler"""
_handlers.pop(name)
if name in _catchall_handlers:
_catchall_handlers.pop(name)
for event in _event_handler_map.keys():
if name in _event_handler_map[event]:
_event_handler_map[event].pop(name)

def get_handlers():
return _handlers
@@ -302,28 +267,20 @@ def set_eventfilter(func):
_eventfilter = func

def register_UIHhandler(handler, mainui=False):
if mainui:
global _uiready
_uiready = True
bb.event._ui_handler_seq = bb.event._ui_handler_seq + 1
_ui_handlers[_ui_handler_seq] = handler
level, debug_domains = bb.msg.constructLogOptions()
_ui_logfilters[_ui_handler_seq] = UIEventFilter(level, debug_domains)
if mainui:
global _uiready
_uiready = _ui_handler_seq
return _ui_handler_seq

def unregister_UIHhandler(handlerNum, mainui=False):
if mainui:
global _uiready
_uiready = False
def unregister_UIHhandler(handlerNum):
if handlerNum in _ui_handlers:
del _ui_handlers[handlerNum]
return

def get_uihandler():
if _uiready is False:
return None
return _uiready

# Class to allow filtering of events and specific filtering of LogRecords *before* we put them over the IPC
class UIEventFilter(object):
def __init__(self, level, debug_domains):
@@ -386,30 +343,13 @@ class OperationProgress(Event):
class ConfigParsed(Event):
"""Configuration Parsing Complete"""

class MultiConfigParsed(Event):
"""Multi-Config Parsing Complete"""
def __init__(self, mcdata):
self.mcdata = mcdata
Event.__init__(self)

class RecipeEvent(Event):
def __init__(self, fn):
self.fn = fn
Event.__init__(self)

class RecipePreFinalise(RecipeEvent):
""" Recipe Parsing Complete but not yet finalised"""

class RecipeTaskPreProcess(RecipeEvent):
"""
Recipe Tasks about to be finalised
The list of tasks should be final at this point and handlers
are only able to change interdependencies
"""
def __init__(self, fn, tasklist):
self.fn = fn
self.tasklist = tasklist
Event.__init__(self)
""" Recipe Parsing Complete but not yet finialised"""

class RecipeParsed(RecipeEvent):
""" Recipe Parsing Complete """
@@ -432,7 +372,7 @@ class StampUpdate(Event):
targets = property(getTargets)

class BuildBase(Event):
"""Base class for bitbake build events"""
"""Base class for bbmake run events"""

def __init__(self, n, p, failures = 0):
self._name = n
@@ -452,6 +392,12 @@ class BuildBase(Event):
def setName(self, name):
self._name = name

def getCfg(self):
return self.data

def setCfg(self, cfg):
self.data = cfg

def getFailures(self):
"""
Return the number of failed packages
@@ -460,6 +406,9 @@ class BuildBase(Event):

pkgs = property(getPkgs, setPkgs, None, "pkgs property")
name = property(getName, setName, None, "name property")
cfg = property(getCfg, setCfg, None, "cfg property")



class BuildInit(BuildBase):
"""buildFile or buildTargets was invoked"""
@@ -468,13 +417,13 @@ class BuildInit(BuildBase):
BuildBase.__init__(self, name, p)

class BuildStarted(BuildBase, OperationStarted):
"""Event when builds start"""
"""bbmake build run started"""
def __init__(self, n, p, failures = 0):
OperationStarted.__init__(self, "Building Started")
BuildBase.__init__(self, n, p, failures)

class BuildCompleted(BuildBase, OperationCompleted):
"""Event when builds have completed"""
"""bbmake build run completed"""
def __init__(self, total, n, p, failures=0, interrupted=0):
if not failures:
OperationCompleted.__init__(self, total, "Building Succeeded")
@@ -492,23 +441,6 @@ class DiskFull(Event):
self._free = freespace
self._mountpoint = mountpoint

class DiskUsageSample:
def __init__(self, available_bytes, free_bytes, total_bytes):
# Number of bytes available to non-root processes.
self.available_bytes = available_bytes
# Number of bytes available to root processes.
self.free_bytes = free_bytes
# Total capacity of the volume.
self.total_bytes = total_bytes

class MonitorDiskEvent(Event):
"""If BB_DISKMON_DIRS is set, then this event gets triggered each time disk space is checked.
Provides information about devices that are getting monitored."""
def __init__(self, disk_usage):
Event.__init__(self)
# hash of device root path -> DiskUsageSample
self.disk_usage = disk_usage

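MonitorDiskEvent hands handlers a mapping of monitored device root paths to DiskUsageSample objects, so consumers never have to re-stat the filesystem themselves. A hypothetical consumer of the mapping defined above:

    # Illustrative handler body; "event" stands for a MonitorDiskEvent.
    def report_disk_usage(event):
        for path, sample in event.disk_usage.items():
            pct = 100.0 * sample.available_bytes / sample.total_bytes
            print("%s: %.1f%% of %d bytes available" % (path, pct, sample.total_bytes))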
class NoProvider(Event):
"""No Provider for an Event"""

@@ -526,28 +458,6 @@ class NoProvider(Event):
def isRuntime(self):
return self._runtime

def __str__(self):
msg = ''
if self._runtime:
r = "R"
else:
r = ""

extra = ''
if not self._reasons:
if self._close_matches:
extra = ". Close matches:\n %s" % '\n '.join(self._close_matches)

if self._dependees:
msg = "Nothing %sPROVIDES '%s' (but %s %sDEPENDS on or otherwise requires it)%s" % (r, self._item, ", ".join(self._dependees), r, extra)
else:
msg = "Nothing %sPROVIDES '%s'%s" % (r, self._item, extra)
if self._reasons:
for reason in self._reasons:
msg += '\n' + reason
return msg


class MultipleProviders(Event):
"""Multiple Providers"""

@@ -575,16 +485,6 @@ class MultipleProviders(Event):
"""
return self._candidates

def __str__(self):
msg = "Multiple providers are available for %s%s (%s)" % (self._is_runtime and "runtime " or "",
self._item,
", ".join(self._candidates))
rtime = ""
if self._is_runtime:
rtime = "R"
msg += "\nConsider defining a PREFERRED_%sPROVIDER entry to match %s" % (rtime, self._item)
return msg

class ParseStarted(OperationStarted):
"""Recipe parsing for the runqueue has begun"""
def __init__(self, total):
@@ -678,6 +578,14 @@ class FilesMatchingFound(Event):
self._pattern = pattern
self._matches = matches

class CoreBaseFilesFound(Event):
"""
Event when a list of appropriate config files has been generated
"""
def __init__(self, paths):
Event.__init__(self)
self._paths = paths

class ConfigFilesFound(Event):
"""
Event when a list of appropriate config files has been generated
@@ -748,6 +656,19 @@ class LogHandler(logging.Handler):
record.taskpid = worker_pid
return True

class RequestPackageInfo(Event):
"""
Event to request package information
"""

class PackageInfo(Event):
"""
Package information for GUI
"""
def __init__(self, pkginfolist):
Event.__init__(self)
self._pkginfolist = pkginfolist

class MetadataEvent(Event):
"""
Generic event that target for OE-Core classes
@@ -825,10 +746,3 @@ class NetworkTestFailed(Event):
Event to indicate network test has failed
"""

class FindSigInfoResult(Event):
"""
Event to return results from findSigInfo command
"""
def __init__(self, result):
Event.__init__(self)
self.result = result

@@ -35,11 +35,10 @@ import operator
import collections
import subprocess
import pickle
import errno
import bb.persist_data, bb.utils
import bb.checksum
from bb import data
import bb.process
import bb.event

__version__ = "2"
_checksum_cache = bb.checksum.FileChecksumCache()
@@ -49,11 +48,11 @@ logger = logging.getLogger("BitBake.Fetcher")
class BBFetchException(Exception):
"""Class all fetch exceptions inherit from"""
def __init__(self, message):
self.msg = message
Exception.__init__(self, message)
self.msg = message
Exception.__init__(self, message)

def __str__(self):
return self.msg
return self.msg

class UntrustedUrl(BBFetchException):
"""Exception raised when encountering a host not listed in BB_ALLOWED_NETWORKS"""
@@ -69,24 +68,24 @@ class UntrustedUrl(BBFetchException):
class MalformedUrl(BBFetchException):
"""Exception raised when encountering an invalid url"""
def __init__(self, url, message=''):
if message:
msg = message
else:
msg = "The URL: '%s' is invalid and cannot be interpreted" % url
self.url = url
BBFetchException.__init__(self, msg)
self.args = (url,)
if message:
msg = message
else:
msg = "The URL: '%s' is invalid and cannot be interpreted" % url
self.url = url
BBFetchException.__init__(self, msg)
self.args = (url,)

class FetchError(BBFetchException):
"""General fetcher exception when something happens incorrectly"""
def __init__(self, message, url = None):
if url:
if url:
msg = "Fetcher failure for URL: '%s'. %s" % (url, message)
else:
else:
msg = "Fetcher failure: %s" % message
self.url = url
BBFetchException.__init__(self, msg)
self.args = (message, url)
self.url = url
BBFetchException.__init__(self, msg)
self.args = (message, url)

class ChecksumError(FetchError):
"""Exception when mismatched checksum encountered"""
@@ -100,56 +99,49 @@ class NoChecksumError(FetchError):
class UnpackError(BBFetchException):
"""General fetcher exception when something happens incorrectly when unpacking"""
def __init__(self, message, url):
msg = "Unpack failure for URL: '%s'. %s" % (url, message)
self.url = url
BBFetchException.__init__(self, msg)
self.args = (message, url)
msg = "Unpack failure for URL: '%s'. %s" % (url, message)
self.url = url
BBFetchException.__init__(self, msg)
self.args = (message, url)

class NoMethodError(BBFetchException):
"""Exception raised when there is no method to obtain a supplied url or set of urls"""
def __init__(self, url):
msg = "Could not find a fetcher which supports the URL: '%s'" % url
self.url = url
BBFetchException.__init__(self, msg)
self.args = (url,)
msg = "Could not find a fetcher which supports the URL: '%s'" % url
self.url = url
BBFetchException.__init__(self, msg)
self.args = (url,)

class MissingParameterError(BBFetchException):
"""Exception raised when a fetch method is missing a critical parameter in the url"""
def __init__(self, missing, url):
msg = "URL: '%s' is missing the required parameter '%s'" % (url, missing)
self.url = url
self.missing = missing
BBFetchException.__init__(self, msg)
self.args = (missing, url)
msg = "URL: '%s' is missing the required parameter '%s'" % (url, missing)
self.url = url
self.missing = missing
BBFetchException.__init__(self, msg)
self.args = (missing, url)

class ParameterError(BBFetchException):
"""Exception raised when a url cannot be proccessed due to invalid parameters."""
def __init__(self, message, url):
msg = "URL: '%s' has invalid parameters. %s" % (url, message)
self.url = url
BBFetchException.__init__(self, msg)
self.args = (message, url)
msg = "URL: '%s' has invalid parameters. %s" % (url, message)
self.url = url
BBFetchException.__init__(self, msg)
self.args = (message, url)

class NetworkAccess(BBFetchException):
"""Exception raised when network access is disabled but it is required."""
def __init__(self, url, cmd):
msg = "Network access disabled through BB_NO_NETWORK (or set indirectly due to use of BB_FETCH_PREMIRRORONLY) but access requested with command %s (for url %s)" % (cmd, url)
self.url = url
self.cmd = cmd
BBFetchException.__init__(self, msg)
self.args = (url, cmd)
msg = "Network access disabled through BB_NO_NETWORK (or set indirectly due to use of BB_FETCH_PREMIRRORONLY) but access requested with command %s (for url %s)" % (cmd, url)
self.url = url
self.cmd = cmd
BBFetchException.__init__(self, msg)
self.args = (url, cmd)

class NonLocalMethod(Exception):
def __init__(self):
Exception.__init__(self)

class MissingChecksumEvent(bb.event.Event):
def __init__(self, url, md5sum, sha256sum):
self.url = url
self.checksums = {'md5sum': md5sum,
'sha256sum': sha256sum}
bb.event.Event.__init__(self)


class URI(object):
"""
@@ -256,7 +248,7 @@ class URI(object):

# Identify if the URI is relative or not
if urlp.scheme in self._relative_schemes and \
re.compile(r"^\w+:(?!//)").match(uri):
re.compile("^\w+:(?!//)").match(uri):
self.relative = True

if not self.relative:
@@ -363,7 +355,7 @@ def decodeurl(url):
user, password, parameters).
"""

m = re.compile('(?P<type>[^:]*)://((?P<user>[^/;]+)@)?(?P<location>[^;]+)(;(?P<parm>.*))?').match(url)
m = re.compile('(?P<type>[^:]*)://((?P<user>[^/]+)@)?(?P<location>[^;]+)(;(?P<parm>.*))?').match(url)
if not m:
raise MalformedUrl(url)

@@ -383,7 +375,7 @@ def decodeurl(url):
path = location
else:
host = location
path = "/"
path = ""
if user:
m = re.compile('(?P<user>[^:]+)(:?(?P<pswd>.*))').match(user)
if m:
@@ -411,6 +403,8 @@ def encodeurl(decoded):

type, host, path, user, pswd, p = decoded

if not path:
raise MissingParameterError('path', "encoded from the data %s" % str(decoded))
if not type:
raise MissingParameterError('type', "encoded from the data %s" % str(decoded))
url = '%s://' % type
@@ -421,18 +415,17 @@ def encodeurl(decoded):
url += "@"
if host and type != "file":
url += "%s" % host
if path:
# Standardise path to ensure comparisons work
while '//' in path:
path = path.replace("//", "/")
url += "%s" % urllib.parse.quote(path)
# Standardise path to ensure comparisons work
while '//' in path:
path = path.replace("//", "/")
url += "%s" % urllib.parse.quote(path)
if p:
for parm in p:
url += ";%s=%s" % (parm, p[parm])

return url

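In the decodeurl hunk above, the variant with [^/;] in the user group keeps the optional user field from spanning a ';' parameter separator. That pattern exercised in isolation on a sample URL (the URL and values are illustrative):

    import re

    url = "git://user@example.com/repo.git;protocol=https;branch=master"
    m = re.match(r'(?P<type>[^:]*)://((?P<user>[^/;]+)@)?(?P<location>[^;]+)(;(?P<parm>.*))?', url)
    print(m.group('type'))      # git
    print(m.group('user'))      # user
    print(m.group('location'))  # example.com/repo.git
    print(m.group('parm'))      # protocol=https;branch=master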
def uri_replace(ud, uri_find, uri_replace, replacements, d, mirrortarball=None):
def uri_replace(ud, uri_find, uri_replace, replacements, d):
if not ud.url or not uri_find or not uri_replace:
logger.error("uri_replace: passed an undefined value, not replacing")
return None
@@ -452,8 +445,8 @@ def uri_replace(ud, uri_find, uri_replace, replacements, d, mirrortarball=None):
# Handle URL parameters
if i:
# Any specified URL parameters must match
for k in uri_find_decoded[loc]:
if uri_decoded[loc][k] != uri_find_decoded[loc][k]:
for k in uri_replace_decoded[loc]:
if uri_decoded[loc][k] != uri_replace_decoded[loc][k]:
return None
# Overwrite any specified replacement parameters
for k in uri_replace_decoded[loc]:
@@ -462,7 +455,7 @@ def uri_replace(ud, uri_find, uri_replace, replacements, d, mirrortarball=None):
result_decoded[loc][k] = uri_replace_decoded[loc][k]
elif (re.match(regexp, uri_decoded[loc])):
if not uri_replace_decoded[loc]:
result_decoded[loc] = ""
result_decoded[loc] = ""
else:
for k in replacements:
uri_replace_decoded[loc] = uri_replace_decoded[loc].replace(k, replacements[k])
@@ -471,9 +464,9 @@ def uri_replace(ud, uri_find, uri_replace, replacements, d, mirrortarball=None):
if loc == 2:
# Handle path manipulations
basename = None
if uri_decoded[0] != uri_replace_decoded[0] and mirrortarball:
if uri_decoded[0] != uri_replace_decoded[0] and ud.mirrortarball:
# If the source and destination url types differ, must be a mirrortarball mapping
basename = os.path.basename(mirrortarball)
basename = os.path.basename(ud.mirrortarball)
# Kill parameters, they make no sense for mirror tarballs
uri_decoded[5] = {}
elif ud.localpath and ud.method.supports_checksum(ud):
@@ -498,7 +491,7 @@ def fetcher_init(d):
Calls before this must not hit the cache.
"""
# When to drop SCM head revisions controlled by user policy
srcrev_policy = d.getVar('BB_SRCREV_POLICY') or "clear"
srcrev_policy = d.getVar('BB_SRCREV_POLICY', True) or "clear"
if srcrev_policy == "cache":
logger.debug(1, "Keeping SRCREV cache due to cache policy of: %s", srcrev_policy)
elif srcrev_policy == "clear":
@@ -544,11 +537,7 @@ def fetcher_compare_revisions():
return False

def mirror_from_string(data):
mirrors = (data or "").replace('\\n',' ').split()
# Split into pairs
if len(mirrors) % 2 != 0:
bb.warn('Invalid mirror data %s, should have paired members.' % data)
return list(zip(*[iter(mirrors)]*2))
return [ i.split() for i in (data or "").replace('\\n','\n').split('\n') if i ]

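Both variants of mirror_from_string produce (find, replace) pairs; the zip-based one flattens the whole string into whitespace-separated entries and pairs consecutive items, warning on an odd count. The pairing idiom in isolation (the sample data is illustrative):

    data = "http://upstream/.* http://mirror.example/ \\n git://.*/.* http://mirror2.example/"
    mirrors = (data or "").replace('\\n', ' ').split()
    if len(mirrors) % 2 != 0:
        print("unpaired mirror entry")
    print(list(zip(*[iter(mirrors)] * 2)))
    # [('http://upstream/.*', 'http://mirror.example/'),
    #  ('git://.*/.*', 'http://mirror2.example/')]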
def verify_checksum(ud, d, precomputed={}):
"""
@@ -583,7 +572,7 @@ def verify_checksum(ud, d, precomputed={}):

if ud.method.recommends_checksum(ud) and not ud.md5_expected and not ud.sha256_expected:
# If strict checking enabled and neither sum defined, raise error
strict = d.getVar("BB_STRICT_CHECKSUM") or "0"
strict = d.getVar("BB_STRICT_CHECKSUM", True) or "0"
if strict == "1":
logger.error('No checksum specified for %s, please add at least one to the recipe:\n'
'SRC_URI[%s] = "%s"\nSRC_URI[%s] = "%s"' %
@@ -591,14 +580,6 @@ def verify_checksum(ud, d, precomputed={}):
ud.sha256_name, sha256data))
raise NoChecksumError('Missing SRC_URI checksum', ud.url)

bb.event.fire(MissingChecksumEvent(ud.url, md5data, sha256data), d)

if strict == "ignore":
return {
_MD5_KEY: md5data,
_SHA256_KEY: sha256data
}

# Log missing sums so user can more easily add them
logger.warning('Missing md5 SRC_URI checksum for %s, consider adding to the recipe:\n'
'SRC_URI[%s] = "%s"',
@@ -640,28 +621,29 @@ def verify_donestamp(ud, d, origud=None):
Returns True, if the donestamp exists and is valid, False otherwise. When
returning False, any existing done stamps are removed.
"""
if not ud.needdonestamp or (origud and not origud.needdonestamp):
if not ud.needdonestamp:
return True

if not os.path.exists(ud.localpath):
# local path does not exist
if os.path.exists(ud.donestamp):
# done stamp exists, but the downloaded file does not; the done stamp
# must be incorrect, re-trigger the download
bb.utils.remove(ud.donestamp)
if not os.path.exists(ud.donestamp):
return False

if (not ud.method.supports_checksum(ud) or
(origud and not origud.method.supports_checksum(origud))):
# if done stamp exists and checksums not supported; assume the local
# file is current
return os.path.exists(ud.donestamp)
# done stamp exists, checksums not supported; assume the local file is
# current
return True

if not os.path.exists(ud.localpath):
# done stamp exists, but the downloaded file does not; the done stamp
# must be incorrect, re-trigger the download
bb.utils.remove(ud.donestamp)
return False

precomputed_checksums = {}
# Only re-use the precomputed checksums if the donestamp is newer than the
# file. Do not rely on the mtime of directories, though. If ud.localpath is
# a directory, there will probably not be any checksums anyway.
if os.path.exists(ud.donestamp) and (os.path.isdir(ud.localpath) or
if (os.path.isdir(ud.localpath) or
os.path.getmtime(ud.localpath) < os.path.getmtime(ud.donestamp)):
try:
with open(ud.donestamp, "rb") as cachefile:
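verify_donestamp layers several cheap checks before any checksum work: a missing download file invalidates (and deletes) a stale stamp, a missing stamp forces a re-fetch, and cached checksums are only reused when the stamp is newer than the file. A condensed sketch of that early-exit ordering, assuming plain paths instead of the FetchData object (donestamp_valid is a hypothetical helper; the real function goes on to verify checksums, for which the mtime test here stands in):

    import os

    def donestamp_valid(localpath, donestamp, supports_checksum):
        if not os.path.exists(localpath):
            if os.path.exists(donestamp):
                os.unlink(donestamp)  # a stamp without a download is stale
            return False
        if not os.path.exists(donestamp):
            return False
        if not supports_checksum:
            return True  # nothing further to verify; trust the stamp
        # cached checksums are only reusable when the stamp is newer than
        # the file (directory mtimes are not reliable for this)
        return os.path.isdir(localpath) or \
               os.path.getmtime(localpath) < os.path.getmtime(donestamp)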
@@ -736,18 +718,18 @@ def subprocess_setup():

def get_autorev(d):
# only not cache src rev in autorev case
if d.getVar('BB_SRCREV_POLICY') != "cache":
if d.getVar('BB_SRCREV_POLICY', True) != "cache":
d.setVar('BB_DONT_CACHE', '1')
return "AUTOINC"

def get_srcrev(d, method_name='sortable_revision'):
"""
Return the revision string, usually for use in the version string (PV) of the current package
Return the revsion string, usually for use in the version string (PV) of the current package
Most packages usually only have one SCM so we just pass on the call.
In the multi SCM case, we build a value based on SRCREV_FORMAT which must
have been set.

The idea here is that we put the string "AUTOINC+" into return value if the revisions are not
The idea here is that we put the string "AUTOINC+" into return value if the revisions are not
incremental, other code is then responsible for turning that into an increasing value (if needed)

A method_name can be supplied to retrieve an alternatively formatted revision from a fetcher, if
@@ -755,7 +737,7 @@ def get_srcrev(d, method_name='sortable_revision'):
"""

scms = []
fetcher = Fetch(d.getVar('SRC_URI').split(), d)
fetcher = Fetch(d.getVar('SRC_URI', True).split(), d)
urldata = fetcher.ud
for u in urldata:
if urldata[u].method.supports_srcrev():
@@ -775,7 +757,7 @@ def get_srcrev(d, method_name='sortable_revision'):
#
# Mutiple SCMs are in SRC_URI so we resort to SRCREV_FORMAT
#
format = d.getVar('SRCREV_FORMAT')
format = d.getVar('SRCREV_FORMAT', True)
if not format:
raise FetchError("The SRCREV_FORMAT variable must be set when multiple SCMs are used.")

@@ -799,7 +781,7 @@ def get_srcrev(d, method_name='sortable_revision'):
format = re.sub(name_to_rev_re, lambda match: name_to_rev[match.group(0)], format)

if seenautoinc:
format = "AUTOINC+" + format
format = "AUTOINC+" + format

return format

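When several SCMs appear in SRC_URI, get_srcrev substitutes each SCM name in SRCREV_FORMAT with its resolved revision in a single regex pass, as in the re.sub line above. The substitution step in isolation, with illustrative names and revisions:

    import re

    name_to_rev = {'machine': '1234', 'meta': 'abcd'}
    fmt = "machine_meta"
    # alternation of the (escaped) names; each match is replaced by its revision
    name_to_rev_re = "|".join(re.escape(name) for name in name_to_rev)
    print(re.sub(name_to_rev_re, lambda m: name_to_rev[m.group(0)], fmt))
    # prints: 1234_abcd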
@@ -827,7 +809,6 @@ def runfetchcmd(cmd, d, quiet=False, cleanup=None, log=None, workdir=None):
'NO_PROXY', 'no_proxy',
'ALL_PROXY', 'all_proxy',
'GIT_PROXY_COMMAND',
'GIT_SSH',
'GIT_SSL_CAINFO',
'GIT_SMART_HTTP',
'SSH_AUTH_SOCK', 'SSH_AGENT_PID',
@@ -838,26 +819,12 @@ def runfetchcmd(cmd, d, quiet=False, cleanup=None, log=None, workdir=None):
if not cleanup:
cleanup = []

# If PATH contains WORKDIR which contains PV-PR which contains SRCPV we
# can end up in circular recursion here so give the option of breaking it
# in a data store copy.
try:
d.getVar("PV")
d.getVar("PR")
except bb.data_smart.ExpansionError:
d = bb.data.createCopy(d)
d.setVar("PV", "fetcheravoidrecurse")
d.setVar("PR", "fetcheravoidrecurse")

origenv = d.getVar("BB_ORIGENV", False)
for var in exportvars:
val = d.getVar(var) or (origenv and origenv.getVar(var))
val = d.getVar(var, True) or (origenv and origenv.getVar(var, True))
if val:
cmd = 'export ' + var + '=\"%s\"; %s' % (val, cmd)

# Disable pseudo as it may affect ssh, potentially causing it to hang.
cmd = 'export PSEUDO_DISABLED=1; ' + cmd

logger.debug(1, "Running %s", cmd)

success = False
@@ -889,15 +856,12 @@ def runfetchcmd(cmd, d, quiet=False, cleanup=None, log=None, workdir=None):

return output

def check_network_access(d, info, url):
def check_network_access(d, info = "", url = None):
"""
log remote network access, and error if BB_NO_NETWORK is set or the given
URI is untrusted
log remote network access, and error if BB_NO_NETWORK is set
"""
if d.getVar("BB_NO_NETWORK") == "1":
if d.getVar("BB_NO_NETWORK", True) == "1":
raise NetworkAccess(url, info)
elif not trusted_network(d, url):
raise UntrustedUrl(url, info)
else:
logger.debug(1, "Fetcher accessed the network with the command %s" % info)

@@ -912,47 +876,45 @@ def build_mirroruris(origud, mirrors, ld):
replacements["BASENAME"] = origud.path.split("/")[-1]
replacements["MIRRORNAME"] = origud.host.replace(':','.') + origud.path.replace('/', '.').replace('*', '.')

def adduri(ud, uris, uds, mirrors, tarballs):
def adduri(ud, uris, uds, mirrors):
for line in mirrors:
try:
(find, replace) = line
except ValueError:
continue
newuri = uri_replace(ud, find, replace, replacements, ld)
if not newuri or newuri in uris or newuri == origud.url:
continue

for tarball in tarballs:
newuri = uri_replace(ud, find, replace, replacements, ld, tarball)
if not newuri or newuri in uris or newuri == origud.url:
continue
if not trusted_network(ld, newuri):
logger.debug(1, "Mirror %s not in the list of trusted networks, skipping" % (newuri))
continue

if not trusted_network(ld, newuri):
logger.debug(1, "Mirror %s not in the list of trusted networks, skipping" % (newuri))
continue

# Create a local copy of the mirrors minus the current line
# this will prevent us from recursively processing the same line
# as well as indirect recursion A -> B -> C -> A
localmirrors = list(mirrors)
localmirrors.remove(line)
# Create a local copy of the mirrors minus the current line
# this will prevent us from recursively processing the same line
# as well as indirect recursion A -> B -> C -> A
localmirrors = list(mirrors)
localmirrors.remove(line)

try:
newud = FetchData(newuri, ld)
newud.setup_localpath(ld)
except bb.fetch2.BBFetchException as e:
logger.debug(1, "Mirror fetch failure for url %s (original url: %s)" % (newuri, origud.url))
logger.debug(1, str(e))
try:
newud = FetchData(newuri, ld)
newud.setup_localpath(ld)
except bb.fetch2.BBFetchException as e:
logger.debug(1, "Mirror fetch failure for url %s (original url: %s)" % (newuri, origud.url))
logger.debug(1, str(e))
try:
# setup_localpath of file:// urls may fail, we should still see
# if mirrors of the url exist
adduri(newud, uris, uds, localmirrors, tarballs)
except UnboundLocalError:
pass
continue
uris.append(newuri)
uds.append(newud)
# setup_localpath of file:// urls may fail, we should still see
# if mirrors of the url exist
adduri(newud, uris, uds, localmirrors)
except UnboundLocalError:
pass
continue
uris.append(newuri)
uds.append(newud)

adduri(newud, uris, uds, localmirrors, tarballs)
adduri(newud, uris, uds, localmirrors)

adduri(origud, uris, uds, mirrors, origud.mirrortarballs or [None])
adduri(origud, uris, uds, mirrors)

return uris, uds

@@ -996,42 +958,34 @@ def try_mirror_url(fetch, origud, ud, ld, check = False):

# We may be obtaining a mirror tarball which needs further processing by the real fetcher
# If that tarball is a local file:// we need to provide a symlink to it
dldir = ld.getVar("DL_DIR")

if origud.mirrortarballs and os.path.basename(ud.localpath) in origud.mirrortarballs and os.path.basename(ud.localpath) != os.path.basename(origud.localpath):
dldir = ld.getVar("DL_DIR", True)
if origud.mirrortarball and os.path.basename(ud.localpath) == os.path.basename(origud.mirrortarball) \
and os.path.basename(ud.localpath) != os.path.basename(origud.localpath):
# Create donestamp in old format to avoid triggering a re-download
if ud.donestamp:
bb.utils.mkdirhier(os.path.dirname(ud.donestamp))
open(ud.donestamp, 'w').close()
dest = os.path.join(dldir, os.path.basename(ud.localpath))
if not os.path.exists(dest):
# In case this is executing without any file locks held (as is
# the case for file:// URLs), two tasks may end up here at the
# same time, in which case we do not want the second task to
# fail when the link has already been created by the first task.
try:
os.symlink(ud.localpath, dest)
except FileExistsError:
pass
os.symlink(ud.localpath, dest)
if not verify_donestamp(origud, ld) or origud.method.need_update(origud, ld):
origud.method.download(origud, ld)
if hasattr(origud.method, "build_mirror_data"):
if hasattr(origud.method,"build_mirror_data"):
origud.method.build_mirror_data(origud, ld)
return origud.localpath
# Otherwise the result is a local file:// and we symlink to it
ensure_symlink(ud.localpath, origud.localpath)
if not os.path.exists(origud.localpath):
if os.path.islink(origud.localpath):
# Broken symbolic link
os.unlink(origud.localpath)

os.symlink(ud.localpath, origud.localpath)
update_stamp(origud, ld)
return ud.localpath

except bb.fetch2.NetworkAccess:
raise

except IOError as e:
if e.errno in [os.errno.ESTALE]:
logger.warning("Stale Error Observed %s." % ud.url)
return False
raise

except bb.fetch2.BBFetchException as e:
if isinstance(e, ChecksumError):
logger.warning("Mirror checksum failure for url %s (original url: %s)\nCleaning and trying again." % (ud.url, origud.url))
@@ -1053,22 +1007,6 @@ def try_mirror_url(fetch, origud, ud, ld, check = False):
bb.utils.unlockfile(lf)


def ensure_symlink(target, link_name):
if not os.path.exists(link_name):
if os.path.islink(link_name):
# Broken symbolic link
os.unlink(link_name)

# In case this is executing without any file locks held (as is
# the case for file:// URLs), two tasks may end up here at the
# same time, in which case we do not want the second task to
# fail when the link has already been created by the first task.
try:
os.symlink(target, link_name)
except FileExistsError:
pass

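ensure_symlink tolerates the benign race where two tasks create the same link at once: os.symlink is atomic, so the loser simply swallows FileExistsError. The pattern exercised standalone in a throwaway directory (a sketch, not the fetcher code path):

    import os
    import tempfile

    def ensure_symlink(target, link_name):
        if not os.path.exists(link_name):
            if os.path.islink(link_name):
                os.unlink(link_name)  # replace a dangling link
            try:
                os.symlink(target, link_name)
            except FileExistsError:
                pass  # another task won the race; that is fine

    with tempfile.TemporaryDirectory() as td:
        target = os.path.join(td, "file.tar.gz")
        open(target, "w").close()
        link = os.path.join(td, "link.tar.gz")
        ensure_symlink(target, link)
        ensure_symlink(target, link)  # second call is a no-op
        print(os.path.islink(link))  # True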
def try_mirrors(fetch, d, origud, mirrors, check = False):
"""
Try to use a mirrored version of the sources.
@@ -1094,16 +1032,14 @@ def trusted_network(d, url):
BB_ALLOWED_NETWORKS is set globally or for a specific recipe.
Note: modifies SRC_URI & mirrors.
"""
if d.getVar('BB_NO_NETWORK') == "1":
if d.getVar('BB_NO_NETWORK', True) == "1":
return True

pkgname = d.expand(d.getVar('PN', False))
trusted_hosts = None
if pkgname:
trusted_hosts = d.getVarFlag('BB_ALLOWED_NETWORKS', pkgname, False)
trusted_hosts = d.getVarFlag('BB_ALLOWED_NETWORKS', pkgname, False)

if not trusted_hosts:
trusted_hosts = d.getVar('BB_ALLOWED_NETWORKS')
trusted_hosts = d.getVar('BB_ALLOWED_NETWORKS', True)

# Not enabled.
if not trusted_hosts:
@@ -1135,7 +1071,7 @@ def srcrev_internal_helper(ud, d, name):
"""

srcrev = None
pn = d.getVar("PN")
pn = d.getVar("PN", True)
attempts = []
if name != '' and pn:
attempts.append("SRCREV_%s_pn-%s" % (name, pn))
@@ -1146,7 +1082,7 @@ def srcrev_internal_helper(ud, d, name):
attempts.append("SRCREV")

for a in attempts:
srcrev = d.getVar(a)
srcrev = d.getVar(a, True)
if srcrev and srcrev != "INVALID":
break

@@ -1161,7 +1097,7 @@ def srcrev_internal_helper(ud, d, name):
if srcrev == "INVALID" or not srcrev:
return parmrev
if srcrev != parmrev:
raise FetchError("Conflicting revisions (%s from SRCREV and %s from the url) found, please specify one valid value" % (srcrev, parmrev))
raise FetchError("Conflicting revisions (%s from SRCREV and %s from the url) found, please spcify one valid value" % (srcrev, parmrev))
return parmrev

if srcrev == "INVALID" or not srcrev:
@@ -1179,7 +1115,7 @@ def get_checksum_file_list(d):
"""
fetch = Fetch([], d, cache = False, localonly = True)

dl_dir = d.getVar('DL_DIR')
dl_dir = d.getVar('DL_DIR', True)
filelist = []
for u in fetch.urls:
ud = fetch.ud[u]
@@ -1193,9 +1129,9 @@ def get_checksum_file_list(d):
if f.startswith(dl_dir):
# The local fetcher's behaviour is to return a path under DL_DIR if it couldn't find the file anywhere else
if os.path.exists(f):
bb.warn("Getting checksum for %s SRC_URI entry %s: file not found except in DL_DIR" % (d.getVar('PN'), os.path.basename(f)))
bb.warn("Getting checksum for %s SRC_URI entry %s: file not found except in DL_DIR" % (d.getVar('PN', True), os.path.basename(f)))
else:
bb.warn("Unable to get checksum for %s SRC_URI entry %s: file could not be found" % (d.getVar('PN'), os.path.basename(f)))
bb.warn("Unable to get checksum for %s SRC_URI entry %s: file could not be found" % (d.getVar('PN', True), os.path.basename(f)))
filelist.append(f + ":" + str(os.path.exists(f)))

return " ".join(filelist)
@@ -1221,10 +1157,10 @@ class FetchData(object):
self.localfile = ""
self.localpath = None
self.lockfile = None
self.mirrortarballs = []
self.mirrortarball = None
self.basename = None
self.basepath = None
(self.type, self.host, self.path, self.user, self.pswd, self.parm) = decodeurl(d.expand(url))
(self.type, self.host, self.path, self.user, self.pswd, self.parm) = decodeurl(data.expand(url, d))
self.date = self.getSRCDate(d)
self.url = url
if not self.user and "user" in self.parm:
@@ -1241,16 +1177,16 @@ class FetchData(object):
self.sha256_name = "sha256sum"
if self.md5_name in self.parm:
self.md5_expected = self.parm[self.md5_name]
elif self.type not in ["http", "https", "ftp", "ftps", "sftp", "s3"]:
elif self.type not in ["http", "https", "ftp", "ftps", "sftp"]:
self.md5_expected = None
else:
self.md5_expected = d.getVarFlag("SRC_URI", self.md5_name)
self.md5_expected = d.getVarFlag("SRC_URI", self.md5_name, True)
if self.sha256_name in self.parm:
self.sha256_expected = self.parm[self.sha256_name]
elif self.type not in ["http", "https", "ftp", "ftps", "sftp", "s3"]:
elif self.type not in ["http", "https", "ftp", "ftps", "sftp"]:
self.sha256_expected = None
else:
self.sha256_expected = d.getVarFlag("SRC_URI", self.sha256_name)
self.sha256_expected = d.getVarFlag("SRC_URI", self.sha256_name, True)
self.ignore_checksums = False

self.names = self.parm.get("name",'default').split(',')
@@ -1259,7 +1195,7 @@ class FetchData(object):
for m in methods:
if m.supports(self, d):
self.method = m
break
break

if not self.method:
raise NoMethodError(url)
@@ -1268,7 +1204,7 @@ class FetchData(object):
raise NonLocalMethod()

if self.parm.get("proto", None) and "protocol" not in self.parm:
logger.warning('Consider updating %s recipe to use "protocol" not "proto" in SRC_URI.', d.getVar('PN'))
logger.warning('Consider updating %s recipe to use "protocol" not "proto" in SRC_URI.', d.getVar('PN', True))
self.parm["protocol"] = self.parm.get("proto", None)

if hasattr(self.method, "urldata_init"):
@@ -1281,7 +1217,7 @@ class FetchData(object):
elif self.localfile:
self.localpath = self.method.localpath(self, d)

dldir = d.getVar("DL_DIR")
dldir = d.getVar("DL_DIR", True)

if not self.needdonestamp:
return
@@ -1294,12 +1230,12 @@ class FetchData(object):
elif self.basepath or self.basename:
basepath = dldir + os.sep + (self.basepath or self.basename)
else:
bb.fatal("Can't determine lock path for url %s" % url)
bb.fatal("Can't determine lock path for url %s" % url)

self.donestamp = basepath + '.done'
self.lockfile = basepath + '.lock'

def setup_revisions(self, d):
def setup_revisons(self, d):
self.revisions = {}
for name in self.names:
self.revisions[name] = srcrev_internal_helper(self, d, name)
@@ -1321,12 +1257,12 @@ class FetchData(object):
if "srcdate" in self.parm:
return self.parm['srcdate']

pn = d.getVar("PN")
pn = d.getVar("PN", True)

if pn:
return d.getVar("SRCDATE_%s" % pn) or d.getVar("SRCDATE") or d.getVar("DATE")
return d.getVar("SRCDATE_%s" % pn, True) or d.getVar("SRCDATE", True) or d.getVar("DATE", True)

return d.getVar("SRCDATE") or d.getVar("DATE")
return d.getVar("SRCDATE", True) or d.getVar("DATE", True)

class FetchMethod(object):
"""Base class for 'fetch'ing data"""
@@ -1346,7 +1282,7 @@ class FetchMethod(object):
Can also setup variables in urldata for use in go (saving code duplication
and duplicate code execution)
"""
return os.path.join(d.getVar("DL_DIR"), urldata.localfile)
return os.path.join(data.getVar("DL_DIR", d, True), urldata.localfile)

def supports_checksum(self, urldata):
"""
@@ -1357,13 +1293,13 @@ class FetchMethod(object):
if os.path.isdir(urldata.localpath) == True:
return False
if urldata.localpath.find("*") != -1:
return False
return False

return True

def recommends_checksum(self, urldata):
"""
Is the backend on where checksumming is recommended (should warnings
Is the backend on where checksumming is recommended (should warnings
be displayed if there is no checksum)?
"""
return False
@@ -1438,7 +1374,7 @@ class FetchMethod(object):
cmd = 'gzip -dc %s > %s' % (file, efile)
elif file.endswith('.bz2'):
cmd = 'bzip2 -dc %s > %s' % (file, efile)
elif file.endswith('.txz') or file.endswith('.tar.xz'):
elif file.endswith('.tar.xz'):
cmd = 'xz -dc %s | tar x --no-same-owner -f -' % file
elif file.endswith('.xz'):
cmd = 'xz -dc %s > %s' % (file, efile)
@@ -1446,10 +1382,6 @@ class FetchMethod(object):
cmd = 'lzip -dc %s | tar x --no-same-owner -f -' % file
elif file.endswith('.lz'):
cmd = 'lzip -dc %s > %s' % (file, efile)
elif file.endswith('.tar.7z'):
cmd = '7z x -so %s | tar x --no-same-owner -f -' % file
elif file.endswith('.7z'):
cmd = '7za x -y %s 1>/dev/null' % file
elif file.endswith('.zip') or file.endswith('.jar'):
try:
dos = bb.utils.to_boolean(urldata.parm.get('dos'), False)
@@ -1469,7 +1401,7 @@ class FetchMethod(object):
else:
cmd = 'rpm2cpio.sh %s | cpio -id' % (file)
elif file.endswith('.deb') or file.endswith('.ipk'):
output = subprocess.check_output(['ar', '-t', file], preexec_fn=subprocess_setup)
output = subprocess.check_output('ar -t %s' % file, preexec_fn=subprocess_setup, shell=True)
datafile = None
if output:
for line in output.decode().splitlines():
@@ -1481,6 +1413,10 @@ class FetchMethod(object):
else:
raise UnpackError("Unable to unpack deb/ipk package - could not list contents", urldata.url)
cmd = 'ar x %s %s && tar --no-same-owner -xpf %s && rm %s' % (file, datafile, datafile, datafile)
elif file.endswith('.tar.7z'):
cmd = '7z x -so %s | tar xf - ' % file
elif file.endswith('.7z'):
cmd = '7za x -y %s 1>/dev/null' % file

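The unpack branches above amount to a first-match suffix dispatch, with compound suffixes such as .tar.xz tested before their plain counterparts so archives get piped through tar. A condensed table-driven sketch covering only the suffixes visible in this hunk (the real method also handles tgz, zip, rpm, deb/ipk and more, and assumes the same external tools on PATH):

    DECOMPRESS = [
        ('.tar.xz', 'xz -dc %(file)s | tar x --no-same-owner -f -'),
        ('.tar.lz', 'lzip -dc %(file)s | tar x --no-same-owner -f -'),
        ('.gz',     'gzip -dc %(file)s > %(efile)s'),
        ('.bz2',    'bzip2 -dc %(file)s > %(efile)s'),
        ('.xz',     'xz -dc %(file)s > %(efile)s'),
        ('.lz',     'lzip -dc %(file)s > %(efile)s'),
    ]

    def unpack_command(file, efile):
        # first matching suffix wins, so compound suffixes come first
        for suffix, template in DECOMPRESS:
            if file.endswith(suffix):
                return template % {'file': file, 'efile': efile}
        return None

    print(unpack_command('foo.tar.xz', 'foo.tar'))
    # xz -dc foo.tar.xz | tar x --no-same-owner -f -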
# If 'subdir' param exists, create a dir and use it as destination for unpack cmd
|
||||
if 'subdir' in urldata.parm:
|
||||
@@ -1514,7 +1450,7 @@ class FetchMethod(object):
|
||||
if not cmd:
|
||||
return
|
||||
|
||||
path = data.getVar('PATH')
|
||||
path = data.getVar('PATH', True)
|
||||
if path:
|
||||
cmd = "PATH=\"%s\" %s" % (path, cmd)
|
||||
bb.note("Unpacking %s to %s/" % (file, unpackdir))
|
||||
@@ -1571,15 +1507,7 @@ class FetchMethod(object):
|
||||
|
||||
def generate_revision_key(self, ud, d, name):
|
||||
key = self._revision_key(ud, d, name)
|
||||
return "%s-%s" % (key, d.getVar("PN") or "")
|
||||
|
||||
def latest_versionstring(self, ud, d):
|
||||
"""
|
||||
Compute the latest release name like "x.y.x" in "x.y.x+gitHASH"
|
||||
by searching through the tags output of ls-remote, comparing
|
||||
versions and returning the highest match as a (version, revision) pair.
|
||||
"""
|
||||
return ('', '')
|
||||
return "%s-%s" % (key, d.getVar("PN", True) or "")
|
||||
|
||||
class Fetch(object):
|
||||
def __init__(self, urls, d, cache = True, localonly = False, connection_cache = None):
|
||||
@@ -1587,14 +1515,14 @@ class Fetch(object):
|
||||
raise Exception("bb.fetch2.Fetch.__init__: cannot set cache and localonly at same time")
|
||||
|
||||
if len(urls) == 0:
|
||||
urls = d.getVar("SRC_URI").split()
|
||||
urls = d.getVar("SRC_URI", True).split()
|
||||
self.urls = urls
|
||||
self.d = d
|
||||
self.ud = {}
|
||||
self.connection_cache = connection_cache
|
||||
|
||||
fn = d.getVar('FILE')
|
||||
mc = d.getVar('__BBMULTICONFIG') or ""
|
||||
fn = d.getVar('FILE', True)
|
||||
mc = d.getVar('__BBMULTICONFIG', True) or ""
|
||||
if cache and fn and mc + fn in urldata_cache:
|
||||
self.ud = urldata_cache[mc + fn]
|
||||
|
||||
@@ -1637,8 +1565,8 @@ class Fetch(object):
|
||||
if not urls:
|
||||
urls = self.urls
|
||||
|
||||
network = self.d.getVar("BB_NO_NETWORK")
|
||||
premirroronly = (self.d.getVar("BB_FETCH_PREMIRRORONLY") == "1")
|
||||
network = self.d.getVar("BB_NO_NETWORK", True)
|
||||
premirroronly = (self.d.getVar("BB_FETCH_PREMIRRORONLY", True) == "1")
|
||||
|
||||
for u in urls:
|
||||
ud = self.ud[u]
|
||||
@@ -1651,22 +1579,13 @@ class Fetch(object):
|
||||
|
||||
try:
|
||||
self.d.setVar("BB_NO_NETWORK", network)
|
||||
|
||||
|
||||
if verify_donestamp(ud, self.d) and not m.need_update(ud, self.d):
|
||||
localpath = ud.localpath
|
||||
elif m.try_premirror(ud, self.d):
|
||||
logger.debug(1, "Trying PREMIRRORS")
|
||||
mirrors = mirror_from_string(self.d.getVar('PREMIRRORS'))
|
||||
mirrors = mirror_from_string(self.d.getVar('PREMIRRORS', True))
|
||||
localpath = try_mirrors(self, self.d, ud, mirrors, False)
|
||||
if localpath:
|
||||
try:
|
||||
# early checksum verification so that if the checksum of the premirror
|
||||
# contents mismatch the fetcher can still try upstream and mirrors
|
||||
update_stamp(ud, self.d)
|
||||
except ChecksumError as e:
|
||||
logger.warning("Checksum failure encountered with premirror download of %s - will attempt other sources." % u)
|
||||
logger.debug(1, str(e))
|
||||
localpath = ""
|
||||
|
||||
if premirroronly:
|
||||
self.d.setVar("BB_NO_NETWORK", "1")
|
||||
@@ -1705,7 +1624,7 @@ class Fetch(object):
|
||||
if not verified_stamp:
|
||||
m.clean(ud, self.d)
|
||||
logger.debug(1, "Trying MIRRORS")
|
||||
mirrors = mirror_from_string(self.d.getVar('MIRRORS'))
|
||||
mirrors = mirror_from_string(self.d.getVar('MIRRORS', True))
|
||||
localpath = try_mirrors(self, self.d, ud, mirrors)
|
||||
|
||||
if not localpath or ((not os.path.exists(localpath)) and localpath.find("*") == -1):
|
||||
@@ -1715,11 +1634,6 @@ class Fetch(object):
|
||||
|
||||
update_stamp(ud, self.d)
|
||||
|
||||
except IOError as e:
|
||||
if e.errno in [os.errno.ESTALE]:
|
||||
logger.error("Stale Error Observed %s." % u)
|
||||
raise ChecksumError("Stale Error Detected")
|
||||
|
||||
except BBFetchException as e:
|
||||
if isinstance(e, ChecksumError):
|
||||
logger.error("Checksum failure fetching %s" % u)
|
||||
@@ -1743,14 +1657,15 @@ class Fetch(object):
|
||||
m = ud.method
|
||||
logger.debug(1, "Testing URL %s", u)
|
||||
# First try checking uri, u, from PREMIRRORS
|
||||
mirrors = mirror_from_string(self.d.getVar('PREMIRRORS'))
|
||||
mirrors = mirror_from_string(self.d.getVar('PREMIRRORS', True))
|
||||
ret = try_mirrors(self, self.d, ud, mirrors, True)
|
||||
if not ret:
|
||||
# Next try checking from the original uri, u
|
||||
ret = m.checkstatus(self, ud, self.d)
|
||||
if not ret:
|
||||
try:
|
||||
ret = m.checkstatus(self, ud, self.d)
|
||||
except:
|
||||
# Finally, try checking uri, u, from MIRRORS
|
||||
mirrors = mirror_from_string(self.d.getVar('MIRRORS'))
|
||||
mirrors = mirror_from_string(self.d.getVar('MIRRORS', True))
|
||||
ret = try_mirrors(self, self.d, ud, mirrors, True)
|
||||
|
||||
if not ret:
|
||||
@@ -1758,7 +1673,7 @@ class Fetch(object):
|
||||
|
||||
def unpack(self, root, urls=None):
|
||||
"""
|
||||
Unpack urls to root
|
||||
Check all urls exist upstream
|
||||
"""
|
||||
|
||||
if not urls:
|
||||
@@ -1848,7 +1763,6 @@ from . import svn
|
||||
from . import wget
|
||||
from . import ssh
|
||||
from . import sftp
|
||||
from . import s3
|
||||
from . import perforce
|
||||
from . import bzr
|
||||
from . import hg
|
||||
@@ -1866,7 +1780,6 @@ methods.append(gitannex.GitANNEX())
|
||||
methods.append(cvs.Cvs())
|
||||
methods.append(ssh.SSH())
|
||||
methods.append(sftp.SFTP())
|
||||
methods.append(s3.S3())
|
||||
methods.append(perforce.Perforce())
|
||||
methods.append(bzr.Bzr())
|
||||
methods.append(hg.Hg())
|
||||
|
||||
@@ -27,6 +27,7 @@ import os
|
||||
import sys
|
||||
import logging
|
||||
import bb
|
||||
from bb import data
|
||||
from bb.fetch2 import FetchMethod
|
||||
from bb.fetch2 import FetchError
|
||||
from bb.fetch2 import runfetchcmd
|
||||
@@ -41,16 +42,15 @@ class Bzr(FetchMethod):
|
||||
init bzr specific variable within url data
|
||||
"""
|
||||
# Create paths to bzr checkouts
|
||||
bzrdir = d.getVar("BZRDIR") or (d.getVar("DL_DIR") + "/bzr")
|
||||
relpath = self._strip_leading_slashes(ud.path)
|
||||
ud.pkgdir = os.path.join(bzrdir, ud.host, relpath)
|
||||
ud.pkgdir = os.path.join(data.expand('${BZRDIR}', d), ud.host, relpath)
|
||||
|
||||
ud.setup_revisions(d)
|
||||
ud.setup_revisons(d)
|
||||
|
||||
if not ud.revision:
|
||||
ud.revision = self.latest_revision(ud, d)
|
||||
|
||||
ud.localfile = d.expand('bzr_%s_%s_%s.tar.gz' % (ud.host, ud.path.replace('/', '.'), ud.revision))
|
||||
ud.localfile = data.expand('bzr_%s_%s_%s.tar.gz' % (ud.host, ud.path.replace('/', '.'), ud.revision), d)
|
||||
|
||||
def _buildbzrcommand(self, ud, d, command):
|
||||
"""
|
||||
@@ -58,7 +58,7 @@ class Bzr(FetchMethod):
|
||||
command is "fetch", "update", "revno"
|
||||
"""
|
||||
|
||||
basecmd = d.getVar("FETCHCMD_bzr") or "/usr/bin/env bzr"
|
||||
basecmd = data.expand('${FETCHCMD_bzr}', d)
|
||||
|
||||
proto = ud.parm.get('protocol', 'http')
|
||||
|
||||
|
||||
```diff
--- a/bitbake/lib/bb/fetch2/clearcase.py
+++ b/bitbake/lib/bb/fetch2/clearcase.py
@@ -65,10 +65,12 @@ import os
 import sys
 import shutil
 import bb
+from bb import data
 from bb.fetch2 import FetchMethod
 from bb.fetch2 import FetchError
 from bb.fetch2 import runfetchcmd
 from bb.fetch2 import logger
+from distutils import spawn

 class ClearCase(FetchMethod):
     """Class to fetch urls via 'clearcase'"""
@@ -106,13 +108,13 @@ class ClearCase(FetchMethod):
         else:
             ud.module = ""

-        ud.basecmd = d.getVar("FETCHCMD_ccrc") or "/usr/bin/env cleartool || rcleartool"
+        ud.basecmd = d.getVar("FETCHCMD_ccrc", True) or spawn.find_executable("cleartool") or spawn.find_executable("rcleartool")

-        if d.getVar("SRCREV") == "INVALID":
+        if data.getVar("SRCREV", d, True) == "INVALID":
             raise FetchError("Set a valid SRCREV for the clearcase fetcher in your recipe, e.g. SRCREV = \"/main/LATEST\" or any other label of your choice.")

         ud.label = d.getVar("SRCREV", False)
-        ud.customspec = d.getVar("CCASE_CUSTOM_CONFIG_SPEC")
+        ud.customspec = d.getVar("CCASE_CUSTOM_CONFIG_SPEC", True)

         ud.server = "%s://%s%s" % (ud.proto, ud.host, ud.path)

@@ -122,7 +124,7 @@ class ClearCase(FetchMethod):

         ud.viewname = "%s-view%s" % (ud.identifier, d.getVar("DATETIME", d, True))
         ud.csname = "%s-config-spec" % (ud.identifier)
-        ud.ccasedir = os.path.join(d.getVar("DL_DIR"), ud.type)
+        ud.ccasedir = os.path.join(data.getVar("DL_DIR", d, True), ud.type)
         ud.viewdir = os.path.join(ud.ccasedir, ud.viewname)
         ud.configspecfile = os.path.join(ud.ccasedir, ud.csname)
         ud.localfile = "%s.tar.gz" % (ud.identifier)
@@ -142,7 +144,7 @@ class ClearCase(FetchMethod):
         self.debug("configspecfile = %s" % ud.configspecfile)
         self.debug("localfile = %s" % ud.localfile)

-        ud.localfile = os.path.join(d.getVar("DL_DIR"), ud.localfile)
+        ud.localfile = os.path.join(data.getVar("DL_DIR", d, True), ud.localfile)

     def _build_ccase_command(self, ud, command):
         """
```
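The two sides locate the ClearCase client differently: yocto-2.6 falls back to the string `"/usr/bin/env cleartool || rcleartool"` (which only acts as a fallback because the fetch command is run through a shell), while morty probes PATH directly. A sketch of the morty-style lookup chain:

```python
# Sketch of the PATH-probing fallback used on the morty side.
# distutils.spawn is deprecated in newer Python, but it is what this code uses.
from distutils import spawn

basecmd = spawn.find_executable("cleartool") or spawn.find_executable("rcleartool")
if basecmd is None:
    raise RuntimeError("neither cleartool nor rcleartool found on PATH")
print(basecmd)
```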
```diff
--- a/bitbake/lib/bb/fetch2/cvs.py
+++ b/bitbake/lib/bb/fetch2/cvs.py
@@ -63,7 +63,7 @@ class Cvs(FetchMethod):
         if 'fullpath' in ud.parm:
             fullpath = '_fullpath'

-        ud.localfile = d.expand('%s_%s_%s_%s%s%s.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.tag, ud.date, norecurse, fullpath))
+        ud.localfile = bb.data.expand('%s_%s_%s_%s%s%s.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.tag, ud.date, norecurse, fullpath), d)

     def need_update(self, ud, d):
         if (ud.date == "now"):
@@ -87,10 +87,10 @@ class Cvs(FetchMethod):
             cvsroot = ud.path
         else:
             cvsroot = ":" + method
-            cvsproxyhost = d.getVar('CVS_PROXY_HOST')
+            cvsproxyhost = d.getVar('CVS_PROXY_HOST', True)
             if cvsproxyhost:
                 cvsroot += ";proxy=" + cvsproxyhost
-            cvsproxyport = d.getVar('CVS_PROXY_PORT')
+            cvsproxyport = d.getVar('CVS_PROXY_PORT', True)
             if cvsproxyport:
                 cvsroot += ";proxyport=" + cvsproxyport
             cvsroot += ":" + ud.user
@@ -110,7 +110,7 @@ class Cvs(FetchMethod):
         if ud.tag:
             options.append("-r %s" % ud.tag)

-        cvsbasecmd = d.getVar("FETCHCMD_cvs") or "/usr/bin/env cvs"
+        cvsbasecmd = d.getVar("FETCHCMD_cvs", True)
         cvscmd = cvsbasecmd + " '-d" + cvsroot + "' co " + " ".join(options) + " " + ud.module
         cvsupdatecmd = cvsbasecmd + " '-d" + cvsroot + "' update -d -P " + " ".join(options)

@@ -120,9 +120,8 @@ class Cvs(FetchMethod):

         # create module directory
         logger.debug(2, "Fetch: checking for module directory")
-        pkg = d.getVar('PN')
-        cvsdir = d.getVar("CVSDIR") or (d.getVar("DL_DIR") + "/cvs")
-        pkgdir = os.path.join(cvsdir, pkg)
+        pkg = d.getVar('PN', True)
+        pkgdir = os.path.join(d.getVar('CVSDIR', True), pkg)
         moddir = os.path.join(pkgdir, localdir)
         workdir = None
         if os.access(os.path.join(moddir, 'CVS'), os.R_OK):
@@ -165,8 +164,8 @@ class Cvs(FetchMethod):
     def clean(self, ud, d):
         """ Clean CVS Files and tarballs """

-        pkg = d.getVar('PN')
-        pkgdir = os.path.join(d.getVar("CVSDIR"), pkg)
+        pkg = d.getVar('PN', True)
+        pkgdir = os.path.join(d.getVar("CVSDIR", True), pkg)

         bb.utils.remove(pkgdir, True)
         bb.utils.remove(ud.localpath)
```
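The CVSROOT assembly above splices optional `;proxy=` / `;proxyport=` fields into the connection string. A self-contained run with made-up proxy values:

```python
# Illustrative values standing in for CVS_PROXY_HOST / CVS_PROXY_PORT.
cvsroot = ":pserver"
cvsproxyhost = "proxy.example.com"
cvsproxyport = "3128"
if cvsproxyhost:
    cvsroot += ";proxy=" + cvsproxyhost
if cvsproxyport:
    cvsroot += ";proxyport=" + cvsproxyport
cvsroot += ":" + "anonymous"
print(cvsroot)  # -> :pserver;proxy=proxy.example.com;proxyport=3128:anonymous
```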
```diff
--- a/bitbake/lib/bb/fetch2/git.py
+++ b/bitbake/lib/bb/fetch2/git.py
@@ -50,7 +50,7 @@ Supported SRC_URI options are:
    The default is "0", set nobranch=1 if needed.

 - usehead
-   For local git:// urls to use the current branch HEAD as the revision for use with
+   For local git:// urls to use the current branch HEAD as the revsion for use with
    AUTOREV. Implies nobranch.

 """
```
```diff
@@ -70,15 +70,13 @@ Supported SRC_URI options are:
 # with this program; if not, write to the Free Software Foundation, Inc.,
 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

-import collections
-import errno
-import fnmatch
 import os
 import re
-import subprocess
-import tempfile
 import bb
+import errno
 import bb.progress
+from bb import data
 from bb.fetch2 import FetchMethod
 from bb.fetch2 import runfetchcmd
 from bb.fetch2 import logger
```
```diff
@@ -125,9 +123,6 @@ class GitProgressHandler(bb.progress.LineFilterProgressHandler):


 class Git(FetchMethod):
-    bitbake_dir = os.path.abspath(os.path.join(os.path.dirname(os.path.join(os.path.abspath(__file__))), '..', '..', '..'))
-    make_shallow_path = os.path.join(bitbake_dir, 'bin', 'git-make-shallow')
-
     """Class to fetch a module or modules from git repositories"""
     def init(self, d):
         pass
```
```diff
@@ -178,68 +173,20 @@ class Git(FetchMethod):
         branches = ud.parm.get("branch", "master").split(',')
         if len(branches) != len(ud.names):
             raise bb.fetch2.ParameterError("The number of name and branch parameters is not balanced", ud.url)
-
-        ud.cloneflags = "-s -n"
-        if ud.bareclone:
-            ud.cloneflags += " --mirror"
-
-        ud.shallow = d.getVar("BB_GIT_SHALLOW") == "1"
-        ud.shallow_extra_refs = (d.getVar("BB_GIT_SHALLOW_EXTRA_REFS") or "").split()
-
-        depth_default = d.getVar("BB_GIT_SHALLOW_DEPTH")
-        if depth_default is not None:
-            try:
-                depth_default = int(depth_default or 0)
-            except ValueError:
-                raise bb.fetch2.FetchError("Invalid depth for BB_GIT_SHALLOW_DEPTH: %s" % depth_default)
-            else:
-                if depth_default < 0:
-                    raise bb.fetch2.FetchError("Invalid depth for BB_GIT_SHALLOW_DEPTH: %s" % depth_default)
-        else:
-            depth_default = 1
-        ud.shallow_depths = collections.defaultdict(lambda: depth_default)
-
-        revs_default = d.getVar("BB_GIT_SHALLOW_REVS", True)
-        ud.shallow_revs = []
         ud.branches = {}
-        for pos, name in enumerate(ud.names):
-            branch = branches[pos]
+        for name in ud.names:
+            branch = branches[ud.names.index(name)]
             ud.branches[name] = branch
             ud.unresolvedrev[name] = branch
-
-            shallow_depth = d.getVar("BB_GIT_SHALLOW_DEPTH_%s" % name)
-            if shallow_depth is not None:
-                try:
-                    shallow_depth = int(shallow_depth or 0)
-                except ValueError:
-                    raise bb.fetch2.FetchError("Invalid depth for BB_GIT_SHALLOW_DEPTH_%s: %s" % (name, shallow_depth))
-                else:
-                    if shallow_depth < 0:
-                        raise bb.fetch2.FetchError("Invalid depth for BB_GIT_SHALLOW_DEPTH_%s: %s" % (name, shallow_depth))
-                ud.shallow_depths[name] = shallow_depth
-
-            revs = d.getVar("BB_GIT_SHALLOW_REVS_%s" % name)
-            if revs is not None:
-                ud.shallow_revs.extend(revs.split())
-            elif revs_default is not None:
-                ud.shallow_revs.extend(revs_default.split())
-
-        if (ud.shallow and
-                not ud.shallow_revs and
-                all(ud.shallow_depths[n] == 0 for n in ud.names)):
-            # Shallow disabled for this URL
-            ud.shallow = False

         if ud.usehead:
             ud.unresolvedrev['default'] = 'HEAD'

-        ud.basecmd = d.getVar("FETCHCMD_git") or "git -c core.fsyncobjectfiles=0"
+        ud.basecmd = data.getVar("FETCHCMD_git", d, True) or "git -c core.fsyncobjectfiles=0"

-        write_tarballs = d.getVar("BB_GENERATE_MIRROR_TARBALLS") or "0"
-        ud.write_tarballs = write_tarballs != "0" or ud.rebaseable
-        ud.write_shallow_tarballs = (d.getVar("BB_GENERATE_SHALLOW_TARBALLS") or write_tarballs) != "0"
+        ud.write_tarballs = ((data.getVar("BB_GENERATE_MIRROR_TARBALLS", d, True) or "0") != "0") or ud.rebaseable

-        ud.setup_revisions(d)
+        ud.setup_revisons(d)

         for name in ud.names:
             # Ensure anything that doesn't look like a sha256 checksum/revision is translated into one
```
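The `BB_GIT_SHALLOW_DEPTH` parsing added on the yocto-2.6 side validates the value twice, rejecting both non-integers and negatives. The same logic extracted into a standalone helper for illustration (the helper itself is hypothetical; names mirror the diff):

```python
def parse_shallow_depth(value, varname="BB_GIT_SHALLOW_DEPTH"):
    """Return a validated shallow depth, or None when the variable is unset."""
    if value is None:
        return None
    try:
        depth = int(value or 0)
    except ValueError:
        raise ValueError("Invalid depth for %s: %s" % (varname, value))
    if depth < 0:
        raise ValueError("Invalid depth for %s: %s" % (varname, value))
    return depth

assert parse_shallow_depth(None) is None
assert parse_shallow_depth("2") == 2
```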
```diff
@@ -259,66 +206,30 @@ class Git(FetchMethod):
         if ud.rebaseable:
             for name in ud.names:
                 gitsrcname = gitsrcname + '_' + ud.revisions[name]

-        dl_dir = d.getVar("DL_DIR")
-        gitdir = d.getVar("GITDIR") or (dl_dir + "/git2")
+        ud.mirrortarball = 'git2_%s.tar.gz' % (gitsrcname)
+        ud.fullmirror = os.path.join(d.getVar("DL_DIR", True), ud.mirrortarball)
+        gitdir = d.getVar("GITDIR", True) or (d.getVar("DL_DIR", True) + "/git2/")
         ud.clonedir = os.path.join(gitdir, gitsrcname)

         ud.localfile = ud.clonedir

-        mirrortarball = 'git2_%s.tar.gz' % gitsrcname
-        ud.fullmirror = os.path.join(dl_dir, mirrortarball)
-        ud.mirrortarballs = [mirrortarball]
-        if ud.shallow:
-            tarballname = gitsrcname
-            if ud.bareclone:
-                tarballname = "%s_bare" % tarballname
-
-            if ud.shallow_revs:
-                tarballname = "%s_%s" % (tarballname, "_".join(sorted(ud.shallow_revs)))
-
-            for name, revision in sorted(ud.revisions.items()):
-                tarballname = "%s_%s" % (tarballname, ud.revisions[name][:7])
-                depth = ud.shallow_depths[name]
-                if depth:
-                    tarballname = "%s-%s" % (tarballname, depth)
-
-            shallow_refs = []
-            if not ud.nobranch:
-                shallow_refs.extend(ud.branches.values())
-            if ud.shallow_extra_refs:
-                shallow_refs.extend(r.replace('refs/heads/', '').replace('*', 'ALL') for r in ud.shallow_extra_refs)
-            if shallow_refs:
-                tarballname = "%s_%s" % (tarballname, "_".join(sorted(shallow_refs)).replace('/', '.'))
-
-            fetcher = self.__class__.__name__.lower()
-            ud.shallowtarball = '%sshallow_%s.tar.gz' % (fetcher, tarballname)
-            ud.fullshallow = os.path.join(dl_dir, ud.shallowtarball)
-            ud.mirrortarballs.insert(0, ud.shallowtarball)

     def localpath(self, ud, d):
         return ud.clonedir

     def need_update(self, ud, d):
-        return self.clonedir_need_update(ud, d) or self.shallow_tarball_need_update(ud) or self.tarball_need_update(ud)
-
-    def clonedir_need_update(self, ud, d):
         if not os.path.exists(ud.clonedir):
             return True
         for name in ud.names:
             if not self._contains_ref(ud, d, name, ud.clonedir):
                 return True
+        if ud.write_tarballs and not os.path.exists(ud.fullmirror):
+            return True
         return False

-    def shallow_tarball_need_update(self, ud):
-        return ud.shallow and ud.write_shallow_tarballs and not os.path.exists(ud.fullshallow)
-
-    def tarball_need_update(self, ud):
-        return ud.write_tarballs and not os.path.exists(ud.fullmirror)
-
     def try_premirror(self, ud, d):
         # If we don't do this, updating an existing checkout with only premirrors
         # is not possible
-        if d.getVar("BB_FETCH_PREMIRRORONLY") is not None:
+        if d.getVar("BB_FETCH_PREMIRRORONLY", True) is not None:
             return True
         if os.path.exists(ud.clonedir):
             return False
```
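On the yocto-2.6 side the shallow mirror tarball name encodes revisions, depths and refs so that differently-shallowed fetches never collide. A simplified, runnable sketch of the naming scheme (sample values are invented, and the ref handling is reduced to plain branch names):

```python
# Simplified sketch of the yocto-2.6 shallow tarball naming; values invented.
gitsrcname = "git.example.com.myproject"
revisions = {"default": "0123456789abcdef0123456789abcdef01234567"}
depths = {"default": 1}
branches = {"default": "master"}

tarballname = gitsrcname
for name, revision in sorted(revisions.items()):
    tarballname = "%s_%s" % (tarballname, revision[:7])
    if depths[name]:
        tarballname = "%s-%s" % (tarballname, depths[name])
tarballname = "%s_%s" % (tarballname, "_".join(sorted(branches.values())).replace('/', '.'))
# 'git' is the lowercased fetcher class name, as in the diff.
print('gitshallow_%s.tar.gz' % tarballname)
```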
```diff
@@ -327,15 +238,10 @@ class Git(FetchMethod):
     def download(self, ud, d):
         """Fetch url"""

-        # A current clone is preferred to either tarball, a shallow tarball is
-        # preferred to an out of date clone, and a missing clone will use
-        # either tarball.
-        if ud.shallow and os.path.exists(ud.fullshallow) and self.need_update(ud, d):
-            ud.localpath = ud.fullshallow
-            return
-        elif os.path.exists(ud.fullmirror) and not os.path.exists(ud.clonedir):
+        # If the checkout doesn't exist and the mirror tarball does, extract it
+        if not os.path.exists(ud.clonedir) and os.path.exists(ud.fullmirror):
             bb.utils.mkdirhier(ud.clonedir)
-            runfetchcmd("tar -xzf %s" % ud.fullmirror, d, workdir=ud.clonedir)
+            runfetchcmd("tar -xzf %s" % (ud.fullmirror), d, workdir=ud.clonedir)

         repourl = self._get_repo_url(ud)
```
```diff
@@ -346,7 +252,7 @@ class Git(FetchMethod):
                 repourl = repourl[7:]
             clone_cmd = "LANG=C %s clone --bare --mirror %s %s --progress" % (ud.basecmd, repourl, ud.clonedir)
             if ud.proto.lower() != 'file':
-                bb.fetch2.check_network_access(d, clone_cmd, ud.url)
+                bb.fetch2.check_network_access(d, clone_cmd)
             progresshandler = GitProgressHandler(d)
             runfetchcmd(clone_cmd, d, log=progresshandler)
```
```diff
@@ -355,12 +261,11 @@ class Git(FetchMethod):
             for name in ud.names:
                 if not self._contains_ref(ud, d, name, ud.clonedir):
                     needupdate = True
                     break

         if needupdate:
-            output = runfetchcmd("%s remote" % ud.basecmd, d, quiet=True, workdir=ud.clonedir)
-            if "origin" in output:
-                runfetchcmd("%s remote rm origin" % ud.basecmd, d, workdir=ud.clonedir)
+            try:
+                runfetchcmd("%s remote rm origin" % ud.basecmd, d, workdir=ud.clonedir)
+            except bb.fetch2.FetchError:
+                logger.debug(1, "No Origin")

             runfetchcmd("%s remote add --mirror=fetch origin %s" % (ud.basecmd, repourl), d, workdir=ud.clonedir)
             fetch_cmd = "LANG=C %s fetch -f --prune --progress %s refs/*:refs/*" % (ud.basecmd, repourl)
```
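Both sides deal with possibly-missing state, in two classic styles: yocto-2.6 checks the output of `git remote` before removing `origin`, while morty simply attempts the removal and catches the FetchError; the same contrast appears in the hunk below with the ENOENT-tolerant unlink of the mirror tarball. Both idioms in plain Python (the path is illustrative):

```python
import errno
import os

path = "/tmp/possibly-missing.lock"  # illustrative

# Look-before-you-leap, as in the yocto-2.6 hunk above:
if os.path.exists(path):
    os.unlink(path)

# Ask-forgiveness, as in the morty hunk and the ENOENT-tolerant
# os.unlink(ud.fullmirror) in the next hunk:
try:
    os.unlink(path)
except OSError as exc:
    if exc.errno != errno.ENOENT:
        raise
```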
```diff
@@ -369,103 +274,33 @@ class Git(FetchMethod):
             progresshandler = GitProgressHandler(d)
             runfetchcmd(fetch_cmd, d, log=progresshandler, workdir=ud.clonedir)
             runfetchcmd("%s prune-packed" % ud.basecmd, d, workdir=ud.clonedir)
-            runfetchcmd("%s pack-refs --all" % ud.basecmd, d, workdir=ud.clonedir)
             runfetchcmd("%s pack-redundant --all | xargs -r rm" % ud.basecmd, d, workdir=ud.clonedir)
             try:
                 os.unlink(ud.fullmirror)
             except OSError as exc:
                 if exc.errno != errno.ENOENT:
                     raise

         for name in ud.names:
             if not self._contains_ref(ud, d, name, ud.clonedir):
                 raise bb.fetch2.FetchError("Unable to find revision %s in branch %s even from upstream" % (ud.revisions[name], ud.branches[name]))

     def build_mirror_data(self, ud, d):
-        if ud.shallow and ud.write_shallow_tarballs:
-            if not os.path.exists(ud.fullshallow):
-                if os.path.islink(ud.fullshallow):
-                    os.unlink(ud.fullshallow)
-                tempdir = tempfile.mkdtemp(dir=d.getVar('DL_DIR'))
-                shallowclone = os.path.join(tempdir, 'git')
-                try:
-                    self.clone_shallow_local(ud, shallowclone, d)
-
-                    logger.info("Creating tarball of git repository")
-                    runfetchcmd("tar -czf %s ." % ud.fullshallow, d, workdir=shallowclone)
-                    runfetchcmd("touch %s.done" % ud.fullshallow, d)
-                finally:
-                    bb.utils.remove(tempdir, recurse=True)
-        elif ud.write_tarballs and not os.path.exists(ud.fullmirror):
+        # Generate a mirror tarball if needed
+        if ud.write_tarballs and not os.path.exists(ud.fullmirror):
             # it's possible that this symlink points to read-only filesystem with PREMIRROR
             if os.path.islink(ud.fullmirror):
                 os.unlink(ud.fullmirror)

             logger.info("Creating tarball of git repository")
-            runfetchcmd("tar -czf %s ." % ud.fullmirror, d, workdir=ud.clonedir)
-            runfetchcmd("touch %s.done" % ud.fullmirror, d)
-
-    def clone_shallow_local(self, ud, dest, d):
-        """Clone the repo and make it shallow.
-
-        The upstream url of the new clone isn't set at this time, as it'll be
-        set correctly when unpacked."""
-        runfetchcmd("%s clone %s %s %s" % (ud.basecmd, ud.cloneflags, ud.clonedir, dest), d)
-
-        to_parse, shallow_branches = [], []
-        for name in ud.names:
-            revision = ud.revisions[name]
-            depth = ud.shallow_depths[name]
-            if depth:
-                to_parse.append('%s~%d^{}' % (revision, depth - 1))
-
-            # For nobranch, we need a ref, otherwise the commits will be
-            # removed, and for non-nobranch, we truncate the branch to our
-            # srcrev, to avoid keeping unnecessary history beyond that.
-            branch = ud.branches[name]
-            if ud.nobranch:
-                ref = "refs/shallow/%s" % name
-            elif ud.bareclone:
-                ref = "refs/heads/%s" % branch
-            else:
-                ref = "refs/remotes/origin/%s" % branch
-
-            shallow_branches.append(ref)
-            runfetchcmd("%s update-ref %s %s" % (ud.basecmd, ref, revision), d, workdir=dest)
-
-        # Map srcrev+depths to revisions
-        parsed_depths = runfetchcmd("%s rev-parse %s" % (ud.basecmd, " ".join(to_parse)), d, workdir=dest)
-
-        # Resolve specified revisions
-        parsed_revs = runfetchcmd("%s rev-parse %s" % (ud.basecmd, " ".join('"%s^{}"' % r for r in ud.shallow_revs)), d, workdir=dest)
-        shallow_revisions = parsed_depths.splitlines() + parsed_revs.splitlines()
-
-        # Apply extra ref wildcards
-        all_refs = runfetchcmd('%s for-each-ref "--format=%%(refname)"' % ud.basecmd,
-                               d, workdir=dest).splitlines()
-        for r in ud.shallow_extra_refs:
-            if not ud.bareclone:
-                r = r.replace('refs/heads/', 'refs/remotes/origin/')
-
-            if '*' in r:
-                matches = filter(lambda a: fnmatch.fnmatchcase(a, r), all_refs)
-                shallow_branches.extend(matches)
-            else:
-                shallow_branches.append(r)
-
-        # Make the repository shallow
-        shallow_cmd = [self.make_shallow_path, '-s']
-        for b in shallow_branches:
-            shallow_cmd.append('-r')
-            shallow_cmd.append(b)
-        shallow_cmd.extend(shallow_revisions)
-        runfetchcmd(subprocess.list2cmdline(shallow_cmd), d, workdir=dest)
+            runfetchcmd("tar -czf %s %s" % (ud.fullmirror, os.path.join(".") ), d, workdir=ud.clonedir)
+            runfetchcmd("touch %s.done" % (ud.fullmirror), d, workdir=ud.clonedir)

     def unpack(self, ud, destdir, d):
         """ unpack the downloaded src to destdir"""

         subdir = ud.parm.get("subpath", "")
         if subdir != "":
-            readpathspec = ":%s" % subdir
+            readpathspec = ":%s" % (subdir)
             def_destsuffix = "%s/" % os.path.basename(subdir.rstrip('/'))
         else:
             readpathspec = ""
```
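`clone_shallow_local()` (yocto-2.6 only) ends by shelling out to BitBake's bundled `bin/git-make-shallow` helper. A sketch of how that argument list is assembled and flattened into a command line (the helper path and refs are illustrative):

```python
import subprocess

# Hypothetical location; the real code derives this from bitbake's own path.
make_shallow_path = "/path/to/bitbake/bin/git-make-shallow"
shallow_branches = ["refs/remotes/origin/master"]
shallow_revisions = ["0123456789abcdef0123456789abcdef01234567"]

shallow_cmd = [make_shallow_path, '-s']
for b in shallow_branches:
    shallow_cmd.extend(['-r', b])
shallow_cmd.extend(shallow_revisions)
# list2cmdline quotes each argument so the command can be run via a shell.
print(subprocess.list2cmdline(shallow_cmd))
```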
```diff
@@ -476,31 +311,11 @@ class Git(FetchMethod):
         if os.path.exists(destdir):
             bb.utils.prunedir(destdir)

-        source_found = False
-        source_error = []
-
-        if not source_found:
-            clonedir_is_up_to_date = not self.clonedir_need_update(ud, d)
-            if clonedir_is_up_to_date:
-                runfetchcmd("%s clone %s %s/ %s" % (ud.basecmd, ud.cloneflags, ud.clonedir, destdir), d)
-                source_found = True
-            else:
-                source_error.append("clone directory not available or not up to date: " + ud.clonedir)
-
-        if not source_found:
-            if ud.shallow:
-                if os.path.exists(ud.fullshallow):
-                    bb.utils.mkdirhier(destdir)
-                    runfetchcmd("tar -xzf %s" % ud.fullshallow, d, workdir=destdir)
-                    source_found = True
-                else:
-                    source_error.append("shallow clone not available: " + ud.fullshallow)
-            else:
-                source_error.append("shallow clone not enabled")
-
-        if not source_found:
-            raise bb.fetch2.UnpackError("No up to date source found: " + "; ".join(source_error), ud.url)
+        cloneflags = "-s -n"
+        if ud.bareclone:
+            cloneflags += " --mirror"

+        runfetchcmd("%s clone %s %s/ %s" % (ud.basecmd, cloneflags, ud.clonedir, destdir), d)
         repourl = self._get_repo_url(ud)
         runfetchcmd("%s remote set-url origin %s" % (ud.basecmd, repourl), d, workdir=destdir)
         if not ud.nocheckout:
```
```diff
@@ -512,7 +327,7 @@ class Git(FetchMethod):
             branchname = ud.branches[ud.names[0]]
             runfetchcmd("%s checkout -B %s %s" % (ud.basecmd, branchname, \
                         ud.revisions[ud.names[0]]), d, workdir=destdir)
-            runfetchcmd("%s branch %s --set-upstream-to origin/%s" % (ud.basecmd, branchname, \
+            runfetchcmd("%s branch --set-upstream %s origin/%s" % (ud.basecmd, branchname, \
                         branchname), d, workdir=destdir)
         else:
             runfetchcmd("%s checkout %s" % (ud.basecmd, ud.revisions[ud.names[0]]), d, workdir=destdir)
```
```diff
@@ -565,26 +380,14 @@ class Git(FetchMethod):
         """
         Run git ls-remote with the specified search string
         """
-        # Prevent recursion e.g. in OE if SRCPV is in PV, PV is in WORKDIR,
-        # and WORKDIR is in PATH (as a result of RSS), our call to
-        # runfetchcmd() exports PATH so this function will get called again (!)
-        # In this scenario the return call of the function isn't actually
-        # important - WORKDIR isn't needed in PATH to call git ls-remote
-        # anyway.
-        if d.getVar('_BB_GIT_IN_LSREMOTE', False):
-            return ''
-        d.setVar('_BB_GIT_IN_LSREMOTE', '1')
-        try:
-            repourl = self._get_repo_url(ud)
-            cmd = "%s ls-remote %s %s" % \
-                (ud.basecmd, repourl, search)
-            if ud.proto.lower() != 'file':
-                bb.fetch2.check_network_access(d, cmd, repourl)
-            output = runfetchcmd(cmd, d, True)
-            if not output:
-                raise bb.fetch2.FetchError("The command %s gave empty output unexpectedly" % cmd, ud.url)
-        finally:
-            d.delVar('_BB_GIT_IN_LSREMOTE')
+        repourl = self._get_repo_url(ud)
+        cmd = "%s ls-remote %s %s" % \
+              (ud.basecmd, repourl, search)
+        if ud.proto.lower() != 'file':
+            bb.fetch2.check_network_access(d, cmd)
+        output = runfetchcmd(cmd, d, True)
+        if not output:
+            raise bb.fetch2.FetchError("The command %s gave empty output unexpectedly" % cmd, ud.url)
         return output

     def _latest_revision(self, ud, d, name):
```
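The `_BB_GIT_IN_LSREMOTE` guard added on the yocto-2.6 side exists to break accidental recursion when `runfetchcmd()` exports an environment that triggers another ls-remote. Its essential shape, with a plain dict standing in for the datastore:

```python
# Reduced sketch of the recursion guard; d is stubbed with a dict.
d = {}

def lsremote(search):
    if d.get('_BB_GIT_IN_LSREMOTE'):
        return ''          # re-entered: bail out, the result is unused here
    d['_BB_GIT_IN_LSREMOTE'] = '1'
    try:
        return "would run: git ls-remote <repourl> %s" % search
    finally:
        del d['_BB_GIT_IN_LSREMOTE']

print(lsremote("refs/tags/*"))
```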
```diff
@@ -615,11 +418,10 @@ class Git(FetchMethod):
         """
         pupver = ('', '')

-        tagregex = re.compile(d.getVar('UPSTREAM_CHECK_GITTAGREGEX') or "(?P<pver>([0-9][\.|_]?)+)")
+        tagregex = re.compile(d.getVar('UPSTREAM_CHECK_GITTAGREGEX', True) or "(?P<pver>([0-9][\.|_]?)+)")
         try:
             output = self._lsremote(ud, d, "refs/tags/*")
-        except (bb.fetch2.FetchError, bb.fetch2.NetworkAccess) as e:
-            bb.note("Could not list remote: %s" % str(e))
+        except bb.fetch2.FetchError or bb.fetch2.NetworkAccess:
             return pupver

         verstring = ""
```
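The morty-side clause `except bb.fetch2.FetchError or bb.fetch2.NetworkAccess:` is a classic Python trap: the `or` expression is evaluated first, so only the first exception class is ever caught. The yocto-2.6 side switches to the correct tuple form, demonstrated here with stand-in exception classes:

```python
# Stand-in classes; the real ones live in bb.fetch2.
class FetchError(Exception):
    pass

class NetworkAccess(Exception):
    pass

try:
    raise NetworkAccess("network access blocked")
except (FetchError, NetworkAccess) as e:   # the tuple catches both classes
    print("caught: %s" % e)
# "except FetchError or NetworkAccess:" would reduce to "except FetchError:"
# and let NetworkAccess propagate.
```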
```diff
@@ -668,7 +470,7 @@ class Git(FetchMethod):
         if not os.path.exists(rev_file) or not os.path.getsize(rev_file):
             from pipes import quote
             commits = bb.fetch2.runfetchcmd(
-                    "git rev-list %s -- | wc -l" % quote(rev),
+                    "git rev-list %s -- | wc -l" % (quote(rev)),
                     d, quiet=True).strip().lstrip('0')
             if commits:
                 open(rev_file, "w").write("%d\n" % int(commits))
```
```diff
@@ -683,5 +485,5 @@ class Git(FetchMethod):
         try:
             self._lsremote(ud, d, "")
             return True
-        except bb.fetch2.FetchError:
+        except FetchError:
             return False
```
```diff
--- a/bitbake/lib/bb/fetch2/gitannex.py
+++ b/bitbake/lib/bb/fetch2/gitannex.py
@@ -22,6 +22,7 @@ BitBake 'Fetch' git annex implementation

 import os
 import bb
+from bb import data
 from bb.fetch2.git import Git
 from bb.fetch2 import runfetchcmd
 from bb.fetch2 import logger
@@ -33,11 +34,6 @@ class GitANNEX(Git):
         """
         return ud.type in ['gitannex']

-    def urldata_init(self, ud, d):
-        super(GitANNEX, self).urldata_init(ud, d)
-        if ud.shallow:
-            ud.shallow_extra_refs += ['refs/heads/git-annex', 'refs/heads/synced/*']
-
     def uses_annex(self, ud, d, wd):
         for name in ud.names:
             try:
@@ -60,21 +56,9 @@ class GitANNEX(Git):
     def download(self, ud, d):
         Git.download(self, ud, d)

-        if not ud.shallow or ud.localpath != ud.fullshallow:
-            if self.uses_annex(ud, d, ud.clonedir):
-                self.update_annex(ud, d, ud.clonedir)
-
-    def clone_shallow_local(self, ud, dest, d):
-        super(GitANNEX, self).clone_shallow_local(ud, dest, d)
-
-        try:
-            runfetchcmd("%s annex init" % ud.basecmd, d, workdir=dest)
-        except bb.fetch.FetchError:
-            pass
-
-        if self.uses_annex(ud, d, dest):
-            runfetchcmd("%s annex get" % ud.basecmd, d, workdir=dest)
-            runfetchcmd("chmod u+w -R %s/.git/annex" % (dest), d, quiet=True, workdir=dest)
+        annex = self.uses_annex(ud, d, ud.clonedir)
+        if annex:
+            self.update_annex(ud, d, ud.clonedir)

     def unpack(self, ud, destdir, d):
         Git.unpack(self, ud, destdir, d)
```
```diff
--- a/bitbake/lib/bb/fetch2/gitsm.py
+++ b/bitbake/lib/bb/fetch2/gitsm.py
@@ -31,12 +31,10 @@ NOTE: Switching a SRC_URI from "git://" to "gitsm://" requires a clean of your r

 import os
 import bb
-import copy
+from bb import data
 from bb.fetch2.git import Git
 from bb.fetch2 import runfetchcmd
 from bb.fetch2 import logger
-from bb.fetch2 import Fetch
-from bb.fetch2 import BBFetchException

 class GitSM(Git):
     def supports(self, ud, d):
```
```diff
@@ -45,161 +43,89 @@ class GitSM(Git):
         """
         return ud.type in ['gitsm']

-    def process_submodules(self, ud, workdir, function, d):
-        """
-        Iterate over all of the submodules in this repository and execute
-        the 'function' for each of them.
-        """
-
-        submodules = []
-        paths = {}
-        revision = {}
-        uris = {}
-        subrevision = {}
-
-        def parse_gitmodules(gitmodules):
-            modules = {}
-            module = ""
-            for line in gitmodules.splitlines():
-                if line.startswith('[submodule'):
-                    module = line.split('"')[1]
-                    modules[module] = {}
-                elif module and line.strip().startswith('path'):
-                    path = line.split('=')[1].strip()
-                    modules[module]['path'] = path
-                elif module and line.strip().startswith('url'):
-                    url = line.split('=')[1].strip()
-                    modules[module]['url'] = url
-            return modules
-
-        # Collect the defined submodules, and their attributes
+    def uses_submodules(self, ud, d, wd):
         for name in ud.names:
             try:
-                gitmodules = runfetchcmd("%s show %s:.gitmodules" % (ud.basecmd, ud.revisions[name]), d, quiet=True, workdir=workdir)
-            except:
-                # No submodules to update
-                continue
+                runfetchcmd("%s show %s:.gitmodules" % (ud.basecmd, ud.revisions[name]), d, quiet=True, workdir=wd)
+                return True
+            except bb.fetch.FetchError:
+                pass
+        return False

-            for m, md in parse_gitmodules(gitmodules).items():
-                try:
-                    module_hash = runfetchcmd("%s ls-tree -z -d %s %s" % (ud.basecmd, ud.revisions[name], md['path']), d, quiet=True, workdir=workdir)
-                except:
-                    # If the command fails, we don't have a valid file to check. If it doesn't
-                    # fail -- it still might be a failure, see next check...
-                    module_hash = ""
-
-                if not module_hash:
-                    logger.debug(1, "submodule %s is defined, but is not initialized in the repository. Skipping", m)
-                    continue
-
-                submodules.append(m)
-                paths[m] = md['path']
-                revision[m] = ud.revisions[name]
-                uris[m] = md['url']
-                subrevision[m] = module_hash.split()[2]
-
-                # Convert relative to absolute uri based on parent uri
-                if uris[m].startswith('..'):
-                    newud = copy.copy(ud)
-                    newud.path = os.path.realpath(os.path.join(newud.path, uris[m]))
-                    uris[m] = Git._get_repo_url(self, newud)
+    def _set_relative_paths(self, repopath):
+        """
+        Fix submodule paths to be relative instead of absolute,
+        so that when we move the repo it doesn't break
+        (In Git 1.7.10+ this is done automatically)
+        """
+        submodules = []
+        with open(os.path.join(repopath, '.gitmodules'), 'r') as f:
+            for line in f.readlines():
+                if line.startswith('[submodule'):
+                    submodules.append(line.split('"')[1])

         for module in submodules:
-            # Translate the module url into a SRC_URI
+            repo_conf = os.path.join(repopath, module, '.git')
+            if os.path.exists(repo_conf):
+                with open(repo_conf, 'r') as f:
+                    lines = f.readlines()
+                newpath = ''
+                for i, line in enumerate(lines):
+                    if line.startswith('gitdir:'):
+                        oldpath = line.split(': ')[-1].rstrip()
+                        if oldpath.startswith('/'):
+                            newpath = '../' * (module.count('/') + 1) + '.git/modules/' + module
+                            lines[i] = 'gitdir: %s\n' % newpath
+                            break
+                if newpath:
+                    with open(repo_conf, 'w') as f:
+                        for line in lines:
+                            f.write(line)

-            if "://" in uris[module]:
-                # Properly formated URL already
-                proto = uris[module].split(':', 1)[0]
-                url = uris[module].replace('%s:' % proto, 'gitsm:', 1)
-            else:
-                if ":" in uris[module]:
-                    # Most likely an SSH style reference
-                    proto = "ssh"
-                    if ":/" in uris[module]:
-                        # Absolute reference, easy to convert..
-                        url = "gitsm://" + uris[module].replace(':/', '/', 1)
-                    else:
-                        # Relative reference, no way to know if this is right!
-                        logger.warning("Submodule included by %s refers to relative ssh reference %s. References may fail if not absolute." % (ud.url, uris[module]))
-                        url = "gitsm://" + uris[module].replace(':', '/', 1)
-                else:
-                    # This has to be a file reference
-                    proto = "file"
-                    url = "gitsm://" + uris[module]
+            repo_conf2 = os.path.join(repopath, '.git', 'modules', module, 'config')
+            if os.path.exists(repo_conf2):
+                with open(repo_conf2, 'r') as f:
+                    lines = f.readlines()
+                newpath = ''
+                for i, line in enumerate(lines):
+                    if line.lstrip().startswith('worktree = '):
+                        oldpath = line.split(' = ')[-1].rstrip()
+                        if oldpath.startswith('/'):
+                            newpath = '../' * (module.count('/') + 3) + module
+                            lines[i] = '\tworktree = %s\n' % newpath
+                            break
+                if newpath:
+                    with open(repo_conf2, 'w') as f:
+                        for line in lines:
+                            f.write(line)

-            url += ';protocol=%s' % proto
-            url += ";name=%s" % module
-            url += ";subpath=%s" % paths[module]
-
-            ld = d.createCopy()
-            # Not necessary to set SRC_URI, since we're passing the URI to
-            # Fetch.
-            #ld.setVar('SRC_URI', url)
-            ld.setVar('SRCREV_%s' % module, subrevision[module])
-
-            # Workaround for issues with SRCPV/SRCREV_FORMAT errors
-            # error refer to 'multiple' repositories. Only the repository
-            # in the original SRC_URI actually matters...
-            ld.setVar('SRCPV', d.getVar('SRCPV'))
-            ld.setVar('SRCREV_FORMAT', module)
-
-            function(ud, url, module, paths[module], ld)
-
-        return submodules != []
+    def update_submodules(self, ud, d):
+        # We have to convert bare -> full repo, do the submodule bit, then convert back
+        tmpclonedir = ud.clonedir + ".tmp"
+        gitdir = tmpclonedir + os.sep + ".git"
+        bb.utils.remove(tmpclonedir, True)
+        os.mkdir(tmpclonedir)
+        os.rename(ud.clonedir, gitdir)
+        runfetchcmd("sed " + gitdir + "/config -i -e 's/bare.*=.*true/bare = false/'", d)
+        runfetchcmd(ud.basecmd + " reset --hard", d, workdir=tmpclonedir)
+        runfetchcmd(ud.basecmd + " checkout " + ud.revisions[ud.names[0]], d, workdir=tmpclonedir)
+        runfetchcmd(ud.basecmd + " submodule update --init --recursive", d, workdir=tmpclonedir)
+        self._set_relative_paths(tmpclonedir)
+        runfetchcmd("sed " + gitdir + "/config -i -e 's/bare.*=.*false/bare = true/'", d, workdir=tmpclonedir)
+        os.rename(gitdir, ud.clonedir,)
+        bb.utils.remove(tmpclonedir, True)

     def download(self, ud, d):
-        def download_submodule(ud, url, module, modpath, d):
-            url += ";bareclone=1;nobranch=1"
-
-            # Is the following still needed?
-            #url += ";nocheckout=1"
-
-            try:
-                newfetch = Fetch([url], d, cache=False)
-                newfetch.download()
-            except Exception as e:
-                logger.error('gitsm: submodule download failed: %s %s' % (type(e).__name__, str(e)))
-                raise
-
         Git.download(self, ud, d)
-        self.process_submodules(ud, ud.clonedir, download_submodule, d)

+        submodules = self.uses_submodules(ud, d, ud.clonedir)
+        if submodules:
+            self.update_submodules(ud, d)

     def unpack(self, ud, destdir, d):
-        def unpack_submodules(ud, url, module, modpath, d):
-            url += ";bareclone=1;nobranch=1"
-
-            # Figure out where we clone over the bare submodules...
-            if ud.bareclone:
-                repo_conf = ud.destdir
-            else:
-                repo_conf = os.path.join(ud.destdir, '.git')
-
-            try:
-                newfetch = Fetch([url], d, cache=False)
-                newfetch.unpack(root=os.path.dirname(os.path.join(repo_conf, 'modules', modpath)))
-            except Exception as e:
-                logger.error('gitsm: submodule unpack failed: %s %s' % (type(e).__name__, str(e)))
-                raise
-
-            local_path = newfetch.localpath(url)
-
-            # Correct the submodule references to the local download version...
-            runfetchcmd("%(basecmd)s config submodule.%(module)s.url %(url)s" % {'basecmd': ud.basecmd, 'module': module, 'url' : local_path}, d, workdir=ud.destdir)
-
-            if ud.shallow:
-                runfetchcmd("%(basecmd)s config submodule.%(module)s.shallow true" % {'basecmd': ud.basecmd, 'module': module}, d, workdir=ud.destdir)
-
-            # Ensure the submodule repository is NOT set to bare, since we're checking it out...
-            try:
-                runfetchcmd("%s config core.bare false" % (ud.basecmd), d, quiet=True, workdir=os.path.join(repo_conf, 'modules', modpath))
-            except:
-                logger.error("Unable to set git config core.bare to false for %s" % os.path.join(repo_conf, 'modules', modpath))
-                raise
-
         Git.unpack(self, ud, destdir, d)

-        ret = self.process_submodules(ud, ud.destdir, unpack_submodules, d)
-
-        if not ud.bareclone and ret:
-            # Run submodule update, this sets up the directories -- without touching the config
-            runfetchcmd("%s submodule update --recursive --no-fetch" % (ud.basecmd), d, quiet=True, workdir=ud.destdir)
+        submodules = self.uses_submodules(ud, d, ud.destdir)
+        if submodules:
+            runfetchcmd(ud.basecmd + " checkout " + ud.revisions[ud.names[0]], d, workdir=ud.destdir)
+            runfetchcmd(ud.basecmd + " submodule update --init --recursive", d, workdir=ud.destdir)
```
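The removed `parse_gitmodules()` helper is a tiny line-oriented parser for `.gitmodules`. A self-contained run on a sample file (the submodule name and paths are invented):

```python
def parse_gitmodules(gitmodules):
    # Same logic as the nested helper in the yocto-2.6 hunk above.
    modules = {}
    module = ""
    for line in gitmodules.splitlines():
        if line.startswith('[submodule'):
            module = line.split('"')[1]
            modules[module] = {}
        elif module and line.strip().startswith('path'):
            modules[module]['path'] = line.split('=')[1].strip()
        elif module and line.strip().startswith('url'):
            modules[module]['url'] = line.split('=')[1].strip()
    return modules

sample = (
    '[submodule "libfoo"]\n'
    '\tpath = ext/libfoo\n'
    '\turl = ../libfoo.git\n'
)
print(parse_gitmodules(sample))
# -> {'libfoo': {'path': 'ext/libfoo', 'url': '../libfoo.git'}}
```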
```diff
--- a/bitbake/lib/bb/fetch2/hg.py
+++ b/bitbake/lib/bb/fetch2/hg.py
@@ -29,6 +29,7 @@ import sys
 import logging
 import bb
 import errno
+from bb import data
 from bb.fetch2 import FetchMethod
 from bb.fetch2 import FetchError
 from bb.fetch2 import MissingParameterError
@@ -66,7 +67,7 @@ class Hg(FetchMethod):
         else:
             ud.proto = "hg"

-        ud.setup_revisions(d)
+        ud.setup_revisons(d)

         if 'rev' in ud.parm:
             ud.revision = ud.parm['rev']
@@ -76,17 +77,16 @@ class Hg(FetchMethod):
         # Create paths to mercurial checkouts
         hgsrcname = '%s_%s_%s' % (ud.module.replace('/', '.'), \
                             ud.host, ud.path.replace('/', '.'))
-        mirrortarball = 'hg_%s.tar.gz' % hgsrcname
-        ud.fullmirror = os.path.join(d.getVar("DL_DIR"), mirrortarball)
-        ud.mirrortarballs = [mirrortarball]
+        ud.mirrortarball = 'hg_%s.tar.gz' % hgsrcname
+        ud.fullmirror = os.path.join(d.getVar("DL_DIR", True), ud.mirrortarball)

-        hgdir = d.getVar("HGDIR") or (d.getVar("DL_DIR") + "/hg")
+        hgdir = d.getVar("HGDIR", True) or (d.getVar("DL_DIR", True) + "/hg/")
         ud.pkgdir = os.path.join(hgdir, hgsrcname)
         ud.moddir = os.path.join(ud.pkgdir, ud.module)
         ud.localfile = ud.moddir
-        ud.basecmd = d.getVar("FETCHCMD_hg") or "/usr/bin/env hg"
+        ud.basecmd = data.getVar("FETCHCMD_hg", d, True) or "/usr/bin/env hg"

-        ud.write_tarballs = d.getVar("BB_GENERATE_MIRROR_TARBALLS")
+        ud.write_tarballs = d.getVar("BB_GENERATE_MIRROR_TARBALLS", True)

     def need_update(self, ud, d):
         revTag = ud.parm.get('rev', 'tip')
@@ -99,7 +99,7 @@ class Hg(FetchMethod):
     def try_premirror(self, ud, d):
         # If we don't do this, updating an existing checkout with only premirrors
         # is not possible
-        if d.getVar("BB_FETCH_PREMIRRORONLY") is not None:
+        if d.getVar("BB_FETCH_PREMIRRORONLY", True) is not None:
             return True
         if os.path.exists(ud.moddir):
             return False
@@ -221,7 +221,7 @@ class Hg(FetchMethod):
         """
         Compute tip revision for the url
         """
-        bb.fetch2.check_network_access(d, self._buildhgcommand(ud, d, "info"), ud.url)
+        bb.fetch2.check_network_access(d, self._buildhgcommand(ud, d, "info"))
         output = runfetchcmd(self._buildhgcommand(ud, d, "info"), d)
         return output.strip()
```
|
||||
import urllib.request, urllib.parse, urllib.error
|
||||
import bb
|
||||
import bb.utils
|
||||
from bb import data
|
||||
from bb.fetch2 import FetchMethod, FetchError
|
||||
from bb.fetch2 import logger
|
||||
|
||||
@@ -62,11 +63,17 @@ class Local(FetchMethod):
|
||||
newpath = path
|
||||
if path[0] == "/":
|
||||
return [path]
|
||||
filespath = d.getVar('FILESPATH')
|
||||
filespath = data.getVar('FILESPATH', d, True)
|
||||
if filespath:
|
||||
logger.debug(2, "Searching for %s in paths:\n %s" % (path, "\n ".join(filespath.split(":"))))
|
||||
newpath, hist = bb.utils.which(filespath, path, history=True)
|
||||
searched.extend(hist)
|
||||
if not newpath:
|
||||
filesdir = data.getVar('FILESDIR', d, True)
|
||||
if filesdir:
|
||||
logger.debug(2, "Searching for %s in path: %s" % (path, filesdir))
|
||||
newpath = os.path.join(filesdir, path)
|
||||
searched.append(newpath)
|
||||
if (not newpath or not os.path.exists(newpath)) and path.find("*") != -1:
|
||||
# For expressions using '*', best we can do is take the first directory in FILESPATH that exists
|
||||
newpath, hist = bb.utils.which(filespath, ".", history=True)
|
||||
@@ -74,7 +81,7 @@ class Local(FetchMethod):
|
||||
logger.debug(2, "Searching for %s in path: %s" % (path, newpath))
|
||||
return searched
|
||||
if not os.path.exists(newpath):
|
||||
dldirfile = os.path.join(d.getVar("DL_DIR"), path)
|
||||
dldirfile = os.path.join(d.getVar("DL_DIR", True), path)
|
||||
logger.debug(2, "Defaulting to %s for %s" % (dldirfile, path))
|
||||
bb.utils.mkdirhier(os.path.dirname(dldirfile))
|
||||
searched.append(dldirfile)
|
||||
@@ -93,10 +100,13 @@ class Local(FetchMethod):
|
||||
# no need to fetch local files, we'll deal with them in place.
|
||||
if self.supports_checksum(urldata) and not os.path.exists(urldata.localpath):
|
||||
locations = []
|
||||
filespath = d.getVar('FILESPATH')
|
||||
filespath = data.getVar('FILESPATH', d, True)
|
||||
if filespath:
|
||||
locations = filespath.split(":")
|
||||
locations.append(d.getVar("DL_DIR"))
|
||||
filesdir = data.getVar('FILESDIR', d, True)
|
||||
if filesdir:
|
||||
locations.append(filesdir)
|
||||
locations.append(d.getVar("DL_DIR", True))
|
||||
|
||||
msg = "Unable to find file " + urldata.url + " anywhere. The paths that were searched were:\n " + "\n ".join(locations)
|
||||
raise FetchError(msg)
|
||||
|
||||
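The morty side of `local.py` still consults the legacy `FILESDIR` variable, while yocto-2.6 relies purely on searching the colon-separated `FILESPATH`. A sketch of that search with a simplified stand-in for `bb.utils.which()` (paths are illustrative):

```python
import os

def which(search_path, target):
    # Simplified stand-in for bb.utils.which(); returns first existing match.
    for directory in search_path.split(":"):
        candidate = os.path.join(directory, target)
        if os.path.exists(candidate):
            return candidate
    return ""

filespath = "/work/recipe/files:/work/recipe"  # illustrative FILESPATH value
print(which(filespath, "defconfig") or "not found")
```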
```diff
--- a/bitbake/lib/bb/fetch2/npm.py
+++ b/bitbake/lib/bb/fetch2/npm.py
@@ -25,6 +25,7 @@ import json
 import subprocess
 import signal
 import bb
+from bb import data
 from bb.fetch2 import FetchMethod
 from bb.fetch2 import FetchError
 from bb.fetch2 import ChecksumError
@@ -32,6 +33,7 @@ from bb.fetch2 import runfetchcmd
 from bb.fetch2 import logger
 from bb.fetch2 import UnpackError
 from bb.fetch2 import ParameterError
+from distutils import spawn

 def subprocess_setup():
     # Python installs a SIGPIPE handler by default. This is usually not what
@@ -78,7 +80,6 @@ class Npm(FetchMethod):
         if not ud.version:
             raise ParameterError("NPM fetcher requires a version parameter", ud.url)
         ud.bbnpmmanifest = "%s-%s.deps.json" % (ud.pkgname, ud.version)
-        ud.bbnpmmanifest = ud.bbnpmmanifest.replace('/', '-')
         ud.registry = "http://%s" % (ud.url.replace('npm://', '', 1).split(';'))[0]
         prefixdir = "npm/%s" % ud.pkgname
         ud.pkgdatadir = d.expand("${DL_DIR}/%s" % prefixdir)
@@ -86,14 +87,12 @@ class Npm(FetchMethod):
         bb.utils.mkdirhier(ud.pkgdatadir)
         ud.localpath = d.expand("${DL_DIR}/npm/%s" % ud.bbnpmmanifest)

-        self.basecmd = d.getVar("FETCHCMD_wget") or "/usr/bin/env wget -O -t 2 -T 30 -nv --passive-ftp --no-check-certificate "
+        self.basecmd = d.getVar("FETCHCMD_wget", True) or "/usr/bin/env wget -O -t 2 -T 30 -nv --passive-ftp --no-check-certificate "
         ud.prefixdir = prefixdir

-        ud.write_tarballs = ((d.getVar("BB_GENERATE_MIRROR_TARBALLS") or "0") != "0")
-        mirrortarball = 'npm_%s-%s.tar.xz' % (ud.pkgname, ud.version)
-        mirrortarball = mirrortarball.replace('/', '-')
-        ud.fullmirror = os.path.join(d.getVar("DL_DIR"), mirrortarball)
-        ud.mirrortarballs = [mirrortarball]
+        ud.write_tarballs = ((data.getVar("BB_GENERATE_MIRROR_TARBALLS", d, True) or "0") != "0")
+        ud.mirrortarball = 'npm_%s-%s.tar.xz' % (ud.pkgname, ud.version)
+        ud.fullmirror = os.path.join(d.getVar("DL_DIR", True), ud.mirrortarball)

     def need_update(self, ud, d):
         if os.path.exists(ud.localpath):
@@ -102,8 +101,8 @@ class Npm(FetchMethod):

     def _runwget(self, ud, d, command, quiet):
         logger.debug(2, "Fetching %s using command '%s'" % (ud.url, command))
-        bb.fetch2.check_network_access(d, command, ud.url)
-        dldir = d.getVar("DL_DIR")
+        bb.fetch2.check_network_access(d, command)
+        dldir = d.getVar("DL_DIR", True)
         runfetchcmd(command, d, quiet, workdir=dldir)

     def _unpackdep(self, ud, pkg, data, destdir, dldir, d):
@@ -117,7 +116,7 @@ class Npm(FetchMethod):
             # Change to subdir before executing command
             if not os.path.exists(destdir):
                 os.makedirs(destdir)
-            path = d.getVar('PATH')
+            path = d.getVar('PATH', True)
             if path:
                 cmd = "PATH=\"%s\" %s" % (path, cmd)
             bb.note("Unpacking %s to %s/" % (file, destdir))
@@ -133,8 +132,9 @@ class Npm(FetchMethod):


     def unpack(self, ud, destdir, d):
-        dldir = d.getVar("DL_DIR")
-        with open("%s/npm/%s" % (dldir, ud.bbnpmmanifest)) as datafile:
+        dldir = d.getVar("DL_DIR", True)
+        depdumpfile = "%s-%s.deps.json" % (ud.pkgname, ud.version)
+        with open("%s/npm/%s" % (dldir, depdumpfile)) as datafile:
             workobj = json.load(datafile)
         dldir = "%s/%s" % (os.path.dirname(ud.localpath), ud.pkgname)

@@ -182,27 +182,19 @@ class Npm(FetchMethod):
         if pkg_os:
             if not isinstance(pkg_os, list):
                 pkg_os = [pkg_os]
-            blacklist = False
-            for item in pkg_os:
-                if item.startswith('!'):
-                    blacklist = True
-                    break
-            if (not blacklist and 'linux' not in pkg_os) or '!linux' in pkg_os:
+            if 'linux' not in pkg_os or '!linux' in pkg_os:
                 logger.debug(2, "Skipping %s since it's incompatible with Linux" % pkg)
                 return
         #logger.debug(2, "Output URL is %s - %s - %s" % (ud.basepath, ud.basename, ud.localfile))
         outputurl = pdata['dist']['tarball']
         data[pkg] = {}
         data[pkg]['tgz'] = os.path.basename(outputurl)
-        if outputurl in fetchedlist:
-            return
-
-        self._runwget(ud, d, "%s --directory-prefix=%s %s" % (self.basecmd, ud.prefixdir, outputurl), False)
-        fetchedlist.append(outputurl)
+        if not outputurl in fetchedlist:
+            self._runwget(ud, d, "%s --directory-prefix=%s %s" % (self.basecmd, ud.prefixdir, outputurl), False)
+            fetchedlist.append(outputurl)

         dependencies = pdata.get('dependencies', {})
         optionalDependencies = pdata.get('optionalDependencies', {})
         dependencies.update(optionalDependencies)
         depsfound = {}
         optdepsfound = {}
         data[pkg]['deps'] = {}
@@ -259,32 +251,25 @@ class Npm(FetchMethod):
         lockdown = {}

         if not os.listdir(ud.pkgdatadir) and os.path.exists(ud.fullmirror):
-            dest = d.getVar("DL_DIR")
+            dest = d.getVar("DL_DIR", True)
             bb.utils.mkdirhier(dest)
             runfetchcmd("tar -xJf %s" % (ud.fullmirror), d, workdir=dest)
             return

-        if ud.parm.get("noverify", None) != '1':
-            shwrf = d.getVar('NPM_SHRINKWRAP')
-            logger.debug(2, "NPM shrinkwrap file is %s" % shwrf)
-            if shwrf:
-                try:
-                    with open(shwrf) as datafile:
-                        shrinkobj = json.load(datafile)
-                except Exception as e:
-                    raise FetchError('Error loading NPM_SHRINKWRAP file "%s" for %s: %s' % (shwrf, ud.pkgname, str(e)))
-            elif not ud.ignore_checksums:
-                logger.warning('Missing shrinkwrap file in NPM_SHRINKWRAP for %s, this will lead to unreliable builds!' % ud.pkgname)
-            lckdf = d.getVar('NPM_LOCKDOWN')
-            logger.debug(2, "NPM lockdown file is %s" % lckdf)
-            if lckdf:
-                try:
-                    with open(lckdf) as datafile:
-                        lockdown = json.load(datafile)
-                except Exception as e:
-                    raise FetchError('Error loading NPM_LOCKDOWN file "%s" for %s: %s' % (lckdf, ud.pkgname, str(e)))
-            elif not ud.ignore_checksums:
-                logger.warning('Missing lockdown file in NPM_LOCKDOWN for %s, this will lead to unreproducible builds!' % ud.pkgname)
+        shwrf = d.getVar('NPM_SHRINKWRAP', True)
+        logger.debug(2, "NPM shrinkwrap file is %s" % shwrf)
+        try:
+            with open(shwrf) as datafile:
+                shrinkobj = json.load(datafile)
+        except:
+            logger.warning('Missing shrinkwrap file in NPM_SHRINKWRAP for %s, this will lead to unreliable builds!' % ud.pkgname)
+        lckdf = d.getVar('NPM_LOCKDOWN', True)
+        logger.debug(2, "NPM lockdown file is %s" % lckdf)
+        try:
+            with open(lckdf) as datafile:
+                lockdown = json.load(datafile)
+        except:
+            logger.warning('Missing lockdown file in NPM_LOCKDOWN for %s, this will lead to unreproducible builds!' % ud.pkgname)

         if ('name' not in shrinkobj):
             self._getdependencies(ud.pkgname, jsondepobj, ud.version, d, ud)
@@ -301,7 +286,7 @@ class Npm(FetchMethod):
             if os.path.islink(ud.fullmirror):
                 os.unlink(ud.fullmirror)

-            dldir = d.getVar("DL_DIR")
+            dldir = d.getVar("DL_DIR", True)
             logger.info("Creating tarball of npm data")
             runfetchcmd("tar -cJf %s npm/%s npm/%s" % (ud.fullmirror, ud.bbnpmmanifest, ud.pkgname), d,
                         workdir=dldir)
```
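The yocto-2.6 side replaces the bare `except:` clauses around the shrinkwrap and lockdown reads with a loud FetchError that names the unreadable file. The same pattern as a standalone helper (hypothetical, for illustration):

```python
import json

def load_json_or_fail(path, what, pkgname):
    """Load a JSON metadata file, failing with a descriptive error."""
    try:
        with open(path) as datafile:
            return json.load(datafile)
    except Exception as e:
        # Mirrors the yocto-2.6 FetchError wording for NPM_SHRINKWRAP/NPM_LOCKDOWN.
        raise RuntimeError('Error loading %s file "%s" for %s: %s'
                           % (what, path, pkgname, str(e)))
```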
```diff
--- a/bitbake/lib/bb/fetch2/osc.py
+++ b/bitbake/lib/bb/fetch2/osc.py
@@ -10,6 +10,7 @@ import os
 import sys
 import logging
 import bb
+from bb import data
 from bb.fetch2 import FetchMethod
 from bb.fetch2 import FetchError
 from bb.fetch2 import MissingParameterError
@@ -32,9 +33,8 @@ class Osc(FetchMethod):
         ud.module = ud.parm["module"]

         # Create paths to osc checkouts
-        oscdir = d.getVar("OSCDIR") or (d.getVar("DL_DIR") + "/osc")
         relpath = self._strip_leading_slashes(ud.path)
-        ud.pkgdir = os.path.join(oscdir, ud.host)
+        ud.pkgdir = os.path.join(d.getVar('OSCDIR', True), ud.host)
         ud.moddir = os.path.join(ud.pkgdir, relpath, ud.module)

         if 'rev' in ud.parm:
@@ -47,7 +47,7 @@ class Osc(FetchMethod):
         else:
             ud.revision = ""

-        ud.localfile = d.expand('%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), ud.path.replace('/', '.'), ud.revision))
+        ud.localfile = data.expand('%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), ud.path.replace('/', '.'), ud.revision), d)

     def _buildosccommand(self, ud, d, command):
         """
@@ -55,7 +55,7 @@ class Osc(FetchMethod):
         command is "fetch", "update", "info"
         """

-        basecmd = d.getVar("FETCHCMD_osc") or "/usr/bin/env osc"
+        basecmd = data.expand('${FETCHCMD_osc}', d)

         proto = ud.parm.get('protocol', 'ocs')

@@ -84,7 +84,7 @@ class Osc(FetchMethod):

         logger.debug(2, "Fetch: checking for module directory '" + ud.moddir + "'")

-        if os.access(os.path.join(d.getVar('OSCDIR'), ud.path, ud.module), os.R_OK):
+        if os.access(os.path.join(d.getVar('OSCDIR', True), ud.path, ud.module), os.R_OK):
             oscupdatecmd = self._buildosccommand(ud, d, "update")
             logger.info("Update "+ ud.url)
             # update sources there
@@ -112,7 +112,7 @@ class Osc(FetchMethod):
         Generate a .oscrc to be used for this run.
         """

-        config_path = os.path.join(d.getVar('OSCDIR'), "oscrc")
+        config_path = os.path.join(d.getVar('OSCDIR', True), "oscrc")
         if (os.path.exists(config_path)):
             os.remove(config_path)

@@ -121,8 +121,8 @@ class Osc(FetchMethod):
             f.write("apisrv = %s\n" % ud.host)
             f.write("scheme = http\n")
             f.write("su-wrapper = su -c\n")
-            f.write("build-root = %s\n" % d.getVar('WORKDIR'))
-            f.write("urllist = %s\n" % d.getVar("OSCURLLIST"))
+            f.write("build-root = %s\n" % d.getVar('WORKDIR', True))
+            f.write("urllist = %s\n" % d.getVar("OSCURLLIST", True))
             f.write("extra-pkgs = gzip\n")
             f.write("\n")
             f.write("[%s]\n" % ud.host)
```
```diff
--- a/bitbake/lib/bb/fetch2/perforce.py
+++ b/bitbake/lib/bb/fetch2/perforce.py
@@ -26,6 +26,7 @@ BitBake 'Fetch' implementation for perforce
 import os
 import logging
 import bb
+from bb import data
 from bb.fetch2 import FetchMethod
 from bb.fetch2 import FetchError
 from bb.fetch2 import logger
@@ -43,9 +44,13 @@ class Perforce(FetchMethod):
         provided by the env, use it. If P4PORT is specified by the recipe, use
         its values, which may override the settings in P4CONFIG.
         """
-        ud.basecmd = d.getVar("FETCHCMD_p4") or "/usr/bin/env p4"
+        ud.basecmd = d.getVar('FETCHCMD_p4', True)
+        if not ud.basecmd:
+            ud.basecmd = "/usr/bin/env p4"

-        ud.dldir = d.getVar("P4DIR") or (d.getVar("DL_DIR") + "/p4")
+        ud.dldir = d.getVar('P4DIR', True)
+        if not ud.dldir:
+            ud.dldir = '%s/%s' % (d.getVar('DL_DIR', True), 'p4')

         path = ud.url.split('://')[1]
         path = path.split(';')[0]
@@ -57,7 +62,7 @@ class Perforce(FetchMethod):
         ud.path = path

         ud.usingp4config = False
-        p4port = d.getVar('P4PORT')
+        p4port = d.getVar('P4PORT', True)

         if p4port:
             logger.debug(1, 'Using recipe provided P4PORT: %s' % p4port)
@@ -66,7 +71,7 @@ class Perforce(FetchMethod):
             logger.debug(1, 'Trying to use P4CONFIG to automatically set P4PORT...')
             ud.usingp4config = True
             p4cmd = '%s info | grep "Server address"' % ud.basecmd
-            bb.fetch2.check_network_access(d, p4cmd, ud.url)
+            bb.fetch2.check_network_access(d, p4cmd)
             ud.host = runfetchcmd(p4cmd, d, True)
             ud.host = ud.host.split(': ')[1].strip()
             logger.debug(1, 'Determined P4PORT to be: %s' % ud.host)
@@ -82,9 +87,9 @@ class Perforce(FetchMethod):
         cleanedhost = ud.host.replace(':', '.')
         ud.pkgdir = os.path.join(ud.dldir, cleanedhost, cleanedpath)

-        ud.setup_revisions(d)
+        ud.setup_revisons(d)

-        ud.localfile = d.expand('%s_%s_%s.tar.gz' % (cleanedhost, cleanedpath, ud.revision))
+        ud.localfile = data.expand('%s_%s_%s.tar.gz' % (cleanedhost, cleanedpath, ud.revision), d)

     def _buildp4command(self, ud, d, command, depot_filename=None):
         """
@@ -135,7 +140,7 @@ class Perforce(FetchMethod):
         'p4 files' command, including trailing '#rev' file revision indicator
         """
         p4cmd = self._buildp4command(ud, d, 'files')
-        bb.fetch2.check_network_access(d, p4cmd, ud.url)
+        bb.fetch2.check_network_access(d, p4cmd)
         p4fileslist = runfetchcmd(p4cmd, d, True)
         p4fileslist = [f.rstrip() for f in p4fileslist.splitlines()]

@@ -166,7 +171,7 @@ class Perforce(FetchMethod):

         for afile in filelist:
             p4fetchcmd = self._buildp4command(ud, d, 'print', afile)
-            bb.fetch2.check_network_access(d, p4fetchcmd, ud.url)
+            bb.fetch2.check_network_access(d, p4fetchcmd)
             runfetchcmd(p4fetchcmd, d, workdir=ud.pkgdir)

         runfetchcmd('tar -czf %s p4' % (ud.localpath), d, cleanup=[ud.localpath], workdir=ud.pkgdir)
@@ -186,7 +191,7 @@ class Perforce(FetchMethod):
     def _latest_revision(self, ud, d, name):
         """ Return the latest upstream scm revision number """
         p4cmd = self._buildp4command(ud, d, "changes")
-        bb.fetch2.check_network_access(d, p4cmd, ud.url)
+        bb.fetch2.check_network_access(d, p4cmd)
         tip = runfetchcmd(p4cmd, d, True)

         if not tip:
```
```diff
--- a/bitbake/lib/bb/fetch2/repo.py
+++ b/bitbake/lib/bb/fetch2/repo.py
@@ -25,9 +25,9 @@ BitBake "Fetch" repo (git) implementation

 import os
 import bb
+from bb import data
 from bb.fetch2 import FetchMethod
 from bb.fetch2 import runfetchcmd
 from bb.fetch2 import logger

 class Repo(FetchMethod):
     """Class to fetch a module or modules from repo (git) repositories"""
@@ -45,25 +45,23 @@ class Repo(FetchMethod):
         "master".
         """

-        ud.basecmd = d.getVar("FETCHCMD_repo") or "/usr/bin/env repo"
-
         ud.proto = ud.parm.get('protocol', 'git')
         ud.branch = ud.parm.get('branch', 'master')
         ud.manifest = ud.parm.get('manifest', 'default.xml')
         if not ud.manifest.endswith('.xml'):
             ud.manifest += '.xml'

-        ud.localfile = d.expand("repo_%s%s_%s_%s.tar.gz" % (ud.host, ud.path.replace("/", "."), ud.manifest, ud.branch))
+        ud.localfile = data.expand("repo_%s%s_%s_%s.tar.gz" % (ud.host, ud.path.replace("/", "."), ud.manifest, ud.branch), d)

     def download(self, ud, d):
         """Fetch url"""

-        if os.access(os.path.join(d.getVar("DL_DIR"), ud.localfile), os.R_OK):
+        if os.access(os.path.join(data.getVar("DL_DIR", d, True), ud.localfile), os.R_OK):
             logger.debug(1, "%s already exists (or was stashed). Skipping repo init / sync.", ud.localpath)
             return

-        repodir = d.getVar("REPODIR") or (d.getVar("DL_DIR") + "/repo")
         gitsrcname = "%s%s" % (ud.host, ud.path.replace("/", "."))
+        repodir = data.getVar("REPODIR", d, True) or os.path.join(data.getVar("DL_DIR", d, True), "repo")
         codir = os.path.join(repodir, gitsrcname, ud.manifest)

         if ud.user:
@@ -74,11 +72,11 @@ class Repo(FetchMethod):
         repodir = os.path.join(codir, "repo")
         bb.utils.mkdirhier(repodir)
         if not os.path.exists(os.path.join(repodir, ".repo")):
-            bb.fetch2.check_network_access(d, "%s init -m %s -b %s -u %s://%s%s%s" % (ud.basecmd, ud.manifest, ud.branch, ud.proto, username, ud.host, ud.path), ud.url)
-            runfetchcmd("%s init -m %s -b %s -u %s://%s%s%s" % (ud.basecmd, ud.manifest, ud.branch, ud.proto, username, ud.host, ud.path), d, workdir=repodir)
+            bb.fetch2.check_network_access(d, "repo init -m %s -b %s -u %s://%s%s%s" % (ud.manifest, ud.branch, ud.proto, username, ud.host, ud.path), ud.url)
+            runfetchcmd("repo init -m %s -b %s -u %s://%s%s%s" % (ud.manifest, ud.branch, ud.proto, username, ud.host, ud.path), d, workdir=repodir)

-        bb.fetch2.check_network_access(d, "%s sync %s" % (ud.basecmd, ud.url), ud.url)
-        runfetchcmd("%s sync" % ud.basecmd, d, workdir=repodir)
+        bb.fetch2.check_network_access(d, "repo sync %s" % ud.url, ud.url)
+        runfetchcmd("repo sync", d, workdir=repodir)

         scmdata = ud.parm.get("scmdata", "")
         if scmdata == "keep":
```
@@ -1,98 +0,0 @@
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
BitBake 'Fetch' implementation for Amazon AWS S3.

Class for fetching files from Amazon S3 using the AWS Command Line Interface.
The aws tool must be correctly installed and configured prior to use.

"""

# Copyright (C) 2017, Andre McCurdy <armccurdy@gmail.com>
#
# Based in part on bb.fetch2.wget:
# Copyright (C) 2003, 2004 Chris Larson
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Based on functions from the base bb module, Copyright 2003 Holger Schurig

import os
import bb
import urllib.request, urllib.parse, urllib.error
from bb.fetch2 import FetchMethod
from bb.fetch2 import FetchError
from bb.fetch2 import runfetchcmd

class S3(FetchMethod):
"""Class to fetch urls via 'aws s3'"""

def supports(self, ud, d):
"""
Check to see if a given url can be fetched with s3.
"""
return ud.type in ['s3']

def recommends_checksum(self, urldata):
return True

def urldata_init(self, ud, d):
if 'downloadfilename' in ud.parm:
ud.basename = ud.parm['downloadfilename']
else:
ud.basename = os.path.basename(ud.path)

ud.localfile = d.expand(urllib.parse.unquote(ud.basename))

ud.basecmd = d.getVar("FETCHCMD_s3") or "/usr/bin/env aws s3"

def download(self, ud, d):
"""
Fetch urls
Assumes localpath was called first
"""

cmd = '%s cp s3://%s%s %s' % (ud.basecmd, ud.host, ud.path, ud.localpath)
bb.fetch2.check_network_access(d, cmd, ud.url)
runfetchcmd(cmd, d)

# Additional sanity checks copied from the wget class (although there
# are no known issues which mean these are required, treat the aws cli
# tool with a little healthy suspicion).

if not os.path.exists(ud.localpath):
raise FetchError("The aws cp command returned success for s3://%s%s but %s doesn't exist?!" % (ud.host, ud.path, ud.localpath))

if os.path.getsize(ud.localpath) == 0:
os.remove(ud.localpath)
raise FetchError("The aws cp command for s3://%s%s resulted in a zero size file?! Deleting and failing since this isn't right." % (ud.host, ud.path))

return True

def checkstatus(self, fetch, ud, d):
"""
Check the status of a URL
"""

cmd = '%s ls s3://%s%s' % (ud.basecmd, ud.host, ud.path)
bb.fetch2.check_network_access(d, cmd, ud.url)
output = runfetchcmd(cmd, d)

# "aws s3 ls s3://mybucket/foo" will exit with success even if the file
# is not found, so check output of the command to confirm success.

if not output:
raise FetchError("The aws ls command for s3://%s%s gave empty output" % (ud.host, ud.path))

return True
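The fetcher removed above shells out to the AWS CLI and then distrusts it just enough to verify the result. The same download-then-verify pattern as a standalone sketch (bucket path and file names are made up for illustration):

    import os
    import subprocess

    def s3_download(bucket_path, localpath, basecmd="aws s3"):
        # Run "aws s3 cp" and then verify the result, mirroring the
        # sanity checks in the fetcher above.
        cmd = "%s cp s3://%s %s" % (basecmd, bucket_path, localpath)
        subprocess.run(cmd, shell=True, check=True)

        if not os.path.exists(localpath):
            raise RuntimeError("aws cp reported success but %s does not exist" % localpath)
        if os.path.getsize(localpath) == 0:
            os.remove(localpath)
            raise RuntimeError("aws cp produced a zero-size file for s3://%s" % bucket_path)
        return localpath

    # Example (hypothetical bucket/key):
    # s3_download("mybucket/downloads/foo-1.0.tar.gz", "/tmp/foo-1.0.tar.gz")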
@@ -62,10 +62,12 @@ SRC_URI = "sftp://user@host.example.com/dir/path.file.txt"
import os
import bb
import urllib.request, urllib.parse, urllib.error
from bb import data
from bb.fetch2 import URI
from bb.fetch2 import FetchMethod
from bb.fetch2 import runfetchcmd


class SFTP(FetchMethod):
"""Class to fetch urls via 'sftp'"""

@@ -90,7 +92,7 @@ class SFTP(FetchMethod):
else:
ud.basename = os.path.basename(ud.path)

ud.localfile = d.expand(urllib.parse.unquote(ud.basename))
ud.localfile = data.expand(urllib.parse.unquote(ud.basename), d)

def download(self, ud, d):
"""Fetch urls"""
@@ -102,7 +104,7 @@ class SFTP(FetchMethod):
port = '-P %d' % urlo.port
urlo.port = None

dldir = d.getVar('DL_DIR')
dldir = data.getVar('DL_DIR', d, True)
lpath = os.path.join(dldir, ud.localfile)

user = ''
@@ -43,6 +43,7 @@ IETF secsh internet draft:
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import re, os
from bb import data
from bb.fetch2 import FetchMethod
from bb.fetch2 import FetchError
from bb.fetch2 import logger
@@ -86,11 +87,11 @@ class SSH(FetchMethod):
m = __pattern__.match(urldata.url)
path = m.group('path')
host = m.group('host')
urldata.localpath = os.path.join(d.getVar('DL_DIR'),
urldata.localpath = os.path.join(d.getVar('DL_DIR', True),
os.path.basename(os.path.normpath(path)))

def download(self, urldata, d):
dldir = d.getVar('DL_DIR')
dldir = d.getVar('DL_DIR', True)

m = __pattern__.match(urldata.url)
path = m.group('path')
@@ -28,6 +28,7 @@ import sys
import logging
import bb
import re
from bb import data
from bb.fetch2 import FetchMethod
from bb.fetch2 import FetchError
from bb.fetch2 import MissingParameterError
@@ -49,7 +50,7 @@ class Svn(FetchMethod):
if not "module" in ud.parm:
raise MissingParameterError('module', ud.url)

ud.basecmd = d.getVar("FETCHCMD_svn") or "/usr/bin/env svn --non-interactive --trust-server-cert"
ud.basecmd = d.getVar('FETCHCMD_svn', True)

ud.module = ud.parm["module"]

@@ -59,20 +60,16 @@ class Svn(FetchMethod):
ud.path_spec = ud.parm["path_spec"]

# Create paths to svn checkouts
svndir = d.getVar("SVNDIR") or (d.getVar("DL_DIR") + "/svn")
relpath = self._strip_leading_slashes(ud.path)
ud.pkgdir = os.path.join(svndir, ud.host, relpath)
ud.pkgdir = os.path.join(data.expand('${SVNDIR}', d), ud.host, relpath)
ud.moddir = os.path.join(ud.pkgdir, ud.module)
# Protects the repository from concurrent updates, e.g. from two
# recipes fetching different revisions at the same time
ud.svnlock = os.path.join(ud.pkgdir, "svn.lock")

ud.setup_revisions(d)
ud.setup_revisons(d)

if 'rev' in ud.parm:
ud.revision = ud.parm['rev']

ud.localfile = d.expand('%s_%s_%s_%s_.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.path.replace('/', '.'), ud.revision))
ud.localfile = data.expand('%s_%s_%s_%s_.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.path.replace('/', '.'), ud.revision), d)

def _buildsvncommand(self, ud, d, command):
"""
@@ -82,9 +79,9 @@ class Svn(FetchMethod):

proto = ud.parm.get('protocol', 'svn')

svn_ssh = None
if proto == "svn+ssh" and "ssh" in ud.parm:
svn_ssh = ud.parm["ssh"]
svn_rsh = None
if proto == "svn+ssh" and "rsh" in ud.parm:
svn_rsh = ud.parm["rsh"]

svnroot = ud.host + ud.path

@@ -116,8 +113,8 @@ class Svn(FetchMethod):
else:
raise FetchError("Invalid svn command %s" % command, ud.url)

if svn_ssh:
svncmd = "SVN_SSH=\"%s\" %s" % (svn_ssh, svncmd)
if svn_rsh:
svncmd = "svn_RSH=\"%s\" %s" % (svn_rsh, svncmd)

return svncmd

@@ -126,40 +123,35 @@ class Svn(FetchMethod):

logger.debug(2, "Fetch: checking for module directory '" + ud.moddir + "'")

lf = bb.utils.lockfile(ud.svnlock)
if os.access(os.path.join(ud.moddir, '.svn'), os.R_OK):
svnupdatecmd = self._buildsvncommand(ud, d, "update")
logger.info("Update " + ud.url)
# We need to attempt to run svn upgrade first in case it's an older working format
try:
runfetchcmd(ud.basecmd + " upgrade", d, workdir=ud.moddir)
except FetchError:
pass
logger.debug(1, "Running %s", svnupdatecmd)
bb.fetch2.check_network_access(d, svnupdatecmd, ud.url)
runfetchcmd(svnupdatecmd, d, workdir=ud.moddir)
else:
svnfetchcmd = self._buildsvncommand(ud, d, "fetch")
logger.info("Fetch " + ud.url)
# check out sources there
bb.utils.mkdirhier(ud.pkgdir)
logger.debug(1, "Running %s", svnfetchcmd)
bb.fetch2.check_network_access(d, svnfetchcmd, ud.url)
runfetchcmd(svnfetchcmd, d, workdir=ud.pkgdir)

try:
if os.access(os.path.join(ud.moddir, '.svn'), os.R_OK):
svnupdatecmd = self._buildsvncommand(ud, d, "update")
logger.info("Update " + ud.url)
# We need to attempt to run svn upgrade first in case it's an older working format
try:
runfetchcmd(ud.basecmd + " upgrade", d, workdir=ud.moddir)
except FetchError:
pass
logger.debug(1, "Running %s", svnupdatecmd)
bb.fetch2.check_network_access(d, svnupdatecmd, ud.url)
runfetchcmd(svnupdatecmd, d, workdir=ud.moddir)
else:
svnfetchcmd = self._buildsvncommand(ud, d, "fetch")
logger.info("Fetch " + ud.url)
# check out sources there
bb.utils.mkdirhier(ud.pkgdir)
logger.debug(1, "Running %s", svnfetchcmd)
bb.fetch2.check_network_access(d, svnfetchcmd, ud.url)
runfetchcmd(svnfetchcmd, d, workdir=ud.pkgdir)
scmdata = ud.parm.get("scmdata", "")
if scmdata == "keep":
tar_flags = ""
else:
tar_flags = "--exclude='.svn'"

scmdata = ud.parm.get("scmdata", "")
if scmdata == "keep":
tar_flags = ""
else:
tar_flags = "--exclude='.svn'"

# tar them up to a defined filename
runfetchcmd("tar %s -czf %s %s" % (tar_flags, ud.localpath, ud.path_spec), d,
cleanup=[ud.localpath], workdir=ud.pkgdir)
finally:
bb.utils.unlockfile(lf)
# tar them up to a defined filename
runfetchcmd("tar %s -czf %s %s" % (tar_flags, ud.localpath, ud.path_spec), d,
cleanup=[ud.localpath], workdir=ud.pkgdir)

def clean(self, ud, d):
""" Clean SVN specific files and dirs """
@@ -181,7 +173,7 @@ class Svn(FetchMethod):
"""
Return the latest upstream revision number
"""
bb.fetch2.check_network_access(d, self._buildsvncommand(ud, d, "log1"), ud.url)
bb.fetch2.check_network_access(d, self._buildsvncommand(ud, d, "log1"))

output = runfetchcmd("LANG=C LC_ALL=C " + self._buildsvncommand(ud, d, "log1"), d, True)
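The main change in this svn fetcher hunk is wrapping the update/checkout in a per-repository lock (ud.svnlock) so two recipes fetching different revisions cannot corrupt the shared checkout. A minimal standalone sketch of that acquire/work/release shape using fcntl (BitBake's own bb.utils.lockfile is richer; this only shows the pattern):

    import fcntl
    import os

    def with_repo_lock(lockpath, work):
        # Serialize access to a shared checkout directory: take an exclusive
        # flock, run the critical section, and always release in finally,
        # mirroring the lockfile/try/finally/unlockfile shape above.
        os.makedirs(os.path.dirname(lockpath) or ".", exist_ok=True)
        lf = open(lockpath, "a+")
        fcntl.flock(lf.fileno(), fcntl.LOCK_EX)
        try:
            return work()
        finally:
            fcntl.flock(lf.fileno(), fcntl.LOCK_UN)
            lf.close()

    # Example: two build workers calling this concurrently will run serially.
    # with_repo_lock("/tmp/svn/myrepo/svn.lock", lambda: print("svn update ..."))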
@@ -30,10 +30,10 @@ import tempfile
import subprocess
import os
import logging
import errno
import bb
import bb.progress
import urllib.request, urllib.parse, urllib.error
from bb import data
from bb.fetch2 import FetchMethod
from bb.fetch2 import FetchError
from bb.fetch2 import logger
@@ -84,19 +84,19 @@ class Wget(FetchMethod):
else:
ud.basename = os.path.basename(ud.path)

ud.localfile = d.expand(urllib.parse.unquote(ud.basename))
ud.localfile = data.expand(urllib.parse.unquote(ud.basename), d)
if not ud.localfile:
ud.localfile = d.expand(urllib.parse.unquote(ud.host + ud.path).replace("/", "."))
ud.localfile = data.expand(urllib.parse.unquote(ud.host + ud.path).replace("/", "."), d)

self.basecmd = d.getVar("FETCHCMD_wget") or "/usr/bin/env wget -t 2 -T 30 --passive-ftp --no-check-certificate"
self.basecmd = d.getVar("FETCHCMD_wget", True) or "/usr/bin/env wget -t 2 -T 30 --passive-ftp --no-check-certificate"

def _runwget(self, ud, d, command, quiet, workdir=None):
def _runwget(self, ud, d, command, quiet):

progresshandler = WgetProgressHandler(d)

logger.debug(2, "Fetching %s using command '%s'" % (ud.url, command))
bb.fetch2.check_network_access(d, command, ud.url)
runfetchcmd(command + ' --progress=dot -v', d, quiet, log=progresshandler, workdir=workdir)
bb.fetch2.check_network_access(d, command)
runfetchcmd(command + ' --progress=dot -v', d, quiet, log=progresshandler)

def download(self, ud, d):
"""Fetch urls"""
@@ -104,7 +104,7 @@ class Wget(FetchMethod):
fetchcmd = self.basecmd

if 'downloadfilename' in ud.parm:
dldir = d.getVar("DL_DIR")
dldir = d.getVar("DL_DIR", True)
bb.utils.mkdirhier(os.path.dirname(dldir + os.sep + ud.localfile))
fetchcmd += " -O " + dldir + os.sep + ud.localfile

@@ -207,21 +207,8 @@ class Wget(FetchMethod):
h.request(req.get_method(), req.selector, req.data, headers)
except socket.error as err: # XXX what error?
# Don't close connection when cache is enabled.
# Instead, try to detect connections that are no longer
# usable (for example, closed unexpectedly) and remove
# them from the cache.
if fetch.connection_cache is None:
h.close()
elif isinstance(err, OSError) and err.errno == errno.EBADF:
# This happens when the server closes the connection despite the Keep-Alive.
# Apparently urllib then uses the file descriptor, expecting it to be
# connected, when in reality the connection is already gone.
# We let the request fail and expect it to be
# tried once more ("try_again" in check_status()),
# with the dead connection removed from the cache.
# If it still fails, we give up, which can happen for bad
# HTTP proxy settings.
fetch.connection_cache.remove_connection(h.host, h.port)
raise urllib.error.URLError(err)
else:
try:
@@ -250,7 +237,6 @@ class Wget(FetchMethod):
return ""
def close(self):
pass
closed = False

resp = addinfourl(fp_dummy(), r.msg, req.get_full_url())
resp.code = r.status
@@ -284,6 +270,11 @@ class Wget(FetchMethod):
"""
http_error_403 = http_error_405

"""
Some servers (e.g. FusionForge) return 406 Not Acceptable when they
actually mean 405 Method Not Allowed.
"""
http_error_406 = http_error_405

class FixedHTTPRedirectHandler(urllib.request.HTTPRedirectHandler):
"""
@@ -312,29 +303,14 @@ class Wget(FetchMethod):
uri = ud.url.split(";")[0]
r = urllib.request.Request(uri)
r.get_method = lambda: "HEAD"
# Some servers (FusionForge, as used on Alioth) require that the
# optional Accept header is set.
r.add_header("Accept", "*/*")
def add_basic_auth(login_str, request):
'''Adds Basic auth to http request, pass in login:password as string'''

if ud.user:
import base64
encodeuser = base64.b64encode(login_str.encode('utf-8')).decode("utf-8")
encodeuser = base64.b64encode(ud.user.encode('utf-8')).decode("utf-8")
authheader = "Basic %s" % encodeuser
r.add_header("Authorization", authheader)

if ud.user:
add_basic_auth(ud.user, r)

try:
import netrc, urllib.parse
n = netrc.netrc()
login, unused, password = n.authenticators(urllib.parse.urlparse(uri).hostname)
add_basic_auth("%s:%s" % (login, password), r)
except (TypeError, ImportError, IOError, netrc.NetrcParseError):
pass

with opener.open(r) as response:
pass
opener.open(r)
except urllib.error.URLError as e:
if try_again:
logger.debug(2, "checkstatus: trying again")
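The checkstatus hunk above (newer side) folds Authorization-header construction into an add_basic_auth helper and falls back to ~/.netrc credentials for the target host. A self-contained sketch of that lookup-then-header flow (example.com is a placeholder):

    import base64
    import netrc
    import urllib.parse
    import urllib.request

    def add_basic_auth(login_str, request):
        # login_str is "user:password"; encode it into a Basic auth header.
        encoded = base64.b64encode(login_str.encode("utf-8")).decode("utf-8")
        request.add_header("Authorization", "Basic %s" % encoded)

    def head_request_with_netrc(uri, user=None):
        r = urllib.request.Request(uri)
        r.get_method = lambda: "HEAD"
        if user:
            add_basic_auth(user, r)
        try:
            # authenticators() returns (login, account, password) or None.
            n = netrc.netrc()
            login, _, password = n.authenticators(urllib.parse.urlparse(uri).hostname)
            add_basic_auth("%s:%s" % (login, password), r)
        except (TypeError, IOError, netrc.NetrcParseError):
            pass  # no usable ~/.netrc entry: proceed unauthenticated
        return r

    # req = head_request_with_netrc("https://example.com/file.tar.gz")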
@@ -421,16 +397,17 @@ class Wget(FetchMethod):
Run fetch checkstatus to get directory information
"""
f = tempfile.NamedTemporaryFile()
with tempfile.TemporaryDirectory(prefix="wget-index-") as workdir, tempfile.NamedTemporaryFile(dir=workdir, prefix="wget-listing-") as f:
agent = "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2.12) Gecko/20101027 Ubuntu/9.10 (karmic) Firefox/3.6.12"
fetchcmd = self.basecmd
fetchcmd += " -O " + f.name + " --user-agent='" + agent + "' '" + uri + "'"
try:
self._runwget(ud, d, fetchcmd, True, workdir=workdir)
fetchresult = f.read()
except bb.fetch2.BBFetchException:
fetchresult = ""

agent = "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2.12) Gecko/20101027 Ubuntu/9.10 (karmic) Firefox/3.6.12"
fetchcmd = self.basecmd
fetchcmd += " -O " + f.name + " --user-agent='" + agent + "' '" + uri + "'"
try:
self._runwget(ud, d, fetchcmd, True)
fetchresult = f.read()
except bb.fetch2.BBFetchException:
fetchresult = ""

f.close()
return fetchresult
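The newer side of this hunk replaces a bare NamedTemporaryFile with a TemporaryDirectory context so the listing file and any wget side files are cleaned up together. A standalone sketch of that shape (the curl command stands in for the fetcher's wget invocation):

    import subprocess
    import tempfile

    def fetch_index(uri):
        # Both the scratch directory and the listing file inside it are
        # removed automatically when the with-block exits, even on errors.
        with tempfile.TemporaryDirectory(prefix="wget-index-") as workdir, \
                tempfile.NamedTemporaryFile(dir=workdir, prefix="wget-listing-") as f:
            try:
                subprocess.run(["curl", "-sS", "-o", f.name, uri],
                               cwd=workdir, check=True)
                return f.read()
            except subprocess.CalledProcessError:
                return b""

    # listing = fetch_index("https://example.com/downloads/")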
def _check_latest_version(self, url, package, package_regex, current_version, ud, d):
@@ -557,7 +534,7 @@ class Wget(FetchMethod):

# src.rpm extension was added only for rpm package. Can be removed if the rpm
# package will always be considered as having to be manually upgraded
psuffix_regex = "(tar\.gz|tgz|tar\.bz2|zip|xz|tar\.lz|rpm|bz2|orig\.tar\.gz|tar\.xz|src\.tar\.gz|src\.tgz|svnr\d+\.tar\.bz2|stable\.tar\.gz|src\.rpm)"
psuffix_regex = "(tar\.gz|tgz|tar\.bz2|zip|xz|rpm|bz2|orig\.tar\.gz|tar\.xz|src\.tar\.gz|src\.tgz|svnr\d+\.tar\.bz2|stable\.tar\.gz|src\.rpm)"

# match name, version and archive type of a package
package_regex_comp = re.compile("(?P<name>%s?\.?v?)(?P<pver>%s)(?P<arch>%s)?[\.-](?P<type>%s$)"
@@ -565,7 +542,7 @@ class Wget(FetchMethod):
self.suffix_regex_comp = re.compile(psuffix_regex)

# compile regex, can be specific by package or generic regex
pn_regex = d.getVar('UPSTREAM_CHECK_REGEX')
pn_regex = d.getVar('UPSTREAM_CHECK_REGEX', True)
if pn_regex:
package_custom_regex_comp = re.compile(pn_regex)
else:
@@ -586,7 +563,7 @@ class Wget(FetchMethod):
sanity check to ensure same name and type.
"""
package = ud.path.split("/")[-1]
current_version = ['', d.getVar('PV'), '']
current_version = ['', d.getVar('PV', True), '']

"""possible to have no version in pkg name, such as spectrum-fw"""
if not re.search("\d+", package):
@@ -601,7 +578,7 @@ class Wget(FetchMethod):
bb.debug(3, "latest_versionstring, regex: %s" % (package_regex.pattern))

uri = ""
regex_uri = d.getVar("UPSTREAM_CHECK_URI")
regex_uri = d.getVar("UPSTREAM_CHECK_URI", True)
if not regex_uri:
path = ud.path.split(package)[0]

@@ -610,7 +587,7 @@ class Wget(FetchMethod):
dirver_regex = re.compile("(?P<dirver>[^/]*(\d+\.)*\d+([-_]r\d+)*)/")
m = dirver_regex.search(path)
if m:
pn = d.getVar('PN')
pn = d.getVar('PN', True)
dirver = m.group('dirver')

dirver_pn_regex = re.compile("%s\d?" % (re.escape(pn)))
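The name/version/type regex built here is easiest to see on a concrete tarball name. A small worked example under the same grouping scheme (the pattern below is a simplified stand-in for the one the fetcher composes from its name/version/arch/suffix pieces):

    import re

    # Simplified version of the upstream-check pattern: name, version,
    # then an archive suffix anchored at the end of the string.
    psuffix = r"(tar\.gz|tgz|tar\.bz2|zip|tar\.xz)"
    package_regex = re.compile(
        r"(?P<name>[a-z][a-z0-9-]*?)[-_]v?(?P<pver>\d+(\.\d+)+)[\.-](?P<type>%s$)" % psuffix)

    m = package_regex.match("foo-bar-1.2.3.tar.gz")
    assert m and m.group("name") == "foo-bar"
    assert m.group("pver") == "1.2.3"
    assert m.group("type") == "tar.gz"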
@@ -28,8 +28,6 @@ import logging
import optparse
import warnings
import fcntl
import time
import traceback

import bb
from bb import event
@@ -39,17 +37,11 @@ from bb import ui
from bb import server
from bb import cookerdata

import bb.server.process
import bb.server.xmlrpcclient

logger = logging.getLogger("BitBake")

class BBMainException(Exception):
pass

class BBMainFatal(bb.BBHandledException):
pass

def present_options(optionlist):
if len(optionlist) > 1:
return ' or '.join([', '.join(optionlist[:-1]), optionlist[-1]])
@@ -66,6 +58,9 @@ class BitbakeHelpFormatter(optparse.IndentedHelpFormatter):
if option.dest == 'ui':
valid_uis = list_extension_modules(bb.ui, 'main')
option.help = option.help.replace('@CHOICES@', present_options(valid_uis))
elif option.dest == 'servertype':
valid_server_types = list_extension_modules(bb.server, 'BitBakeServer')
option.help = option.help.replace('@CHOICES@', present_options(valid_server_types))

return optparse.IndentedHelpFormatter.format_option(self, option)

@@ -153,6 +148,11 @@ class BitBakeConfigParameters(cookerdata.ConfigParameters):
"failed and anything depending on it cannot be built, as much as "
"possible will be built before stopping.")

parser.add_option("-a", "--tryaltconfigs", action="store_true",
dest="tryaltconfigs", default=False,
help="Continue with builds by trying to use alternative providers "
"where possible.")

parser.add_option("-f", "--force", action="store_true", dest="force", default=False,
help="Force the specified targets/task to run (invalidating any "
"existing stamp file).")
@@ -174,24 +174,13 @@ class BitBakeConfigParameters(cookerdata.ConfigParameters):
help="Read the specified file after bitbake.conf.")

parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False,
help="Enable tracing of shell tasks (with 'set -x'). "
"Also print bb.note(...) messages to stdout (in "
"addition to writing them to ${T}/log.do_<task>).")
help="Output more log message data to the terminal.")

parser.add_option("-D", "--debug", action="count", dest="debug", default=0,
help="Increase the debug level. You can specify this "
"more than once. -D sets the debug level to 1, "
"where only bb.debug(1, ...) messages are printed "
"to stdout; -DD sets the debug level to 2, where "
"both bb.debug(1, ...) and bb.debug(2, ...) "
"messages are printed; etc. Without -D, no debug "
"messages are printed. Note that -D only affects "
"output to stdout. All debug messages are written "
"to ${T}/log.do_taskname, regardless of the debug "
"level.")
help="Increase the debug level. You can specify this more than once.")

parser.add_option("-q", "--quiet", action="count", dest="quiet", default=0,
help="Output less log message data to the terminal. You can specify this more than once.")
parser.add_option("-q", "--quiet", action="store_true", dest="quiet", default=False,
help="Output less log message data to the terminal.")

parser.add_option("-n", "--dry-run", action="store_true", dest="dry_run", default=False,
help="Don't execute, just go through the motions.")
@@ -238,6 +227,11 @@ class BitBakeConfigParameters(cookerdata.ConfigParameters):
default=os.environ.get('BITBAKE_UI', 'knotty'),
help="The user interface to use (@CHOICES@ - default %default).")

# @CHOICES@ is substituted out by BitbakeHelpFormatter above
parser.add_option("-t", "--servertype", action="store", dest="servertype",
default=["process", "xmlrpc"]["BBSERVER" in os.environ],
help="Choose which server type to use (@CHOICES@ - default %default).")

parser.add_option("", "--token", action="store", dest="xmlrpctoken",
default=os.environ.get("BBTOKEN"),
help="Specify the connection token to be used when connecting "
@@ -253,14 +247,15 @@ class BitBakeConfigParameters(cookerdata.ConfigParameters):
help="Run bitbake without a UI, only starting a server "
"(cooker) process.")

parser.add_option("-B", "--bind", action="store", dest="bind", default=False,
help="The name/address for the bitbake xmlrpc server to bind to.")
parser.add_option("", "--foreground", action="store_true",
help="Run bitbake server in foreground.")

parser.add_option("-T", "--idle-timeout", type=float, dest="server_timeout",
default=os.getenv("BB_SERVER_TIMEOUT"),
help="Set timeout to unload bitbake server due to inactivity, "
"set to -1 means no unload, "
"default: Environment variable BB_SERVER_TIMEOUT.")
parser.add_option("-B", "--bind", action="store", dest="bind", default=False,
help="The name/address for the bitbake server to bind to.")

parser.add_option("-T", "--idle-timeout", type=int,
default=int(os.environ.get("BBTIMEOUT", "0")),
help="Set timeout to unload bitbake server due to inactivity")

parser.add_option("", "--no-setscene", action="store_true",
dest="nosetscene", default=False,
@@ -277,7 +272,7 @@ class BitBakeConfigParameters(cookerdata.ConfigParameters):

parser.add_option("-m", "--kill-server", action="store_true",
dest="kill_server", default=False,
help="Terminate any running bitbake server.")
help="Terminate the remote server.")

parser.add_option("", "--observe-only", action="store_true",
dest="observe_only", default=False,
@@ -292,13 +287,6 @@ class BitBakeConfigParameters(cookerdata.ConfigParameters):
help="Writes the event log of the build to a bitbake event json file. "
"Use '' (empty string) to assign the name automatically.")

parser.add_option("", "--runall", action="append", dest="runall",
help="Run the specified task for any recipe in the taskgraph of the specified target (even if it wouldn't otherwise have run).")

parser.add_option("", "--runonly", action="append", dest="runonly",
help="Run only the specified task within the taskgraph of the specified targets (and any task dependencies those tasks may have).")


options, targets = parser.parse_args(argv)

if options.quiet and options.verbose:
@@ -320,20 +308,69 @@ class BitBakeConfigParameters(cookerdata.ConfigParameters):
eventlog = "bitbake_eventlog_%s.json" % datetime.now().strftime("%Y%m%d%H%M%S")
options.writeeventlog = eventlog

if options.bind:
try:
#Checking that the port is a number and is a ':' delimited value
(host, port) = options.bind.split(':')
# if BBSERVER says to autodetect, let's do that
if options.remote_server:
port = -1
if options.remote_server != 'autostart':
host, port = options.remote_server.split(":", 2)
port = int(port)
except (ValueError,IndexError):
raise BBMainException("FATAL: Malformed host:port bind parameter")
options.xmlrpcinterface = (host, port)
else:
options.xmlrpcinterface = (None, 0)
# use automatic port if port set to -1, means read it from
# the bitbake.lock file; this is a bit tricky, but we always expect
# to be in the base of the build directory if we need to have a
# chance to start the server later, anyway
if port == -1:
lock_location = "./bitbake.lock"
# we try to read the address at all times; if the server is not started,
# we'll try to start it after the first connect fails, below
try:
lf = open(lock_location, 'r')
remotedef = lf.readline()
[host, port] = remotedef.split(":")
port = int(port)
lf.close()
options.remote_server = remotedef
except Exception as e:
if options.remote_server != 'autostart':
raise BBMainException("Failed to read bitbake.lock (%s), invalid port" % str(e))

return options, targets[1:]
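The option post-processing above parses a host:port pair from either --bind or a BBSERVER/--remote-server value, with port -1 meaning "read the real address from bitbake.lock later". A compact sketch of that parse-and-validate step in isolation:

    def parse_interface(bind=None, remote_server=None):
        # Returns (host, port); port -1 signals "autodetect from bitbake.lock".
        # Mirrors the shape of the option handling above, without the globals.
        if bind:
            try:
                host, port = bind.split(':')
                port = int(port)
            except (ValueError, IndexError):
                raise ValueError("Malformed host:port bind parameter: %r" % bind)
            return host, port
        if remote_server:
            if remote_server == 'autostart':
                return None, -1
            host, port = remote_server.split(':', 1)
            return host, int(port)
        return None, 0

    assert parse_interface(bind="localhost:8080") == ("localhost", 8080)
    assert parse_interface(remote_server="autostart") == (None, -1)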
def start_server(servermodule, configParams, configuration, features):
server = servermodule.BitBakeServer()
single_use = not configParams.server_only and os.getenv('BBSERVER') != 'autostart'
if configParams.bind:
(host, port) = configParams.bind.split(':')
server.initServer((host, int(port)), single_use=single_use,
idle_timeout=configParams.idle_timeout)
configuration.interface = [server.serverImpl.host, server.serverImpl.port]
else:
server.initServer(single_use=single_use)
configuration.interface = []

try:
configuration.setServerRegIdleCallback(server.getServerIdleCB())

cooker = bb.cooker.BBCooker(configuration, features)

server.addcooker(cooker)
server.saveConnectionDetails()
except Exception as e:
while hasattr(server, "event_queue"):
import queue
try:
event = server.event_queue.get(block=False)
except (queue.Empty, IOError):
break
if isinstance(event, logging.LogRecord):
logger.handle(event)
raise
if not configParams.foreground:
server.detach()
cooker.lock.close()
return server


def bitbake_main(configParams, configuration):

# Python multiprocessing requires /dev/shm on Linux
@@ -352,17 +389,51 @@ def bitbake_main(configParams, configuration):
except:
pass


configuration.setConfigParameters(configParams)

if configParams.server_only and configParams.remote_server:
ui_module = import_extension_module(bb.ui, configParams.ui, 'main')
servermodule = import_extension_module(bb.server, configParams.servertype, 'BitBakeServer')

if configParams.server_only:
if configParams.servertype != "xmlrpc":
raise BBMainException("FATAL: If '--server-only' is defined, we must set the "
"servertype as 'xmlrpc'.\n")
if not configParams.bind:
raise BBMainException("FATAL: The '--server-only' option requires a name/address "
"to bind to with the -B option.\n")
else:
try:
#Checking that the port is a number
int(configParams.bind.split(":")[1])
except (ValueError,IndexError):
raise BBMainException(
"FATAL: Malformed host:port bind parameter")
if configParams.remote_server:
raise BBMainException("FATAL: The '--server-only' option conflicts with %s.\n" %
("the BBSERVER environment variable" if "BBSERVER" in os.environ \
else "the '--remote-server' option"))

if configParams.observe_only and not (configParams.remote_server or configParams.bind):
elif configParams.foreground:
raise BBMainException("FATAL: The '--foreground' option can only be used "
"with --server-only.\n")

if configParams.bind and configParams.servertype != "xmlrpc":
raise BBMainException("FATAL: If '-B' or '--bind' is defined, we must "
"set the servertype as 'xmlrpc'.\n")

if configParams.remote_server and configParams.servertype != "xmlrpc":
raise BBMainException("FATAL: If '--remote-server' is defined, we must "
"set the servertype as 'xmlrpc'.\n")

if configParams.observe_only and (not configParams.remote_server or configParams.bind):
raise BBMainException("FATAL: '--observe-only' can only be used by UI clients "
"connecting to a server.\n")

if configParams.kill_server and not configParams.remote_server:
raise BBMainException("FATAL: '--kill-server' can only be used to "
"terminate a remote server")

if "BBDEBUG" in os.environ:
level = int(os.environ["BBDEBUG"])
if level > configuration.debug:
@@ -371,139 +442,81 @@ def bitbake_main(configParams, configuration):
bb.msg.init_msgconfig(configParams.verbose, configuration.debug,
configuration.debug_domains)

server_connection, ui_module = setup_bitbake(configParams, configuration)
# No server connection
if server_connection is None:
if configParams.status_only:
return 1
if configParams.kill_server:
return 0

if not configParams.server_only:
if configParams.status_only:
server_connection.terminate()
return 0

try:
for event in bb.event.ui_queue:
server_connection.events.queue_event(event)
bb.event.ui_queue = []

return ui_module.main(server_connection.connection, server_connection.events,
configParams)
finally:
server_connection.terminate()
else:
return 0

return 1

def setup_bitbake(configParams, configuration, extrafeatures=None):
# Ensure logging messages get sent to the UI as events
handler = bb.event.LogHandler()
if not configParams.status_only:
# In status only mode there are no logs and no UI
logger.addHandler(handler)

if configParams.server_only:
featureset = []
ui_module = None
else:
ui_module = import_extension_module(bb.ui, configParams.ui, 'main')
# Clear away any spurious environment variables while we stoke up the cooker
cleanedvars = bb.utils.clean_environment()

featureset = []
if not configParams.server_only:
# Collect the feature set for the UI
featureset = getattr(ui_module, "featureSet", [])

if extrafeatures:
for feature in extrafeatures:
if not feature in featureset:
featureset.append(feature)
if configParams.server_only:
for param in ('prefile', 'postfile'):
value = getattr(configParams, param)
if value:
setattr(configuration, "%s_server" % param, value)
param = "%s_server" % param

server_connection = None

# Clear away any spurious environment variables while we stoke up the cooker
# (done after import_extension_module() above since for example import gi triggers env var usage)
cleanedvars = bb.utils.clean_environment()

if configParams.remote_server:
# Connect to a remote XMLRPC server
server_connection = bb.server.xmlrpcclient.connectXMLRPC(configParams.remote_server, featureset,
configParams.observe_only, configParams.xmlrpctoken)
else:
retries = 8
while retries:
try:
topdir, lock = lockBitbake()
sockname = topdir + "/bitbake.sock"
if lock:
if configParams.status_only or configParams.kill_server:
logger.info("bitbake server is not running.")
lock.close()
return None, None
# we start a server with a given configuration
logger.info("Starting bitbake server...")
# Clear the event queue since we already displayed messages
bb.event.ui_queue = []
server = bb.server.process.BitBakeServer(lock, sockname, configuration, featureset)

else:
logger.info("Reconnecting to bitbake server...")
if not os.path.exists(sockname):
logger.info("Previous bitbake instance shutting down? Waiting to retry...")
i = 0
lock = None
# Wait for 5s or until we can get the lock
while not lock and i < 50:
time.sleep(0.1)
_, lock = lockBitbake()
i += 1
if lock:
bb.utils.unlockfile(lock)
raise bb.server.process.ProcessTimeout("Bitbake still shutting down as socket exists but no lock?")
if not configParams.server_only:
try:
server_connection = bb.server.process.connectProcessServer(sockname, featureset)
except EOFError:
# The server may have been shutting down but not closed the socket yet. If that happened,
# ignore it.
pass

if server_connection or configParams.server_only:
break
except BBMainFatal:
raise
except (Exception, bb.server.process.ProcessTimeout) as e:
if not retries:
raise
retries -= 1
if isinstance(e, (bb.server.process.ProcessTimeout, BrokenPipeError)):
logger.info("Retrying server connection...")
else:
logger.info("Retrying server connection... (%s)" % traceback.format_exc())
if not retries:
bb.fatal("Unable to connect to bitbake server, or start one")
if retries < 5:
time.sleep(5)

if configParams.kill_server:
server_connection.connection.terminateServer()
server_connection.terminate()
if not configParams.remote_server:
# we start a server with a given configuration
server = start_server(servermodule, configParams, configuration, featureset)
bb.event.ui_queue = []
logger.info("Terminated bitbake server.")
return None, None
else:
if os.getenv('BBSERVER') == 'autostart':
if configParams.remote_server == 'autostart' or \
not servermodule.check_connection(configParams.remote_server, timeout=2):
configParams.bind = 'localhost:0'
srv = start_server(servermodule, configParams, configuration, featureset)
configParams.remote_server = '%s:%d' % tuple(configuration.interface)
bb.event.ui_queue = []

# Restore the environment in case the UI needs it
for k in cleanedvars:
os.environ[k] = cleanedvars[k]
# we start a stub server that is actually a XMLRPClient that connects to a real server
server = servermodule.BitBakeXMLRPCClient(configParams.observe_only,
configParams.xmlrpctoken)
server.saveConnectionDetails(configParams.remote_server)

logger.removeHandler(handler)

return server_connection, ui_module
if not configParams.server_only:
try:
server_connection = server.establishConnection(featureset)
except Exception as e:
bb.fatal("Could not connect to server %s: %s" % (configParams.remote_server, str(e)))

def lockBitbake():
topdir = bb.cookerdata.findTopdir()
if not topdir:
bb.error("Unable to find conf/bblayers.conf or conf/bitbake.conf. BBPATH is unset and/or not in a build directory?")
raise BBMainFatal
lockfile = topdir + "/bitbake.lock"
return topdir, bb.utils.lockfile(lockfile, False, False)
if configParams.kill_server:
server_connection.connection.terminateServer()
bb.event.ui_queue = []
return 0

server_connection.setupEventQueue()

# Restore the environment in case the UI needs it
for k in cleanedvars:
os.environ[k] = cleanedvars[k]

logger.removeHandler(handler)


if configParams.status_only:
server_connection.terminate()
return 0

try:
return ui_module.main(server_connection.connection, server_connection.events,
configParams)
finally:
bb.event.ui_queue = []
server_connection.terminate()
else:
print("Bitbake server address: %s, server port: %s" % (server.serverImpl.host,
server.serverImpl.port))
if configParams.foreground:
server.serverImpl.serve_forever()
return 0

return 1
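The setup path above retries the connect-or-start dance up to eight times, sleeping once it is down to the last few attempts. A stripped-down sketch of that retry loop, with a stubbed connect function standing in for the socket work:

    import time

    class ConnectError(Exception):
        pass

    def connect_with_retries(connect, retries=8, slow_after=5, delay=5):
        # Try connect() repeatedly; once fewer than slow_after attempts
        # remain, sleep between tries, as the server startup loop above does.
        while retries:
            try:
                return connect()
            except ConnectError:
                retries -= 1
                if not retries:
                    raise
                if retries < slow_after:
                    time.sleep(delay)

    # Example with a flaky stub that succeeds on the third call:
    attempts = {"n": 0}
    def flaky():
        attempts["n"] += 1
        if attempts["n"] < 3:
            raise ConnectError("not up yet")
        return "connection"

    assert connect_with_retries(flaky) == "connection"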
@@ -129,7 +129,7 @@ def getDiskData(BBDirs, configuration):
bb.utils.mkdirhier(path)
dev = getMountedDev(path)
# Use path/action as the key
devDict[(path, action)] = [dev, minSpace, minInode]
devDict[os.path.join(path, action)] = [dev, minSpace, minInode]

return devDict

@@ -141,7 +141,7 @@ def getInterval(configuration):
spaceDefault = 50 * 1024 * 1024
inodeDefault = 5 * 1024

interval = configuration.getVar("BB_DISKMON_WARNINTERVAL")
interval = configuration.getVar("BB_DISKMON_WARNINTERVAL", True)
if not interval:
return spaceDefault, inodeDefault
else:
@@ -179,7 +179,7 @@ class diskMonitor:
self.enableMonitor = False
self.configuration = configuration

BBDirs = configuration.getVar("BB_DISKMON_DIRS") or None
BBDirs = configuration.getVar("BB_DISKMON_DIRS", True) or None
if BBDirs:
self.devDict = getDiskData(BBDirs, configuration)
if self.devDict:
@@ -205,21 +205,18 @@ class diskMonitor:
""" Take action for the monitor """

if self.enableMonitor:
diskUsage = {}
for k, attributes in self.devDict.items():
path, action = k
dev, minSpace, minInode = attributes
for k in self.devDict:
path = os.path.dirname(k)
action = os.path.basename(k)
dev = self.devDict[k][0]
minSpace = self.devDict[k][1]
minInode = self.devDict[k][2]

st = os.statvfs(path)

# The available free space, integer number
# The free space, float point number
freeSpace = st.f_bavail * st.f_frsize

# Send all relevant information in the event.
freeSpaceRoot = st.f_bfree * st.f_frsize
totalSpace = st.f_blocks * st.f_frsize
diskUsage[dev] = bb.event.DiskUsageSample(freeSpace, freeSpaceRoot, totalSpace)

if minSpace and freeSpace < minSpace:
# Always show warning, the self.checked would always be False if the action is WARN
if self.preFreeS[k] == 0 or self.preFreeS[k] - freeSpace > self.spaceInterval and not self.checked[k]:
@@ -238,7 +235,7 @@ class diskMonitor:
rq.finish_runqueue(True)
bb.event.fire(bb.event.DiskFull(dev, 'disk', freeSpace, path), self.configuration)

# The free inodes, integer number
# The free inodes, float point number
freeInode = st.f_favail

if minInode and freeInode < minInode:
@@ -263,6 +260,4 @@ class diskMonitor:
self.checked[k] = True
rq.finish_runqueue(True)
bb.event.fire(bb.event.DiskFull(dev, 'inode', freeInode, path), self.configuration)

bb.event.fire(bb.event.MonitorDiskEvent(diskUsage), self.configuration)
return
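The monitor derives its numbers from os.statvfs: free bytes are f_bavail (blocks available to unprivileged users) times the fragment size f_frsize, root-reserved free space uses f_bfree, and free inodes come from f_favail. A tiny standalone probe using the same arithmetic:

    import os

    def disk_usage(path):
        # Same statvfs arithmetic as the disk monitor above.
        st = os.statvfs(path)
        return {
            "free_bytes": st.f_bavail * st.f_frsize,       # available to non-root
            "free_bytes_root": st.f_bfree * st.f_frsize,   # including root reserve
            "total_bytes": st.f_blocks * st.f_frsize,
            "free_inodes": st.f_favail,
        }

    usage = disk_usage("/tmp")
    print("/tmp has %.1f MiB free" % (usage["free_bytes"] / (1024 * 1024)))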
@@ -40,7 +40,6 @@ class BBLogFormatter(logging.Formatter):
VERBOSE = logging.INFO - 1
NOTE = logging.INFO
PLAIN = logging.INFO + 1
VERBNOTE = logging.INFO + 2
ERROR = logging.ERROR
WARNING = logging.WARNING
CRITICAL = logging.CRITICAL
@@ -52,7 +51,6 @@ class BBLogFormatter(logging.Formatter):
VERBOSE: 'NOTE',
NOTE : 'NOTE',
PLAIN : '',
VERBNOTE: 'NOTE',
WARNING : 'WARNING',
ERROR : 'ERROR',
CRITICAL: 'ERROR',
@@ -68,7 +66,6 @@ class BBLogFormatter(logging.Formatter):
VERBOSE : BASECOLOR,
NOTE : BASECOLOR,
PLAIN : BASECOLOR,
VERBNOTE: BASECOLOR,
WARNING : YELLOW,
ERROR : RED,
CRITICAL: RED,
@@ -204,25 +201,3 @@ def fatal(msgdomain, msg):
logger = logging.getLogger("BitBake")
logger.critical(msg)
sys.exit(1)

def logger_create(name, output=sys.stderr, level=logging.INFO, preserve_handlers=False, color='auto'):
"""Standalone logger creation function"""
logger = logging.getLogger(name)
console = logging.StreamHandler(output)
format = bb.msg.BBLogFormatter("%(levelname)s: %(message)s")
if color == 'always' or (color == 'auto' and output.isatty()):
format.enable_color()
console.setFormatter(format)
if preserve_handlers:
logger.addHandler(console)
else:
logger.handlers = [console]
logger.setLevel(level)
return logger

def has_console_handler(logger):
for handler in logger.handlers:
if isinstance(handler, logging.StreamHandler):
if handler.stream in [sys.stderr, sys.stdout]:
return True
return False
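The removed logger_create helper wires a StreamHandler with an optional color formatter onto a named logger, replacing any existing handlers. A minimal equivalent using only the standard library (ColorFormatter is a stand-in for BBLogFormatter):

    import logging
    import sys

    class ColorFormatter(logging.Formatter):
        # Stand-in for BBLogFormatter: wrap warnings and errors in ANSI color.
        COLORS = {logging.WARNING: "\033[33m", logging.ERROR: "\033[31m"}

        def format(self, record):
            msg = super().format(record)
            color = self.COLORS.get(record.levelno)
            return "%s%s\033[0m" % (color, msg) if color else msg

    def logger_create(name, output=sys.stderr, level=logging.INFO, color="auto"):
        logger = logging.getLogger(name)
        console = logging.StreamHandler(output)
        if color == "always" or (color == "auto" and output.isatty()):
            console.setFormatter(ColorFormatter("%(levelname)s: %(message)s"))
        else:
            console.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
        logger.handlers = [console]   # replace, don't stack, handlers
        logger.setLevel(level)
        return logger

    log = logger_create("demo")
    log.warning("low disk space")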
@@ -84,10 +84,6 @@ def update_cache(f):
logger.debug(1, "Updating mtime cache for %s" % f)
update_mtime(f)

def clear_cache():
global __mtime_cache
__mtime_cache = {}

def mark_dependency(d, f):
if f.startswith('./'):
f = "%s/%s" % (os.getcwd(), f[2:])
@@ -127,16 +123,15 @@ def init_parser(d):

def resolve_file(fn, d):
if not os.path.isabs(fn):
bbpath = d.getVar("BBPATH")
bbpath = d.getVar("BBPATH", True)
newfn, attempts = bb.utils.which(bbpath, fn, history=True)
for af in attempts:
mark_dependency(d, af)
if not newfn:
raise IOError(errno.ENOENT, "file %s not found in %s" % (fn, bbpath))
fn = newfn
else:
mark_dependency(d, fn)

mark_dependency(d, fn)
if not os.path.isfile(fn):
raise IOError(errno.ENOENT, "file %s not found" % fn)
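resolve_file leans on bb.utils.which to walk the colon-separated BBPATH, recording every directory it probed so each attempt can be marked as a parse dependency. A small sketch of that search-with-history behavior:

    import os

    def which(pathlist, filename, history=False):
        # Walk a colon-separated search path; optionally return every
        # candidate probed, mirroring bb.utils.which(..., history=True).
        attempts = []
        for directory in pathlist.split(":"):
            candidate = os.path.join(directory, filename)
            attempts.append(candidate)
            if os.path.exists(candidate):
                return (candidate, attempts) if history else candidate
        return ("", attempts) if history else ""

    found, tried = which("/etc:/usr/share", "hosts", history=True)
    print("found:", found)       # typically /etc/hosts on a Unix system
    print("probed:", tried)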
@@ -30,6 +30,8 @@ import itertools
from bb import methodpool
from bb.parse import logger

_bbversions_re = re.compile(r"\[(?P<from>[0-9]+)-(?P<to>[0-9]+)\]")

class StatementGroup(list):
def eval(self, data):
for statement in self:
@@ -130,6 +132,7 @@ class DataNode(AstNode):
val = groupd["value"]
elif "colon" in groupd and groupd["colon"] != None:
e = data.createCopy()
bb.data.update_data(e)
op = "immediate"
val = e.expand(groupd["value"], key + "[:=]")
elif "append" in groupd and groupd["append"] != None:
@@ -335,39 +338,36 @@ def handleInherit(statements, filename, lineno, m):
classes = m.group(1)
statements.append(InheritNode(filename, lineno, classes))

def runAnonFuncs(d):
def finalize(fn, d, variant = None):
saved_handlers = bb.event.get_handlers().copy()

for var in d.getVar('__BBHANDLERS', False) or []:
# try to add the handler
handlerfn = d.getVarFlag(var, "filename", False)
if not handlerfn:
bb.fatal("Undefined event handler function '%s'" % var)
handlerln = int(d.getVarFlag(var, "lineno", False))
bb.event.register(var, d.getVar(var, False), (d.getVarFlag(var, "eventmask", True) or "").split(), handlerfn, handlerln)

bb.event.fire(bb.event.RecipePreFinalise(fn), d)

bb.data.expandKeys(d)
bb.data.update_data(d)
code = []
for funcname in d.getVar("__BBANONFUNCS", False) or []:
code.append("%s(d)" % funcname)
bb.utils.better_exec("\n".join(code), {"d": d})
bb.data.update_data(d)

def finalize(fn, d, variant = None):
saved_handlers = bb.event.get_handlers().copy()
try:
for var in d.getVar('__BBHANDLERS', False) or []:
# try to add the handler
handlerfn = d.getVarFlag(var, "filename", False)
if not handlerfn:
bb.fatal("Undefined event handler function '%s'" % var)
handlerln = int(d.getVarFlag(var, "lineno", False))
bb.event.register(var, d.getVar(var, False), (d.getVarFlag(var, "eventmask") or "").split(), handlerfn, handlerln)
tasklist = d.getVar('__BBTASKS', False) or []
bb.build.add_tasks(tasklist, d)

bb.event.fire(bb.event.RecipePreFinalise(fn), d)
bb.parse.siggen.finalise(fn, d, variant)

bb.data.expandKeys(d)
runAnonFuncs(d)
d.setVar('BBINCLUDED', bb.parse.get_file_depends(d))

tasklist = d.getVar('__BBTASKS', False) or []
bb.event.fire(bb.event.RecipeTaskPreProcess(fn, list(tasklist)), d)
bb.build.add_tasks(tasklist, d)

bb.parse.siggen.finalise(fn, d, variant)

d.setVar('BBINCLUDED', bb.parse.get_file_depends(d))

bb.event.fire(bb.event.RecipeParsed(fn), d)
finally:
bb.event.set_handlers(saved_handlers)
bb.event.fire(bb.event.RecipeParsed(fn), d)
bb.event.set_handlers(saved_handlers)

def _create_variants(datastores, names, function, onlyfinalise):
def create_variant(name, orig_d, arg = None):
@@ -385,8 +385,29 @@ def _create_variants(datastores, names, function, onlyfinalise):
else:
create_variant("%s-%s" % (variant, name), datastores[variant], name)

def _expand_versions(versions):
def expand_one(version, start, end):
for i in range(start, end + 1):
ver = _bbversions_re.sub(str(i), version, 1)
yield ver

versions = iter(versions)
while True:
try:
version = next(versions)
except StopIteration:
break

range_ver = _bbversions_re.search(version)
if not range_ver:
yield version
else:
newversions = expand_one(version, int(range_ver.group("from")),
int(range_ver.group("to")))
versions = itertools.chain(newversions, versions)
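_expand_versions turns BBVERSIONS range syntax like 1.[0-2] into concrete versions, re-queueing each expansion so nested ranges are handled too. A runnable copy of the generator with a worked example:

    import itertools
    import re

    _bbversions_re = re.compile(r"\[(?P<from>[0-9]+)-(?P<to>[0-9]+)\]")

    def _expand_versions(versions):
        # Same logic as the parser: replace one [a-b] range at a time and
        # push the results back onto the front of the iterator.
        def expand_one(version, start, end):
            for i in range(start, end + 1):
                yield _bbversions_re.sub(str(i), version, 1)

        versions = iter(versions)
        while True:
            try:
                version = next(versions)
            except StopIteration:
                break
            range_ver = _bbversions_re.search(version)
            if not range_ver:
                yield version
            else:
                newversions = expand_one(version, int(range_ver.group("from")),
                                         int(range_ver.group("to")))
                versions = itertools.chain(newversions, versions)

    assert list(_expand_versions(["1.[0-2]", "2.0"])) == ["1.0", "1.1", "1.2", "2.0"]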
def multi_finalize(fn, d):
appends = (d.getVar("__BBAPPEND") or "").split()
appends = (d.getVar("__BBAPPEND", True) or "").split()
for append in appends:
logger.debug(1, "Appending .bbappend file %s to %s", append, fn)
bb.parse.BBHandler.handle(append, d, True)
@@ -401,7 +422,51 @@ def multi_finalize(fn, d):
d.setVar("__SKIPPED", e.args[0])
datastores = {"": safe_d}

extended = d.getVar("BBCLASSEXTEND") or ""
versions = (d.getVar("BBVERSIONS", True) or "").split()
if versions:
pv = orig_pv = d.getVar("PV", True)
baseversions = {}

def verfunc(ver, d, pv_d = None):
if pv_d is None:
pv_d = d

overrides = d.getVar("OVERRIDES", True).split(":")
pv_d.setVar("PV", ver)
overrides.append(ver)
bpv = baseversions.get(ver) or orig_pv
pv_d.setVar("BPV", bpv)
overrides.append(bpv)
d.setVar("OVERRIDES", ":".join(overrides))

versions = list(_expand_versions(versions))
for pos, version in enumerate(list(versions)):
try:
pv, bpv = version.split(":", 2)
except ValueError:
pass
else:
versions[pos] = pv
baseversions[pv] = bpv

if pv in versions and not baseversions.get(pv):
versions.remove(pv)
else:
pv = versions.pop()

# This is necessary because our existing main datastore
# has already been finalized with the old PV, we need one
# that's been finalized with the new PV.
d = bb.data.createCopy(safe_d)
verfunc(pv, d, safe_d)
try:
finalize(fn, d)
except bb.parse.SkipRecipe as e:
d.setVar("__SKIPPED", e.args[0])

_create_variants(datastores, versions, verfunc, onlyfinalise)

extended = d.getVar("BBCLASSEXTEND", True) or ""
if extended:
# the following is to support bbextends with arguments, for e.g. multilib
# an example is as follows:
@@ -419,7 +484,7 @@ def multi_finalize(fn, d):
else:
extendedmap[ext] = ext

pn = d.getVar("PN")
pn = d.getVar("PN", True)
def extendfunc(name, d):
if name != extendedmap[name]:
d.setVar("BBEXTENDCURR", extendedmap[name])
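BBCLASSEXTEND entries may carry an argument (e.g. multilib:lib32), and the parser builds a map from the short variant name to the full extension string before creating variants. A tiny illustration of that mapping step (the entries are examples, and the helper is a simplified sketch, not the exact parser code):

    def build_extendedmap(extended):
        # "multilib:lib32" -> variant "lib32" extends via class "multilib";
        # plain entries like "native" map to themselves.
        extendedmap = {}
        for ext in extended.split():
            if ":" in ext:
                classext, name = ext.split(":", 1)
                extendedmap[name] = ext
            else:
                extendedmap[ext] = ext
        return extendedmap

    assert build_extendedmap("native nativesdk multilib:lib32") == {
        "native": "native",
        "nativesdk": "nativesdk",
        "lib32": "multilib:lib32",
    }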
@@ -66,7 +66,7 @@ def inherit(files, fn, lineno, d):
file = os.path.join('classes', '%s.bbclass' % file)

if not os.path.isabs(file):
bbpath = d.getVar("BBPATH")
bbpath = d.getVar("BBPATH", True)
abs_fn, attempts = bb.utils.which(bbpath, file, history=True)
for af in attempts:
if af != abs_fn:
@@ -87,17 +87,17 @@ def get_statements(filename, absolute_filename, base_name):
try:
return cached_statements[absolute_filename]
except KeyError:
with open(absolute_filename, 'r') as f:
statements = ast.StatementGroup()

lineno = 0
while True:
lineno = lineno + 1
s = f.readline()
if not s: break
s = s.rstrip()
feeder(lineno, s, filename, base_name, statements)
file = open(absolute_filename, 'r')
statements = ast.StatementGroup()

lineno = 0
while True:
lineno = lineno + 1
s = file.readline()
if not s: break
s = s.rstrip()
feeder(lineno, s, filename, base_name, statements)
file.close()
if __inpython__:
# add a blank line to close out any python definition
feeder(lineno, "", filename, base_name, statements, eof=True)
@@ -131,6 +131,9 @@ def handle(fn, d, include):

abs_fn = resolve_file(fn, d)

if include:
bb.parse.mark_dependency(d, abs_fn)

# actual loading
statements = get_statements(fn, abs_fn, base_name)

@@ -141,7 +144,7 @@ def handle(fn, d, include):
try:
statements.eval(d)
except bb.parse.SkipRecipe:
d.setVar("__SKIPPED", True)
bb.data.setVar("__SKIPPED", True, d)
if include == 0:
return { "" : d }
@@ -32,8 +32,8 @@ from bb.parse import ParseError, resolve_file, ast, logger, handle

__config_regexp__ = re.compile( r"""
^
(?P<exp>export\s+)?
(?P<var>[a-zA-Z0-9\-_+.${}/~]+?)
(?P<exp>export\s*)?
(?P<var>[a-zA-Z0-9\-~_+.${}/]+?)
(\[(?P<flag>[a-zA-Z0-9\-_+.]+)\])?

\s* (
@@ -56,9 +56,9 @@ __config_regexp__ = re.compile( r"""
""", re.X)
__include_regexp__ = re.compile( r"include\s+(.+)" )
__require_regexp__ = re.compile( r"require\s+(.+)" )
__export_regexp__ = re.compile( r"export\s+([a-zA-Z0-9\-_+.${}/~]+)$" )
__unset_regexp__ = re.compile( r"unset\s+([a-zA-Z0-9\-_+.${}/~]+)$" )
__unset_flag_regexp__ = re.compile( r"unset\s+([a-zA-Z0-9\-_+.${}/~]+)\[([a-zA-Z0-9\-_+.]+)\]$" )
__export_regexp__ = re.compile( r"export\s+([a-zA-Z0-9\-_+.${}/]+)$" )
__unset_regexp__ = re.compile( r"unset\s+([a-zA-Z0-9\-_+.${}/]+)$" )
__unset_flag_regexp__ = re.compile( r"unset\s+([a-zA-Z0-9\-_+.${}/]+)\[([a-zA-Z0-9\-_+.${}/]+)\]$" )

def init(data):
topdir = data.getVar('TOPDIR', False)
@@ -69,38 +69,30 @@ def init(data):
def supports(fn, d):
return fn[-5:] == ".conf"

def include(parentfn, fns, lineno, data, error_out):
def include(parentfn, fn, lineno, data, error_out):
"""
error_out: A string indicating the verb (e.g. "include", "inherit") to be
used in a ParseError that will be raised if the file to be included could
not be included. Specify False to avoid raising an error in this case.
"""
fns = data.expand(fns)
parentfn = data.expand(parentfn)

# "include" or "require" accept zero to n space-separated file names to include.
for fn in fns.split():
include_single_file(parentfn, fn, lineno, data, error_out)

def include_single_file(parentfn, fn, lineno, data, error_out):
"""
Helper function for include() which does not expand or split its parameters.
"""
if parentfn == fn: # prevent infinite recursion
return None

fn = data.expand(fn)
parentfn = data.expand(parentfn)

if not os.path.isabs(fn):
dname = os.path.dirname(parentfn)
bbpath = "%s:%s" % (dname, data.getVar("BBPATH"))
bbpath = "%s:%s" % (dname, data.getVar("BBPATH", True))
abs_fn, attempts = bb.utils.which(bbpath, fn, history=True)
if abs_fn and bb.parse.check_dependency(data, abs_fn):
logger.warning("Duplicate inclusion for %s in %s" % (abs_fn, data.getVar('FILE')))
logger.warning("Duplicate inclusion for %s in %s" % (abs_fn, data.getVar('FILE', True)))
for af in attempts:
bb.parse.mark_dependency(data, af)
if abs_fn:
fn = abs_fn
elif bb.parse.check_dependency(data, fn):
logger.warning("Duplicate inclusion for %s in %s" % (fn, data.getVar('FILE')))
logger.warning("Duplicate inclusion for %s in %s" % (fn, data.getVar('FILE', True)))

try:
bb.parse.handle(fn, data, True)
@@ -134,6 +126,9 @@ def handle(fn, data, include):
abs_fn = resolve_file(fn, data)
f = open(abs_fn, 'r')

if include:
bb.parse.mark_dependency(data, abs_fn)

statements = ast.StatementGroup()
lineno = 0
while True:
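The newer side splits include() in two: the outer function expands the directive's argument and iterates over whitespace-separated file names, while include_single_file handles one path. A compact sketch of that split (process_one is a stub for the real parse step):

    def include(parentfn, fns, expand, process_one):
        # "include" / "require" may name zero or more files; expand the
        # whole argument once, then handle each file individually.
        fns = expand(fns)
        for fn in fns.split():
            include_single_file(parentfn, fn, process_one)

    def include_single_file(parentfn, fn, process_one):
        if parentfn == fn:   # prevent infinite recursion
            return
        process_one(fn)

    seen = []
    include("conf/local.conf", "a.conf b.conf", str.strip, seen.append)
    assert seen == ["a.conf", "b.conf"]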
@@ -28,7 +28,11 @@ import sys
import warnings
from bb.compat import total_ordering
from collections import Mapping
import sqlite3

try:
import sqlite3
except ImportError:
from pysqlite2 import dbapi2 as sqlite3

sqlversion = sqlite3.sqlite_version_info
if sqlversion[0] < 3 or (sqlversion[0] == 3 and sqlversion[1] < 3):
@@ -203,8 +207,8 @@ def connect(database):
def persist(domain, d):
"""Convenience factory for SQLTable objects based upon metadata"""
import bb.utils
cachedir = (d.getVar("PERSISTENT_DIR") or
d.getVar("CACHE"))
cachedir = (d.getVar("PERSISTENT_DIR", True) or
d.getVar("CACHE", True))
if not cachedir:
logger.critical("Please set the 'PERSISTENT_DIR' or 'CACHE' variable")
sys.exit(1)
@@ -94,53 +94,46 @@ def _logged_communicate(pipe, log, input, extrafiles):
        if data is not None:
            func(data)

    def read_all_pipes(log, rin, outdata, errdata):
        rlist = rin
        stdoutbuf = b""
        stderrbuf = b""

        try:
            r,w,e = select.select (rlist, [], [], 1)
        except OSError as e:
            if e.errno != errno.EINTR:
                raise

        readextras(r)

        if pipe.stdout in r:
            data = stdoutbuf + pipe.stdout.read()
            if data is not None and len(data) > 0:
                try:
                    data = data.decode("utf-8")
                    outdata.append(data)
                    log.write(data)
                    log.flush()
                    stdoutbuf = b""
                except UnicodeDecodeError:
                    stdoutbuf = data

        if pipe.stderr in r:
            data = stderrbuf + pipe.stderr.read()
            if data is not None and len(data) > 0:
                try:
                    data = data.decode("utf-8")
                    errdata.append(data)
                    log.write(data)
                    log.flush()
                    stderrbuf = b""
                except UnicodeDecodeError:
                    stderrbuf = data

    try:
        # Read all pipes while the process is open
        while pipe.poll() is None:
            read_all_pipes(log, rin, outdata, errdata)
        rlist = rin
        stdoutbuf = b""
        stderrbuf = b""
        try:
            r,w,e = select.select (rlist, [], [], 1)
        except OSError as e:
            if e.errno != errno.EINTR:
                raise

        # Process closed, drain all pipes...
        read_all_pipes(log, rin, outdata, errdata)
    finally:
        if pipe.stdout in r:
            data = stdoutbuf + pipe.stdout.read()
            if data is not None and len(data) > 0:
                try:
                    data = data.decode("utf-8")
                    outdata.append(data)
                    log.write(data)
                    stdoutbuf = b""
                except UnicodeDecodeError:
                    stdoutbuf = data

        if pipe.stderr in r:
            data = stderrbuf + pipe.stderr.read()
            if data is not None and len(data) > 0:
                try:
                    data = data.decode("utf-8")
                    errdata.append(data)
                    log.write(data)
                    stderrbuf = b""
                except UnicodeDecodeError:
                    stderrbuf = data

        readextras(r)

    finally:
        log.flush()

        readextras([fobj for fobj, _ in extrafiles])

        if pipe.stdout is not None:
            pipe.stdout.close()
        if pipe.stderr is not None:
@@ -169,9 +162,9 @@ def run(cmd, input=None, log=None, extrafiles=None, **options):
        stdout, stderr = _logged_communicate(pipe, log, input, extrafiles)
    else:
        stdout, stderr = pipe.communicate(input)
        if not stdout is None:
        if stdout:
            stdout = stdout.decode("utf-8")
        if not stderr is None:
        if stderr:
            stderr = stderr.decode("utf-8")

    if pipe.returncode != 0:
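The stdoutbuf/stderrbuf carry-over above exists because a pipe read can end in the middle of a multi-byte UTF-8 sequence; undecodable bytes are buffered and retried on the next read. A self-contained demonstration, splitting a byte stream inside a two-byte character:

    # Buffered-decode pattern: keep partial bytes until they decode.
    data = "café\n".encode("utf-8")
    chunks = [data[:4], data[4:]]   # split inside the two-byte 'é'
    buf = b""
    out = []
    for chunk in chunks:
        buf += chunk
        try:
            out.append(buf.decode("utf-8"))
            buf = b""
        except UnicodeDecodeError:
            pass                    # carry the partial bytes forward
    print("".join(out))             # -> café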
@@ -48,6 +48,7 @@ def findProviders(cfgData, dataCache, pkg_pn = None):

    # Need to ensure data store is expanded
    localdata = data.createCopy(cfgData)
    bb.data.update_data(localdata)
    bb.data.expandKeys(localdata)

    preferred_versions = {}
@@ -122,11 +123,11 @@ def findPreferredProvider(pn, cfgData, dataCache, pkg_pn = None, item = None):

    # pn can contain '_', e.g. gcc-cross-x86_64 and an override cannot
    # hence we do this manually rather than use OVERRIDES
    preferred_v = cfgData.getVar("PREFERRED_VERSION_pn-%s" % pn)
    preferred_v = cfgData.getVar("PREFERRED_VERSION_pn-%s" % pn, True)
    if not preferred_v:
        preferred_v = cfgData.getVar("PREFERRED_VERSION_%s" % pn)
        preferred_v = cfgData.getVar("PREFERRED_VERSION_%s" % pn, True)
    if not preferred_v:
        preferred_v = cfgData.getVar("PREFERRED_VERSION")
        preferred_v = cfgData.getVar("PREFERRED_VERSION", True)

    if preferred_v:
        m = re.match('(\d+:)*(.*)(_.*)*', preferred_v)
@@ -244,17 +245,17 @@ def _filterProviders(providers, item, cfgData, dataCache):
            pkg_pn[pn] = []
        pkg_pn[pn].append(p)

    logger.debug(1, "providers for %s are: %s", item, list(sorted(pkg_pn.keys())))
    logger.debug(1, "providers for %s are: %s", item, list(pkg_pn.keys()))

    # First add PREFERRED_VERSIONS
    for pn in sorted(pkg_pn):
    for pn in pkg_pn:
        sortpkg_pn[pn] = sortPriorities(pn, dataCache, pkg_pn)
        preferred_versions[pn] = findPreferredProvider(pn, cfgData, dataCache, sortpkg_pn[pn], item)
        if preferred_versions[pn][1]:
            eligible.append(preferred_versions[pn][1])

    # Now add latest versions
    for pn in sorted(sortpkg_pn):
    for pn in sortpkg_pn:
        if pn in preferred_versions and preferred_versions[pn][1]:
            continue
        preferred_versions[pn] = findLatestProvider(pn, cfgData, dataCache, sortpkg_pn[pn][0])
@@ -288,7 +289,7 @@ def filterProviders(providers, item, cfgData, dataCache):

    eligible = _filterProviders(providers, item, cfgData, dataCache)

    prefervar = cfgData.getVar('PREFERRED_PROVIDER_%s' % item)
    prefervar = cfgData.getVar('PREFERRED_PROVIDER_%s' % item, True)
    if prefervar:
        dataCache.preferred[item] = prefervar

@@ -317,7 +318,7 @@ def filterProvidersRunTime(providers, item, cfgData, dataCache):
    eligible = _filterProviders(providers, item, cfgData, dataCache)

    # First try and match any PREFERRED_RPROVIDER entry
    prefervar = cfgData.getVar('PREFERRED_RPROVIDER_%s' % item)
    prefervar = cfgData.getVar('PREFERRED_RPROVIDER_%s' % item, True)
    foundUnique = False
    if prefervar:
        for p in eligible:
@@ -344,7 +345,7 @@ def filterProvidersRunTime(providers, item, cfgData, dataCache):
            pn = dataCache.pkg_fn[p]
            provides = dataCache.pn_provides[pn]
            for provide in provides:
                prefervar = cfgData.getVar('PREFERRED_PROVIDER_%s' % provide)
                prefervar = cfgData.getVar('PREFERRED_PROVIDER_%s' % provide, True)
                #logger.debug(1, "checking PREFERRED_PROVIDER_%s (value %s) against %s", provide, prefervar, pns.keys())
                if prefervar in pns and pns[prefervar] not in preferred:
                    var = "PREFERRED_PROVIDER_%s = %s" % (provide, prefervar)
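The three-step PREFERRED_VERSION fallback above (pn-specific, then per-recipe, then global) reduces to a simple chain of lookups; here with a plain dict standing in for the datastore and hypothetical values:

    def preferred_version(cfg, pn):
        # mirror the lookup order in findPreferredProvider()
        return (cfg.get("PREFERRED_VERSION_pn-%s" % pn)
                or cfg.get("PREFERRED_VERSION_%s" % pn)
                or cfg.get("PREFERRED_VERSION"))

    cfg = {"PREFERRED_VERSION_pn-gcc-cross-x86_64": "7.3%"}
    print(preferred_version(cfg, "gcc-cross-x86_64"))   # -> 7.3%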
@@ -1,116 +0,0 @@
"""
BitBake 'remotedata' module

Provides support for using a datastore from the bitbake client
"""

# Copyright (C) 2016 Intel Corporation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import bb.data

class RemoteDatastores:
    """Used on the server side to manage references to server-side datastores"""
    def __init__(self, cooker):
        self.cooker = cooker
        self.datastores = {}
        self.locked = []
        self.nextindex = 1

    def __len__(self):
        return len(self.datastores)

    def __getitem__(self, key):
        if key is None:
            return self.cooker.data
        else:
            return self.datastores[key]

    def items(self):
        return self.datastores.items()

    def store(self, d, locked=False):
        """
        Put a datastore into the collection. If locked=True then the datastore
        is understood to be managed externally and cannot be released by calling
        release().
        """
        idx = self.nextindex
        self.datastores[idx] = d
        if locked:
            self.locked.append(idx)
        self.nextindex += 1
        return idx

    def check_store(self, d, locked=False):
        """
        Put a datastore into the collection if it's not already in there;
        in either case return the index
        """
        for key, val in self.datastores.items():
            if val is d:
                idx = key
                break
        else:
            idx = self.store(d, locked)
        return idx

    def release(self, idx):
        """Discard a datastore in the collection"""
        if idx in self.locked:
            raise Exception('Tried to release locked datastore %d' % idx)
        del self.datastores[idx]

    def receive_datastore(self, remote_data):
        """Receive a datastore object sent from the client (as prepared by transmit_datastore())"""
        dct = dict(remote_data)
        d = bb.data_smart.DataSmart()
        d.dict = dct
        while True:
            if '_remote_data' in dct:
                dsindex = dct['_remote_data']['_content']
                del dct['_remote_data']
                if dsindex is None:
                    dct['_data'] = self.cooker.data.dict
                else:
                    dct['_data'] = self.datastores[dsindex].dict
                break
            elif '_data' in dct:
                idct = dict(dct['_data'])
                dct['_data'] = idct
                dct = idct
            else:
                break
        return d

    @staticmethod
    def transmit_datastore(d):
        """Prepare a datastore object for sending over IPC from the client end"""
        # FIXME content might be a dict, need to turn that into a list as well
        def copy_dicts(dct):
            if '_remote_data' in dct:
                dsindex = dct['_remote_data']['_content'].dsindex
                newdct = dct.copy()
                newdct['_remote_data'] = {'_content': dsindex}
                return list(newdct.items())
            elif '_data' in dct:
                newdct = dct.copy()
                newdata = copy_dicts(dct['_data'])
                if newdata:
                    newdct['_data'] = newdata
                return list(newdct.items())
            return None
        main_dict = copy_dicts(d.dict)
        return main_dict
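transmit_datastore() and receive_datastore() flatten a chain of dicts linked through '_data' into nested lists of items so the structure can be pickled over IPC, then rebuild it on the other side. A rough sketch of that round-trip with plain dicts (an illustration of the idea, not the real DataSmart wire format; it assumes only '_data' values are dicts):

    def flatten(dct):
        # dict chain -> nested lists of (key, value) pairs
        return [(k, flatten(v) if k == '_data' else v) for k, v in dct.items()]

    def rebuild(items):
        # nested lists -> dict chain
        return {k: rebuild(v) if k == '_data' else v for k, v in items}

    d = {'A': '1', '_data': {'B': '2', '_data': {'C': '3'}}}
    assert rebuild(flatten(d)) == d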
File diff suppressed because it is too large
@@ -18,4 +18,82 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

""" Base code for Bitbake server process

Having a common base for all Bitbake server classes ensures a consistent
approach to the interface, and minimizes the risks associated with code
duplication.

"""

""" BaseImplServer() is the base class for all XXServer() implementations.

These classes contain the actual code that runs the server side, i.e.
listens for the commands and executes them. Although these implementations
contain all the data of the original bitbake command, i.e. the cooker instance,
they may well run in a different process or even on a different machine.

"""

class BaseImplServer():
    def __init__(self):
        self._idlefuns = {}

    def addcooker(self, cooker):
        self.cooker = cooker

    def register_idle_function(self, function, data):
        """Register a function to be called while the server is idle"""
        assert hasattr(function, '__call__')
        self._idlefuns[function] = data


""" BitBakeBaseServerConnection class is the common ancestor to all
BitBakeServerConnection classes.

These classes control the remote server. The only command currently
implemented is the terminate() command.

"""

class BitBakeBaseServerConnection():
    def __init__(self, serverImpl):
        pass

    def terminate(self):
        pass

    def setupEventQueue(self):
        pass


""" BitBakeBaseServer class is the common ancestor to all Bitbake servers

Derive this class in order to implement a BitBakeServer which is the
controlling stub for the actual server implementation

"""
class BitBakeBaseServer(object):
    def initServer(self):
        self.serverImpl = None # we ensure a runtime crash if not overloaded
        self.connection = None
        return

    def addcooker(self, cooker):
        self.cooker = cooker
        self.serverImpl.addcooker(cooker)

    def getServerIdleCB(self):
        return self.serverImpl.register_idle_function

    def saveConnectionDetails(self):
        return

    def detach(self):
        return

    def establishConnection(self, featureset):
        raise NotImplementedError("Must redefine %s.establishConnection()" % self.__class__.__name__)

    def endSession(self):
        self.connection.terminate()
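The contract these base classes sketch is: a concrete server derives BitBakeBaseServer, supplies a serverImpl built on BaseImplServer, and returns a connection object from establishConnection(). A minimal hypothetical illustration, assuming the classes above are importable:

    class EchoImplServer(BaseImplServer):
        def run_forever(self):              # hypothetical main loop
            pass

    class EchoServer(BitBakeBaseServer):
        def initServer(self):
            self.serverImpl = EchoImplServer()
            self.connection = None

        def establishConnection(self, featureset):
            self.connection = BitBakeBaseServerConnection(self.serverImpl)
            return self.connection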
@@ -22,261 +22,115 @@

import bb
import bb.event
import itertools
import logging
import multiprocessing
import threading
import array
import os
import signal
import sys
import time
import select
import socket
import subprocess
import errno
import re
import datetime
import bb.server.xmlrpcserver
from bb import daemonize
from multiprocessing import queues
from queue import Empty
from multiprocessing import Event, Process, util, Queue, Pipe, queues, Manager

from . import BitBakeBaseServer, BitBakeBaseServerConnection, BaseImplServer

logger = logging.getLogger('BitBake')

class ProcessTimeout(SystemExit):
    pass
class ServerCommunicator():
    def __init__(self, connection, event_handle, server):
        self.connection = connection
        self.event_handle = event_handle
        self.server = server

class ProcessServer(multiprocessing.Process):
    def runCommand(self, command):
        # @todo try/except
        self.connection.send(command)

        if not self.server.is_alive():
            raise SystemExit

        while True:
            # don't let the user ctrl-c while we're waiting for a response
            try:
                for idx in range(0,4): # 0, 1, 2, 3
                    if self.connection.poll(5):
                        return self.connection.recv()
                    else:
                        bb.warn("Timeout while attempting to communicate with bitbake server")
                bb.fatal("Gave up; Too many tries: timeout while attempting to communicate with bitbake server")
            except KeyboardInterrupt:
                pass

    def getEventHandle(self):
        return self.event_handle.value

class EventAdapter():
    """
    Adapter to wrap our event queue since the caller (bb.event) expects to
    call a send() method, but our actual queue only has put()
    """
    def __init__(self, queue):
        self.queue = queue

    def send(self, event):
        try:
            self.queue.put(event)
        except Exception as err:
            print("EventAdapter puked: %s" % str(err))
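The client-side runCommand() above polls the connection four times for five seconds each (about 20 seconds total) before giving up. The same pattern in isolation, using a multiprocessing Pipe as a toy channel rather than the BitBake wire protocol:

    import multiprocessing

    parent, child = multiprocessing.Pipe()
    child.send("pong")
    for attempt in range(4):
        if parent.poll(5):          # wait up to 5 seconds per attempt
            print(parent.recv())    # -> pong
            break
    else:
        raise SystemExit("gave up after 4 poll attempts")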
class ProcessServer(Process, BaseImplServer):
    profile_filename = "profile.log"
    profile_processed_filename = "profile.log.processed"

    def __init__(self, lock, sock, sockname):
        multiprocessing.Process.__init__(self)
        self.command_channel = False
        self.command_channel_reply = False
    def __init__(self, command_channel, event_queue, featurelist):
        BaseImplServer.__init__(self)
        Process.__init__(self)
        self.command_channel = command_channel
        self.event_queue = event_queue
        self.event = EventAdapter(event_queue)
        self.featurelist = featurelist
        self.quit = False
        self.heartbeat_seconds = 1 # default, BB_HEARTBEAT_EVENT will be checked once we have a datastore.
        self.next_heartbeat = time.time()

        self.event_handle = None
        self.haveui = False
        self.lastui = False
        self.xmlrpc = False

        self._idlefuns = {}

        self.bitbake_lock = lock
        self.sock = sock
        self.sockname = sockname

    def register_idle_function(self, function, data):
        """Register a function to be called while the server is idle"""
        assert hasattr(function, '__call__')
        self._idlefuns[function] = data
        self.quitin, self.quitout = Pipe()
        self.event_handle = multiprocessing.Value("i")

    def run(self):
        for event in bb.event.ui_queue:
            self.event_queue.put(event)
        self.event_handle.value = bb.event.register_UIHhandler(self, True)

        if self.xmlrpcinterface[0]:
            self.xmlrpc = bb.server.xmlrpcserver.BitBakeXMLRPCServer(self.xmlrpcinterface, self.cooker, self)

            print("Bitbake XMLRPC server address: %s, server port: %s" % (self.xmlrpc.host, self.xmlrpc.port))

        heartbeat_event = self.cooker.data.getVar('BB_HEARTBEAT_EVENT')
        if heartbeat_event:
            try:
                self.heartbeat_seconds = float(heartbeat_event)
            except:
                bb.warn('Ignoring invalid BB_HEARTBEAT_EVENT=%s, must be a float specifying seconds.' % heartbeat_event)

        self.timeout = self.server_timeout or self.cooker.data.getVar('BB_SERVER_TIMEOUT')
        try:
            if self.timeout:
                self.timeout = float(self.timeout)
        except:
            bb.warn('Ignoring invalid BB_SERVER_TIMEOUT=%s, must be a float specifying seconds.' % self.timeout)

        try:
            self.bitbake_lock.seek(0)
            self.bitbake_lock.truncate()
            if self.xmlrpc:
                self.bitbake_lock.write("%s %s:%s\n" % (os.getpid(), self.xmlrpc.host, self.xmlrpc.port))
            else:
                self.bitbake_lock.write("%s\n" % (os.getpid()))
            self.bitbake_lock.flush()
        except Exception as e:
            print("Error writing to lock file: %s" % str(e))
            pass

        if self.cooker.configuration.profile:
            try:
                import cProfile as profile
            except:
                import profile
            prof = profile.Profile()

            ret = profile.Profile.runcall(prof, self.main)

            prof.dump_stats("profile.log")
            bb.utils.process_profilelog("profile.log")
            print("Raw profiling information saved to profile.log and processed statistics to profile.log.processed")

        else:
            ret = self.main()

        return ret
        bb.cooker.server_main(self.cooker, self.main)

    def main(self):
        self.cooker.pre_serve()

        # Ignore SIGINT within the server, as all SIGINT handling is done by
        # the UI and communicated to us
        self.quitin.close()
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        bb.utils.set_process_name("Cooker")

        ready = []
        newconnections = []

        self.controllersock = False
        fds = [self.sock]
        if self.xmlrpc:
            fds.append(self.xmlrpc)
        print("Entering server connection loop")

        def disconnect_client(self, fds):
            print("Disconnecting Client")
            if self.controllersock:
                fds.remove(self.controllersock)
                self.controllersock.close()
                self.controllersock = False
            if self.haveui:
                fds.remove(self.command_channel)
                bb.event.unregister_UIHhandler(self.event_handle, True)
                self.command_channel_reply.writer.close()
                self.event_writer.writer.close()
                self.command_channel.close()
                self.command_channel = False
                del self.event_writer
                self.lastui = time.time()
                self.cooker.clientComplete()
                self.haveui = False
            ready = select.select(fds,[],[],0)[0]
            if newconnections:
                print("Starting new client")
                conn = newconnections.pop(-1)
                fds.append(conn)
                self.controllersock = conn
            elif self.timeout is None and not ready:
                print("No timeout, exiting.")
                self.quit = True

        while not self.quit:
            if self.sock in ready:
                while select.select([self.sock],[],[],0)[0]:
                    controllersock, address = self.sock.accept()
                    if self.controllersock:
                        print("Queuing %s (%s)" % (str(ready), str(newconnections)))
                        newconnections.append(controllersock)
                    else:
                        print("Accepting %s (%s)" % (str(ready), str(newconnections)))
                        self.controllersock = controllersock
                        fds.append(controllersock)
            if self.controllersock in ready:
                try:
                    print("Processing Client")
                    ui_fds = recvfds(self.controllersock, 3)
                    print("Connecting Client")

                    # Where to write events to
                    writer = ConnectionWriter(ui_fds[0])
                    self.event_handle = bb.event.register_UIHhandler(writer, True)
                    self.event_writer = writer

                    # Where to read commands from
                    reader = ConnectionReader(ui_fds[1])
                    fds.append(reader)
                    self.command_channel = reader

                    # Where to send command return values to
                    writer = ConnectionWriter(ui_fds[2])
                    self.command_channel_reply = writer

                    self.haveui = True

                except (EOFError, OSError):
                    disconnect_client(self, fds)

            if not self.timeout == -1.0 and not self.haveui and self.lastui and self.timeout and \
                    (self.lastui + self.timeout) < time.time():
                print("Server timeout, exiting.")
                self.quit = True

            if self.command_channel in ready:
                try:
                    command = self.command_channel.get()
                except EOFError:
                    # Client connection shutting down
                    ready = []
                    disconnect_client(self, fds)
                    continue
                if command[0] == "terminateServer":
                try:
                    if self.command_channel.poll():
                        command = self.command_channel.recv()
                        self.runCommand(command)
                    if self.quitout.poll():
                        self.quitout.recv()
                        self.quit = True
                        continue
                    try:
                        print("Running command %s" % command)
                        self.command_channel_reply.send(self.cooker.command.runCommand(command))
                    except Exception as e:
                        logger.exception('Exception in server main event loop running command %s (%s)' % (command, str(e)))

            if self.xmlrpc in ready:
                self.xmlrpc.handle_requests()

            ready = self.idle_commands(.1, fds)

        print("Exiting")
        # Remove the socket file so we don't get any more connections to avoid races
        os.unlink(self.sockname)
        self.sock.close()

        try:
            self.cooker.shutdown(True)
            self.cooker.notifier.stop()
            self.cooker.confignotifier.stop()
        except:
            pass

        self.cooker.post_serve()

        # Finally release the lockfile but warn about other processes holding it open
        lock = self.bitbake_lock
        lockfile = lock.name
        lock.close()
        lock = None

        while not lock:
            with bb.utils.timeout(3):
                lock = bb.utils.lockfile(lockfile, shared=False, retry=False, block=True)
                if lock:
                    # We hold the lock so we can remove the file (hide stale pid data)
                    bb.utils.remove(lockfile)
                    bb.utils.unlockfile(lock)
                    return

            if not lock:
                # Some systems may not have lsof available
                procs = None
                try:
                    procs = subprocess.check_output(["lsof", '-w', lockfile], stderr=subprocess.STDOUT)
                except OSError as e:
                    if e.errno != errno.ENOENT:
                        raise
                if procs is None:
                    # Fall back to fuser if lsof is unavailable
                    try:
                        procs = subprocess.check_output(["fuser", '-v', lockfile], stderr=subprocess.STDOUT)
                    except OSError as e:
                        if e.errno != errno.ENOENT:
                            raise
                self.runCommand(["stateForceShutdown"])
            except:
                pass

                msg = "Delaying shutdown due to active processes which appear to be holding bitbake.lock"
                if procs:
                    msg += ":\n%s" % str(procs)
                print(msg)
            self.idle_commands(.1, [self.command_channel, self.quitout])
        except Exception:
            logger.exception('Running command %s', command)

        self.event_queue.close()
        bb.event.unregister_UIHhandler(self.event_handle.value)
        self.command_channel.close()
        self.cooker.shutdown(True)
        self.quitout.close()

    def idle_commands(self, delay, fds=None):
        nextsleep = delay
@@ -306,361 +160,109 @@ class ProcessServer(multiprocessing.Process):
                del self._idlefuns[function]
                self.quit = True

        # Create new heartbeat event?
        now = time.time()
        if now >= self.next_heartbeat:
            # We might have missed heartbeats. Just trigger once in
            # that case and continue after the usual delay.
            self.next_heartbeat += self.heartbeat_seconds
            if self.next_heartbeat <= now:
                self.next_heartbeat = now + self.heartbeat_seconds
            heartbeat = bb.event.HeartbeatEvent(now)
            bb.event.fire(heartbeat, self.cooker.data)
        if nextsleep and now + nextsleep > self.next_heartbeat:
            # Shorten timeout so that we wake up in time for
            # the heartbeat.
            nextsleep = self.next_heartbeat - now

        if nextsleep is not None:
            if self.xmlrpc:
                nextsleep = self.xmlrpc.get_timeout(nextsleep)
            try:
                return select.select(fds,[],[],nextsleep)[0]
            except InterruptedError:
                # Ignore EINTR
                return []
        else:
            return select.select(fds,[],[],0)[0]


class ServerCommunicator():
    def __init__(self, connection, recv):
        self.connection = connection
        self.recv = recv
        select.select(fds,[],[],nextsleep)

    def runCommand(self, command):
        self.connection.send(command)
        if not self.recv.poll(30):
            raise ProcessTimeout("Timeout while waiting for a reply from the bitbake server")
        return self.recv.get()
        """
        Run a cooker command on the server
        """
        self.command_channel.send(self.cooker.command.runCommand(command))

    def updateFeatureSet(self, featureset):
        _, error = self.runCommand(["setFeatures", featureset])
    def stop(self):
        self.quitin.send("quit")
        self.quitin.close()

class BitBakeProcessServerConnection(BitBakeBaseServerConnection):
    def __init__(self, serverImpl, ui_channel, event_queue):
        self.procserver = serverImpl
        self.ui_channel = ui_channel
        self.event_queue = event_queue
        self.connection = ServerCommunicator(self.ui_channel, self.procserver.event_handle, self.procserver)
        self.events = self.event_queue
        self.terminated = False

    def sigterm_terminate(self):
        bb.error("UI received SIGTERM")
        self.terminate()

    def terminate(self):
        if self.terminated:
            return
        self.terminated = True
        def flushevents():
            while True:
                try:
                    event = self.event_queue.get(block=False)
                except (Empty, IOError):
                    break
                if isinstance(event, logging.LogRecord):
                    logger.handle(event)

        signal.signal(signal.SIGINT, signal.SIG_IGN)
        self.procserver.stop()

        while self.procserver.is_alive():
            flushevents()
            self.procserver.join(0.1)

        self.ui_channel.close()
        self.event_queue.close()
        self.event_queue.setexit()

# Wrap Queue to provide API which isn't server implementation specific
class ProcessEventQueue(multiprocessing.queues.Queue):
    def __init__(self, maxsize):
        multiprocessing.queues.Queue.__init__(self, maxsize, ctx=multiprocessing.get_context())
        self.exit = False
        bb.utils.set_process_name("ProcessEQueue")

    def setexit(self):
        self.exit = True

    def waitEvent(self, timeout):
        if self.exit:
            return self.getEvent()
        try:
            if not self.server.is_alive():
                return self.getEvent()
            return self.get(True, timeout)
        except Empty:
            return None

    def getEvent(self):
        try:
            if not self.server.is_alive():
                self.setexit()
            return self.get(False)
        except Empty:
            if self.exit:
                sys.exit(1)
            return None


class BitBakeServer(BitBakeBaseServer):
    def initServer(self, single_use=True):
        # establish communication channels. We use bidirectional pipes for
        # ui <--> server command/response pairs
        # and a queue for server -> ui event notifications
        #
        self.ui_channel, self.server_channel = Pipe()
        self.event_queue = ProcessEventQueue(0)
        self.serverImpl = ProcessServer(self.server_channel, self.event_queue, None)
        self.event_queue.server = self.serverImpl

    def detach(self):
        self.serverImpl.start()
        return

    def establishConnection(self, featureset):

        self.connection = BitBakeProcessServerConnection(self.serverImpl, self.ui_channel, self.event_queue)

        _, error = self.connection.connection.runCommand(["setFeatures", featureset])
        if error:
            logger.error("Unable to set the cooker to the correct featureset: %s" % error)
            raise BaseException(error)

    def getEventHandle(self):
        handle, error = self.runCommand(["getUIHandlerNum"])
        if error:
            logger.error("Unable to get UI Handler Number: %s" % error)
            raise BaseException(error)

        return handle

    def terminateServer(self):
        self.connection.send(['terminateServer'])
        return
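The heartbeat scheduling in idle_commands() advances the deadline by one period, but if the server fell far behind it fires a single event and re-anchors to the current time instead of firing once per missed beat. The arithmetic in isolation:

    import time

    heartbeat_seconds = 1.0
    next_heartbeat = time.time() - 5    # pretend several beats were missed
    now = time.time()
    if now >= next_heartbeat:
        next_heartbeat += heartbeat_seconds
        if next_heartbeat <= now:       # still behind: skip the backlog
            next_heartbeat = now + heartbeat_seconds
        print("fire exactly one heartbeat at", now)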
class BitBakeProcessServerConnection(object):
    def __init__(self, ui_channel, recv, eq, sock):
        self.connection = ServerCommunicator(ui_channel, recv)
        self.events = eq
        # Save sock so it doesn't get gc'd for the life of our connection
        self.socket_connection = sock

    def terminate(self):
        self.socket_connection.close()
        self.connection.connection.close()
        self.connection.recv.close()
        return

class BitBakeServer(object):
    start_log_format = '--- Starting bitbake server pid %s at %s ---'
    start_log_datetime_format = '%Y-%m-%d %H:%M:%S.%f'

    def __init__(self, lock, sockname, configuration, featureset):

        self.configuration = configuration
        self.featureset = featureset
        self.sockname = sockname
        self.bitbake_lock = lock
        self.readypipe, self.readypipein = os.pipe()

        # Create server control socket
        if os.path.exists(sockname):
            os.unlink(sockname)

        # Place the log in the build directory alongside the lock file
        logfile = os.path.join(os.path.dirname(self.bitbake_lock.name), "bitbake-cookerdaemon.log")

        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        # AF_UNIX has path length issues so chdir here to work around them
        cwd = os.getcwd()
        try:
            os.chdir(os.path.dirname(sockname))
            self.sock.bind(os.path.basename(sockname))
        finally:
            os.chdir(cwd)
        self.sock.listen(1)

        os.set_inheritable(self.sock.fileno(), True)
        startdatetime = datetime.datetime.now()
        bb.daemonize.createDaemon(self._startServer, logfile)
        self.sock.close()
        self.bitbake_lock.close()
        os.close(self.readypipein)

        ready = ConnectionReader(self.readypipe)
        r = ready.poll(5)
        if not r:
            bb.note("Bitbake server didn't start within 5 seconds, waiting for 90")
            r = ready.poll(90)
        if r:
            try:
                r = ready.get()
            except EOFError:
                # Trap the child exiting/closing the pipe and error out
                r = None
        if not r or r[0] != "r":
            ready.close()
            bb.error("Unable to start bitbake server (%s)" % str(r))
            if os.path.exists(logfile):
                logstart_re = re.compile(self.start_log_format % ('([0-9]+)', '([0-9-]+ [0-9:.]+)'))
                started = False
                lines = []
                lastlines = []
                with open(logfile, "r") as f:
                    for line in f:
                        if started:
                            lines.append(line)
                        else:
                            lastlines.append(line)
                            res = logstart_re.match(line.rstrip())
                            if res:
                                ldatetime = datetime.datetime.strptime(res.group(2), self.start_log_datetime_format)
                                if ldatetime >= startdatetime:
                                    started = True
                                    lines.append(line)
                            if len(lastlines) > 60:
                                lastlines = lastlines[-60:]
                if lines:
                    if len(lines) > 60:
                        bb.error("Last 60 lines of server log for this session (%s):\n%s" % (logfile, "".join(lines[-60:])))
                    else:
                        bb.error("Server log for this session (%s):\n%s" % (logfile, "".join(lines)))
                elif lastlines:
                    bb.error("Server didn't start, last 60 loglines (%s):\n%s" % (logfile, "".join(lastlines)))
            else:
                bb.error("%s doesn't exist" % logfile)

            raise SystemExit(1)

        ready.close()

    def _startServer(self):
        print(self.start_log_format % (os.getpid(), datetime.datetime.now().strftime(self.start_log_datetime_format)))
        sys.stdout.flush()

        server = ProcessServer(self.bitbake_lock, self.sock, self.sockname)
        self.configuration.setServerRegIdleCallback(server.register_idle_function)
        os.close(self.readypipe)
        writer = ConnectionWriter(self.readypipein)
        self.cooker = bb.cooker.BBCooker(self.configuration, self.featureset)
        writer.send("r")
        writer.close()
        server.cooker = self.cooker
        server.server_timeout = self.configuration.server_timeout
        server.xmlrpcinterface = self.configuration.xmlrpcinterface
        print("Started bitbake server pid %d" % os.getpid())
        sys.stdout.flush()

        server.start()

def connectProcessServer(sockname, featureset):
    # Connect to socket
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    # AF_UNIX has path length issues so chdir here to work around them
    cwd = os.getcwd()

    readfd = writefd = readfd1 = writefd1 = readfd2 = writefd2 = None
    eq = command_chan_recv = command_chan = None

    sock.settimeout(10)

    try:
        try:
            os.chdir(os.path.dirname(sockname))
            finished = False
            while not finished:
                try:
                    sock.connect(os.path.basename(sockname))
                    finished = True
                except IOError as e:
                    if e.errno == errno.EWOULDBLOCK:
                        pass
                    raise
        finally:
            os.chdir(cwd)

        # Send an fd for the remote to write events to
        readfd, writefd = os.pipe()
        eq = BBUIEventQueue(readfd)
        # Send an fd for the remote to receive commands from
        readfd1, writefd1 = os.pipe()
        command_chan = ConnectionWriter(writefd1)
        # Send an fd for the remote to write command results to
        readfd2, writefd2 = os.pipe()
        command_chan_recv = ConnectionReader(readfd2)

        sendfds(sock, [writefd, readfd1, writefd2])

        server_connection = BitBakeProcessServerConnection(command_chan, command_chan_recv, eq, sock)

        # Close the ends of the pipes we won't use
        for i in [writefd, readfd1, writefd2]:
            os.close(i)

        server_connection.connection.updateFeatureSet(featureset)

    except (Exception, SystemExit) as e:
        if command_chan_recv:
            command_chan_recv.close()
        if command_chan:
            command_chan.close()
        for i in [writefd, readfd1, writefd2]:
            try:
                if i:
                    os.close(i)
            except OSError:
                pass
        sock.close()
        raise

    return server_connection

def sendfds(sock, fds):
    '''Send an array of fds over an AF_UNIX socket.'''
    fds = array.array('i', fds)
    msg = bytes([len(fds) % 256])
    sock.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)])

def recvfds(sock, size):
    '''Receive an array of fds over an AF_UNIX socket.'''
    a = array.array('i')
    bytes_size = a.itemsize * size
    msg, ancdata, flags, addr = sock.recvmsg(1, socket.CMSG_LEN(bytes_size))
    if not msg and not ancdata:
        raise EOFError
    try:
        if len(ancdata) != 1:
            raise RuntimeError('received %d items of ancdata' %
                               len(ancdata))
        cmsg_level, cmsg_type, cmsg_data = ancdata[0]
        if (cmsg_level == socket.SOL_SOCKET and
            cmsg_type == socket.SCM_RIGHTS):
            if len(cmsg_data) % a.itemsize != 0:
                raise ValueError
            a.frombytes(cmsg_data)
            assert len(a) % 256 == msg[0]
            return list(a)
    except (ValueError, IndexError):
        pass
    raise RuntimeError('Invalid data received')
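sendfds() and recvfds() move the descriptors themselves via SCM_RIGHTS, so the receiver gets fresh fd numbers that refer to the same underlying pipes. A round-trip over a socketpair, assuming the two helpers above are in scope:

    import os, socket

    a, b = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
    r, w = os.pipe()
    sendfds(a, [r, w])
    new_r, new_w = recvfds(b, 2)    # duplicates of r and w
    os.write(new_w, b"hello")
    print(os.read(new_r, 5))        # -> b'hello'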
class BBUIEventQueue:
    def __init__(self, readfd):

        self.eventQueue = []
        self.eventQueueLock = threading.Lock()
        self.eventQueueNotify = threading.Event()

        self.reader = ConnectionReader(readfd)

        self.t = threading.Thread()
        self.t.setDaemon(True)
        self.t.run = self.startCallbackHandler
        self.t.start()

    def getEvent(self):
        self.eventQueueLock.acquire()

        if len(self.eventQueue) == 0:
            self.eventQueueLock.release()
            return None

        item = self.eventQueue.pop(0)

        if len(self.eventQueue) == 0:
            self.eventQueueNotify.clear()

        self.eventQueueLock.release()
        return item

    def waitEvent(self, delay):
        self.eventQueueNotify.wait(delay)
        return self.getEvent()

    def queue_event(self, event):
        self.eventQueueLock.acquire()
        self.eventQueue.append(event)
        self.eventQueueNotify.set()
        self.eventQueueLock.release()

    def send_event(self, event):
        self.queue_event(pickle.loads(event))

    def startCallbackHandler(self):
        bb.utils.set_process_name("UIEventQueue")
        while True:
            try:
                self.reader.wait()
                event = self.reader.get()
                self.queue_event(event)
            except EOFError:
                # Easiest way to exit is to close the file descriptor to cause an exit
                break
        self.reader.close()

class ConnectionReader(object):

    def __init__(self, fd):
        self.reader = multiprocessing.connection.Connection(fd, writable=False)
        self.rlock = multiprocessing.Lock()

    def wait(self, timeout=None):
        return multiprocessing.connection.wait([self.reader], timeout)

    def poll(self, timeout=None):
        return self.reader.poll(timeout)

    def get(self):
        with self.rlock:
            res = self.reader.recv_bytes()
        return multiprocessing.reduction.ForkingPickler.loads(res)

    def fileno(self):
        return self.reader.fileno()

    def close(self):
        return self.reader.close()


class ConnectionWriter(object):

    def __init__(self, fd):
        self.writer = multiprocessing.connection.Connection(fd, readable=False)
        self.wlock = multiprocessing.Lock()
        # Why bb.event needs this I have no idea
        self.event = self

    def send(self, obj):
        obj = multiprocessing.reduction.ForkingPickler.dumps(obj)
        with self.wlock:
            self.writer.send_bytes(obj)

    def fileno(self):
        return self.writer.fileno()

    def close(self):
        return self.writer.close()
        signal.signal(signal.SIGTERM, lambda i, s: self.connection.sigterm_terminate())
        return self.connection
bitbake/lib/bb/server/xmlrpc.py (new file, 422 lines)
@@ -0,0 +1,422 @@
#
# BitBake XMLRPC Server
#
# Copyright (C) 2006 - 2007 Michael 'Mickey' Lauer
# Copyright (C) 2006 - 2008 Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

"""
This module implements an xmlrpc server for BitBake.

Use this by deriving a class from BitBakeXMLRPCServer and then adding
methods which you want to "export" via XMLRPC. If the methods have the
prefix xmlrpc_, then registering those functions will happen automatically;
if not, you need to call register_function.

Use register_idle_function() to add a function which the xmlrpc server
calls from within serve_forever when no requests are pending. Make sure
that those functions are non-blocking, or else you will introduce latency
in the server's main loop.
"""
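A minimal sketch of the export convention the docstring describes; the subclass and method names are hypothetical, and note that the code below actually exports the methods of BitBakeServerCommands via autoregister_all_functions() with an empty prefix:

    class MyServer(BitBakeXMLRPCServer):    # class name taken from the docstring
        def xmlrpc_ping(self):              # would be auto-registered as "ping"
            return "pong"

        def helper(self):                   # no prefix: needs register_function()
            return 42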
import os
import sys

import hashlib
import time
import socket
import signal
import threading
import pickle
import inspect
import select
import http.client
import xmlrpc.client
from xmlrpc.server import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler

import bb
from bb import daemonize
from bb.ui import uievent
from . import BitBakeBaseServer, BitBakeBaseServerConnection, BaseImplServer

DEBUG = False

class BBTransport(xmlrpc.client.Transport):
    def __init__(self, timeout):
        self.timeout = timeout
        self.connection_token = None
        xmlrpc.client.Transport.__init__(self)

    # Modified from default to pass timeout to HTTPConnection
    def make_connection(self, host):
        #return an existing connection if possible. This allows
        #HTTP/1.1 keep-alive.
        if self._connection and host == self._connection[0]:
            return self._connection[1]

        # create a HTTP connection object from a host descriptor
        chost, self._extra_headers, x509 = self.get_host_info(host)
        #store the host argument along with the connection object
        self._connection = host, http.client.HTTPConnection(chost, timeout=self.timeout)
        return self._connection[1]

    def set_connection_token(self, token):
        self.connection_token = token

    def send_content(self, h, body):
        if self.connection_token:
            h.putheader("Bitbake-token", self.connection_token)
        xmlrpc.client.Transport.send_content(self, h, body)

def _create_server(host, port, timeout = 60):
    t = BBTransport(timeout)
    s = xmlrpc.client.ServerProxy("http://%s:%d/" % (host, port), transport=t, allow_none=True, use_builtin_types=True)
    return s, t

def check_connection(remote, timeout):
    try:
        host, port = remote.split(":")
        port = int(port)
    except Exception as e:
        bb.warn("Failed to read remote definition (%s)" % str(e))
        raise e

    server, _transport = _create_server(host, port, timeout)
    try:
        ret, err = server.runCommand(['getVariable', 'TOPDIR'])
        if err or not ret:
            return False
    except ConnectionError:
        return False
    return True
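check_connection() probes a server by issuing the cheapest possible command and treating any failure as unreachable. Used like this ("localhost:8000" is a placeholder address):

    if check_connection("localhost:8000", timeout=5):
        print("bitbake server is up")
    else:
        print("no server listening")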
class BitBakeServerCommands():

    def __init__(self, server):
        self.server = server
        self.has_client = False

    def registerEventHandler(self, host, port):
        """
        Register a remote UI Event Handler
        """
        s, t = _create_server(host, port)

        # we don't allow connections if the cooker is running
        if (self.cooker.state in [bb.cooker.state.parsing, bb.cooker.state.running]):
            return None, "Cooker is busy: %s" % bb.cooker.state.get_name(self.cooker.state)

        self.event_handle = bb.event.register_UIHhandler(s, True)
        return self.event_handle, 'OK'

    def unregisterEventHandler(self, handlerNum):
        """
        Unregister a remote UI Event Handler
        """
        return bb.event.unregister_UIHhandler(handlerNum)

    def runCommand(self, command):
        """
        Run a cooker command on the server
        """
        return self.cooker.command.runCommand(command, self.server.readonly)

    def getEventHandle(self):
        return self.event_handle

    def terminateServer(self):
        """
        Trigger the server to quit
        """
        self.server.quit = True
        print("Server (cooker) exiting")
        return

    def addClient(self):
        if self.has_client:
            return None
        token = hashlib.md5(str(time.time()).encode("utf-8")).hexdigest()
        self.server.set_connection_token(token)
        self.has_client = True
        return token

    def removeClient(self):
        if self.has_client:
            self.server.set_connection_token(None)
            self.has_client = False
            if self.server.single_use:
                self.server.quit = True

# This request handler checks if the request has a "Bitbake-token" header
# field (this comes from the client side) and compares it with its internal
# "Bitbake-token" field (this comes from the server). If the two are not
# equal, it is assumed that a client is trying to connect to the server
# while another client is connected to the server. In this case, a 503 error
# ("service unavailable") is returned to the client.
class BitBakeXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
    def __init__(self, request, client_address, server):
        self.server = server
        SimpleXMLRPCRequestHandler.__init__(self, request, client_address, server)

    def do_POST(self):
        try:
            remote_token = self.headers["Bitbake-token"]
        except:
            remote_token = None
        if remote_token != self.server.connection_token and remote_token != "observer":
            self.report_503()
        else:
            if remote_token == "observer":
                self.server.readonly = True
            else:
                self.server.readonly = False
            SimpleXMLRPCRequestHandler.do_POST(self)

    def report_503(self):
        self.send_response(503)
        response = 'No more clients allowed'
        self.send_header("Content-type", "text/plain")
        self.send_header("Content-length", str(len(response)))
        self.end_headers()
        self.wfile.write(response.encode('utf-8'))
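On the client side of the token check above, the token handed out by addClient() is replayed on every request through BBTransport; a missing or stale token lands in the 503 path. A minimal sketch reusing _create_server() (placeholder address, no error handling):

    server, transport = _create_server("localhost", 8000)
    token = server.addClient()          # None if another client is attached
    if token is not None:
        transport.set_connection_token(token)   # later calls carry the header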
class XMLRPCProxyServer(BaseImplServer):
    """ not a real working server, but a stub for a proxy server connection

    """
    def __init__(self, host, port, use_builtin_types=True):
        self.host = host
        self.port = port

class XMLRPCServer(SimpleXMLRPCServer, BaseImplServer):
    # remove this when you're done with debugging
    # allow_reuse_address = True

    def __init__(self, interface, single_use=False, idle_timeout=0):
        """
        Constructor
        """
        BaseImplServer.__init__(self)
        self.single_use = single_use
        # Use auto port configuration
        if (interface[1] == -1):
            interface = (interface[0], 0)
        SimpleXMLRPCServer.__init__(self, interface,
                                    requestHandler=BitBakeXMLRPCRequestHandler,
                                    logRequests=False, allow_none=True)
        self.host, self.port = self.socket.getsockname()
        self.connection_token = None
        #self.register_introspection_functions()
        self.commands = BitBakeServerCommands(self)
        self.autoregister_all_functions(self.commands, "")
        self.interface = interface
        self.time = time.time()
        self.idle_timeout = idle_timeout
        if idle_timeout:
            self.register_idle_function(self.handle_idle_timeout, self)

    def addcooker(self, cooker):
        BaseImplServer.addcooker(self, cooker)
        self.commands.cooker = cooker

    def autoregister_all_functions(self, context, prefix):
        """
        Convenience method for registering all functions in the scope
        of this class that start with a common prefix
        """
        methodlist = inspect.getmembers(context, inspect.ismethod)
        for name, method in methodlist:
            if name.startswith(prefix):
                self.register_function(method, name[len(prefix):])

    def handle_idle_timeout(self, server, data, abort):
        if not abort:
            if time.time() - server.time > server.idle_timeout:
                server.quit = True
                print("Server idle timeout expired")
        return []

    def serve_forever(self):
        # Start the actual XMLRPC server
        bb.cooker.server_main(self.cooker, self._serve_forever)

    def _serve_forever(self):
        """
        Serve Requests. Overloaded to honor a quit command
        """
        self.quit = False
        while not self.quit:
            fds = [self]
            nextsleep = 0.1
            for function, data in list(self._idlefuns.items()):
                retval = None
                try:
                    retval = function(self, data, False)
                    if retval is False:
                        del self._idlefuns[function]
                    elif retval is True:
                        nextsleep = 0
                    elif isinstance(retval, float):
                        if (retval < nextsleep):
                            nextsleep = retval
                    else:
                        fds = fds + retval
                except SystemExit:
                    raise
                except:
                    import traceback
                    traceback.print_exc()
                    if retval == None:
                        # the function execution failed; delete it
                        del self._idlefuns[function]
                    pass

            socktimeout = self.socket.gettimeout() or nextsleep
            socktimeout = min(socktimeout, nextsleep)
            # Mirror what BaseServer handle_request would do
            try:
                fd_sets = select.select(fds, [], [], socktimeout)
                if fd_sets[0] and self in fd_sets[0]:
                    if self.idle_timeout:
                        self.time = time.time()
                    self._handle_request_noblock()
            except IOError:
                # we ignore interrupted calls
                pass

        # Tell idle functions we're exiting
        for function, data in list(self._idlefuns.items()):
            try:
                retval = function(self, data, True)
            except:
                pass
        self.server_close()
        return

    def set_connection_token(self, token):
        self.connection_token = token

class BitBakeXMLRPCServerConnection(BitBakeBaseServerConnection):
    def __init__(self, serverImpl, clientinfo=("localhost", 0), observer_only = False, featureset = None):
        self.connection, self.transport = _create_server(serverImpl.host, serverImpl.port)
        self.clientinfo = clientinfo
        self.serverImpl = serverImpl
        self.observer_only = observer_only
        if featureset:
            self.featureset = featureset
        else:
            self.featureset = []

    def connect(self, token = None):
        if token is None:
            if self.observer_only:
                token = "observer"
            else:
                token = self.connection.addClient()

        if token is None:
            return None

        self.transport.set_connection_token(token)
        return self

    def setupEventQueue(self):
        self.events = uievent.BBUIEventQueue(self.connection, self.clientinfo)
        for event in bb.event.ui_queue:
            self.events.queue_event(event)

        _, error = self.connection.runCommand(["setFeatures", self.featureset])
        if error:
            # disconnect the client, we can't make the setFeature work
            self.connection.removeClient()
            # no need to log it here, the error shall be sent to the client
            raise BaseException(error)

    def removeClient(self):
        if not self.observer_only:
            self.connection.removeClient()

    def terminate(self):
        # Don't wait for server indefinitely
        import socket
        socket.setdefaulttimeout(2)
        try:
            self.events.system_quit()
        except:
            pass
        try:
            self.connection.removeClient()
        except:
            pass

class BitBakeServer(BitBakeBaseServer):
    def initServer(self, interface = ("localhost", 0),
                   single_use = False, idle_timeout=0):
        self.interface = interface
        self.serverImpl = XMLRPCServer(interface, single_use, idle_timeout)

    def detach(self):
        daemonize.createDaemon(self.serverImpl.serve_forever, "bitbake-cookerdaemon.log")
        del self.cooker

    def establishConnection(self, featureset):
        self.connection = BitBakeXMLRPCServerConnection(self.serverImpl, self.interface, False, featureset)
        return self.connection.connect()

    def set_connection_token(self, token):
        self.connection.transport.set_connection_token(token)

class BitBakeXMLRPCClient(BitBakeBaseServer):

    def __init__(self, observer_only = False, token = None):
        self.token = token

        self.observer_only = observer_only
        # if we need extra caches, just tell the server to load them all
        pass

    def saveConnectionDetails(self, remote):
        self.remote = remote

    def establishConnection(self, featureset):
        # The format of "remote" must be "server:port"
        try:
            [host, port] = self.remote.split(":")
            port = int(port)
        except Exception as e:
            bb.warn("Failed to read remote definition (%s)" % str(e))
            raise e

        # We need our IP for the server connection. We get the IP
        # by trying to connect with the server
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            s.connect((host, port))
            ip = s.getsockname()[0]
            s.close()
        except Exception as e:
            bb.warn("Could not create socket for %s:%s (%s)" % (host, port, str(e)))
            raise e
        try:
            self.serverImpl = XMLRPCProxyServer(host, port, use_builtin_types=True)
            self.connection = BitBakeXMLRPCServerConnection(self.serverImpl, (ip, 0), self.observer_only, featureset)
            return self.connection.connect(self.token)
        except Exception as e:
            bb.warn("Could not connect to server at %s:%s (%s)" % (host, port, str(e)))
            raise e

    def endSession(self):
        self.connection.removeClient()
@@ -1,154 +0,0 @@
#
# BitBake XMLRPC Client Interface
#
# Copyright (C) 2006 - 2007  Michael 'Mickey' Lauer
# Copyright (C) 2006 - 2008  Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import os
import sys

import socket
import http.client
import xmlrpc.client

import bb
from bb.ui import uievent

class BBTransport(xmlrpc.client.Transport):
    def __init__(self, timeout):
        self.timeout = timeout
        self.connection_token = None
        xmlrpc.client.Transport.__init__(self)

    # Modified from default to pass timeout to HTTPConnection
    def make_connection(self, host):
        # return an existing connection if possible. This allows
        # HTTP/1.1 keep-alive.
        if self._connection and host == self._connection[0]:
            return self._connection[1]

        # create a HTTP connection object from a host descriptor
        chost, self._extra_headers, x509 = self.get_host_info(host)
        # store the host argument along with the connection object
        self._connection = host, http.client.HTTPConnection(chost, timeout=self.timeout)
        return self._connection[1]

    def set_connection_token(self, token):
        self.connection_token = token

    def send_content(self, h, body):
        if self.connection_token:
            h.putheader("Bitbake-token", self.connection_token)
        xmlrpc.client.Transport.send_content(self, h, body)

def _create_server(host, port, timeout = 60):
    t = BBTransport(timeout)
    s = xmlrpc.client.ServerProxy("http://%s:%d/" % (host, port), transport=t, allow_none=True, use_builtin_types=True)
    return s, t

def check_connection(remote, timeout):
    try:
        host, port = remote.split(":")
        port = int(port)
    except Exception as e:
        bb.warn("Failed to read remote definition (%s)" % str(e))
        raise e

    server, _transport = _create_server(host, port, timeout)
    try:
        ret, err = server.runCommand(['getVariable', 'TOPDIR'])
        if err or not ret:
            return False
    except ConnectionError:
        return False
    return True

class BitBakeXMLRPCServerConnection(object):
    def __init__(self, host, port, clientinfo=("localhost", 0), observer_only = False, featureset = None):
        self.connection, self.transport = _create_server(host, port)
        self.clientinfo = clientinfo
        self.observer_only = observer_only
        if featureset:
            self.featureset = featureset
        else:
            self.featureset = []

        self.events = uievent.BBUIEventQueue(self.connection, self.clientinfo)

        _, error = self.connection.runCommand(["setFeatures", self.featureset])
        if error:
            # disconnect the client, we can't make the setFeature work
            self.connection.removeClient()
            # no need to log it here, the error shall be sent to the client
            raise BaseException(error)

    def connect(self, token = None):
        if token is None:
            if self.observer_only:
                token = "observer"
            else:
                token = self.connection.addClient()

        if token is None:
            return None

        self.transport.set_connection_token(token)
        return self

    def removeClient(self):
        if not self.observer_only:
            self.connection.removeClient()

    def terminate(self):
        # Don't wait for server indefinitely
        socket.setdefaulttimeout(2)
        try:
            self.events.system_quit()
        except:
            pass
        try:
            self.connection.removeClient()
        except:
            pass

def connectXMLRPC(remote, featureset, observer_only = False, token = None):
    # The format of "remote" must be "server:port"
    try:
        [host, port] = remote.split(":")
        port = int(port)
    except Exception as e:
        bb.warn("Failed to parse remote definition %s (%s)" % (remote, str(e)))
        raise e

    # We need our IP for the server connection. We get the IP
    # by trying to connect with the server
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect((host, port))
        ip = s.getsockname()[0]
        s.close()
    except Exception as e:
        bb.warn("Could not create socket for %s:%s (%s)" % (host, port, str(e)))
        raise e
    try:
        connection = BitBakeXMLRPCServerConnection(host, port, (ip, 0), observer_only, featureset)
        return connection.connect(token)
    except Exception as e:
        bb.warn("Could not connect to server at %s:%s (%s)" % (host, port, str(e)))
        raise e
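For orientation, a session against the client API in the file above would have looked roughly like this; the module path, address and feature list are assumptions for illustration, not taken from the diff:

# Hypothetical usage sketch of the deleted client interface above.
import bb.server.xmlrpcclient as xmlrpcclient   # assumed module path

remote = "localhost:8888"                       # placeholder address
if xmlrpcclient.check_connection(remote, timeout=10):
    conn = xmlrpcclient.connectXMLRPC(remote, featureset=[])
    try:
        value, err = conn.connection.runCommand(["getVariable", "TOPDIR"])
        print("TOPDIR =", value)
    finally:
        conn.terminate()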
@@ -1,158 +0,0 @@
#
# BitBake XMLRPC Server Interface
#
# Copyright (C) 2006 - 2007  Michael 'Mickey' Lauer
# Copyright (C) 2006 - 2008  Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import os
import sys

import hashlib
import time
import inspect
from xmlrpc.server import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler

import bb

# This request handler checks if the request has a "Bitbake-token" header
# field (this comes from the client side) and compares it with its internal
# "Bitbake-token" field (this comes from the server). If the two are not
# equal, it is assumed that a client is trying to connect to the server
# while another client is connected to the server. In this case, a 503 error
# ("service unavailable") is returned to the client.
class BitBakeXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
    def __init__(self, request, client_address, server):
        self.server = server
        SimpleXMLRPCRequestHandler.__init__(self, request, client_address, server)

    def do_POST(self):
        try:
            remote_token = self.headers["Bitbake-token"]
        except:
            remote_token = None
        if 0 and remote_token != self.server.connection_token and remote_token != "observer":
            self.report_503()
        else:
            if remote_token == "observer":
                self.server.readonly = True
            else:
                self.server.readonly = False
            SimpleXMLRPCRequestHandler.do_POST(self)

    def report_503(self):
        self.send_response(503)
        response = 'No more client allowed'
        self.send_header("Content-type", "text/plain")
        self.send_header("Content-length", str(len(response)))
        self.end_headers()
        self.wfile.write(bytes(response, 'utf-8'))

class BitBakeXMLRPCServer(SimpleXMLRPCServer):
    # remove this when you're done with debugging
    # allow_reuse_address = True

    def __init__(self, interface, cooker, parent):
        # Use auto port configuration
        if (interface[1] == -1):
            interface = (interface[0], 0)
        SimpleXMLRPCServer.__init__(self, interface,
                                    requestHandler=BitBakeXMLRPCRequestHandler,
                                    logRequests=False, allow_none=True)
        self.host, self.port = self.socket.getsockname()
        self.interface = interface

        self.connection_token = None
        self.commands = BitBakeXMLRPCServerCommands(self)
        self.register_functions(self.commands, "")

        self.cooker = cooker
        self.parent = parent


    def register_functions(self, context, prefix):
        """
        Convenience method for registering all functions in the scope
        of this class that start with a common prefix
        """
        methodlist = inspect.getmembers(context, inspect.ismethod)
        for name, method in methodlist:
            if name.startswith(prefix):
                self.register_function(method, name[len(prefix):])

    def get_timeout(self, delay):
        socktimeout = self.socket.gettimeout() or delay
        return min(socktimeout, delay)

    def handle_requests(self):
        self._handle_request_noblock()

class BitBakeXMLRPCServerCommands():

    def __init__(self, server):
        self.server = server
        self.has_client = False

    def registerEventHandler(self, host, port):
        """
        Register a remote UI Event Handler
        """
        s, t = bb.server.xmlrpcclient._create_server(host, port)

        # we don't allow connections if the cooker is running
        if (self.server.cooker.state in [bb.cooker.state.parsing, bb.cooker.state.running]):
            return None, "Cooker is busy: %s" % bb.cooker.state.get_name(self.server.cooker.state)

        self.event_handle = bb.event.register_UIHhandler(s, True)
        return self.event_handle, 'OK'

    def unregisterEventHandler(self, handlerNum):
        """
        Unregister a remote UI Event Handler
        """
        ret = bb.event.unregister_UIHhandler(handlerNum, True)
        self.event_handle = None
        return ret

    def runCommand(self, command):
        """
        Run a cooker command on the server
        """
        return self.server.cooker.command.runCommand(command, self.server.readonly)

    def getEventHandle(self):
        return self.event_handle

    def terminateServer(self):
        """
        Trigger the server to quit
        """
        self.server.parent.quit = True
        print("XMLRPC Server triggering exit")
        return

    def addClient(self):
        if self.server.parent.haveui:
            return None
        token = hashlib.md5(str(time.time()).encode("utf-8")).hexdigest()
        self.server.connection_token = token
        self.server.parent.haveui = True
        return token

    def removeClient(self):
        if self.server.parent.haveui:
            self.server.connection_token = None
            self.server.parent.haveui = False
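The single-client token scheme implemented by addClient() and do_POST() above is small enough to demonstrate in isolation. A sketch (the function names are illustrative, not BitBake API):

import hashlib
import time

def new_session_token():
    # Same scheme as addClient() above: md5 of the current time.
    # Adequate as a local session nonce, not a cryptographic secret.
    return hashlib.md5(str(time.time()).encode("utf-8")).hexdigest()

def authorize(request_token, server_token):
    # Mirrors the do_POST() logic: "observer" is always accepted but
    # forces read-only mode; anything else must match the server token.
    if request_token == "observer":
        return "readonly"
    return "readwrite" if request_token == server_token else None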
820
bitbake/lib/bb/shell.py
Normal file
@@ -0,0 +1,820 @@
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
##########################################################################
#
# Copyright (C) 2005-2006 Michael 'Mickey' Lauer <mickey@Vanille.de>
# Copyright (C) 2005-2006 Vanille Media
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
##########################################################################
#
# Thanks to:
# * Holger Freyther <zecke@handhelds.org>
# * Justin Patrin <papercrane@reversefold.com>
#
##########################################################################

"""
BitBake Shell

IDEAS:
    * list defined tasks per package
    * list classes
    * toggle force
    * command to reparse just one (or more) bbfile(s)
    * automatic check if reparsing is necessary (inotify?)
    * frontend for bb file manipulation
    * more shell-like features:
        - output control, i.e. pipe output into grep, sort, etc.
        - job control, i.e. bring running commands into background and foreground
    * start parsing in background right after startup
    * ncurses interface

PROBLEMS:
    * force doesn't always work
    * readline completion for commands with more than one parameter

"""

##########################################################################
# Import and setup global variables
##########################################################################

from __future__ import print_function
from functools import reduce
try:
    set
except NameError:
    from sets import Set as set
import sys, os, readline, socket, httplib, urllib, commands, popen2, shlex, Queue, fnmatch
from bb import data, parse, build, cache, taskdata, runqueue, providers as Providers

__version__ = "0.5.3.1"
__credits__ = """BitBake Shell Version %s (C) 2005 Michael 'Mickey' Lauer <mickey@Vanille.de>
Type 'help' for more information, press CTRL-D to exit.""" % __version__

cmds = {}
leave_mainloop = False
last_exception = None
cooker = None
parsed = False
debug = os.environ.get( "BBSHELL_DEBUG", "" )

##########################################################################
# Class BitBakeShellCommands
##########################################################################

class BitBakeShellCommands:
    """This class contains the valid commands for the shell"""

    def __init__( self, shell ):
        """Register all the commands"""
        self._shell = shell
        for attr in BitBakeShellCommands.__dict__:
            if not attr.startswith( "_" ):
                if attr.endswith( "_" ):
                    command = attr[:-1].lower()
                else:
                    command = attr[:].lower()
                method = getattr( BitBakeShellCommands, attr )
                debugOut( "registering command '%s'" % command )
                # scan number of arguments
                usage = getattr( method, "usage", "" )
                if usage != "<...>":
                    numArgs = len( usage.split() )
                else:
                    numArgs = -1
                shell.registerCommand( command, method, numArgs, "%s %s" % ( command, usage ), method.__doc__ )

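The constructor above discovers shell commands by walking the class dictionary and derives each command's argument count from its usage attribute ("<...>" meaning variable arity). The same pattern, reduced to its essentials (the class and command names are invented for this example):

class Commands:
    def greet(self, params):
        """Say hello"""
        print("hello", params[0])
    greet.usage = "<name>"

registry = {}
for attr, method in Commands.__dict__.items():
    if attr.startswith("_") or not callable(method):
        continue
    usage = getattr(method, "usage", "")
    numargs = -1 if usage == "<...>" else len(usage.split())
    registry[attr.lower()] = (method, numargs, usage, method.__doc__)

registry["greet"][0](Commands(), ["world"])   # -> hello world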
    def _checkParsed( self ):
        if not parsed:
            print("SHELL: This command needs to parse bbfiles...")
            self.parse( None )

    def _findProvider( self, item ):
        self._checkParsed()
        # Need to use taskData for this information
        preferred = data.getVar( "PREFERRED_PROVIDER_%s" % item, cooker.configuration.data, 1 )
        if not preferred: preferred = item
        try:
            lv, lf, pv, pf = Providers.findBestProvider(preferred, cooker.configuration.data, cooker.status)
        except KeyError:
            if item in cooker.status.providers:
                pf = cooker.status.providers[item][0]
            else:
                pf = None
        return pf

    def alias( self, params ):
        """Register a new name for a command"""
        new, old = params
        if not old in cmds:
            print("ERROR: Command '%s' not known" % old)
        else:
            cmds[new] = cmds[old]
            print("OK")
    alias.usage = "<alias> <command>"

    def buffer( self, params ):
        """Dump specified output buffer"""
        index = params[0]
        print(self._shell.myout.buffer( int( index ) ))
    buffer.usage = "<index>"

    def buffers( self, params ):
        """Show the available output buffers"""
        commands = self._shell.myout.bufferedCommands()
        if not commands:
            print("SHELL: No buffered commands available yet. Start doing something.")
        else:
            print("="*35, "Available Output Buffers", "="*27)
            for index, cmd in enumerate( commands ):
                print("| %s %s" % ( str( index ).ljust( 3 ), cmd ))
            print("="*88)

    def build( self, params, cmd = "build" ):
        """Build a providee"""
        global last_exception
        globexpr = params[0]
        self._checkParsed()
        names = globfilter( cooker.status.pkg_pn, globexpr )
        if len( names ) == 0: names = [ globexpr ]
        print("SHELL: Building %s" % ' '.join( names ))

        td = taskdata.TaskData(cooker.configuration.abort)
        localdata = data.createCopy(cooker.configuration.data)
        data.update_data(localdata)
        data.expandKeys(localdata)

        try:
            tasks = []
            for name in names:
                td.add_provider(localdata, cooker.status, name)
                providers = td.get_provider(name)

                if len(providers) == 0:
                    raise Providers.NoProvider

                tasks.append([name, "do_%s" % cmd])

            td.add_unresolved(localdata, cooker.status)

            rq = runqueue.RunQueue(cooker, localdata, cooker.status, td, tasks)
            rq.prepare_runqueue()
            rq.execute_runqueue()

        except Providers.NoProvider:
            print("ERROR: No Provider")
            last_exception = Providers.NoProvider

        except runqueue.TaskFailure as fnids:
            last_exception = runqueue.TaskFailure

        except build.FuncFailed as e:
            print("ERROR: Couldn't build '%s'" % names)
            last_exception = e


    build.usage = "<providee>"

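build() above is the shell's whole pipeline in miniature: resolve each target to a provider, queue the tasks, then let the run queue execute them in dependency order. A toy, self-contained illustration of that resolve-then-queue idea (the data and names are invented; this is not the RunQueue API):

from collections import deque

providers = {"image": ["image_1.0.bb"], "libfoo": ["libfoo_2.bb"]}
deps = {"image_1.0.bb": ["libfoo_2.bb"], "libfoo_2.bb": []}

def run_queue(targets):
    # Executes dependencies before dependents; assumes an acyclic graph.
    order, done = [], set()
    queue = deque(providers[t][0] for t in targets)
    while queue:
        recipe = queue.popleft()
        if recipe in done:
            continue
        pending = [d for d in deps[recipe] if d not in done]
        if pending:
            queue.extendleft(pending)   # run these first
            queue.append(recipe)        # retry the recipe afterwards
        else:
            done.add(recipe)
            order.append(recipe)
    return order

print(run_queue(["image"]))   # ['libfoo_2.bb', 'image_1.0.bb']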
    def clean( self, params ):
        """Clean a providee"""
        self.build( params, "clean" )
    clean.usage = "<providee>"

    def compile( self, params ):
        """Execute 'compile' on a providee"""
        self.build( params, "compile" )
    compile.usage = "<providee>"

    def configure( self, params ):
        """Execute 'configure' on a providee"""
        self.build( params, "configure" )
    configure.usage = "<providee>"

    def install( self, params ):
        """Execute 'install' on a providee"""
        self.build( params, "install" )
    install.usage = "<providee>"

    def edit( self, params ):
        """Call $EDITOR on a providee"""
        name = params[0]
        bbfile = self._findProvider( name )
        if bbfile is not None:
            os.system( "%s %s" % ( os.environ.get( "EDITOR", "vi" ), bbfile ) )
        else:
            print("ERROR: Nothing provides '%s'" % name)
    edit.usage = "<providee>"

    def environment( self, params ):
        """Dump out the outer BitBake environment"""
        cooker.showEnvironment()

    def exit_( self, params ):
        """Leave the BitBake Shell"""
        debugOut( "setting leave_mainloop to true" )
        global leave_mainloop
        leave_mainloop = True

    def fetch( self, params ):
        """Fetch a providee"""
        self.build( params, "fetch" )
    fetch.usage = "<providee>"

    def fileBuild( self, params, cmd = "build" ):
        """Parse and build a .bb file"""
        global last_exception
        name = params[0]
        bf = completeFilePath( name )
        print("SHELL: Calling '%s' on '%s'" % ( cmd, bf ))

        try:
            cooker.buildFile(bf, cmd)
        except parse.ParseError:
            print("ERROR: Unable to open or parse '%s'" % bf)
        except build.FuncFailed as e:
            print("ERROR: Couldn't build '%s'" % name)
            last_exception = e

    fileBuild.usage = "<bbfile>"

    def fileClean( self, params ):
        """Clean a .bb file"""
        self.fileBuild( params, "clean" )
    fileClean.usage = "<bbfile>"

    def fileEdit( self, params ):
        """Call $EDITOR on a .bb file"""
        name = params[0]
        os.system( "%s %s" % ( os.environ.get( "EDITOR", "vi" ), completeFilePath( name ) ) )
    fileEdit.usage = "<bbfile>"

    def fileRebuild( self, params ):
        """Rebuild (clean & build) a .bb file"""
        self.fileBuild( params, "rebuild" )
    fileRebuild.usage = "<bbfile>"

    def fileReparse( self, params ):
        """(re)Parse a bb file"""
        bbfile = params[0]
        print("SHELL: Parsing '%s'" % bbfile)
        parse.update_mtime( bbfile )
        cooker.parser.reparse(bbfile)
        if False: #fromCache:
            print("SHELL: File has not been updated, not reparsing")
        else:
            print("SHELL: Parsed")
    fileReparse.usage = "<bbfile>"

    def abort( self, params ):
        """Toggle abort task execution flag (see bitbake -k)"""
        cooker.configuration.abort = not cooker.configuration.abort
        print("SHELL: Abort Flag is now '%s'" % repr( cooker.configuration.abort ))

    def force( self, params ):
        """Toggle force task execution flag (see bitbake -f)"""
        cooker.configuration.force = not cooker.configuration.force
        print("SHELL: Force Flag is now '%s'" % repr( cooker.configuration.force ))

    def help( self, params ):
        """Show a comprehensive list of commands and their purpose"""
        print("="*30, "Available Commands", "="*30)
        for cmd in sorted(cmds):
            function, numparams, usage, helptext = cmds[cmd]
            print("| %s | %s" % (usage.ljust(30), helptext))
        print("="*78)

    def lastError( self, params ):
        """Show the reason or log that was produced by the last BitBake event exception"""
        if last_exception is None:
            print("SHELL: No Errors yet (Phew)...")
        else:
            reason, event = last_exception.args
            print("SHELL: Reason for the last error: '%s'" % reason)
            if ':' in reason:
                msg, filename = reason.split( ':' )
                filename = filename.strip()
                print("SHELL: Dumping log file for last error:")
                try:
                    print(open( filename ).read())
                except IOError:
                    print("ERROR: Couldn't open '%s'" % filename)

    def match( self, params ):
        """Dump all files or providers matching a glob expression"""
        what, globexpr = params
        if what == "files":
            self._checkParsed()
            for key in globfilter( cooker.status.pkg_fn, globexpr ): print(key)
        elif what == "providers":
            self._checkParsed()
            for key in globfilter( cooker.status.pkg_pn, globexpr ): print(key)
        else:
            print("Usage: match %s" % self.print_.usage)
    match.usage = "<files|providers> <glob>"

    def new( self, params ):
        """Create a new .bb file and open the editor"""
        dirname, filename = params
        packages = '/'.join( data.getVar( "BBFILES", cooker.configuration.data, 1 ).split('/')[:-2] )
        fulldirname = "%s/%s" % ( packages, dirname )

        if not os.path.exists( fulldirname ):
            print("SHELL: Creating '%s'" % fulldirname)
            os.mkdir( fulldirname )
        if os.path.exists( fulldirname ) and os.path.isdir( fulldirname ):
            if os.path.exists( "%s/%s" % ( fulldirname, filename ) ):
                print("SHELL: ERROR: %s/%s already exists" % ( fulldirname, filename ))
                return False
            print("SHELL: Creating '%s/%s'" % ( fulldirname, filename ))
            newpackage = open( "%s/%s" % ( fulldirname, filename ), "w" )
            print("""DESCRIPTION = ""
SECTION = ""
AUTHOR = ""
HOMEPAGE = ""
MAINTAINER = ""
LICENSE = "GPL"
PR = "r0"

SRC_URI = ""

#inherit base

#do_configure() {
#
#}

#do_compile() {
#
#}

#do_stage() {
#
#}

#do_install() {
#
#}
""", file=newpackage)
            newpackage.close()
            os.system( "%s %s/%s" % ( os.environ.get( "EDITOR" ), fulldirname, filename ) )
    new.usage = "<directory> <filename>"

    def package( self, params ):
        """Execute 'package' on a providee"""
        self.build( params, "package" )
    package.usage = "<providee>"

    def pasteBin( self, params ):
        """Send a command + output buffer to the pastebin at http://rafb.net/paste"""
        index = params[0]
        contents = self._shell.myout.buffer( int( index ) )
        sendToPastebin( "output of " + params[0], contents )
    pasteBin.usage = "<index>"

    def pasteLog( self, params ):
        """Send the last event exception error log (if there is one) to http://rafb.net/paste"""
        if last_exception is None:
            print("SHELL: No Errors yet (Phew)...")
        else:
            reason, event = last_exception.args
            print("SHELL: Reason for the last error: '%s'" % reason)
            if ':' in reason:
                msg, filename = reason.split( ':' )
                filename = filename.strip()
                print("SHELL: Pasting log file to pastebin...")

                file = open( filename ).read()
                sendToPastebin( "contents of " + filename, file )

    def patch( self, params ):
        """Execute 'patch' command on a providee"""
        self.build( params, "patch" )
    patch.usage = "<providee>"

    def parse( self, params ):
        """(Re-)parse .bb files and calculate the dependency graph"""
        cooker.status = cache.CacheData(cooker.caches_array)
        ignore = data.getVar("ASSUME_PROVIDED", cooker.configuration.data, 1) or ""
        cooker.status.ignored_dependencies = set( ignore.split() )
        cooker.handleCollections( data.getVar("BBFILE_COLLECTIONS", cooker.configuration.data, 1) )

        (filelist, masked) = cooker.collect_bbfiles()
        cooker.parse_bbfiles(filelist, masked, cooker.myProgressCallback)
        cooker.buildDepgraph()
        global parsed
        parsed = True
        print()

    def reparse( self, params ):
        """(re)Parse a providee's bb file"""
        bbfile = self._findProvider( params[0] )
        if bbfile is not None:
            print("SHELL: Found bbfile '%s' for '%s'" % ( bbfile, params[0] ))
            self.fileReparse( [ bbfile ] )
        else:
            print("ERROR: Nothing provides '%s'" % params[0])
    reparse.usage = "<providee>"

    def getvar( self, params ):
        """Dump the contents of an outer BitBake environment variable"""
        var = params[0]
        value = data.getVar( var, cooker.configuration.data, 1 )
        print(value)
    getvar.usage = "<variable>"

    def peek( self, params ):
        """Dump contents of variable defined in providee's metadata"""
        name, var = params
        bbfile = self._findProvider( name )
        if bbfile is not None:
            the_data = cache.Cache.loadDataFull(bbfile, cooker.configuration.data)
            value = the_data.getVar( var, 1 )
            print(value)
        else:
            print("ERROR: Nothing provides '%s'" % name)
    peek.usage = "<providee> <variable>"

    def poke( self, params ):
        """Set contents of variable defined in providee's metadata"""
        name, var, value = params
        bbfile = self._findProvider( name )
        if bbfile is not None:
            print("ERROR: Sorry, this functionality is currently broken")
            #d = cooker.pkgdata[bbfile]
            #data.setVar( var, value, d )

            # mark the change semi persistent
            #cooker.pkgdata.setDirty(bbfile, d)
            #print "OK"
        else:
            print("ERROR: Nothing provides '%s'" % name)
    poke.usage = "<providee> <variable> <value>"

    def print_( self, params ):
        """Dump all files or providers"""
        what = params[0]
        if what == "files":
            self._checkParsed()
            for key in cooker.status.pkg_fn: print(key)
        elif what == "providers":
            self._checkParsed()
            for key in cooker.status.providers: print(key)
        else:
            print("Usage: print %s" % self.print_.usage)
    print_.usage = "<files|providers>"

    def python( self, params ):
        """Enter the expert mode - an interactive BitBake Python Interpreter"""
        sys.ps1 = "EXPERT BB>>> "
        sys.ps2 = "EXPERT BB... "
        import code
        interpreter = code.InteractiveConsole( dict( globals() ) )
        interpreter.interact( "SHELL: Expert Mode - BitBake Python %s\nType 'help' for more information, press CTRL-D to switch back to BBSHELL." % sys.version )

    def showdata( self, params ):
        """Execute 'showdata' on a providee"""
        cooker.showEnvironment(None, params)
    showdata.usage = "<providee>"

    def setVar( self, params ):
        """Set an outer BitBake environment variable"""
        var, value = params
        data.setVar( var, value, cooker.configuration.data )
        print("OK")
    setVar.usage = "<variable> <value>"

    def rebuild( self, params ):
        """Clean and rebuild a .bb file or a providee"""
        self.build( params, "clean" )
        self.build( params, "build" )
    rebuild.usage = "<providee>"

    def shell( self, params ):
        """Execute a shell command and dump the output"""
        if params != "":
            print(commands.getoutput( " ".join( params ) ))
    shell.usage = "<...>"

    def stage( self, params ):
        """Execute 'stage' on a providee"""
        self.build( params, "populate_staging" )
    stage.usage = "<providee>"

    def status( self, params ):
        """<just for testing>"""
        print("-" * 78)
        print("building list = '%s'" % cooker.building_list)
        print("build path = '%s'" % cooker.build_path)
        print("consider_msgs_cache = '%s'" % cooker.consider_msgs_cache)
        print("build stats = '%s'" % cooker.stats)
        if last_exception is not None: print("last_exception = '%s'" % repr( last_exception.args ))
        print("memory output contents = '%s'" % self._shell.myout._buffer)

    def test( self, params ):
        """<just for testing>"""
        print("testCommand called with '%s'" % params)

    def unpack( self, params ):
        """Execute 'unpack' on a providee"""
        self.build( params, "unpack" )
    unpack.usage = "<providee>"

    def which( self, params ):
        """Computes the providers for a given providee"""
        # Need to use taskData for this information
        item = params[0]

        self._checkParsed()

        preferred = data.getVar( "PREFERRED_PROVIDER_%s" % item, cooker.configuration.data, 1 )
        if not preferred: preferred = item

        try:
            lv, lf, pv, pf = Providers.findBestProvider(preferred, cooker.configuration.data, cooker.status)
        except KeyError:
            lv, lf, pv, pf = (None,)*4

        try:
            providers = cooker.status.providers[item]
        except KeyError:
            print("SHELL: ERROR: Nothing provides", preferred)
        else:
            for provider in providers:
                if provider == pf: provider = " (***) %s" % provider
                else:              provider = "       %s" % provider
                print(provider)
    which.usage = "<providee>"

##########################################################################
# Common helper functions
##########################################################################

def completeFilePath( bbfile ):
    """Get the complete bbfile path"""
    if not cooker.status: return bbfile
    if not cooker.status.pkg_fn: return bbfile
    for key in cooker.status.pkg_fn:
        if key.endswith( bbfile ):
            return key
    return bbfile

def sendToPastebin( desc, content ):
    """Send content to http://oe.pastebin.com"""
    mydata = {}
    mydata["lang"] = "Plain Text"
    mydata["desc"] = desc
    mydata["cvt_tabs"] = "No"
    mydata["nick"] = "%s@%s" % ( os.environ.get( "USER", "unknown" ), socket.gethostname() or "unknown" )
    mydata["text"] = content
    params = urllib.urlencode( mydata )
    headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}

    host = "rafb.net"
    conn = httplib.HTTPConnection( "%s:80" % host )
    conn.request("POST", "/paste/paste.php", params, headers )

    response = conn.getresponse()
    conn.close()

    if response.status == 302:
        location = response.getheader( "location" ) or "unknown"
        print("SHELL: Pasted to http://%s%s" % ( host, location ))
    else:
        print("ERROR: %s %s" % ( response.status, response.reason ))

def completer( text, state ):
    """Return a possible readline completion"""
    debugOut( "completer called with text='%s', state='%d'" % ( text, state ) )

    if state == 0:
        line = readline.get_line_buffer()
        if " " in line:
            line = line.split()
            # we are in second (or more) argument
            if line[0] in cmds and hasattr( cmds[line[0]][0], "usage" ): # known command and usage
                u = getattr( cmds[line[0]][0], "usage" ).split()[0]
                if u == "<variable>":
                    allmatches = cooker.configuration.data.keys()
                elif u == "<bbfile>":
                    if cooker.status.pkg_fn is None: allmatches = [ "(No Matches Available. Parsed yet?)" ]
                    else: allmatches = [ x.split("/")[-1] for x in cooker.status.pkg_fn ]
                elif u == "<providee>":
                    if cooker.status.pkg_fn is None: allmatches = [ "(No Matches Available. Parsed yet?)" ]
                    else: allmatches = cooker.status.providers.iterkeys()
                else: allmatches = [ "(No tab completion available for this command)" ]
            else: allmatches = [ "(No tab completion available for this command)" ]
        else:
            # we are in first argument
            allmatches = cmds.iterkeys()

        completer.matches = [ x for x in allmatches if x[:len(text)] == text ]
        #print "completer.matches = '%s'" % completer.matches
    if len( completer.matches ) > state:
        return completer.matches[state]
    else:
        return None

def debugOut( text ):
    if debug:
        sys.stderr.write( "( %s )\n" % text )

def columnize( alist, width = 80 ):
    """
    A word-wrap function that preserves existing line breaks
    and most spaces in the text. Expects that existing line
    breaks are posix newlines (\n).
    """
    return reduce(lambda line, word, width=width: '%s%s%s' %
                  (line,
                   ' \n'[(len(line[line.rfind('\n')+1:])
                         + len(word.split('\n', 1)[0]
                              ) >= width)],
                   word),
                  alist
                 )

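The reduce() in columnize() is dense: it folds each word onto the running string, inserting either a space or a newline depending on whether the current (last) line would exceed width. Its behaviour, assuming the definition above:

words = ["alpha", "beta", "gamma", "delta", "epsilon"]
print(columnize(words, width=12))
# alpha beta
# gamma delta
# epsilon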
def globfilter( names, pattern ):
    return fnmatch.filter( names, pattern )

##########################################################################
# Class MemoryOutput
##########################################################################

class MemoryOutput:
    """File-like output class buffering the output of the last 10 commands"""
    def __init__( self, delegate ):
        self.delegate = delegate
        self._buffer = []
        self.text = []
        self._command = None

    def startCommand( self, command ):
        self._command = command
        self.text = []
    def endCommand( self ):
        if self._command is not None:
            if len( self._buffer ) == 10: del self._buffer[0]
            self._buffer.append( ( self._command, self.text ) )
    def removeLast( self ):
        if self._buffer:
            del self._buffer[ len( self._buffer ) - 1 ]
        self.text = []
        self._command = None
    def lastBuffer( self ):
        if self._buffer:
            return self._buffer[ len( self._buffer ) -1 ][1]
    def bufferedCommands( self ):
        return [ cmd for cmd, output in self._buffer ]
    def buffer( self, i ):
        if i < len( self._buffer ):
            return "BB>> %s\n%s" % ( self._buffer[i][0], "".join( self._buffer[i][1] ) )
        else: return "ERROR: Invalid buffer number. Buffer needs to be in (0, %d)" % ( len( self._buffer ) - 1 )
    def write( self, text ):
        if self._command is not None and text != "BB>> ": self.text.append( text )
        if self.delegate is not None: self.delegate.write( text )
    def flush( self ):
        return self.delegate.flush()
    def fileno( self ):
        return self.delegate.fileno()
    def isatty( self ):
        return self.delegate.isatty()

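MemoryOutput works by being installed as sys.stdout while a command runs: every write is forwarded to the real stream and, between startCommand() and endCommand(), also recorded in a ten-entry ring buffer. A minimal exercise of the class above:

import sys

myout = MemoryOutput(sys.stdout)
myout.startCommand("demo")
sys.stdout = myout
print("captured and echoed")
sys.stdout = myout.delegate      # restore before inspecting
myout.endCommand()
print(myout.buffer(0))
# BB>> demo
# captured and echoed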
##########################################################################
# Class BitBakeShell
##########################################################################

class BitBakeShell:

    def __init__( self ):
        """Register commands and set up readline"""
        self.commandQ = Queue.Queue()
        self.commands = BitBakeShellCommands( self )
        self.myout = MemoryOutput( sys.stdout )
        self.historyfilename = os.path.expanduser( "~/.bbsh_history" )
        self.startupfilename = os.path.expanduser( "~/.bbsh_startup" )

        readline.set_completer( completer )
        readline.set_completer_delims( " " )
        readline.parse_and_bind("tab: complete")

        try:
            readline.read_history_file( self.historyfilename )
        except IOError:
            pass  # It doesn't exist yet.

        print(__credits__)

    def cleanup( self ):
        """Write readline history and clean up resources"""
        debugOut( "writing command history" )
        try:
            readline.write_history_file( self.historyfilename )
        except:
            print("SHELL: Unable to save command history")

    def registerCommand( self, command, function, numparams = 0, usage = "", helptext = "" ):
        """Register a command"""
        if usage == "": usage = command
        if helptext == "": helptext = function.__doc__ or "<not yet documented>"
        cmds[command] = ( function, numparams, usage, helptext )

    def processCommand( self, command, params ):
        """Process a command. Check number of params and print a usage string, if appropriate"""
        debugOut( "processing command '%s'..." % command )
        try:
            function, numparams, usage, helptext = cmds[command]
        except KeyError:
            print("SHELL: ERROR: '%s' command is not a valid command." % command)
            self.myout.removeLast()
        else:
            if (numparams != -1) and (not len( params ) == numparams):
                print("Usage: '%s'" % usage)
                return

            result = function( self.commands, params )
            debugOut( "result was '%s'" % result )

    def processStartupFile( self ):
        """Read and execute all commands found in $HOME/.bbsh_startup"""
        if os.path.exists( self.startupfilename ):
            startupfile = open( self.startupfilename, "r" )
            for cmdline in startupfile:
                debugOut( "processing startup line '%s'" % cmdline )
                if not cmdline:
                    continue
                if "|" in cmdline:
                    print("ERROR: '|' in startup file is not allowed. Ignoring line")
                    continue
                self.commandQ.put( cmdline.strip() )

    def main( self ):
        """The main command loop"""
        while not leave_mainloop:
            try:
                if self.commandQ.empty():
                    sys.stdout = self.myout.delegate
                    cmdline = raw_input( "BB>> " )
                    sys.stdout = self.myout
                else:
                    cmdline = self.commandQ.get()
                if cmdline:
                    allCommands = cmdline.split( ';' )
                    for command in allCommands:
                        pipecmd = None
                        #
                        # special case for expert mode
                        if command == 'python':
                            sys.stdout = self.myout.delegate
                            self.processCommand( command, "" )
                            sys.stdout = self.myout
                        else:
                            self.myout.startCommand( command )
                            if '|' in command:  # disable output
                                command, pipecmd = command.split( '|' )
                                delegate = self.myout.delegate
                                self.myout.delegate = None
                            tokens = shlex.split( command, True )
                            self.processCommand( tokens[0], tokens[1:] or "" )
                            self.myout.endCommand()
                            if pipecmd is not None:  # restore output
                                self.myout.delegate = delegate

                                pipe = popen2.Popen4( pipecmd )
                                pipe.tochild.write( "\n".join( self.myout.lastBuffer() ) )
                                pipe.tochild.close()
                                sys.stdout.write( pipe.fromchild.read() )
                        #
            except EOFError:
                print()
                return
            except KeyboardInterrupt:
                print()

##########################################################################
# Start function - called from the BitBake command line utility
##########################################################################

def start( aCooker ):
    global cooker
    cooker = aCooker
    bbshell = BitBakeShell()
    bbshell.processStartupFile()
    bbshell.main()
    bbshell.cleanup()

if __name__ == "__main__":
    print("SHELL: Sorry, this program should only be called by BitBake.")
@@ -5,8 +5,6 @@ import re
import tempfile
import pickle
import bb.data
import difflib
import simplediff
from bb.checksum import FileChecksumCache

logger = logging.getLogger('BitBake.SigGen')
@@ -15,7 +13,7 @@ def init(d):
    siggens = [obj for obj in globals().values()
               if type(obj) is type and issubclass(obj, SignatureGenerator)]

    desired = d.getVar("BB_SIGNATURE_HANDLER") or "noop"
    desired = d.getVar("BB_SIGNATURE_HANDLER", True) or "noop"
    for sg in siggens:
        if desired == sg.name:
            return sg(d)
@@ -69,10 +67,6 @@ class SignatureGenerator(object):
    def set_taskdata(self, data):
        self.runtaskdeps, self.taskhash, self.file_checksum_values, self.taints, self.basehash = data

    def reset(self, data):
        self.__init__(data)


class SignatureGeneratorBasic(SignatureGenerator):
    """
    """
@@ -88,10 +82,10 @@ class SignatureGeneratorBasic(SignatureGenerator):
        self.gendeps = {}
        self.lookupcache = {}
        self.pkgnameextract = re.compile("(?P<fn>.*)\..*")
        self.basewhitelist = set((data.getVar("BB_HASHBASE_WHITELIST") or "").split())
        self.basewhitelist = set((data.getVar("BB_HASHBASE_WHITELIST", True) or "").split())
        self.taskwhitelist = None
        self.init_rundepcheck(data)
        checksum_cache_file = data.getVar("BB_HASH_CHECKSUM_CACHE_FILE")
        checksum_cache_file = data.getVar("BB_HASH_CHECKSUM_CACHE_FILE", True)
        if checksum_cache_file:
            self.checksum_cache = FileChecksumCache()
            self.checksum_cache.init_cache(data, checksum_cache_file)
@@ -99,7 +93,7 @@ class SignatureGeneratorBasic(SignatureGenerator):
            self.checksum_cache = None

    def init_rundepcheck(self, data):
        self.taskwhitelist = data.getVar("BB_HASHTASK_WHITELIST") or None
        self.taskwhitelist = data.getVar("BB_HASHTASK_WHITELIST", True) or None
        if self.taskwhitelist:
            self.twl = re.compile(self.taskwhitelist)
        else:
@@ -107,16 +101,44 @@ class SignatureGeneratorBasic(SignatureGenerator):

    def _build_data(self, fn, d):

        ignore_mismatch = ((d.getVar("BB_HASH_IGNORE_MISMATCH") or '') == '1')
        tasklist, gendeps, lookupcache = bb.data.generate_dependencies(d)

        taskdeps, basehash = bb.data.generate_dependency_hash(tasklist, gendeps, lookupcache, self.basewhitelist, fn)
        taskdeps = {}
        basehash = {}

        for task in tasklist:
            data = lookupcache[task]

            if data is None:
                bb.error("Task %s from %s seems to be empty?!" % (task, fn))
                data = ''

            gendeps[task] -= self.basewhitelist
            newdeps = gendeps[task]
            seen = set()
            while newdeps:
                nextdeps = newdeps
                seen |= nextdeps
                newdeps = set()
                for dep in nextdeps:
                    if dep in self.basewhitelist:
                        continue
                    gendeps[dep] -= self.basewhitelist
                    newdeps |= gendeps[dep]
                newdeps -= seen

            alldeps = sorted(seen)
            for dep in alldeps:
                data = data + dep
                var = lookupcache[dep]
                if var is not None:
                    data = data + str(var)
            datahash = hashlib.md5(data.encode("utf-8")).hexdigest()
            k = fn + "." + task
            if not ignore_mismatch and k in self.basehash and self.basehash[k] != basehash[k]:
                bb.error("When reparsing %s, the basehash value changed from %s to %s. The metadata is not deterministic and this needs to be fixed." % (k, self.basehash[k], basehash[k]))
            self.basehash[k] = basehash[k]
            if k in self.basehash and self.basehash[k] != datahash:
                bb.error("When reparsing %s, the basehash value changed from %s to %s. The metadata is not deterministic and this needs to be fixed." % (k, self.basehash[k], datahash))
            self.basehash[k] = datahash
            taskdeps[task] = alldeps

        self.taskdeps[fn] = taskdeps
        self.gendeps[fn] = gendeps
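The morty-side loop above computes each task's base hash by expanding its variable dependencies to a fixed point (skipping whitelisted variables) and hashing the concatenation of the task body with every dependency's name and value. The same closure as a standalone sketch, with toy data in place of BitBake's structures:

import hashlib

gendeps = {"do_compile": {"CC", "CFLAGS"}, "CC": {"PATH"}, "CFLAGS": set(), "PATH": set()}
lookupcache = {"do_compile": "${CC} main.c", "CC": "gcc", "CFLAGS": "-O2", "PATH": "/usr/bin"}
whitelist = {"PATH"}   # excluded from the signature, like BB_HASHBASE_WHITELIST

def basehash(task):
    data = lookupcache[task]
    seen, newdeps = set(), gendeps[task] - whitelist
    while newdeps:                     # transitive closure minus the whitelist
        nextdeps = newdeps
        seen |= nextdeps
        newdeps = set()
        for dep in nextdeps:
            newdeps |= gendeps[dep] - whitelist
        newdeps -= seen
    for dep in sorted(seen):           # stable order keeps the hash stable
        data += dep + str(lookupcache[dep])
    return hashlib.md5(data.encode("utf-8")).hexdigest()

print(basehash("do_compile"))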
@@ -132,15 +154,13 @@ class SignatureGeneratorBasic(SignatureGenerator):

        try:
            taskdeps = self._build_data(fn, d)
        except bb.parse.SkipRecipe:
            raise
        except:
            bb.warn("Error during finalise of %s" % fn)
            raise

        #Slow but can be useful for debugging mismatched basehashes
        #for task in self.taskdeps[fn]:
        #    self.dump_sigtask(fn, task, d.getVar("STAMP"), False)
        #    self.dump_sigtask(fn, task, d.getVar("STAMP", True), False)

        for task in taskdeps:
            d.setVar("BB_BASEHASH_task-%s" % task, self.basehash[fn + "." + task])
@@ -164,30 +184,19 @@ class SignatureGeneratorBasic(SignatureGenerator):
        return taint

    def get_taskhash(self, fn, task, deps, dataCache):

        mc = ''
        if fn.startswith('multiconfig:'):
            mc = fn.split(':')[1]
        k = fn + "." + task

        data = dataCache.basetaskhash[k]
        self.basehash[k] = data
        self.runtaskdeps[k] = []
        self.file_checksum_values[k] = []
        recipename = dataCache.pkg_fn[fn]

        for dep in sorted(deps, key=clean_basepath):
            pkgname = self.pkgnameextract.search(dep).group('fn')
            if mc:
                depmc = pkgname.split(':')[1]
                if mc != depmc:
                    continue
            if dep.startswith("multiconfig:") and not mc:
                continue
            depname = dataCache.pkg_fn[pkgname]
            depname = dataCache.pkg_fn[self.pkgnameextract.search(dep).group('fn')]
            if not self.rundep_check(fn, recipename, task, dep, depname, dataCache):
                continue
            if dep not in self.taskhash:
                bb.fatal("%s is not in taskhash, caller isn't calling in dependency order?" % dep)
                bb.fatal("%s is not in taskhash, caller isn't calling in dependency order?", dep)
            data = data + self.taskhash[dep]
            self.runtaskdeps[k].append(dep)
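get_taskhash() composes a task's hash from its own base hash plus the task hashes of its runtime dependencies, visited in a stable order, which is why the caller must supply dependencies in dependency order. Schematically (toy values; the real code also folds in file checksums and taints):

import hashlib

basehash = {"libfoo.bb.do_compile": "aaa", "app.bb.do_compile": "bbb"}
taskhash = {}

def get_taskhash(tid, deps):
    data = basehash[tid]
    for dep in sorted(deps):
        data += taskhash[dep]          # dep must already have been hashed
    taskhash[tid] = hashlib.md5(data.encode("utf-8")).hexdigest()
    return taskhash[tid]

get_taskhash("libfoo.bb.do_compile", [])
print(get_taskhash("app.bb.do_compile", ["libfoo.bb.do_compile"]))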
@@ -297,8 +306,8 @@ class SignatureGeneratorBasic(SignatureGenerator):
                pass
            raise err

    def dump_sigfn(self, fn, dataCaches, options):
        if fn in self.taskdeps:
    def dump_sigs(self, dataCaches, options):
        for fn in self.taskdeps:
            for task in self.taskdeps[fn]:
                tid = fn + ":" + task
                (mc, _, _) = bb.runqueue.split_tid(tid)
@@ -329,74 +338,23 @@ class SignatureGeneratorBasicHash(SignatureGeneratorBasic):

    def stampcleanmask(self, stampbase, fn, taskname, extrainfo):
        return self.stampfile(stampbase, fn, taskname, extrainfo, clean=True)


    def invalidate_task(self, task, d, fn):
        bb.note("Tainting hash to force rebuild of task %s, %s" % (fn, task))
        bb.build.write_taint(task, d, fn)

def dump_this_task(outfile, d):
    import bb.parse
    fn = d.getVar("BB_FILENAME")
    task = "do_" + d.getVar("BB_CURRENTTASK")
    fn = d.getVar("BB_FILENAME", True)
    task = "do_" + d.getVar("BB_CURRENTTASK", True)
    referencestamp = bb.build.stamp_internal(task, d, None, True)
    bb.parse.siggen.dump_sigtask(fn, task, outfile, "customfile:" + referencestamp)

def init_colors(enable_color):
    """Initialise colour dict for passing to compare_sigfiles()"""
    # First set up the colours
    colors = {'color_title':   '\033[1m',
              'color_default': '\033[0m',
              'color_add':     '\033[0;32m',
              'color_remove':  '\033[0;31m',
             }
    # Leave all keys present but clear the values
    if not enable_color:
        for k in colors.keys():
            colors[k] = ''
    return colors

def worddiff_str(oldstr, newstr, colors=None):
    if not colors:
        colors = init_colors(False)
    diff = simplediff.diff(oldstr.split(' '), newstr.split(' '))
    ret = []
    for change, value in diff:
        value = ' '.join(value)
        if change == '=':
            ret.append(value)
        elif change == '+':
            item = '{color_add}{{+{value}+}}{color_default}'.format(value=value, **colors)
            ret.append(item)
        elif change == '-':
            item = '{color_remove}[-{value}-]{color_default}'.format(value=value, **colors)
            ret.append(item)
    whitespace_note = ''
    if oldstr != newstr and ' '.join(oldstr.split()) == ' '.join(newstr.split()):
        whitespace_note = ' (whitespace changed)'
    return '"%s"%s' % (' '.join(ret), whitespace_note)
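worddiff_str() renders a wdiff-style inline word diff; with colours disabled the markers are purely textual. Assuming the 2.6-side function above and the simplediff module, one would expect output along these lines:

print(worddiff_str("the quick brown fox", "the slow brown fox"))
# "the [-quick-] {+slow+} brown fox"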
def list_inline_diff(oldlist, newlist, colors=None):
|
||||
if not colors:
|
||||
colors = init_colors(False)
|
||||
diff = simplediff.diff(oldlist, newlist)
|
||||
ret = []
|
||||
for change, value in diff:
|
||||
value = ' '.join(value)
|
||||
if change == '=':
|
||||
ret.append("'%s'" % value)
|
||||
elif change == '+':
|
||||
item = '{color_add}+{value}{color_default}'.format(value=value, **colors)
|
||||
ret.append(item)
|
||||
elif change == '-':
|
||||
item = '{color_remove}-{value}{color_default}'.format(value=value, **colors)
|
||||
ret.append(item)
|
||||
return '[%s]' % (', '.join(ret))
|
||||
|
||||
def clean_basepath(a):
|
||||
mc = None
|
||||
if a.startswith("multiconfig:"):
|
||||
_, mc, a = a.split(":", 2)
|
||||
b = a.rsplit("/", 2)[1] + '/' + a.rsplit("/", 2)[2]
|
||||
b = a.rsplit("/", 2)[1] + a.rsplit("/", 2)[2]
|
||||
if a.startswith("virtual:"):
|
||||
b = b + ":" + a.rsplit(":", 1)[0]
|
||||
if mc:
|
||||
@@ -415,26 +373,9 @@ def clean_basepaths_list(a):
|
||||
b.append(clean_basepath(x))
|
||||
return b
|
||||
|
||||
def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
|
||||
def compare_sigfiles(a, b, recursecb = None):
|
||||
output = []
|
||||
|
||||
colors = init_colors(color)
|
||||
def color_format(formatstr, **values):
|
||||
"""
|
||||
Return colour formatted string.
|
||||
NOTE: call with the format string, not an already formatted string
|
||||
containing values (otherwise you could have trouble with { and }
|
||||
characters)
|
||||
"""
|
||||
if not formatstr.endswith('{color_default}'):
|
||||
formatstr += '{color_default}'
|
||||
# In newer python 3 versions you can pass both of these directly,
|
||||
# but we only require 3.4 at the moment
|
||||
formatparams = {}
|
||||
formatparams.update(colors)
|
||||
formatparams.update(values)
|
||||
return formatstr.format(**formatparams)
|
||||
|
||||
with open(a, 'rb') as f:
|
||||
p1 = pickle.Unpickler(f)
|
||||
a_data = p1.load()
|
||||
@@ -488,59 +429,39 @@ def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
|
||||
return changed, added, removed
|
||||
|
||||
if 'basewhitelist' in a_data and a_data['basewhitelist'] != b_data['basewhitelist']:
|
||||
output.append(color_format("{color_title}basewhitelist changed{color_default} from '%s' to '%s'") % (a_data['basewhitelist'], b_data['basewhitelist']))
|
||||
output.append("basewhitelist changed from '%s' to '%s'" % (a_data['basewhitelist'], b_data['basewhitelist']))
|
||||
if a_data['basewhitelist'] and b_data['basewhitelist']:
|
||||
output.append("changed items: %s" % a_data['basewhitelist'].symmetric_difference(b_data['basewhitelist']))
|
||||
|
||||
if 'taskwhitelist' in a_data and a_data['taskwhitelist'] != b_data['taskwhitelist']:
|
||||
output.append(color_format("{color_title}taskwhitelist changed{color_default} from '%s' to '%s'") % (a_data['taskwhitelist'], b_data['taskwhitelist']))
|
||||
output.append("taskwhitelist changed from '%s' to '%s'" % (a_data['taskwhitelist'], b_data['taskwhitelist']))
|
||||
if a_data['taskwhitelist'] and b_data['taskwhitelist']:
|
||||
output.append("changed items: %s" % a_data['taskwhitelist'].symmetric_difference(b_data['taskwhitelist']))
|
||||
|
||||
if a_data['taskdeps'] != b_data['taskdeps']:
|
||||
output.append(color_format("{color_title}Task dependencies changed{color_default} from:\n%s\nto:\n%s") % (sorted(a_data['taskdeps']), sorted(b_data['taskdeps'])))
|
||||
output.append("Task dependencies changed from:\n%s\nto:\n%s" % (sorted(a_data['taskdeps']), sorted(b_data['taskdeps'])))
|
||||
|
||||
if a_data['basehash'] != b_data['basehash'] and not collapsed:
|
||||
output.append(color_format("{color_title}basehash changed{color_default} from %s to %s") % (a_data['basehash'], b_data['basehash']))
|
||||
if a_data['basehash'] != b_data['basehash']:
|
||||
output.append("basehash changed from %s to %s" % (a_data['basehash'], b_data['basehash']))
|
||||
|
||||
changed, added, removed = dict_diff(a_data['gendeps'], b_data['gendeps'], a_data['basewhitelist'] & b_data['basewhitelist'])
|
||||
if changed:
|
||||
for dep in changed:
|
||||
output.append(color_format("{color_title}List of dependencies for variable %s changed from '{color_default}%s{color_title}' to '{color_default}%s{color_title}'") % (dep, a_data['gendeps'][dep], b_data['gendeps'][dep]))
|
||||
output.append("List of dependencies for variable %s changed from '%s' to '%s'" % (dep, a_data['gendeps'][dep], b_data['gendeps'][dep]))
|
||||
if a_data['gendeps'][dep] and b_data['gendeps'][dep]:
|
||||
output.append("changed items: %s" % a_data['gendeps'][dep].symmetric_difference(b_data['gendeps'][dep]))
|
||||
if added:
|
||||
for dep in added:
|
||||
output.append(color_format("{color_title}Dependency on variable %s was added") % (dep))
|
||||
output.append("Dependency on variable %s was added" % (dep))
|
||||
if removed:
|
||||
for dep in removed:
|
||||
output.append(color_format("{color_title}Dependency on Variable %s was removed") % (dep))
|
||||
output.append("Dependency on Variable %s was removed" % (dep))
|
||||
|
||||
|
||||
changed, added, removed = dict_diff(a_data['varvals'], b_data['varvals'])
|
||||
if changed:
|
||||
for dep in changed:
|
||||
oldval = a_data['varvals'][dep]
|
||||
newval = b_data['varvals'][dep]
|
||||
if newval and oldval and ('\n' in oldval or '\n' in newval):
|
||||
diff = difflib.unified_diff(oldval.splitlines(), newval.splitlines(), lineterm='')
|
||||
# Cut off the first two lines, since we aren't interested in
# the old/new filename (they are blank anyway in this case)
difflines = list(diff)[2:]
if color:
# Add colour to diff output
for i, line in enumerate(difflines):
if line.startswith('+'):
line = color_format('{color_add}{line}', line=line)
difflines[i] = line
elif line.startswith('-'):
line = color_format('{color_remove}{line}', line=line)
difflines[i] = line
output.append(color_format("{color_title}Variable {var} value changed:{color_default}\n{diff}", var=dep, diff='\n'.join(difflines)))
elif newval and oldval and (' ' in oldval or ' ' in newval):
output.append(color_format("{color_title}Variable {var} value changed:{color_default}\n{diff}", var=dep, diff=worddiff_str(oldval, newval, colors)))
else:
output.append(color_format("{color_title}Variable {var} value changed from '{color_default}{oldval}{color_title}' to '{color_default}{newval}{color_title}'{color_default}", var=dep, oldval=oldval, newval=newval))
output.append("Variable %s value changed from '%s' to '%s'" % (dep, a_data['varvals'][dep], b_data['varvals'][dep]))

if not 'file_checksum_values' in a_data:
a_data['file_checksum_values'] = {}
@@ -550,38 +471,32 @@ def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
changed, added, removed = file_checksums_diff(a_data['file_checksum_values'], b_data['file_checksum_values'])
if changed:
for f, old, new in changed:
output.append(color_format("{color_title}Checksum for file %s changed{color_default} from %s to %s") % (f, old, new))
output.append("Checksum for file %s changed from %s to %s" % (f, old, new))
if added:
for f in added:
output.append(color_format("{color_title}Dependency on checksum of file %s was added") % (f))
output.append("Dependency on checksum of file %s was added" % (f))
if removed:
for f in removed:
output.append(color_format("{color_title}Dependency on checksum of file %s was removed") % (f))
output.append("Dependency on checksum of file %s was removed" % (f))

if not 'runtaskdeps' in a_data:
a_data['runtaskdeps'] = {}
if not 'runtaskdeps' in b_data:
b_data['runtaskdeps'] = {}

if not collapsed:
if len(a_data['runtaskdeps']) != len(b_data['runtaskdeps']):
changed = ["Number of task dependencies changed"]
else:
changed = []
for idx, task in enumerate(a_data['runtaskdeps']):
a = a_data['runtaskdeps'][idx]
b = b_data['runtaskdeps'][idx]
if a_data['runtaskhashes'][a] != b_data['runtaskhashes'][b] and not collapsed:
changed.append("%s with hash %s\n changed to\n%s with hash %s" % (clean_basepath(a), a_data['runtaskhashes'][a], clean_basepath(b), b_data['runtaskhashes'][b]))
if len(a_data['runtaskdeps']) != len(b_data['runtaskdeps']):
changed = ["Number of task dependencies changed"]
else:
changed = []
for idx, task in enumerate(a_data['runtaskdeps']):
a = a_data['runtaskdeps'][idx]
b = b_data['runtaskdeps'][idx]
if a_data['runtaskhashes'][a] != b_data['runtaskhashes'][b]:
changed.append("%s with hash %s\n changed to\n%s with hash %s" % (a, a_data['runtaskhashes'][a], b, b_data['runtaskhashes'][b]))

if changed:
clean_a = clean_basepaths_list(a_data['runtaskdeps'])
clean_b = clean_basepaths_list(b_data['runtaskdeps'])
if clean_a != clean_b:
output.append(color_format("{color_title}runtaskdeps changed:{color_default}\n%s") % list_inline_diff(clean_a, clean_b, colors))
else:
output.append(color_format("{color_title}runtaskdeps changed:"))
output.append("\n".join(changed))
if changed:
output.append("runtaskdeps changed from %s to %s" % (clean_basepaths_list(a_data['runtaskdeps']), clean_basepaths_list(b_data['runtaskdeps'])))
output.append("\n".join(changed))


if 'runtaskhashes' in a_data and 'runtaskhashes' in b_data:
@@ -597,7 +512,7 @@ def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
#output.append("Dependency on task %s was replaced by %s with same hash" % (dep, bdep))
bdep_found = True
if not bdep_found:
output.append(color_format("{color_title}Dependency on task %s was added{color_default} with hash %s") % (clean_basepath(dep), b[dep]))
output.append("Dependency on task %s was added with hash %s" % (clean_basepath(dep), b[dep]))
if removed:
for dep in removed:
adep_found = False
@@ -607,25 +522,21 @@ def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
#output.append("Dependency on task %s was replaced by %s with same hash" % (adep, dep))
adep_found = True
if not adep_found:
output.append(color_format("{color_title}Dependency on task %s was removed{color_default} with hash %s") % (clean_basepath(dep), a[dep]))
output.append("Dependency on task %s was removed with hash %s" % (clean_basepath(dep), a[dep]))
if changed:
for dep in changed:
if not collapsed:
output.append(color_format("{color_title}Hash for dependent task %s changed{color_default} from %s to %s") % (clean_basepath(dep), a[dep], b[dep]))
output.append("Hash for dependent task %s changed from %s to %s" % (clean_basepath(dep), a[dep], b[dep]))
if callable(recursecb):
# If a dependent hash changed, might as well print the line above and then defer to the changes in
# that hash since in all likelyhood, they're the same changes this task also saw.
recout = recursecb(dep, a[dep], b[dep])
if recout:
if collapsed:
output.extend(recout)
else:
# If a dependent hash changed, might as well print the line above and then defer to the changes in
# that hash since in all likelyhood, they're the same changes this task also saw.
output = [output[-1]] + recout
output = [output[-1]] + recout

a_taint = a_data.get('taint', None)
b_taint = b_data.get('taint', None)
if a_taint != b_taint:
output.append(color_format("{color_title}Taint (by forced/invalidated task) changed{color_default} from %s to %s") % (a_taint, b_taint))
output.append("Taint (by forced/invalidated task) changed from %s to %s" % (a_taint, b_taint))

return output

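The compare_sigfiles hunk above trims the first two entries of the diff it builds. A minimal standalone sketch (not BitBake's actual helper) of why that works: difflib's unified_diff() yields the '---'/'+++' filename header lines first, and those are noise when diffing two variable values rather than two files.

    import difflib

    oldval = "alpha beta gamma"
    newval = "alpha BETA gamma delta"
    # unified_diff() yields the '--- ' and '+++ ' header lines before any
    # hunks; slicing them off leaves only the '@@' hunks and +/- body lines.
    diff = difflib.unified_diff(oldval.splitlines(), newval.splitlines(), lineterm='')
    difflines = list(diff)[2:]
    print('\n'.join(difflines))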
@@ -47,7 +47,7 @@ class TaskData:
"""
BitBake Task Data implementation
"""
def __init__(self, abort = True, skiplist = None, allowincomplete = False):
def __init__(self, abort = True, tryaltconfigs = False, skiplist = None, allowincomplete = False):
self.build_targets = {}
self.run_targets = {}

@@ -66,12 +66,11 @@ class TaskData:
self.failed_fns = []

self.abort = abort
self.tryaltconfigs = tryaltconfigs
self.allowincomplete = allowincomplete

self.skiplist = skiplist

self.mcdepends = []

def add_tasks(self, fn, dataCache):
"""
Add tasks for a given fn to the database
@@ -90,26 +89,6 @@ class TaskData:

self.add_extra_deps(fn, dataCache)

def add_mcdepends(task):
for dep in task_deps['mcdepends'][task].split():
if len(dep.split(':')) != 5:
bb.msg.fatal("TaskData", "Error for %s:%s[%s], multiconfig dependency %s does not contain exactly four ':' characters.\n Task '%s' should be specified in the form 'multiconfig:fromMC:toMC:packagename:task'" % (fn, task, 'mcdepends', dep, 'mcdepends'))
if dep not in self.mcdepends:
self.mcdepends.append(dep)

# Common code for dep_name/depends = 'depends'/idepends and 'rdepends'/irdepends
def handle_deps(task, dep_name, depends, seen):
if dep_name in task_deps and task in task_deps[dep_name]:
ids = []
for dep in task_deps[dep_name][task].split():
if dep:
parts = dep.split(":")
if len(parts) != 2:
bb.msg.fatal("TaskData", "Error for %s:%s[%s], dependency %s in '%s' does not contain exactly one ':' character.\n Task '%s' should be specified in the form 'packagename:task'" % (fn, task, dep_name, dep, task_deps[dep_name][task], dep_name))
ids.append((parts[0], parts[1]))
seen(parts[0])
depends.extend(ids)

for task in task_deps['tasks']:

tid = "%s:%s" % (fn, task)
@@ -119,19 +98,31 @@ class TaskData:
parentids = []
for dep in task_deps['parents'][task]:
if dep not in task_deps['tasks']:
bb.debug(2, "Not adding dependency of %s on %s since %s does not exist" % (task, dep, dep))
bb.debug(2, "Not adding dependeny of %s on %s since %s does not exist" % (task, dep, dep))
continue
parentid = "%s:%s" % (fn, dep)
parentids.append(parentid)
self.taskentries[tid].tdepends.extend(parentids)


# Touch all intertask dependencies
handle_deps(task, 'depends', self.taskentries[tid].idepends, self.seen_build_target)
handle_deps(task, 'rdepends', self.taskentries[tid].irdepends, self.seen_run_target)

if 'mcdepends' in task_deps and task in task_deps['mcdepends']:
add_mcdepends(task)
if 'depends' in task_deps and task in task_deps['depends']:
ids = []
for dep in task_deps['depends'][task].split():
if dep:
if ":" not in dep:
bb.msg.fatal("TaskData", "Error for %s, dependency %s does not contain ':' character\n. Task 'depends' should be specified in the form 'packagename:task'" % (fn, dep))
ids.append(((dep.split(":")[0]), dep.split(":")[1]))
self.seen_build_target(dep.split(":")[0])
self.taskentries[tid].idepends.extend(ids)
if 'rdepends' in task_deps and task in task_deps['rdepends']:
ids = []
for dep in task_deps['rdepends'][task].split():
if dep:
if ":" not in dep:
bb.msg.fatal("TaskData", "Error for %s, dependency %s does not contain ':' character\n. Task 'rdepends' should be specified in the form 'packagename:task'" % (fn, dep))
ids.append(((dep.split(":")[0]), dep.split(":")[1]))
self.seen_run_target(dep.split(":")[0])
self.taskentries[tid].irdepends.extend(ids)

# Work out build dependencies
if not fn in self.depids:
@@ -550,9 +541,6 @@ class TaskData:
provmap[name] = provider[0]
return provmap

def get_mcdepends(self):
return self.mcdepends

def dump_data(self):
"""
Dump some debug information on the internal data structures

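The mcdepends validation in the TaskData hunk above expects five colon-separated fields. A hypothetical standalone parser mirroring that check (parse_mcdepend is illustrative only, not a BitBake API):

    def parse_mcdepend(dep):
        # 'multiconfig:fromMC:toMC:packagename:task' -- exactly four ':' characters
        parts = dep.split(':')
        if len(parts) != 5:
            raise ValueError("multiconfig dependency %r must have the form "
                             "'multiconfig:fromMC:toMC:packagename:task'" % dep)
        _, from_mc, to_mc, pn, task = parts
        return from_mc, to_mc, pn, task

    print(parse_mcdepend("multiconfig::x86:core-image-minimal:do_rootfs"))
    # -> ('', 'x86', 'core-image-minimal', 'do_rootfs')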
@@ -49,9 +49,6 @@ class ReferenceTest(unittest.TestCase):
def assertExecs(self, execs):
self.assertEqual(self.execs, execs)

def assertContains(self, contains):
self.assertEqual(self.contains, contains)

class VariableReferenceTest(ReferenceTest):

def parseExpression(self, exp):
@@ -71,7 +68,7 @@ class VariableReferenceTest(ReferenceTest):

def test_python_reference(self):
self.setEmptyVars(["BAR"])
self.parseExpression("${@d.getVar('BAR') + 'foo'}")
self.parseExpression("${@bb.data.getVar('BAR', d, True) + 'foo'}")
self.assertReferences(set(["BAR"]))

class ShellReferenceTest(ReferenceTest):
@@ -204,7 +201,6 @@ class PythonReferenceTest(ReferenceTest):

self.references = parsedvar.references | parser.references
self.execs = parser.execs
self.contains = parser.contains

@staticmethod
def indent(value):
@@ -213,17 +209,17 @@ be. These unit tests are testing snippets."""
return " " + value

def test_getvar_reference(self):
self.parseExpression("d.getVar('foo')")
self.parseExpression("bb.data.getVar('foo', d, True)")
self.assertReferences(set(["foo"]))
self.assertExecs(set())

def test_getvar_computed_reference(self):
self.parseExpression("d.getVar('f' + 'o' + 'o')")
self.parseExpression("bb.data.getVar('f' + 'o' + 'o', d, True)")
self.assertReferences(set())
self.assertExecs(set())

def test_getvar_exec_reference(self):
self.parseExpression("eval('d.getVar(\"foo\")')")
self.parseExpression("eval('bb.data.getVar(\"foo\", d, True)')")
self.assertReferences(set())
self.assertExecs(set(["eval"]))

@@ -269,35 +265,15 @@ be. These unit tests are testing snippets."""
self.assertExecs(set(["testget"]))
del self.context["testget"]

def test_contains(self):
self.parseExpression('bb.utils.contains("TESTVAR", "one", "true", "false", d)')
self.assertContains({'TESTVAR': {'one'}})

def test_contains_multi(self):
self.parseExpression('bb.utils.contains("TESTVAR", "one two", "true", "false", d)')
self.assertContains({'TESTVAR': {'one two'}})

def test_contains_any(self):
self.parseExpression('bb.utils.contains_any("TESTVAR", "hello", "true", "false", d)')
self.assertContains({'TESTVAR': {'hello'}})

def test_contains_any_multi(self):
self.parseExpression('bb.utils.contains_any("TESTVAR", "one two three", "true", "false", d)')
self.assertContains({'TESTVAR': {'one', 'two', 'three'}})

def test_contains_filter(self):
self.parseExpression('bb.utils.filter("TESTVAR", "hello there world", d)')
self.assertContains({'TESTVAR': {'hello', 'there', 'world'}})


class DependencyReferenceTest(ReferenceTest):

pydata = """
d.getVar('somevar')
bb.data.getVar('somevar', d, True)
def test(d):
foo = 'bar %s' % 'foo'
def test2(d):
d.getVar(foo)
d.getVar(foo, True)
d.getVar('bar', False)
test2(d)

@@ -309,9 +285,9 @@ def a():

test(d)

d.expand(d.getVar("something", False))
d.expand("${inexpand} somethingelse")
d.getVar(a(), False)
bb.data.expand(bb.data.getVar("something", False, d), d)
bb.data.expand("${inexpand} somethingelse", d)
bb.data.getVar(a(), d, False)
"""

def test_python(self):
@@ -394,30 +370,6 @@ esac

self.assertEqual(deps, set(["oe_libinstall"]))

def test_contains_vardeps(self):
expr = '${@bb.utils.filter("TESTVAR", "somevalue anothervalue", d)} \
${@bb.utils.contains("TESTVAR", "testval testval2", "yetanothervalue", "", d)} \
${@bb.utils.contains("TESTVAR", "testval2 testval3", "blah", "", d)} \
${@bb.utils.contains_any("TESTVAR", "testval2 testval3", "lastone", "", d)}'
parsedvar = self.d.expandWithRefs(expr, None)
# Check contains
self.assertEqual(parsedvar.contains, {'TESTVAR': {'testval2 testval3', 'anothervalue', 'somevalue', 'testval testval2', 'testval2', 'testval3'}})
# Check dependencies
self.d.setVar('ANOTHERVAR', expr)
self.d.setVar('TESTVAR', 'anothervalue testval testval2')
deps, values = bb.data.build_dependencies("ANOTHERVAR", set(self.d.keys()), set(), set(), self.d)
self.assertEqual(sorted(values.splitlines()),
sorted([expr,
'TESTVAR{anothervalue} = Set',
'TESTVAR{somevalue} = Unset',
'TESTVAR{testval testval2} = Set',
'TESTVAR{testval2 testval3} = Unset',
'TESTVAR{testval2} = Set',
'TESTVAR{testval3} = Unset'
]))
# Check final value
self.assertEqual(self.d.getVar('ANOTHERVAR').split(), ['anothervalue', 'yetanothervalue', 'lastone'])

#Currently no wildcard support
#def test_vardeps_wildcards(self):
# self.d.setVar("oe_libinstall", "echo test")

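The contains tests above pin down vardeps tracking for bb.utils.contains()/contains_any(). As a reminder of the runtime semantics they rely on, a sketch assuming BitBake's lib/ directory (e.g. poky/bitbake/lib) is on sys.path:

    import bb.data
    import bb.utils

    d = bb.data.init()
    d.setVar("TESTVAR", "one two")
    # contains() returns the true-value only if every listed item is present.
    print(bb.utils.contains("TESTVAR", "one", "true", "false", d))        # true
    print(bb.utils.contains("TESTVAR", "one three", "true", "false", d))  # false
    # contains_any() needs just one of the listed items to match.
    print(bb.utils.contains_any("TESTVAR", "three one", "true", "false", d))  # true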
@@ -1,83 +0,0 @@
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# BitBake Tests for cooker.py
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#

import unittest
import tempfile
import os
import bb, bb.cooker
import re
import logging

# Cooker tests
class CookerTest(unittest.TestCase):
def setUp(self):
# At least one variable needs to be set
self.d = bb.data.init()
topdir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "testdata/cooker")
self.d.setVar('TOPDIR', topdir)

def test_CookerCollectFiles_sublayers(self):
'''Test that a sublayer of an existing layer does not trigger
No bb files matched ...'''

def append_collection(topdir, path, d):
collection = path.split('/')[-1]
pattern = "^" + topdir + "/" + path + "/"
regex = re.compile(pattern)
priority = 5

d.setVar('BBFILE_COLLECTIONS', (d.getVar('BBFILE_COLLECTIONS') or "") + " " + collection)
d.setVar('BBFILE_PATTERN_%s' % (collection), pattern)
d.setVar('BBFILE_PRIORITY_%s' % (collection), priority)

return (collection, pattern, regex, priority)

topdir = self.d.getVar("TOPDIR")

# Priorities: list of (collection, pattern, regex, priority)
bbfile_config_priorities = []
# Order is important for this test, shortest to longest is typical failure case
bbfile_config_priorities.append( append_collection(topdir, 'first', self.d) )
bbfile_config_priorities.append( append_collection(topdir, 'second', self.d) )
bbfile_config_priorities.append( append_collection(topdir, 'second/third', self.d) )

pkgfns = [ topdir + '/first/recipes/sample1_1.0.bb',
topdir + '/second/recipes/sample2_1.0.bb',
topdir + '/second/third/recipes/sample3_1.0.bb' ]

class LogHandler(logging.Handler):
def __init__(self):
logging.Handler.__init__(self)
self.logdata = []

def emit(self, record):
self.logdata.append(record.getMessage())

# Move cooker to use my special logging
logger = bb.cooker.logger
log_handler = LogHandler()
logger.addHandler(log_handler)
collection = bb.cooker.CookerCollectFiles(bbfile_config_priorities)
collection.collection_priorities(pkgfns, self.d)
logger.removeHandler(log_handler)

# Should be empty (no generated messages)
expected = []

self.assertEqual(log_handler.logdata, expected)
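The deleted cooker test above hinges on BBFILE_PATTERN regexes: a sublayer's pattern is a prefix-extension of its parent's, so a recipe under second/third matches both collections. A hypothetical illustration using plain re (the paths and names are made up for the example):

    import re

    topdir = "/work"
    patterns = {name: re.compile("^%s/%s/" % (topdir, name))
                for name in ("first", "second", "second/third")}
    recipe = topdir + "/second/third/recipes/sample3_1.0.bb"
    # Both 'second' and 'second/third' match this recipe, so the priority
    # code must not conclude that 'second/third' matched no files.
    print(sorted(name for name, rx in patterns.items() if rx.match(recipe)))
    # -> ['second', 'second/third']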
@@ -77,13 +77,13 @@ class DataExpansions(unittest.TestCase):
self.assertEqual(str(val), "boo value_of_foo")

def test_python_snippet_getvar(self):
val = self.d.expand("${@d.getVar('foo') + ' ${bar}'}")
val = self.d.expand("${@d.getVar('foo', True) + ' ${bar}'}")
self.assertEqual(str(val), "value_of_foo value_of_bar")

def test_python_unexpanded(self):
self.d.setVar("bar", "${unsetvar}")
val = self.d.expand("${@d.getVar('foo') + ' ${bar}'}")
self.assertEqual(str(val), "${@d.getVar('foo') + ' ${unsetvar}'}")
val = self.d.expand("${@d.getVar('foo', True) + ' ${bar}'}")
self.assertEqual(str(val), "${@d.getVar('foo', True) + ' ${unsetvar}'}")

def test_python_snippet_syntax_error(self):
self.d.setVar("FOO", "${@foo = 5}")
@@ -99,7 +99,7 @@ class DataExpansions(unittest.TestCase):
self.assertRaises(bb.data_smart.ExpansionError, self.d.getVar, "FOO", True)

def test_value_containing_value(self):
val = self.d.expand("${@d.getVar('foo') + ' ${bar}'}")
val = self.d.expand("${@d.getVar('foo', True) + ' ${bar}'}")
self.assertEqual(str(val), "value_of_foo value_of_bar")

def test_reference_undefined_var(self):
@@ -109,7 +109,7 @@ class DataExpansions(unittest.TestCase):
def test_double_reference(self):
self.d.setVar("BAR", "bar value")
self.d.setVar("FOO", "${BAR} foo ${BAR}")
val = self.d.getVar("FOO")
val = self.d.getVar("FOO", True)
self.assertEqual(str(val), "bar value foo bar value")

def test_direct_recursion(self):
@@ -129,12 +129,12 @@ class DataExpansions(unittest.TestCase):

def test_incomplete_varexp_single_quotes(self):
self.d.setVar("FOO", "sed -i -e 's:IP{:I${:g' $pc")
val = self.d.getVar("FOO")
val = self.d.getVar("FOO", True)
self.assertEqual(str(val), "sed -i -e 's:IP{:I${:g' $pc")

def test_nonstring(self):
self.d.setVar("TEST", 5)
val = self.d.getVar("TEST")
val = self.d.getVar("TEST", True)
self.assertEqual(str(val), "5")

def test_rename(self):
@@ -234,19 +234,19 @@ class TestConcat(unittest.TestCase):
def test_prepend(self):
self.d.setVar("TEST", "${VAL}")
self.d.prependVar("TEST", "${FOO}:")
self.assertEqual(self.d.getVar("TEST"), "foo:val")
self.assertEqual(self.d.getVar("TEST", True), "foo:val")

def test_append(self):
self.d.setVar("TEST", "${VAL}")
self.d.appendVar("TEST", ":${BAR}")
self.assertEqual(self.d.getVar("TEST"), "val:bar")
self.assertEqual(self.d.getVar("TEST", True), "val:bar")

def test_multiple_append(self):
self.d.setVar("TEST", "${VAL}")
self.d.prependVar("TEST", "${FOO}:")
self.d.appendVar("TEST", ":val2")
self.d.appendVar("TEST", ":${BAR}")
self.assertEqual(self.d.getVar("TEST"), "foo:val:val2:bar")
self.assertEqual(self.d.getVar("TEST", True), "foo:val:val2:bar")

class TestConcatOverride(unittest.TestCase):
def setUp(self):
@@ -258,78 +258,62 @@ class TestConcatOverride(unittest.TestCase):
def test_prepend(self):
self.d.setVar("TEST", "${VAL}")
self.d.setVar("TEST_prepend", "${FOO}:")
self.assertEqual(self.d.getVar("TEST"), "foo:val")
bb.data.update_data(self.d)
self.assertEqual(self.d.getVar("TEST", True), "foo:val")

def test_append(self):
self.d.setVar("TEST", "${VAL}")
self.d.setVar("TEST_append", ":${BAR}")
self.assertEqual(self.d.getVar("TEST"), "val:bar")
bb.data.update_data(self.d)
self.assertEqual(self.d.getVar("TEST", True), "val:bar")

def test_multiple_append(self):
self.d.setVar("TEST", "${VAL}")
self.d.setVar("TEST_prepend", "${FOO}:")
self.d.setVar("TEST_append", ":val2")
self.d.setVar("TEST_append", ":${BAR}")
self.assertEqual(self.d.getVar("TEST"), "foo:val:val2:bar")
bb.data.update_data(self.d)
self.assertEqual(self.d.getVar("TEST", True), "foo:val:val2:bar")

def test_append_unset(self):
self.d.setVar("TEST_prepend", "${FOO}:")
self.d.setVar("TEST_append", ":val2")
self.d.setVar("TEST_append", ":${BAR}")
self.assertEqual(self.d.getVar("TEST"), "foo::val2:bar")
bb.data.update_data(self.d)
self.assertEqual(self.d.getVar("TEST", True), "foo::val2:bar")

def test_remove(self):
self.d.setVar("TEST", "${VAL} ${BAR}")
self.d.setVar("TEST_remove", "val")
self.assertEqual(self.d.getVar("TEST"), " bar")

def test_remove_cleared(self):
self.d.setVar("TEST", "${VAL} ${BAR}")
self.d.setVar("TEST_remove", "val")
self.d.setVar("TEST", "${VAL} ${BAR}")
self.assertEqual(self.d.getVar("TEST"), "val bar")

# Ensure the value is unchanged if we have an inactive remove override
# (including that whitespace is preserved)
def test_remove_inactive_override(self):
self.d.setVar("TEST", "${VAL} ${BAR} 123")
self.d.setVar("TEST_remove_inactiveoverride", "val")
self.assertEqual(self.d.getVar("TEST"), "val bar 123")
bb.data.update_data(self.d)
self.assertEqual(self.d.getVar("TEST", True), "bar")

def test_doubleref_remove(self):
self.d.setVar("TEST", "${VAL} ${BAR}")
self.d.setVar("TEST_remove", "val")
self.d.setVar("TEST_TEST", "${TEST} ${TEST}")
self.assertEqual(self.d.getVar("TEST_TEST"), " bar bar")
bb.data.update_data(self.d)
self.assertEqual(self.d.getVar("TEST_TEST", True), "bar bar")

def test_empty_remove(self):
self.d.setVar("TEST", "")
self.d.setVar("TEST_remove", "val")
self.assertEqual(self.d.getVar("TEST"), "")
bb.data.update_data(self.d)
self.assertEqual(self.d.getVar("TEST", True), "")

def test_remove_expansion(self):
self.d.setVar("BAR", "Z")
self.d.setVar("TEST", "${BAR}/X Y")
self.d.setVar("TEST_remove", "${BAR}/X")
self.assertEqual(self.d.getVar("TEST"), " Y")
bb.data.update_data(self.d)
self.assertEqual(self.d.getVar("TEST", True), "Y")

def test_remove_expansion_items(self):
self.d.setVar("TEST", "A B C D")
self.d.setVar("BAR", "B D")
self.d.setVar("TEST_remove", "${BAR}")
self.assertEqual(self.d.getVar("TEST"), "A C ")

def test_remove_preserve_whitespace(self):
# When the removal isn't active, the original value should be preserved
self.d.setVar("TEST", " A B")
self.d.setVar("TEST_remove", "C")
self.assertEqual(self.d.getVar("TEST"), " A B")

def test_remove_preserve_whitespace2(self):
# When the removal is active preserve the whitespace
self.d.setVar("TEST", " A B")
self.d.setVar("TEST_remove", "B")
self.assertEqual(self.d.getVar("TEST"), " A ")
bb.data.update_data(self.d)
self.assertEqual(self.d.getVar("TEST", True), "A C")

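The TestConcatOverride hunks above show both sides of the diff: the newer side drops the bb.data.update_data() calls and the expand argument to getVar(). A sketch of the _prepend/_append/_remove operator semantics as exercised by the newer-side tests (bb importable as in the earlier sketch; expected values quoted from the tests):

    import bb.data

    d = bb.data.init()
    d.setVar("VAL", "val")
    d.setVar("TEST", "${VAL}")
    d.setVar("TEST_prepend", "foo:")
    d.setVar("TEST_append", ":bar")
    print(d.getVar("TEST"))         # foo:val:bar
    d.setVar("TEST2", "A B C D")
    d.setVar("BAR", "B D")
    d.setVar("TEST2_remove", "${BAR}")
    print(repr(d.getVar("TEST2")))  # 'A C ' per test_remove_expansion_items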
class TestOverrides(unittest.TestCase):
def setUp(self):
@@ -338,62 +322,60 @@ class TestOverrides(unittest.TestCase):
self.d.setVar("TEST", "testvalue")

def test_no_override(self):
self.assertEqual(self.d.getVar("TEST"), "testvalue")
bb.data.update_data(self.d)
self.assertEqual(self.d.getVar("TEST", True), "testvalue")

def test_one_override(self):
self.d.setVar("TEST_bar", "testvalue2")
self.assertEqual(self.d.getVar("TEST"), "testvalue2")
bb.data.update_data(self.d)
self.assertEqual(self.d.getVar("TEST", True), "testvalue2")

def test_one_override_unset(self):
self.d.setVar("TEST2_bar", "testvalue2")

self.assertEqual(self.d.getVar("TEST2"), "testvalue2")
bb.data.update_data(self.d)
self.assertEqual(self.d.getVar("TEST2", True), "testvalue2")
self.assertCountEqual(list(self.d.keys()), ['TEST', 'TEST2', 'OVERRIDES', 'TEST2_bar'])

def test_multiple_override(self):
self.d.setVar("TEST_bar", "testvalue2")
self.d.setVar("TEST_local", "testvalue3")
self.d.setVar("TEST_foo", "testvalue4")
self.assertEqual(self.d.getVar("TEST"), "testvalue3")
bb.data.update_data(self.d)
self.assertEqual(self.d.getVar("TEST", True), "testvalue3")
self.assertCountEqual(list(self.d.keys()), ['TEST', 'TEST_foo', 'OVERRIDES', 'TEST_bar', 'TEST_local'])

def test_multiple_combined_overrides(self):
self.d.setVar("TEST_local_foo_bar", "testvalue3")
self.assertEqual(self.d.getVar("TEST"), "testvalue3")
bb.data.update_data(self.d)
self.assertEqual(self.d.getVar("TEST", True), "testvalue3")

def test_multiple_overrides_unset(self):
self.d.setVar("TEST2_local_foo_bar", "testvalue3")
self.assertEqual(self.d.getVar("TEST2"), "testvalue3")
bb.data.update_data(self.d)
self.assertEqual(self.d.getVar("TEST2", True), "testvalue3")

def test_keyexpansion_override(self):
self.d.setVar("LOCAL", "local")
self.d.setVar("TEST_bar", "testvalue2")
self.d.setVar("TEST_${LOCAL}", "testvalue3")
self.d.setVar("TEST_foo", "testvalue4")
bb.data.update_data(self.d)
bb.data.expandKeys(self.d)
self.assertEqual(self.d.getVar("TEST"), "testvalue3")
self.assertEqual(self.d.getVar("TEST", True), "testvalue3")

def test_rename_override(self):
self.d.setVar("ALTERNATIVE_ncurses-tools_class-target", "a")
self.d.setVar("OVERRIDES", "class-target")
bb.data.update_data(self.d)
self.d.renameVar("ALTERNATIVE_ncurses-tools", "ALTERNATIVE_lib32-ncurses-tools")
self.assertEqual(self.d.getVar("ALTERNATIVE_lib32-ncurses-tools"), "a")
self.assertEqual(self.d.getVar("ALTERNATIVE_lib32-ncurses-tools", True), "a")

def test_underscore_override(self):
self.d.setVar("TEST_bar", "testvalue2")
self.d.setVar("TEST_some_val", "testvalue3")
self.d.setVar("TEST_foo", "testvalue4")
self.d.setVar("OVERRIDES", "foo:bar:some_val")
self.assertEqual(self.d.getVar("TEST"), "testvalue3")

def test_remove_with_override(self):
self.d.setVar("TEST_bar", "testvalue2")
self.d.setVar("TEST_some_val", "testvalue3 testvalue5")
self.d.setVar("TEST_some_val_remove", "testvalue3")
self.d.setVar("TEST_foo", "testvalue4")
self.d.setVar("OVERRIDES", "foo:bar:some_val")
self.assertEqual(self.d.getVar("TEST"), " testvalue5")

self.assertEqual(self.d.getVar("TEST", True), "testvalue3")

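The TestOverrides hunks above exercise conditional (override-style) variable syntax: when several TEST_<override> variants are set, the winner is the one whose override appears last in OVERRIDES. A sketch under the same import assumption as the earlier examples:

    import bb.data

    d = bb.data.init()
    d.setVar("OVERRIDES", "foo:bar:local")
    d.setVar("TEST", "testvalue")
    d.setVar("TEST_bar", "testvalue2")
    d.setVar("TEST_local", "testvalue3")
    d.setVar("TEST_foo", "testvalue4")
    # 'local' is the last override listed, so TEST_local wins.
    print(d.getVar("TEST"))  # testvalue3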
class TestKeyExpansion(unittest.TestCase):
def setUp(self):
@@ -407,7 +389,7 @@ class TestKeyExpansion(unittest.TestCase):
with LogRecord() as logs:
bb.data.expandKeys(self.d)
self.assertTrue(logContains("Variable key VAL_${FOO} (A) replaces original key VAL_foo (B)", logs))
self.assertEqual(self.d.getVar("VAL_foo"), "A")
self.assertEqual(self.d.getVar("VAL_foo", True), "A")

class TestFlags(unittest.TestCase):
def setUp(self):
@@ -462,215 +444,3 @@ class Contains(unittest.TestCase):

self.assertFalse(bb.utils.contains_any("SOMEFLAG", "x", True, False, self.d))
self.assertFalse(bb.utils.contains_any("SOMEFLAG", "x y z", True, False, self.d))


class TaskHash(unittest.TestCase):
def test_taskhashes(self):
def gettask_bashhash(taskname, d):
tasklist, gendeps, lookupcache = bb.data.generate_dependencies(d)
taskdeps, basehash = bb.data.generate_dependency_hash(tasklist, gendeps, lookupcache, set(), "somefile")
bb.warn(str(lookupcache))
return basehash["somefile." + taskname]

d = bb.data.init()
d.setVar("__BBTASKS", ["mytask"])
d.setVar("__exportlist", [])
d.setVar("mytask", "${MYCOMMAND}")
d.setVar("MYCOMMAND", "${VAR}; foo; bar; exit 0")
d.setVar("VAR", "val")
orighash = gettask_bashhash("mytask", d)

# Changing a variable should change the hash
d.setVar("VAR", "val2")
nexthash = gettask_bashhash("mytask", d)
self.assertNotEqual(orighash, nexthash)

d.setVar("VAR", "val")
# Adding an inactive removal shouldn't change the hash
d.setVar("BAR", "notbar")
d.setVar("MYCOMMAND_remove", "${BAR}")
nexthash = gettask_bashhash("mytask", d)
self.assertEqual(orighash, nexthash)

# Adding an active removal should change the hash
d.setVar("BAR", "bar;")
nexthash = gettask_bashhash("mytask", d)
self.assertNotEqual(orighash, nexthash)

# Setup an inactive contains()
d.setVar("VAR", "${@bb.utils.contains('VAR2', 'A', 'val', '', d)}")
orighash = gettask_bashhash("mytask", d)

# Activate the contains() and the hash should change
d.setVar("VAR2", "A")
nexthash = gettask_bashhash("mytask", d)
self.assertNotEqual(orighash, nexthash)

# The contains should be inactive but even though VAR2 has a
# different value the hash should match the original
d.setVar("VAR2", "B")
nexthash = gettask_bashhash("mytask", d)
self.assertEqual(orighash, nexthash)

class Serialize(unittest.TestCase):

def test_serialize(self):
import tempfile
import pickle
d = bb.data.init()
d.enableTracking()
d.setVar('HELLO', 'world')
d.setVarFlag('HELLO', 'other', 'planet')
with tempfile.NamedTemporaryFile(delete=False) as tmpfile:
tmpfilename = tmpfile.name
pickle.dump(d, tmpfile)

with open(tmpfilename, 'rb') as f:
newd = pickle.load(f)

os.remove(tmpfilename)

self.assertEqual(d, newd)
self.assertEqual(newd.getVar('HELLO'), 'world')
self.assertEqual(newd.getVarFlag('HELLO', 'other'), 'planet')

# Remote datastore tests
# These really only test the interface, since in actual usage we have a
# tinfoil connector that does everything over RPC, and this doesn't test
# that.

class TestConnector:
d = None
def __init__(self, d):
self.d = d
def getVar(self, name):
return self.d._findVar(name)
def getKeys(self):
return set(self.d.keys())
def getVarHistory(self, name):
return self.d.varhistory.variable(name)
def expandPythonRef(self, varname, expr, d):
localdata = self.d.createCopy()
for key in d.localkeys():
localdata.setVar(d.getVar(key))
varparse = bb.data_smart.VariableParse(varname, localdata)
return varparse.python_sub(expr)
def setVar(self, name, value):
self.d.setVar(name, value)
def setVarFlag(self, name, flag, value):
self.d.setVarFlag(name, flag, value)
def delVar(self, name):
self.d.delVar(name)
return False
def delVarFlag(self, name, flag):
self.d.delVarFlag(name, flag)
return False
def renameVar(self, name, newname):
self.d.renameVar(name, newname)
return False

class Remote(unittest.TestCase):
def test_remote(self):

d1 = bb.data.init()
d1.enableTracking()
d2 = bb.data.init()
d2.enableTracking()
connector = TestConnector(d1)

d2.setVar('_remote_data', connector)

d1.setVar('HELLO', 'world')
d1.setVarFlag('OTHER', 'flagname', 'flagvalue')
self.assertEqual(d2.getVar('HELLO'), 'world')
self.assertEqual(d2.expand('${HELLO}'), 'world')
self.assertEqual(d2.expand('${@d.getVar("HELLO")}'), 'world')
self.assertIn('flagname', d2.getVarFlags('OTHER'))
self.assertEqual(d2.getVarFlag('OTHER', 'flagname'), 'flagvalue')
self.assertEqual(d1.varhistory.variable('HELLO'), d2.varhistory.variable('HELLO'))
# Test setVar on client side affects server
d2.setVar('HELLO', 'other-world')
self.assertEqual(d1.getVar('HELLO'), 'other-world')
# Test setVarFlag on client side affects server
d2.setVarFlag('HELLO', 'flagname', 'flagvalue')
self.assertEqual(d1.getVarFlag('HELLO', 'flagname'), 'flagvalue')
# Test client side data is incorporated in python expansion (which is done on server)
d2.setVar('FOO', 'bar')
self.assertEqual(d2.expand('${@d.getVar("FOO")}'), 'bar')
# Test overrides work
d1.setVar('FOO_test', 'baz')
d1.appendVar('OVERRIDES', ':test')
self.assertEqual(d2.getVar('FOO'), 'baz')


# Remote equivalents of local test classes
# Note that these aren't perfect since we only test in one direction

class RemoteDataExpansions(DataExpansions):
def setUp(self):
self.d1 = bb.data.init()
self.d = bb.data.init()
self.d1["foo"] = "value_of_foo"
self.d1["bar"] = "value_of_bar"
self.d1["value_of_foo"] = "value_of_'value_of_foo'"
connector = TestConnector(self.d1)
self.d.setVar('_remote_data', connector)

class TestRemoteNestedExpansions(TestNestedExpansions):
def setUp(self):
self.d1 = bb.data.init()
self.d = bb.data.init()
self.d1["foo"] = "foo"
self.d1["bar"] = "bar"
self.d1["value_of_foobar"] = "187"
connector = TestConnector(self.d1)
self.d.setVar('_remote_data', connector)

class TestRemoteConcat(TestConcat):
def setUp(self):
self.d1 = bb.data.init()
self.d = bb.data.init()
self.d1.setVar("FOO", "foo")
self.d1.setVar("VAL", "val")
self.d1.setVar("BAR", "bar")
connector = TestConnector(self.d1)
self.d.setVar('_remote_data', connector)

class TestRemoteConcatOverride(TestConcatOverride):
def setUp(self):
self.d1 = bb.data.init()
self.d = bb.data.init()
self.d1.setVar("FOO", "foo")
self.d1.setVar("VAL", "val")
self.d1.setVar("BAR", "bar")
connector = TestConnector(self.d1)
self.d.setVar('_remote_data', connector)

class TestRemoteOverrides(TestOverrides):
def setUp(self):
self.d1 = bb.data.init()
self.d = bb.data.init()
self.d1.setVar("OVERRIDES", "foo:bar:local")
self.d1.setVar("TEST", "testvalue")
connector = TestConnector(self.d1)
self.d.setVar('_remote_data', connector)

class TestRemoteKeyExpansion(TestKeyExpansion):
def setUp(self):
self.d1 = bb.data.init()
self.d = bb.data.init()
self.d1.setVar("FOO", "foo")
self.d1.setVar("BAR", "foo")
connector = TestConnector(self.d1)
self.d.setVar('_remote_data', connector)

class TestRemoteFlags(TestFlags):
def setUp(self):
self.d1 = bb.data.init()
self.d = bb.data.init()
self.d1.setVar("foo", "value of foo")
self.d1.setVarFlag("foo", "flag1", "value of flag1")
self.d1.setVarFlag("foo", "flag2", "value of flag2")
connector = TestConnector(self.d1)
self.d.setVar('_remote_data', connector)

@@ -1,986 +0,0 @@
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# BitBake Tests for the Event implementation (event.py)
#
# Copyright (C) 2017 Intel Corporation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#

import unittest
import bb
import logging
import bb.compat
import bb.event
import importlib
import threading
import time
import pickle
from unittest.mock import Mock
from unittest.mock import call
from bb.msg import BBLogFormatter


class EventQueueStubBase(object):
""" Base class for EventQueueStub classes """
def __init__(self):
self.event_calls = []
return

def _store_event_data_string(self, event):
if isinstance(event, logging.LogRecord):
formatter = BBLogFormatter("%(levelname)s: %(message)s")
self.event_calls.append(formatter.format(event))
else:
self.event_calls.append(bb.event.getName(event))
return


class EventQueueStub(EventQueueStubBase):
""" Class used as specification for UI event handler queue stub objects """
def __init__(self):
super(EventQueueStub, self).__init__()

def send(self, event):
super(EventQueueStub, self)._store_event_data_string(event)


class PickleEventQueueStub(EventQueueStubBase):
""" Class used as specification for UI event handler queue stub objects
with sendpickle method """
def __init__(self):
super(PickleEventQueueStub, self).__init__()

def sendpickle(self, pickled_event):
event = pickle.loads(pickled_event)
super(PickleEventQueueStub, self)._store_event_data_string(event)


class UIClientStub(object):
""" Class used as specification for UI event handler stub objects """
def __init__(self):
self.event = None

class EventHandlingTest(unittest.TestCase):
""" Event handling test class """


def setUp(self):
self._test_process = Mock()
ui_client1 = UIClientStub()
ui_client2 = UIClientStub()
self._test_ui1 = Mock(wraps=ui_client1)
self._test_ui2 = Mock(wraps=ui_client2)
importlib.reload(bb.event)

def _create_test_handlers(self):
""" Method used to create a test handler ordered dictionary """
test_handlers = bb.compat.OrderedDict()
test_handlers["handler1"] = self._test_process.handler1
test_handlers["handler2"] = self._test_process.handler2
return test_handlers

def test_class_handlers(self):
""" Test set_class_handlers and get_class_handlers methods """
test_handlers = self._create_test_handlers()
bb.event.set_class_handlers(test_handlers)
self.assertEqual(test_handlers,
bb.event.get_class_handlers())

def test_handlers(self):
""" Test set_handlers and get_handlers """
test_handlers = self._create_test_handlers()
bb.event.set_handlers(test_handlers)
self.assertEqual(test_handlers,
bb.event.get_handlers())

def test_clean_class_handlers(self):
""" Test clean_class_handlers method """
cleanDict = bb.compat.OrderedDict()
self.assertEqual(cleanDict,
bb.event.clean_class_handlers())

def test_register(self):
""" Test register method for class handlers """
result = bb.event.register("handler", self._test_process.handler)
self.assertEqual(result, bb.event.Registered)
handlers_dict = bb.event.get_class_handlers()
self.assertIn("handler", handlers_dict)

def test_already_registered(self):
""" Test detection of an already registed class handler """
bb.event.register("handler", self._test_process.handler)
handlers_dict = bb.event.get_class_handlers()
self.assertIn("handler", handlers_dict)
result = bb.event.register("handler", self._test_process.handler)
self.assertEqual(result, bb.event.AlreadyRegistered)

def test_register_from_string(self):
""" Test register method receiving code in string """
result = bb.event.register("string_handler", " return True")
self.assertEqual(result, bb.event.Registered)
handlers_dict = bb.event.get_class_handlers()
self.assertIn("string_handler", handlers_dict)

def test_register_with_mask(self):
""" Test register method with event masking """
mask = ["bb.event.OperationStarted",
"bb.event.OperationCompleted"]
result = bb.event.register("event_handler",
self._test_process.event_handler,
mask)
self.assertEqual(result, bb.event.Registered)
handlers_dict = bb.event.get_class_handlers()
self.assertIn("event_handler", handlers_dict)

def test_remove(self):
""" Test remove method for class handlers """
test_handlers = self._create_test_handlers()
bb.event.set_class_handlers(test_handlers)
count = len(test_handlers)
bb.event.remove("handler1", None)
test_handlers = bb.event.get_class_handlers()
self.assertEqual(len(test_handlers), count - 1)
with self.assertRaises(KeyError):
bb.event.remove("handler1", None)

def test_execute_handler(self):
""" Test execute_handler method for class handlers """
mask = ["bb.event.OperationProgress"]
result = bb.event.register("event_handler",
self._test_process.event_handler,
mask)
self.assertEqual(result, bb.event.Registered)
event = bb.event.OperationProgress(current=10, total=100)
bb.event.execute_handler("event_handler",
self._test_process.event_handler,
event,
None)
self._test_process.event_handler.assert_called_once_with(event)

def test_fire_class_handlers(self):
""" Test fire_class_handlers method """
mask = ["bb.event.OperationStarted"]
result = bb.event.register("event_handler1",
self._test_process.event_handler1,
mask)
self.assertEqual(result, bb.event.Registered)
result = bb.event.register("event_handler2",
self._test_process.event_handler2,
"*")
self.assertEqual(result, bb.event.Registered)
event1 = bb.event.OperationStarted()
event2 = bb.event.OperationCompleted(total=123)
bb.event.fire_class_handlers(event1, None)
bb.event.fire_class_handlers(event2, None)
bb.event.fire_class_handlers(event2, None)
expected_event_handler1 = [call(event1)]
expected_event_handler2 = [call(event1),
call(event2),
call(event2)]
self.assertEqual(self._test_process.event_handler1.call_args_list,
expected_event_handler1)
self.assertEqual(self._test_process.event_handler2.call_args_list,
expected_event_handler2)

def test_class_handler_filters(self):
""" Test filters for class handlers """
mask = ["bb.event.OperationStarted"]
result = bb.event.register("event_handler1",
self._test_process.event_handler1,
mask)
self.assertEqual(result, bb.event.Registered)
result = bb.event.register("event_handler2",
self._test_process.event_handler2,
"*")
self.assertEqual(result, bb.event.Registered)
bb.event.set_eventfilter(
lambda name, handler, event, d :
name == 'event_handler2' and
bb.event.getName(event) == "OperationStarted")
event1 = bb.event.OperationStarted()
event2 = bb.event.OperationCompleted(total=123)
bb.event.fire_class_handlers(event1, None)
bb.event.fire_class_handlers(event2, None)
bb.event.fire_class_handlers(event2, None)
expected_event_handler1 = []
expected_event_handler2 = [call(event1)]
self.assertEqual(self._test_process.event_handler1.call_args_list,
expected_event_handler1)
self.assertEqual(self._test_process.event_handler2.call_args_list,
expected_event_handler2)

def test_change_handler_event_mapping(self):
""" Test changing the event mapping for class handlers """
event1 = bb.event.OperationStarted()
event2 = bb.event.OperationCompleted(total=123)

# register handler for all events
result = bb.event.register("event_handler1",
self._test_process.event_handler1,
"*")
self.assertEqual(result, bb.event.Registered)
bb.event.fire_class_handlers(event1, None)
bb.event.fire_class_handlers(event2, None)
expected = [call(event1), call(event2)]
self.assertEqual(self._test_process.event_handler1.call_args_list,
expected)

# unregister handler and register it only for OperationStarted
bb.event.remove("event_handler1",
self._test_process.event_handler1)
mask = ["bb.event.OperationStarted"]
result = bb.event.register("event_handler1",
self._test_process.event_handler1,
mask)
self.assertEqual(result, bb.event.Registered)
bb.event.fire_class_handlers(event1, None)
bb.event.fire_class_handlers(event2, None)
expected = [call(event1), call(event2), call(event1)]
self.assertEqual(self._test_process.event_handler1.call_args_list,
expected)

# unregister handler and register it only for OperationCompleted
bb.event.remove("event_handler1",
self._test_process.event_handler1)
mask = ["bb.event.OperationCompleted"]
result = bb.event.register("event_handler1",
self._test_process.event_handler1,
mask)
self.assertEqual(result, bb.event.Registered)
bb.event.fire_class_handlers(event1, None)
bb.event.fire_class_handlers(event2, None)
expected = [call(event1), call(event2), call(event1), call(event2)]
self.assertEqual(self._test_process.event_handler1.call_args_list,
expected)

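The handler tests above drive bb.event's module-level registry directly. A minimal sketch of the same flow outside unittest, using only calls exercised by these tests (and the same sys.path assumption as the earlier sketches):

    import bb.event

    def on_started(event):
        print("saw", bb.event.getName(event))

    # Register for OperationStarted only, then fire two events; the mask
    # means only the first event reaches the handler.
    bb.event.register("on_started", on_started, ["bb.event.OperationStarted"])
    bb.event.fire_class_handlers(bb.event.OperationStarted(), None)
    bb.event.fire_class_handlers(bb.event.OperationCompleted(total=1), None)
    # -> saw OperationStarted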
def test_register_UIHhandler(self):
""" Test register_UIHhandler method """
result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
self.assertEqual(result, 1)

def test_UIHhandler_already_registered(self):
""" Test registering an UIHhandler already existing """
result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
self.assertEqual(result, 1)
result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
self.assertEqual(result, 2)

def test_unregister_UIHhandler(self):
""" Test unregister_UIHhandler method """
result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
self.assertEqual(result, 1)
result = bb.event.unregister_UIHhandler(1)
self.assertIs(result, None)

def test_fire_ui_handlers(self):
""" Test fire_ui_handlers method """
self._test_ui1.event = Mock(spec_set=EventQueueStub)
result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
self.assertEqual(result, 1)
self._test_ui2.event = Mock(spec_set=PickleEventQueueStub)
result = bb.event.register_UIHhandler(self._test_ui2, mainui=True)
self.assertEqual(result, 2)
event1 = bb.event.OperationStarted()
bb.event.fire_ui_handlers(event1, None)
expected = [call(event1)]
self.assertEqual(self._test_ui1.event.send.call_args_list,
expected)
expected = [call(pickle.dumps(event1))]
self.assertEqual(self._test_ui2.event.sendpickle.call_args_list,
expected)

def test_ui_handler_mask_filter(self):
""" Test filters for UI handlers """
mask = ["bb.event.OperationStarted"]
debug_domains = {}
self._test_ui1.event = Mock(spec_set=EventQueueStub)
result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
bb.event.set_UIHmask(result, logging.INFO, debug_domains, mask)
self._test_ui2.event = Mock(spec_set=PickleEventQueueStub)
result = bb.event.register_UIHhandler(self._test_ui2, mainui=True)
bb.event.set_UIHmask(result, logging.INFO, debug_domains, mask)

event1 = bb.event.OperationStarted()
event2 = bb.event.OperationCompleted(total=1)

bb.event.fire_ui_handlers(event1, None)
bb.event.fire_ui_handlers(event2, None)
expected = [call(event1)]
self.assertEqual(self._test_ui1.event.send.call_args_list,
expected)
expected = [call(pickle.dumps(event1))]
self.assertEqual(self._test_ui2.event.sendpickle.call_args_list,
expected)

def test_ui_handler_log_filter(self):
""" Test log filters for UI handlers """
mask = ["*"]
debug_domains = {'BitBake.Foo': logging.WARNING}

self._test_ui1.event = EventQueueStub()
result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
bb.event.set_UIHmask(result, logging.ERROR, debug_domains, mask)
self._test_ui2.event = PickleEventQueueStub()
result = bb.event.register_UIHhandler(self._test_ui2, mainui=True)
bb.event.set_UIHmask(result, logging.ERROR, debug_domains, mask)

event1 = bb.event.OperationStarted()
bb.event.fire_ui_handlers(event1, None) # All events match

event_log_handler = bb.event.LogHandler()
logger = logging.getLogger("BitBake")
logger.addHandler(event_log_handler)
logger1 = logging.getLogger("BitBake.Foo")
logger1.warning("Test warning LogRecord1") # Matches debug_domains level
logger1.info("Test info LogRecord") # Filtered out
logger2 = logging.getLogger("BitBake.Bar")
logger2.error("Test error LogRecord") # Matches filter base level
logger2.warning("Test warning LogRecord2") # Filtered out
logger.removeHandler(event_log_handler)

expected = ['OperationStarted',
'WARNING: Test warning LogRecord1',
'ERROR: Test error LogRecord']
self.assertEqual(self._test_ui1.event.event_calls, expected)
self.assertEqual(self._test_ui2.event.event_calls, expected)

def test_fire(self):
""" Test fire method used to trigger class and ui event handlers """
mask = ["bb.event.ConfigParsed"]
result = bb.event.register("event_handler1",
self._test_process.event_handler1,
mask)

self._test_ui1.event = Mock(spec_set=EventQueueStub)
result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
self.assertEqual(result, 1)

event1 = bb.event.ConfigParsed()
bb.event.fire(event1, None)
expected = [call(event1)]
self.assertEqual(self._test_process.event_handler1.call_args_list,
expected)
self.assertEqual(self._test_ui1.event.send.call_args_list,
expected)

def test_fire_from_worker(self):
""" Test fire_from_worker method """
self._test_ui1.event = Mock(spec_set=EventQueueStub)
result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
self.assertEqual(result, 1)
event1 = bb.event.ConfigParsed()
bb.event.fire_from_worker(event1, None)
expected = [call(event1)]
self.assertEqual(self._test_ui1.event.send.call_args_list,
expected)

def test_worker_fire(self):
""" Test the triggering of bb.event.worker_fire callback """
bb.event.worker_fire = Mock()
event = bb.event.Event()
bb.event.fire(event, None)
expected = [call(event, None)]
self.assertEqual(bb.event.worker_fire.call_args_list, expected)

def test_print_ui_queue(self):
""" Test print_ui_queue method """
event1 = bb.event.OperationStarted()
event2 = bb.event.OperationCompleted(total=123)
bb.event.fire(event1, None)
bb.event.fire(event2, None)
event_log_handler = bb.event.LogHandler()
logger = logging.getLogger("BitBake")
logger.addHandler(event_log_handler)
logger.info("Test info LogRecord")
logger.warning("Test warning LogRecord")
with self.assertLogs("BitBake", level="INFO") as cm:
bb.event.print_ui_queue()
logger.removeHandler(event_log_handler)
self.assertEqual(cm.output,
["INFO:BitBake:Test info LogRecord",
"WARNING:BitBake:Test warning LogRecord"])

def _set_threadlock_test_mockups(self):
""" Create UI event handler mockups used in enable and disable
threadlock tests """
def ui1_event_send(event):
if type(event) is bb.event.ConfigParsed:
self._threadlock_test_calls.append("w1_ui1")
if type(event) is bb.event.OperationStarted:
self._threadlock_test_calls.append("w2_ui1")
time.sleep(2)

def ui2_event_send(event):
if type(event) is bb.event.ConfigParsed:
self._threadlock_test_calls.append("w1_ui2")
if type(event) is bb.event.OperationStarted:
self._threadlock_test_calls.append("w2_ui2")
time.sleep(2)

self._threadlock_test_calls = []
self._test_ui1.event = EventQueueStub()
self._test_ui1.event.send = ui1_event_send
result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
self.assertEqual(result, 1)
self._test_ui2.event = EventQueueStub()
self._test_ui2.event.send = ui2_event_send
result = bb.event.register_UIHhandler(self._test_ui2, mainui=True)
self.assertEqual(result, 2)

def _set_and_run_threadlock_test_workers(self):
""" Create and run the workers used to trigger events in enable and
disable threadlock tests """
worker1 = threading.Thread(target=self._thread_lock_test_worker1)
worker2 = threading.Thread(target=self._thread_lock_test_worker2)
worker1.start()
time.sleep(1)
worker2.start()
worker1.join()
worker2.join()

def _thread_lock_test_worker1(self):
""" First worker used to fire the ConfigParsed event for enable and
disable threadlocks tests """
bb.event.fire(bb.event.ConfigParsed(), None)

def _thread_lock_test_worker2(self):
""" Second worker used to fire the OperationStarted event for enable
and disable threadlocks tests """
bb.event.fire(bb.event.OperationStarted(), None)

def test_enable_threadlock(self):
""" Test enable_threadlock method """
self._set_threadlock_test_mockups()
bb.event.enable_threadlock()
self._set_and_run_threadlock_test_workers()
# Calls to UI handlers should be in order as all the registered
# handlers for the event coming from the first worker should be
# called before processing the event from the second worker.
self.assertEqual(self._threadlock_test_calls,
["w1_ui1", "w1_ui2", "w2_ui1", "w2_ui2"])


def test_disable_threadlock(self):
""" Test disable_threadlock method """
self._set_threadlock_test_mockups()
bb.event.disable_threadlock()
self._set_and_run_threadlock_test_workers()
# Calls to UI handlers should be intertwined together. Thanks to the
# delay in the registered handlers for the event coming from the first
# worker, the event coming from the second worker starts being
# processed before finishing handling the first worker event.
self.assertEqual(self._threadlock_test_calls,
["w1_ui1", "w2_ui1", "w1_ui2", "w2_ui2"])

class EventClassesTest(unittest.TestCase):
|
||||
""" Event classes test class """
|
||||
|
||||
_worker_pid = 54321
|
||||
|
||||
def setUp(self):
|
||||
bb.event.worker_pid = EventClassesTest._worker_pid
|
||||
|
||||
def test_Event(self):
|
||||
""" Test the Event base class """
|
||||
event = bb.event.Event()
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_HeartbeatEvent(self):
|
||||
""" Test the HeartbeatEvent class """
|
||||
time = 10
|
||||
event = bb.event.HeartbeatEvent(time)
|
||||
self.assertEqual(event.time, time)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_OperationStarted(self):
|
||||
""" Test OperationStarted event class """
|
||||
msg = "Foo Bar"
|
||||
event = bb.event.OperationStarted(msg)
|
||||
self.assertEqual(event.msg, msg)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_OperationCompleted(self):
|
||||
""" Test OperationCompleted event class """
|
||||
msg = "Foo Bar"
|
||||
total = 123
|
||||
event = bb.event.OperationCompleted(total, msg)
|
||||
self.assertEqual(event.msg, msg)
|
||||
self.assertEqual(event.total, total)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_OperationProgress(self):
|
||||
""" Test OperationProgress event class """
|
||||
msg = "Foo Bar"
|
||||
total = 123
|
||||
current = 111
|
||||
event = bb.event.OperationProgress(current, total, msg)
|
||||
self.assertEqual(event.msg, msg + ": %s/%s" % (current, total))
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
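
    # Note (editorial): the Operation* events share a message convention;
    # OperationProgress composes its message as "<msg>: <current>/<total>",
    # e.g. "Foo Bar: 111/123" for the values used in the test above.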

    def test_ConfigParsed(self):
        """ Test the ConfigParsed class """
        event = bb.event.ConfigParsed()
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_MultiConfigParsed(self):
        """ Test MultiConfigParsed event class """
        mcdata = {"foobar": "Foo Bar"}
        event = bb.event.MultiConfigParsed(mcdata)
        self.assertEqual(event.mcdata, mcdata)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_RecipeEvent(self):
        """ Test RecipeEvent event base class """
        callback = lambda a: 2 * a
        event = bb.event.RecipeEvent(callback)
        self.assertEqual(event.fn(1), callback(1))
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_RecipePreFinalise(self):
        """ Test RecipePreFinalise event class """
        callback = lambda a: 2 * a
        event = bb.event.RecipePreFinalise(callback)
        self.assertEqual(event.fn(1), callback(1))
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_RecipeTaskPreProcess(self):
        """ Test RecipeTaskPreProcess event class """
        callback = lambda a: 2 * a
        tasklist = [("foobar", callback)]
        event = bb.event.RecipeTaskPreProcess(callback, tasklist)
        self.assertEqual(event.fn(1), callback(1))
        self.assertEqual(event.tasklist, tasklist)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_RecipeParsed(self):
        """ Test RecipeParsed event base class """
        callback = lambda a: 2 * a
        event = bb.event.RecipeParsed(callback)
        self.assertEqual(event.fn(1), callback(1))
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_StampUpdate(self):
        targets = ["foo", "bar"]
        stampfns = [lambda: "foobar"]
        event = bb.event.StampUpdate(targets, stampfns)
        self.assertEqual(event.targets, targets)
        self.assertEqual(event.stampPrefix, stampfns)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_BuildBase(self):
        """ Test base class for bitbake build events """
        name = "foo"
        pkgs = ["bar"]
        failures = 123
        event = bb.event.BuildBase(name, pkgs, failures)
        self.assertEqual(event.name, name)
        self.assertEqual(event.pkgs, pkgs)
        self.assertEqual(event.getFailures(), failures)
        name = event.name = "bar"
        pkgs = event.pkgs = ["foo"]
        self.assertEqual(event.name, name)
        self.assertEqual(event.pkgs, pkgs)
        self.assertEqual(event.getFailures(), failures)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_BuildInit(self):
        """ Test class for bitbake build invocation events """
        event = bb.event.BuildInit()
        self.assertEqual(event.name, None)
        self.assertEqual(event.pkgs, [])
        self.assertEqual(event.getFailures(), 0)
        name = event.name = "bar"
        pkgs = event.pkgs = ["foo"]
        self.assertEqual(event.name, name)
        self.assertEqual(event.pkgs, pkgs)
        self.assertEqual(event.getFailures(), 0)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_BuildStarted(self):
        """ Test class for build started events """
        name = "foo"
        pkgs = ["bar"]
        failures = 123
        event = bb.event.BuildStarted(name, pkgs, failures)
        self.assertEqual(event.name, name)
        self.assertEqual(event.pkgs, pkgs)
        self.assertEqual(event.getFailures(), failures)
        self.assertEqual(event.msg, "Building Started")
        name = event.name = "bar"
        pkgs = event.pkgs = ["foo"]
        msg = event.msg = "foobar"
        self.assertEqual(event.name, name)
        self.assertEqual(event.pkgs, pkgs)
        self.assertEqual(event.getFailures(), failures)
        self.assertEqual(event.msg, msg)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_BuildCompleted(self):
        """ Test class for build completed events """
        total = 1000
        name = "foo"
        pkgs = ["bar"]
        failures = 123
        interrupted = 1
        event = bb.event.BuildCompleted(total, name, pkgs, failures,
                                        interrupted)
        self.assertEqual(event.name, name)
        self.assertEqual(event.pkgs, pkgs)
        self.assertEqual(event.getFailures(), failures)
        self.assertEqual(event.msg, "Building Failed")
        event2 = bb.event.BuildCompleted(total, name, pkgs)
        self.assertEqual(event2.name, name)
        self.assertEqual(event2.pkgs, pkgs)
        self.assertEqual(event2.getFailures(), 0)
        self.assertEqual(event2.msg, "Building Succeeded")
        self.assertEqual(event2.pid, EventClassesTest._worker_pid)

    def test_DiskFull(self):
        """ Test DiskFull event class """
        dev = "/dev/foo"
        type = "ext4"
        freespace = "104M"
        mountpoint = "/"
        event = bb.event.DiskFull(dev, type, freespace, mountpoint)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_MonitorDiskEvent(self):
        """ Test MonitorDiskEvent class """
        available_bytes = 10000000
        free_bytes = 90000000
        total_bytes = 1000000000
        du = bb.event.DiskUsageSample(available_bytes, free_bytes,
                                      total_bytes)
        event = bb.event.MonitorDiskEvent(du)
        self.assertEqual(event.disk_usage.available_bytes, available_bytes)
        self.assertEqual(event.disk_usage.free_bytes, free_bytes)
        self.assertEqual(event.disk_usage.total_bytes, total_bytes)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_NoProvider(self):
        """ Test NoProvider event class """
        item = "foobar"
        event1 = bb.event.NoProvider(item)
        self.assertEqual(event1.getItem(), item)
        self.assertEqual(event1.isRuntime(), False)
        self.assertEqual(str(event1), "Nothing PROVIDES 'foobar'")
        runtime = True
        dependees = ["foo", "bar"]
        reasons = None
        close_matches = ["foibar", "footbar"]
        event2 = bb.event.NoProvider(item, runtime, dependees, reasons,
                                     close_matches)
        self.assertEqual(event2.isRuntime(), True)
        expected = ("Nothing RPROVIDES 'foobar' (but foo, bar RDEPENDS"
                    " on or otherwise requires it). Close matches:\n"
                    "  foibar\n"
                    "  footbar")
        self.assertEqual(str(event2), expected)
        reasons = ["Item does not exist on database"]
        close_matches = ["foibar", "footbar"]
        event3 = bb.event.NoProvider(item, runtime, dependees, reasons,
                                     close_matches)
        expected = ("Nothing RPROVIDES 'foobar' (but foo, bar RDEPENDS"
                    " on or otherwise requires it)\n"
                    "Item does not exist on database")
        self.assertEqual(str(event3), expected)
        self.assertEqual(event3.pid, EventClassesTest._worker_pid)

    def test_MultipleProviders(self):
        """ Test MultipleProviders event class """
        item = "foobar"
        candidates = ["foobarv1", "foobars"]
        event1 = bb.event.MultipleProviders(item, candidates)
        self.assertEqual(event1.isRuntime(), False)
        self.assertEqual(event1.getItem(), item)
        self.assertEqual(event1.getCandidates(), candidates)
        expected = ("Multiple providers are available for foobar (foobarv1,"
                    " foobars)\n"
                    "Consider defining a PREFERRED_PROVIDER entry to match "
                    "foobar")
        self.assertEqual(str(event1), expected)
        runtime = True
        event2 = bb.event.MultipleProviders(item, candidates, runtime)
        self.assertEqual(event2.isRuntime(), runtime)
        expected = ("Multiple providers are available for runtime foobar "
                    "(foobarv1, foobars)\n"
                    "Consider defining a PREFERRED_RPROVIDER entry to match "
                    "foobar")
        self.assertEqual(str(event2), expected)
        self.assertEqual(event2.pid, EventClassesTest._worker_pid)

    def test_ParseStarted(self):
        """ Test ParseStarted event class """
        total = 123
        event = bb.event.ParseStarted(total)
        self.assertEqual(event.msg, "Recipe parsing Started")
        self.assertEqual(event.total, total)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_ParseCompleted(self):
        """ Test ParseCompleted event class """
        cached = 10
        parsed = 13
        skipped = 7
        virtuals = 2
        masked = 1
        errors = 0
        total = 23
        event = bb.event.ParseCompleted(cached, parsed, skipped, masked,
                                        virtuals, errors, total)
        self.assertEqual(event.msg, "Recipe parsing Completed")
        expected = [cached, parsed, skipped, virtuals, masked, errors,
                    cached + parsed, total]
        actual = [event.cached, event.parsed, event.skipped, event.virtuals,
                  event.masked, event.errors, event.sofar, event.total]
        self.assertEqual(str(actual), str(expected))
        self.assertEqual(event.pid, EventClassesTest._worker_pid)
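
    # Note (editorial): 'sofar' is not passed in to ParseCompleted; the
    # event derives it itself from its arguments, which is why the expected
    # list above uses cached + parsed rather than a separate variable.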

    def test_ParseProgress(self):
        """ Test ParseProgress event class """
        current = 10
        total = 100
        event = bb.event.ParseProgress(current, total)
        self.assertEqual(event.msg,
                         "Recipe parsing" + ": %s/%s" % (current, total))
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_CacheLoadStarted(self):
        """ Test CacheLoadStarted event class """
        total = 123
        event = bb.event.CacheLoadStarted(total)
        self.assertEqual(event.msg, "Loading cache Started")
        self.assertEqual(event.total, total)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_CacheLoadProgress(self):
        """ Test CacheLoadProgress event class """
        current = 10
        total = 100
        event = bb.event.CacheLoadProgress(current, total)
        self.assertEqual(event.msg,
                         "Loading cache" + ": %s/%s" % (current, total))
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_CacheLoadCompleted(self):
        """ Test CacheLoadCompleted event class """
        total = 23
        num_entries = 12
        event = bb.event.CacheLoadCompleted(total, num_entries)
        self.assertEqual(event.msg, "Loading cache Completed")
        expected = [total, num_entries]
        actual = [event.total, event.num_entries]
        self.assertEqual(str(actual), str(expected))
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_TreeDataPreparationStarted(self):
        """ Test TreeDataPreparationStarted event class """
        event = bb.event.TreeDataPreparationStarted()
        self.assertEqual(event.msg, "Preparing tree data Started")
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_TreeDataPreparationProgress(self):
        """ Test TreeDataPreparationProgress event class """
        current = 10
        total = 100
        event = bb.event.TreeDataPreparationProgress(current, total)
        self.assertEqual(event.msg,
                         "Preparing tree data" + ": %s/%s" % (current, total))
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_TreeDataPreparationCompleted(self):
        """ Test TreeDataPreparationCompleted event class """
        total = 23
        event = bb.event.TreeDataPreparationCompleted(total)
        self.assertEqual(event.msg, "Preparing tree data Completed")
        self.assertEqual(event.total, total)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_DepTreeGenerated(self):
        """ Test DepTreeGenerated event class """
        depgraph = Mock()
        event = bb.event.DepTreeGenerated(depgraph)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_TargetsTreeGenerated(self):
        """ Test TargetsTreeGenerated event class """
        model = Mock()
        event = bb.event.TargetsTreeGenerated(model)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_ReachableStamps(self):
        """ Test ReachableStamps event class """
        stamps = [Mock(), Mock()]
        event = bb.event.ReachableStamps(stamps)
        self.assertEqual(event.stamps, stamps)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_FilesMatchingFound(self):
        """ Test FilesMatchingFound event class """
        pattern = "foo.*bar"
        matches = ["foobar"]
        event = bb.event.FilesMatchingFound(pattern, matches)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_ConfigFilesFound(self):
        """ Test ConfigFilesFound event class """
        variable = "FOO_BAR"
        values = ["foo", "bar"]
        event = bb.event.ConfigFilesFound(variable, values)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_ConfigFilePathFound(self):
        """ Test ConfigFilePathFound event class """
        path = "/foo/bar"
        event = bb.event.ConfigFilePathFound(path)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_message_classes(self):
        """ Test message event classes """
        msg = "foobar foo bar"
        event = bb.event.MsgBase(msg)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)
        event = bb.event.MsgDebug(msg)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)
        event = bb.event.MsgNote(msg)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)
        event = bb.event.MsgWarn(msg)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)
        event = bb.event.MsgError(msg)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)
        event = bb.event.MsgFatal(msg)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)
        event = bb.event.MsgPlain(msg)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_LogExecTTY(self):
        """ Test LogExecTTY event class """
        msg = "foo bar"
        prog = "foo.sh"
        sleep_delay = 10
        retries = 3
        event = bb.event.LogExecTTY(msg, prog, sleep_delay, retries)
        self.assertEqual(event.msg, msg)
        self.assertEqual(event.prog, prog)
        self.assertEqual(event.sleep_delay, sleep_delay)
        self.assertEqual(event.retries, retries)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def _throw_zero_division_exception(self):
        a = 1 / 0
        return

    def _worker_handler(self, event, d):
        self._returned_event = event
        return

    def test_LogHandler(self):
        """ Test LogHandler class """
        logger = logging.getLogger("TestEventClasses")
        logger.propagate = False
        handler = bb.event.LogHandler(logging.INFO)
        logger.addHandler(handler)
        bb.event.worker_fire = self._worker_handler
        try:
            self._throw_zero_division_exception()
        except ZeroDivisionError as ex:
            logger.exception(ex)
            event = self._returned_event
            try:
                pe = pickle.dumps(event)
                newevent = pickle.loads(pe)
            except:
                self.fail('Logged event is not serializable')
            self.assertEqual(event.taskpid, EventClassesTest._worker_pid)
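
    # Note (editorial): the pickle round-trip above matters because BitBake
    # passes events, including log records, between server, worker and UI
    # processes in pickled form; an unpicklable event would be lost in
    # transit rather than reaching the UI.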

    def test_MetadataEvent(self):
        """ Test MetadataEvent class """
        eventtype = "footype"
        eventdata = {"foo": "bar"}
        event = bb.event.MetadataEvent(eventtype, eventdata)
        self.assertEqual(event.type, eventtype)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_ProcessStarted(self):
        """ Test ProcessStarted class """
        processname = "foo"
        total = 9783128974
        event = bb.event.ProcessStarted(processname, total)
        self.assertEqual(event.processname, processname)
        self.assertEqual(event.total, total)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_ProcessProgress(self):
        """ Test ProcessProgress class """
        processname = "foo"
        progress = 243224
        event = bb.event.ProcessProgress(processname, progress)
        self.assertEqual(event.processname, processname)
        self.assertEqual(event.progress, progress)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_ProcessFinished(self):
        """ Test ProcessFinished class """
        processname = "foo"
        total = 1242342344
        event = bb.event.ProcessFinished(processname)
        self.assertEqual(event.processname, processname)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_SanityCheck(self):
        """ Test SanityCheck class """
        event1 = bb.event.SanityCheck()
        self.assertEqual(event1.generateevents, True)
        self.assertEqual(event1.pid, EventClassesTest._worker_pid)
        generateevents = False
        event2 = bb.event.SanityCheck(generateevents)
        self.assertEqual(event2.generateevents, generateevents)
        self.assertEqual(event2.pid, EventClassesTest._worker_pid)

    def test_SanityCheckPassed(self):
        """ Test SanityCheckPassed class """
        event = bb.event.SanityCheckPassed()
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_SanityCheckFailed(self):
        """ Test SanityCheckFailed class """
        msg = "The sanity test failed."
        event1 = bb.event.SanityCheckFailed(msg)
        self.assertEqual(event1.pid, EventClassesTest._worker_pid)
        network_error = True
        event2 = bb.event.SanityCheckFailed(msg, network_error)
        self.assertEqual(event2.pid, EventClassesTest._worker_pid)

    def test_network_event_classes(self):
        """ Test network event classes """
        event1 = bb.event.NetworkTest()
        generateevents = False
        self.assertEqual(event1.pid, EventClassesTest._worker_pid)
        event2 = bb.event.NetworkTest(generateevents)
        self.assertEqual(event2.pid, EventClassesTest._worker_pid)
        event3 = bb.event.NetworkTestPassed()
        self.assertEqual(event3.pid, EventClassesTest._worker_pid)
        event4 = bb.event.NetworkTestFailed()
        self.assertEqual(event4.pid, EventClassesTest._worker_pid)

    def test_FindSigInfoResult(self):
        """ Test FindSigInfoResult event class """
        result = [Mock()]
        event = bb.event.FindSigInfoResult(result)
        self.assertEqual(event.result, result)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)
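
# Sketch (editorial, not part of the original file): these test cases can be
# run standalone with bitbake's test runner, assuming bitbake's bin/ and
# lib/ directories are on the path:
#
#   $ bitbake-selftest bb.tests.event
#
# or via the standard unittest machinery:
#
#   $ python3 -m unittest bb.tests.event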

File diff suppressed because it is too large
@@ -44,13 +44,9 @@ C = "3"
"""

    def setUp(self):
        self.origdir = os.getcwd()
        self.d = bb.data.init()
        bb.parse.siggen = bb.siggen.init(self.d)

    def tearDown(self):
        os.chdir(self.origdir)

    def parsehelper(self, content, suffix = ".bb"):

        f = tempfile.NamedTemporaryFile(suffix = suffix)
@@ -62,9 +58,9 @@ C = "3"
    def test_parse_simple(self):
        f = self.parsehelper(self.testfile)
        d = bb.parse.handle(f.name, self.d)['']
        self.assertEqual(d.getVar("A"), "1")
        self.assertEqual(d.getVar("B"), "2")
        self.assertEqual(d.getVar("C"), "3")
        self.assertEqual(d.getVar("A", True), "1")
        self.assertEqual(d.getVar("B", True), "2")
        self.assertEqual(d.getVar("C", True), "3")

    def test_parse_incomplete_function(self):
        testfileB = self.testfile.replace("}", "")
@@ -84,31 +80,10 @@ unset B[flag]
    def test_parse_unset(self):
        f = self.parsehelper(self.unsettest)
        d = bb.parse.handle(f.name, self.d)['']
        self.assertEqual(d.getVar("A"), None)
        self.assertEqual(d.getVarFlag("A","flag"), None)
        self.assertEqual(d.getVar("B"), "2")

    exporttest = """
A = "a"
export B = "b"
export C
exportD = "d"
"""

    def test_parse_exports(self):
        f = self.parsehelper(self.exporttest)
        d = bb.parse.handle(f.name, self.d)['']
        self.assertEqual(d.getVar("A"), "a")
        self.assertIsNone(d.getVarFlag("A", "export"))
        self.assertEqual(d.getVar("B"), "b")
        self.assertEqual(d.getVarFlag("B", "export"), 1)
        self.assertIsNone(d.getVar("C"))
        self.assertEqual(d.getVarFlag("C", "export"), 1)
        self.assertIsNone(d.getVar("D"))
        self.assertIsNone(d.getVarFlag("D", "export"))
        self.assertEqual(d.getVar("exportD"), "d")
        self.assertIsNone(d.getVarFlag("exportD", "export"))

        self.assertEqual(d.getVar("A", True), None)
        self.assertEqual(d.getVarFlag("A","flag", True), None)
        self.assertEqual(d.getVar("B", True), "2")


    overridetest = """
RRECOMMENDS_${PN} = "a"
@@ -120,11 +95,11 @@ PN = "gtk+"
    def test_parse_overrides(self):
        f = self.parsehelper(self.overridetest)
        d = bb.parse.handle(f.name, self.d)['']
        self.assertEqual(d.getVar("RRECOMMENDS"), "b")
        self.assertEqual(d.getVar("RRECOMMENDS", True), "b")
        bb.data.expandKeys(d)
        self.assertEqual(d.getVar("RRECOMMENDS"), "b")
        self.assertEqual(d.getVar("RRECOMMENDS", True), "b")
        d.setVar("RRECOMMENDS_gtk+", "c")
        self.assertEqual(d.getVar("RRECOMMENDS"), "c")
        self.assertEqual(d.getVar("RRECOMMENDS", True), "c")

    overridetest2 = """
EXTRA_OECONF = ""
@@ -137,7 +112,7 @@ EXTRA_OECONF_append = " c"
        d = bb.parse.handle(f.name, self.d)['']
        d.appendVar("EXTRA_OECONF", " d")
        d.setVar("OVERRIDES", "class-target")
        self.assertEqual(d.getVar("EXTRA_OECONF"), "b c d")
        self.assertEqual(d.getVar("EXTRA_OECONF", True), "b c d")

    overridetest3 = """
DESCRIPTION = "A"
@@ -149,11 +124,11 @@ PN = "bc"
        f = self.parsehelper(self.overridetest3)
        d = bb.parse.handle(f.name, self.d)['']
        bb.data.expandKeys(d)
        self.assertEqual(d.getVar("DESCRIPTION_bc-dev"), "A B")
        self.assertEqual(d.getVar("DESCRIPTION_bc-dev", True), "A B")
        d.setVar("DESCRIPTION", "E")
        d.setVar("DESCRIPTION_bc-dev", "C D")
        d.setVar("OVERRIDES", "bc-dev")
        self.assertEqual(d.getVar("DESCRIPTION"), "C D")
        self.assertEqual(d.getVar("DESCRIPTION", True), "C D")


    classextend = """
@@ -184,6 +159,6 @@ python () {
        alldata = bb.parse.handle(f.name, self.d)
        d1 = alldata['']
        d2 = alldata[cls.name]
        self.assertEqual(d1.getVar("VAR_var"), "B")
        self.assertEqual(d2.getVar("VAR_var"), None)
        self.assertEqual(d1.getVar("VAR_var", True), "B")
        self.assertEqual(d2.getVar("VAR_var", True), None)


@@ -1,8 +1,7 @@
# tinfoil: a simple wrapper around cooker for bitbake-based command-line utilities
#
# Copyright (C) 2012-2017 Intel Corporation
# Copyright (C) 2012 Intel Corporation
# Copyright (C) 2011 Mentor Graphics Corporation
# Copyright (C) 2006-2012 Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
@@ -18,319 +17,50 @@
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import logging
import warnings
import os
import sys
import atexit
import re
from collections import OrderedDict, defaultdict

import bb.cache
import bb.cooker
import bb.providers
import bb.taskdata
import bb.utils
import bb.command
import bb.remotedata
from bb.cooker import state, BBCooker, CookerFeatures
from bb.cookerdata import CookerConfiguration, ConfigParameters
from bb.main import setup_bitbake, BitBakeConfigParameters, BBMainException
import bb.fetch2


# We need this in order to shut down the connection to the bitbake server,
# otherwise the process will never properly exit
_server_connections = []
def _terminate_connections():
    for connection in _server_connections:
        connection.terminate()
atexit.register(_terminate_connections)

class TinfoilUIException(Exception):
    """Exception raised when the UI returns non-zero from its main function"""
    def __init__(self, returncode):
        self.returncode = returncode
    def __repr__(self):
        return 'UI module main returned %d' % self.returncode

class TinfoilCommandFailed(Exception):
    """Exception raised when run_command fails"""

class TinfoilDataStoreConnector:
    """Connector object used to enable access to datastore objects via tinfoil"""

    def __init__(self, tinfoil, dsindex):
        self.tinfoil = tinfoil
        self.dsindex = dsindex
    def getVar(self, name):
        value = self.tinfoil.run_command('dataStoreConnectorFindVar', self.dsindex, name)
        overrides = None
        if isinstance(value, dict):
            if '_connector_origtype' in value:
                value['_content'] = self.tinfoil._reconvert_type(value['_content'], value['_connector_origtype'])
                del value['_connector_origtype']
            if '_connector_overrides' in value:
                overrides = value['_connector_overrides']
                del value['_connector_overrides']
        return value, overrides
    def getKeys(self):
        return set(self.tinfoil.run_command('dataStoreConnectorGetKeys', self.dsindex))
    def getVarHistory(self, name):
        return self.tinfoil.run_command('dataStoreConnectorGetVarHistory', self.dsindex, name)
    def expandPythonRef(self, varname, expr, d):
        ds = bb.remotedata.RemoteDatastores.transmit_datastore(d)
        ret = self.tinfoil.run_command('dataStoreConnectorExpandPythonRef', ds, varname, expr)
        return ret
    def setVar(self, varname, value):
        if self.dsindex is None:
            self.tinfoil.run_command('setVariable', varname, value)
        else:
            # Not currently implemented - indicate that setting should
            # be redirected to local side
            return True
    def setVarFlag(self, varname, flagname, value):
        if self.dsindex is None:
            self.tinfoil.run_command('dataStoreConnectorSetVarFlag', self.dsindex, varname, flagname, value)
        else:
            # Not currently implemented - indicate that setting should
            # be redirected to local side
            return True
    def delVar(self, varname):
        if self.dsindex is None:
            self.tinfoil.run_command('dataStoreConnectorDelVar', self.dsindex, varname)
        else:
            # Not currently implemented - indicate that setting should
            # be redirected to local side
            return True
    def delVarFlag(self, varname, flagname):
        if self.dsindex is None:
            self.tinfoil.run_command('dataStoreConnectorDelVar', self.dsindex, varname, flagname)
        else:
            # Not currently implemented - indicate that setting should
            # be redirected to local side
            return True
    def renameVar(self, name, newname):
        if self.dsindex is None:
            self.tinfoil.run_command('dataStoreConnectorRenameVar', self.dsindex, name, newname)
        else:
            # Not currently implemented - indicate that setting should
            # be redirected to local side
            return True

class TinfoilCookerAdapter:
    """
    Provide an adapter for existing code that expects to access a cooker object via Tinfoil,
    since now Tinfoil is on the client side it no longer has direct access.
    """

    class TinfoilCookerCollectionAdapter:
        """ cooker.collection adapter """
        def __init__(self, tinfoil):
            self.tinfoil = tinfoil
        def get_file_appends(self, fn):
            return self.tinfoil.get_file_appends(fn)
        def __getattr__(self, name):
            if name == 'overlayed':
                return self.tinfoil.get_overlayed_recipes()
            elif name == 'bbappends':
                return self.tinfoil.run_command('getAllAppends')
            else:
                raise AttributeError("%s instance has no attribute '%s'" % (self.__class__.__name__, name))

    class TinfoilRecipeCacheAdapter:
        """ cooker.recipecache adapter """
        def __init__(self, tinfoil):
            self.tinfoil = tinfoil
            self._cache = {}

        def get_pkg_pn_fn(self):
            pkg_pn = defaultdict(list, self.tinfoil.run_command('getRecipes') or [])
            pkg_fn = {}
            for pn, fnlist in pkg_pn.items():
                for fn in fnlist:
                    pkg_fn[fn] = pn
            self._cache['pkg_pn'] = pkg_pn
            self._cache['pkg_fn'] = pkg_fn

        def __getattr__(self, name):
            # Grab these only when they are requested since they aren't always used
            if name in self._cache:
                return self._cache[name]
            elif name == 'pkg_pn':
                self.get_pkg_pn_fn()
                return self._cache[name]
            elif name == 'pkg_fn':
                self.get_pkg_pn_fn()
                return self._cache[name]
            elif name == 'deps':
                attrvalue = defaultdict(list, self.tinfoil.run_command('getRecipeDepends') or [])
            elif name == 'rundeps':
                attrvalue = defaultdict(lambda: defaultdict(list), self.tinfoil.run_command('getRuntimeDepends') or [])
            elif name == 'runrecs':
                attrvalue = defaultdict(lambda: defaultdict(list), self.tinfoil.run_command('getRuntimeRecommends') or [])
            elif name == 'pkg_pepvpr':
                attrvalue = self.tinfoil.run_command('getRecipeVersions') or {}
            elif name == 'inherits':
                attrvalue = self.tinfoil.run_command('getRecipeInherits') or {}
            elif name == 'bbfile_priority':
                attrvalue = self.tinfoil.run_command('getBbFilePriority') or {}
            elif name == 'pkg_dp':
                attrvalue = self.tinfoil.run_command('getDefaultPreference') or {}
            elif name == 'fn_provides':
                attrvalue = self.tinfoil.run_command('getRecipeProvides') or {}
            elif name == 'packages':
                attrvalue = self.tinfoil.run_command('getRecipePackages') or {}
            elif name == 'packages_dynamic':
                attrvalue = self.tinfoil.run_command('getRecipePackagesDynamic') or {}
            elif name == 'rproviders':
                attrvalue = self.tinfoil.run_command('getRProviders') or {}
            else:
                raise AttributeError("%s instance has no attribute '%s'" % (self.__class__.__name__, name))

            self._cache[name] = attrvalue
            return attrvalue

    def __init__(self, tinfoil):
        self.tinfoil = tinfoil
        self.collection = self.TinfoilCookerCollectionAdapter(tinfoil)
        self.recipecaches = {}
        # FIXME all machines
        self.recipecaches[''] = self.TinfoilRecipeCacheAdapter(tinfoil)
        self._cache = {}
    def __getattr__(self, name):
        # Grab these only when they are requested since they aren't always used
        if name in self._cache:
            return self._cache[name]
        elif name == 'skiplist':
            attrvalue = self.tinfoil.get_skipped_recipes()
        elif name == 'bbfile_config_priorities':
            ret = self.tinfoil.run_command('getLayerPriorities')
            bbfile_config_priorities = []
            for collection, pattern, regex, pri in ret:
                bbfile_config_priorities.append((collection, pattern, re.compile(regex), pri))

            attrvalue = bbfile_config_priorities
        else:
            raise AttributeError("%s instance has no attribute '%s'" % (self.__class__.__name__, name))

        self._cache[name] = attrvalue
        return attrvalue

    def findBestProvider(self, pn):
        return self.tinfoil.find_best_provider(pn)


class TinfoilRecipeInfo:
    """
    Provides a convenient representation of the cached information for a single recipe.
    Some attributes are set on construction, others are read on-demand (which internally
    may result in a remote procedure call to the bitbake server the first time).
    Note that only information which is cached is available through this object - if
    you need other variable values you will need to parse the recipe using
    Tinfoil.parse_recipe().
    """
    def __init__(self, recipecache, d, pn, fn, fns):
        self._recipecache = recipecache
        self._d = d
        self.pn = pn
        self.fn = fn
        self.fns = fns
        self.inherit_files = recipecache.inherits[fn]
        self.depends = recipecache.deps[fn]
        (self.pe, self.pv, self.pr) = recipecache.pkg_pepvpr[fn]
        self._cached_packages = None
        self._cached_rprovides = None
        self._cached_packages_dynamic = None

    def __getattr__(self, name):
        if name == 'alternates':
            return [x for x in self.fns if x != self.fn]
        elif name == 'rdepends':
            return self._recipecache.rundeps[self.fn]
        elif name == 'rrecommends':
            return self._recipecache.runrecs[self.fn]
        elif name == 'provides':
            return self._recipecache.fn_provides[self.fn]
        elif name == 'packages':
            if self._cached_packages is None:
                self._cached_packages = []
                for pkg, fns in self._recipecache.packages.items():
                    if self.fn in fns:
                        self._cached_packages.append(pkg)
            return self._cached_packages
        elif name == 'packages_dynamic':
            if self._cached_packages_dynamic is None:
                self._cached_packages_dynamic = []
                for pkg, fns in self._recipecache.packages_dynamic.items():
                    if self.fn in fns:
                        self._cached_packages_dynamic.append(pkg)
            return self._cached_packages_dynamic
        elif name == 'rprovides':
            if self._cached_rprovides is None:
                self._cached_rprovides = []
                for pkg, fns in self._recipecache.rproviders.items():
                    if self.fn in fns:
                        self._cached_rprovides.append(pkg)
            return self._cached_rprovides
        else:
            raise AttributeError("%s instance has no attribute '%s'" % (self.__class__.__name__, name))
    def inherits(self, only_recipe=False):
        """
        Get the inherited classes for a recipe. Returns the class names only.
        Parameters:
            only_recipe: True to return only the classes inherited by the recipe
                         itself, False to return all classes inherited within
                         the context for the recipe (which includes globally
                         inherited classes).
        """
        if only_recipe:
            global_inherit = [x for x in (self._d.getVar('BBINCLUDED') or '').split() if x.endswith('.bbclass')]
        else:
            global_inherit = []
        for clsfile in self.inherit_files:
            if only_recipe and clsfile in global_inherit:
                continue
            clsname = os.path.splitext(os.path.basename(clsfile))[0]
            yield clsname
    def __str__(self):
        return '%s' % self.pn
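
# Sketch (editorial, not part of the original source): TinfoilRecipeInfo
# objects are normally obtained via Tinfoil.get_recipe_info() or
# Tinfoil.all_recipes(); cached attributes are then available without a
# full recipe parse. 'busybox' below is just an illustrative target name.
#
#   info = tinfoil.get_recipe_info('busybox')
#   if info:
#       print(info.pn, info.pv, list(info.inherits()))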


class Tinfoil:
    """
    Tinfoil - an API for scripts and utilities to query
    BitBake internals and perform build operations.
    """
    def __init__(self, output=sys.stdout, tracking=False):
        # Needed to avoid deprecation warnings with python 2.6
        warnings.filterwarnings("ignore", category=DeprecationWarning)

    def __init__(self, output=sys.stdout, tracking=False, setup_logging=True):
        """
        Create a new tinfoil object.
        Parameters:
            output: specifies where console output should be sent. Defaults
                    to sys.stdout.
            tracking: True to enable variable history tracking, False to
                    disable it (default). Enabling this has a minor
                    performance impact so typically it isn't enabled
                    unless you need to query variable history.
            setup_logging: True to setup a logger so that things like
                    bb.warn() will work immediately and timeout warnings
                    are visible; False to let BitBake do this itself.
        """
        # Set up logging
        self.logger = logging.getLogger('BitBake')
        self.config_data = None
        self.cooker = None
        self.tracking = tracking
        self.ui_module = None
        self.server_connection = None
        self.recipes_parsed = False
        self.quiet = 0
        self.oldhandlers = self.logger.handlers[:]
        if setup_logging:
            # This is the *client-side* logger, nothing to do with
            # logging messages from the server
            bb.msg.logger_create('BitBake', output)
            self.localhandlers = []
            for handler in self.logger.handlers:
                if handler not in self.oldhandlers:
                    self.localhandlers.append(handler)
        self._log_hdlr = logging.StreamHandler(output)
        bb.msg.addDefaultlogFilter(self._log_hdlr)
        format = bb.msg.BBLogFormatter("%(levelname)s: %(message)s")
        if output.isatty():
            format.enable_color()
        self._log_hdlr.setFormatter(format)
        self.logger.addHandler(self._log_hdlr)

        self.config = CookerConfiguration()
        configparams = TinfoilConfigParameters(parse_only=True)
        self.config.setConfigParameters(configparams)
        self.config.setServerRegIdleCallback(self.register_idle_function)
        features = []
        if tracking:
            features.append(CookerFeatures.BASEDATASTORE_TRACKING)
        cleanedvars = bb.utils.clean_environment()
        self.cooker = BBCooker(self.config, features)
        self.config_data = self.cooker.data
        bb.providers.logger.setLevel(logging.ERROR)
        self.cooker_data = None
        for k in cleanedvars:
            os.environ[k] = cleanedvars[k]

    def register_idle_function(self, function, data):
        pass

    def __enter__(self):
        return self
@@ -338,290 +68,30 @@ class Tinfoil:
    def __exit__(self, type, value, traceback):
        self.shutdown()

    def prepare(self, config_only=False, config_params=None, quiet=0, extra_features=None):
        """
        Prepares the underlying BitBake system to be used via tinfoil.
        This function must be called prior to calling any of the other
        functions in the API.
        NOTE: if you call prepare() you must absolutely call shutdown()
        before your code terminates. You can use a "with" block to ensure
        this happens e.g.

            with bb.tinfoil.Tinfoil() as tinfoil:
                tinfoil.prepare()
                ...

        Parameters:
            config_only: True to read only the configuration and not load
                        the cache / parse recipes. This is useful if you just
                        want to query the value of a variable at the global
                        level or you want to do anything else that doesn't
                        involve knowing anything about the recipes in the
                        current configuration. False loads the cache / parses
                        recipes.
            config_params: optionally specify your own configuration
                        parameters. If not specified an instance of
                        TinfoilConfigParameters will be created internally.
            quiet: quiet level controlling console output - equivalent
                        to bitbake's -q/--quiet option. Default of 0 gives
                        the same output level as normal bitbake execution.
            extra_features: extra features to be added to the feature
                        set requested from the server. See
                        CookerFeatures._feature_list for possible
                        features.
        """
        self.quiet = quiet

        if self.tracking:
            extrafeatures = [bb.cooker.CookerFeatures.BASEDATASTORE_TRACKING]
        else:
            extrafeatures = []

        if extra_features:
            extrafeatures += extra_features

        if not config_params:
            config_params = TinfoilConfigParameters(config_only=config_only, quiet=quiet)

        cookerconfig = CookerConfiguration()
        cookerconfig.setConfigParameters(config_params)

        if not config_only:
            # Disable local loggers because the UI module is going to set up its own
            for handler in self.localhandlers:
                self.logger.handlers.remove(handler)
            self.localhandlers = []

        self.server_connection, ui_module = setup_bitbake(config_params,
                                                          cookerconfig,
                                                          extrafeatures)

        self.ui_module = ui_module

        # Ensure the path to bitbake's bin directory is in PATH so that things like
        # bitbake-worker can be run (usually this is the case, but it doesn't have to be)
        path = os.getenv('PATH').split(':')
        bitbakebinpath = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', 'bin'))
        for entry in path:
            if entry.endswith(os.sep):
                entry = entry[:-1]
            if os.path.abspath(entry) == bitbakebinpath:
                break
        else:
            path.insert(0, bitbakebinpath)
            os.environ['PATH'] = ':'.join(path)

        if self.server_connection:
            _server_connections.append(self.server_connection)
            if config_only:
                config_params.updateToServer(self.server_connection.connection, os.environ.copy())
                self.run_command('parseConfiguration')
            else:
                self.run_actions(config_params)
                self.recipes_parsed = True

            self.config_data = bb.data.init()
            connector = TinfoilDataStoreConnector(self, None)
            self.config_data.setVar('_remote_data', connector)
            self.cooker = TinfoilCookerAdapter(self)
            self.cooker_data = self.cooker.recipecaches['']
        else:
            raise Exception('Failed to start bitbake server')

    def run_actions(self, config_params):
        """
        Run the actions specified in config_params through the UI.
        """
        ret = self.ui_module.main(self.server_connection.connection, self.server_connection.events, config_params)
        if ret:
            raise TinfoilUIException(ret)

    def parseRecipes(self):
        """
        Legacy function - use parse_recipes() instead.
        """
        self.parse_recipes()
        sys.stderr.write("Parsing recipes..")
        self.logger.setLevel(logging.WARNING)

    def parse_recipes(self):
        """
        Load information on all recipes. Normally you should specify
        config_only=False when calling prepare() instead of using this
        function; this function is designed for situations where you need
        to initialise Tinfoil and use it with config_only=True first and
        then conditionally call this function to parse recipes later.
        """
        config_params = TinfoilConfigParameters(config_only=False)
        self.run_actions(config_params)
        self.recipes_parsed = True
        try:
            while self.cooker.state in (state.initial, state.parsing):
                self.cooker.updateCache()
        except KeyboardInterrupt:
            self.cooker.shutdown()
            self.cooker.updateCache()
            sys.exit(2)

    def run_command(self, command, *params):
        """
        Run a command on the server (as implemented in bb.command).
        Note that there are two types of command - synchronous and
        asynchronous; in order to receive the results of asynchronous
        commands you will need to set an appropriate event mask
        using set_event_mask() and listen for the result using
        wait_event() - with the correct event mask you'll at least get
        bb.command.CommandCompleted and possibly other events before
        that depending on the command.
        """
        if not self.server_connection:
            raise Exception('Not connected to server (did you call .prepare()?)')
        self.logger.setLevel(logging.INFO)
        sys.stderr.write("done.\n")

        commandline = [command]
        if params:
            commandline.extend(params)
        result = self.server_connection.connection.runCommand(commandline)
        if result[1]:
            raise TinfoilCommandFailed(result[1])
        return result[0]
        self.cooker_data = self.cooker.recipecaches['']
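
    # Sketch (editorial, not part of the original source): typical
    # synchronous use of run_command(), assuming a prepared tinfoil
    # instance; 'getVariable' is one of the synchronous commands
    # implemented in bb.command.
    #
    #   with bb.tinfoil.Tinfoil() as tinfoil:
    #       tinfoil.prepare(config_only=True)
    #       machine = tinfoil.run_command('getVariable', 'MACHINE')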

    def set_event_mask(self, eventlist):
        """Set the event mask which will be applied within wait_event()"""
        if not self.server_connection:
            raise Exception('Not connected to server (did you call .prepare()?)')
        llevel, debug_domains = bb.msg.constructLogOptions()
        ret = self.run_command('setEventMask', self.server_connection.connection.getEventHandle(), llevel, debug_domains, eventlist)
        if not ret:
            raise Exception('setEventMask failed')

    def wait_event(self, timeout=0):
        """
        Wait for an event from the server for the specified time.
        A timeout of 0 means don't wait if there are no events in the queue.
        Returns the next event in the queue or None if the timeout was
        reached. Note that in order to receive any events you will
        first need to set the internal event mask using set_event_mask()
        (otherwise whatever event mask the UI set up will be in effect).
        """
        if not self.server_connection:
            raise Exception('Not connected to server (did you call .prepare()?)')
        return self.server_connection.events.waitEvent(timeout)
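
    # Sketch (editorial, not part of the original source): receiving the
    # result of an asynchronous command through the event queue, under the
    # mask set via set_event_mask(). Target and task names are illustrative.
    #
    #   tinfoil.set_event_mask(['bb.command.CommandCompleted',
    #                           'bb.command.CommandFailed'])
    #   tinfoil.run_command('buildTargets', ['quilt'], 'fetch')
    #   while True:
    #       event = tinfoil.wait_event(0.25)
    #       if isinstance(event, bb.command.CommandCompleted):
    #           break
    #       if isinstance(event, bb.command.CommandFailed):
    #           raise RuntimeError(str(event))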

    def get_overlayed_recipes(self):
        """
        Find recipes which are overlayed (i.e. where recipes exist in multiple layers)
        """
        return defaultdict(list, self.run_command('getOverlayedRecipes'))

    def get_skipped_recipes(self):
        """
        Find recipes which were skipped (i.e. SkipRecipe was raised
        during parsing).
        """
        return OrderedDict(self.run_command('getSkippedRecipes'))

    def get_all_providers(self):
        return defaultdict(list, self.run_command('allProviders'))

    def find_providers(self):
        return self.run_command('findProviders')

    def find_best_provider(self, pn):
        return self.run_command('findBestProvider', pn)

    def get_runtime_providers(self, rdep):
        return self.run_command('getRuntimeProviders', rdep)

    def get_recipe_file(self, pn):
        """
        Get the file name for the specified recipe/target. Raises
        bb.providers.NoProvider if there is no match or the recipe was
        skipped.
        """
        best = self.find_best_provider(pn)
        if not best or (len(best) > 3 and not best[3]):
            skiplist = self.get_skipped_recipes()
            taskdata = bb.taskdata.TaskData(None, skiplist=skiplist)
            skipreasons = taskdata.get_reasons(pn)
            if skipreasons:
                raise bb.providers.NoProvider('%s is unavailable:\n  %s' % (pn, ' \n'.join(skipreasons)))
    def prepare(self, config_only = False):
        if not self.cooker_data:
            if config_only:
                self.cooker.parseConfiguration()
                self.cooker_data = self.cooker.recipecaches['']
            else:
                raise bb.providers.NoProvider('Unable to find any recipe file matching "%s"' % pn)
        return best[3]

    def get_file_appends(self, fn):
        """
        Find the bbappends for a recipe file
        """
        return self.run_command('getFileAppends', fn)

    def all_recipes(self, mc='', sort=True):
        """
        Enable iterating over all recipes in the current configuration.
        Returns an iterator over TinfoilRecipeInfo objects created on demand.
        Parameters:
            mc: The multiconfig, default of '' uses the main configuration.
            sort: True to sort recipes alphabetically (default), False otherwise
        """
        recipecache = self.cooker.recipecaches[mc]
        if sort:
            recipes = sorted(recipecache.pkg_pn.items())
        else:
            recipes = recipecache.pkg_pn.items()
        for pn, fns in recipes:
            prov = self.find_best_provider(pn)
            recipe = TinfoilRecipeInfo(recipecache,
                                       self.config_data,
                                       pn=pn,
                                       fn=prov[3],
                                       fns=fns)
            yield recipe
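
    # Sketch (editorial, not part of the original source): iterating over
    # recipes, assuming prepare() was called with config_only=False:
    #
    #   for recipe in tinfoil.all_recipes():
    #       print('%s %s' % (recipe.pn, recipe.pv))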

    def all_recipe_files(self, mc='', variants=True, preferred_only=False):
        """
        Enable iterating over all recipe files in the current configuration.
        Returns an iterator over file paths.
        Parameters:
            mc: The multiconfig, default of '' uses the main configuration.
            variants: True to include variants of recipes created through
                      BBCLASSEXTEND (default) or False to exclude them
            preferred_only: True to include only the preferred recipe where
                      multiple exist providing the same PN, False to list
                      all recipes
        """
        recipecache = self.cooker.recipecaches[mc]
        if preferred_only:
            files = []
            for pn in recipecache.pkg_pn.keys():
                prov = self.find_best_provider(pn)
                files.append(prov[3])
        else:
            files = recipecache.pkg_fn.keys()
        for fn in sorted(files):
            if not variants and fn.startswith('virtual:'):
                continue
            yield fn


    def get_recipe_info(self, pn, mc=''):
        """
        Get information on a specific recipe in the current configuration by name (PN).
        Returns a TinfoilRecipeInfo object created on demand.
        Parameters:
            mc: The multiconfig, default of '' uses the main configuration.
        """
        recipecache = self.cooker.recipecaches[mc]
        prov = self.find_best_provider(pn)
        fn = prov[3]
        if fn:
            actual_pn = recipecache.pkg_fn[fn]
            recipe = TinfoilRecipeInfo(recipecache,
                                       self.config_data,
                                       pn=actual_pn,
                                       fn=fn,
                                       fns=recipecache.pkg_pn[actual_pn])
            return recipe
        else:
            return None

    def parse_recipe(self, pn):
        """
        Parse the specified recipe and return a datastore object
        representing the environment for the recipe.
        """
        fn = self.get_recipe_file(pn)
        return self.parse_recipe_file(fn)
        self.parseRecipes()

    def parse_recipe_file(self, fn, appends=True, appendlist=None, config_data=None):
        """
@@ -638,263 +108,43 @@ class Tinfoil:
        specify config_data then you cannot use a virtual
        specification for fn.
        """
        if self.tracking:
            # Enable history tracking just for the parse operation
            self.run_command('enableDataTracking')
        try:
            if appends and appendlist == []:
                appends = False
            if config_data:
                dctr = bb.remotedata.RemoteDatastores.transmit_datastore(config_data)
                dscon = self.run_command('parseRecipeFile', fn, appends, appendlist, dctr)
        if appends and appendlist == []:
            appends = False
        if appends:
            if appendlist:
                appendfiles = appendlist
            else:
                dscon = self.run_command('parseRecipeFile', fn, appends, appendlist)
            if dscon:
                return self._reconvert_type(dscon, 'DataStoreConnectionHandle')
            else:
                return None
        finally:
            if self.tracking:
                self.run_command('disableDataTracking')

    def build_file(self, buildfile, task, internal=True):
        """
        Runs the specified task for just a single recipe (i.e. no dependencies).
        This is equivalent to bitbake -b, except with the default internal=True
        no warning about dependencies will be produced, normal info messages
        from the runqueue will be silenced and BuildInit, BuildStarted and
        BuildCompleted events will not be fired.
        """
        return self.run_command('buildFile', buildfile, task, internal)

    def build_targets(self, targets, task=None, handle_events=True, extra_events=None, event_callback=None):
        """
        Builds the specified targets. This is equivalent to a normal invocation
        of bitbake. Has built-in event handling which is enabled by default and
        can be extended if needed.
        Parameters:
            targets:
                One or more targets to build. Can be a list or a
                space-separated string.
            task:
                The task to run; if None then the value of BB_DEFAULT_TASK
                will be used. Default None.
            handle_events:
                True to handle events in a similar way to normal bitbake
                invocation with knotty; False to return immediately (on the
                assumption that the caller will handle the events instead).
                Default True.
            extra_events:
                An optional list of events to add to the event mask (if
                handle_events=True). If you add events here you also need
                to specify a callback function in event_callback that will
                handle the additional events. Default None.
            event_callback:
                An optional function taking a single parameter which
                will be called first upon receiving any event (if
                handle_events=True) so that the caller can override or
                extend the event handling. Default None.
        """
        if isinstance(targets, str):
            targets = targets.split()
        if not task:
            task = self.config_data.getVar('BB_DEFAULT_TASK')

        if handle_events:
            # A reasonable set of default events matching up with those we handle below
            eventmask = [
                        'bb.event.BuildStarted',
                        'bb.event.BuildCompleted',
                        'logging.LogRecord',
                        'bb.event.NoProvider',
                        'bb.command.CommandCompleted',
                        'bb.command.CommandFailed',
                        'bb.build.TaskStarted',
                        'bb.build.TaskFailed',
                        'bb.build.TaskSucceeded',
                        'bb.build.TaskFailedSilent',
                        'bb.build.TaskProgress',
                        'bb.runqueue.runQueueTaskStarted',
                        'bb.runqueue.sceneQueueTaskStarted',
                        'bb.event.ProcessStarted',
                        'bb.event.ProcessProgress',
                        'bb.event.ProcessFinished',
                        ]
            if extra_events:
                eventmask.extend(extra_events)
            ret = self.set_event_mask(eventmask)

        includelogs = self.config_data.getVar('BBINCLUDELOGS')
        loglines = self.config_data.getVar('BBINCLUDELOGS_LINES')

        ret = self.run_command('buildTargets', targets, task)
        if handle_events:
            result = False
            # Borrowed from knotty, instead somewhat hackily we use the helper
            # as the object to store "shutdown" on
            helper = bb.ui.uihelper.BBUIHelper()
            # We set up logging optionally in the constructor so now we need to
            # grab the handlers to pass to TerminalFilter
            console = None
            errconsole = None
            for handler in self.logger.handlers:
                if isinstance(handler, logging.StreamHandler):
                    if handler.stream == sys.stdout:
                        console = handler
                    elif handler.stream == sys.stderr:
                        errconsole = handler
            format_str = "%(levelname)s: %(message)s"
            format = bb.msg.BBLogFormatter(format_str)
            helper.shutdown = 0
            parseprogress = None
            termfilter = bb.ui.knotty.TerminalFilter(helper, helper, console, errconsole, format, quiet=self.quiet)
            try:
                while True:
                    try:
                        event = self.wait_event(0.25)
                        if event:
                            if event_callback and event_callback(event):
                                continue
                            if helper.eventHandler(event):
                                if isinstance(event, bb.build.TaskFailedSilent):
                                    self.logger.warning("Logfile for failed setscene task is %s" % event.logfile)
                                elif isinstance(event, bb.build.TaskFailed):
                                    bb.ui.knotty.print_event_log(event, includelogs, loglines, termfilter)
                                continue
                            if isinstance(event, bb.event.ProcessStarted):
                                if self.quiet > 1:
                                    continue
                                parseprogress = bb.ui.knotty.new_progress(event.processname, event.total)
                                parseprogress.start(False)
                                continue
                            if isinstance(event, bb.event.ProcessProgress):
                                if self.quiet > 1:
                                    continue
                                if parseprogress:
                                    parseprogress.update(event.progress)
                                else:
                                    bb.warn("Got ProcessProgress event for something that never started?")
                                continue
                            if isinstance(event, bb.event.ProcessFinished):
                                if self.quiet > 1:
                                    continue
                                if parseprogress:
                                    parseprogress.finish()
                                parseprogress = None
                                continue
                            if isinstance(event, bb.command.CommandCompleted):
                                result = True
                                break
                            if isinstance(event, bb.command.CommandFailed):
                                self.logger.error(str(event))
                                result = False
                                break
                            if isinstance(event, logging.LogRecord):
                                if event.taskpid == 0 or event.levelno > logging.INFO:
                                    self.logger.handle(event)
                                continue
                            if isinstance(event, bb.event.NoProvider):
                                self.logger.error(str(event))
                                result = False
                                break

                        elif helper.shutdown > 1:
                            break
                        termfilter.updateFooter()
                    except KeyboardInterrupt:
                        termfilter.clearFooter()
                        if helper.shutdown == 1:
                            print("\nSecond Keyboard Interrupt, stopping...\n")
                            ret = self.run_command("stateForceShutdown")
                            if ret and ret[2]:
                                self.logger.error("Unable to cleanly stop: %s" % ret[2])
                        elif helper.shutdown == 0:
                            print("\nKeyboard Interrupt, closing down...\n")
                            interrupted = True
                            ret = self.run_command("stateShutdown")
                            if ret and ret[2]:
                                self.logger.error("Unable to cleanly shutdown: %s" % ret[2])
                        helper.shutdown = helper.shutdown + 1
                        termfilter.clearFooter()
            finally:
                termfilter.finish()
            if helper.failed_tasks:
                result = False
            return result
|
||||
        else:
            return ret

        if appends:
            if not hasattr(self.cooker, 'collection'):
                raise Exception('You must call tinfoil.prepare() with config_only=False in order to get bbappends')
            appendfiles = self.cooker.collection.get_file_appends(fn)
        else:
            appendfiles = None
        if config_data:
            # We have to use a different function here if we're passing in a datastore
            localdata = bb.data.createCopy(config_data)
            envdata = bb.cache.parse_recipe(localdata, fn, appendfiles)['']
        else:
            # Use the standard path
            parser = bb.cache.NoCache(self.cooker.databuilder)
            envdata = parser.loadDataFull(fn, appendfiles)
        return envdata

    def shutdown(self):
        """
        Shut down tinfoil. Disconnects from the server and gracefully
        releases any associated resources. You must call this function if
        prepare() has been called, or use a with... block when you create
        the tinfoil object, which will ensure that it gets called.
        """
        if self.server_connection:
            self.run_command('clientComplete')
            _server_connections.remove(self.server_connection)
            bb.event.ui_queue = []
            self.server_connection.terminate()
            self.server_connection = None
        else:
            self.cooker.shutdown(force=True)
            self.cooker.post_serve()
            self.cooker.unlockBitbake()
        self.logger.removeHandler(self._log_hdlr)

        # Restore logging handlers to how they looked when we started
        if self.oldhandlers:
            for handler in self.logger.handlers:
                if handler not in self.oldhandlers:
                    self.logger.handlers.remove(handler)
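
For orientation, the code above is normally driven through the tinfoil API rather than called in isolation. A minimal usage sketch, assuming a configured build environment where the bb module is importable (the target recipe name is only a placeholder):

import bb.tinfoil

# The with-block guarantees shutdown() runs even if the build raises;
# handle_events=True reuses the knotty helpers shown above.
with bb.tinfoil.Tinfoil() as tinfoil:
    tinfoil.prepare(config_only=False)
    ok = tinfoil.build_targets('quilt-native', task='do_build')
    print('build succeeded' if ok else 'build failed')
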

    def _reconvert_type(self, obj, origtypename):
        """
        Convert an object back to the right type, in the case
        that marshalling has changed it (especially with xmlrpc)
        """
        supported_types = {
            'set': set,
            'DataStoreConnectionHandle': bb.command.DataStoreConnectionHandle,
        }

        origtype = supported_types.get(origtypename, None)
        if origtype is None:
            raise Exception('Unsupported type "%s"' % origtypename)
        if type(obj) == origtype:
            newobj = obj
        elif isinstance(obj, dict):
            # New style class
            newobj = origtype()
            for k,v in obj.items():
                setattr(newobj, k, v)
        else:
            # Assume we can coerce the type
            newobj = origtype(obj)

        if isinstance(newobj, bb.command.DataStoreConnectionHandle):
            connector = TinfoilDataStoreConnector(self, newobj.dsindex)
            newobj = bb.data.init()
            newobj.setVar('_remote_data', connector)

        return newobj


-class TinfoilConfigParameters(BitBakeConfigParameters):
+class TinfoilConfigParameters(ConfigParameters):

-    def __init__(self, config_only, **options):
+    def __init__(self, **options):
         self.initial_options = options
-        # Apply some sane defaults
-        if not 'parse_only' in options:
-            self.initial_options['parse_only'] = not config_only
-        #if not 'status_only' in options:
-        #    self.initial_options['status_only'] = config_only
-        if not 'ui' in options:
-            self.initial_options['ui'] = 'knotty'
-        if not 'argv' in options:
-            self.initial_options['argv'] = []

         super(TinfoilConfigParameters, self).__init__()

-    def parseCommandLine(self, argv=None):
-        # We don't want any parameters parsed from the command line
-        opts = super(TinfoilConfigParameters, self).parseCommandLine([])
-        for key, val in self.initial_options.items():
-            setattr(opts[0], key, val)
-        return opts
+    def parseCommandLine(self, argv=sys.argv):
+        class DummyOptions:
+            def __init__(self, initial_options):
+                for key, val in initial_options.items():
+                    setattr(self, key, val)
+
+        return DummyOptions(self.initial_options), None
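
Why _reconvert_type is needed can be shown with the standard library alone: xmlrpc has no set type, so set-valued results arrive as plain lists and must be coerced back. A small self-contained illustration (not part of the diff):

from xmlrpc.client import dumps, loads

# xmlrpc cannot marshal a set directly, so the sender ships it as a list...
payload = dumps((sorted({'b', 'a'}),))
(args, _method) = loads(payload)
# ...and the receiver sees a list; origtype(obj) above rebuilds the set.
print(type(args[0]), sorted(args[0]))   # <class 'list'> ['a', 'b']
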
@@ -42,12 +42,10 @@ from orm.models import Variable, VariableHistory
 from orm.models import Package, Package_File, Target_Installed_Package, Target_File
 from orm.models import Task_Dependency, Package_Dependency
 from orm.models import Recipe_Dependency, Provides
-from orm.models import Project, CustomImagePackage
+from orm.models import Project, CustomImagePackage, CustomImageRecipe
 from orm.models import signal_runbuilds

 from bldcontrol.models import BuildEnvironment, BuildRequest
-from bldcontrol.models import BRLayer
-from bldcontrol import bbcontroller

 from bb.msg import BBLogFormatter as formatter
 from django.db import models
@@ -363,6 +361,11 @@ class ORMWrapper(object):

     def get_update_layer_version_object(self, build_obj, layer_obj, layer_version_information):
         if isinstance(layer_obj, Layer_Version):
+            # Special case the toaster-custom-images layer which is created
+            # on the fly so don't update the values which may cause the layer
+            # to be duplicated on a future get_or_create
+            if layer_obj.layer.name == CustomImageRecipe.LAYER_NAME:
+                return layer_obj
             # We already found our layer version for this build so just
             # update it with the new build information
             logger.debug("We found our layer from toaster")
@@ -381,8 +384,8 @@ class ORMWrapper(object):
                 local_path=layer_version_information['local_path'],
             )

-            logger.debug("Created new layer version %s for build history",
-                         layer_copy.layer.name)
+            logger.info("created new historical layer version %d",
+                        layer_copy.pk)

             self.layer_version_built.append(layer_copy)

@@ -438,33 +441,48 @@ class ORMWrapper(object):
         else:
             br_id, be_id = brbe.split(":")

-            # Find the layer version by matching the layer event information
-            # against the metadata we have in Toaster
-
-            try:
-                br_layer = BRLayer.objects.get(req=br_id,
-                                               name=layer_information['name'])
-                return br_layer.layer_version
-            except (BRLayer.MultipleObjectsReturned, BRLayer.DoesNotExist):
-                # There are multiple of the same layer name or the name
-                # hasn't been determined by the toaster.bbclass layer
-                # so let's filter by the local_path
-                bc = bbcontroller.getBuildEnvironmentController(pk=be_id)
-                for br_layer in BRLayer.objects.filter(req=br_id):
-                    if br_layer.giturl and \
-                       layer_information['local_path'].endswith(
-                           bc.getGitCloneDirectory(br_layer.giturl,
-                                                   br_layer.commit)):
-                        return br_layer.layer_version
-
-                    if br_layer.local_source_dir == \
-                       layer_information['local_path']:
-                        return br_layer.layer_version
-
-            # We've reached the end of our search and couldn't find the layer
-            # we can continue but some data may be missing
-            raise NotExisting("Unidentified layer %s" %
-                              pformat(layer_information))
+            # find layer by checkout path;
+            from bldcontrol import bbcontroller
+            bc = bbcontroller.getBuildEnvironmentController(pk = be_id)
+
+            # we might have a race condition here, as the project layers may change between the build trigger and the actual build execution
+            # but we can only match on the layer name, so the worst thing can happen is a mis-identification of the layer, not a total failure
+
+            # note that this is different
+            buildrequest = BuildRequest.objects.get(pk = br_id)
+            for brl in buildrequest.brlayer_set.all():
+                if brl.local_source_dir:
+                    localdirname = os.path.join(brl.local_source_dir,
+                                                brl.dirpath)
+                else:
+                    localdirname = os.path.join(bc.getGitCloneDirectory(brl.giturl, brl.commit), brl.dirpath)
+                # we get a relative path, unless running in HEAD mode where the path is absolute
+                if not localdirname.startswith("/"):
+                    localdirname = os.path.join(bc.be.sourcedir, localdirname)
+                #logger.debug(1, "Localdirname %s lcal_path %s" % (localdirname, layer_information['local_path']))
+                if localdirname.startswith(layer_information['local_path']):
+                    # If the build request came from toaster this field
+                    # should contain the information from the layer_version
+                    # That created this build request.
+                    if brl.layer_version:
+                        return brl.layer_version
+
+                    # This might be a local layer (i.e. no git info) so try
+                    # matching local_source_dir
+                    if brl.local_source_dir and brl.local_source_dir == layer_information["local_path"]:
+                        return brl.layer_version
+
+                    # we matched the BRLayer, but we need the layer_version that generated this BR; reverse of the Project.schedule_build()
+                    #logger.debug(1, "Matched %s to BRlayer %s" % (pformat(layer_information["local_path"]), localdirname))
+
+                    for pl in buildrequest.project.projectlayer_set.filter(layercommit__layer__name = brl.name):
+                        if pl.layercommit.layer.vcs_url == brl.giturl :
+                            layer = pl.layercommit.layer
+                            layer.save()
+                            return layer
+
+            raise NotExisting("Unidentified layer %s" % pformat(layer_information))
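
The localdirname computation in this hunk reduces to a small rule: a layer checkout lives under its clone directory plus dirpath, and relative results are anchored at the build environment's sourcedir. A standalone sketch with hypothetical paths:

import os

def resolve_layer_dir(sourcedir, clonedir, dirpath):
    # mirror of the localdirname logic above; all arguments are examples
    localdirname = os.path.join(clonedir, dirpath)
    if not localdirname.startswith("/"):
        localdirname = os.path.join(sourcedir, localdirname)
    return localdirname

print(resolve_layer_dir("/srv/be", "_poky", "meta"))   # /srv/be/_poky/meta
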
     def save_target_file_information(self, build_obj, target_obj, filedata):
         assert isinstance(build_obj, Build)

@@ -719,11 +737,7 @@ class ORMWrapper(object):

     def save_build_package_information(self, build_obj, package_info, recipes,
                                        built_package):
         # assert isinstance(build_obj, Build)
-
-        if not 'PN' in package_info.keys():
-            # no package data to save (e.g. 'OPKGN'="lib64-*"|"lib32-*")
-            return None

         # create and save the object
         pname = package_info['PKG']
@@ -862,12 +876,6 @@ class MockEvent(object):
         self.pathname = None
         self.lineno = None

-    def getMessage(self):
-        """
-        Simulate LogRecord message return
-        """
-        return self.msg
-

 class BuildInfoHelper(object):
     """ This class gathers the build information from the server and sends it
@@ -975,10 +983,9 @@ class BuildInfoHelper(object):
         return task_information

     def _get_layer_version_for_dependency(self, pathRE):
-        """ Returns the layer in the toaster db that has a full regex
-        match to the pathRE. pathRE - the layer path passed as a regex in the
-        event. It is created in cooker.py as a collection for the layer
-        priorities.
+        """ Returns the layer in the toaster db that has a full regex match to the pathRE.
+        pathRE - the layer path passed as a regex in the event. It is created in
+        cooker.py as a collection for the layer priorities.
         """
         self._ensure_build()

@@ -986,31 +993,19 @@ class BuildInfoHelper(object):
             assert isinstance(layer_version, Layer_Version)
             return len(layer_version.local_path)

-        # Our paths don't append a trailing slash
-        if pathRE.endswith("/"):
-            pathRE = pathRE[:-1]
-
-        p = re.compile(pathRE)
-        # Heuristics: we always match recipe to the deepest layer path in
-        # the discovered layers
-        for lvo in sorted(self.orm_wrapper.layer_version_objects,
-                          reverse=True, key=_sort_longest_path):
-            if p.fullmatch(os.path.abspath(lvo.local_path)):
+        # we don't care if we match the trailing slashes
+        p = re.compile(re.sub("/[^/]*?$","",pathRE))
+        path=re.sub(r'[$^]',r'',pathRE)
+        # Heuristics: we always match recipe to the deepest layer path in the discovered layers
+        for lvo in sorted(self.orm_wrapper.layer_version_objects, reverse=True, key=_sort_longest_path):
+            if p.fullmatch(lvo.local_path):
                 return lvo
             if lvo.layer.local_source_dir:
-                if p.fullmatch(os.path.abspath(lvo.layer.local_source_dir)):
+                if p.fullmatch(lvo.layer.local_source_dir):
                     return lvo
+            if 0 == path.find(lvo.local_path):
+                # sub-layer path inside existing layer
+                return lvo
+        #if we get here, we didn't read layers correctly; dump whatever information we have on the error log
+        logger.warning("Could not match layer dependency for path %s : %s", path, self.orm_wrapper.layer_version_objects)

-        # if we get here, we didn't read layers correctly;
-        # dump whatever information we have on the error log
-        logger.warning("Could not match layer dependency for path %s : %s",
-                       pathRE,
-                       self.orm_wrapper.layer_version_objects)
         return None
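
The matching heuristic above can be exercised on its own: strip the trailing path component from the regex, then test layer paths from deepest to shallowest. A self-contained sketch:

import re

def match_layer(path_re, layer_paths):
    # prefer the deepest (longest) layer path, as the code above does
    p = re.compile(re.sub("/[^/]*?$", "", path_re))
    for lp in sorted(layer_paths, key=len, reverse=True):
        if p.fullmatch(lp):
            return lp
    return None

print(match_layer("^/work/poky/meta/conf$", ["/work/poky/meta", "/work/poky"]))
# -> /work/poky/meta
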
     def _get_layer_version_for_path(self, path):
         self._ensure_build()

@@ -1273,14 +1268,6 @@ class BuildInfoHelper(object):
         candidates = [x for x in self.internal_state['taskdata'].keys() if x.endswith(identifier)]
         if len(candidates) == 1:
             identifier = candidates[0]
-        elif len(candidates) > 1 and hasattr(event,'_package'):
-            if 'native-' in event._package:
-                identifier = 'native:' + identifier
-            if 'nativesdk-' in event._package:
-                identifier = 'nativesdk:' + identifier
-            candidates = [x for x in self.internal_state['taskdata'].keys() if x.endswith(identifier)]
-            if len(candidates) == 1:
-                identifier = candidates[0]

         assert identifier in self.internal_state['taskdata']
         identifierlist = identifier.split(":")
@@ -1411,9 +1398,9 @@ class BuildInfoHelper(object):
         for lv in event._depgraph['layer-priorities']:
             (_, path, _, priority) = lv
             layer_version_obj = self._get_layer_version_for_dependency(path)
-            if layer_version_obj:
-                layer_version_obj.priority = priority
-                layer_version_obj.save()
+            assert layer_version_obj is not None
+            layer_version_obj.priority = priority
+            layer_version_obj.save()

         # save recipe information
         self.internal_state['recipes'] = {}
@@ -1603,14 +1590,14 @@ class BuildInfoHelper(object):
             mockevent.lineno = -1
             self.store_log_event(mockevent)

-    def store_log_event(self, event,cli_backlog=True):
+    def store_log_event(self, event):
         self._ensure_build()

         if event.levelno < formatter.WARNING:
             return

-        # early return for CLI builds
-        if cli_backlog and self.brbe is None:
+        if self.brbe is None:
             if not 'backlog' in self.internal_state:
                 self.internal_state['backlog'] = []
             self.internal_state['backlog'].append(event)
@@ -1622,7 +1609,7 @@ class BuildInfoHelper(object):
                 tempevent = self.internal_state['backlog'].pop()
                 logger.debug(1, "buildinfohelper: Saving stored event %s "
                              % tempevent)
-                self.store_log_event(tempevent,cli_backlog)
+                self.store_log_event(tempevent)
             else:
                 logger.info("buildinfohelper: All events saved")
                 del self.internal_state['backlog']
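
The backlog handling above is a buffer-then-flush pattern: events that arrive before a Build object exists are queued, and replayed through the same entry point once a build is known. Reduced to its core (names are illustrative, not from the diff):

state = {'build': None, 'backlog': []}

def store_event(event):
    # queue while there is no build, flush through the same path afterwards
    if state['build'] is None:
        state['backlog'].append(event)
        return
    while state['backlog']:
        print("saved:", state['backlog'].pop())
    print("saved:", event)
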
@@ -1678,36 +1665,6 @@ class BuildInfoHelper(object):
                     break
         return endswith

-    def scan_task_artifacts(self, event):
-        """
-        The 'TaskArtifacts' event passes the manifest file content for the
-        tasks 'do_deploy', 'do_image_complete', 'do_populate_sdk', and
-        'do_populate_sdk_ext'. The first two will be implemented later.
-        """
-        task_vars = BuildInfoHelper._get_data_from_event(event)
-        task_name = task_vars['task'][task_vars['task'].find(':')+1:]
-        task_artifacts = task_vars['artifacts']
-
-        if task_name in ['do_populate_sdk', 'do_populate_sdk_ext']:
-            targets = [target for target in self.internal_state['targets'] \
-                if target.task == task_name[3:]]
-            if not targets:
-                logger.warning("scan_task_artifacts: SDK targets not found: %s\n", task_name)
-                return
-            for artifact_path in task_artifacts:
-                if not os.path.isfile(artifact_path):
-                    logger.warning("scan_task_artifacts: artifact file not found: %s\n", artifact_path)
-                    continue
-                for target in targets:
-                    # don't record the file if it's already been added
-                    # to this target
-                    matching_files = TargetSDKFile.objects.filter(
-                        target=target, file_name=artifact_path)
-                    if matching_files.count() == 0:
-                        artifact_size = os.stat(artifact_path).st_size
-                        self.orm_wrapper.save_target_sdk_file(
-                            target, artifact_path, artifact_size)
-
     def _get_image_files(self, deploy_dir_image, image_name, image_file_extensions):
         """
         Find files in deploy_dir_image whose basename starts with the
@@ -1987,8 +1944,7 @@ class BuildInfoHelper(object):
             if 'backlog' in self.internal_state:
                 # we save missed events in the database for the current build
                 tempevent = self.internal_state['backlog'].pop()
-                # Do not skip command line build events
-                self.store_log_event(tempevent,False)
+                self.store_log_event(tempevent)

         if not connection.features.autocommits_when_autocommit_is_off:
             transaction.set_autocommit(True)
bitbake/lib/bb/ui/depexp.py (Normal file, 358 lines)
@@ -0,0 +1,358 @@
#
# BitBake Graphical GTK based Dependency Explorer
#
# Copyright (C) 2007        Ross Burton
# Copyright (C) 2007 - 2008 Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import sys
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Gdk, GObject
from multiprocessing import Queue
import threading
from xmlrpc import client
import time
import bb
import bb.event

# Package Model
(COL_PKG_NAME) = (0)

# Dependency Model
(TYPE_DEP, TYPE_RDEP) = (0, 1)
(COL_DEP_TYPE, COL_DEP_PARENT, COL_DEP_PACKAGE) = (0, 1, 2)


class PackageDepView(Gtk.TreeView):
    def __init__(self, model, dep_type, label):
        Gtk.TreeView.__init__(self)
        self.current = None
        self.dep_type = dep_type
        self.filter_model = model.filter_new()
        self.filter_model.set_visible_func(self._filter, data=None)
        self.set_model(self.filter_model)
        self.append_column(Gtk.TreeViewColumn(label, Gtk.CellRendererText(), text=COL_DEP_PACKAGE))

    def _filter(self, model, iter, data):
        this_type = model[iter][COL_DEP_TYPE]
        package = model[iter][COL_DEP_PARENT]
        if this_type != self.dep_type: return False
        return package == self.current

    def set_current_package(self, package):
        self.current = package
        self.filter_model.refilter()


class PackageReverseDepView(Gtk.TreeView):
    def __init__(self, model, label):
        Gtk.TreeView.__init__(self)
        self.current = None
        self.filter_model = model.filter_new()
        self.filter_model.set_visible_func(self._filter)
        self.set_model(self.filter_model)
        self.append_column(Gtk.TreeViewColumn(label, Gtk.CellRendererText(), text=COL_DEP_PARENT))

    def _filter(self, model, iter, data):
        package = model[iter][COL_DEP_PACKAGE]
        return package == self.current

    def set_current_package(self, package):
        self.current = package
        self.filter_model.refilter()


class DepExplorer(Gtk.Window):
    def __init__(self):
        Gtk.Window.__init__(self)
        self.set_title("Dependency Explorer")
        self.set_default_size(500, 500)
        self.connect("delete-event", Gtk.main_quit)

        # Create the data models
        self.pkg_model = Gtk.ListStore(GObject.TYPE_STRING)
        self.pkg_model.set_sort_column_id(COL_PKG_NAME, Gtk.SortType.ASCENDING)
        self.depends_model = Gtk.ListStore(GObject.TYPE_INT, GObject.TYPE_STRING, GObject.TYPE_STRING)
        self.depends_model.set_sort_column_id(COL_DEP_PACKAGE, Gtk.SortType.ASCENDING)

        pane = Gtk.HPaned()
        pane.set_position(250)
        self.add(pane)

        # The master list of packages
        scrolled = Gtk.ScrolledWindow()
        scrolled.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
        scrolled.set_shadow_type(Gtk.ShadowType.IN)

        self.pkg_treeview = Gtk.TreeView(self.pkg_model)
        self.pkg_treeview.get_selection().connect("changed", self.on_cursor_changed)
        column = Gtk.TreeViewColumn("Package", Gtk.CellRendererText(), text=COL_PKG_NAME)
        self.pkg_treeview.append_column(column)
        pane.add1(scrolled)
        scrolled.add(self.pkg_treeview)

        box = Gtk.VBox(homogeneous=True, spacing=4)

        # Runtime Depends
        scrolled = Gtk.ScrolledWindow()
        scrolled.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
        scrolled.set_shadow_type(Gtk.ShadowType.IN)
        self.rdep_treeview = PackageDepView(self.depends_model, TYPE_RDEP, "Runtime Depends")
        self.rdep_treeview.connect("row-activated", self.on_package_activated, COL_DEP_PACKAGE)
        scrolled.add(self.rdep_treeview)
        box.add(scrolled)

        # Build Depends
        scrolled = Gtk.ScrolledWindow()
        scrolled.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
        scrolled.set_shadow_type(Gtk.ShadowType.IN)
        self.dep_treeview = PackageDepView(self.depends_model, TYPE_DEP, "Build Depends")
        self.dep_treeview.connect("row-activated", self.on_package_activated, COL_DEP_PACKAGE)
        scrolled.add(self.dep_treeview)
        box.add(scrolled)
        pane.add2(box)

        # Reverse Depends
        scrolled = Gtk.ScrolledWindow()
        scrolled.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
        scrolled.set_shadow_type(Gtk.ShadowType.IN)
        self.revdep_treeview = PackageReverseDepView(self.depends_model, "Reverse Depends")
        self.revdep_treeview.connect("row-activated", self.on_package_activated, COL_DEP_PARENT)
        scrolled.add(self.revdep_treeview)
        box.add(scrolled)
        pane.add2(box)

        self.show_all()

    def on_package_activated(self, treeview, path, column, data_col):
        model = treeview.get_model()
        package = model.get_value(model.get_iter(path), data_col)

        pkg_path = []
        def finder(model, path, iter, needle):
            package = model.get_value(iter, COL_PKG_NAME)
            if package == needle:
                pkg_path.append(path)
                return True
            else:
                return False
        self.pkg_model.foreach(finder, package)
        if pkg_path:
            self.pkg_treeview.get_selection().select_path(pkg_path[0])
            self.pkg_treeview.scroll_to_cell(pkg_path[0])

    def on_cursor_changed(self, selection):
        (model, it) = selection.get_selected()
        if it is None:
            current_package = None
        else:
            current_package = model.get_value(it, COL_PKG_NAME)
        self.rdep_treeview.set_current_package(current_package)
        self.dep_treeview.set_current_package(current_package)
        self.revdep_treeview.set_current_package(current_package)

    def parse(self, depgraph):
        for package in depgraph["pn"]:
            self.pkg_model.insert(0, (package,))

        for package in depgraph["depends"]:
            for depend in depgraph["depends"][package]:
                self.depends_model.insert (0, (TYPE_DEP, package, depend))

        for package in depgraph["rdepends-pn"]:
            for rdepend in depgraph["rdepends-pn"][package]:
                self.depends_model.insert (0, (TYPE_RDEP, package, rdepend))


class gtkthread(threading.Thread):
    quit = threading.Event()
    def __init__(self, shutdown):
        threading.Thread.__init__(self)
        self.setDaemon(True)
        self.shutdown = shutdown
        if not Gtk.init_check()[0]:
            sys.stderr.write("Gtk+ init failed. Make sure DISPLAY variable is set.\n")
            gtkthread.quit.set()

    def run(self):
        GObject.threads_init()
        Gdk.threads_init()
        Gtk.main()
        gtkthread.quit.set()


def main(server, eventHandler, params):
    shutdown = 0

    gtkgui = gtkthread(shutdown)
    gtkgui.start()

    try:
        params.updateFromServer(server)
        cmdline = params.parseActions()
        if not cmdline:
            print("Nothing to do. Use 'bitbake world' to build everything, or run 'bitbake --help' for usage information.")
            return 1
        if 'msg' in cmdline and cmdline['msg']:
            print(cmdline['msg'])
            return 1
        cmdline = cmdline['action']
        if not cmdline or cmdline[0] != "generateDotGraph":
            print("This UI requires the -g option")
            return 1
        ret, error = server.runCommand(["generateDepTreeEvent", cmdline[1], cmdline[2]])
        if error:
            print("Error running command '%s': %s" % (cmdline, error))
            return 1
        elif ret != True:
            print("Error running command '%s': returned %s" % (cmdline, ret))
            return 1
    except client.Fault as x:
        print("XMLRPC Fault getting commandline:\n %s" % x)
        return

    if gtkthread.quit.isSet():
        return

    Gdk.threads_enter()
    dep = DepExplorer()
    bardialog = Gtk.Dialog(parent=dep,
                           flags=Gtk.DialogFlags.MODAL|Gtk.DialogFlags.DESTROY_WITH_PARENT)
    bardialog.set_default_size(400, 50)
    box = bardialog.get_content_area()
    pbar = Gtk.ProgressBar()
    box.pack_start(pbar, True, True, 0)
    bardialog.show_all()
    bardialog.connect("delete-event", Gtk.main_quit)
    Gdk.threads_leave()

    progress_total = 0
    while True:
        try:
            event = eventHandler.waitEvent(0.25)
            if gtkthread.quit.isSet():
                _, error = server.runCommand(["stateForceShutdown"])
                if error:
                    print('Unable to cleanly stop: %s' % error)
                break

            if event is None:
                continue

            if isinstance(event, bb.event.CacheLoadStarted):
                progress_total = event.total
                Gdk.threads_enter()
                bardialog.set_title("Loading Cache")
                pbar.set_fraction(0.0)
                Gdk.threads_leave()

            if isinstance(event, bb.event.CacheLoadProgress):
                x = event.current
                Gdk.threads_enter()
                pbar.set_fraction(x * 1.0 / progress_total)
                Gdk.threads_leave()
                continue

            if isinstance(event, bb.event.CacheLoadCompleted):
                continue

            if isinstance(event, bb.event.ParseStarted):
                progress_total = event.total
                if progress_total == 0:
                    continue
                Gdk.threads_enter()
                pbar.set_fraction(0.0)
                bardialog.set_title("Processing recipes")
                Gdk.threads_leave()

            if isinstance(event, bb.event.ParseProgress):
                x = event.current
                Gdk.threads_enter()
                pbar.set_fraction(x * 1.0 / progress_total)
                Gdk.threads_leave()
                continue

            if isinstance(event, bb.event.ParseCompleted):
                Gdk.threads_enter()
                bardialog.set_title("Generating dependency tree")
                Gdk.threads_leave()
                continue

            if isinstance(event, bb.event.DepTreeGenerated):
                Gdk.threads_enter()
                bardialog.hide()
                dep.parse(event._depgraph)
                Gdk.threads_leave()

            if isinstance(event, bb.command.CommandCompleted):
                continue

            if isinstance(event, bb.event.NoProvider):
                if event._runtime:
                    r = "R"
                else:
                    r = ""

                extra = ''
                if not event._reasons:
                    if event._close_matches:
                        extra = ". Close matches:\n  %s" % '\n  '.join(event._close_matches)

                if event._dependees:
                    print("Nothing %sPROVIDES '%s' (but %s %sDEPENDS on or otherwise requires it)%s" % (r, event._item, ", ".join(event._dependees), r, extra))
                else:
                    print("Nothing %sPROVIDES '%s'%s" % (r, event._item, extra))
                if event._reasons:
                    for reason in event._reasons:
                        print(reason)

                _, error = server.runCommand(["stateShutdown"])
                if error:
                    print('Unable to cleanly shutdown: %s' % error)
                break

            if isinstance(event, bb.command.CommandFailed):
                print("Command execution failed: %s" % event.error)
                return event.exitcode

            if isinstance(event, bb.command.CommandExit):
                return event.exitcode

            if isinstance(event, bb.cooker.CookerExit):
                break

            continue
        except EnvironmentError as ioerror:
            # ignore interrupted io
            if ioerror.args[0] == 4:
                pass
        except KeyboardInterrupt:
            if shutdown == 2:
                print("\nThird Keyboard Interrupt, exit.\n")
                break
            if shutdown == 1:
                print("\nSecond Keyboard Interrupt, stopping...\n")
                _, error = server.runCommand(["stateForceShutdown"])
                if error:
                    print('Unable to cleanly stop: %s' % error)
            if shutdown == 0:
                print("\nKeyboard Interrupt, closing down...\n")
                _, error = server.runCommand(["stateShutdown"])
                if error:
                    print('Unable to cleanly shutdown: %s' % error)
            shutdown = shutdown + 1
            pass
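
The three dependency panes above share a single backing ListStore; each view wraps it in a Gtk.TreeModelFilter whose visible-func closes over the current selection, and refilter() re-evaluates every row. The pattern in isolation (requires a GTK 3 runtime; the package names are examples):

import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk

store = Gtk.ListStore(str)
for name in ("busybox", "quilt", "zlib"):
    store.append((name,))

current = {"pkg": "quilt"}
flt = store.filter_new()
flt.set_visible_func(lambda model, it, data: model[it][0] == current["pkg"])
flt.refilter()
print([row[0] for row in flt])   # ['quilt']
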
@@ -75,8 +75,10 @@ class BBProgress(progressbar.ProgressBar):
             extrastr = str(extra)
             if extrastr[0] != ' ':
                 extrastr = ' ' + extrastr
+            if extrastr[-1] != ' ':
+                extrastr += ' '
         else:
-            extrastr = ''
+            extrastr = ' '
         self.widgets[self.extrapos] = extrastr

     def _need_update(self):
@@ -207,10 +209,8 @@ class TerminalFilter(object):
             self.interactive = False
             bb.note("Unable to use interactive mode for this terminal, using fallback")
             return
-        if console:
-            console.addFilter(InteractConsoleLogFilter(self, format))
-        if errconsole:
-            errconsole.addFilter(InteractConsoleLogFilter(self, format))
+        console.addFilter(InteractConsoleLogFilter(self, format))
+        errconsole.addFilter(InteractConsoleLogFilter(self, format))

         self.main_progress = None

@@ -284,7 +284,7 @@ class TerminalFilter(object):
             content = self.main_progress.update(progress)
             print('')
         lines = 1 + int(len(content) / (self.columns + 1))
-        if self.quiet == 0:
+        if not self.quiet:
             for tasknum, task in enumerate(tasks[:(self.rows - 2)]):
                 if isinstance(task, tuple):
                     pbar, progress, rate, start_time = task
@@ -312,33 +312,7 @@ class TerminalFilter(object):
             fd = sys.stdin.fileno()
             self.termios.tcsetattr(fd, self.termios.TCSADRAIN, self.stdinbackup)

-def print_event_log(event, includelogs, loglines, termfilter):
-    # FIXME refactor this out further
-    logfile = event.logfile
-    if logfile and os.path.exists(logfile):
-        termfilter.clearFooter()
-        bb.error("Logfile of failure stored in: %s" % logfile)
-        if includelogs and not event.errprinted:
-            print("Log data follows:")
-            f = open(logfile, "r")
-            lines = []
-            while True:
-                l = f.readline()
-                if l == '':
-                    break
-                l = l.rstrip()
-                if loglines:
-                    lines.append(' | %s' % l)
-                    if len(lines) > int(loglines):
-                        lines.pop(0)
-                else:
-                    print('| %s' % l)
-            f.close()
-            if lines:
-                for line in lines:
-                    print(line)
-
-def _log_settings_from_server(server, observe_only):
+def _log_settings_from_server(server):
     # Get values of variables which control our output
     includelogs, error = server.runCommand(["getVariable", "BBINCLUDELOGS"])
     if error:
@@ -348,11 +322,7 @@ def _log_settings_from_server(server, observe_only):
     if error:
         logger.error("Unable to get the value of BBINCLUDELOGS_LINES variable: %s" % error)
         raise BaseException(error)
-    if observe_only:
-        cmd = 'getVariable'
-    else:
-        cmd = 'getSetVariable'
-    consolelogfile, error = server.runCommand([cmd, "BB_CONSOLELOG"])
+    consolelogfile, error = server.runCommand(["getSetVariable", "BB_CONSOLELOG"])
     if error:
         logger.error("Unable to get the value of BB_CONSOLELOG variable: %s" % error)
         raise BaseException(error)
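
The BBINCLUDELOGS_LINES loop above is a tail(1)-style bounded buffer. The same idea standalone, using a deque in place of a list with pop(0):

from collections import deque

def tail_log(path, loglines):
    # keep only the last `loglines` lines, formatted the way knotty prints them
    lines = deque(maxlen=int(loglines))
    with open(path) as f:
        for l in f:
            lines.append(' | %s' % l.rstrip())
    return list(lines)
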
@@ -370,10 +340,7 @@ _evt_list = [ "bb.runqueue.runQueueExitWait", "bb.event.LogExecTTY", "logging.Lo

 def main(server, eventHandler, params, tf = TerminalFilter):

-    if not params.observe_only:
-        params.updateToServer(server, os.environ.copy())
-
-    includelogs, loglines, consolelogfile = _log_settings_from_server(server, params.observe_only)
+    includelogs, loglines, consolelogfile = _log_settings_from_server(server)

     if sys.stdin.isatty() and sys.stdout.isatty():
         log_exec_tty = True
@@ -386,19 +353,15 @@ def main(server, eventHandler, params, tf = TerminalFilter):
     errconsole = logging.StreamHandler(sys.stderr)
     format_str = "%(levelname)s: %(message)s"
     format = bb.msg.BBLogFormatter(format_str)
-    if params.options.quiet == 0:
-        forcelevel = None
-    elif params.options.quiet > 2:
-        forcelevel = bb.msg.BBLogFormatter.ERROR
+    if params.options.quiet:
+        bb.msg.addDefaultlogFilter(console, bb.msg.BBLogFilterStdOut, bb.msg.BBLogFormatter.WARNING)
     else:
-        forcelevel = bb.msg.BBLogFormatter.WARNING
-    bb.msg.addDefaultlogFilter(console, bb.msg.BBLogFilterStdOut, forcelevel)
+        bb.msg.addDefaultlogFilter(console, bb.msg.BBLogFilterStdOut)
     bb.msg.addDefaultlogFilter(errconsole, bb.msg.BBLogFilterStdErr)
     console.setFormatter(format)
     errconsole.setFormatter(format)
-    if not bb.msg.has_console_handler(logger):
-        logger.addHandler(console)
-        logger.addHandler(errconsole)
+    logger.addHandler(console)
+    logger.addHandler(errconsole)

     bb.utils.set_process_name("KnottyUI")

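
What the addDefaultlogFilter calls above arrange is a split of console output by level: warnings and errors on stderr, everything below on stdout. A rough stand-in using plain logging (the lambdas take the place of bb.msg's filter classes):

import logging
import sys

log = logging.getLogger("knotty-sketch")
log.setLevel(logging.DEBUG)

out = logging.StreamHandler(sys.stdout)
out.addFilter(lambda record: record.levelno < logging.WARNING)   # info and below
err = logging.StreamHandler(sys.stderr)
err.addFilter(lambda record: record.levelno >= logging.WARNING)  # warnings and errors

log.addHandler(out)
log.addHandler(err)
log.info("to stdout")
log.warning("to stderr")
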
@@ -427,6 +390,7 @@ def main(server, eventHandler, params, tf = TerminalFilter):
     universe = False
     if not params.observe_only:
         params.updateFromServer(server)
+        params.updateToServer(server, os.environ.copy())
         cmdline = params.parseActions()
         if not cmdline:
             print("Nothing to do. Use 'bitbake world' to build everything, or run 'bitbake --help' for usage information.")
@@ -502,11 +466,11 @@ def main(server, eventHandler, params, tf = TerminalFilter):
                 continue

             # Prefix task messages with recipe/task
-            if event.taskpid in helper.running_tasks and event.levelno != format.PLAIN:
+            if event.taskpid in helper.running_tasks:
                 taskinfo = helper.running_tasks[event.taskpid]
                 event.msg = taskinfo['title'] + ': ' + event.msg
-            if hasattr(event, 'fn'):
-                event.msg = event.fn + ': ' + event.msg
+                if hasattr(event, 'fn'):
+                    event.msg = event.fn + ': ' + event.msg
             logger.handle(event)
             continue

@@ -515,52 +479,62 @@ def main(server, eventHandler, params, tf = TerminalFilter):
                 continue
             if isinstance(event, bb.build.TaskFailed):
                 return_value = 1
-                print_event_log(event, includelogs, loglines, termfilter)
+                logfile = event.logfile
+                if logfile and os.path.exists(logfile):
+                    termfilter.clearFooter()
+                    bb.error("Logfile of failure stored in: %s" % logfile)
+                    if includelogs and not event.errprinted:
+                        print("Log data follows:")
+                        f = open(logfile, "r")
+                        lines = []
+                        while True:
+                            l = f.readline()
+                            if l == '':
+                                break
+                            l = l.rstrip()
+                            if loglines:
+                                lines.append(' | %s' % l)
+                                if len(lines) > int(loglines):
+                                    lines.pop(0)
+                            else:
+                                print('| %s' % l)
+                        f.close()
+                        if lines:
+                            for line in lines:
+                                print(line)
             if isinstance(event, bb.build.TaskBase):
                 logger.info(event._message)
                 continue
             if isinstance(event, bb.event.ParseStarted):
-                if params.options.quiet > 1:
-                    continue
                 if event.total == 0:
                     continue
                 parseprogress = new_progress("Parsing recipes", event.total).start()
                 continue
             if isinstance(event, bb.event.ParseProgress):
-                if params.options.quiet > 1:
-                    continue
                 if parseprogress:
                     parseprogress.update(event.current)
                 else:
                     bb.warn("Got ParseProgress event for parsing that never started?")
                 continue
             if isinstance(event, bb.event.ParseCompleted):
-                if params.options.quiet > 1:
-                    continue
                 if not parseprogress:
                     continue
                 parseprogress.finish()
                 parseprogress = None
-                if params.options.quiet == 0:
+                if not params.options.quiet:
                     print(("Parsing of %d .bb files complete (%d cached, %d parsed). %d targets, %d skipped, %d masked, %d errors."
                         % ( event.total, event.cached, event.parsed, event.virtuals, event.skipped, event.masked, event.errors)))
                 continue

             if isinstance(event, bb.event.CacheLoadStarted):
-                if params.options.quiet > 1:
-                    continue
                 cacheprogress = new_progress("Loading cache", event.total).start()
                 continue
             if isinstance(event, bb.event.CacheLoadProgress):
-                if params.options.quiet > 1:
-                    continue
                 cacheprogress.update(event.current)
                 continue
             if isinstance(event, bb.event.CacheLoadCompleted):
-                if params.options.quiet > 1:
-                    continue
                 cacheprogress.finish()
-                if params.options.quiet == 0:
+                if not params.options.quiet:
                     print("Loaded %d entries from dependency cache." % event.num_entries)
                 continue

@@ -568,7 +542,7 @@ def main(server, eventHandler, params, tf = TerminalFilter):
                 return_value = event.exitcode
                 if event.error:
                     errors = errors + 1
-                    logger.error(str(event))
+                    logger.error("Command execution failed: %s", event.error)
                 main.shutdown = 2
                 continue
             if isinstance(event, bb.command.CommandExit):
@@ -579,16 +553,39 @@ def main(server, eventHandler, params, tf = TerminalFilter):
                 main.shutdown = 2
                 continue
             if isinstance(event, bb.event.MultipleProviders):
-                logger.info(str(event))
+                logger.info("multiple providers are available for %s%s (%s)", event._is_runtime and "runtime " or "",
+                            event._item,
+                            ", ".join(event._candidates))
+                rtime = ""
+                if event._is_runtime:
+                    rtime = "R"
+                logger.info("consider defining a PREFERRED_%sPROVIDER entry to match %s" % (rtime, event._item))
                 continue
             if isinstance(event, bb.event.NoProvider):
+                if event._runtime:
+                    r = "R"
+                else:
+                    r = ""
+
+                extra = ''
+                if not event._reasons:
+                    if event._close_matches:
+                        extra = ". Close matches:\n  %s" % '\n  '.join(event._close_matches)
+
                 # For universe builds, only show these as warnings, not errors
+                h = logger.warning
                 if not universe:
                     return_value = 1
                     errors = errors + 1
-                    logger.error(str(event))
+                    h = logger.error
+
+                if event._dependees:
+                    h("Nothing %sPROVIDES '%s' (but %s %sDEPENDS on or otherwise requires it)%s", r, event._item, ", ".join(event._dependees), r, extra)
                 else:
-                    logger.warning(str(event))
+                    h("Nothing %sPROVIDES '%s'%s", r, event._item, extra)
+                if event._reasons:
+                    for reason in event._reasons:
+                        h("%s", reason)
                 continue

             if isinstance(event, bb.runqueue.sceneQueueTaskStarted):
@@ -610,33 +607,29 @@ def main(server, eventHandler, params, tf = TerminalFilter):
             if isinstance(event, bb.runqueue.runQueueTaskFailed):
                 return_value = 1
                 taskfailures.append(event.taskstring)
-                logger.error(str(event))
+                logger.error("Task (%s) failed with exit code '%s'",
+                             event.taskstring, event.exitcode)
                 continue

             if isinstance(event, bb.runqueue.sceneQueueTaskFailed):
-                logger.warning(str(event))
+                logger.warning("Setscene task (%s) failed with exit code '%s' - real task will be run instead",
+                               event.taskstring, event.exitcode)
                 continue

             if isinstance(event, bb.event.DepTreeGenerated):
                 continue

             if isinstance(event, bb.event.ProcessStarted):
-                if params.options.quiet > 1:
-                    continue
                 parseprogress = new_progress(event.processname, event.total)
                 parseprogress.start(False)
                 continue
             if isinstance(event, bb.event.ProcessProgress):
-                if params.options.quiet > 1:
-                    continue
                 if parseprogress:
                     parseprogress.update(event.progress)
                 else:
                     bb.warn("Got ProcessProgress event for something that never started?")
                 continue
             if isinstance(event, bb.event.ProcessFinished):
-                if params.options.quiet > 1:
-                    continue
                 if parseprogress:
                     parseprogress.finish()
                 parseprogress = None
@@ -647,7 +640,6 @@ def main(server, eventHandler, params, tf = TerminalFilter):
                                   bb.event.MetadataEvent,
                                   bb.event.StampUpdate,
                                   bb.event.ConfigParsed,
-                                  bb.event.MultiConfigParsed,
                                   bb.event.RecipeParsed,
                                   bb.event.RecipePreFinalise,
                                   bb.runqueue.runQueueEvent,
@@ -655,7 +647,6 @@ def main(server, eventHandler, params, tf = TerminalFilter):
                                   bb.event.OperationCompleted,
                                   bb.event.OperationProgress,
                                   bb.event.DiskFull,
-                                  bb.event.HeartbeatEvent,
                                   bb.build.TaskProgress)):
                 continue

@@ -709,7 +700,7 @@ def main(server, eventHandler, params, tf = TerminalFilter):
         if return_value and errors:
             summary += pluralise("\nSummary: There was %s ERROR message shown, returning a non-zero exit code.",
                                  "\nSummary: There were %s ERROR messages shown, returning a non-zero exit code.", errors)
-        if summary and params.options.quiet == 0:
+        if summary and not params.options.quiet:
             print(summary)

         if interrupted:

@@ -297,7 +297,7 @@ class NCursesUI:
 #                            bb.error("log data follows (%s)" % logfile)
 #                            number_of_lines = data.getVar("BBINCLUDELOGS_LINES", d)
 #                            if number_of_lines:
-#                                subprocess.check_call('tail -n%s %s' % (number_of_lines, logfile), shell=True)
+#                                subprocess.call('tail -n%s %s' % (number_of_lines, logfile), shell=True)
 #                            else:
 #                                f = open(logfile, "r")
 #                                while True:
@@ -315,7 +315,7 @@ class NCursesUI:
                     # also allow them to now exit with a single ^C
                     shutdown = 2
                 if isinstance(event, bb.command.CommandFailed):
-                    mw.appendText(str(event))
+                    mw.appendText("Command execution failed: %s" % event.error)
                     time.sleep(2)
                     exitflag = True
                 if isinstance(event, bb.command.CommandExit):

@@ -1,336 +0,0 @@
#
# BitBake Graphical GTK based Dependency Explorer
#
# Copyright (C) 2007        Ross Burton
# Copyright (C) 2007 - 2008 Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import sys
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Gdk, GObject
from multiprocessing import Queue
import threading
from xmlrpc import client
import time
import bb
import bb.event

# Package Model
(COL_PKG_NAME) = (0)

# Dependency Model
(TYPE_DEP, TYPE_RDEP) = (0, 1)
(COL_DEP_TYPE, COL_DEP_PARENT, COL_DEP_PACKAGE) = (0, 1, 2)


class PackageDepView(Gtk.TreeView):
    def __init__(self, model, dep_type, label):
        Gtk.TreeView.__init__(self)
        self.current = None
        self.dep_type = dep_type
        self.filter_model = model.filter_new()
        self.filter_model.set_visible_func(self._filter, data=None)
        self.set_model(self.filter_model)
        self.append_column(Gtk.TreeViewColumn(label, Gtk.CellRendererText(), text=COL_DEP_PACKAGE))

    def _filter(self, model, iter, data):
        this_type = model[iter][COL_DEP_TYPE]
        package = model[iter][COL_DEP_PARENT]
        if this_type != self.dep_type: return False
        return package == self.current

    def set_current_package(self, package):
        self.current = package
        self.filter_model.refilter()


class PackageReverseDepView(Gtk.TreeView):
    def __init__(self, model, label):
        Gtk.TreeView.__init__(self)
        self.current = None
        self.filter_model = model.filter_new()
        self.filter_model.set_visible_func(self._filter)
        self.sort_model = self.filter_model.sort_new_with_model()
        self.sort_model.set_sort_column_id(COL_DEP_PARENT, Gtk.SortType.ASCENDING)
        self.set_model(self.sort_model)
        self.append_column(Gtk.TreeViewColumn(label, Gtk.CellRendererText(), text=COL_DEP_PARENT))

    def _filter(self, model, iter, data):
        package = model[iter][COL_DEP_PACKAGE]
        return package == self.current

    def set_current_package(self, package):
        self.current = package
        self.filter_model.refilter()


class DepExplorer(Gtk.Window):
    def __init__(self):
        Gtk.Window.__init__(self)
        self.set_title("Task Dependency Explorer")
        self.set_default_size(500, 500)
        self.connect("delete-event", Gtk.main_quit)

        # Create the data models
        self.pkg_model = Gtk.ListStore(GObject.TYPE_STRING)
        self.pkg_model.set_sort_column_id(COL_PKG_NAME, Gtk.SortType.ASCENDING)
        self.depends_model = Gtk.ListStore(GObject.TYPE_INT, GObject.TYPE_STRING, GObject.TYPE_STRING)
        self.depends_model.set_sort_column_id(COL_DEP_PACKAGE, Gtk.SortType.ASCENDING)

        pane = Gtk.HPaned()
        pane.set_position(250)
        self.add(pane)

        # The master list of packages
        scrolled = Gtk.ScrolledWindow()
        scrolled.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
        scrolled.set_shadow_type(Gtk.ShadowType.IN)

        self.pkg_treeview = Gtk.TreeView(self.pkg_model)
        self.pkg_treeview.get_selection().connect("changed", self.on_cursor_changed)
        column = Gtk.TreeViewColumn("Package", Gtk.CellRendererText(), text=COL_PKG_NAME)
        self.pkg_treeview.append_column(column)
        scrolled.add(self.pkg_treeview)

        self.search_entry = Gtk.SearchEntry.new()
        self.pkg_treeview.set_search_entry(self.search_entry)

        left_panel = Gtk.VPaned()
        left_panel.add(self.search_entry)
        left_panel.add(scrolled)
        pane.add1(left_panel)

        box = Gtk.VBox(homogeneous=True, spacing=4)

        # Task Depends
        scrolled = Gtk.ScrolledWindow()
        scrolled.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
        scrolled.set_shadow_type(Gtk.ShadowType.IN)
        self.dep_treeview = PackageDepView(self.depends_model, TYPE_DEP, "Dependencies")
        self.dep_treeview.connect("row-activated", self.on_package_activated, COL_DEP_PACKAGE)
        scrolled.add(self.dep_treeview)
        box.add(scrolled)
        pane.add2(box)

        # Reverse Task Depends
        scrolled = Gtk.ScrolledWindow()
        scrolled.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
        scrolled.set_shadow_type(Gtk.ShadowType.IN)
        self.revdep_treeview = PackageReverseDepView(self.depends_model, "Dependent Tasks")
        self.revdep_treeview.connect("row-activated", self.on_package_activated, COL_DEP_PARENT)
        scrolled.add(self.revdep_treeview)
        box.add(scrolled)
        pane.add2(box)

        self.show_all()
        self.search_entry.grab_focus()

    def on_package_activated(self, treeview, path, column, data_col):
        model = treeview.get_model()
        package = model.get_value(model.get_iter(path), data_col)

        pkg_path = []
        def finder(model, path, iter, needle):
            package = model.get_value(iter, COL_PKG_NAME)
            if package == needle:
                pkg_path.append(path)
                return True
            else:
                return False
        self.pkg_model.foreach(finder, package)
        if pkg_path:
            self.pkg_treeview.get_selection().select_path(pkg_path[0])
            self.pkg_treeview.scroll_to_cell(pkg_path[0])

    def on_cursor_changed(self, selection):
        (model, it) = selection.get_selected()
        if it is None:
            current_package = None
        else:
            current_package = model.get_value(it, COL_PKG_NAME)
        self.dep_treeview.set_current_package(current_package)
        self.revdep_treeview.set_current_package(current_package)

    def parse(self, depgraph):
        for task in depgraph["tdepends"]:
            self.pkg_model.insert(0, (task,))
            for depend in depgraph["tdepends"][task]:
                self.depends_model.insert (0, (TYPE_DEP, task, depend))


class gtkthread(threading.Thread):
    quit = threading.Event()
    def __init__(self, shutdown):
        threading.Thread.__init__(self)
        self.setDaemon(True)
        self.shutdown = shutdown
        if not Gtk.init_check()[0]:
            sys.stderr.write("Gtk+ init failed. Make sure DISPLAY variable is set.\n")
            gtkthread.quit.set()

    def run(self):
        GObject.threads_init()
        Gdk.threads_init()
        Gtk.main()
        gtkthread.quit.set()


def main(server, eventHandler, params):
    shutdown = 0

    gtkgui = gtkthread(shutdown)
    gtkgui.start()

    try:
        params.updateFromServer(server)
        cmdline = params.parseActions()
        if not cmdline:
            print("Nothing to do. Use 'bitbake world' to build everything, or run 'bitbake --help' for usage information.")
            return 1
        if 'msg' in cmdline and cmdline['msg']:
            print(cmdline['msg'])
            return 1
        cmdline = cmdline['action']
        if not cmdline or cmdline[0] != "generateDotGraph":
            print("This UI requires the -g option")
            return 1
        ret, error = server.runCommand(["generateDepTreeEvent", cmdline[1], cmdline[2]])
        if error:
            print("Error running command '%s': %s" % (cmdline, error))
            return 1
        elif ret != True:
            print("Error running command '%s': returned %s" % (cmdline, ret))
            return 1
    except client.Fault as x:
        print("XMLRPC Fault getting commandline:\n %s" % x)
        return

    if gtkthread.quit.isSet():
        return

    Gdk.threads_enter()
    dep = DepExplorer()
    bardialog = Gtk.Dialog(parent=dep,
                           flags=Gtk.DialogFlags.MODAL|Gtk.DialogFlags.DESTROY_WITH_PARENT)
    bardialog.set_default_size(400, 50)
    box = bardialog.get_content_area()
    pbar = Gtk.ProgressBar()
    box.pack_start(pbar, True, True, 0)
    bardialog.show_all()
    bardialog.connect("delete-event", Gtk.main_quit)
    Gdk.threads_leave()

    progress_total = 0
    while True:
        try:
            event = eventHandler.waitEvent(0.25)
            if gtkthread.quit.isSet():
                _, error = server.runCommand(["stateForceShutdown"])
                if error:
                    print('Unable to cleanly stop: %s' % error)
                break

            if event is None:
                continue

            if isinstance(event, bb.event.CacheLoadStarted):
                progress_total = event.total
                Gdk.threads_enter()
                bardialog.set_title("Loading Cache")
                pbar.set_fraction(0.0)
                Gdk.threads_leave()

            if isinstance(event, bb.event.CacheLoadProgress):
                x = event.current
                Gdk.threads_enter()
                pbar.set_fraction(x * 1.0 / progress_total)
                Gdk.threads_leave()
                continue

            if isinstance(event, bb.event.CacheLoadCompleted):
                continue

            if isinstance(event, bb.event.ParseStarted):
                progress_total = event.total
                if progress_total == 0:
                    continue
                Gdk.threads_enter()
                pbar.set_fraction(0.0)
                bardialog.set_title("Processing recipes")
                Gdk.threads_leave()

            if isinstance(event, bb.event.ParseProgress):
                x = event.current
                Gdk.threads_enter()
                pbar.set_fraction(x * 1.0 / progress_total)
                Gdk.threads_leave()
                continue

            if isinstance(event, bb.event.ParseCompleted):
                Gdk.threads_enter()
                bardialog.set_title("Generating dependency tree")
                Gdk.threads_leave()
                continue

            if isinstance(event, bb.event.DepTreeGenerated):
                Gdk.threads_enter()
                bardialog.hide()
                dep.parse(event._depgraph)
                Gdk.threads_leave()

            if isinstance(event, bb.command.CommandCompleted):
                continue

            if isinstance(event, bb.event.NoProvider):
                print(str(event))

                _, error = server.runCommand(["stateShutdown"])
                if error:
                    print('Unable to cleanly shutdown: %s' % error)
                break

            if isinstance(event, bb.command.CommandFailed):
                print(str(event))
                return event.exitcode

            if isinstance(event, bb.command.CommandExit):
                return event.exitcode

            if isinstance(event, bb.cooker.CookerExit):
                break

            continue
        except EnvironmentError as ioerror:
            # ignore interrupted io
            if ioerror.args[0] == 4:
                pass
        except KeyboardInterrupt:
            if shutdown == 2:
                print("\nThird Keyboard Interrupt, exit.\n")
                break
            if shutdown == 1:
                print("\nSecond Keyboard Interrupt, stopping...\n")
                _, error = server.runCommand(["stateForceShutdown"])
                if error:
                    print('Unable to cleanly stop: %s' % error)
            if shutdown == 0:
                print("\nKeyboard Interrupt, closing down...\n")
                _, error = server.runCommand(["stateShutdown"])
                if error:
                    print('Unable to cleanly shutdown: %s' % error)
            shutdown = shutdown + 1
            pass
@@ -168,9 +168,6 @@ def main(server, eventHandler, params):
         logger.warning("buildhistory is not enabled. Please enable INHERIT += \"buildhistory\" to see image details.")
         build_history_enabled = False

-    if not "buildstats" in inheritlist.split(" "):
-        logger.warning("buildstats is not enabled. Please enable INHERIT += \"buildstats\" to generate build statistics.")
-
     if not params.observe_only:
         params.updateFromServer(server)
         params.updateToServer(server, os.environ.copy())
@@ -236,9 +233,6 @@ def main(server, eventHandler, params):
|
||||
# pylint: disable=protected-access
|
||||
# the code will look into the protected variables of the event; no easy way around this
|
||||
|
||||
if isinstance(event, bb.event.HeartbeatEvent):
|
||||
continue
|
||||
|
||||
if isinstance(event, bb.event.ParseStarted):
|
||||
if not (build_log and build_log_file_path):
|
||||
build_log, build_log_file_path = _open_build_log(log_dir)
|
||||
@@ -320,13 +314,29 @@ def main(server, eventHandler, params):
|
||||
if isinstance(event, bb.event.CacheLoadCompleted):
|
||||
continue
|
||||
if isinstance(event, bb.event.MultipleProviders):
|
||||
logger.info(str(event))
|
||||
logger.info("multiple providers are available for %s%s (%s)", event._is_runtime and "runtime " or "",
|
||||
event._item,
|
||||
", ".join(event._candidates))
|
||||
logger.info("consider defining a PREFERRED_PROVIDER entry to match %s", event._item)
|
||||
continue
|
||||
|
||||
if isinstance(event, bb.event.NoProvider):
|
||||
errors = errors + 1
|
||||
text = str(event)
|
||||
if event._runtime:
|
||||
r = "R"
|
||||
else:
|
||||
r = ""
|
||||
|
||||
if event._dependees:
|
||||
text = "Nothing %sPROVIDES '%s' (but %s %sDEPENDS on or otherwise requires it)" % (r, event._item, ", ".join(event._dependees), r)
|
||||
else:
|
||||
text = "Nothing %sPROVIDES '%s'" % (r, event._item)
|
||||
|
||||
logger.error(text)
|
||||
if event._reasons:
|
||||
for reason in event._reasons:
|
||||
logger.error("%s", reason)
|
||||
text += reason
|
||||
buildinfohelper.store_log_error(text)
|
||||
continue
|
||||
|
||||
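The older side of this hunk builds the NoProvider message by hand; the `r` prefix turns PROVIDES/DEPENDS into RPROVIDES/RDEPENDS for runtime dependencies. With illustrative values:

    r = "R"
    item = "libfoo"                 # made-up names, just to show the formatting
    dependees = ["bar", "baz"]
    text = "Nothing %sPROVIDES '%s' (but %s %sDEPENDS on or otherwise requires it)" \
           % (r, item, ", ".join(dependees), r)
    # -> "Nothing RPROVIDES 'libfoo' (but bar, baz RDEPENDS on or otherwise requires it)"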
@@ -348,7 +358,8 @@ def main(server, eventHandler, params):
             if isinstance(event, bb.runqueue.runQueueTaskFailed):
                 buildinfohelper.update_and_store_task(event)
                 taskfailures.append(event.taskstring)
-                logger.error(str(event))
+                logger.error("Task (%s) failed with exit code '%s'",
+                             event.taskstring, event.exitcode)
                 continue
 
             if isinstance(event, (bb.runqueue.sceneQueueTaskCompleted, bb.runqueue.sceneQueueTaskFailed)):
@@ -365,7 +376,7 @@ def main(server, eventHandler, params):
             if isinstance(event, bb.command.CommandFailed):
                 errors += 1
                 errorcode = 1
-                logger.error(str(event))
+                logger.error("Command execution failed: %s", event.error)
             elif isinstance(event, bb.event.BuildCompleted):
                 buildinfohelper.scan_image_artifacts()
                 buildinfohelper.clone_required_sdk_artifacts()
@@ -421,7 +432,9 @@ def main(server, eventHandler, params):
                 elif event.type == "SetBRBE":
                     buildinfohelper.brbe = buildinfohelper._get_data_from_event(event)
                 elif event.type == "TaskArtifacts":
-                    buildinfohelper.scan_task_artifacts(event)
+                    # not implemented yet
+                    # see https://bugzilla.yoctoproject.org/show_bug.cgi?id=10283 for details
+                    pass
                 elif event.type == "OSErrorException":
                     logger.error(event)
                 else:
@@ -61,9 +61,6 @@ class BBUIHelper:
                 self.running_tasks[event.pid]['progress'] = event.progress
                 self.running_tasks[event.pid]['rate'] = event.rate
                 self.needUpdate = True
-            else:
-                return False
-        return True
 
     def getTasks(self):
         self.needUpdate = False
@@ -187,7 +187,7 @@ def explode_deps(s):
         #r[-1] += ' ' + ' '.join(j)
     return r
 
-def explode_dep_versions2(s, *, sort=True):
+def explode_dep_versions2(s):
    """
    Take an RDEPENDS style string of format:
    "DEPEND1 (optional version) DEPEND2 (optional version) ..."
@@ -250,8 +250,7 @@ def explode_dep_versions2(s, *, sort=True):
         if not (i in r and r[i]):
             r[lastdep] = []
 
-    if sort:
-        r = collections.OrderedDict(sorted(r.items(), key=lambda x: x[0]))
+    r = collections.OrderedDict(sorted(r.items(), key=lambda x: x[0]))
     return r
 
 def explode_dep_versions(s):
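Both variants return an ordered mapping from dependency name to a list of version constraints (an empty list when no version was given); the newer signature only makes the sorting optional. For example, run inside a BitBake environment:

    import bb.utils
    r = bb.utils.explode_dep_versions2("foo (>= 1.0) bar")
    # r == OrderedDict([('bar', []), ('foo', ['>= 1.0'])])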
@@ -497,11 +496,7 @@ def lockfile(name, shared=False, retry=True, block=False):
             if statinfo.st_ino == statinfo2.st_ino:
                 return lf
             lf.close()
-        except OSError as e:
-            if e.errno == errno.EACCES:
-                logger.error("Unable to acquire lock '%s', %s",
-                             e.strerror, name)
-                sys.exit(1)
+        except Exception:
             try:
                 lf.close()
             except Exception:
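The newer side distinguishes a permissions failure from a transient race: EACCES means the lock can never be acquired, so retrying is pointless and the process aborts. The same errno test in isolation (the path is purely illustrative):

    import errno

    try:
        lf = open('/var/lock/hypothetical.lock', 'a+')
    except OSError as e:
        if e.errno == errno.EACCES:
            raise SystemExit("lock file is not writable: %s" % e.strerror)
        raise   # anything else may be worth retrying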
@@ -528,25 +523,29 @@ def md5_file(filename):
     """
     Return the hex string representation of the MD5 checksum of filename.
     """
-    import hashlib, mmap
+    try:
+        import hashlib
+        m = hashlib.md5()
+    except ImportError:
+        import md5
+        m = md5.new()
 
     with open(filename, "rb") as f:
-        m = hashlib.md5()
-        try:
-            with mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) as mm:
-                for chunk in iter(lambda: mm.read(8192), b''):
-                    m.update(chunk)
-        except ValueError:
-            # You can't mmap() an empty file so silence this exception
-            pass
+        for line in f:
+            m.update(line)
     return m.hexdigest()
 
 def sha256_file(filename):
     """
     Return the hex string representation of the 256-bit SHA checksum of
-    filename.
+    filename. On Python 2.4 this will return None, so callers will need to
+    handle that by either skipping SHA checks, or running a standalone sha256sum
+    binary.
     """
-    import hashlib
+    try:
+        import hashlib
+    except ImportError:
+        return None
 
     s = hashlib.sha256()
     with open(filename, "rb") as f:
@@ -558,7 +557,10 @@ def sha1_file(filename):
     """
     Return the hex string representation of the SHA1 checksum of the filename
     """
-    import hashlib
+    try:
+        import hashlib
+    except ImportError:
+        return None
 
     s = hashlib.sha1()
     with open(filename, "rb") as f:
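The morty-era try/except blocks guard against a Python too old to ship hashlib, while the 2.6 side hashes the file through an 8 KiB-chunked mmap. The chunking idea works just as well with plain reads; a minimal equivalent (the helper name is hypothetical):

    import hashlib

    def sha256_of(path, blocksize=8192):
        # chunked digest; same result as the code above, without mmap
        s = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(blocksize), b''):
                s.update(chunk)
        return s.hexdigest()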
@@ -663,7 +665,7 @@ def build_environment(d):
     for var in bb.data.keys(d):
         export = d.getVarFlag(var, "export", False)
         if export:
-            os.environ[var] = d.getVar(var) or ""
+            os.environ[var] = d.getVar(var, True) or ""
 
 def _check_unsafe_delete_path(path):
     """
@@ -690,7 +692,7 @@ def remove(path, recurse=False):
         if _check_unsafe_delete_path(path):
             raise Exception('bb.utils.remove: called with dangerous path "%s" and recurse=True, refusing to delete!' % path)
         # shutil.rmtree(name) would be ideal but its too slow
-        subprocess.check_call(['rm', '-rf'] + glob.glob(path))
+        subprocess.call(['rm', '-rf'] + glob.glob(path))
         return
     for name in glob.glob(path):
         try:
@@ -781,14 +783,13 @@ def movefile(src, dest, newmtime = None, sstat = None):
             return None
 
     renamefailed = 1
-    # os.rename needs to know the dest path ending with file name
-    # so append the file name to a path only if it's a dir specified
-    srcfname = os.path.basename(src)
-    destpath = os.path.join(dest, srcfname) if os.path.isdir(dest) \
-                else dest
-
     if sstat[stat.ST_DEV] == dstat[stat.ST_DEV]:
         try:
+            # os.rename needs to know the dest path ending with file name
+            # so append the file name to a path only if it's a dir specified
+            srcfname = os.path.basename(src)
+            destpath = os.path.join(dest, srcfname) if os.path.isdir(dest) \
+                        else dest
             os.rename(src, destpath)
             renamefailed = 0
         except Exception as e:
@@ -802,8 +803,8 @@ def movefile(src, dest, newmtime = None, sstat = None):
         didcopy = 0
         if stat.S_ISREG(sstat[stat.ST_MODE]):
             try: # For safety copy then move it over.
-                shutil.copyfile(src, destpath + "#new")
-                os.rename(destpath + "#new", destpath)
+                shutil.copyfile(src, dest + "#new")
+                os.rename(dest + "#new", dest)
                 didcopy = 1
             except Exception as e:
                 print('movefile: copy', src, '->', dest, 'failed.', e)
@@ -816,17 +817,17 @@ def movefile(src, dest, newmtime = None, sstat = None):
             return None # failure
     try:
         if didcopy:
-            os.lchown(destpath, sstat[stat.ST_UID], sstat[stat.ST_GID])
-            os.chmod(destpath, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
+            os.lchown(dest, sstat[stat.ST_UID], sstat[stat.ST_GID])
+            os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
         os.unlink(src)
     except Exception as e:
         print("movefile: Failed to chown/chmod/unlink", dest, e)
         return None
 
     if newmtime:
-        os.utime(destpath, (newmtime, newmtime))
+        os.utime(dest, (newmtime, newmtime))
     else:
-        os.utime(destpath, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
+        os.utime(dest, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
         newmtime = sstat[stat.ST_MTIME]
     return newmtime
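The behavioural point of this change is that the newer code resolves `dest` to a concrete `destpath` once, before any rename, so the copy fallback and the chown/chmod/utime cleanup all act on the same path. The resolution rule on its own (hypothetical helper name):

    import os

    def resolve_dest(src, dest):
        # mirrors movefile's destpath logic: only append the source file
        # name when dest is an existing directory
        srcfname = os.path.basename(src)
        return os.path.join(dest, srcfname) if os.path.isdir(dest) else dest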
@@ -910,36 +911,10 @@ def copyfile(src, dest, newmtime = None, sstat = None):
         newmtime = sstat[stat.ST_MTIME]
     return newmtime
 
-def break_hardlinks(src, sstat = None):
+def which(path, item, direction = 0, history = False):
     """
-    Ensures src is the only hardlink to this file. Other hardlinks,
-    if any, are not affected (other than in their st_nlink value, of
-    course). Returns true on success and false on failure.
-
+    Locate a file in a PATH
     """
-    try:
-        if not sstat:
-            sstat = os.lstat(src)
-    except Exception as e:
-        logger.warning("break_hardlinks: stat of %s failed (%s)" % (src, e))
-        return False
-    if sstat[stat.ST_NLINK] == 1:
-        return True
-    return copyfile(src, src, sstat=sstat)
-
-def which(path, item, direction = 0, history = False, executable=False):
-    """
-    Locate `item` in the list of paths `path` (colon separated string like $PATH).
-    If `direction` is non-zero then the list is reversed.
-    If `history` is True then the list of candidates also returned as result,history.
-    If `executable` is True then the candidate has to be an executable file,
-    otherwise the candidate simply has to exist.
-    """
-
-    if executable:
-        is_candidate = lambda p: os.path.isfile(p) and os.access(p, os.X_OK)
-    else:
-        is_candidate = lambda p: os.path.exists(p)
-
     hist = []
     paths = (path or "").split(':')
@@ -949,7 +924,7 @@ def which(path, item, direction = 0, history = False, executable=False):
     for p in paths:
         next = os.path.join(p, item)
         hist.append(next)
-        if is_candidate(next):
+        if os.path.exists(next):
             if not os.path.isabs(next):
                 next = os.path.abspath(next)
             if history:
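Usage is unchanged between the two releases for the common case; with history=True the list of every candidate that was tried comes back as well (run inside a BitBake environment):

    import os
    import bb.utils

    match = bb.utils.which(os.environ['PATH'], 'gcc')
    match, attempts = bb.utils.which(os.environ['PATH'], 'gcc', history=True)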
@@ -978,7 +953,7 @@ def contains(variable, checkvalues, truevalue, falsevalue, d):
     Arguments:
 
     variable -- the variable name. This will be fetched and expanded (using
-    d.getVar(variable)) and then split into a set().
+    d.getVar(variable, True)) and then split into a set().
 
     checkvalues -- if this is a string it is split on whitespace into a set(),
     otherwise coerced directly into a set().
@@ -991,7 +966,7 @@ def contains(variable, checkvalues, truevalue, falsevalue, d):
     d -- the data store.
     """
 
-    val = d.getVar(variable)
+    val = d.getVar(variable, True)
     if not val:
         return falsevalue
     val = set(val.split())
@@ -1004,7 +979,7 @@ def contains(variable, checkvalues, truevalue, falsevalue, d):
         return falsevalue
 
 def contains_any(variable, checkvalues, truevalue, falsevalue, d):
-    val = d.getVar(variable)
+    val = d.getVar(variable, True)
     if not val:
         return falsevalue
     val = set(val.split())
@@ -1016,30 +991,6 @@ def contains_any(variable, checkvalues, truevalue, falsevalue, d):
         return truevalue
     return falsevalue
 
-def filter(variable, checkvalues, d):
-    """Return all words in the variable that are present in the checkvalues.
-
-    Arguments:
-
-    variable -- the variable name. This will be fetched and expanded (using
-    d.getVar(variable)) and then split into a set().
-
-    checkvalues -- if this is a string it is split on whitespace into a set(),
-    otherwise coerced directly into a set().
-
-    d -- the data store.
-    """
-
-    val = d.getVar(variable)
-    if not val:
-        return ''
-    val = set(val.split())
-    if isinstance(checkvalues, str):
-        checkvalues = set(checkvalues.split())
-    else:
-        checkvalues = set(checkvalues)
-    return ' '.join(sorted(checkvalues & val))
-
 def cpu_count():
     return multiprocessing.cpu_count()
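These are the helpers behind recipe expressions like bb.utils.contains('DISTRO_FEATURES', ...): contains() requires every word to be present, contains_any() any word, and filter() (present only on the 2.6 side) returns the intersection. The set logic in isolation, assuming DISTRO_FEATURES = "systemd wayland x11":

    val = set("systemd wayland x11".split())
    check = set("systemd sysvinit".split())

    print(val >= check)                 # False -> contains() returns falsevalue
    print(bool(val & check))            # True  -> contains_any() returns truevalue
    print(' '.join(sorted(set("x11 opengl".split()) & val)))   # 'x11' -> filter()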
@@ -1311,7 +1262,7 @@ def edit_metadata_file(meta_file, variables, varfunc):
     return updated
 
 
-def edit_bblayers_conf(bblayers_conf, add, remove, edit_cb=None):
+def edit_bblayers_conf(bblayers_conf, add, remove):
     """Edit bblayers.conf, adding and/or removing layers
     Parameters:
         bblayers_conf: path to bblayers.conf file to edit
@@ -1319,8 +1270,6 @@ def edit_bblayers_conf(bblayers_conf, add, remove, edit_cb=None):
             list to add nothing
         remove: layer path (or list of layer paths) to remove; None or
             empty list to remove nothing
-        edit_cb: optional callback function that will be called after
-            processing adds/removes once per existing entry.
     Returns a tuple:
         notadded: list of layers specified to be added but weren't
             (because they were already in the list)
@@ -1384,17 +1333,6 @@ def edit_bblayers_conf(bblayers_conf, add, remove, edit_cb=None):
                 bblayers.append(addlayer)
             del addlayers[:]
 
-    if edit_cb:
-        newlist = []
-        for layer in bblayers:
-            res = edit_cb(layer, canonicalise_path(layer))
-            if res != layer:
-                newlist.append(res)
-                updated = True
-            else:
-                newlist.append(layer)
-        bblayers = newlist
-
     if updated:
         if op == '+=' and not bblayers:
             bblayers = None
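The call shape is the same on both sides; only the optional edit_cb hook differs. Typical use (the layer path is purely illustrative):

    import bb.utils

    # add one layer, remove none; the return values list entries that were
    # already present / could not be found
    notadded, notremoved = bb.utils.edit_bblayers_conf(
        'conf/bblayers.conf', ['/path/to/meta-example'], None)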
@@ -1440,10 +1378,10 @@ def edit_bblayers_conf(bblayers_conf, add, remove, edit_cb=None):
 
 def get_file_layer(filename, d):
     """Determine the collection (as defined by a layer's layer.conf file) containing the specified file"""
-    collections = (d.getVar('BBFILE_COLLECTIONS') or '').split()
+    collections = (d.getVar('BBFILE_COLLECTIONS', True) or '').split()
     collection_res = {}
     for collection in collections:
-        collection_res[collection] = d.getVar('BBFILE_PATTERN_%s' % collection) or ''
+        collection_res[collection] = d.getVar('BBFILE_PATTERN_%s' % collection, True) or ''
 
     def path_to_layer(path):
         # Use longest path so we handle nested layers
@@ -1456,7 +1394,7 @@ def get_file_layer(filename, d):
         return match
 
     result = None
-    bbfiles = (d.getVar('BBFILES') or '').split()
+    bbfiles = (d.getVar('BBFILES', True) or '').split()
     bbfilesmatch = False
     for bbfilesentry in bbfiles:
         if fnmatch.fnmatch(filename, bbfilesentry):
@@ -1533,7 +1471,7 @@ def export_proxies(d):
         if v in os.environ.keys():
             exported = True
         else:
-            v_proxy = d.getVar(v)
+            v_proxy = d.getVar(v, True)
             if v_proxy is not None:
                 os.environ[v] = v_proxy
                 exported = True
@@ -1543,7 +1481,7 @@ def export_proxies(d):
 
 def load_plugins(logger, plugins, pluginpath):
     def load_plugin(name):
-        logger.debug(1, 'Loading plugin %s' % name)
+        logger.debug('Loading plugin %s' % name)
         fp, pathname, description = imp.find_module(name, [pluginpath])
         try:
             return imp.load_module(name, fp, pathname, description)
@@ -1551,7 +1489,7 @@ def load_plugins(logger, plugins, pluginpath):
         if fp:
             fp.close()
 
-    logger.debug(1, 'Loading plugins from %s...' % pluginpath)
+    logger.debug('Loading plugins from %s...' % pluginpath)
 
     expanded = (glob.glob(os.path.join(pluginpath, '*' + ext))
                 for ext in python_extensions)
@@ -1565,14 +1503,3 @@ def load_plugins(logger, plugins, pluginpath):
             plugins.append(obj or plugin)
         else:
             plugins.append(plugin)
-
-
-class LogCatcher(logging.Handler):
-    """Logging handler for collecting logged messages so you can check them later"""
-    def __init__(self):
-        self.messages = []
-        logging.Handler.__init__(self, logging.WARNING)
-    def emit(self, record):
-        self.messages.append(bb.build.logformatter.format(record))
-    def contains(self, message):
-        return (message in self.messages)
@@ -1,9 +1,7 @@
 import fnmatch
 import logging
 import os
-import shutil
 import sys
-import tempfile
 
 import bb.utils
 
@@ -18,62 +16,41 @@ def plugin_init(plugins):
 
 class ActionPlugin(LayerPlugin):
     def do_add_layer(self, args):
-        """Add one or more layers to bblayers.conf."""
-        layerdirs = [os.path.abspath(ldir) for ldir in args.layerdir]
+        """Add a layer to bblayers.conf."""
+        layerdir = os.path.abspath(args.layerdir)
+        if not os.path.exists(layerdir):
+            sys.stderr.write("Specified layer directory doesn't exist\n")
+            return 1
 
-        for layerdir in layerdirs:
-            if not os.path.exists(layerdir):
-                sys.stderr.write("Specified layer directory %s doesn't exist\n" % layerdir)
-                return 1
-
-            layer_conf = os.path.join(layerdir, 'conf', 'layer.conf')
-            if not os.path.exists(layer_conf):
-                sys.stderr.write("Specified layer directory %s doesn't contain a conf/layer.conf file\n" % layerdir)
-                return 1
+        layer_conf = os.path.join(layerdir, 'conf', 'layer.conf')
+        if not os.path.exists(layer_conf):
+            sys.stderr.write("Specified layer directory doesn't contain a conf/layer.conf file\n")
+            return 1
 
         bblayers_conf = os.path.join('conf', 'bblayers.conf')
         if not os.path.exists(bblayers_conf):
             sys.stderr.write("Unable to find bblayers.conf\n")
             return 1
 
-        # Back up bblayers.conf to tempdir before we add layers
-        tempdir = tempfile.mkdtemp()
-        backup = tempdir + "/bblayers.conf.bak"
-        shutil.copy2(bblayers_conf, backup)
-
-        try:
-            notadded, _ = bb.utils.edit_bblayers_conf(bblayers_conf, layerdirs, None)
-            if not (args.force or notadded):
-                try:
-                    self.tinfoil.run_command('parseConfiguration')
-                except bb.tinfoil.TinfoilUIException:
-                    # Restore the back up copy of bblayers.conf
-                    shutil.copy2(backup, bblayers_conf)
-                    bb.fatal("Parse failure with the specified layer added")
-            else:
-                for item in notadded:
-                    sys.stderr.write("Specified layer %s is already in BBLAYERS\n" % item)
-        finally:
-            # Remove the back up copy of bblayers.conf
-            shutil.rmtree(tempdir)
+        notadded, _ = bb.utils.edit_bblayers_conf(bblayers_conf, layerdir, None)
+        if notadded:
+            for item in notadded:
+                sys.stderr.write("Specified layer %s is already in BBLAYERS\n" % item)
 
     def do_remove_layer(self, args):
-        """Remove one or more layers from bblayers.conf."""
+        """Remove a layer from bblayers.conf."""
         bblayers_conf = os.path.join('conf', 'bblayers.conf')
         if not os.path.exists(bblayers_conf):
             sys.stderr.write("Unable to find bblayers.conf\n")
             return 1
 
-        layerdirs = []
-        for item in args.layerdir:
-            if item.startswith('*'):
-                layerdir = item
-            elif not '/' in item:
-                layerdir = '*/%s' % item
-            else:
-                layerdir = os.path.abspath(item)
-            layerdirs.append(layerdir)
-        (_, notremoved) = bb.utils.edit_bblayers_conf(bblayers_conf, None, layerdirs)
+        if args.layerdir.startswith('*'):
+            layerdir = args.layerdir
+        elif not '/' in args.layerdir:
+            layerdir = '*/%s' % args.layerdir
+        else:
+            layerdir = os.path.abspath(args.layerdir)
+        (_, notremoved) = bb.utils.edit_bblayers_conf(bblayers_conf, None, layerdir)
         if notremoved:
             for item in notremoved:
                 sys.stderr.write("No layers matching %s found in BBLAYERS\n" % item)
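The one non-obvious rule in do_remove_layer is how a bare layer name becomes a wildcard, so it matches wherever the layer happens to be checked out. That rule in isolation (hypothetical helper name):

    import os

    def removal_pattern(item):
        if item.startswith('*'):
            return item                  # already a wildcard pattern
        elif '/' not in item:
            return '*/%s' % item         # bare name: match any parent path
        else:
            return os.path.abspath(item)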
@@ -203,7 +180,7 @@ build results (as the layer priority order has effectively changed).
 
         if first_regex:
             # Find the BBFILES entries that match (which will have come from this conf/layer.conf file)
-            bbfiles = str(self.tinfoil.config_data.getVar('BBFILES')).split()
+            bbfiles = str(self.tinfoil.config_data.getVar('BBFILES', True)).split()
             bbfiles_layer = []
             for item in bbfiles:
                 if first_regex.match(item):
@@ -245,10 +222,10 @@ build results (as the layer priority order has effectively changed).
 
     def register_commands(self, sp):
         parser_add_layer = self.add_command(sp, 'add-layer', self.do_add_layer, parserecipes=False)
-        parser_add_layer.add_argument('layerdir', nargs='+', help='Layer directory/directories to add')
+        parser_add_layer.add_argument('layerdir', help='Layer directory to add')
 
         parser_remove_layer = self.add_command(sp, 'remove-layer', self.do_remove_layer, parserecipes=False)
-        parser_remove_layer.add_argument('layerdir', nargs='+', help='Layer directory/directories to remove (wildcards allowed, enclose in quotes to avoid shell expansion)')
+        parser_remove_layer.add_argument('layerdir', help='Layer directory to remove (wildcards allowed, enclose in quotes to avoid shell expansion)')
         parser_remove_layer.set_defaults(func=self.do_remove_layer)
 
         parser_flatten = self.add_command(sp, 'flatten', self.do_flatten)
@@ -12,7 +12,7 @@ class LayerPlugin():
 
     def tinfoil_init(self, tinfoil):
         self.tinfoil = tinfoil
-        self.bblayers = (self.tinfoil.config_data.getVar('BBLAYERS') or "").split()
+        self.bblayers = (self.tinfoil.config_data.getVar('BBLAYERS', True) or "").split()
         layerconfs = self.tinfoil.config_data.varhistory.get_variable_items_files('BBFILE_COLLECTIONS', self.tinfoil.config_data)
         self.bbfile_collections = {layer: os.path.dirname(os.path.dirname(path)) for layer, path in layerconfs.items()}
@@ -1,9 +1,10 @@
-import layerindexlib
-
 import argparse
+import http.client
+import json
 import logging
 import os
 import subprocess
+import urllib.parse
 
 from bblayers.action import ActionPlugin
@@ -20,6 +21,110 @@ class LayerIndexPlugin(ActionPlugin):
     This class inherits ActionPlugin to get do_add_layer.
     """
 
+    def get_json_data(self, apiurl):
+        proxy_settings = os.environ.get("http_proxy", None)
+        conn = None
+        _parsedurl = urllib.parse.urlparse(apiurl)
+        path = _parsedurl.path
+        query = _parsedurl.query
+
+        def parse_url(url):
+            parsedurl = urllib.parse.urlparse(url)
+            if parsedurl.netloc[0] == '[':
+                host, port = parsedurl.netloc[1:].split(']', 1)
+                if ':' in port:
+                    port = port.rsplit(':', 1)[1]
+                else:
+                    port = None
+            else:
+                if parsedurl.netloc.count(':') == 1:
+                    (host, port) = parsedurl.netloc.split(":")
+                else:
+                    host = parsedurl.netloc
+                    port = None
+            return (host, 80 if port is None else int(port))
+
+        if proxy_settings is None:
+            host, port = parse_url(apiurl)
+            conn = http.client.HTTPConnection(host, port)
+            conn.request("GET", path + "?" + query)
+        else:
+            host, port = parse_url(proxy_settings)
+            conn = http.client.HTTPConnection(host, port)
+            conn.request("GET", apiurl)
+
+        r = conn.getresponse()
+        if r.status != 200:
+            raise Exception("Failed to read " + path + ": %d %s" % (r.status, r.reason))
+        return json.loads(r.read())
+
+    def get_layer_deps(self, layername, layeritems, layerbranches, layerdependencies, branchnum, selfname=False):
+        def layeritems_info_id(items_name, layeritems):
+            litems_id = None
+            for li in layeritems:
+                if li['name'] == items_name:
+                    litems_id = li['id']
+                    break
+            return litems_id
+
+        def layerbranches_info(items_id, layerbranches):
+            lbranch = {}
+            for lb in layerbranches:
+                if lb['layer'] == items_id and lb['branch'] == branchnum:
+                    lbranch['id'] = lb['id']
+                    lbranch['vcs_subdir'] = lb['vcs_subdir']
+                    break
+            return lbranch
+
+        def layerdependencies_info(lb_id, layerdependencies):
+            ld_deps = []
+            for ld in layerdependencies:
+                if ld['layerbranch'] == lb_id and not ld['dependency'] in ld_deps:
+                    ld_deps.append(ld['dependency'])
+            if not ld_deps:
+                logger.error("The dependency of layerDependencies is not found.")
+            return ld_deps
+
+        def layeritems_info_name_subdir(items_id, layeritems):
+            litems = {}
+            for li in layeritems:
+                if li['id'] == items_id:
+                    litems['vcs_url'] = li['vcs_url']
+                    litems['name'] = li['name']
+                    break
+            return litems
+
+        if selfname:
+            selfid = layeritems_info_id(layername, layeritems)
+            lbinfo = layerbranches_info(selfid, layerbranches)
+            if lbinfo:
+                selfsubdir = lbinfo['vcs_subdir']
+            else:
+                logger.error("%s is not found in the specified branch" % layername)
+                return
+            selfurl = layeritems_info_name_subdir(selfid, layeritems)['vcs_url']
+            if selfurl:
+                return selfurl, selfsubdir
+            else:
+                logger.error("Cannot get layer %s git repo and subdir" % layername)
+                return
+        ldict = {}
+        itemsid = layeritems_info_id(layername, layeritems)
+        if not itemsid:
+            return layername, None
+        lbid = layerbranches_info(itemsid, layerbranches)
+        if lbid:
+            lbid = layerbranches_info(itemsid, layerbranches)['id']
+        else:
+            logger.error("%s is not found in the specified branch" % layername)
+            return None, None
+        for dependency in layerdependencies_info(lbid, layerdependencies):
+            lname = layeritems_info_name_subdir(dependency, layeritems)['name']
+            lurl = layeritems_info_name_subdir(dependency, layeritems)['vcs_url']
+            lsubdir = layerbranches_info(dependency, layerbranches)['vcs_subdir']
+            ldict[lname] = lurl, lsubdir
+        return None, ldict
+
     def get_fetch_layer(self, fetchdir, url, subdir, fetch_layer):
         layername = self.get_layer_name(url)
         if os.path.splitext(layername)[1] == '.git':
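parse_url above splits host and port by hand so that bracketed IPv6 authorities survive alongside plain host:port forms. A standalone restatement with a couple of checks (the URLs and helper name are illustrative):

    import urllib.parse

    def host_port(url, default=80):
        # same splitting rules as the nested parse_url above
        netloc = urllib.parse.urlparse(url).netloc
        if netloc.startswith('['):                        # bracketed IPv6 literal
            host, rest = netloc[1:].split(']', 1)
            port = rest.rsplit(':', 1)[1] if ':' in rest else None
        elif netloc.count(':') == 1:
            host, port = netloc.split(':')
        else:
            host, port = netloc, None
        return (host, default if port is None else int(port))

    print(host_port("http://layers.example.com/api/"))    # ('layers.example.com', 80)
    print(host_port("http://[2001:db8::1]:8080/api/"))    # ('2001:db8::1', 8080)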
@@ -28,167 +133,124 @@ class LayerIndexPlugin(ActionPlugin):
         layerdir = os.path.join(repodir, subdir)
         if not os.path.exists(repodir):
             if fetch_layer:
-                result = subprocess.call(['git', 'clone', url, repodir])
+                result = subprocess.call('git clone %s %s' % (url, repodir), shell = True)
                 if result:
                     logger.error("Failed to download %s" % url)
-                    return None, None, None
+                    return None, None
                 else:
-                    return subdir, layername, layerdir
+                    return layername, layerdir
             else:
                 logger.plain("Repository %s needs to be fetched" % url)
-                return subdir, layername, layerdir
+                return layername, layerdir
         elif os.path.exists(layerdir):
-            return subdir, layername, layerdir
+            return layername, layerdir
         else:
             logger.error("%s is not in %s" % (url, subdir))
-            return None, None, None
+            return None, None
 
     def do_layerindex_fetch(self, args):
         """Fetches a layer from a layer index along with its dependent layers, and adds them to conf/bblayers.conf.
         """
 
-        def _construct_url(baseurls, branches):
-            urls = []
-            for baseurl in baseurls:
-                if baseurl[-1] != '/':
-                    baseurl += '/'
-
-                if not baseurl.startswith('cooker'):
-                    baseurl += "api/"
-
-                if branches:
-                    baseurl += ";branch=%s" % ','.join(branches)
-
-                urls.append(baseurl)
-
-            return urls
-
-
-        # Set the default...
-        if args.branch:
-            branches = [args.branch]
+        apiurl = self.tinfoil.config_data.getVar('BBLAYERS_LAYERINDEX_URL', True)
+        if not apiurl:
+            logger.error("Cannot get BBLAYERS_LAYERINDEX_URL")
+            return 1
         else:
-            branches = (self.tinfoil.config_data.getVar('LAYERSERIES_CORENAMES') or 'master').split()
-            logger.debug(1, 'Trying branches: %s' % branches)
+            if apiurl[-1] != '/':
+                apiurl += '/'
+            apiurl += "api/"
+        apilinks = self.get_json_data(apiurl)
+        branches = self.get_json_data(apilinks['branches'])
+
+        branchnum = 0
+        for branch in branches:
+            if branch['name'] == args.branch:
+                branchnum = branch['id']
+                break
+        if branchnum == 0:
+            validbranches = ', '.join([branch['name'] for branch in branches])
+            logger.error('Invalid layer branch name "%s". Valid branches: %s' % (args.branch, validbranches))
+            return 1
 
         ignore_layers = []
+        for collection in self.tinfoil.config_data.getVar('BBFILE_COLLECTIONS', True).split():
+            lname = self.tinfoil.config_data.getVar('BBLAYERS_LAYERINDEX_NAME_%s' % collection, True)
+            if lname:
+                ignore_layers.append(lname)
 
         if args.ignore:
             ignore_layers.extend(args.ignore.split(','))
 
-        # Load the cooker DB
-        cookerIndex = layerindexlib.LayerIndex(self.tinfoil.config_data)
-        cookerIndex.load_layerindex('cooker://', load='layerDependencies')
+        layeritems = self.get_json_data(apilinks['layerItems'])
+        layerbranches = self.get_json_data(apilinks['layerBranches'])
+        layerdependencies = self.get_json_data(apilinks['layerDependencies'])
+        invaluenames = []
+        repourls = {}
+        printlayers = []
 
-        # Fast path, check if we already have what has been requested!
-        (dependencies, invalidnames) = cookerIndex.find_dependencies(names=args.layername, ignores=ignore_layers)
-        if not args.show_only and not invalidnames:
-            logger.plain("You already have the requested layer(s): %s" % args.layername)
-            return 0
+        def query_dependencies(layers, layeritems, layerbranches, layerdependencies, branchnum):
+            depslayer = []
+            for layername in layers:
+                invaluename, layerdict = self.get_layer_deps(layername, layeritems, layerbranches, layerdependencies, branchnum)
+                if layerdict:
+                    repourls[layername] = self.get_layer_deps(layername, layeritems, layerbranches, layerdependencies, branchnum, selfname=True)
+                    for layer in layerdict:
+                        if not layer in ignore_layers:
+                            depslayer.append(layer)
+                        printlayers.append((layername, layer, layerdict[layer][0], layerdict[layer][1]))
+                        if not layer in ignore_layers and not layer in repourls:
+                            repourls[layer] = (layerdict[layer][0], layerdict[layer][1])
+                if invaluename and not invaluename in invaluenames:
+                    invaluenames.append(invaluename)
+            return depslayer
 
-        # The information to show is already in the cookerIndex
-        if invalidnames:
-            # General URL to use to access the layer index
-            # While there is ONE right now, we're expect users could enter several
-            apiurl = self.tinfoil.config_data.getVar('BBLAYERS_LAYERINDEX_URL').split()
-            if not apiurl:
-                logger.error("Cannot get BBLAYERS_LAYERINDEX_URL")
-                return 1
+        depslayers = query_dependencies(args.layername, layeritems, layerbranches, layerdependencies, branchnum)
+        while depslayers:
+            depslayer = query_dependencies(depslayers, layeritems, layerbranches, layerdependencies, branchnum)
+            depslayers = depslayer
+        if invaluenames:
+            for invaluename in invaluenames:
+                logger.error('Layer "%s" not found in layer index' % invaluename)
+            return 1
+        logger.plain("%s %s %s %s" % ("Layer".ljust(19), "Required by".ljust(19), "Git repository".ljust(54), "Subdirectory"))
+        logger.plain('=' * 115)
+        for layername in args.layername:
+            layerurl = repourls[layername]
+            logger.plain("%s %s %s %s" % (layername.ljust(20), '-'.ljust(20), layerurl[0].ljust(55), layerurl[1]))
+        printedlayers = []
+        for layer, dependency, gitrepo, subdirectory in printlayers:
+            if dependency in printedlayers:
+                continue
+            logger.plain("%s %s %s %s" % (dependency.ljust(20), layer.ljust(20), gitrepo.ljust(55), subdirectory))
+            printedlayers.append(dependency)
 
-            remoteIndex = layerindexlib.LayerIndex(self.tinfoil.config_data)
-
-            for remoteurl in _construct_url(apiurl, branches):
-                logger.plain("Loading %s..." % remoteurl)
-                remoteIndex.load_layerindex(remoteurl)
-
-            if remoteIndex.is_empty():
-                logger.error("Remote layer index %s is empty for branches %s" % (apiurl, branches))
-                return 1
-
-            lIndex = cookerIndex + remoteIndex
-
-            (dependencies, invalidnames) = lIndex.find_dependencies(names=args.layername, ignores=ignore_layers)
-
-            if invalidnames:
-                for invaluename in invalidnames:
-                    logger.error('Layer "%s" not found in layer index' % invaluename)
-                return 1
-
-        logger.plain("%s %s %s" % ("Layer".ljust(49), "Git repository (branch)".ljust(54), "Subdirectory"))
-        logger.plain('=' * 125)
-
-        for deplayerbranch in dependencies:
-            layerBranch = dependencies[deplayerbranch][0]
-
-            # TODO: Determine display behavior
-            # This is the local content, uncomment to hide local
-            # layers from the display.
-            #if layerBranch.index.config['TYPE'] == 'cooker':
-            #    continue
-
-            layerDeps = dependencies[deplayerbranch][1:]
-
-            requiredby = []
-            recommendedby = []
-            for dep in layerDeps:
-                if dep.required:
-                    requiredby.append(dep.layer.name)
-                else:
-                    recommendedby.append(dep.layer.name)
-
-            logger.plain('%s %s %s' % (("%s:%s:%s" %
-                                  (layerBranch.index.config['DESCRIPTION'],
-                                   layerBranch.branch.name,
-                                   layerBranch.layer.name)).ljust(50),
-                                  ("%s (%s)" % (layerBranch.layer.vcs_url,
-                                   layerBranch.actual_branch)).ljust(55),
-                                  layerBranch.vcs_subdir
-                                 ))
-            if requiredby:
-                logger.plain('  required by: %s' % ' '.join(requiredby))
-            if recommendedby:
-                logger.plain('  recommended by: %s' % ' '.join(recommendedby))
-
-        if dependencies:
-            fetchdir = self.tinfoil.config_data.getVar('BBLAYERS_FETCH_DIR')
+        if repourls:
+            fetchdir = self.tinfoil.config_data.getVar('BBLAYERS_FETCH_DIR', True)
             if not fetchdir:
                 logger.error("Cannot get BBLAYERS_FETCH_DIR")
                 return 1
             if not os.path.exists(fetchdir):
                 os.makedirs(fetchdir)
             addlayers = []
 
-            for deplayerbranch in dependencies:
-                layerBranch = dependencies[deplayerbranch][0]
-
-                if layerBranch.index.config['TYPE'] == 'cooker':
-                    # Anything loaded via cooker is already local, skip it
-                    continue
-
-                subdir, name, layerdir = self.get_fetch_layer(fetchdir,
-                                                      layerBranch.layer.vcs_url,
-                                                      layerBranch.vcs_subdir,
-                                                      not args.show_only)
+            for repourl, subdir in repourls.values():
+                name, layerdir = self.get_fetch_layer(fetchdir, repourl, subdir, not args.show_only)
                 if not name:
                     # Error already shown
                     return 1
                 addlayers.append((subdir, name, layerdir))
         if not args.show_only:
-            localargs = argparse.Namespace()
-            localargs.layerdir = []
-            localargs.force = args.force
-            for subdir, name, layerdir in addlayers:
+            for subdir, name, layerdir in set(addlayers):
                 if os.path.exists(layerdir):
                     if subdir:
-                        logger.plain("Adding layer \"%s\" (%s) to conf/bblayers.conf" % (subdir, layerdir))
+                        logger.plain("Adding layer \"%s\" to conf/bblayers.conf" % subdir)
                     else:
-                        logger.plain("Adding layer \"%s\" (%s) to conf/bblayers.conf" % (name, layerdir))
-                    localargs.layerdir.append(layerdir)
+                        logger.plain("Adding layer \"%s\" to conf/bblayers.conf" % name)
+                    localargs = argparse.Namespace()
+                    localargs.layerdir = layerdir
+                    self.do_add_layer(localargs)
                 else:
                     break
 
-            if localargs.layerdir:
-                self.do_add_layer(localargs)
-
     def do_layerindex_show_depends(self, args):
         """Find layer dependencies from layer index.
         """
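get_layer_deps and query_dependencies walk three flat JSON tables served by the layer index. A minimal sample of the record shapes they assume (the field names come from the code; all values are invented for illustration):

    layeritems = [
        {'id': 7, 'name': 'meta-example', 'vcs_url': 'git://git.example.com/meta-example'},
    ]
    layerbranches = [
        {'id': 21, 'layer': 7, 'branch': 1, 'vcs_subdir': ''},
    ]
    layerdependencies = [
        {'layerbranch': 21, 'dependency': 7},
    ]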
@@ -197,12 +259,12 @@ class LayerIndexPlugin(ActionPlugin):
         self.do_layerindex_fetch(args)
 
     def register_commands(self, sp):
-        parser_layerindex_fetch = self.add_command(sp, 'layerindex-fetch', self.do_layerindex_fetch, parserecipes=False)
+        parser_layerindex_fetch = self.add_command(sp, 'layerindex-fetch', self.do_layerindex_fetch)
         parser_layerindex_fetch.add_argument('-n', '--show-only', help='show dependencies and do nothing else', action='store_true')
-        parser_layerindex_fetch.add_argument('-b', '--branch', help='branch name to fetch')
+        parser_layerindex_fetch.add_argument('-b', '--branch', help='branch name to fetch (default %(default)s)', default='master')
         parser_layerindex_fetch.add_argument('-i', '--ignore', help='assume the specified layers do not need to be fetched/added (separate multiple layers with commas, no spaces)', metavar='LAYER')
         parser_layerindex_fetch.add_argument('layername', nargs='+', help='layer to fetch')
 
-        parser_layerindex_show_depends = self.add_command(sp, 'layerindex-show-depends', self.do_layerindex_show_depends, parserecipes=False)
-        parser_layerindex_show_depends.add_argument('-b', '--branch', help='branch name to fetch')
+        parser_layerindex_show_depends = self.add_command(sp, 'layerindex-show-depends', self.do_layerindex_show_depends)
+        parser_layerindex_show_depends.add_argument('-b', '--branch', help='branch name to fetch (default %(default)s)', default='master')
         parser_layerindex_show_depends.add_argument('layername', nargs='+', help='layer to query')
@@ -5,6 +5,8 @@ import sys
 import os
 import re
 
+import bb.cache
+import bb.providers
 import bb.utils
 
 from bblayers.common import LayerPlugin
@@ -60,7 +62,7 @@ are overlayed will also be listed, with a " (skipped)" suffix.
         # factor - however, each layer.conf is free to either prepend or append to
         # BBPATH (or indeed do crazy stuff with it). Thus the order in BBPATH might
         # not be exactly the order present in bblayers.conf either.
-        bbpath = str(self.tinfoil.config_data.getVar('BBPATH'))
+        bbpath = str(self.tinfoil.config_data.getVar('BBPATH', True))
         overlayed_class_found = False
         for (classfile, classdirs) in classes.items():
             if len(classdirs) > 1:
@@ -112,7 +114,7 @@ skipped recipes will also be listed, with a " (skipped)" suffix.
 
     def list_recipes(self, title, pnspec, show_overlayed_only, show_same_ver_only, show_filenames, show_multi_provider_only, inherits):
         if inherits:
-            bbpath = str(self.tinfoil.config_data.getVar('BBPATH'))
+            bbpath = str(self.tinfoil.config_data.getVar('BBPATH', True))
             for classname in inherits:
                 classfile = 'classes/%s.bbclass' % classname
                 if not bb.utils.which(bbpath, classfile, history=False):
@@ -120,13 +122,15 @@ skipped recipes will also be listed, with a " (skipped)" suffix.
                     sys.exit(1)
 
         pkg_pn = self.tinfoil.cooker.recipecaches[''].pkg_pn
-        (latest_versions, preferred_versions) = self.tinfoil.find_providers()
-        allproviders = self.tinfoil.get_all_providers()
+        (latest_versions, preferred_versions) = bb.providers.findProviders(self.tinfoil.config_data, self.tinfoil.cooker.recipecaches[''], pkg_pn)
+        allproviders = bb.providers.allProviders(self.tinfoil.cooker.recipecaches[''])
 
         # Ensure we list skipped recipes
         # We are largely guessing about PN, PV and the preferred version here,
         # but we have no choice since skipped recipes are not fully parsed
         skiplist = list(self.tinfoil.cooker.skiplist.keys())
         skiplist.sort( key=lambda fileitem: self.tinfoil.cooker.collection.calc_bbfile_priority(fileitem) )
         skiplist.reverse()
         for fn in skiplist:
             recipe_parts = os.path.splitext(os.path.basename(fn))[0].split('_')
             p = recipe_parts[0]
@@ -154,19 +158,14 @@ skipped recipes will also be listed, with a " (skipped)" suffix.
                     logger.plain("%s:", pn)
                     logger.plain(" %s %s%s", layer.ljust(20), ver, skipped)
 
-        global_inherit = (self.tinfoil.config_data.getVar('INHERIT') or "").split()
+        global_inherit = (self.tinfoil.config_data.getVar('INHERIT', True) or "").split()
         cls_re = re.compile('classes/')
 
         preffiles = []
         items_listed = False
         for p in sorted(pkg_pn):
             if pnspec:
-                found=False
-                for pnm in pnspec:
-                    if fnmatch.fnmatch(p, pnm):
-                        found=True
-                        break
-                if not found:
+                if not fnmatch.fnmatch(p, pnspec):
                     continue
 
             if len(allproviders[p]) > 1 or not show_multi_provider_only:
@@ -247,28 +246,17 @@ skipped recipes will also be listed, with a " (skipped)" suffix.
 
 Lists recipes with the bbappends that apply to them as subitems.
 """
-        if args.pnspec:
-            logger.plain('=== Matched appended recipes ===')
-        else:
-            logger.plain('=== Appended recipes ===')
+        logger.plain('=== Appended recipes ===')
 
         pnlist = list(self.tinfoil.cooker_data.pkg_pn.keys())
         pnlist.sort()
         appends = False
         for pn in pnlist:
-            if args.pnspec:
-                found=False
-                for pnm in args.pnspec:
-                    if fnmatch.fnmatch(pn, pnm):
-                        found=True
-                        break
-                if not found:
-                    continue
-
             if self.show_appends_for_pn(pn):
                 appends = True
 
-        if not args.pnspec and self.show_appends_for_skipped():
+        if self.show_appends_for_skipped():
             appends = True
 
         if not appends:
@@ -277,7 +265,10 @@ Lists recipes with the bbappends that apply to them as subitems.
     def show_appends_for_pn(self, pn):
         filenames = self.tinfoil.cooker_data.pkg_pn[pn]
 
-        best = self.tinfoil.find_best_provider(pn)
+        best = bb.providers.findBestProvider(pn,
+                                             self.tinfoil.config_data,
+                                             self.tinfoil.cooker_data,
+                                             self.tinfoil.cooker_data.pkg_pn)
         best_filename = os.path.basename(best[3])
 
         return self.show_appends_output(filenames, best_filename)
@@ -328,12 +319,12 @@ NOTE: .bbappend files can impact the dependencies.
         ignore_layers = (args.ignore or '').split(',')
 
         pkg_fn = self.tinfoil.cooker_data.pkg_fn
-        bbpath = str(self.tinfoil.config_data.getVar('BBPATH'))
+        bbpath = str(self.tinfoil.config_data.getVar('BBPATH', True))
         self.require_re = re.compile(r"require\s+(.+)")
         self.include_re = re.compile(r"include\s+(.+)")
         self.inherit_re = re.compile(r"inherit\s+(.+)")
 
-        global_inherit = (self.tinfoil.config_data.getVar('INHERIT') or "").split()
+        global_inherit = (self.tinfoil.config_data.getVar('INHERIT', True) or "").split()
 
         # The bb's DEPENDS and RDEPENDS
         for f in pkg_fn:
@@ -345,7 +336,10 @@ NOTE: .bbappend files can impact the dependencies.
             deps = self.tinfoil.cooker_data.deps[f]
             for pn in deps:
                 if pn in self.tinfoil.cooker_data.pkg_pn:
-                    best = self.tinfoil.find_best_provider(pn)
+                    best = bb.providers.findBestProvider(pn,
+                                                         self.tinfoil.config_data,
+                                                         self.tinfoil.cooker_data,
+                                                         self.tinfoil.cooker_data.pkg_pn)
                     self.check_cross_depends("DEPENDS", layername, f, best[3], args.filenames, ignore_layers)
 
             # The RDPENDS
@@ -358,11 +352,14 @@ NOTE: .bbappend files can impact the dependencies.
                     sorted_rdeps[k2] = 1
             all_rdeps = sorted_rdeps.keys()
             for rdep in all_rdeps:
-                all_p, best = self.tinfoil.get_runtime_providers(rdep)
+                all_p = bb.providers.getRuntimeProviders(self.tinfoil.cooker_data, rdep)
                 if all_p:
                     if f in all_p:
                         # The recipe provides this one itself, ignore
                         continue
+                    best = bb.providers.filterProvidersRunTime(all_p, rdep,
+                                                               self.tinfoil.config_data,
+                                                               self.tinfoil.cooker_data)[0][0]
                     self.check_cross_depends("RDEPENDS", layername, f, best, args.filenames, ignore_layers)
 
             # The RRECOMMENDS
@@ -375,11 +372,14 @@ NOTE: .bbappend files can impact the dependencies.
                     sorted_rrecs[k2] = 1
             all_rrecs = sorted_rrecs.keys()
             for rrec in all_rrecs:
-                all_p, best = self.tinfoil.get_runtime_providers(rrec)
+                all_p = bb.providers.getRuntimeProviders(self.tinfoil.cooker_data, rrec)
                 if all_p:
                     if f in all_p:
                         # The recipe provides this one itself, ignore
                         continue
+                    best = bb.providers.filterProvidersRunTime(all_p, rrec,
+                                                               self.tinfoil.config_data,
+                                                               self.tinfoil.cooker_data)[0][0]
                     self.check_cross_depends("RRECOMMENDS", layername, f, best, args.filenames, ignore_layers)
 
             # The inherit class
@@ -490,11 +490,10 @@ NOTE: .bbappend files can impact the dependencies.
         parser_show_recipes = self.add_command(sp, 'show-recipes', self.do_show_recipes)
         parser_show_recipes.add_argument('-f', '--filenames', help='instead of the default formatting, list filenames of higher priority recipes with the ones they overlay indented underneath', action='store_true')
         parser_show_recipes.add_argument('-m', '--multiple', help='only list where multiple recipes (in the same layer or different layers) exist for the same recipe name', action='store_true')
-        parser_show_recipes.add_argument('-i', '--inherits', help='only list recipes that inherit the named class(es) - separate multiple classes using , (without spaces)', metavar='CLASS', default='')
-        parser_show_recipes.add_argument('pnspec', nargs='*', help='optional recipe name specification (wildcards allowed, enclose in quotes to avoid shell expansion)')
+        parser_show_recipes.add_argument('-i', '--inherits', help='only list recipes that inherit the named class', metavar='CLASS', default='')
+        parser_show_recipes.add_argument('pnspec', nargs='?', help='optional recipe name specification (wildcards allowed, enclose in quotes to avoid shell expansion)')
 
-        parser_show_appends = self.add_command(sp, 'show-appends', self.do_show_appends)
-        parser_show_appends.add_argument('pnspec', nargs='*', help='optional recipe name specification (wildcards allowed, enclose in quotes to avoid shell expansion)')
+        self.add_command(sp, 'show-appends', self.do_show_appends)
 
         parser_show_cross_depends = self.add_command(sp, 'show-cross-depends', self.do_show_cross_depends)
         parser_show_cross_depends.add_argument('-f', '--filenames', help='show full file path', action='store_true')
@@ -1,28 +0,0 @@
-The layerindexlib module is designed to permit programs to work directly
-with layer index information. (See layers.openembedded.org...)
-
-The layerindexlib module includes a plugin interface that is used to extend
-the basic functionality. There are two primary plugins available: restapi
-and cooker.
-
-The restapi plugin works with a web based REST Api compatible with the
-layerindex-web project, as well as the ability to store and retrieve the
-information for one or more files on the disk.
-
-The cooker plugin works by reading the information from the current build
-project and processing it as if it were a layer index.
-
-
-TODO:
-
-__init__.py:
-    Implement local on-disk caching (using the rest api store/load)
-    Implement layer index style query operations on a combined index
-
-common.py:
-    Stop network access if BB_NO_NETWORK or allowed hosts is restricted
-
-cooker.py:
-    Cooker - Implement recipe parsing
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff