Mirror of https://git.yoctoproject.org/poky (synced 2026-02-20 16:39:40 +01:00)

Compare commits: warrior-21...morty-16.0 (380 commits)
.gitignore (vendored, 11 lines changed)

```
@@ -1,7 +1,6 @@
*.pyc
*.pyo
/*.patch
/.repo/
/build*/
pyshtables.py
pstage/
@@ -19,13 +18,9 @@ hob-image-*.bb
!meta-yocto
!meta-yocto-bsp
!meta-yocto-imported
/documentation/*/eclipse/
/documentation/*/*.html
/documentation/*/*.pdf
/documentation/*/*.tgz
/bitbake/doc/bitbake-user-manual/bitbake-user-manual.html
/bitbake/doc/bitbake-user-manual/bitbake-user-manual.pdf
/bitbake/doc/bitbake-user-manual/bitbake-user-manual.tgz
documentation/user-manual/user-manual.html
documentation/user-manual/user-manual.pdf
documentation/user-manual/user-manual.tgz
pull-*/
bitbake/lib/toaster/contrib/tts/backlog.txt
bitbake/lib/toaster/contrib/tts/log/*
```
README (new file, 58 lines)

```
@@ -0,0 +1,58 @@
Poky
====

Poky is an integration of various components to form a complete prepackaged
build system and development environment. It features support for building
customised embedded device style images. There are reference demo images
featuring an X11/Matchbox/GTK themed UI called Sato. The system supports
cross-architecture application development using QEMU emulation and a
standalone toolchain and SDK with IDE integration.

Additional information on the specifics of hardware that Poky supports
is available in README.hardware. Further hardware support can easily be added
in the form of layers which extend the system's capabilities in a modular way.

As an integration layer Poky consists of several upstream projects such as
BitBake, OpenEmbedded-Core, Yocto documentation and various sources of information
e.g. for the hardware support. Poky is in turn a component of the Yocto Project.

The Yocto Project has extensive documentation about the system including a
reference manual which can be found at:
    http://yoctoproject.org/documentation

OpenEmbedded-Core is a layer containing the core metadata for current versions
of OpenEmbedded. It is distro-less (can build a functional image with
DISTRO = "nodistro") and contains only emulated machine support.

For information about OpenEmbedded, see the OpenEmbedded website:
    http://www.openembedded.org/

Where to Send Patches
=====================

As Poky is an integration repository (built using a tool called combo-layer),
patches against the various components should be sent to their respective
upstreams:

bitbake:
    Git repository: http://git.openembedded.org/bitbake/
    Mailing list: bitbake-devel@lists.openembedded.org

documentation:
    Git repository: http://git.yoctoproject.org/cgit/cgit.cgi/yocto-docs/
    Mailing list: yocto@yoctoproject.org

meta-poky, meta-yocto-bsp:
    Git repository: http://git.yoctoproject.org/cgit/cgit.cgi/meta-yocto(-bsp)
    Mailing list: poky@yoctoproject.org

Everything else should be sent to the OpenEmbedded Core mailing list. If in
doubt, check the oe-core git repository for the content you intend to modify.
Before sending, be sure the patches apply cleanly to the current oe-core git
repository.

    Git repository: http://git.openembedded.org/openembedded-core/
    Mailing list: openembedded-core@lists.openembedded.org

Note: The scripts directory should be treated with extra care as it is a mix of
oe-core and poky-specific files.
```
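The mailing-list workflow above is normally driven with git send-email, the same pattern the bitbake README later in this diff shows for bitbake-devel. A minimal sketch for an oe-core change, assuming the patch to send is the HEAD commit of the current branch:

```
# Send the most recent commit to the OE-Core list for review:
git send-email -M -1 --to openembedded-core@lists.openembedded.org
```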
README.LSB (deleted file, 26 lines)

```
@@ -1,26 +0,0 @@
OE-Core aims to be able to provide basic LSB compatible images. There
are some challenges for OE as LSB isn't always 100% relevant to its
target embedded and IoT audiences.

One challenge is that the LSB spec is no longer being actively
developed [https://github.com/LinuxStandardBase/lsb] and has
components which are end of life or significantly dated. OE
therefore provides compatibility with the following caveats:

* Qt4 is provided by the separate meta-qt4 layer. It's noted that Qt4
  is end of life and this isn't something the core project regularly
  tests any longer. Users are recommended to group together to support
  maintenance of that layer. [http://git.yoctoproject.org/cgit/cgit.cgi/meta-qt4/]

* mailx has been dropped since it's no longer being developed upstream
  and there are better, more modern replacements such as s-nail
  (http://sdaoden.eu/code.html) or mailutils (http://mailutils.org/).

* A few perl modules that were required by LSB 4.x aren't provided:
  libclass-isa, libenv, libdumpvalue, libfile-checktree,
  libi18n-collate, libpod-plainer.

* libpng 1.2 isn't provided; oe-core includes the latest release of libpng
  instead.

* pax (POSIX standard archive) tool is not provided.
```
README.OE-Core (deleted file, 29 lines)

```
@@ -1,29 +0,0 @@
OpenEmbedded-Core
=================

OpenEmbedded-Core is a layer containing the core metadata for current versions
of OpenEmbedded. It is distro-less (can build a functional image with
DISTRO = "nodistro") and contains only emulated machine support.

For information about OpenEmbedded, see the OpenEmbedded website:
    http://www.openembedded.org/

The Yocto Project has extensive documentation about OE including a reference manual
which can be found at:
    http://yoctoproject.org/documentation


Contributing
------------

Please refer to
http://www.openembedded.org/wiki/How_to_submit_a_patch_to_OpenEmbedded
for guidelines on how to submit patches.

Mailing list:

    http://lists.openembedded.org/mailman/listinfo/openembedded-core

Source code:

    http://git.openembedded.org/openembedded-core/
```
README.hardware (symlink removed)

```
@@ -1 +0,0 @@
meta-yocto-bsp/README.hardware
```

README.hardware (new file, 360 lines)

```
@@ -0,0 +1,360 @@
Poky Hardware README
====================

This file gives details about using Poky with the reference machines
supported out of the box. A full list of supported reference target machines
can be found by looking in the following directories:

    meta/conf/machine/
    meta-yocto-bsp/conf/machine/

If you are in doubt about using Poky/OpenEmbedded with your hardware, consult
the documentation for your board/device.

Support for additional devices is normally added by creating BSP layers - for
more information please see the Yocto Board Support Package (BSP) Developer's
Guide - documentation source is in documentation/bspguide or download the PDF
from:

    http://yoctoproject.org/documentation

Support for physical reference hardware has now been split out into a
meta-yocto-bsp layer which can be removed separately from other layers if not
needed.


QEMU Emulation Targets
======================

To simplify development, the build system supports building images to
work with the QEMU emulator in system emulation mode. Several architectures
are currently supported:

  * ARM (qemuarm)
  * x86 (qemux86)
  * x86-64 (qemux86-64)
  * PowerPC (qemuppc)
  * MIPS (qemumips)

Use of the QEMU images is covered in the Yocto Project Reference Manual.
The appropriate MACHINE variable value corresponding to the target is given
in brackets.


Hardware Reference Boards
=========================

The following boards are supported by the meta-yocto-bsp layer:

  * Texas Instruments Beaglebone (beaglebone)
  * Freescale MPC8315E-RDB (mpc8315e-rdb)

For more information see the board's section below. The appropriate MACHINE
variable value corresponding to the board is given in brackets.

Reference Board Maintenance
===========================

Send pull requests, patches, comments or questions about meta-yocto-bsps to poky@yoctoproject.org

Maintainers: Kevin Hao <kexin.hao@windriver.com>
             Bruce Ashfield <bruce.ashfield@windriver.com>

Consumer Devices
================

The following consumer devices are supported by the meta-yocto-bsp layer:

  * Intel x86 based PCs and devices (genericx86)
  * Ubiquiti Networks EdgeRouter Lite (edgerouter)

For more information see the device's section below. The appropriate MACHINE
variable value corresponding to the device is given in brackets.


Specific Hardware Documentation
===============================


Intel x86 based PCs and devices (genericx86*)
=============================================

The genericx86 and genericx86-64 MACHINEs are tested on the following platforms:

Intel Xeon/Core i-Series:
  + Intel NUC5 Series - ix-52xx Series SOC (Broadwell)
  + Intel NUC6 Series - ix-62xx Series SOC (Skylake)
  + Intel Shumway Xeon Server

Intel Atom platforms:
  + MinnowBoard MAX - E3825 SOC (Bay Trail)
  + MinnowBoard MAX - Turbot (ADI Engineering) - E3826 SOC (Bay Trail)
    - These boards can run in either 32bit or 64bit modes depending on firmware
    - See minnowboard.org for details
  + Intel Braswell SOC

and is likely to work on many unlisted Atom/Core/Xeon based devices. The MACHINE
type supports ethernet, wifi, sound, and Intel/vesa graphics by default in
addition to common PC input devices, busses, and so on.

Depending on the device, it can boot from a traditional hard-disk, a USB device,
or over the network. Writing generated images to physical media is
straightforward with a caveat for USB devices. The following examples assume the
target boot device is /dev/sdb; be sure to verify this and use the correct
device, as the following commands are run as root and are not reversible.

USB Device:
  1. Build a live image. This image type consists of a simple filesystem
     without a partition table, which is suitable for USB keys; with the
     default setup for the genericx86 machine, this image type is built
     automatically for any image you build. For example:

     $ bitbake core-image-minimal

  2. Use the "dd" utility to write the image to the raw block device. For
     example:

     # dd if=core-image-minimal-genericx86.hddimg of=/dev/sdb

  If the device fails to boot with "Boot error" displayed, or apparently
  stops just after the SYSLINUX version banner, it is likely the BIOS cannot
  understand the physical layout of the disk (or rather it expects a
  particular layout and cannot handle anything else). There are two possible
  solutions to this problem:

  1. Change the BIOS USB Device setting to HDD mode. The label will vary by
     device, but the idea is to force BIOS to read the Cylinder/Head/Sector
     geometry from the device.

  2. Use a ".wic" image with an EFI partition

     a) With a default grub-efi bootloader:
     # dd if=core-image-minimal-genericx86-64.wic of=/dev/sdb

     b) Use systemd-boot instead
     - Build an image with EFI_PROVIDER="systemd-boot" then use the above
       dd command to write the image to a USB stick.
```
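Because dd overwrites the target device unconditionally, it is worth confirming the device node before writing. A minimal sketch of one way to check on a typical Linux workstation (the device and image names are illustrative):

```
# List block devices with size and transport; the USB key is normally the
# "usb"-transport device that appears after it is plugged in.
lsblk -o NAME,SIZE,MODEL,TRAN

# Cross-check against the kernel log from the moment the key was inserted.
dmesg | tail

# Only write the image once the device name is confirmed, then flush caches.
dd if=core-image-minimal-genericx86.hddimg of=/dev/sdb
sync
```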
```
Texas Instruments Beaglebone (beaglebone)
=========================================

The Beaglebone is an ARM Cortex-A8 development board with USB, Ethernet, 2D/3D
accelerated graphics, audio, serial, JTAG, and SD/MMC. The Black adds a faster
CPU, more RAM, eMMC flash and a micro HDMI port. The beaglebone MACHINE is
tested on the following platforms:

  o Beaglebone Black A6
  o Beaglebone A6 (the original "White" model)

The Beaglebone Black has eMMC, while the White does not. Pressing the USER/BOOT
button when powering on will temporarily change the boot order. But for the sake
of simplicity, these instructions assume you have erased the eMMC on the Black,
so its boot behavior matches that of the White and boots off of SD card. To do
this, issue the following commands from the u-boot prompt:

    # mmc dev 1
    # mmc erase 0 512

To further tailor these instructions for your board, please refer to the
documentation at http://www.beagleboard.org/bone and http://www.beagleboard.org/black

From a Linux system with access to the image files perform the following steps:

  1. Build an image. For example:

     $ bitbake core-image-minimal

  2. Use the "dd" utility to write the image to the SD card. For example:

     # dd if=core-image-minimal-beaglebone.wic of=/dev/sdb

  3. Insert the SD card into the Beaglebone and boot the board.
```
```
Freescale MPC8315E-RDB (mpc8315e-rdb)
=====================================

The MPC8315 PowerPC reference platform (MPC8315E-RDB) is aimed at hardware and
software development of network attached storage (NAS) and digital media server
applications. The MPC8315E-RDB features the PowerQUICC II Pro processor, which
includes a built-in security accelerator.

(Note: you may find it easier to order MPC8315E-RDBA; this appears to be the
same board in an enclosure with accessories. In any case it is fully
compatible with the instructions given here.)

Setup instructions
------------------

You will need the following:
  * NFS root setup on your workstation
  * TFTP server installed on your workstation
  * Straight-thru 9-conductor serial cable (DB9, M/F) connected from your
    PC to UART1
  * Ethernet connected to the first ethernet port on the board

--- Preparation ---

Note: if you have altered your board's ethernet MAC address(es) from the
defaults, or you need to do so because you want multiple boards on the same
network, then you will need to change the values in the dts file (patch
linux/arch/powerpc/boot/dts/mpc8315erdb.dts within the kernel source). If
you have left them at the factory default then you shouldn't need to do
anything here.

--- Booting from NFS root ---

Load the kernel and dtb (device tree blob), and boot the system as follows:

  1. Get the kernel (uImage-mpc8315e-rdb.bin) and dtb (uImage-mpc8315e-rdb.dtb)
     files from the tmp/deploy directory, and make them available on your TFTP
     server.

  2. Connect the board's first serial port to your workstation and then start up
     your favourite serial terminal so that you will be able to interact with
     the serial console. If you don't have a favourite, picocom is suggested:

     $ picocom /dev/ttyUSB0 -b 115200

  3. Power up or reset the board and press a key on the terminal when prompted
     to get to the U-Boot command line

  4. Set up the environment in U-Boot:

     => setenv ipaddr <board ip>
     => setenv serverip <tftp server ip>
     => setenv bootargs root=/dev/nfs rw nfsroot=<nfsroot ip>:<rootfs path> ip=<board ip>:<server ip>:<gateway ip>:255.255.255.0:mpc8315e:eth0:off console=ttyS0,115200

  5. Download the kernel and dtb, and boot:

     => tftp 1000000 uImage-mpc8315e-rdb.bin
     => tftp 2000000 uImage-mpc8315e-rdb.dtb
     => bootm 1000000 - 2000000
```
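As a concrete illustration of the placeholders in steps 4 and 5, here is one possible session, assuming the board is 192.168.1.20, the workstation at 192.168.1.10 serves both TFTP and NFS, and the rootfs is exported from /srv/nfs/mpc8315e (all values are examples, not requirements):

```
=> setenv ipaddr 192.168.1.20
=> setenv serverip 192.168.1.10
=> setenv bootargs root=/dev/nfs rw nfsroot=192.168.1.10:/srv/nfs/mpc8315e ip=192.168.1.20:192.168.1.10:192.168.1.1:255.255.255.0:mpc8315e:eth0:off console=ttyS0,115200
=> tftp 1000000 uImage-mpc8315e-rdb.bin
=> tftp 2000000 uImage-mpc8315e-rdb.dtb
=> bootm 1000000 - 2000000
```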
```
--- Booting from JFFS2 root ---

  1. First boot the board with NFS root.

  2. Erase the MTD partition which will be used as root:

     $ flash_eraseall /dev/mtd3

  3. Copy the JFFS2 image to the MTD partition:

     $ flashcp core-image-minimal-mpc8315e-rdb.jffs2 /dev/mtd3

  4. Then reboot the board and set up the environment in U-Boot:

     => setenv bootargs root=/dev/mtdblock3 rootfstype=jffs2 console=ttyS0,115200
```
```
Ubiquiti Networks EdgeRouter Lite (edgerouter)
==============================================

The EdgeRouter Lite is part of the EdgeMax series. It is a MIPS64 router
(based on the Cavium Octeon processor) with 512MB of RAM, which uses an
internal USB pendrive for storage.

Setup instructions
------------------

You will need the following:
  * RJ45 -> serial ("rollover") cable connected from your PC to the CONSOLE
    port on the device
  * Ethernet connected to the first ethernet port on the board

If using NFS as part of the setup process, you will also need:
  * NFS root setup on your workstation
  * TFTP server installed on your workstation (if fetching the kernel from
    TFTP, see below).

--- Preparation ---

Build an image (e.g. core-image-minimal) using "edgerouter" as the MACHINE.
The following instructions are based on core-image-minimal; other targets
should be similar.

--- Booting from NFS root / kernel via TFTP ---

Load the kernel, and boot the system as follows:

  1. Get the kernel (vmlinux) file from the tmp/deploy/images/edgerouter
     directory, and make it available on your TFTP server.

  2. Connect the board's first serial port to your workstation and then start up
     your favourite serial terminal so that you will be able to interact with
     the serial console. If you don't have a favourite, picocom is suggested:

     $ picocom /dev/ttyS0 -b 115200

  3. Power up or reset the board and press a key on the terminal when prompted
     to get to the U-Boot command line

  4. Set up the environment in U-Boot:

     => setenv ipaddr <board ip>
     => setenv serverip <tftp server ip>

  5. Download the kernel and boot:

     => tftp $loadaddr vmlinux
     => bootoctlinux $loadaddr coremask=0x3 root=/dev/nfs rw nfsroot=<nfsroot ip>:<rootfs path> ip=<board ip>:<server ip>:<gateway ip>:<netmask>:edgerouter:eth0:off mtdparts=phys_mapped_flash:512k(boot0),512k(boot1),64k@3072k(eeprom)

--- Booting from USB disk ---

To boot from the USB disk, you either need to remove it from the edgerouter
box and populate it from another computer, or use a previously booted NFS
image and populate from the edgerouter itself.

Type 1: Use partitioned image
-----------------------------

Steps:

  1. Remove the USB disk from the edgerouter and insert it into a computer
     that has access to your build artifacts.

  2. Flash the image.

     # dd if=core-image-minimal-edgerouter.wic of=/dev/sdb

  3. Insert the USB disk into the edgerouter and boot it.

Type 2: NFS
-----------

Note: If you place the kernel on the ext3 partition, you must re-create the
ext3 filesystem, since the factory u-boot can only handle 128 byte inodes and
cannot read the partition otherwise.

These boot instructions assume that you have recreated the ext3 filesystem with
128 byte inodes, you have an updated u-boot, or you are running an image capable
of making the filesystem on the board itself.


  1. Boot from NFS root

  2. Mount the USB disk partition 2 and then extract the contents of
     tmp/deploy/core-image-XXXX.tar.bz2 into it.

     Before starting, copy core-image-minimal-xxx.tar.bz2 and vmlinux into
     the rootfs path on your workstation.

     and then,

     # mount /dev/sda2 /media/sda2
     # tar -xvjpf core-image-minimal-XXX.tar.bz2 -C /media/sda2
     # cp vmlinux /media/sda2/boot/vmlinux
     # umount /media/sda2
     # reboot

  3. Reboot the board and press a key on the terminal when prompted to get to the U-Boot
     command line:

     # reboot

  4. Load the kernel and boot:

     => ext2load usb 0:2 $loadaddr boot/vmlinux
     => bootoctlinux $loadaddr coremask=0x3 root=/dev/sda2 rw rootwait mtdparts=phys_mapped_flash:512k(boot0),512k(boot1),64k@3072k(eeprom)
```
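The MACHINE selection mentioned under Preparation can be made persistently or per invocation; a sketch, assuming a standard poky build directory (the image name is just the example used above):

```
# One-off build for the EdgeRouter Lite:
MACHINE=edgerouter bitbake core-image-minimal

# Or set it persistently in build/conf/local.conf:
#   MACHINE = "edgerouter"

# Artifacts, including vmlinux, then land under tmp/deploy/images/edgerouter/.
```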
README.poky (symlink removed)

```
@@ -1 +0,0 @@
meta-poky/README.poky
```
README.qemu (deleted file, 15 lines)

```
@@ -1,15 +0,0 @@
QEMU Emulation Targets
======================

To simplify development, the build system supports building images to
work with the QEMU emulator in system emulation mode. Several architectures
are currently supported in 32 and 64 bit variants:

  * ARM (qemuarm + qemuarm64)
  * x86 (qemux86 + qemux86-64)
  * PowerPC (qemuppc only)
  * MIPS (qemumips + qemumips64)

Use of the QEMU images is covered in the Yocto Project Reference Manual.
The appropriate MACHINE variable value corresponding to the target is given
in brackets.
```
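For the QEMU machines above, poky's runqemu helper script is the usual way to boot the result; a sketch (the machine and image choice are examples):

```
# Build for an emulated target...
MACHINE=qemux86-64 bitbake core-image-minimal

# ...then boot it under QEMU.
runqemu qemux86-64 core-image-minimal
```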
bitbake/.gitattributes (vendored, deleted file, 2 lines)

```
@@ -1,2 +0,0 @@
*min.js binary
*min.css binary
```
bitbake/LICENSE

```
@@ -15,5 +15,3 @@ Foundation and individual contributors.
* QUnit is redistributed under the MIT license.

* Font Awesome fonts redistributed under the SIL Open Font License 1.1

* simplediff is distributed under the zlib license.
```
bitbake/README (deleted file, 35 lines)

```
@@ -1,35 +0,0 @@
Bitbake
=======

BitBake is a generic task execution engine that allows shell and Python tasks to be run
efficiently and in parallel while working within complex inter-task dependency constraints.
One of BitBake's main users, OpenEmbedded, takes this core and builds embedded Linux software
stacks using a task-oriented approach.

For information about Bitbake, see the OpenEmbedded website:
    http://www.openembedded.org/

Bitbake plain documentation can be found under the doc directory or its integrated
html version at the Yocto Project website:
    http://yoctoproject.org/documentation

Contributing
------------

Please refer to
http://www.openembedded.org/wiki/How_to_submit_a_patch_to_OpenEmbedded
for guidelines on how to submit patches, just note that the latter documentation is intended
for OpenEmbedded (and its core) not bitbake patches (bitbake-devel@lists.openembedded.org)
but in general main guidelines apply. Once the commit(s) have been created, the way to send
the patch is through git-send-email. For example, to send the last commit (HEAD) on current
branch, type:

    git send-email -M -1 --to bitbake-devel@lists.openembedded.org

Mailing list:

    http://lists.openembedded.org/mailman/listinfo/bitbake-devel

Source code:

    http://git.openembedded.org/bitbake/
```
bitbake/bin/bitbake

```
@@ -36,9 +36,9 @@ from bb import cookerdata
from bb.main import bitbake_main, BitBakeConfigParameters, BBMainException

if sys.getfilesystemencoding() != "utf-8":
    sys.exit("Please use a locale setting which supports UTF-8 (such as LANG=en_US.UTF-8).\nPython can't change the filesystem locale after loading so we need a UTF-8 when Python starts or things won't work.")
    sys.exit("Please use a locale setting which supports utf-8.\nPython can't change the filesystem locale after loading so we need a utf-8 when python starts or things won't work.")

__version__ = "1.42.0"
__version__ = "1.32.0"

if __name__ == "__main__":
    if __version__ != bb.__version__:
```
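Both sides of this hunk abort unless Python's filesystem encoding is UTF-8; the usual remedy is to select a UTF-8 locale before starting BitBake. A sketch (en_US.UTF-8 is just a common choice):

```
# See what encoding Python will use:
python3 -c 'import sys; print(sys.getfilesystemencoding())'

# If it is not utf-8, export a UTF-8 locale and retry:
export LANG=en_US.UTF-8
bitbake core-image-minimal
```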
bitbake/bin/bitbake-diffsigs

```
@@ -1,9 +1,9 @@
#!/usr/bin/env python3

# bitbake-diffsigs / bitbake-dumpsig
# BitBake task signature data dump and comparison utility
# bitbake-diffsigs
# BitBake task signature data comparison utility
#
# Copyright (C) 2012-2013, 2017 Intel Corporation
# Copyright (C) 2012-2013 Intel Corporation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
@@ -21,7 +21,8 @@
import os
import sys
import warnings
import argparse
import fnmatch
import optparse
import logging
import pickle

@@ -29,180 +30,109 @@ sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), 'lib'))

import bb.tinfoil
import bb.siggen
import bb.msg

myname = os.path.basename(sys.argv[0])
logger = bb.msg.logger_create(myname)
def logger_create(name, output=sys.stderr):
    logger = logging.getLogger(name)
    console = logging.StreamHandler(output)
    format = bb.msg.BBLogFormatter("%(levelname)s: %(message)s")
    if output.isatty():
        format.enable_color()
    console.setFormatter(format)
    logger.addHandler(console)
    logger.setLevel(logging.INFO)
    return logger

is_dump = myname == 'bitbake-dumpsig'
logger = logger_create('bitbake-diffsigs')

def find_siginfo(tinfoil, pn, taskname, sigs=None):
    result = None
    tinfoil.set_event_mask(['bb.event.FindSigInfoResult',
                            'logging.LogRecord',
                            'bb.command.CommandCompleted',
                            'bb.command.CommandFailed'])
    ret = tinfoil.run_command('findSigInfo', pn, taskname, sigs)
    if ret:
        while True:
            event = tinfoil.wait_event(1)
            if event:
                if isinstance(event, bb.command.CommandCompleted):
                    break
                elif isinstance(event, bb.command.CommandFailed):
                    logger.error(str(event))
                    sys.exit(2)
                elif isinstance(event, bb.event.FindSigInfoResult):
                    result = event.result
                elif isinstance(event, logging.LogRecord):
                    logger.handle(event)
    else:
        logger.error('No result returned from findSigInfo command')
        sys.exit(2)
    return result
def find_compare_task(bbhandler, pn, taskname):
    """ Find the most recent signature files for the specified PN/task and compare them """

def find_siginfo_task(bbhandler, pn, taskname, sig1=None, sig2=None):
    """ Find the most recent signature files for the specified PN/task """
    def get_hashval(siginfo):
        if siginfo.endswith('.siginfo'):
            return siginfo.rpartition(':')[2].partition('_')[0]
        else:
            return siginfo.rpartition('.')[2]

    if not hasattr(bb.siggen, 'find_siginfo'):
        logger.error('Metadata does not support finding signature data files')
        sys.exit(1)

    if not taskname.startswith('do_'):
        taskname = 'do_%s' % taskname

    if sig1 and sig2:
        sigfiles = find_siginfo(bbhandler, pn, taskname, [sig1, sig2])
        if len(sigfiles) == 0:
            logger.error('No sigdata files found matching %s %s matching either %s or %s' % (pn, taskname, sig1, sig2))
            sys.exit(1)
        elif not sig1 in sigfiles:
            logger.error('No sigdata files found matching %s %s with signature %s' % (pn, taskname, sig1))
            sys.exit(1)
        elif not sig2 in sigfiles:
            logger.error('No sigdata files found matching %s %s with signature %s' % (pn, taskname, sig2))
            sys.exit(1)
        latestfiles = [sigfiles[sig1], sigfiles[sig2]]
    filedates = bb.siggen.find_siginfo(pn, taskname, None, bbhandler.config_data)
    latestfiles = sorted(filedates.keys(), key=lambda f: filedates[f])[-3:]
    if not latestfiles:
        logger.error('No sigdata files found matching %s %s' % (pn, taskname))
        sys.exit(1)
    elif len(latestfiles) < 2:
        logger.error('Only one matching sigdata file found for the specified task (%s %s)' % (pn, taskname))
        sys.exit(1)
    else:
        filedates = find_siginfo(bbhandler, pn, taskname)
        latestfiles = sorted(filedates.keys(), key=lambda f: filedates[f])[-2:]
        if not latestfiles:
            logger.error('No sigdata files found matching %s %s' % (pn, taskname))
            sys.exit(1)
    # It's possible that latestfiles contain 3 elements and the first two have the same hash value.
    # In this case, we delete the second element.
    # The above case is actually the most common one. Because we may have sigdata file and siginfo
    # file having the same hash value. Comparing such two files makes no sense.
    if len(latestfiles) == 3:
        hash0 = get_hashval(latestfiles[0])
        hash1 = get_hashval(latestfiles[1])
        if hash0 == hash1:
            latestfiles.pop(1)

    return latestfiles
    # Define recursion callback
    def recursecb(key, hash1, hash2):
        hashes = [hash1, hash2]
        hashfiles = bb.siggen.find_siginfo(key, None, hashes, bbhandler.config_data)

        recout = []
        if len(hashfiles) == 2:
            out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb)
            recout.extend(list('  ' + l for l in out2))
        else:
            recout.append("Unable to find matching sigdata for %s with hashes %s or %s" % (key, hash1, hash2))

        return recout

    # Recurse into signature comparison
    output = bb.siggen.compare_sigfiles(latestfiles[0], latestfiles[1], recursecb)
    if output:
        print('\n'.join(output))
    sys.exit(0)


# Define recursion callback
def recursecb(key, hash1, hash2):
    hashes = [hash1, hash2]
    hashfiles = find_siginfo(tinfoil, key, None, hashes)

    recout = []
    if len(hashfiles) == 0:
        recout.append("Unable to find matching sigdata for %s with hashes %s or %s" % (key, hash1, hash2))
    elif not hash1 in hashfiles:
        recout.append("Unable to find matching sigdata for %s with hash %s" % (key, hash1))
    elif not hash2 in hashfiles:
        recout.append("Unable to find matching sigdata for %s with hash %s" % (key, hash2))
    else:
        out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb, color=color)
        for change in out2:
            for line in change.splitlines():
                recout.append('  ' + line)
parser = optparse.OptionParser(
    description = "Compares siginfo/sigdata files written out by BitBake",
    usage = """
  %prog -t recipename taskname
  %prog sigdatafile1 sigdatafile2
  %prog sigdatafile1""")

    return recout
parser.add_option("-t", "--task",
        help = "find the signature data files for last two runs of the specified task and compare them",
        action="store", dest="taskargs", nargs=2, metavar='recipename taskname')


parser = argparse.ArgumentParser(
    description=("Dumps" if is_dump else "Compares") + " siginfo/sigdata files written out by BitBake")

parser.add_argument('-D', '--debug',
        help='Enable debug output',
        action='store_true')

if is_dump:
    parser.add_argument("-t", "--task",
            help="find the signature data file for the last run of the specified task",
            action="store", dest="taskargs", nargs=2, metavar=('recipename', 'taskname'))

    parser.add_argument("sigdatafile1",
            help="Signature file to dump. Not used when using -t/--task.",
            action="store", nargs='?', metavar="sigdatafile")
else:
    parser.add_argument('-c', '--color',
            help='Colorize the output (where %(metavar)s is %(choices)s)',
            choices=['auto', 'always', 'never'], default='auto', metavar='color')

    parser.add_argument('-d', '--dump',
            help='Dump the last signature data instead of comparing (equivalent to using bitbake-dumpsig)',
            action='store_true')

    parser.add_argument("-t", "--task",
            help="find the signature data files for the last two runs of the specified task and compare them",
            action="store", dest="taskargs", nargs=2, metavar=('recipename', 'taskname'))

    parser.add_argument("-s", "--signature",
            help="With -t/--task, specify the signatures to look for instead of taking the last two",
            action="store", dest="sigargs", nargs=2, metavar=('fromsig', 'tosig'))

    parser.add_argument("sigdatafile1",
            help="First signature file to compare (or signature file to dump, if second not specified). Not used when using -t/--task.",
            action="store", nargs='?')

    parser.add_argument("sigdatafile2",
            help="Second signature file to compare",
            action="store", nargs='?')

options = parser.parse_args()
if is_dump:
    options.color = 'never'
    options.dump = True
    options.sigdatafile2 = None
    options.sigargs = None

if options.debug:
    logger.setLevel(logging.DEBUG)

color = (options.color == 'always' or (options.color == 'auto' and sys.stdout.isatty()))
options, args = parser.parse_args(sys.argv)

if options.taskargs:
    with bb.tinfoil.Tinfoil() as tinfoil:
        tinfoil.prepare(config_only=True)
        if not options.dump and options.sigargs:
            files = find_siginfo_task(tinfoil, options.taskargs[0], options.taskargs[1], options.sigargs[0], options.sigargs[1])
        else:
            files = find_siginfo_task(tinfoil, options.taskargs[0], options.taskargs[1])

        if options.dump:
            logger.debug("Signature file: %s" % files[-1])
            output = bb.siggen.dump_sigfile(files[-1])
        else:
            if len(files) < 2:
                logger.error('Only one matching sigdata file found for the specified task (%s %s)' % (options.taskargs[0], options.taskargs[1]))
                sys.exit(1)

            # Recurse into signature comparison
            logger.debug("Signature file (previous): %s" % files[-2])
            logger.debug("Signature file (latest): %s" % files[-1])
            output = bb.siggen.compare_sigfiles(files[-2], files[-1], recursecb, color=color)
    find_compare_task(tinfoil, options.taskargs[0], options.taskargs[1])
else:
    if options.sigargs:
        logger.error('-s/--signature can only be used together with -t/--task')
        sys.exit(1)
    try:
        if not options.dump and options.sigdatafile1 and options.sigdatafile2:
            with bb.tinfoil.Tinfoil() as tinfoil:
                tinfoil.prepare(config_only=True)
                output = bb.siggen.compare_sigfiles(options.sigdatafile1, options.sigdatafile2, recursecb, color=color)
        elif options.sigdatafile1:
            output = bb.siggen.dump_sigfile(options.sigdatafile1)
        else:
            logger.error('Must specify signature file(s) or -t/--task')
            parser.print_help()
    if len(args) == 1:
        parser.print_help()
    else:
        try:
            if len(args) == 2:
                output = bb.siggen.dump_sigfile(sys.argv[1])
            else:
                output = bb.siggen.compare_sigfiles(sys.argv[1], sys.argv[2])
        except IOError as e:
            logger.error(str(e))
            sys.exit(1)
        except (pickle.UnpicklingError, EOFError):
            logger.error('Invalid signature data - ensure you are specifying sigdata/siginfo files')
            sys.exit(1)
    except IOError as e:
        logger.error(str(e))
        sys.exit(1)
    except (pickle.UnpicklingError, EOFError):
        logger.error('Invalid signature data - ensure you are specifying sigdata/siginfo files')
        sys.exit(1)

    if output:
        print('\n'.join(output))
if output:
    print('\n'.join(output))
```
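In either version the common entry point is the -t/--task mode, which locates the signature files itself; the warrior side additionally accepts -s/--signature to pick specific hashes. A sketch (the recipe, task, and signature values are placeholders):

```
# Compare the signatures of the last two runs of a task:
bitbake-diffsigs -t busybox do_compile

# warrior only: compare two specific signatures of that task.
bitbake-diffsigs -t busybox do_compile -s <fromsig> <tosig>
```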
bitbake/bin/bitbake-dumpsig (symlink removed)

```
@@ -1 +0,0 @@
bitbake-diffsigs
```

bitbake/bin/bitbake-dumpsig (new executable file, 65 lines)

```
@@ -0,0 +1,65 @@
#!/usr/bin/env python3

# bitbake-dumpsig
# BitBake task signature dump utility
#
# Copyright (C) 2013 Intel Corporation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import os
import sys
import warnings
import optparse
import logging
import pickle

sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), 'lib'))

import bb.siggen

def logger_create(name, output=sys.stderr):
    logger = logging.getLogger(name)
    console = logging.StreamHandler(output)
    format = bb.msg.BBLogFormatter("%(levelname)s: %(message)s")
    if output.isatty():
        format.enable_color()
    console.setFormatter(format)
    logger.addHandler(console)
    logger.setLevel(logging.INFO)
    return logger

logger = logger_create('bitbake-dumpsig')

parser = optparse.OptionParser(
    description = "Dumps siginfo/sigdata files written out by BitBake",
    usage = """
  %prog sigdatafile""")

options, args = parser.parse_args(sys.argv)

if len(args) == 1:
    parser.print_help()
else:
    try:
        output = bb.siggen.dump_sigfile(args[1])
    except IOError as e:
        logger.error(str(e))
        sys.exit(1)
    except (pickle.UnpicklingError, EOFError):
        logger.error('Invalid signature data - ensure you are specifying a sigdata/siginfo file')
        sys.exit(1)

if output:
    print('\n'.join(output))
```
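The morty side restores bitbake-dumpsig as a standalone script taking exactly one file argument; a sketch (the path is a placeholder for a real sigdata/siginfo file under tmp/stamps):

```
# Dump the contents of a single signature file:
bitbake-dumpsig tmp/stamps/<arch>/<recipe>/<version>.do_compile.sigdata.<hash>
```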
bitbake/bin/bitbake-hashserv (deleted file, 67 lines)

```
@@ -1,67 +0,0 @@
#! /usr/bin/env python3
#
# Copyright (C) 2018 Garmin Ltd.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os
import sys
import logging
import argparse
import sqlite3

sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)),'lib'))

import hashserv

VERSION = "1.0.0"

DEFAULT_HOST = ''
DEFAULT_PORT = 8686

def main():
    parser = argparse.ArgumentParser(description='HTTP Equivalence Reference Server. Version=%s' % VERSION)
    parser.add_argument('--address', default=DEFAULT_HOST, help='Bind address (default "%(default)s")')
    parser.add_argument('--port', type=int, default=DEFAULT_PORT, help='Bind port (default %(default)d)')
    parser.add_argument('--prefix', default='', help='HTTP path prefix (default "%(default)s")')
    parser.add_argument('--database', default='./hashserv.db', help='Database file (default "%(default)s")')
    parser.add_argument('--log', default='WARNING', help='Set logging level')

    args = parser.parse_args()

    logger = logging.getLogger('hashserv')

    level = getattr(logging, args.log.upper(), None)
    if not isinstance(level, int):
        raise ValueError('Invalid log level: %s' % args.log)

    logger.setLevel(level)
    console = logging.StreamHandler()
    console.setLevel(level)
    logger.addHandler(console)

    db = sqlite3.connect(args.database)

    server = hashserv.create_server((args.address, args.port), db, args.prefix)
    server.serve_forever()
    return 0

if __name__ == '__main__':
    try:
        ret = main()
    except Exception:
        ret = 1
        import traceback
        traceback.print_exc()
    sys.exit(ret)
```
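On the warrior side the deleted script documents its own interface through its argparse options; it would be started along these lines (the address and log level shown are choices, the port and database path are the script's defaults made explicit):

```
# Run the hash equivalence reference server locally on the default port:
bitbake-hashserv --address 127.0.0.1 --port 8686 --database ./hashserv.db --log INFO
```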
bitbake/bin/bitbake-layers

```
@@ -24,26 +24,49 @@ import logging
import os
import sys
import argparse
import signal

bindir = os.path.dirname(__file__)
topdir = os.path.dirname(bindir)
sys.path[0:0] = [os.path.join(topdir, 'lib')]

import bb.tinfoil
import bb.msg

logger = bb.msg.logger_create('bitbake-layers', sys.stdout)

def tinfoil_init(parserecipes):
    import bb.tinfoil
    tinfoil = bb.tinfoil.Tinfoil(tracking=True)
    tinfoil.prepare(not parserecipes)
    tinfoil.logger.setLevel(logger.getEffectiveLevel())
    return tinfoil


def logger_create(name, output=sys.stderr):
    logger = logging.getLogger(name)
    loggerhandler = logging.StreamHandler(output)
    loggerhandler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
    logger.addHandler(loggerhandler)
    logger.setLevel(logging.INFO)
    return logger

def logger_setup_color(logger, color='auto'):
    from bb.msg import BBLogFormatter
    console = logging.StreamHandler(sys.stdout)
    formatter = BBLogFormatter("%(levelname)s: %(message)s")
    console.setFormatter(formatter)
    logger.handlers = [console]
    if color == 'always' or (color == 'auto' and console.stream.isatty()):
        formatter.enable_color()


logger = logger_create('bitbake-layers', sys.stdout)

def main():
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
    parser = argparse.ArgumentParser(
        description="BitBake layers utility",
        epilog="Use %(prog)s <subcommand> --help to get help on a specific command",
        add_help=False)
    parser.add_argument('-d', '--debug', help='Enable debug output', action='store_true')
    parser.add_argument('-q', '--quiet', help='Print only errors', action='store_true')
    parser.add_argument('-F', '--force', help='Force add without recipe parse verification', action='store_true')
    parser.add_argument('--color', choices=['auto', 'always', 'never'], default='auto', help='Colorize output (where %(metavar)s is %(choices)s)', metavar='COLOR')

    global_args, unparsed_args = parser.parse_known_args()
@@ -60,17 +83,13 @@ def main():
    elif global_args.quiet:
        logger.setLevel(logging.ERROR)

    # Need to re-run logger_create with color argument
    # (will be the same logger since it has the same name)
    bb.msg.logger_create('bitbake-layers', output=sys.stdout, color=global_args.color)
    logger_setup_color(logger, global_args.color)

    plugins = []
    tinfoil = bb.tinfoil.Tinfoil(tracking=True)
    tinfoil.logger.setLevel(logger.getEffectiveLevel())
    tinfoil = tinfoil_init(False)
    try:
        tinfoil.prepare(True)
        for path in ([topdir] +
                tinfoil.config_data.getVar('BBPATH').split(':')):
                tinfoil.config_data.getVar('BBPATH', True).split(':')):
            pluginpath = os.path.join(path, 'lib', 'bblayers')
            bb.utils.load_plugins(logger, plugins, pluginpath)
@@ -90,7 +109,7 @@ def main():

    if getattr(args, 'parserecipes', False):
        tinfoil.config_data.disableTracking()
        tinfoil.parse_recipes()
        tinfoil.parseRecipes()
        tinfoil.config_data.enableTracking()

    return args.func(args)
```
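The refactor does not change how the tool is invoked; global options still precede the subcommand. A sketch using one of the standard subcommands:

```
# Show the layers currently configured in bblayers.conf:
bitbake-layers show-layers

# Disable colored output explicitly:
bitbake-layers --color never show-layers
```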
bitbake/bin/bitbake-selftest

```
@@ -22,24 +22,15 @@ sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)), 'lib'))
import unittest
try:
    import bb
    import hashserv
    import layerindexlib
except RuntimeError as exc:
    sys.exit(str(exc))

tests = ["bb.tests.codeparser",
         "bb.tests.cooker",
         "bb.tests.cow",
         "bb.tests.data",
         "bb.tests.event",
         "bb.tests.fetch",
         "bb.tests.parse",
         "bb.tests.persist_data",
         "bb.tests.utils",
         "hashserv.tests",
         "layerindexlib.tests.layerindexobj",
         "layerindexlib.tests.restapi",
         "layerindexlib.tests.cooker"]
         "bb.tests.utils"]

for t in tests:
    t = '.'.join(t.split('.')[:3])
```
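The tests list above is everything bitbake-selftest iterates over; the script itself takes no arguments, so exercising either side of the diff looks the same:

```
# From the top of a poky checkout:
./bitbake/bin/bitbake-selftest
```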
bitbake/bin/bitbake-worker

```
@@ -17,7 +17,7 @@ from multiprocessing import Lock
from threading import Thread

if sys.getfilesystemencoding() != "utf-8":
    sys.exit("Please use a locale setting which supports UTF-8 (such as LANG=en_US.UTF-8).\nPython can't change the filesystem locale after loading so we need a UTF-8 when Python starts or things won't work.")
    sys.exit("Please use a locale setting which supports utf-8.\nPython can't change the filesystem locale after loading so we need a utf-8 when python starts or things won't work.")

# Users shouldn't be running this code directly
if len(sys.argv) != 2 or not sys.argv[1].startswith("decafbad"):
@@ -95,7 +95,6 @@ def worker_flush(worker_queue):
        pass
    while (worker_queue_int or not worker_queue.empty()):
        try:
            (_, ready, _) = select.select([], [worker_pipe], [], 1)
            if not worker_queue.empty():
                worker_queue_int = worker_queue_int + worker_queue.get()
            written = os.write(worker_pipe, worker_queue_int)
@@ -136,7 +135,7 @@ def sigterm_handler(signum, frame):
    os.killpg(0, signal.SIGTERM)
    sys.exit()

def fork_off_task(cfg, data, databuilder, workerdata, fn, task, taskname, taskhash, unihash, appends, taskdepdata, extraconfigdata, quieterrors=False, dry_run_exec=False):
def fork_off_task(cfg, data, databuilder, workerdata, fn, task, taskname, appends, taskdepdata, quieterrors=False):
    # We need to setup the environment BEFORE the fork, since
    # a fork() or exec*() activates PSEUDO...
@@ -152,10 +151,8 @@ def fork_off_task(cfg, data, databuilder, workerdata, fn, task, taskname, taskha
    except TypeError:
        umask = taskdep['umask'][taskname]

    dry_run = cfg.dry_run or dry_run_exec

    # We can't use the fakeroot environment in a dry run as it possibly hasn't been built
    if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not dry_run:
    if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not cfg.dry_run:
        envvars = (workerdata["fakerootenv"][fn] or "").split()
        for key, value in (var.split('=') for var in envvars):
            envbackup[key] = os.environ.get(key)
@@ -222,22 +219,16 @@ def fork_off_task(cfg, data, databuilder, workerdata, fn, task, taskname, taskha
                the_data = databuilder.mcdata[mc]
                the_data.setVar("BB_WORKERCONTEXT", "1")
                the_data.setVar("BB_TASKDEPDATA", taskdepdata)
                if cfg.limited_deps:
                    the_data.setVar("BB_LIMITEDDEPS", "1")
                the_data.setVar("BUILDNAME", workerdata["buildname"])
                the_data.setVar("DATE", workerdata["date"])
                the_data.setVar("TIME", workerdata["time"])
                for varname, value in extraconfigdata.items():
                    the_data.setVar(varname, value)

                bb.parse.siggen.set_taskdata(workerdata["sigdata"])
                ret = 0

                the_data = bb_cache.loadDataFull(fn, appends)
                the_data.setVar('BB_TASKHASH', taskhash)
                the_data.setVar('BB_UNIHASH', unihash)
                the_data.setVar('BB_TASKHASH', workerdata["runq_hash"][task])

                bb.utils.set_process_name("%s:%s" % (the_data.getVar("PN"), taskname.replace("do_", "")))
                bb.utils.set_process_name("%s:%s" % (the_data.getVar("PN", True), taskname.replace("do_", "")))

                # exported_vars() returns a generator which *cannot* be passed to os.environ.update()
                # successfully. We also need to unset anything from the environment which shouldn't be there
@@ -252,11 +243,11 @@ def fork_off_task(cfg, data, databuilder, workerdata, fn, task, taskname, taskha
                    the_data.setVar(e, fakeenv[e])
                    the_data.setVarFlag(e, 'export', "1")

                task_exports = the_data.getVarFlag(taskname, 'exports')
                task_exports = the_data.getVarFlag(taskname, 'exports', True)
                if task_exports:
                    for e in task_exports.split():
                        the_data.setVarFlag(e, 'export', '1')
                        v = the_data.getVar(e)
                        v = the_data.getVar(e, True)
                        if v is not None:
                            os.environ[e] = v
@@ -268,7 +259,7 @@ def fork_off_task(cfg, data, databuilder, workerdata, fn, task, taskname, taskha
                logger.critical(traceback.format_exc())
                os._exit(1)
            try:
                if dry_run:
                if cfg.dry_run:
                    return 0
                return bb.build.exec_task(fn, taskname, the_data, cfg.profile)
            except:
@@ -337,7 +328,6 @@ class BitbakeWorker(object):
        self.cookercfg = None
        self.databuilder = None
        self.data = None
        self.extraconfigdata = None
        self.build_pids = {}
        self.build_pipes = {}

@@ -372,7 +362,6 @@ class BitbakeWorker(object):
                pass
            if len(self.queue):
                self.handle_item(b"cookerconfig", self.handle_cookercfg)
                self.handle_item(b"extraconfigdata", self.handle_extraconfigdata)
                self.handle_item(b"workerdata", self.handle_workerdata)
                self.handle_item(b"runtask", self.handle_runtask)
                self.handle_item(b"finishnow", self.handle_finishnow)
@@ -380,11 +369,9 @@ class BitbakeWorker(object):
                self.handle_item(b"quit", self.handle_quit)

            for pipe in self.build_pipes:
                if self.build_pipes[pipe].input in ready:
                    self.build_pipes[pipe].read()
                self.build_pipes[pipe].read()
            if len(self.build_pids):
                while self.process_waitpid():
                    continue
                self.process_waitpid()


    def handle_item(self, item, func):
@@ -401,9 +388,6 @@ class BitbakeWorker(object):
        self.databuilder.parseBaseConfiguration()
        self.data = self.databuilder.data

    def handle_extraconfigdata(self, data):
        self.extraconfigdata = pickle.loads(data)

    def handle_workerdata(self, data):
        self.workerdata = pickle.loads(data)
        bb.msg.loggerDefaultDebugLevel = self.workerdata["logdefaultdebug"]
@@ -426,10 +410,10 @@ class BitbakeWorker(object):
        sys.exit(0)

    def handle_runtask(self, data):
        fn, task, taskname, taskhash, unihash, quieterrors, appends, taskdepdata, dry_run_exec = pickle.loads(data)
        fn, task, taskname, quieterrors, appends, taskdepdata = pickle.loads(data)
        workerlog_write("Handling runtask %s %s %s\n" % (task, fn, taskname))

        pid, pipein, pipeout = fork_off_task(self.cookercfg, self.data, self.databuilder, self.workerdata, fn, task, taskname, taskhash, unihash, appends, taskdepdata, self.extraconfigdata, quieterrors, dry_run_exec)
        pid, pipein, pipeout = fork_off_task(self.cookercfg, self.data, self.databuilder, self.workerdata, fn, task, taskname, appends, taskdepdata, quieterrors)

        self.build_pids[pid] = task
        self.build_pipes[pid] = runQueueWorkerPipe(pipein, pipeout)
@@ -442,9 +426,9 @@ class BitbakeWorker(object):
        try:
            pid, status = os.waitpid(-1, os.WNOHANG)
            if pid == 0 or os.WIFSTOPPED(status):
                return False
                return None
        except OSError:
            return False
            return None
```
|
||||
|
||||
workerlog_write("Exit code of %s for pid %s\n" % (status, pid))
|
||||
|
||||
@@ -463,8 +447,6 @@ class BitbakeWorker(object):
|
||||
|
||||
worker_fire_prepickled(b"<exitcode>" + pickle.dumps((task, status)) + b"</exitcode>")
|
||||
|
||||
return True
|
||||
|
||||
def handle_finishnow(self, _):
|
||||
if self.build_pids:
|
||||
logger.info("Sending SIGTERM to remaining %s tasks", len(self.build_pids))
|
||||
@@ -500,3 +482,4 @@ worker_thread.join()
|
||||
|
||||
workerlog_write("exitting")
|
||||
sys.exit(0)
|
||||
|
||||
|
||||
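For reference, the worker loop above consumes a byte stream in which each command is framed as b"<name>" + pickle.dumps(payload) + b"</name>" (the <exitcode> line in the hunks shows the same framing on the sending side). A minimal sketch of that framing, illustrative only and not the upstream handle_item() implementation:

    import pickle

    def extract_item(queue, name):
        # Pull one framed message off the front of the queue, if complete.
        start = b"<" + name + b">"
        end = b"</" + name + b">"
        if queue.startswith(start) and end in queue:
            index = queue.index(end)
            payload = pickle.loads(queue[len(start):index])   # the pickled body
            return payload, queue[index + len(end):]          # remaining bytes
        return None, queue
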
@@ -1,165 +0,0 @@
#!/usr/bin/env python3
"""git-make-shallow: make the current git repository shallow

Remove the history of the specified revisions, then optionally filter the
available refs to those specified.
"""

import argparse
import collections
import errno
import itertools
import os
import subprocess
import sys

version = 1.0


def main():
    if sys.version_info < (3, 4, 0):
        sys.exit('Python 3.4 or greater is required')

    git_dir = check_output(['git', 'rev-parse', '--git-dir']).rstrip()
    shallow_file = os.path.join(git_dir, 'shallow')
    if os.path.exists(shallow_file):
        try:
            check_output(['git', 'fetch', '--unshallow'])
        except subprocess.CalledProcessError:
            try:
                os.unlink(shallow_file)
            except OSError as exc:
                if exc.errno != errno.ENOENT:
                    raise

    args = process_args()
    revs = check_output(['git', 'rev-list'] + args.revisions).splitlines()

    make_shallow(shallow_file, args.revisions, args.refs)

    ref_revs = check_output(['git', 'rev-list'] + args.refs).splitlines()
    remaining_history = set(revs) & set(ref_revs)
    for rev in remaining_history:
        if check_output(['git', 'rev-parse', '{}^@'.format(rev)]):
            sys.exit('Error: %s was not made shallow' % rev)

    filter_refs(args.refs)

    if args.shrink:
        shrink_repo(git_dir)
        subprocess.check_call(['git', 'fsck', '--unreachable'])


def process_args():
    # TODO: add argument to automatically keep local-only refs, since they
    # can't be easily restored with a git fetch.
    parser = argparse.ArgumentParser(description='Remove the history of the specified revisions, then optionally filter the available refs to those specified.')
    parser.add_argument('--ref', '-r', metavar='REF', action='append', dest='refs', help='remove all but the specified refs (cumulative)')
    parser.add_argument('--shrink', '-s', action='store_true', help='shrink the git repository by repacking and pruning')
    parser.add_argument('revisions', metavar='REVISION', nargs='+', help='a git revision/commit')
    if len(sys.argv) < 2:
        parser.print_help()
        sys.exit(2)

    args = parser.parse_args()

    if args.refs:
        args.refs = check_output(['git', 'rev-parse', '--symbolic-full-name'] + args.refs).splitlines()
    else:
        args.refs = get_all_refs(lambda r, t, tt: t == 'commit' or tt == 'commit')

    args.refs = list(filter(lambda r: not r.endswith('/HEAD'), args.refs))
    args.revisions = check_output(['git', 'rev-parse'] + ['%s^{}' % i for i in args.revisions]).splitlines()
    return args


def check_output(cmd, input=None):
    return subprocess.check_output(cmd, universal_newlines=True, input=input)


def make_shallow(shallow_file, revisions, refs):
    """Remove the history of the specified revisions."""
    for rev in follow_history_intersections(revisions, refs):
        print("Processing %s" % rev)
        with open(shallow_file, 'a') as f:
            f.write(rev + '\n')


def get_all_refs(ref_filter=None):
    """Return all the existing refs in this repository, optionally filtering the refs."""
    ref_output = check_output(['git', 'for-each-ref', '--format=%(refname)\t%(objecttype)\t%(*objecttype)'])
    ref_split = [tuple(iter_extend(l.rsplit('\t'), 3)) for l in ref_output.splitlines()]
    if ref_filter:
        ref_split = (e for e in ref_split if ref_filter(*e))
    refs = [r[0] for r in ref_split]
    return refs


def iter_extend(iterable, length, obj=None):
    """Ensure that iterable is the specified length by extending with obj."""
    return itertools.islice(itertools.chain(iterable, itertools.repeat(obj)), length)


def filter_refs(refs):
    """Remove all but the specified refs from the git repository."""
    all_refs = get_all_refs()
    to_remove = set(all_refs) - set(refs)
    if to_remove:
        check_output(['xargs', '-0', '-n', '1', 'git', 'update-ref', '-d', '--no-deref'],
                     input=''.join(l + '\0' for l in to_remove))


def follow_history_intersections(revisions, refs):
    """Determine all the points where the history of the specified revisions intersects the specified refs."""
    queue = collections.deque(revisions)
    seen = set()

    for rev in iter_except(queue.popleft, IndexError):
        if rev in seen:
            continue

        parents = check_output(['git', 'rev-parse', '%s^@' % rev]).splitlines()

        yield rev
        seen.add(rev)

        if not parents:
            continue

        check_refs = check_output(['git', 'merge-base', '--independent'] + sorted(refs)).splitlines()
        for parent in parents:
            for ref in check_refs:
                print("Checking %s vs %s" % (parent, ref))
                try:
                    merge_base = check_output(['git', 'merge-base', parent, ref]).rstrip()
                except subprocess.CalledProcessError:
                    continue
                else:
                    queue.append(merge_base)


def iter_except(func, exception, start=None):
    """Yield a function repeatedly until it raises an exception."""
    try:
        if start is not None:
            yield start()
        while True:
            yield func()
    except exception:
        pass


def shrink_repo(git_dir):
    """Shrink the newly shallow repository, removing the unreachable objects."""
    subprocess.check_call(['git', 'reflog', 'expire', '--expire-unreachable=now', '--all'])
    subprocess.check_call(['git', 'repack', '-ad'])
    try:
        os.unlink(os.path.join(git_dir, 'objects', 'info', 'alternates'))
    except OSError as exc:
        if exc.errno != errno.ENOENT:
            raise
    subprocess.check_call(['git', 'prune', '--expire', 'now'])


if __name__ == '__main__':
    main()
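For illustration, assuming the script above is installed on PATH as git-make-shallow, an invocation could look like this (the ref and revision names are hypothetical):

    git make-shallow --shrink --ref refs/heads/master master~10

This would record the history intersection points in .git/shallow, delete every ref except refs/heads/master, and repack to discard the now-unreachable objects.
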
@@ -18,24 +18,35 @@
# along with this program. If not, see http://www.gnu.org/licenses/.

HELP="
Usage: source toaster start|stop [webport=<address:port>] [noweb] [nobuild] [toasterdir]
Usage: source toaster start|stop [webport=<address:port>] [noweb]
Optional arguments:
[nobuild] Setup the environment for capturing builds with toaster but disable managed builds
[noweb] Setup the environment for capturing builds with toaster but don't start the web server
[noweb] Setup the environment for building with toaster but don't start the development server
[webport] Set the development server (default: localhost:8000)
[toasterdir] Set absolute path to be used as TOASTER_DIR (default: BUILDDIR/../)
"

custom_extention()
webserverKillAll()
{
custom_extension=$BBBASEDIR/lib/toaster/orm/fixtures/custom_toaster_append.sh
if [ -f $custom_extension ] ; then
$custom_extension $*
fi
local pidfile
for pidfile in ${BUILDDIR}/.toastermain.pid ${BUILDDIR}/.runbuilds.pid; do
if [ -f ${pidfile} ]; then
pid=`cat ${pidfile}`
while kill -0 $pid 2>/dev/null; do
kill -SIGTERM -$pid 2>/dev/null
sleep 1
done
rm ${pidfile}
fi
done
}

databaseCheck()
webserverStartAll()
{
# do not start if toastermain points to a valid process
if ! cat "${BUILDDIR}/.toastermain.pid" 2>/dev/null | xargs -I{} kill -0 {} ; then
retval=1
rm "${BUILDDIR}/.toastermain.pid"
fi

retval=0
# you can always add a superuser later via
# ../bitbake/lib/toaster/manage.py createsuperuser --username=<ME>
@@ -54,45 +65,9 @@ databaseCheck()
return $retval
fi

return $retval
}

webserverKillAll()
{
local pidfile
if [ -f ${BUILDDIR}/.toastermain.pid ] ; then
custom_extention web_stop_postpend
else
custom_extention noweb_stop_postpend
fi
for pidfile in ${BUILDDIR}/.toastermain.pid ${BUILDDIR}/.runbuilds.pid; do
if [ -f ${pidfile} ]; then
pid=`cat ${pidfile}`
while kill -0 $pid 2>/dev/null; do
kill -SIGTERM $pid 2>/dev/null
sleep 1
done
rm ${pidfile}
fi
done
}

webserverStartAll()
{
# do not start if toastermain points to a valid process
if ! cat "${BUILDDIR}/.toastermain.pid" 2>/dev/null | xargs -I{} kill -0 {} ; then
retval=1
rm "${BUILDDIR}/.toastermain.pid"
fi

retval=0

# check the database
databaseCheck || return 1

echo "Starting webserver..."

$MANAGE runserver --noreload "$ADDR_PORT" \
$MANAGE runserver "$ADDR_PORT" \
</dev/null >>${BUILDDIR}/toaster_web.log 2>&1 \
& echo $! >${BUILDDIR}/.toastermain.pid

@@ -104,7 +79,6 @@ webserverStartAll()
else
echo "Toaster development webserver started at http://$ADDR_PORT"
echo -e "\nYou can now run 'bitbake <target>' on the command line and monitor your build in Toaster.\nYou can also use a Toaster project to configure and run a build.\n"
custom_extention web_start_postpend $ADDR_PORT
fi

return $retval
@@ -132,18 +106,12 @@ verify_prereq() {
# Verify Django version
reqfile=$(python3 -c "import os; print(os.path.realpath('$BBBASEDIR/toaster-requirements.txt'))")
exp='s/Django\([><=]\+\)\([^,]\+\),\([><=]\+\)\(.\+\)/'
# expand version parts to 2 digits to support 1.10.x > 1.8
# (note:helper functions hard to insert in-line)
exp=$exp'import sys,django;'
exp=$exp'version=["%02d" % int(n) for n in django.get_version().split(".")];'
exp=$exp'vmin=["%02d" % int(n) for n in "\2".split(".")];'
exp=$exp'vmax=["%02d" % int(n) for n in "\4".split(".")];'
exp=$exp'sys.exit(not (version \1 vmin and version \3 vmax))'
exp=$exp'/p'
exp=$exp'import sys,django;version=django.get_version().split(".");'
exp=$exp'sys.exit(not (version \1 "\2".split(".") and version \3 "\4".split(".")))/p'
if ! sed -n "$exp" $reqfile | python3 - ; then
req=`grep ^Django $reqfile`
echo "This program needs $req"
echo "Please install with pip3 install -r $reqfile"
echo "Please install with pip install -r $reqfile"
return 2
fi

@@ -161,9 +129,7 @@ fi

export BBBASEDIR=`dirname $TOASTER`/..
MANAGE="python3 $BBBASEDIR/lib/toaster/manage.py"
if [ -z "$OE_ROOT" ]; then
OE_ROOT=`dirname $TOASTER`/../..
fi
OE_ROOT=`dirname $TOASTER`/../..

# this is the configuraton file we are using for toaster
# we are using the same logic that oe-setup-builddir uses
@@ -185,20 +151,23 @@ fi

unset OE_ROOT

# this defines the dir toaster will use for
# 1) clones of layers (in _toaster_clones )
# 2) the build dir (in build)
# 3) the sqlite db if that is being used.
# 4) pid's we need to clean up on exit/shutdown
# note: for future. in order to make this an arbitrary directory, we need to
# make sure that the toaster.sqlite file doesn't default to `pwd` like it currently does.
export TOASTER_DIR=`pwd`

WEBSERVER=1
export TOASTER_BUILDSERVER=1
ADDR_PORT="localhost:8000"
TOASTERDIR=`dirname $BUILDDIR`
unset CMD
for param in $*; do
case $param in
noweb )
WEBSERVER=0
;;
nobuild )
TOASTER_BUILDSERVER=0
;;
start )
CMD=$param
;;
@@ -215,9 +184,6 @@ for param in $*; do
ADDR_PORT="localhost:$PORT"
fi
;;
toasterdir=*)
TOASTERDIR="${param#*=}"
;;
--help)
echo "$HELP"
return 0
@@ -248,8 +214,10 @@ fi
# 2) the build dir (in build)
# 3) the sqlite db if that is being used.
# 4) pid's we need to clean up on exit/shutdown
export TOASTER_DIR=$TOASTERDIR
export BB_ENV_EXTRAWHITE="$BB_ENV_EXTRAWHITE TOASTER_DIR"
# note: for future. in order to make this an arbitrary directory, we need to
# make sure that the toaster.sqlite file doesn't default to `pwd`
# like it currently does.
export TOASTER_DIR=`dirname $BUILDDIR`

# Determine the action. If specified by arguments, fine, if not, toggle it
if [ "$CMD" = "start" ] ; then
@@ -266,7 +234,6 @@ fi
echo "The system will $CMD."

# Execute the commands
custom_extention toaster_prepend $CMD $ADDR_PORT

case $CMD in
start )
@@ -282,34 +249,15 @@ case $CMD in
line='INHERIT+="toaster buildhistory"'
grep -q "$line" $conf || echo $line >> $conf

if [ $WEBSERVER -eq 0 ] ; then
# Do not update the database for "noweb" unless
# it does not yet exist
if [ ! -f "$TOASTER_DIR/toaster.sqlite" ] ; then
if ! databaseCheck; then
echo "Failed ${CMD}."
return 4
fi
fi
custom_extention noweb_start_postpend $ADDR_PORT
fi
if [ $WEBSERVER -gt 0 ] && ! webserverStartAll; then
echo "Failed ${CMD}."
return 4
fi
export BITBAKE_UI='toasterui'
if [ $TOASTER_BUILDSERVER -eq 1 ] ; then
$MANAGE runbuilds \
</dev/null >>${BUILDDIR}/toaster_runbuilds.log 2>&1 \
& echo $! >${BUILDDIR}/.runbuilds.pid
else
echo "Toaster build server not started."
fi

$MANAGE runbuilds & echo $! >${BUILDDIR}/.runbuilds.pid
# set fail safe stop system on terminal exit
trap stop_system SIGHUP
echo "Successful ${CMD}."
custom_extention toaster_postpend $CMD $ADDR_PORT
return 0
;;
stop )
@@ -317,5 +265,3 @@ case $CMD in
echo "Successful ${CMD}."
;;
esac
custom_extention toaster_postpend $CMD $ADDR_PORT

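The verify_prereq() hunk above zero-pads each version component before comparing, because a plain string comparison of split version parts mis-orders double-digit components. A standalone illustration of why (not part of the script itself):

    def pad(version):
        # "1.10" -> ['01', '10'], so list comparison matches numeric order
        return ["%02d" % int(n) for n in version.split(".")]

    assert pad("1.10") > pad("1.8")                     # 1.10 correctly sorts as newer
    assert not ("1.10".split(".") > "1.8".split("."))   # the naive comparison gets it wrong
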
@@ -2,7 +2,7 @@
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (C) 2012, 2018 Wind River Systems, Inc.
# Copyright (C) 2012 Wind River Systems, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
@@ -18,68 +18,51 @@
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

#
# Used for dumping the bb_cache.dat
# This is used for dumping the bb_cache.dat, the output format is:
# recipe_path PN PV PACKAGES
#
import os
import sys
import argparse
import warnings

# For importing bb.cache
sys.path.insert(0, os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), '../lib'))
from bb.cache import CoreRecipeInfo

import pickle
import pickle as pickle

class DumpCache(object):
def __init__(self):
parser = argparse.ArgumentParser(
description="bb_cache.dat's dumper",
epilog="Use %(prog)s --help to get help")
parser.add_argument("-r", "--recipe",
help="specify the recipe, default: all recipes", action="store")
parser.add_argument("-m", "--members",
help = "specify the member, use comma as separator for multiple ones, default: all members", action="store", default="")
parser.add_argument("-s", "--skip",
help = "skip skipped recipes", action="store_true")
parser.add_argument("cachefile",
help = "specify bb_cache.dat", nargs = 1, action="store", default="")
def main(argv=None):
"""
Get the mapping for the target recipe.
"""
if len(argv) != 1:
print("Error, need one argument!", file=sys.stderr)
return 2

self.args = parser.parse_args()
cachefile = argv[0]

def main(self):
with open(self.args.cachefile[0], "rb") as cachefile:
pickled = pickle.Unpickler(cachefile)
while True:
try:
key = pickled.load()
val = pickled.load()
except Exception:
break
if isinstance(val, CoreRecipeInfo):
pn = val.pn
with open(cachefile, "rb") as cachefile:
pickled = pickle.Unpickler(cachefile)
while cachefile:
try:
key = pickled.load()
val = pickled.load()
except Exception:
break
if isinstance(val, CoreRecipeInfo) and (not val.skipped):
pn = val.pn
# Filter out the native recipes.
if key.startswith('virtual:native:') or pn.endswith("-native"):
continue

if self.args.recipe and self.args.recipe != pn:
continue
# 1.0 is the default version for a no PV recipe.
if "pv" in val.__dict__:
pv = val.pv
else:
pv = "1.0"

if self.args.skip and val.skipped:
continue

if self.args.members:
out = key
for member in self.args.members.split(','):
out += ": %s" % val.__dict__.get(member)
print("%s" % out)
else:
print("%s: %s" % (key, val.__dict__))
elif not self.args.recipe:
print("%s %s" % (key, val))
print("%s %s %s %s" % (key, pn, pv, ' '.join(val.packages)))

if __name__ == "__main__":
try:
dump = DumpCache()
ret = dump.main()
except Exception as esc:
ret = 1
import traceback
traceback.print_exc()
sys.exit(ret)
sys.exit(main(sys.argv[1:]))

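For illustration, the two variants above are invoked differently; assuming the script is saved as dump_cache.py, and with a hypothetical cache path:

    # warrior-era argparse interface
    $ ./dump_cache.py -r busybox -m pn,pv build/tmp/cache/bb_cache.dat
    # morty-era positional interface
    $ ./dump_cache.py build/tmp/cache/bb_cache.dat
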
@@ -31,7 +31,7 @@
<para>
Prior to executing BitBake, you should take advantage of available
parallel thread execution on your build host by setting the
<link linkend='var-bb-BB_NUMBER_THREADS'><filename>BB_NUMBER_THREADS</filename></link>
<link linkend='var-BB_NUMBER_THREADS'><filename>BB_NUMBER_THREADS</filename></link>
variable in your project's <filename>local.conf</filename>
configuration file.
</para>
@@ -87,9 +87,9 @@
<para>
The <filename>layer.conf</filename> files are used to
construct key variables such as
<link linkend='var-bb-BBPATH'><filename>BBPATH</filename></link>
<link linkend='var-BBPATH'><filename>BBPATH</filename></link>
and
<link linkend='var-bb-BBFILES'><filename>BBFILES</filename></link>.
<link linkend='var-BBFILES'><filename>BBFILES</filename></link>.
<filename>BBPATH</filename> is used to search for
configuration and class files under the
<filename>conf</filename> and <filename>classes</filename>
@@ -117,19 +117,19 @@
at certain variables, including:
<itemizedlist>
<listitem><para>
<link linkend='var-bb-BB_ENV_WHITELIST'><filename>BB_ENV_WHITELIST</filename></link>
<link linkend='var-BB_ENV_WHITELIST'><filename>BB_ENV_WHITELIST</filename></link>
</para></listitem>
<listitem><para>
<link linkend='var-bb-BB_ENV_EXTRAWHITE'><filename>BB_ENV_EXTRAWHITE</filename></link>
<link linkend='var-BB_ENV_EXTRAWHITE'><filename>BB_ENV_EXTRAWHITE</filename></link>
</para></listitem>
<listitem><para>
<link linkend='var-bb-BB_PRESERVE_ENV'><filename>BB_PRESERVE_ENV</filename></link>
<link linkend='var-BB_PRESERVE_ENV'><filename>BB_PRESERVE_ENV</filename></link>
</para></listitem>
<listitem><para>
<link linkend='var-bb-BB_ORIGENV'><filename>BB_ORIGENV</filename></link>
<link linkend='var-BB_ORIGENV'><filename>BB_ORIGENV</filename></link>
</para></listitem>
<listitem><para>
<link linkend='var-bb-BITBAKE_UI'><filename>BITBAKE_UI</filename></link>
<link linkend='var-BITBAKE_UI'><filename>BITBAKE_UI</filename></link>
</para></listitem>
</itemizedlist>
The first four variables in this list relate to how BitBake treats shell
@@ -156,7 +156,7 @@
BitBake first searches the current working directory for an
optional <filename>conf/bblayers.conf</filename> configuration file.
This file is expected to contain a
<link linkend='var-bb-BBLAYERS'><filename>BBLAYERS</filename></link>
<link linkend='var-BBLAYERS'><filename>BBLAYERS</filename></link>
variable that is a space-delimited list of 'layer' directories.
Recall that if BitBake cannot find a <filename>bblayers.conf</filename>
file, then it is assumed the user has set the <filename>BBPATH</filename>
@@ -166,10 +166,10 @@
<para>
For each directory (layer) in this list, a <filename>conf/layer.conf</filename>
file is located and parsed with the
<link linkend='var-bb-LAYERDIR'><filename>LAYERDIR</filename></link>
<link linkend='var-LAYERDIR'><filename>LAYERDIR</filename></link>
variable being set to the directory where the layer was found.
The idea is these files automatically set up
<link linkend='var-bb-BBPATH'><filename>BBPATH</filename></link>
<link linkend='var-BBPATH'><filename>BBPATH</filename></link>
and other variables correctly for a given build directory.
</para>

@@ -189,7 +189,7 @@
depending on the environment variables previously
mentioned or set in the configuration files.
The
"<link linkend='ref-bb-variables-glos'>Variables Glossary</link>"
"<link linkend='ref-variables-glos'>Variables Glossary</link>"
chapter presents a full list of variables.
</para>

@@ -204,7 +204,7 @@
<para>
The <filename>base.bbclass</filename> file is always included.
Other classes that are specified in the configuration using the
<link linkend='var-bb-INHERIT'><filename>INHERIT</filename></link>
<link linkend='var-INHERIT'><filename>INHERIT</filename></link>
variable are also included.
BitBake searches for class files in a
<filename>classes</filename> subdirectory under
@@ -270,7 +270,7 @@

<para>
During the configuration phase, BitBake will have set
<link linkend='var-bb-BBFILES'><filename>BBFILES</filename></link>.
<link linkend='var-BBFILES'><filename>BBFILES</filename></link>.
BitBake now uses it to construct a list of recipes to parse,
along with any append files (<filename>.bbappend</filename>)
to apply.
@@ -292,7 +292,7 @@
Any inherit statements cause BitBake to find and
then parse class files (<filename>.bbclass</filename>)
using
<link linkend='var-bb-BBPATH'><filename>BBPATH</filename></link>
<link linkend='var-BBPATH'><filename>BBPATH</filename></link>
as the search path.
Finally, BitBake parses in order any append files found in
<filename>BBFILES</filename>.
@@ -303,8 +303,8 @@
pieces of metadata.
For example, in <filename>bitbake.conf</filename> the recipe
name and version are used to set the variables
<link linkend='var-bb-PN'><filename>PN</filename></link> and
<link linkend='var-bb-PV'><filename>PV</filename></link>:
<link linkend='var-PN'><filename>PN</filename></link> and
<link linkend='var-PV'><filename>PV</filename></link>:
<literallayout class='monospaced'>
PN = "${@bb.parse.BBHandler.vars_from_file(d.getVar('FILE', False),d)[0] or 'defaultpkgname'}"
PV = "${@bb.parse.BBHandler.vars_from_file(d.getVar('FILE', False),d)[1] or '1.0'}"
@@ -336,7 +336,7 @@
recipe information.
The validity of this cache is determined by first computing a
checksum of the base configuration data (see
<link linkend='var-bb-BB_HASHCONFIG_WHITELIST'><filename>BB_HASHCONFIG_WHITELIST</filename></link>)
<link linkend='var-BB_HASHCONFIG_WHITELIST'><filename>BB_HASHCONFIG_WHITELIST</filename></link>)
and then checking if the checksum matches.
If that checksum matches what is in the cache and the recipe
and class files have not changed, Bitbake is able to use
@@ -384,9 +384,9 @@
the recipe can be known.
Each recipe's <filename>PROVIDES</filename> list is created
implicitly through the recipe's
<link linkend='var-bb-PN'><filename>PN</filename></link> variable
<link linkend='var-PN'><filename>PN</filename></link> variable
and explicitly through the recipe's
<link linkend='var-bb-PROVIDES'><filename>PROVIDES</filename></link>
<link linkend='var-PROVIDES'><filename>PROVIDES</filename></link>
variable, which is optional.
</para>

@@ -427,7 +427,7 @@
PREFERRED_PROVIDER_virtual/kernel = "linux-yocto"
</literallayout>
The default
<link linkend='var-bb-PREFERRED_PROVIDER'><filename>PREFERRED_PROVIDER</filename></link>
<link linkend='var-PREFERRED_PROVIDER'><filename>PREFERRED_PROVIDER</filename></link>
is the provider with the same name as the target.
Bitbake iterates through each target it needs to build and
resolves them and their dependencies using this process.
@@ -439,10 +439,10 @@
BitBake defaults to the highest version of a provider.
Version comparisons are made using the same method as Debian.
You can use the
<link linkend='var-bb-PREFERRED_VERSION'><filename>PREFERRED_VERSION</filename></link>
<link linkend='var-PREFERRED_VERSION'><filename>PREFERRED_VERSION</filename></link>
variable to specify a particular version.
You can influence the order by using the
<link linkend='var-bb-DEFAULT_PREFERENCE'><filename>DEFAULT_PREFERENCE</filename></link>
<link linkend='var-DEFAULT_PREFERENCE'><filename>DEFAULT_PREFERENCE</filename></link>
variable.
</para>

@@ -464,7 +464,7 @@
BitBake defaults to selecting the most recent
version, unless otherwise specified.
If the recipe in question has a
<link linkend='var-bb-DEFAULT_PREFERENCE'><filename>DEFAULT_PREFERENCE</filename></link>
<link linkend='var-DEFAULT_PREFERENCE'><filename>DEFAULT_PREFERENCE</filename></link>
set lower than the other recipes (default is 0), then
it will not be selected.
This allows the person or persons maintaining
@@ -475,9 +475,9 @@

<para>
If the first recipe is named <filename>a_1.1.bb</filename>, then the
<link linkend='var-bb-PN'><filename>PN</filename></link> variable
<link linkend='var-PN'><filename>PN</filename></link> variable
will be set to “a”, and the
<link linkend='var-bb-PV'><filename>PV</filename></link>
<link linkend='var-PV'><filename>PV</filename></link>
variable will be set to 1.1.
</para>

@@ -532,11 +532,11 @@
<para>
Dependencies are defined through several variables.
You can find information about variables BitBake uses in
the <link linkend='ref-bb-variables-glos'>Variables Glossary</link>
the <link linkend='ref-variables-glos'>Variables Glossary</link>
near the end of this manual.
At a basic level, it is sufficient to know that BitBake uses the
<link linkend='var-bb-DEPENDS'><filename>DEPENDS</filename></link> and
<link linkend='var-bb-RDEPENDS'><filename>RDEPENDS</filename></link> variables when
<link linkend='var-DEPENDS'><filename>DEPENDS</filename></link> and
<link linkend='var-RDEPENDS'><filename>RDEPENDS</filename></link> variables when
calculating dependencies.
</para>

@@ -560,7 +560,7 @@

<para>
The build now starts with BitBake forking off threads up to the limit set in the
<link linkend='var-bb-BB_NUMBER_THREADS'><filename>BB_NUMBER_THREADS</filename></link>
<link linkend='var-BB_NUMBER_THREADS'><filename>BB_NUMBER_THREADS</filename></link>
variable.
BitBake continues to fork threads as long as there are tasks ready to run,
those tasks have all their dependencies met, and the thread threshold has not been
@@ -574,7 +574,7 @@

<para>
As each task completes, a timestamp is written to the directory specified by the
<link linkend='var-bb-STAMP'><filename>STAMP</filename></link> variable.
<link linkend='var-STAMP'><filename>STAMP</filename></link> variable.
On subsequent runs, BitBake looks in the build directory within
<filename>tmp/stamps</filename> and does not rerun
tasks that are already completed unless a timestamp is found to be invalid.
@@ -618,7 +618,7 @@
<para>
Tasks can be either a shell task or a Python task.
For shell tasks, BitBake writes a shell script to
<filename>${</filename><link linkend='var-bb-T'><filename>T</filename></link><filename>}/run.do_taskname.pid</filename>
<filename>${</filename><link linkend='var-T'><filename>T</filename></link><filename>}/run.do_taskname.pid</filename>
and then executes the script.
The generated shell script contains all the exported variables,
and the shell functions with all variables expanded.
@@ -645,10 +645,10 @@
behavior:
<itemizedlist>
<listitem><para>
<link linkend='var-bb-BB_SCHEDULER'><filename>BB_SCHEDULER</filename></link>
<link linkend='var-BB_SCHEDULER'><filename>BB_SCHEDULER</filename></link>
</para></listitem>
<listitem><para>
<link linkend='var-bb-BB_SCHEDULERS'><filename>BB_SCHEDULERS</filename></link>
<link linkend='var-BB_SCHEDULERS'><filename>BB_SCHEDULERS</filename></link>
</para></listitem>
</itemizedlist>
It is possible to have functions run before and after a task's main
@@ -684,7 +684,7 @@
The simplistic approach for excluding the working directory is to set
it to some fixed value and create the checksum for the "run" script.
BitBake goes one step better and uses the
<link linkend='var-bb-BB_HASHBASE_WHITELIST'><filename>BB_HASHBASE_WHITELIST</filename></link>
<link linkend='var-BB_HASHBASE_WHITELIST'><filename>BB_HASHBASE_WHITELIST</filename></link>
variable to define a list of variables that should never be included
when generating the signatures.
</para>
@@ -781,7 +781,7 @@
The code in <filename>meta/lib/oe/sstatesig.py</filename> shows two examples
of this and also illustrates how you can insert your own policy into the system
if so desired.
This file defines the two basic signature generators OpenEmbedded-Core
This file defines the two basic signature generators OpenEmbedded Core
uses: "OEBasic" and "OEBasicHash".
By default, there is a dummy "noop" signature handler enabled in BitBake.
This means that behavior is unchanged from previous versions.
@@ -795,7 +795,7 @@
This results in any metadata change that changes the task hash, automatically
causing the task to be run again.
This removes the need to bump
<link linkend='var-bb-PR'><filename>PR</filename></link>
<link linkend='var-PR'><filename>PR</filename></link>
values, and changes to metadata automatically ripple across the build.
</para>

@@ -884,7 +884,7 @@

<para>
BitBake first calls the function defined by the
<link linkend='var-bb-BB_HASHCHECK_FUNCTION'><filename>BB_HASHCHECK_FUNCTION</filename></link>
<link linkend='var-BB_HASHCHECK_FUNCTION'><filename>BB_HASHCHECK_FUNCTION</filename></link>
variable with a list of tasks and corresponding
hashes it wants to build.
This function is designed to be fast and returns a list
@@ -908,7 +908,7 @@
For example, it is pointless to obtain a compiler if you
already have the compiled binary.
To handle this, BitBake calls the
<link linkend='var-bb-BB_SETSCENE_DEPVALID'><filename>BB_SETSCENE_DEPVALID</filename></link>
<link linkend='var-BB_SETSCENE_DEPVALID'><filename>BB_SETSCENE_DEPVALID</filename></link>
function for each successful setscene task to know whether or not it needs
to obtain the dependencies of that task.
</para>
@@ -916,7 +916,7 @@
<para>
Finally, after all the setscene tasks have executed, BitBake calls the
function listed in
<link linkend='var-bb-BB_SETSCENE_VERIFY_FUNCTION2'><filename>BB_SETSCENE_VERIFY_FUNCTION2</filename></link>
<link linkend='var-BB_SETSCENE_VERIFY_FUNCTION2'><filename>BB_SETSCENE_VERIFY_FUNCTION2</filename></link>
with the list of tasks BitBake thinks has been "covered".
The metadata can then ensure that this list is correct and can
inform BitBake that it wants specific tasks to be run regardless

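As a rough sketch of the signature scheme the chapter above describes (deliberately simplified; the real generators live in BitBake's lib/bb/siggen.py):

    import hashlib

    def basic_task_hash(task_code, depvars, dep_hashes):
        # Mix the task's code, the values of the variables it depends on, and
        # the hashes of its dependent tasks, in a stable order.
        h = hashlib.md5(task_code.encode())
        for name in sorted(depvars):
            h.update(("%s=%s" % (name, depvars[name])).encode())
        for dep in sorted(dep_hashes):
            h.update(dep_hashes[dep].encode())
        return h.hexdigest()
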
@@ -38,13 +38,13 @@
The code to execute the first part of this process, a fetch,
looks something like the following:
<literallayout class='monospaced'>
src_uri = (d.getVar('SRC_URI') or "").split()
src_uri = (d.getVar('SRC_URI', True) or "").split()
fetcher = bb.fetch2.Fetch(src_uri, d)
fetcher.download()
</literallayout>
This code sets up an instance of the fetch class.
The instance uses a space-separated list of URLs from the
<link linkend='var-bb-SRC_URI'><filename>SRC_URI</filename></link>
<link linkend='var-SRC_URI'><filename>SRC_URI</filename></link>
variable and then calls the <filename>download</filename>
method to download the files.
</para>
@@ -52,7 +52,7 @@
<para>
The instantiation of the fetch class is usually followed by:
<literallayout class='monospaced'>
rootdir = l.getVar('WORKDIR')
rootdir = l.getVar('WORKDIR', True)
fetcher.unpack(rootdir)
</literallayout>
This code unpacks the downloaded files to the
@@ -78,7 +78,7 @@
<listitem><para><emphasis>Pre-mirror Sites:</emphasis>
BitBake first uses pre-mirrors to try and find source files.
These locations are defined using the
<link linkend='var-bb-PREMIRRORS'><filename>PREMIRRORS</filename></link>
<link linkend='var-PREMIRRORS'><filename>PREMIRRORS</filename></link>
variable.
</para></listitem>
<listitem><para><emphasis>Source URI:</emphasis>
@@ -88,7 +88,7 @@
<listitem><para><emphasis>Mirror Sites:</emphasis>
If fetch failures occur, BitBake next uses mirror locations as
defined by the
<link linkend='var-bb-MIRRORS'><filename>MIRRORS</filename></link>
<link linkend='var-MIRRORS'><filename>MIRRORS</filename></link>
variable.
</para></listitem>
</itemizedlist>
@@ -144,7 +144,7 @@
Any source files that are not local (i.e.
downloaded from the Internet) are placed into the download
directory, which is specified by the
<link linkend='var-bb-DL_DIR'><filename>DL_DIR</filename></link>
<link linkend='var-DL_DIR'><filename>DL_DIR</filename></link>
variable.
</para>

@@ -184,11 +184,11 @@

<para>
If
<link linkend='var-bb-BB_STRICT_CHECKSUM'><filename>BB_STRICT_CHECKSUM</filename></link>
<link linkend='var-BB_STRICT_CHECKSUM'><filename>BB_STRICT_CHECKSUM</filename></link>
is set, any download without a checksum triggers an
error message.
The
<link linkend='var-bb-BB_NO_NETWORK'><filename>BB_NO_NETWORK</filename></link>
<link linkend='var-BB_NO_NETWORK'><filename>BB_NO_NETWORK</filename></link>
variable can be used to make any attempted network access a fatal
error, which is useful for checking that mirrors are complete
as well as other things.
@@ -265,11 +265,20 @@
The filename you specify within the URL can be
either an absolute or relative path to a file.
If the filename is relative, the contents of the
<link linkend='var-bb-FILESPATH'><filename>FILESPATH</filename></link>
<link linkend='var-FILESPATH'><filename>FILESPATH</filename></link>
variable is used in the same way
<filename>PATH</filename> is used to find executables.
Failing that,
<link linkend='var-FILESDIR'><filename>FILESDIR</filename></link>
is used to find the appropriate relative file.
<note>
<filename>FILESDIR</filename> is deprecated and can
be replaced with <filename>FILESPATH</filename>.
Because <filename>FILESDIR</filename> is likely to be
removed, you should not use this variable in any new code.
</note>
If the file cannot be found, it is assumed that it is available in
<link linkend='var-bb-DL_DIR'><filename>DL_DIR</filename></link>
<link linkend='var-DL_DIR'><filename>DL_DIR</filename></link>
by the time the <filename>download()</filename> method is called.
</para>

@@ -304,7 +313,7 @@
allows the name of the downloaded file to be specified.
Specifying the name of the downloaded file is useful
for avoiding collisions in
<link linkend='var-bb-DL_DIR'><filename>DL_DIR</filename></link>
<link linkend='var-DL_DIR'><filename>DL_DIR</filename></link>
when dealing with multiple files that have the same name.
</para>

@@ -355,7 +364,7 @@
A special value of "now" causes the checkout to
be updated on every build.
</para></listitem>
<listitem><para><emphasis><link linkend='var-bb-CVSDIR'><filename>CVSDIR</filename></link>:</emphasis>
<listitem><para><emphasis><link linkend='var-CVSDIR'><filename>CVSDIR</filename></link>:</emphasis>
Specifies where a temporary checkout is saved.
The location is often <filename>DL_DIR/cvs</filename>.
</para></listitem>
@@ -376,8 +385,7 @@
The supported parameters are as follows:
<itemizedlist>
<listitem><para><emphasis>"method":</emphasis>
The protocol over which to communicate with the CVS
server.
The protocol over which to communicate with the CVS server.
By default, this protocol is "pserver".
If "method" is set to "ext", BitBake examines the
"rsh" parameter and sets <filename>CVS_RSH</filename>.
@@ -395,7 +403,7 @@
<listitem><para><emphasis>"date":</emphasis>
Specifies a date.
If no "date" is specified, the
<link linkend='var-bb-SRCDATE'><filename>SRCDATE</filename></link>
<link linkend='var-SRCDATE'><filename>SRCDATE</filename></link>
of the configuration is used to checkout a specific date.
The special value of "now" causes the checkout to be
updated on every build.
@@ -406,7 +414,7 @@
to which the module is unpacked.
You are forcing the module into a special
directory relative to
<link linkend='var-bb-CVSDIR'><filename>CVSDIR</filename></link>.
<link linkend='var-CVSDIR'><filename>CVSDIR</filename></link>.
</para></listitem>
<listitem><para><emphasis>"rsh"</emphasis>
Used in conjunction with the "method" parameter.
@@ -448,7 +456,7 @@
<filename>FETCHCMD_svn</filename>, which defaults
to "svn".
The fetcher's temporary working directory is set by
<link linkend='var-bb-SVNDIR'><filename>SVNDIR</filename></link>,
<link linkend='var-SVNDIR'><filename>SVNDIR</filename></link>,
which is usually <filename>DL_DIR/svn</filename>.
</para>

@@ -461,29 +469,25 @@
You can think of this parameter as the top-level
directory of the repository data you want.
</para></listitem>
<listitem><para><emphasis>"path_spec":</emphasis>
A specific directory in which to checkout the
specified svn module.
</para></listitem>
<listitem><para><emphasis>"protocol":</emphasis>
The protocol to use, which defaults to "svn".
If "protocol" is set to "svn+ssh", the "ssh"
parameter is also used.
Other options are "svn+ssh" and "rsh".
For "rsh", the "rsh" parameter is also used.
</para></listitem>
<listitem><para><emphasis>"rev":</emphasis>
The revision of the source code to checkout.
</para></listitem>
<listitem><para><emphasis>"date":</emphasis>
The date of the source code to checkout.
Specific revisions are generally much safer to checkout
rather than by date as they do not involve timezones
(e.g. they are much more deterministic).
</para></listitem>
<listitem><para><emphasis>"scmdata":</emphasis>
Causes the “.svn” directories to be available during
compile-time when set to "keep".
By default, these directories are removed.
</para></listitem>
<listitem><para><emphasis>"ssh":</emphasis>
An optional parameter used when "protocol" is set
to "svn+ssh".
You can use this parameter to specify the ssh
program used by svn.
</para></listitem>
<listitem><para><emphasis>"transportuser":</emphasis>
When required, sets the username for the transport.
By default, this parameter is empty.
@@ -492,11 +496,10 @@
command.
</para></listitem>
</itemizedlist>
Following are three examples using svn:
Following are two examples using svn:
<literallayout class='monospaced'>
SRC_URI = "svn://myrepos/proj1;module=vip;protocol=http;rev=667"
SRC_URI = "svn://myrepos/proj1;module=opie;protocol=svn+ssh"
SRC_URI = "svn://myrepos/proj1;module=trunk;protocol=http;path_spec=${MY_DIR}/proj1"
SRC_URI = "svn://svn.oe.handhelds.org/svn;module=vip;proto=http;rev=667"
SRC_URI = "svn://svn.oe.handhelds.org/svn/;module=opie;proto=svn+ssh;date=20060126"
</literallayout>
</para>
</section>
@@ -509,7 +512,7 @@
source control system.
The fetcher works by creating a bare clone of the
remote into
<link linkend='var-bb-GITDIR'><filename>GITDIR</filename></link>,
<link linkend='var-GITDIR'><filename>GITDIR</filename></link>,
which is usually <filename>DL_DIR/git2</filename>.
This bare clone is then cloned into the work directory during the
unpack stage when a specific tree is checked out.
@@ -588,14 +591,6 @@
The name of the path in which to place the checkout.
By default, the path is <filename>git/</filename>.
</para></listitem>
<listitem><para><emphasis>"usehead":</emphasis>
Enables local <filename>git://</filename> URLs to use the
current branch HEAD as the revision for use with
<filename>AUTOREV</filename>.
The "usehead" parameter implies no branch and only works
when the transfer protocol is
<filename>file://</filename>.
</para></listitem>
</itemizedlist>
Here are some example URLs:
<literallayout class='monospaced'>
@@ -612,7 +607,7 @@
This fetcher submodule inherits from the
<link linkend='git-fetcher'>Git fetcher</link> and extends
that fetcher's behavior by fetching a repository's submodules.
<link linkend='var-bb-SRC_URI'><filename>SRC_URI</filename></link>
<link linkend='var-SRC_URI'><filename>SRC_URI</filename></link>
is passed to the Git fetcher as described in the
"<link linkend='git-fetcher'>Git Fetcher (<filename>git://</filename>)</link>"
section.
@@ -628,9 +623,7 @@
The Git Submodules fetcher is not a complete fetcher
implementation.
The fetcher has known issues where it does not use the
normal source mirroring infrastructure properly. Further,
the submodule sources it fetches are not visible to the
licensing and source archiving infrastructures.
normal source mirroring infrastructure properly.
</para>
</note>
</para>
@@ -647,9 +640,9 @@

<para>
To use this fetcher, make sure your recipe has proper
<link linkend='var-bb-SRC_URI'><filename>SRC_URI</filename></link>,
<link linkend='var-bb-SRCREV'><filename>SRCREV</filename></link>, and
<link linkend='var-bb-PV'><filename>PV</filename></link> settings.
<link linkend='var-SRC_URI'><filename>SRC_URI</filename></link>,
<link linkend='var-SRCREV'><filename>SRCREV</filename></link>, and
<link linkend='var-PV'><filename>PV</filename></link> settings.
Here is an example:
<literallayout class='monospaced'>
SRC_URI = "ccrc://cc.example.org/ccrc;vob=/example_vob;module=/example_module"
@@ -734,15 +727,15 @@
<filename>FETCHCMD_p4</filename>, which defaults
to "p4".
The fetcher's temporary working directory is set by
<link linkend='var-bb-P4DIR'><filename>P4DIR</filename></link>,
<link linkend='var-P4DIR'><filename>P4DIR</filename></link>,
which defaults to "DL_DIR/p4".
</para>

<para>
To use this fetcher, make sure your recipe has proper
<link linkend='var-bb-SRC_URI'><filename>SRC_URI</filename></link>,
<link linkend='var-bb-SRCREV'><filename>SRCREV</filename></link>, and
<link linkend='var-bb-PV'><filename>PV</filename></link> values.
<link linkend='var-SRC_URI'><filename>SRC_URI</filename></link>,
<link linkend='var-SRCREV'><filename>SRCREV</filename></link>, and
<link linkend='var-PV'><filename>PV</filename></link> values.
The p4 executable is able to use the config file defined by your
system's <filename>P4CONFIG</filename> environment variable in
order to define the Perforce server URL and port, username, and
@@ -785,43 +778,6 @@
</para>
</section>

<section id='repo-fetcher'>
<title>Repo Fetcher (<filename>repo://</filename>)</title>

<para>
This fetcher submodule fetches code from
<filename>google-repo</filename> source control system.
The fetcher works by initiating and syncing sources of the
repository into
<link linkend='var-bb-REPODIR'><filename>REPODIR</filename></link>,
which is usually
<link linkend='var-bb-DL_DIR'><filename>DL_DIR</filename></link><filename>/repo</filename>.
</para>

<para>
This fetcher supports the following parameters:
<itemizedlist>
<listitem><para>
<emphasis>"protocol":</emphasis>
Protocol to fetch the repository manifest (default: git).
</para></listitem>
<listitem><para>
<emphasis>"branch":</emphasis>
Branch or tag of repository to get (default: master).
</para></listitem>
<listitem><para>
<emphasis>"manifest":</emphasis>
Name of the manifest file (default: <filename>default.xml</filename>).
</para></listitem>
</itemizedlist>
Here are some example URLs:
<literallayout class='monospaced'>
SRC_URI = "repo://REPOROOT;protocol=git;branch=some_branch;manifest=my_manifest.xml"
SRC_URI = "repo://REPOROOT;protocol=file;branch=some_branch;manifest=my_manifest.xml"
</literallayout>
</para>
</section>

<section id='other-fetchers'>
<title>Other Fetchers</title>

@@ -840,6 +796,9 @@
<listitem><para>
Secure Shell (<filename>ssh://</filename>)
</para></listitem>
<listitem><para>
Repo (<filename>repo://</filename>)
</para></listitem>
<listitem><para>
OSC (<filename>osc://</filename>)
</para></listitem>

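Putting the chapter's fetch and unpack snippets together, a task body would look roughly like this (following the manual's own examples above, and using the morty-era getVar() signature with the explicit expand flag):

    src_uri = (d.getVar('SRC_URI', True) or "").split()
    fetcher = bb.fetch2.Fetch(src_uri, d)
    fetcher.download()

    rootdir = d.getVar('WORKDIR', True)
    fetcher.unpack(rootdir)
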
@@ -128,8 +128,15 @@
|
||||
</para>
|
||||
|
||||
<note>
|
||||
This example was inspired by and drew heavily from
|
||||
<ulink url="http://www.mail-archive.com/yocto@yoctoproject.org/msg09379.html">Mailing List post - The BitBake equivalent of "Hello, World!"</ulink>.
|
||||
This example was inspired by and drew heavily from these sources:
|
||||
<itemizedlist>
|
||||
<listitem><para>
|
||||
<ulink url="http://www.mail-archive.com/yocto@yoctoproject.org/msg09379.html">Mailing List post - The BitBake equivalent of "Hello, World!"</ulink>
|
||||
</para></listitem>
|
||||
<listitem><para>
|
||||
<ulink url="http://hambedded.org/blog/2012/11/24/from-bitbake-hello-world-to-an-image/">Hambedded Linux blog post - From Bitbake Hello World to an Image</ulink>
|
||||
</para></listitem>
|
||||
</itemizedlist>
|
||||
</note>
|
||||
|
||||
<para>
|
||||
@@ -194,7 +201,7 @@
|
||||
<para>
|
||||
When you run BitBake, it begins looking for metadata files.
|
||||
The
|
||||
<link linkend='var-bb-BBPATH'><filename>BBPATH</filename></link>
|
||||
<link linkend='var-BBPATH'><filename>BBPATH</filename></link>
|
||||
variable is what tells BitBake where to look for those files.
|
||||
<filename>BBPATH</filename> is not set and you need to set it.
|
||||
Without <filename>BBPATH</filename>, Bitbake cannot
|
||||
@@ -260,9 +267,9 @@
|
||||
files.
|
||||
For this example, you need to create the file in your project directory
|
||||
and define some key BitBake variables.
|
||||
For more information on the <filename>bitbake.conf</filename> file,
|
||||
For more information on the <filename>bitbake.conf</filename>,
|
||||
see
|
||||
<ulink url='http://git.openembedded.org/bitbake/tree/conf/bitbake.conf'></ulink>.
|
||||
<ulink url='http://hambedded.org/blog/2012/11/24/from-bitbake-hello-world-to-an-image/#an-overview-of-bitbakeconf'></ulink>
|
||||
</para>
|
||||
<para>Use the following commands to create the <filename>conf</filename>
|
||||
directory in the project directory:
|
||||
@@ -273,32 +280,14 @@
|
||||
some editor to create the <filename>bitbake.conf</filename>
|
||||
so that it contains the following:
|
||||
<literallayout class='monospaced'>
|
||||
<link linkend='var-bb-PN'>PN</link> = "${@bb.parse.BBHandler.vars_from_file(d.getVar('FILE', False),d)[0] or 'defaultpkgname'}"
|
||||
TMPDIR = "${<link linkend='var-TOPDIR'>TOPDIR</link>}/tmp"
|
||||
<link linkend='var-CACHE'>CACHE</link> = "${TMPDIR}/cache"
|
||||
<link linkend='var-STAMP'>STAMP</link> = "${TMPDIR}/stamps"
|
||||
<link linkend='var-T'>T</link> = "${TMPDIR}/work"
|
||||
<link linkend='var-B'>B</link> = "${TMPDIR}"
|
||||
</literallayout>
|
||||
<literallayout class='monospaced'>
|
||||
TMPDIR = "${<link linkend='var-bb-TOPDIR'>TOPDIR</link>}/tmp"
|
||||
<link linkend='var-bb-CACHE'>CACHE</link> = "${TMPDIR}/cache"
|
||||
<link linkend='var-bb-STAMP'>STAMP</link> = "${TMPDIR}/${PN}/stamps"
|
||||
<link linkend='var-bb-T'>T</link> = "${TMPDIR}/${PN}/work"
|
||||
<link linkend='var-bb-B'>B</link> = "${TMPDIR}/${PN}"
|
||||
</literallayout>
|
||||
<note>
|
||||
Without a value for <filename>PN</filename>, the
|
||||
variables <filename>STAMP</filename>,
|
||||
<filename>T</filename>, and <filename>B</filename>,
|
||||
prevent more than one recipe from working. You can fix
|
||||
this by either setting <filename>PN</filename> to have
|
||||
a value similar to what OpenEmbedded and BitBake use
|
||||
in the default <filename>bitbake.conf</filename> file
|
||||
(see previous example). Or, by manually updating each
|
||||
recipe to set <filename>PN</filename>. You will also
|
||||
need to include <filename>PN</filename> as part of the
|
||||
<filename>STAMP</filename>, <filename>T</filename>, and
|
||||
<filename>B</filename> variable definitions in the
|
||||
<filename>local.conf</filename> file.
|
||||
</note>
|
||||
The <filename>TMPDIR</filename> variable establishes a directory
|
||||
that BitBake uses for build output and intermediate files other
|
||||
that BitBake uses for build output and intermediate files (other
|
||||
than the cached information used by the
|
||||
<link linkend='setscene'>Setscene</link> process.
|
||||
Here, the <filename>TMPDIR</filename> directory is set to
|
||||
@@ -318,19 +307,19 @@
|
||||
    file exists, you can run the <filename>bitbake</filename>
    command again:
    <literallayout class='monospaced'>
$ bitbake
ERROR: Traceback (most recent call last):
  File "/home/scott-lenovo/bitbake/lib/bb/cookerdata.py", line 163, in wrapped
    return func(fn, *args)
  File "/home/scott-lenovo/bitbake/lib/bb/cookerdata.py", line 177, in _inherit
    bb.parse.BBHandler.inherit(bbclass, "configuration INHERITs", 0, data)
  File "/home/scott-lenovo/bitbake/lib/bb/parse/parse_py/BBHandler.py", line 92, in inherit
    include(fn, file, lineno, d, "inherit")
  File "/home/scott-lenovo/bitbake/lib/bb/parse/parse_py/ConfHandler.py", line 100, in include
    raise ParseError("Could not %(error_out)s file %(fn)s" % vars(), oldfn, lineno)
ParseError: ParseError in configuration INHERITs: Could not inherit file classes/base.bbclass

ERROR: Unable to parse base: ParseError in configuration INHERITs: Could not inherit file classes/base.bbclass
    </literallayout>
    In the sample output, BitBake could not find the
    <filename>classes/base.bbclass</filename> file.
    Of course, the <filename>base.bbclass</filename> can have much
    more depending on which build environments BitBake is
    supporting.
    </para></listitem>
<listitem><para><emphasis>Run BitBake:</emphasis>
    After making sure that the <filename>classes/base.bbclass</filename>
    code separate from the general metadata used by BitBake.
    Thus, this example creates and uses a layer called "mylayer".
    <note>
        You can find additional information on layers in the
        "<link linkend='layers'>Layers</link>" section.
    </note></para>

<para>Minimally, you need a recipe file and a layer configuration
    file in your layer.
    The configuration file needs to be in the <filename>conf</filename>
    Move to the <filename>conf</filename> directory and create a
    <filename>layer.conf</filename> file that has the following:
    <literallayout class='monospaced'>
BBPATH .= ":${<link linkend='var-bb-LAYERDIR'>LAYERDIR</link>}"

<link linkend='var-bb-BBFILES'>BBFILES</link> += "${LAYERDIR}/*.bb"

<link linkend='var-bb-BBFILE_COLLECTIONS'>BBFILE_COLLECTIONS</link> += "mylayer"
<link linkend='var-bb-BBFILE_PATTERN'>BBFILE_PATTERN_mylayer</link> := "^${LAYERDIR_RE}/"
    </literallayout>
    For information on these variables, click the links
    to go to the definitions in the glossary.</para>
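<para>As a minimal sketch, once the recipe file from the next step
    exists as well, the layer looks like this on disk (the layout,
    not the exact names, is what matters):
    <literallayout class='monospaced'>
mylayer/
    conf/
        layer.conf
    printhello.bb
    </literallayout>
</para>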
    a recipe file named <filename>printhello.bb</filename> that
    has the following:
    <literallayout class='monospaced'>
<link linkend='var-bb-DESCRIPTION'>DESCRIPTION</link> = "Prints Hello World"
<link linkend='var-bb-PN'>PN</link> = 'printhello'
<link linkend='var-bb-PV'>PV</link> = '1'

python do_build() {
    bb.plain("********************");
<para>
    When you name an append file, you can use the
    "<filename>%</filename>" wildcard character to allow for matching
    recipe names.
    For example, suppose you have an append file named
    as follows:
    <literallayout class='monospaced'>
busybox_1.21.%.bbappend
    </literallayout>
    That append file would match any <filename>busybox_1.21.</filename><replaceable>x</replaceable><filename>.bb</filename>
    version of the recipe.
    So, the append file would match the following recipe names:
    <literallayout class='monospaced'>
busybox_1.21.2.bb
busybox_1.21.3.bb
    </literallayout>
    <note><title>Important</title>
        The use of the "<filename>%</filename>" character
        is limited in that it only works directly in front of the
        <filename>.bbappend</filename> portion of the append file's
        name.
        You cannot use the wildcard character in any other
        location of the name.
    </note>
    If the <filename>busybox</filename> recipe was updated to
    <filename>busybox_1.3.0.bb</filename>, the append name would not
    match.
    Build Checkout:</emphasis>
    A final possibility for getting a copy of BitBake is that it
    already comes with your checkout of a larger BitBake-based build
    system, such as Poky.
    Rather than manually checking out individual layers and
    gluing them together yourself, you can check
    out an entire build system.
                        target that failed and anything depending on it cannot
                        be built, as much as possible will be built before
                        stopping.
  -f, --force           Force the specified targets/task to run (invalidating
                        any existing stamp file).
  -c CMD, --cmd=CMD     Specify the task to execute. The exact options
                        Read the specified file before bitbake.conf.
  -R POSTFILE, --postread=POSTFILE
                        Read the specified file after bitbake.conf.
  -v, --verbose         Enable tracing of shell tasks (with 'set -x'). Also
                        print bb.note(...) messages to stdout (in addition to
                        writing them to ${T}/log.do_&lt;task&gt;).
  -D, --debug           Increase the debug level. You can specify this more
                        than once. -D sets the debug level to 1, where only
                        bb.debug(1, ...) messages are printed to stdout; -DD
                        sets the debug level to 2, where both bb.debug(1, ...)
                        and bb.debug(2, ...) messages are printed; etc.
                        Without -D, no debug messages are printed. Note that
                        -D only affects output to stdout. All debug messages
                        are written to ${T}/log.do_taskname, regardless of the
                        debug level.
  -q, --quiet           Output less log message data to the terminal. You can
                        specify this more than once.
  -n, --dry-run         Don't execute, just go through the motions.
  -S SIGNATURE_HANDLER, --dump-signatures=SIGNATURE_HANDLER
                        Dump out the signature construction information, with
  -l DEBUG_DOMAINS, --log-domains=DEBUG_DOMAINS
                        Show debug logging for the specified logging domains
  -P, --profile         Profile the command and save reports.
  -u UI, --ui=UI        The user interface to use (knotty, ncurses or taskexp
                        - default knotty).
  --token=XMLRPCTOKEN   Specify the connection token to be used when
                        connecting to a remote server.
  --revisions-changed   Set the exit code depending on whether upstream
                        floating revisions have changed or not.
  --server-only         Run bitbake without a UI, only starting a server
                        (cooker) process.
  -B BIND, --bind=BIND  The name/address for the bitbake xmlrpc server to bind
                        to.
  -T SERVER_TIMEOUT, --idle-timeout=SERVER_TIMEOUT
                        Set timeout to unload bitbake server due to
                        inactivity, set to -1 means no unload, default:
                        Environment variable BB_SERVER_TIMEOUT.
  --no-setscene         Do not run any setscene tasks. sstate will be ignored
                        and everything needed, built.
  --setscene-only       Only run setscene tasks, don't run any real tasks.
  --remote-server=REMOTE_SERVER
                        Connect to the specified server.
  -m, --kill-server     Terminate any running bitbake server.
  --observe-only        Connect to a server as an observing-only client.
  --status-only         Check the status of the remote bitbake server.
  -w WRITEEVENTLOG, --write-log=WRITEEVENTLOG
                        Writes the event log of the build to a bitbake event
                        json file. Use '' (empty string) to assign the name
                        automatically.
  --runall=RUNALL       Run the specified task for any recipe in the taskgraph
                        of the specified target (even if it wouldn't otherwise
                        have run).
  --runonly=RUNONLY     Run only the specified task within the taskgraph of
                        the specified targets (and any task dependencies those
                        tasks may have).
    </literallayout>
</para>
</section>
</para>

<para>
    When you generate a dependency graph, BitBake writes three files
    to the current working directory:
    <itemizedlist>
        <listitem><para>
            <emphasis><filename>recipe-depends.dot</filename>:</emphasis>
            Shows dependencies between recipes (i.e. a collapsed version of
            <filename>task-depends.dot</filename>).
            </para></listitem>
        <listitem><para>
            <emphasis><filename>task-depends.dot</filename>:</emphasis>
            Shows dependencies between tasks.
            These dependencies match BitBake's internal task execution list.
            </para></listitem>
        <listitem><para>
            <emphasis><filename>pn-buildlist</filename>:</emphasis>
            Shows a simple list of targets that are to be built.
            </para></listitem>
    </itemizedlist>
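    For example, the following command writes those files for a
    hypothetical target named "foo":
    <literallayout class='monospaced'>
$ bitbake -g foo
    </literallayout>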
</para>
</section>

<section id='executing-a-multiple-configuration-build'>
    <title>Executing a Multiple Configuration Build</title>

    <para>
        BitBake is able to build multiple images or packages
        using a single command where the different targets
        require different configurations (multiple configuration
        builds).
        Each target, in this scenario, is referred to as a
        "multiconfig".
    </para>

    <para>
        To accomplish a multiple configuration build, you must
        define each target's configuration separately using
        a parallel configuration file in the build directory.
        These multiconfig configuration files must reside in a
        specific location: the current build directory, in a
        sub-directory of <filename>conf</filename> named
        <filename>multiconfig</filename>.
        Following is an example for two separate targets:
        <imagedata fileref="figures/bb_multiconfig_files.png" align="center" width="4in" depth="3in" />
    </para>
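    <para>
        In text form, the layout the figure shows is along these
        lines (the file names under
        <filename>conf/multiconfig</filename> are examples, not
        required names):
        <literallayout class='monospaced'>
build_directory/
    conf/
        multiconfig/
            target1.conf
            target2.conf
        </literallayout>
    </para>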
    <para>
        The reason for this required file hierarchy is that the
        <filename>BBPATH</filename> variable is not constructed
        until the layers are parsed.
        Consequently, using a configuration file as a
        pre-configuration file is not possible unless it is
        located in the current working directory.
    </para>
    <para>
        Minimally, each configuration file must define the
        machine and the temporary directory BitBake uses
        for the build.
        Suggested practice dictates that you do not
        overlap the temporary directories used during the
        builds.
    </para>
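    <para>
        As a minimal sketch, a
        <filename>conf/multiconfig/target1.conf</filename> file
        might contain just those two settings; the machine name
        and directory layout here are illustrative assumptions,
        not requirements:
        <literallayout class='monospaced'>
MACHINE = "qemux86"
TMPDIR = "${TOPDIR}/tmp-target1"
        </literallayout>
    </para>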
    <para>
        Aside from separate configuration files for each
        target, you must also enable BitBake to perform multiple
        configuration builds.
        Enabling is accomplished by setting the
        <link linkend='var-bb-BBMULTICONFIG'><filename>BBMULTICONFIG</filename></link>
        variable in the <filename>local.conf</filename>
        configuration file.
        As an example, suppose you had configuration files
        for <filename>target1</filename> and
        <filename>target2</filename> defined in the build
        directory.
        The following statement in the
        <filename>local.conf</filename> file both enables
        BitBake to perform multiple configuration builds and
        specifies the two multiconfigs:
        <literallayout class='monospaced'>
BBMULTICONFIG = "target1 target2"
        </literallayout>
    </para>
    <para>
        Once the target configuration files are in place and
        BitBake has been enabled to perform multiple configuration
        builds, use the following command form to start the
        builds:
        <literallayout class='monospaced'>
$ bitbake [multiconfig:<replaceable>multiconfigname</replaceable>:]<replaceable>target</replaceable> [[[multiconfig:<replaceable>multiconfigname</replaceable>:]<replaceable>target</replaceable>] ... ]
        </literallayout>
        Here is an example for two multiconfigs:
        <filename>target1</filename> and
        <filename>target2</filename>:
        <literallayout class='monospaced'>
$ bitbake multiconfig:target1:<replaceable>target</replaceable> multiconfig:target2:<replaceable>target</replaceable>
        </literallayout>
    </para>
</section>

<section id='bb-enabling-multiple-configuration-build-dependencies'>
    <title>Enabling Multiple Configuration Build Dependencies</title>

    <para>
        Sometimes dependencies can exist between targets
        (multiconfigs) in a multiple configuration build.
        For example, suppose that in order to build an image
        for a particular architecture, the root filesystem of
        another build for a different architecture needs to
        exist.
        In other words, the image for the first multiconfig depends
        on the root filesystem of the second multiconfig.
        In essence, the task in the recipe that builds one
        multiconfig depends on the completion of the task in the
        recipe that builds another multiconfig.
    </para>
    <para>
        To enable dependencies in a multiple configuration
        build, you must declare the dependencies in the recipe
        using the following statement form:
        <literallayout class='monospaced'>
<replaceable>task_or_package</replaceable>[mcdepends] = "multiconfig:<replaceable>from_multiconfig</replaceable>:<replaceable>to_multiconfig</replaceable>:<replaceable>recipe_name</replaceable>:<replaceable>task_on_which_to_depend</replaceable>"
        </literallayout>
        To better show how to use this statement, consider an
        example with two multiconfigs: <filename>target1</filename>
        and <filename>target2</filename>:
        <literallayout class='monospaced'>
<replaceable>image_task</replaceable>[mcdepends] = "multiconfig:target1:target2:<replaceable>image2</replaceable>:<replaceable>rootfs_task</replaceable>"
        </literallayout>
        In this example, the
        <replaceable>from_multiconfig</replaceable> is "target1" and
        the <replaceable>to_multiconfig</replaceable> is "target2".
        The task in the image whose recipe contains
        <replaceable>image_task</replaceable> depends on the
        completion of the <replaceable>rootfs_task</replaceable>
        used to build out <replaceable>image2</replaceable>, which
        is associated with the "target2" multiconfig.
    </para>
    <para>
        Once you set up this dependency, you can build the
        "target1" multiconfig using a BitBake command as follows:
        <literallayout class='monospaced'>
$ bitbake multiconfig:target1:<replaceable>image1</replaceable>
        </literallayout>
        This command executes all the tasks needed to create
        <replaceable>image1</replaceable> for the "target1"
        multiconfig.
        Because of the dependency, BitBake also executes through
        the <replaceable>rootfs_task</replaceable> for the "target2"
        multiconfig build.
    </para>
    <para>
        Having a recipe depend on the root filesystem of another
        build might not seem that useful.
        Consider this change to the statement in the
        <replaceable>image1</replaceable> recipe:
        <literallayout class='monospaced'>
<replaceable>image_task</replaceable>[mcdepends] = "multiconfig:target1:target2:<replaceable>image2</replaceable>:<replaceable>image_task</replaceable>"
        </literallayout>
        In this case, BitBake must create
        <replaceable>image2</replaceable> for the "target2"
        build since the "target1" build depends on it.
    </para>

    <para>
        Because "target1" and "target2" are enabled for multiple
        configuration builds and have separate configuration
        files, BitBake places the artifacts for each build in the
        respective temporary build directories.
    </para>
</section>
</section>
</section>
</chapter>
<section id='line-joining'>
    <title>Line Joining</title>

    <para>
        Outside of
        <link linkend='functions'>functions</link>, BitBake joins
        any line ending in a backslash character ("\")
        with the following line before parsing statements.
        The most common use for the "\" character is to split variable
        assignments over multiple lines, as in the following example:
        <literallayout class='monospaced'>
FOO = "bar \
       baz \
       qaz"
        </literallayout>
        Both the "\" character and the newline character
        that follows it are removed when joining lines.
        Thus, no newline characters end up in the value of
        <filename>FOO</filename>.
    </para>
    <para>
        Consider this additional example where the two
        assignments both assign "barbaz" to
        <filename>FOO</filename>:
        <literallayout class='monospaced'>
FOO = "barbaz"

FOO = "bar\
baz"
        </literallayout>
        <note>
            BitBake does not interpret escape sequences like
            "\n" in variable values.
            For these to have an effect, the value must be passed
            to some utility that interprets escape sequences,
            such as <filename>printf</filename> or
            <filename>echo -n</filename>.
        </note>
    </para>
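    <para>
        As a quick sketch of that last point, a shell task along
        these lines (the variable name and message are arbitrary)
        prints two lines, because <filename>printf</filename>, not
        BitBake, interprets the "\n":
        <literallayout class='monospaced'>
MSG = "hello\nworld"

do_show() {
    # printf expands the literal backslash-n stored in MSG
    printf "${MSG}\n"
}
addtask show
        </literallayout>
    </para>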
</section>

<section id='variable-expansion'>
    <title>Variable Expansion</title>
    <para>
        When you use this syntax, BitBake expects one or more strings.
        Surrounding spaces and spacing are preserved.
        Here is an example:
        <literallayout class='monospaced'>
FOO = "123 456 789 123456 123 456 123 456"
FOO2_remove = "abc def"
        </literallayout>
        The variable <filename>FOO</filename> becomes
        " 789 123456 "
        and <filename>FOO2</filename> becomes
        " ghi abcdef ".
    </para>
</section>

<section id='unsetting-variables'>
    <title>Unsetting variables</title>

    <para>
        It is possible to completely remove a variable or a variable flag
        from BitBake's internal data dictionary by using the "unset" keyword.
        Here is an example:
        <literallayout class='monospaced'>
unset DATE
unset do_fetch[noexec]
        </literallayout>
        These two statements remove the <filename>DATE</filename>
        variable and the <filename>do_fetch[noexec]</filename> flag.
    </para>
<para>
    BitBake uses
    <link linkend='var-bb-OVERRIDES'><filename>OVERRIDES</filename></link>
    to control what variables are overridden after BitBake
    parses recipes and configuration files.
    This section describes how you can use
    <literallayout class='monospaced'>
DEPENDS = "glibc ncurses"
OVERRIDES = "machine:local"
DEPENDS_append_machine = " libmad"
    </literallayout>
    In this example, <filename>DEPENDS</filename> becomes
    "glibc ncurses libmad".
<para>Internally, this is implemented by prepending
    the task (e.g. "task-compile:") to the value of
    <link linkend='var-bb-OVERRIDES'><filename>OVERRIDES</filename></link>
    for the local datastore of the <filename>do_compile</filename>
    task.</para>
<para>
    BitBake uses the
    <link linkend='var-bb-BBPATH'><filename>BBPATH</filename></link>
    variable to locate needed include and class files.
    Additionally, BitBake searches the current directory for
    <filename>include</filename> and <filename>require</filename>
<para>
    The <filename>inherit</filename> directive is a rudimentary
    means of specifying functionality contained in class files
    that your recipes require.
    For example, you can easily abstract out the tasks involved in
    building a package that uses Autoconf and Automake and put
    those tasks into a class file and then have your recipe
    inherit that class file.
</para>
<para>
    inherited class within your recipe by doing so
    after the "inherit" statement.
    </note>
    If you want to use the directive to inherit
    multiple classes, separate them with spaces.
    The following example shows how to inherit both the
    <filename>buildhistory</filename> and <filename>rm_work</filename>
    classes:
    <literallayout class='monospaced'>
inherit buildhistory rm_work
    </literallayout>
</para>
<para>
    An advantage with the inherit directive as compared to both
    the
    <link linkend='include-directive'>include</link> and
    <link linkend='require-inclusion'>require</link> directives
    is that you can inherit class files conditionally.
    You can accomplish this by using a variable expression
    after the <filename>inherit</filename> statement.
    Here is an example:
    <literallayout class='monospaced'>
inherit ${VARNAME}
    </literallayout>
    within <filename>BBPATH</filename>.
</para>

<para>
    The include directive is a more generic method of including
    functionality as compared to the
    <link linkend='inherit-directive'>inherit</link> directive,
    which is restricted to class (i.e. <filename>.bbclass</filename>)
    files.
    The include directive is applicable for any other kind of
    shared or encapsulated functionality or configuration that
    does not suit a <filename>.bbclass</filename> file.
</para>

<para>
    As an example, suppose you needed a recipe to include some
    self-test definitions:
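    <literallayout class='monospaced'>
include test_defs.inc
    </literallayout>
    Here, <filename>test_defs.inc</filename> is simply an
    illustrative name for whatever file holds the shared
    definitions.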
    being parsed at the location of the directive.
</para>

<para>
    The require directive, like the include directive previously
    described, is a more generic method of including
    functionality as compared to the
    <link linkend='inherit-directive'>inherit</link> directive,
    which is restricted to class (i.e. <filename>.bbclass</filename>)
    files.
    The require directive is applicable for any other kind of
    shared or encapsulated functionality or configuration that
    does not suit a <filename>.bbclass</filename> file.
</para>

<para>
    Similar to how BitBake handles
    <link linkend='include-directive'><filename>include</filename></link>,
<para>
    When creating a configuration file (<filename>.conf</filename>),
    you can use the
    <link linkend='var-bb-INHERIT'><filename>INHERIT</filename></link>
    configuration directive to inherit a class.
    BitBake only supports this directive when used within
    a configuration file.
</para>
    <filename>autotools</filename> and <filename>pkgconfig</filename>
    classes:
    <literallayout class='monospaced'>
INHERIT += "autotools pkgconfig"
    </literallayout>
</para>
</section>
    <literallayout class='monospaced'>
python some_python_function () {
    d.setVar("TEXT", "Hello World")
    print(d.getVar("TEXT"))
}
    </literallayout>
    Because the Python "bb" and "os" modules are already
    to freely set variable values to expandable expressions
    without having them expanded prematurely.
    If you do wish to expand a variable within a Python
    function, use <filename>d.getVar("X")</filename>.
    Or, for more complicated expressions, use
    <filename>d.expand()</filename>.
</note>
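<para>
    As a brief sketch of the difference, assuming
    <filename>PV</filename> and <filename>PR</filename> are set in
    the datastore (the function name here is made up for
    illustration):
    <literallayout class='monospaced'>
python print_version() {
    # getVar expands one variable's value
    bb.plain(d.getVar("PV"))
    # expand handles a compound expression
    bb.plain(d.expand("${PV}-${PR}"))
}
    </literallayout>
</para>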
    Here is an example:
    <literallayout class='monospaced'>
def get_depends(d):
    if d.getVar('SOMECONDITION'):
        return "dependencywithcond"
    else:
        return "dependency"
    </literallayout>
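    A recipe can then use the function's return value in a
    variable assignment; a minimal sketch, reusing the names from
    the function above:
    <literallayout class='monospaced'>
SOMECONDITION = "1"
DEPENDS = "${@get_depends(d)}"
    </literallayout>
    This would result in "dependencywithcond" being added to
    <filename>DEPENDS</filename>.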
    </para></listitem>
<listitem><para>
    BitBake-style Python functions generate a separate
    <filename>${</filename><link linkend='var-bb-T'><filename>T</filename></link><filename>}/run.</filename><replaceable>function-name</replaceable><filename>.</filename><replaceable>pid</replaceable>
    script that is executed to run the function, and also
    generate a log file in
    <filename>${T}/log.</filename><replaceable>function-name</replaceable><filename>.</filename><replaceable>pid</replaceable>
    based on the value of another variable:
    <literallayout class='monospaced'>
python () {
    if d.getVar('SOMEVAR') == 'value':
        d.setVar('ANOTHERVAR', 'value2')
}
    </literallayout>
    things exported or listed in its whitelist to ensure that the build
    environment is reproducible and consistent.
    You can prevent this "cleaning" by setting the
    <link linkend='var-bb-BB_PRESERVE_ENV'><filename>BB_PRESERVE_ENV</filename></link>
    variable.
</note>
Consequently, if you do want something to get passed into the
    Tell BitBake to load what you want from the environment
    into the datastore.
    You can do so through the
    <link linkend='var-bb-BB_ENV_WHITELIST'><filename>BB_ENV_WHITELIST</filename></link>
    and
    <link linkend='var-bb-BB_ENV_EXTRAWHITE'><filename>BB_ENV_EXTRAWHITE</filename></link>
    variables.
    For example, assume you want to prevent the build system from
    accessing your <filename>$HOME/.ccache</filename>
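    As a quick sketch of the whitelist mechanism itself, extending
    <filename>BB_ENV_EXTRAWHITE</filename> from the shell before
    starting the build looks like this
    (<filename>CCACHE_DIR</filename> is just an example variable):
    <literallayout class='monospaced'>
$ export BB_ENV_EXTRAWHITE="$BB_ENV_EXTRAWHITE CCACHE_DIR"
    </literallayout>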
    from the original execution environment.
    BitBake saves a copy of the original environment into
    a special variable named
    <link linkend='var-bb-BB_ORIGENV'><filename>BB_ORIGENV</filename></link>.
</para>

<para>
<listitem><para><emphasis><filename>[depends]</filename>:</emphasis>
    Controls inter-task dependencies.
    See the
    <link linkend='var-bb-DEPENDS'><filename>DEPENDS</filename></link>
    variable and the
    "<link linkend='inter-task-dependencies'>Inter-Task Dependencies</link>"
    section for more information.
<listitem><para><emphasis><filename>[deptask]</filename>:</emphasis>
    Controls task build-time dependencies.
    See the
    <link linkend='var-bb-DEPENDS'><filename>DEPENDS</filename></link>
    variable and the
    "<link linkend='build-dependencies'>Build Dependencies</link>"
    section for more information.
    not careful.
    </note>
    </para></listitem>
<listitem><para><emphasis><filename>[number_threads]</filename>:</emphasis>
    Limits tasks to a specific number of simultaneous threads
    during execution.
    This varflag is useful when your build host has a large number
    of cores but certain tasks need to be rate-limited due to various
    kinds of resource constraints (e.g. to avoid network throttling).
    <filename>number_threads</filename> works similarly to the
    <link linkend='var-bb-BB_NUMBER_THREADS'><filename>BB_NUMBER_THREADS</filename></link>
    variable but is task-specific.</para>

    <para>Set the value globally.
    For example, the following makes sure the
    <filename>do_fetch</filename> task uses no more than two
    simultaneous execution threads:
    <literallayout class='monospaced'>
do_fetch[number_threads] = "2"
    </literallayout>
    <note><title>Warnings</title>
        <itemizedlist>
            <listitem><para>
                Setting the varflag in individual recipes rather
                than globally can result in unpredictable behavior.
                </para></listitem>
            <listitem><para>
                Setting the varflag to a value greater than the
                value used in the <filename>BB_NUMBER_THREADS</filename>
                variable causes <filename>number_threads</filename>
                to have no effect.
                </para></listitem>
        </itemizedlist>
    </note>
    </para></listitem>
<listitem><para><emphasis><filename>[postfuncs]</filename>:</emphasis>
    List of functions to call after the completion of the task.
    </para></listitem>
<listitem><para><emphasis><filename>[rdepends]</filename>:</emphasis>
    Controls inter-task runtime dependencies.
    See the
    <link linkend='var-bb-RDEPENDS'><filename>RDEPENDS</filename></link>
    variable, the
    <link linkend='var-bb-RRECOMMENDS'><filename>RRECOMMENDS</filename></link>
    variable, and the
    "<link linkend='inter-task-dependencies'>Inter-Task Dependencies</link>"
    section for more information.
<listitem><para><emphasis><filename>[rdeptask]</filename>:</emphasis>
    Controls task runtime dependencies.
    See the
    <link linkend='var-bb-RDEPENDS'><filename>RDEPENDS</filename></link>
    variable, the
    <link linkend='var-bb-RRECOMMENDS'><filename>RRECOMMENDS</filename></link>
    variable, and the
    "<link linkend='runtime-dependencies'>Runtime Dependencies</link>"
    section for more information.
<listitem><para><emphasis><filename>[recrdeptask]</filename>:</emphasis>
    Controls task recursive runtime dependencies.
    See the
    <link linkend='var-bb-RDEPENDS'><filename>RDEPENDS</filename></link>
    variable, the
    <link linkend='var-bb-RRECOMMENDS'><filename>RRECOMMENDS</filename></link>
    variable, and the
    "<link linkend='recursive-dependencies'>Recursive Dependencies</link>"
    section for more information.
<title>Events</title>

<para>
    BitBake allows installation of event handlers within recipe
    and class files.
    Events are triggered at certain points during operation, such
    as the beginning of operation against a given recipe
    (i.e. <filename>*.bb</filename>), the start of a given task,
    a task failure, a task success, and so forth.
    The intent is to make it easy to do things like email
    notification on build failures.
</para>
<para>
    Following is an example event handler that prints the name
    of the event and the content of the
    <filename>FILE</filename> variable:
    <literallayout class='monospaced'>
addhandler myclass_eventhandler
python myclass_eventhandler() {
    from bb.event import getName
    print("The name of the Event is %s" % getName(e))
    print("The file we run for is %s" % d.getVar('FILE'))
}
myclass_eventhandler[eventmask] = "bb.event.BuildStarted bb.event.BuildCompleted"
    </literallayout>
    In the previous example, an eventmask has been set so that
    the handler only sees the "BuildStarted" and "BuildCompleted"
    events.
    This event handler gets called every time an event matching
    the eventmask is triggered.
    A global variable "<filename>e</filename>" is defined, which
    represents the current event.
    With the <filename>getName(e)</filename> method, you can get
    the name of the triggered event.
    The global datastore is available as "<filename>d</filename>".
    In legacy code, you might see "<filename>e.data</filename>" used to
    get the datastore.
    However, realize that "<filename>e.data</filename>" is deprecated
    and you should use "<filename>d</filename>" going forward.
</para>
<para>
    The context of the datastore is appropriate to the event
    in question.
    For example, "BuildStarted" and "BuildCompleted" events run
    before any tasks are executed, so they would be in the global
    configuration datastore namespace.
    No recipe-specific metadata exists in that namespace.
    The "BuildStarted" and "BuildCompleted" events also run in
    the main cooker/server process rather than in any worker context.
    Thus, any changes made to the datastore would be seen by other
    cooker/server events within the current build but not seen
    outside of that build or in any worker context.
    Task events run in the actual tasks in question and
    consequently have recipe-specific and task-specific contents.
    These events run in the worker context and are discarded at
    the end of task execution.
</para>
<para>
    During a standard build, the following common events might occur.
    These are the kinds of events that most metadata might have an
    interest in viewing:
    <itemizedlist>
        <listitem><para>
            <filename>bb.event.ConfigParsed()</filename>:
            Fired when the base configuration, which consists of
            <filename>bitbake.conf</filename>,
            <filename>base.bbclass</filename> and any global
            <filename>INHERIT</filename> statements, has been parsed.
            You can see multiple such events when each of the
            workers parse the base configuration or if the server
            changes configuration and reparses.
            Any given datastore only has one such event executed
            against it, however.
            If
            <link linkend='var-bb-BB_INVALIDCONF'><filename>BB_INVALIDCONF</filename></link>
            is set in the datastore by the event handler, the
            configuration is reparsed and a new event triggered,
            allowing the metadata to update configuration.
            </para></listitem>
        <listitem><para>
            <filename>bb.event.HeartbeatEvent()</filename>:
            Fires at regular time intervals of one second.
            You can configure the interval time using the
            <filename>BB_HEARTBEAT_EVENT</filename> variable.
            The event's "time" attribute is the
            <filename>time.time()</filename> value when the
            event is triggered.
            This event is useful for activities such as
            system state monitoring.
            </para></listitem>
        <listitem><para>
            <filename>bb.event.ParseStarted()</filename>:
            Fired when BitBake is about to start parsing recipes.
            This event's "total" attribute represents the number of
            recipes BitBake plans to parse.
            </para></listitem>
        <listitem><para>
            <filename>bb.event.ParseProgress()</filename>:
            Fired as parsing progresses.
            This event's "current" attribute is the number of
            recipes parsed as well as the "total" attribute.
            </para></listitem>
        <listitem><para>
            <filename>bb.event.ParseCompleted()</filename>:
            Fired when parsing is complete.
            This event's "cached", "parsed", "skipped", "virtuals",
            "masked", and "errors" attributes provide statistics
            for the parsing results.
            </para></listitem>
        <listitem><para>
            <filename>bb.event.BuildStarted()</filename>:
            Fired when a new build starts.
            BitBake fires multiple "BuildStarted" events (one per
            configuration) when a multiple configuration build
            (multiconfig) is enabled.
            </para></listitem>
        <listitem><para>
            <filename>bb.build.TaskStarted()</filename>:
            Fired when a task starts.
            This event's "taskfile" attribute points to the recipe
            from which the task originates.
            The "taskname" attribute, which is the task's name,
            includes the <filename>do_</filename> prefix, and the
            "logfile" attribute points to where the task's output is
            stored.
            Finally, the "time" attribute is the task's execution start
            time.
            </para></listitem>
        <listitem><para>
            <filename>bb.build.TaskInvalid()</filename>:
            Fired if BitBake tries to execute a task that does not exist.
            </para></listitem>
        <listitem><para>
            <filename>bb.build.TaskFailedSilent()</filename>:
            Fired for setscene tasks that fail and should not be
            presented to the user verbosely.
            </para></listitem>
        <listitem><para>
            <filename>bb.build.TaskFailed()</filename>:
            Fired for normal tasks that fail.
            </para></listitem>
        <listitem><para>
            <filename>bb.build.TaskSucceeded()</filename>:
            Fired when a task successfully completes.
            </para></listitem>
        <listitem><para>
            <filename>bb.event.BuildCompleted()</filename>:
            Fired when a build finishes.
            </para></listitem>
        <listitem><para>
            <filename>bb.cooker.CookerExit()</filename>:
            Fired when the BitBake server/cooker shuts down.
            This event is usually only seen by the UIs as a
            sign they should also shutdown.
            </para></listitem>
    </itemizedlist>
</para>
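<para>
    For example, a class could install a handler that watches the
    heartbeat; this is a minimal sketch, with an arbitrary handler
    name and log message:
    <literallayout class='monospaced'>
addhandler myclass_heartbeat
myclass_heartbeat[eventmask] = "bb.event.HeartbeatEvent"
python myclass_heartbeat() {
    # "time" is the time.time() value at which the event fired
    bb.note("Build heartbeat at %s" % e.time)
}
    </literallayout>
</para>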
<para>
    The events in this next list occur based on specific
    requests to the server.
    These events are often used to communicate larger pieces of
    information from the BitBake server to other parts of
    BitBake such as user interfaces:
    <itemizedlist>
        <listitem><para>
            <filename>bb.event.TreeDataPreparationStarted()</filename>
            </para></listitem>
        <listitem><para>
            <filename>bb.event.TreeDataPreparationProgress()</filename>
            </para></listitem>
        <listitem><para>
            <filename>bb.event.TreeDataPreparationCompleted()</filename>
            </para></listitem>
        <listitem><para>
            <filename>bb.event.DepTreeGenerated()</filename>
            </para></listitem>
        <listitem><para>
            <filename>bb.event.CoreBaseFilesFound()</filename>
            </para></listitem>
        <listitem><para>
            <filename>bb.event.ConfigFilePathFound()</filename>
            </para></listitem>
        <listitem><para>
            <filename>bb.event.FilesMatchingFound()</filename>
            </para></listitem>
        <listitem><para>
            <filename>bb.event.ConfigFilesFound()</filename>
            </para></listitem>
        <listitem><para>
            <filename>bb.event.TargetsTreeGenerated()</filename>
            </para></listitem>
    </itemizedlist>
</para>
    from a single recipe file multiple incarnations of that
    recipe file where all incarnations are buildable.
    These features are enabled through the
    <link linkend='var-bb-BBCLASSEXTEND'><filename>BBCLASSEXTEND</filename></link>
    and
    <link linkend='var-bb-BBVERSIONS'><filename>BBVERSIONS</filename></link>
    variables.
    <note>
        The mechanism for this class extension is extremely
        specific to the implementation.
        Usually, the recipe's
        <link linkend='var-bb-PROVIDES'><filename>PROVIDES</filename></link>,
        <link linkend='var-bb-PN'><filename>PN</filename></link>, and
        <link linkend='var-bb-DEPENDS'><filename>DEPENDS</filename></link>
        variables would need to be modified by the extension class.
        For specific examples, see the OE-Core
        <filename>native</filename>, <filename>nativesdk</filename>,
    project from a single recipe file.
    You can also specify conditional metadata
    (using the
    <link linkend='var-bb-OVERRIDES'><filename>OVERRIDES</filename></link>
    mechanism) for a single version, or an optionally named range of versions.
    Here is an example:
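    <literallayout class='monospaced'>
BBVERSIONS = "1.0 2.0 git"
SRC_URI_git = "git://someurl/somepath.git"
    </literallayout>
    The version list and the URL above are placeholder values used
    purely for illustration.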
    into overrides, but it is also made available for the metadata to use
    in the variable that defines the base recipe versions for use in
    <filename>file://</filename> search paths
    (<link linkend='var-bb-FILESPATH'><filename>FILESPATH</filename></link>).
    </para></listitem>
</itemizedlist>
</para>
<para>
    BitBake uses the
    <link linkend='var-bb-DEPENDS'><filename>DEPENDS</filename></link>
    variable to manage build time dependencies.
    The <filename>[deptask]</filename> varflag for tasks
    signifies the task of each
<para>
    BitBake uses the
    <link linkend='var-bb-PACKAGES'><filename>PACKAGES</filename></link>,
    <link linkend='var-bb-RDEPENDS'><filename>RDEPENDS</filename></link>, and
    <link linkend='var-bb-RRECOMMENDS'><filename>RRECOMMENDS</filename></link>
    variables to manage runtime dependencies.
</para>
<para>
    These checksums are stored in
    <link linkend='var-bb-STAMP'><filename>STAMP</filename></link>.
    You can examine the checksums using the following BitBake command:
    <literallayout class='monospaced'>
$ bitbake-dumpsigs
    </literallayout>
    This command returns the signature data in a readable format
    that allows you to examine the inputs used when the
    OpenEmbedded build system generates signatures.
    For example, using <filename>bitbake-dumpsigs</filename>
    allows you to examine the <filename>do_compile</filename>
    task's "sigdata" for a C application (e.g.
    <filename>bash</filename>).
    Running the command also reveals that the "CC" variable is part of
    the inputs that are hashed.
    Any changes to this variable would invalidate the stamp and
    cause the <filename>do_compile</filename> task to run.
</para>

<para>
    The following list describes related variables:
    <itemizedlist>
        <listitem><para>
            <link linkend='var-bb-BB_HASHCHECK_FUNCTION'><filename>BB_HASHCHECK_FUNCTION</filename></link>:
            Specifies the name of the function to call during
            the "setscene" part of the task's execution in order
            to validate the list of task hashes.
            </para></listitem>
        <listitem><para>
            <link linkend='var-bb-BB_SETSCENE_DEPVALID'><filename>BB_SETSCENE_DEPVALID</filename></link>:
            Specifies a function BitBake calls that determines
            whether BitBake requires a setscene dependency to
            be met.
            </para></listitem>
        <listitem><para>
            <link linkend='var-bb-BB_SETSCENE_VERIFY_FUNCTION2'><filename>BB_SETSCENE_VERIFY_FUNCTION2</filename></link>:
            Specifies a function to call that verifies the list of
            planned task execution before the main task execution
            happens.
            </para></listitem>
        <listitem><para>
            <link linkend='var-bb-BB_STAMP_POLICY'><filename>BB_STAMP_POLICY</filename></link>:
            Defines the mode for comparing timestamps of stamp files.
            </para></listitem>
        <listitem><para>
            <link linkend='var-bb-BB_STAMP_WHITELIST'><filename>BB_STAMP_WHITELIST</filename></link>:
            Lists stamp files that are looked at when the stamp policy
            is "whitelist".
            </para></listitem>
        <listitem><para>
            <link linkend='var-bb-BB_TASKHASH'><filename>BB_TASKHASH</filename></link>:
            Within an executing task, this variable holds the hash
            of the task as returned by the currently enabled
            signature generator.
            </para></listitem>
        <listitem><para>
            <link linkend='var-bb-STAMP'><filename>STAMP</filename></link>:
            The base path to create stamp files.
            </para></listitem>
        <listitem><para>
            <link linkend='var-bb-STAMPCLEAN'><filename>STAMPCLEAN</filename></link>:
            Again, the base path to create stamp files but can use wildcards
            for matching a range of files for clean operations.
            </para></listitem>
    </itemizedlist>
</para>
</section>
<section id='wildcard-support-in-variables'>
    <title>Wildcard Support in Variables</title>

    <para>
        Support for wildcard use in variables varies depending on the
        context in which it is used.
        For example, some variables and file names allow limited use of
        wildcards through the "<filename>%</filename>" and
        "<filename>*</filename>" characters.
        Other variables or names support Python's
        <ulink url='https://docs.python.org/3/library/glob.html'><filename>glob</filename></ulink>
        syntax,
        <ulink url='https://docs.python.org/3/library/fnmatch.html#module-fnmatch'><filename>fnmatch</filename></ulink>
        syntax, or
        <ulink url='https://docs.python.org/3/library/re.html#re'><filename>Regular Expression (re)</filename></ulink>
        syntax.
    </para>

    <para>
        For variables that have wildcard support, the
        documentation describes which form of wildcard, its
        use, and its limitations.
    </para>
</section>

</chapter>
<copyright>
    <year>2004-2018</year>
    <holder>Richard Purdie</holder>
    <holder>Chris Larson</holder>
    <holder>and Phil Blundell</holder>
Show debug logging for the specified logging domains
profile the command and print a report
.TP
.B \-uUI, \-\-ui=UI
User interface to use. Currently, knotty, taskexp or ncurses can be specified as UI.
.TP
.B \-tSERVERTYPE, \-\-servertype=SERVERTYPE
Choose which server to use, none, process or xmlrpc.
@@ -17,6 +17,13 @@
|
||||
<!ENTITY OE_DOCS_URL "http://docs.openembedded.org">
|
||||
<!ENTITY OH_HOME_URL "http://o-hand.com">
|
||||
<!ENTITY BITBAKE_HOME_URL "http://developer.berlios.de/projects/bitbake/">
|
||||
<!ENTITY ECLIPSE_MAIN_URL "http://www.eclipse.org/downloads">
|
||||
<!ENTITY ECLIPSE_DL_URL "http://download.eclipse.org">
|
||||
<!ENTITY ECLIPSE_DL_PLUGIN_URL "&YOCTO_DL_URL;/releases/eclipse-plugin/&DISTRO;">
|
||||
<!ENTITY ECLIPSE_UPDATES_URL "&ECLIPSE_DL_URL;/tm/updates/3.3">
|
||||
<!ENTITY ECLIPSE_INDIGO_URL "&ECLIPSE_DL_URL;/releases/indigo">
|
||||
<!ENTITY ECLIPSE_JUNO_URL "&ECLIPSE_DL_URL;/releases/juno">
|
||||
<!ENTITY ECLIPSE_INDIGO_CDT_URL "&ECLIPSE_DL_URL;tools/cdt/releases/indigo">
|
||||
<!ENTITY YOCTO_DOCS_URL "&YOCTO_HOME_URL;/docs">
|
||||
<!ENTITY YOCTO_SOURCES_URL "&YOCTO_HOME_URL;/sources/">
|
||||
<!ENTITY YOCTO_AB_PORT_URL "&YOCTO_AB_URL;:8010">
|
||||
@@ -24,6 +31,7 @@
|
||||
<!ENTITY YOCTO_POKY_URL "&YOCTO_DL_URL;/releases/poky/">
|
||||
<!ENTITY YOCTO_RELEASE_DL_URL "&YOCTO_DL_URL;/releases/yocto/yocto-&DISTRO;">
|
||||
<!ENTITY YOCTO_TOOLCHAIN_DL_URL "&YOCTO_RELEASE_DL_URL;/toolchain/">
|
||||
<!ENTITY YOCTO_ECLIPSE_DL_URL "&YOCTO_RELEASE_DL_URL;/eclipse-plugin/indigo;">
|
||||
<!ENTITY YOCTO_ADTINSTALLER_DL_URL "&YOCTO_RELEASE_DL_URL;/adt_installer">
|
||||
<!ENTITY YOCTO_POKY_DL_URL "&YOCTO_RELEASE_DL_URL;/&YOCTO_POKY;.tar.bz2">
|
||||
<!ENTITY YOCTO_MACHINES_DL_URL "&YOCTO_RELEASE_DL_URL;/machines">
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
#
|
||||
# This is a copy on write dictionary and set which abuses classes to try and be nice and fast.
|
||||
#
|
||||
# Copyright (C) 2006 Tim Ansell
|
||||
# Copyright (C) 2006 Tim Amsell
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License version 2 as
|
||||
@@ -150,7 +150,7 @@ class COWDictMeta(COWMeta):
|
||||
yield value
|
||||
if type == "items":
|
||||
yield (key, value)
|
||||
return
|
||||
raise StopIteration()
|
||||
|
||||
def iterkeys(cls):
|
||||
return cls.iter("keys")
|
||||
@@ -213,11 +213,11 @@ if __name__ == "__main__":
|
||||
print()
|
||||
|
||||
print("a", a)
|
||||
for x in a.iteritems():
|
||||
for x in a.items():
|
||||
print(x)
|
||||
print("--")
|
||||
print("b", b)
|
||||
for x in b.iteritems():
|
||||
for x in b.items():
|
||||
print(x)
|
||||
print()
|
||||
|
||||
@@ -225,11 +225,11 @@ if __name__ == "__main__":
|
||||
b['a'] = 'c'
|
||||
|
||||
print("a", a)
|
||||
for x in a.iteritems():
|
||||
for x in a.items():
|
||||
print(x)
|
||||
print("--")
|
||||
print("b", b)
|
||||
for x in b.iteritems():
|
||||
for x in b.items():
|
||||
print(x)
|
||||
print()
|
||||
|
||||
@@ -244,22 +244,22 @@ if __name__ == "__main__":
|
||||
a['set'].add("o2")
|
||||
|
||||
print("a", a)
|
||||
for x in a['set'].itervalues():
|
||||
for x in a['set'].values():
|
||||
print(x)
|
||||
print("--")
|
||||
print("b", b)
|
||||
for x in b['set'].itervalues():
|
||||
for x in b['set'].values():
|
||||
print(x)
|
||||
print()
|
||||
|
||||
b['set'].add('o3')
|
||||
|
||||
print("a", a)
|
||||
for x in a['set'].itervalues():
|
||||
for x in a['set'].values():
|
||||
print(x)
|
||||
print("--")
|
||||
print("b", b)
|
||||
for x in b['set'].itervalues():
|
||||
for x in b['set'].values():
|
||||
print(x)
|
||||
print()
|
||||
|
||||
@@ -269,7 +269,7 @@ if __name__ == "__main__":
|
||||
a['set2'].add("o2")
|
||||
|
||||
print("a", a)
|
||||
for x in a.iteritems():
|
||||
for x in a.items():
|
||||
print(x)
|
||||
print("--")
|
||||
print("b", b)
|
||||
@@ -289,7 +289,7 @@ if __name__ == "__main__":
|
||||
print("Yay - has_key with delete works!")
|
||||
|
||||
print("a", a)
|
||||
for x in a.iteritems():
|
||||
for x in a.items():
|
||||
print(x)
|
||||
print("--")
|
||||
print("b", b)
|
||||
@@ -300,7 +300,7 @@ if __name__ == "__main__":
|
||||
b.__revertitem__('b')
|
||||
|
||||
print("a", a)
|
||||
for x in a.iteritems():
|
||||
for x in a.items():
|
||||
print(x)
|
||||
print("--")
|
||||
print("b", b)
|
||||
@@ -310,7 +310,7 @@ if __name__ == "__main__":
|
||||
|
||||
b.__revertitem__('dict')
|
||||
print("a", a)
|
||||
for x in a.iteritems():
|
||||
for x in a.items():
|
||||
print(x)
|
||||
print("--")
|
||||
print("b", b)
|
||||
|
||||
@@ -21,7 +21,7 @@
|
||||
# with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
__version__ = "1.42.0"
|
||||
__version__ = "1.32.0"
|
||||
|
||||
import sys
|
||||
if sys.version_info < (3, 4, 0):
|
||||
@@ -63,10 +63,6 @@ class BBLogger(Logger):
|
||||
def verbose(self, msg, *args, **kwargs):
|
||||
return self.log(logging.INFO - 1, msg, *args, **kwargs)
|
||||
|
||||
def verbnote(self, msg, *args, **kwargs):
|
||||
return self.log(logging.INFO + 2, msg, *args, **kwargs)
|
||||
|
||||
|
||||
logging.raiseExceptions = False
|
||||
logging.setLoggerClass(BBLogger)
|
||||
|
||||
@@ -97,18 +93,6 @@ def debug(lvl, *args):
|
||||
def note(*args):
|
||||
mainlogger.info(''.join(args))
|
||||
|
||||
#
|
||||
# A higher prioity note which will show on the console but isn't a warning
|
||||
#
|
||||
# Something is happening the user should be aware of but they probably did
|
||||
# something to make it happen
|
||||
#
|
||||
def verbnote(*args):
|
||||
mainlogger.verbnote(''.join(args))
|
||||
|
||||
#
|
||||
# Warnings - things the user likely needs to pay attention to and fix
|
||||
#
|
||||
def warn(*args):
|
||||
mainlogger.warning(''.join(args))
|
||||
|
||||
|
||||
@@ -41,6 +41,8 @@ from bb import data, event, utils
|
||||
bblogger = logging.getLogger('BitBake')
|
||||
logger = logging.getLogger('BitBake.Build')
|
||||
|
||||
NULL = open(os.devnull, 'r+')
|
||||
|
||||
__mtime_cache = {}
|
||||
|
||||
def cached_mtime_noerror(f):
|
||||
@@ -89,14 +91,14 @@ class TaskBase(event.Event):
|
||||
|
||||
def __init__(self, t, logfile, d):
|
||||
self._task = t
|
||||
self._package = d.getVar("PF")
|
||||
self._mc = d.getVar("BB_CURRENT_MC")
|
||||
self.taskfile = d.getVar("FILE")
|
||||
self._package = d.getVar("PF", True)
|
||||
self._mc = d.getVar("BB_CURRENT_MC", True)
|
||||
self.taskfile = d.getVar("FILE", True)
|
||||
self.taskname = self._task
|
||||
self.logfile = logfile
|
||||
self.time = time.time()
|
||||
event.Event.__init__(self)
|
||||
self._message = "recipe %s: task %s: %s" % (d.getVar("PF"), t, self.getDisplayName())
|
||||
self._message = "recipe %s: task %s: %s" % (d.getVar("PF", True), t, self.getDisplayName())
|
||||
|
||||
def getTask(self):
|
||||
return self._task
|
||||
@@ -193,13 +195,13 @@ def exec_func(func, d, dirs = None, pythonexception=False):
|
||||
oldcwd = None
|
||||
|
||||
flags = d.getVarFlags(func)
|
||||
cleandirs = flags.get('cleandirs') if flags else None
|
||||
cleandirs = flags.get('cleandirs')
|
||||
if cleandirs:
|
||||
for cdir in d.expand(cleandirs).split():
|
||||
bb.utils.remove(cdir, True)
|
||||
bb.utils.mkdirhier(cdir)
|
||||
|
||||
if flags and dirs is None:
|
||||
if dirs is None:
|
||||
dirs = flags.get('dirs')
|
||||
if dirs:
|
||||
dirs = d.expand(dirs).split()
|
||||
@@ -225,17 +227,17 @@ def exec_func(func, d, dirs = None, pythonexception=False):
|
||||
else:
|
||||
lockfiles = None
|
||||
|
||||
tempdir = d.getVar('T')
|
||||
tempdir = d.getVar('T', True)
|
||||
|
||||
# or func allows items to be executed outside of the normal
|
||||
# task set, such as buildhistory
|
||||
task = d.getVar('BB_RUNTASK') or func
|
||||
task = d.getVar('BB_RUNTASK', True) or func
|
||||
if task == func:
|
||||
taskfunc = task
|
||||
else:
|
||||
taskfunc = "%s.%s" % (task, func)
|
||||
|
||||
runfmt = d.getVar('BB_RUNFMT') or "run.{func}.{pid}"
|
||||
runfmt = d.getVar('BB_RUNFMT', True) or "run.{func}.{pid}"
|
||||
runfn = runfmt.format(taskfunc=taskfunc, task=task, func=func, pid=os.getpid())
|
||||
runfile = os.path.join(tempdir, runfn)
|
||||
bb.utils.mkdirhier(os.path.dirname(runfile))
|
||||
@@ -304,10 +306,9 @@ def exec_func_python(func, d, runfile, cwd=None, pythonexception=False):
|
||||
utils.better_exec(comp, {"d": d}, code, "exec_python_func() autogenerated", pythonexception=pythonexception)
|
||||
except (bb.parse.SkipRecipe, bb.build.FuncFailed):
|
||||
raise
|
||||
except Exception as e:
|
||||
except:
|
||||
if pythonexception:
|
||||
raise
|
||||
logger.error(str(e))
|
||||
raise FuncFailed(func, None)
|
||||
finally:
|
||||
bb.debug(2, "Python function %s finished" % func)
|
||||
@@ -368,7 +369,7 @@ exit $ret
|
||||
|
||||
cmd = runfile
|
||||
if d.getVarFlag(func, 'fakeroot', False):
|
||||
fakerootcmd = d.getVar('FAKEROOT')
|
||||
fakerootcmd = d.getVar('FAKEROOT', True)
|
||||
if fakerootcmd:
|
||||
cmd = [fakerootcmd, runfile]
|
||||
|
||||
@@ -377,7 +378,7 @@ exit $ret
|
||||
else:
|
||||
logfile = sys.stdout
|
||||
|
||||
progress = d.getVarFlag(func, 'progress')
|
||||
progress = d.getVarFlag(func, 'progress', True)
|
||||
if progress:
|
||||
if progress == 'percent':
|
||||
# Use default regex
|
||||
@@ -429,7 +430,7 @@ exit $ret
|
||||
else:
|
||||
break
|
||||
|
||||
tempdir = d.getVar('T')
|
||||
tempdir = d.getVar('T', True)
|
||||
fifopath = os.path.join(tempdir, 'fifo.%s' % os.getpid())
|
||||
if os.path.exists(fifopath):
|
||||
os.unlink(fifopath)
|
||||
@@ -442,7 +443,7 @@ exit $ret
|
||||
with open(os.devnull, 'r+') as stdin:
|
||||
bb.process.run(cmd, shell=False, stdin=stdin, log=logfile, extrafiles=[(fifo,readfifo)])
|
||||
except bb.process.CmdError:
|
||||
logfn = d.getVar('BB_LOGFILE')
|
||||
logfn = d.getVar('BB_LOGFILE', True)
|
||||
raise FuncFailed(func, logfn)
|
||||
finally:
|
||||
os.unlink(fifopath)
|
||||
@@ -473,18 +474,18 @@ def _exec_task(fn, task, d, quieterr):
|
||||
logger.debug(1, "Executing task %s", task)
|
||||
|
||||
localdata = _task_data(fn, task, d)
|
||||
tempdir = localdata.getVar('T')
|
||||
tempdir = localdata.getVar('T', True)
|
||||
if not tempdir:
|
||||
bb.fatal("T variable not set, unable to build")
|
||||
|
||||
# Change nice level if we're asked to
|
||||
nice = localdata.getVar("BB_TASK_NICE_LEVEL")
|
||||
nice = localdata.getVar("BB_TASK_NICE_LEVEL", True)
|
||||
if nice:
|
||||
curnice = os.nice(0)
|
||||
nice = int(nice) - curnice
|
||||
newnice = os.nice(nice)
|
||||
logger.debug(1, "Renice to %s " % newnice)
|
||||
ionice = localdata.getVar("BB_TASK_IONICE_LEVEL")
|
||||
ionice = localdata.getVar("BB_TASK_IONICE_LEVEL", True)
|
||||
if ionice:
|
||||
try:
|
||||
cls, prio = ionice.split(".", 1)
|
||||
@@ -495,7 +496,7 @@ def _exec_task(fn, task, d, quieterr):
|
||||
bb.utils.mkdirhier(tempdir)
|
||||
|
||||
# Determine the logfile to generate
|
||||
logfmt = localdata.getVar('BB_LOGFMT') or 'log.{task}.{pid}'
|
||||
logfmt = localdata.getVar('BB_LOGFMT', True) or 'log.{task}.{pid}'
|
||||
logbase = logfmt.format(task=task, pid=os.getpid())
|
||||
|
||||
# Document the order of the tasks...
|
||||
@@ -532,6 +533,7 @@ def _exec_task(fn, task, d, quieterr):
|
||||
self.triggered = True
|
||||
|
||||
# Handle logfiles
|
||||
si = open('/dev/null', 'r')
|
||||
try:
|
||||
bb.utils.mkdirhier(os.path.dirname(logfn))
|
||||
logfile = open(logfn, 'w')
|
||||
@@ -545,8 +547,7 @@ def _exec_task(fn, task, d, quieterr):
|
||||
ose = [os.dup(sys.stderr.fileno()), sys.stderr.fileno()]
|
||||
|
||||
# Replace those fds with our own
|
||||
with open('/dev/null', 'r') as si:
|
||||
os.dup2(si.fileno(), osi[1])
|
||||
os.dup2(si.fileno(), osi[1])
|
||||
os.dup2(logfile.fileno(), oso[1])
|
||||
os.dup2(logfile.fileno(), ose[1])
|
||||
|
||||
@@ -562,7 +563,6 @@ def _exec_task(fn, task, d, quieterr):
|
||||
|
||||
localdata.setVar('BB_LOGFILE', logfn)
|
||||
localdata.setVar('BB_RUNTASK', task)
|
||||
localdata.setVar('BB_TASK_LOGGER', bblogger)
|
||||
|
||||
flags = localdata.getVarFlags(task)
|
||||
|
||||
@@ -607,6 +607,7 @@ def _exec_task(fn, task, d, quieterr):
|
||||
os.close(osi[0])
|
||||
os.close(oso[0])
|
||||
os.close(ose[0])
|
||||
si.close()
|
||||
|
||||
logfile.close()
|
||||
if os.path.exists(logfn) and os.path.getsize(logfn) == 0:
|
||||
@@ -627,7 +628,7 @@ def exec_task(fn, task, d, profile = False):
|
||||
quieterr = True
|
||||
|
||||
if profile:
|
||||
profname = "profile-%s.log" % (d.getVar("PN") + "-" + task)
|
||||
profname = "profile-%s.log" % (d.getVar("PN", True) + "-" + task)
|
||||
try:
|
||||
import cProfile as profile
|
||||
except:
|
||||
@@ -667,9 +668,9 @@ def stamp_internal(taskname, d, file_name, baseonly=False, noextra=False):
|
||||
stamp = d.stamp[file_name]
|
||||
extrainfo = d.stamp_extrainfo[file_name].get(taskflagname) or ""
|
||||
else:
|
||||
stamp = d.getVar('STAMP')
|
||||
file_name = d.getVar('BB_FILENAME')
|
||||
extrainfo = d.getVarFlag(taskflagname, 'stamp-extra-info') or ""
|
||||
stamp = d.getVar('STAMP', True)
|
||||
file_name = d.getVar('BB_FILENAME', True)
|
||||
extrainfo = d.getVarFlag(taskflagname, 'stamp-extra-info', True) or ""
|
||||
|
||||
if baseonly:
|
||||
return stamp
|
||||
@@ -703,9 +704,9 @@ def stamp_cleanmask_internal(taskname, d, file_name):
|
||||
stamp = d.stampclean[file_name]
|
||||
extrainfo = d.stamp_extrainfo[file_name].get(taskflagname) or ""
|
||||
else:
|
||||
stamp = d.getVar('STAMPCLEAN')
|
||||
file_name = d.getVar('BB_FILENAME')
|
||||
extrainfo = d.getVarFlag(taskflagname, 'stamp-extra-info') or ""
|
||||
stamp = d.getVar('STAMPCLEAN', True)
|
||||
file_name = d.getVar('BB_FILENAME', True)
|
||||
extrainfo = d.getVarFlag(taskflagname, 'stamp-extra-info', True) or ""
|
||||
|
||||
if not stamp:
|
||||
return []
|
||||
@@ -741,7 +742,7 @@ def make_stamp(task, d, file_name = None):
|
||||
# as it completes
|
||||
if not task.endswith("_setscene") and task != "do_setscene" and not file_name:
|
||||
stampbase = stamp_internal(task, d, None, True)
|
||||
file_name = d.getVar('BB_FILENAME')
|
||||
file_name = d.getVar('BB_FILENAME', True)
|
||||
bb.parse.siggen.dump_sigtask(file_name, task, stampbase, True)
|
||||
|
||||
def del_stamp(task, d, file_name = None):
|
||||
@@ -763,7 +764,7 @@ def write_taint(task, d, file_name = None):
|
||||
if file_name:
|
||||
taintfn = d.stamp[file_name] + '.' + task + '.taint'
|
||||
else:
|
||||
taintfn = d.getVar('STAMP') + '.' + task + '.taint'
|
||||
taintfn = d.getVar('STAMP', True) + '.' + task + '.taint'
|
||||
bb.utils.mkdirhier(os.path.dirname(taintfn))
|
||||
# The specific content of the taint file is not really important,
|
||||
# we just need it to be random, so a random UUID is used
|
||||
@@ -801,7 +802,6 @@ def add_tasks(tasklist, d):
|
||||
if name in flags:
|
||||
deptask = d.expand(flags[name])
|
||||
task_deps[name][task] = deptask
|
||||
getTask('mcdepends')
|
||||
getTask('depends')
|
||||
getTask('rdepends')
|
||||
getTask('deptask')
|
||||
@@ -861,52 +861,3 @@ def deltask(task, d):
|
||||
if task in deps:
|
||||
deps.remove(task)
|
||||
d.setVarFlag(bbtask, 'deps', deps)
|
||||
|
||||
def preceedtask(task, with_recrdeptasks, d):
|
||||
"""
|
||||
Returns a set of tasks in the current recipe which were specified as
|
||||
precondition by the task itself ("after") or which listed themselves
|
||||
as precondition ("before"). Preceeding tasks specified via the
|
||||
"recrdeptask" are included in the result only if requested. Beware
|
||||
that this may lead to the task itself being listed.
|
||||
"""
|
||||
preceed = set()
|
||||
|
||||
# Ignore tasks which don't exist
|
||||
tasks = d.getVar('__BBTASKS', False)
|
||||
if task not in tasks:
|
||||
return preceed
|
||||
|
||||
preceed.update(d.getVarFlag(task, 'deps') or [])
|
||||
if with_recrdeptasks:
|
||||
recrdeptask = d.getVarFlag(task, 'recrdeptask')
|
||||
if recrdeptask:
|
||||
preceed.update(recrdeptask.split())
|
||||
return preceed
|
||||
|
||||
def tasksbetween(task_start, task_end, d):
|
||||
"""
|
||||
Return the list of tasks between two tasks in the current recipe,
|
||||
where task_start is to start at and task_end is the task to end at
|
||||
(and task_end has a dependency chain back to task_start).
|
||||
"""
|
||||
outtasks = []
|
||||
tasks = list(filter(lambda k: d.getVarFlag(k, "task"), d.keys()))
|
||||
def follow_chain(task, endtask, chain=None):
|
||||
if not chain:
|
||||
chain = []
|
||||
chain.append(task)
|
||||
for othertask in tasks:
|
||||
if othertask == task:
|
||||
continue
|
||||
if task == endtask:
|
||||
for ctask in chain:
|
||||
if ctask not in outtasks:
|
||||
outtasks.append(ctask)
|
||||
else:
|
||||
deps = d.getVarFlag(othertask, 'deps', False)
|
||||
if task in deps:
|
||||
follow_chain(othertask, endtask, chain)
|
||||
chain.pop()
|
||||
follow_chain(task_start, task_end)
|
||||
return outtasks
|
||||
|
||||
@@ -37,7 +37,7 @@ import bb.utils
|
||||
|
||||
logger = logging.getLogger("BitBake.Cache")
|
||||
|
||||
__cache_version__ = "152"
|
||||
__cache_version__ = "150"
|
||||
|
||||
def getCacheFile(path, filename, data_hash):
|
||||
return os.path.join(path, filename + "." + data_hash)
|
||||
@@ -71,7 +71,7 @@ class RecipeInfoCommon(object):
|
||||
|
||||
@classmethod
|
||||
def flaglist(cls, flag, varlist, metadata, squash=False):
|
||||
out_dict = dict((var, metadata.getVarFlag(var, flag))
|
||||
out_dict = dict((var, metadata.getVarFlag(var, flag, True))
|
||||
for var in varlist)
|
||||
if squash:
|
||||
return dict((k,v) for (k,v) in out_dict.items() if v)
|
||||
@@ -86,9 +86,9 @@ class RecipeInfoCommon(object):
|
||||
class CoreRecipeInfo(RecipeInfoCommon):
|
||||
__slots__ = ()
|
||||
|
||||
cachefile = "bb_cache.dat"
|
||||
cachefile = "bb_cache.dat"
|
||||
|
||||
def __init__(self, filename, metadata):
|
||||
def __init__(self, filename, metadata):
|
||||
self.file_depends = metadata.getVar('__depends', False)
|
||||
self.timestamp = bb.parse.cached_mtime(filename)
|
||||
self.variants = self.listvar('__VARIANTS', metadata) + ['']
|
||||
@@ -97,7 +97,7 @@ class CoreRecipeInfo(RecipeInfoCommon):
|
||||
|
||||
self.skipreason = self.getvar('__SKIPPED', metadata)
|
||||
if self.skipreason:
|
||||
self.pn = self.getvar('PN', metadata) or bb.parse.vars_from_file(filename,metadata)[0]
|
||||
self.pn = self.getvar('PN', metadata) or bb.parse.BBHandler.vars_from_file(filename,metadata)[0]
|
||||
self.skipped = True
|
||||
self.provides = self.depvar('PROVIDES', metadata)
|
||||
self.rprovides = self.depvar('RPROVIDES', metadata)
|
||||
@@ -107,7 +107,7 @@ class CoreRecipeInfo(RecipeInfoCommon):
|
||||
|
||||
self.pn = self.getvar('PN', metadata)
|
||||
self.packages = self.listvar('PACKAGES', metadata)
|
||||
if not self.packages:
|
||||
if not self.pn in self.packages:
|
||||
self.packages.append(self.pn)
|
||||
|
||||
self.basetaskhashes = self.taskvar('BB_BASEHASH', self.tasks, metadata)
|
||||
@@ -122,7 +122,7 @@ class CoreRecipeInfo(RecipeInfoCommon):
|
||||
self.defaultpref = self.intvar('DEFAULT_PREFERENCE', metadata)
|
||||
self.not_world = self.getvar('EXCLUDE_FROM_WORLD', metadata)
|
||||
self.stamp = self.getvar('STAMP', metadata)
|
||||
self.stampclean = self.getvar('STAMPCLEAN', metadata)
|
||||
self.stampclean = self.getvar('STAMPCLEAN', metadata)
|
||||
self.stamp_extrainfo = self.flaglist('stamp-extra-info', self.tasks, metadata)
|
||||
self.file_checksums = self.flaglist('file-checksums', self.tasks, metadata, True)
|
||||
self.packages_dynamic = self.listvar('PACKAGES_DYNAMIC', metadata)
|
||||
@@ -217,7 +217,7 @@ class CoreRecipeInfo(RecipeInfoCommon):
|
||||
cachedata.packages_dynamic[package].append(fn)
|
||||
|
||||
# Build hash of runtime depends and recommends
|
||||
for package in self.packages:
|
||||
for package in self.packages + [self.pn]:
|
||||
cachedata.rundeps[fn][package] = list(self.rdepends) + self.rdepends_pkg[package]
|
||||
cachedata.runrecs[fn][package] = list(self.rrecommends) + self.rrecommends_pkg[package]
|
||||
|
||||
@@ -296,7 +296,7 @@ def parse_recipe(bb_data, bbfile, appends, mc=''):
|
||||
bb_data.setVar("__BBMULTICONFIG", mc)
|
||||
|
||||
# expand tmpdir to include this topdir
|
||||
bb_data.setVar('TMPDIR', bb_data.getVar('TMPDIR') or "")
|
||||
bb_data.setVar('TMPDIR', bb_data.getVar('TMPDIR', True) or "")
|
||||
bbfile_loc = os.path.abspath(os.path.dirname(bbfile))
|
||||
oldpath = os.path.abspath(os.getcwd())
|
||||
bb.parse.cached_mtime_noerror(bbfile_loc)
|
||||
@@ -375,10 +375,10 @@ class Cache(NoCache):
|
||||
data = databuilder.data
|
||||
|
||||
# Pass caches_array information into Cache Constructor
|
||||
# It will be used later for deciding whether we
|
||||
# need extra cache file dump/load support
|
||||
# It will be used later for deciding whether we
|
||||
# need extra cache file dump/load support
|
||||
self.caches_array = caches_array
|
||||
self.cachedir = data.getVar("CACHE")
|
||||
self.cachedir = data.getVar("CACHE", True)
|
||||
self.clean = set()
|
||||
self.checked = set()
|
||||
self.depends_cache = {}
|
||||
@@ -395,7 +395,7 @@ class Cache(NoCache):
|
||||
self.has_cache = True
|
||||
self.cachefile = getCacheFile(self.cachedir, "bb_cache.dat", self.data_hash)
|
||||
|
||||
logger.debug(1, "Cache dir: %s", self.cachedir)
|
||||
logger.debug(1, "Using cache in '%s'", self.cachedir)
|
||||
bb.utils.mkdirhier(self.cachedir)
|
||||
|
||||
cache_ok = True
|
||||
@@ -408,8 +408,6 @@ class Cache(NoCache):
|
||||
self.load_cachefile()
|
||||
elif os.path.isfile(self.cachefile):
|
||||
logger.info("Out of date cache found, rebuilding...")
|
||||
else:
|
||||
logger.debug(1, "Cache file %s not found, building..." % self.cachefile)
|
||||
|
||||
def load_cachefile(self):
|
||||
cachesize = 0
|
||||
@@ -423,10 +421,9 @@ class Cache(NoCache):
|
||||
cachesize += os.fstat(cachefile.fileno()).st_size
|
||||
|
||||
bb.event.fire(bb.event.CacheLoadStarted(cachesize), self.data)
|
||||
|
||||
|
||||
for cache_class in self.caches_array:
|
||||
cachefile = getCacheFile(self.cachedir, cache_class.cachefile, self.data_hash)
|
||||
logger.debug(1, 'Loading cache file: %s' % cachefile)
|
||||
with open(cachefile, "rb") as cachefile:
|
||||
pickled = pickle.Unpickler(cachefile)
|
||||
# Check cache version information
|
||||
@@ -441,8 +438,8 @@ class Cache(NoCache):
|
||||
logger.info('Cache version mismatch, rebuilding...')
|
||||
return
|
||||
elif bitbake_ver != bb.__version__:
|
||||
logger.info('Bitbake version mismatch, rebuilding...')
|
||||
return
|
||||
logger.info('Bitbake version mismatch, rebuilding...')
|
||||
return
|
||||
|
||||
# Load the rest of the cache file
|
||||
current_progress = 0
|
||||
@@ -465,10 +462,6 @@ class Cache(NoCache):
|
||||
self.depends_cache[key] = [value]
|
||||
# only fire events on even percentage boundaries
|
||||
current_progress = cachefile.tell() + previous_progress
|
||||
if current_progress > cachesize:
|
||||
# we might have calculated incorrect total size because a file
|
||||
# might've been written out just after we checked its size
|
||||
cachesize = current_progress
|
||||
current_percent = 100 * current_progress / cachesize
|
||||
if current_percent > previous_percent:
|
||||
previous_percent = current_percent
|
||||
@@ -619,13 +612,13 @@ class Cache(NoCache):
|
||||
a = fl.find(":True")
|
||||
b = fl.find(":False")
|
||||
if ((a < 0) and b) or ((b > 0) and (b < a)):
|
||||
f = fl[:b+6]
|
||||
fl = fl[b+7:]
|
||||
f = fl[:b+6]
|
||||
fl = fl[b+7:]
|
||||
elif ((b < 0) and a) or ((a > 0) and (a < b)):
|
||||
f = fl[:a+5]
|
||||
fl = fl[a+6:]
|
||||
f = fl[:a+5]
|
||||
fl = fl[a+6:]
|
||||
else:
|
||||
break
|
||||
break
|
||||
fl = fl.strip()
|
||||
if "*" in f:
|
||||
continue
|
||||
@@ -799,8 +792,8 @@ class MultiProcessCache(object):
|
||||
self.cachedata_extras = self.create_cachedata()
|
||||
|
||||
def init_cache(self, d, cache_file_name=None):
|
||||
cachedir = (d.getVar("PERSISTENT_DIR") or
|
||||
d.getVar("CACHE"))
|
||||
cachedir = (d.getVar("PERSISTENT_DIR", True) or
|
||||
d.getVar("CACHE", True))
|
||||
if cachedir in [None, '']:
|
||||
return
|
||||
bb.utils.mkdirhier(cachedir)
|
||||
@@ -889,3 +882,4 @@ class MultiProcessCache(object):
|
||||
p.dump([data, self.__class__.CACHE_VERSION])
|
||||
|
||||
bb.utils.unlockfile(glf)
|
||||
|
||||
|
||||
@@ -97,8 +97,6 @@ class FileChecksumCache(MultiProcessCache):
|
||||
|
||||
def checksum_dir(pth):
|
||||
# Handle directories recursively
|
||||
if pth == "/":
|
||||
bb.fatal("Refusing to checksum /")
|
||||
dirchecksums = []
|
||||
for root, dirs, files in os.walk(pth):
|
||||
for name in files:
|
||||
|
||||
@@ -1,22 +1,3 @@
|
||||
"""
|
||||
BitBake code parser
|
||||
|
||||
Parses actual code (i.e. python and shell) for functions and in-line
|
||||
expressions. Used mainly to determine dependencies on other functions
|
||||
and variables within the BitBake metadata. Also provides a cache for
|
||||
this information in order to speed up processing.
|
||||
|
||||
(Not to be confused with the code that parses the metadata itself,
|
||||
see lib/bb/parse/ for that).
|
||||
|
||||
NOTE: if you change how the parsers gather information you will almost
|
||||
certainly need to increment CodeParserCache.CACHE_VERSION below so that
|
||||
any existing codeparser cache gets invalidated. Additionally you'll need
|
||||
to increment __cache_version__ in cache.py in order to ensure that old
|
||||
recipe caches don't trigger "Taskhash mismatch" errors.
|
||||
|
||||
"""
|
||||
|
||||
import ast
|
||||
import sys
|
||||
import codegen
|
||||
@@ -33,7 +14,7 @@ from bb.cache import MultiProcessCache
|
||||
logger = logging.getLogger('BitBake.CodeParser')
|
||||
|
||||
def bbhash(s):
|
||||
return hashlib.sha256(s.encode("utf-8")).hexdigest()
|
||||
return hashlib.md5(s.encode("utf-8")).hexdigest()
|
||||
|
||||
def check_indent(codestr):
|
||||
"""If the code is indented, add a top level piece of code to 'remove' the indentation"""
|
||||
@@ -136,11 +117,7 @@ class shellCacheLine(object):
|
||||
|
||||
class CodeParserCache(MultiProcessCache):
|
||||
cache_file_name = "bb_codeparser.dat"
|
||||
# NOTE: you must increment this if you change how the parsers gather information,
|
||||
# so that an existing cache gets invalidated. Additionally you'll need
|
||||
# to increment __cache_version__ in cache.py in order to ensure that old
|
||||
# recipe caches don't trigger "Taskhash mismatch" errors.
|
||||
CACHE_VERSION = 11
|
||||
CACHE_VERSION = 8
|
||||
|
||||
def __init__(self):
|
||||
MultiProcessCache.__init__(self)
|
||||
@@ -209,15 +186,13 @@ class BufferedLogger(Logger):
|
||||
|
||||
def flush(self):
|
||||
for record in self.buffer:
|
||||
if self.target.isEnabledFor(record.levelno):
|
||||
self.target.handle(record)
|
||||
self.target.handle(record)
|
||||
self.buffer = []
|
||||
|
||||
class PythonParser():
|
||||
getvars = (".getVar", ".appendVar", ".prependVar", "oe.utils.conditional")
|
||||
getvars = (".getVar", ".appendVar", ".prependVar")
|
||||
getvarflags = (".getVarFlag", ".appendVarFlag", ".prependVarFlag")
|
||||
containsfuncs = ("bb.utils.contains", "base_contains")
|
||||
containsanyfuncs = ("bb.utils.contains_any", "bb.utils.filter")
|
||||
containsfuncs = ("bb.utils.contains", "base_contains", "bb.utils.contains_any")
|
||||
execfuncs = ("bb.build.exec_func", "bb.build.exec_task")
|
||||
|
||||
def warn(self, func, arg):
|
||||
@@ -236,17 +211,13 @@ class PythonParser():
|
||||
|
||||
def visit_Call(self, node):
|
||||
name = self.called_node_name(node.func)
|
||||
if name and (name.endswith(self.getvars) or name.endswith(self.getvarflags) or name in self.containsfuncs or name in self.containsanyfuncs):
|
||||
if name and (name.endswith(self.getvars) or name.endswith(self.getvarflags) or name in self.containsfuncs):
|
||||
if isinstance(node.args[0], ast.Str):
|
||||
varname = node.args[0].s
|
||||
if name in self.containsfuncs and isinstance(node.args[1], ast.Str):
|
||||
if varname not in self.contains:
|
||||
self.contains[varname] = set()
|
||||
self.contains[varname].add(node.args[1].s)
|
||||
elif name in self.containsanyfuncs and isinstance(node.args[1], ast.Str):
|
||||
if varname not in self.contains:
|
||||
self.contains[varname] = set()
|
||||
self.contains[varname].update(node.args[1].s.split())
|
||||
elif name.endswith(self.getvarflags):
|
||||
if isinstance(node.args[1], ast.Str):
|
||||
self.references.add('%s[%s]' % (varname, node.args[1].s))
|
||||
@@ -368,9 +339,8 @@ class ShellParser():
|
||||
def _parse_shell(self, value):
|
||||
try:
|
||||
tokens, _ = pyshyacc.parse(value, eof=True, debug=False)
|
||||
except Exception:
|
||||
bb.error('Error during parse shell code, the last 5 lines are:\n%s' % '\n'.join(value.split('\n')[-5:]))
|
||||
raise
|
||||
except pyshlex.NeedMore:
|
||||
raise sherrors.ShellSyntaxError("Unexpected EOF")
|
||||
|
||||
self.process_tokens(tokens)
|
||||
|
||||
|
||||
@@ -28,15 +28,8 @@ and must not trigger events, directly or indirectly.
|
||||
Commands are queued in a CommandQueue
|
||||
"""
|
||||
|
||||
from collections import OrderedDict, defaultdict
|
||||
|
||||
import bb.event
|
||||
import bb.cooker
|
||||
import bb.remotedata
|
||||
|
||||
class DataStoreConnectionHandle(object):
|
||||
def __init__(self, dsindex=0):
|
||||
self.dsindex = dsindex
|
||||
|
||||
class CommandCompleted(bb.event.Event):
|
||||
pass
|
||||
@@ -50,8 +43,6 @@ class CommandFailed(CommandExit):
|
||||
def __init__(self, message):
|
||||
self.error = message
|
||||
CommandExit.__init__(self, 1)
|
||||
def __str__(self):
|
||||
return "Command execution failed: %s" % self.error
|
||||
|
||||
class CommandError(Exception):
|
||||
pass
|
||||
@@ -64,7 +55,6 @@ class Command:
|
||||
self.cooker = cooker
|
||||
self.cmds_sync = CommandsSync()
|
||||
self.cmds_async = CommandsAsync()
|
||||
self.remotedatastores = bb.remotedata.RemoteDatastores(cooker)
|
||||
|
||||
# FIXME Add lock for this
|
||||
self.currentAsyncCommand = None
|
||||
@@ -78,8 +68,7 @@ class Command:
|
||||
if not hasattr(command_method, 'readonly') or False == getattr(command_method, 'readonly'):
|
||||
return None, "Not able to execute not readonly commands in readonly mode"
|
||||
try:
|
||||
self.cooker.process_inotify_updates()
|
||||
if getattr(command_method, 'needconfig', True):
|
||||
if getattr(command_method, 'needconfig', False):
|
||||
self.cooker.updateCacheSync()
|
||||
result = command_method(self, commandline)
|
||||
except CommandError as exc:
|
||||
@@ -99,7 +88,6 @@ class Command:
|
||||
|
||||
def runAsyncCommand(self):
|
||||
try:
|
||||
self.cooker.process_inotify_updates()
|
||||
if self.cooker.state in (bb.cooker.state.error, bb.cooker.state.shutdown, bb.cooker.state.forceshutdown):
|
||||
# updateCache will trigger a shutdown of the parser
|
||||
# and then raise BBHandledException triggering an exit
|
||||
@@ -137,23 +125,14 @@ class Command:
|
||||
|
||||
def finishAsyncCommand(self, msg=None, code=None):
|
||||
if msg or msg == "":
|
||||
bb.event.fire(CommandFailed(msg), self.cooker.data)
|
||||
bb.event.fire(CommandFailed(msg), self.cooker.expanded_data)
|
||||
elif code:
|
||||
bb.event.fire(CommandExit(code), self.cooker.data)
|
||||
bb.event.fire(CommandExit(code), self.cooker.expanded_data)
|
||||
else:
|
||||
bb.event.fire(CommandCompleted(), self.cooker.data)
|
||||
bb.event.fire(CommandCompleted(), self.cooker.expanded_data)
|
||||
self.currentAsyncCommand = None
|
||||
self.cooker.finishcommand()
|
||||
|
||||
def reset(self):
|
||||
self.remotedatastores = bb.remotedata.RemoteDatastores(self.cooker)
|
||||
|
||||
def split_mc_pn(pn):
|
||||
if pn.startswith("multiconfig:"):
|
||||
_, mc, pn = pn.split(":", 2)
|
||||
return (mc, pn)
|
||||
return ('', pn)
|
||||
|
||||
class CommandsSync:
|
||||
"""
|
||||
A class of synchronous commands
|
||||
@@ -200,7 +179,6 @@ class CommandsSync:
|
||||
"""
|
||||
varname = params[0]
|
||||
value = str(params[1])
|
||||
command.cooker.extraconfigdata[varname] = value
|
||||
command.cooker.data.setVar(varname, value)
|
||||
|
||||
def getSetVariable(self, command, params):
|
||||
@@ -240,15 +218,59 @@ class CommandsSync:
|
||||
command.cooker.configuration.postfile = postfiles
|
||||
setPrePostConfFiles.needconfig = False
|
||||
|
||||
def getCpuCount(self, command, params):
|
||||
"""
|
||||
Get the CPU count on the bitbake server
|
||||
"""
|
||||
return bb.utils.cpu_count()
|
||||
getCpuCount.readonly = True
|
||||
getCpuCount.needconfig = False
|
||||
|
||||
def matchFile(self, command, params):
|
||||
fMatch = params[0]
|
||||
return command.cooker.matchFile(fMatch)
|
||||
matchFile.needconfig = False
|
||||
|
||||
def getUIHandlerNum(self, command, params):
|
||||
return bb.event.get_uihandler()
|
||||
getUIHandlerNum.needconfig = False
|
||||
getUIHandlerNum.readonly = True
|
||||
def generateNewImage(self, command, params):
|
||||
image = params[0]
|
||||
base_image = params[1]
|
||||
package_queue = params[2]
|
||||
timestamp = params[3]
|
||||
description = params[4]
|
||||
return command.cooker.generateNewImage(image, base_image,
|
||||
package_queue, timestamp, description)
|
||||
|
||||
def ensureDir(self, command, params):
|
||||
directory = params[0]
|
||||
bb.utils.mkdirhier(directory)
|
||||
ensureDir.needconfig = False
|
||||
|
||||
def setVarFile(self, command, params):
|
||||
"""
|
||||
Save a variable in a file; used for saving in a configuration file
|
||||
"""
|
||||
var = params[0]
|
||||
val = params[1]
|
||||
default_file = params[2]
|
||||
op = params[3]
|
||||
command.cooker.modifyConfigurationVar(var, val, default_file, op)
|
||||
setVarFile.needconfig = False
|
||||
|
||||
def removeVarFile(self, command, params):
|
||||
"""
|
||||
Remove a variable declaration from a file
|
||||
"""
|
||||
var = params[0]
|
||||
command.cooker.removeConfigurationVar(var)
|
||||
removeVarFile.needconfig = False
|
||||
|
||||
def createConfigFile(self, command, params):
|
||||
"""
|
||||
Create an extra configuration file
|
||||
"""
|
||||
name = params[0]
|
||||
command.cooker.createConfigFile(name)
|
||||
createConfigFile.needconfig = False
|
||||
|
||||
def setEventMask(self, command, params):
|
||||
handlerNum = params[0]
|
||||
@@ -273,307 +295,9 @@ class CommandsSync:
|
||||
def updateConfig(self, command, params):
|
||||
options = params[0]
|
||||
environment = params[1]
|
||||
cmdline = params[2]
|
||||
command.cooker.updateConfigOpts(options, environment, cmdline)
|
||||
command.cooker.updateConfigOpts(options, environment)
|
||||
updateConfig.needconfig = False
|
||||
|
||||
def parseConfiguration(self, command, params):
|
||||
"""Instruct bitbake to parse its configuration
|
||||
NOTE: it is only necessary to call this if you aren't calling any normal action
|
||||
(otherwise parsing is taken care of automatically)
|
||||
"""
|
||||
command.cooker.parseConfiguration()
|
||||
parseConfiguration.needconfig = False
|
||||
|
||||
def getLayerPriorities(self, command, params):
|
||||
command.cooker.parseConfiguration()
|
||||
ret = []
|
||||
# regex objects cannot be marshalled by xmlrpc
|
||||
for collection, pattern, regex, pri in command.cooker.bbfile_config_priorities:
|
||||
ret.append((collection, pattern, regex.pattern, pri))
|
||||
return ret
|
||||
getLayerPriorities.readonly = True
|
||||
|
||||
def getRecipes(self, command, params):
|
||||
try:
|
||||
mc = params[0]
|
||||
except IndexError:
|
||||
mc = ''
|
||||
return list(command.cooker.recipecaches[mc].pkg_pn.items())
|
||||
getRecipes.readonly = True
|
||||
|
||||
def getRecipeDepends(self, command, params):
|
||||
try:
|
||||
mc = params[0]
|
||||
except IndexError:
|
||||
mc = ''
|
||||
return list(command.cooker.recipecaches[mc].deps.items())
|
||||
getRecipeDepends.readonly = True
|
||||
|
||||
def getRecipeVersions(self, command, params):
|
||||
try:
|
||||
mc = params[0]
|
||||
except IndexError:
|
||||
mc = ''
|
||||
return command.cooker.recipecaches[mc].pkg_pepvpr
|
||||
getRecipeVersions.readonly = True
|
||||
|
||||
def getRecipeProvides(self, command, params):
|
||||
try:
|
||||
mc = params[0]
|
||||
except IndexError:
|
||||
mc = ''
|
||||
return command.cooker.recipecaches[mc].fn_provides
|
||||
getRecipeProvides.readonly = True
|
||||
|
||||
def getRecipePackages(self, command, params):
|
||||
try:
|
||||
mc = params[0]
|
||||
except IndexError:
|
||||
mc = ''
|
||||
return command.cooker.recipecaches[mc].packages
|
||||
getRecipePackages.readonly = True
|
||||
|
||||
def getRecipePackagesDynamic(self, command, params):
|
||||
try:
|
||||
mc = params[0]
|
||||
except IndexError:
|
||||
mc = ''
|
||||
return command.cooker.recipecaches[mc].packages_dynamic
|
||||
getRecipePackagesDynamic.readonly = True
|
||||
|
||||
def getRProviders(self, command, params):
|
||||
try:
|
||||
mc = params[0]
|
||||
except IndexError:
|
||||
mc = ''
|
||||
return command.cooker.recipecaches[mc].rproviders
|
||||
getRProviders.readonly = True
|
||||
|
||||
def getRuntimeDepends(self, command, params):
|
||||
ret = []
|
||||
try:
|
||||
mc = params[0]
|
||||
except IndexError:
|
||||
mc = ''
|
||||
rundeps = command.cooker.recipecaches[mc].rundeps
|
||||
for key, value in rundeps.items():
|
||||
if isinstance(value, defaultdict):
|
||||
value = dict(value)
|
||||
ret.append((key, value))
|
||||
return ret
|
||||
getRuntimeDepends.readonly = True
|
||||
|
||||
def getRuntimeRecommends(self, command, params):
|
||||
ret = []
|
||||
try:
|
||||
mc = params[0]
|
||||
except IndexError:
|
||||
mc = ''
|
||||
runrecs = command.cooker.recipecaches[mc].runrecs
|
||||
for key, value in runrecs.items():
|
||||
if isinstance(value, defaultdict):
|
||||
value = dict(value)
|
||||
ret.append((key, value))
|
||||
return ret
|
||||
getRuntimeRecommends.readonly = True
|
||||
|
||||
def getRecipeInherits(self, command, params):
|
||||
try:
|
||||
mc = params[0]
|
||||
except IndexError:
|
||||
mc = ''
|
||||
return command.cooker.recipecaches[mc].inherits
|
||||
getRecipeInherits.readonly = True
|
||||
|
||||
def getBbFilePriority(self, command, params):
|
||||
try:
|
||||
mc = params[0]
|
||||
except IndexError:
|
||||
mc = ''
|
||||
return command.cooker.recipecaches[mc].bbfile_priority
|
||||
getBbFilePriority.readonly = True
|
||||
|
||||
def getDefaultPreference(self, command, params):
|
||||
try:
|
||||
mc = params[0]
|
||||
except IndexError:
|
||||
mc = ''
|
||||
return command.cooker.recipecaches[mc].pkg_dp
|
||||
getDefaultPreference.readonly = True
|
||||
|
||||
def getSkippedRecipes(self, command, params):
|
||||
# Return list sorted by reverse priority order
|
||||
import bb.cache
|
||||
skipdict = OrderedDict(sorted(command.cooker.skiplist.items(),
|
||||
key=lambda x: (-command.cooker.collection.calc_bbfile_priority(bb.cache.virtualfn2realfn(x[0])[0]), x[0])))
|
||||
return list(skipdict.items())
|
||||
getSkippedRecipes.readonly = True
|
||||
|
||||
def getOverlayedRecipes(self, command, params):
|
||||
return list(command.cooker.collection.overlayed.items())
|
||||
getOverlayedRecipes.readonly = True
|
||||
|
||||
def getFileAppends(self, command, params):
|
||||
fn = params[0]
|
||||
return command.cooker.collection.get_file_appends(fn)
|
||||
getFileAppends.readonly = True
|
||||
|
||||
def getAllAppends(self, command, params):
|
||||
return command.cooker.collection.bbappends
|
||||
getAllAppends.readonly = True
|
||||
|
||||
def findProviders(self, command, params):
|
||||
return command.cooker.findProviders()
|
||||
findProviders.readonly = True
|
||||
|
||||
def findBestProvider(self, command, params):
|
||||
(mc, pn) = split_mc_pn(params[0])
|
||||
return command.cooker.findBestProvider(pn, mc)
|
||||
findBestProvider.readonly = True
|
||||
|
||||
def allProviders(self, command, params):
|
||||
try:
|
||||
mc = params[0]
|
||||
except IndexError:
|
||||
mc = ''
|
||||
return list(bb.providers.allProviders(command.cooker.recipecaches[mc]).items())
|
||||
allProviders.readonly = True
|
||||
|
||||
def getRuntimeProviders(self, command, params):
|
||||
rprovide = params[0]
|
||||
try:
|
||||
mc = params[1]
|
||||
except IndexError:
|
||||
mc = ''
|
||||
all_p = bb.providers.getRuntimeProviders(command.cooker.recipecaches[mc], rprovide)
|
||||
if all_p:
|
||||
best = bb.providers.filterProvidersRunTime(all_p, rprovide,
|
||||
command.cooker.data,
|
||||
command.cooker.recipecaches[mc])[0][0]
|
||||
else:
|
||||
best = None
|
||||
return all_p, best
|
||||
getRuntimeProviders.readonly = True
|
||||
|
||||
def dataStoreConnectorFindVar(self, command, params):
|
||||
dsindex = params[0]
|
||||
name = params[1]
|
||||
datastore = command.remotedatastores[dsindex]
|
||||
value, overridedata = datastore._findVar(name)
|
||||
|
||||
if value:
|
||||
content = value.get('_content', None)
|
||||
if isinstance(content, bb.data_smart.DataSmart):
|
||||
# Value is a datastore (e.g. BB_ORIGENV) - need to handle this carefully
|
||||
idx = command.remotedatastores.check_store(content, True)
|
||||
return {'_content': DataStoreConnectionHandle(idx),
|
||||
'_connector_origtype': 'DataStoreConnectionHandle',
|
||||
'_connector_overrides': overridedata}
|
||||
elif isinstance(content, set):
|
||||
return {'_content': list(content),
|
||||
'_connector_origtype': 'set',
|
||||
'_connector_overrides': overridedata}
|
||||
else:
|
||||
value['_connector_overrides'] = overridedata
|
||||
else:
|
||||
value = {}
|
||||
value['_connector_overrides'] = overridedata
|
||||
return value
|
||||
dataStoreConnectorFindVar.readonly = True
|
||||
|
||||
def dataStoreConnectorGetKeys(self, command, params):
|
||||
dsindex = params[0]
|
||||
datastore = command.remotedatastores[dsindex]
|
||||
return list(datastore.keys())
|
||||
dataStoreConnectorGetKeys.readonly = True
|
||||
|
||||
def dataStoreConnectorGetVarHistory(self, command, params):
|
||||
dsindex = params[0]
|
||||
name = params[1]
|
||||
datastore = command.remotedatastores[dsindex]
|
||||
return datastore.varhistory.variable(name)
|
||||
dataStoreConnectorGetVarHistory.readonly = True
|
||||
|
||||
def dataStoreConnectorExpandPythonRef(self, command, params):
|
||||
config_data_dict = params[0]
|
||||
varname = params[1]
|
||||
expr = params[2]
|
||||
|
||||
config_data = command.remotedatastores.receive_datastore(config_data_dict)
|
||||
|
||||
varparse = bb.data_smart.VariableParse(varname, config_data)
|
||||
return varparse.python_sub(expr)
|
||||
|
||||
def dataStoreConnectorRelease(self, command, params):
|
||||
dsindex = params[0]
|
||||
if dsindex <= 0:
|
||||
raise CommandError('dataStoreConnectorRelease: invalid index %d' % dsindex)
|
||||
command.remotedatastores.release(dsindex)
|
||||
|
||||
def dataStoreConnectorSetVarFlag(self, command, params):
|
||||
dsindex = params[0]
|
||||
name = params[1]
|
||||
flag = params[2]
|
||||
value = params[3]
|
||||
datastore = command.remotedatastores[dsindex]
|
||||
datastore.setVarFlag(name, flag, value)
|
||||
|
||||
def dataStoreConnectorDelVar(self, command, params):
|
||||
dsindex = params[0]
|
||||
name = params[1]
|
||||
datastore = command.remotedatastores[dsindex]
|
||||
if len(params) > 2:
|
||||
flag = params[2]
|
||||
datastore.delVarFlag(name, flag)
|
||||
else:
|
||||
datastore.delVar(name)
|
||||
|
||||
def dataStoreConnectorRenameVar(self, command, params):
|
||||
dsindex = params[0]
|
||||
name = params[1]
|
||||
newname = params[2]
|
||||
datastore = command.remotedatastores[dsindex]
|
||||
datastore.renameVar(name, newname)
|
||||
|
||||
def parseRecipeFile(self, command, params):
|
||||
"""
|
||||
Parse the specified recipe file (with or without bbappends)
|
||||
and return a datastore object representing the environment
|
||||
for the recipe.
|
||||
"""
|
||||
fn = params[0]
|
||||
appends = params[1]
|
||||
appendlist = params[2]
|
||||
if len(params) > 3:
|
||||
config_data_dict = params[3]
|
||||
config_data = command.remotedatastores.receive_datastore(config_data_dict)
|
||||
else:
|
||||
config_data = None
|
||||
|
||||
if appends:
|
||||
if appendlist is not None:
|
||||
appendfiles = appendlist
|
||||
else:
|
||||
appendfiles = command.cooker.collection.get_file_appends(fn)
|
||||
else:
|
||||
appendfiles = []
|
||||
# We are calling bb.cache locally here rather than on the server,
|
||||
# but that's OK because it doesn't actually need anything from
|
||||
# the server barring the global datastore (which we have a remote
|
||||
# version of)
|
||||
if config_data:
|
||||
# We have to use a different function here if we're passing in a datastore
|
||||
# NOTE: we took a copy above, so we don't do it here again
|
||||
envdata = bb.cache.parse_recipe(config_data, fn, appendfiles)['']
|
||||
else:
|
||||
# Use the standard path
|
||||
parser = bb.cache.NoCache(command.cooker.databuilder)
|
||||
envdata = parser.loadDataFull(fn, appendfiles)
|
||||
idx = command.remotedatastores.store(envdata)
|
||||
return DataStoreConnectionHandle(idx)
|
||||
parseRecipeFile.readonly = True
|
||||
|
||||
class CommandsAsync:
|
||||
"""
|
||||
A class of asynchronous commands
|
||||
@@ -587,15 +311,8 @@ class CommandsAsync:
|
||||
"""
|
||||
bfile = params[0]
|
||||
task = params[1]
|
||||
if len(params) > 2:
|
||||
internal = params[2]
|
||||
else:
|
||||
internal = False
|
||||
|
||||
if internal:
|
||||
command.cooker.buildFileInternal(bfile, task, fireevents=False, quietlog=True)
|
||||
else:
|
||||
command.cooker.buildFile(bfile, task)
|
||||
command.cooker.buildFile(bfile, task)
|
||||
buildFile.needcache = False
|
||||
|
||||
def buildTargets(self, command, params):
|
||||
@@ -645,6 +362,17 @@ class CommandsAsync:
|
||||
command.finishAsyncCommand()
|
||||
generateTargetsTree.needcache = True
|
||||
|
||||
def findCoreBaseFiles(self, command, params):
|
||||
"""
|
||||
Find certain files in COREBASE directory. i.e. Layers
|
||||
"""
|
||||
subdir = params[0]
|
||||
filename = params[1]
|
||||
|
||||
command.cooker.findCoreBaseFiles(subdir, filename)
|
||||
command.finishAsyncCommand()
|
||||
findCoreBaseFiles.needcache = False
|
||||
|
||||
def findConfigFiles(self, command, params):
|
||||
"""
|
||||
Find config files which provide appropriate values
|
||||
@@ -744,22 +472,3 @@ class CommandsAsync:
|
||||
command.finishAsyncCommand()
|
||||
resetCooker.needcache = False
|
||||
|
||||
def clientComplete(self, command, params):
|
||||
"""
|
||||
Do the right thing when the controlling client exits
|
||||
"""
|
||||
command.cooker.clientComplete()
|
||||
command.finishAsyncCommand()
|
||||
clientComplete.needcache = False
|
||||
|
||||
def findSigInfo(self, command, params):
|
||||
"""
|
||||
Find signature info files via the signature generator
|
||||
"""
|
||||
pn = params[0]
|
||||
taskname = params[1]
|
||||
sigs = params[2]
|
||||
res = bb.siggen.find_siginfo(pn, taskname, sigs, command.cooker.data)
|
||||
bb.event.fire(bb.event.FindSigInfoResult(res), command.cooker.data)
|
||||
command.finishAsyncCommand()
|
||||
findSigInfo.needcache = False
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -41,6 +41,10 @@ class ConfigParameters(object):
|
||||
|
||||
self.options.pkgs_to_build = targets or []
|
||||
|
||||
self.options.tracking = False
|
||||
if hasattr(self.options, "show_environment") and self.options.show_environment:
|
||||
self.options.tracking = True
|
||||
|
||||
for key, val in self.options.__dict__.items():
|
||||
setattr(self, key, val)
|
||||
|
||||
@@ -69,15 +73,15 @@ class ConfigParameters(object):
|
||||
|
||||
def updateToServer(self, server, environment):
|
||||
options = {}
|
||||
for o in ["abort", "force", "invalidate_stamp",
|
||||
"verbose", "debug", "dry_run", "dump_signatures",
|
||||
for o in ["abort", "tryaltconfigs", "force", "invalidate_stamp",
|
||||
"verbose", "debug", "dry_run", "dump_signatures",
|
||||
"debug_domains", "extra_assume_provided", "profile",
|
||||
"prefile", "postfile", "server_timeout"]:
|
||||
"prefile", "postfile"]:
|
||||
options[o] = getattr(self.options, o)
|
||||
|
||||
ret, error = server.runCommand(["updateConfig", options, environment, sys.argv])
|
||||
ret, error = server.runCommand(["updateConfig", options, environment])
|
||||
if error:
|
||||
raise Exception("Unable to update the server configuration with local parameters: %s" % error)
|
||||
raise Exception("Unable to update the server configuration with local parameters: %s" % error)
|
||||
|
||||
def parseActions(self):
|
||||
# Parse any commandline into actions
|
||||
@@ -127,6 +131,8 @@ class CookerConfiguration(object):
|
||||
self.extra_assume_provided = []
|
||||
self.prefile = []
|
||||
self.postfile = []
|
||||
self.prefile_server = []
|
||||
self.postfile_server = []
|
||||
self.debug = 0
|
||||
self.cmd = None
|
||||
self.abort = True
|
||||
@@ -138,13 +144,8 @@ class CookerConfiguration(object):
|
||||
self.dump_signatures = []
|
||||
self.dry_run = False
|
||||
self.tracking = False
|
||||
self.xmlrpcinterface = []
|
||||
self.server_timeout = None
|
||||
self.interface = []
|
||||
self.writeeventlog = False
|
||||
self.server_only = False
|
||||
self.limited_deps = False
|
||||
self.runall = []
|
||||
self.runonly = []
|
||||
|
||||
self.env = {}
|
||||
|
||||
@@ -153,6 +154,7 @@ class CookerConfiguration(object):
|
||||
if key in parameters.options.__dict__:
|
||||
setattr(self, key, parameters.options.__dict__[key])
|
||||
self.env = parameters.environment.copy()
|
||||
self.tracking = parameters.tracking
|
||||
|
||||
def setServerRegIdleCallback(self, srcb):
|
||||
self.server_register_idlecallback = srcb
|
||||
@@ -168,7 +170,7 @@ class CookerConfiguration(object):
|
||||
|
||||
def __setstate__(self,state):
|
||||
for k in state:
|
||||
setattr(self, k, state[k])
|
||||
setattr(self, k, state[k])
|
||||
|
||||
|
||||
def catch_parse_error(func):
|
||||
@@ -210,7 +212,7 @@ def _inherit(bbclass, data):
|
||||
|
||||
def findConfigFile(configfile, data):
|
||||
search = []
|
||||
bbpath = data.getVar("BBPATH")
|
||||
bbpath = data.getVar("BBPATH", True)
|
||||
if bbpath:
|
||||
for i in bbpath.split(":"):
|
||||
search.append(os.path.join(i, "conf", configfile))
|
||||
@@ -225,27 +227,6 @@ def findConfigFile(configfile, data):
|
||||
|
||||
return None
|
||||
|
||||
#
|
||||
# We search for a conf/bblayers.conf under an entry in BBPATH or in cwd working
|
||||
# up to /. If that fails, we search for a conf/bitbake.conf in BBPATH.
|
||||
#
|
||||
|
||||
def findTopdir():
|
||||
d = bb.data.init()
|
||||
bbpath = None
|
||||
if 'BBPATH' in os.environ:
|
||||
bbpath = os.environ['BBPATH']
|
||||
d.setVar('BBPATH', bbpath)
|
||||
|
||||
layerconf = findConfigFile("bblayers.conf", d)
|
||||
if layerconf:
|
||||
return os.path.dirname(os.path.dirname(layerconf))
|
||||
if bbpath:
|
||||
bitbakeconf = bb.utils.which(bbpath, "conf/bitbake.conf")
|
||||
if bitbakeconf:
|
||||
return os.path.dirname(os.path.dirname(bitbakeconf))
|
||||
return None
|
||||
|
||||
class CookerDataBuilder(object):
|
||||
|
||||
def __init__(self, cookercfg, worker = False):
|
||||
@@ -271,7 +252,7 @@ class CookerDataBuilder(object):
|
||||
filtered_keys = bb.utils.approved_variables()
|
||||
bb.data.inheritFromOS(self.basedata, self.savedenv, filtered_keys)
|
||||
self.basedata.setVar("BB_ORIGENV", self.savedenv)
|
||||
|
||||
|
||||
if worker:
|
||||
self.basedata.setVar("BB_WORKERCONTEXT", "1")
|
||||
|
||||
@@ -305,13 +286,11 @@ class CookerDataBuilder(object):
|
||||
self.data_hash = self.data.get_hash()
|
||||
self.mcdata[''] = self.data
|
||||
|
||||
multiconfig = (self.data.getVar("BBMULTICONFIG") or "").split()
|
||||
multiconfig = (self.data.getVar("BBMULTICONFIG", True) or "").split()
|
||||
for config in multiconfig:
|
||||
mcdata = self.parseConfigurationFiles(self.prefiles, self.postfiles, config)
|
||||
bb.event.fire(bb.event.ConfigParsed(), mcdata)
|
||||
self.mcdata[config] = mcdata
|
||||
if multiconfig:
|
||||
bb.event.fire(bb.event.MultiConfigParsed(self.mcdata), self.data)
|
||||
|
||||
except (SyntaxError, bb.BBHandledException):
|
||||
raise bb.BBHandledException
|
||||
@@ -322,18 +301,6 @@ class CookerDataBuilder(object):
|
||||
logger.exception("Error parsing configuration files")
|
||||
raise bb.BBHandledException
|
||||
|
||||
# Create a copy so we can reset at a later date when UIs disconnect
|
||||
self.origdata = self.data
|
||||
self.data = bb.data.createCopy(self.origdata)
|
||||
self.mcdata[''] = self.data
|
||||
|
||||
def reset(self):
|
||||
# We may not have run parseBaseConfiguration() yet
|
||||
if not hasattr(self, 'origdata'):
|
||||
return
|
||||
self.data = bb.data.createCopy(self.origdata)
|
||||
self.mcdata[''] = self.data
|
||||
|
||||
def _findLayerConf(self, data):
|
||||
return findConfigFile("bblayers.conf", data)
|
||||
|
||||
@@ -353,7 +320,7 @@ class CookerDataBuilder(object):
|
||||
data.setVar("TOPDIR", os.path.dirname(os.path.dirname(layerconf)))
|
||||
data = parse_config_file(layerconf, data)
|
||||
|
||||
layers = (data.getVar('BBLAYERS') or "").split()
|
||||
layers = (data.getVar('BBLAYERS', True) or "").split()
|
||||
|
||||
data = bb.data.createCopy(data)
|
||||
approved = bb.utils.approved_variables()
|
||||
@@ -376,34 +343,7 @@ class CookerDataBuilder(object):
|
||||
data.delVar('LAYERDIR_RE')
|
||||
data.delVar('LAYERDIR')
|
||||
|
||||
bbfiles_dynamic = (data.getVar('BBFILES_DYNAMIC') or "").split()
|
||||
collections = (data.getVar('BBFILE_COLLECTIONS') or "").split()
|
||||
invalid = []
|
||||
for entry in bbfiles_dynamic:
|
||||
parts = entry.split(":", 1)
|
||||
if len(parts) != 2:
|
||||
invalid.append(entry)
|
||||
continue
|
||||
l, f = parts
|
||||
if l in collections:
|
||||
data.appendVar("BBFILES", " " + f)
|
||||
if invalid:
|
||||
bb.fatal("BBFILES_DYNAMIC entries must be of the form <collection name>:<filename pattern>, not:\n %s" % "\n ".join(invalid))
|
||||
|
||||
layerseries = set((data.getVar("LAYERSERIES_CORENAMES") or "").split())
|
||||
collections_tmp = collections[:]
|
||||
for c in collections:
|
||||
collections_tmp.remove(c)
|
||||
if c in collections_tmp:
|
||||
bb.fatal("Found duplicated BBFILE_COLLECTIONS '%s', check bblayers.conf or layer.conf to fix it." % c)
|
||||
compat = set((data.getVar("LAYERSERIES_COMPAT_%s" % c) or "").split())
|
||||
if compat and not (compat & layerseries):
|
||||
bb.fatal("Layer %s is not compatible with the core layer which only supports these series: %s (layer is compatible with %s)"
|
||||
% (c, " ".join(layerseries), " ".join(compat)))
|
||||
elif not compat and not data.getVar("BB_WORKERCONTEXT"):
|
||||
bb.warn("Layer %s should set LAYERSERIES_COMPAT_%s in its conf/layer.conf file to list the core layer names it is compatible with." % (c, c))
|
||||
|
||||
if not data.getVar("BBPATH"):
|
||||
if not data.getVar("BBPATH", True):
|
||||
msg = "The BBPATH variable is not set"
|
||||
if not layerconf:
|
||||
msg += (" and bitbake did not find a conf/bblayers.conf file in"
|
||||
@@ -418,7 +358,7 @@ class CookerDataBuilder(object):
|
||||
data = parse_config_file(p, data)
|
||||
|
||||
# Handle any INHERITs and inherit the base class
|
||||
bbclasses = ["base"] + (data.getVar('INHERIT') or "").split()
|
||||
bbclasses = ["base"] + (data.getVar('INHERIT', True) or "").split()
|
||||
for bbclass in bbclasses:
|
||||
data = _inherit(bbclass, data)
|
||||
|
||||
@@ -430,7 +370,7 @@ class CookerDataBuilder(object):
|
||||
parselog.critical("Undefined event handler function '%s'" % var)
|
||||
sys.exit(1)
|
||||
handlerln = int(data.getVarFlag(var, "lineno", False))
|
||||
bb.event.register(var, data.getVar(var, False), (data.getVarFlag(var, "eventmask") or "").split(), handlerfn, handlerln)
|
||||
bb.event.register(var, data.getVar(var, False), (data.getVarFlag(var, "eventmask", True) or "").split(), handlerfn, handlerln)
|
||||
|
||||
data.setVar('BBINCLUDED',bb.parse.get_file_depends(data))
|
||||
|
||||
|
||||
@@ -1,14 +1,48 @@
"""
Python Daemonizing helper

Originally based on code Copyright (C) 2005 Chad J. Schroeder but now heavily modified
to allow a function to be daemonized and return for bitbake use by Richard Purdie
Configurable daemon behaviors:

   1.) The current working directory set to the "/" directory.
   2.) The current file creation mode mask set to 0.
   3.) Close all open files (1024).
   4.) Redirect standard I/O streams to "/dev/null".

A failed call to fork() now raises an exception.

References:
   1) Advanced Programming in the Unix Environment: W. Richard Stevens
      http://www.apuebook.com/apue3e.html
   2) The Linux Programming Interface: Michael Kerrisk
      http://man7.org/tlpi/index.html
   3) Unix Programming Frequently Asked Questions:
      http://www.faqs.org/faqs/unix-faq/programmer/faq/

Modified to allow a function to be daemonized and return for
bitbake use by Richard Purdie
"""

import os
import sys
import io
import traceback
__author__ = "Chad J. Schroeder"
__copyright__ = "Copyright (C) 2005 Chad J. Schroeder"
__version__ = "0.2"

# Standard Python modules.
import os               # Miscellaneous OS interfaces.
import sys              # System-specific parameters and functions.

# Default daemon parameters.
# File mode creation mask of the daemon.
# For BitBake's children, we do want to inherit the parent umask.
UMASK = None

# Default maximum for the number of available file descriptors.
MAXFD = 1024

# The standard I/O file descriptors are redirected to /dev/null by default.
if (hasattr(os, "devnull")):
    REDIRECT_TO = os.devnull
else:
    REDIRECT_TO = "/dev/null"

def createDaemon(function, logfile):
    """
@@ -16,10 +50,6 @@ def createDaemon(function, logfile):
    background as a daemon, returning control to the caller.
    """

    # Ensure stdout/stderr are flushed before forking to avoid duplicate output
    sys.stdout.flush()
    sys.stderr.flush()

    try:
        # Fork a child process so the parent can exit.  This returns control to
        # the command-line or shell.  It also guarantees that the child will not
@@ -35,6 +65,36 @@ def createDaemon(function, logfile):
        # leader of the new process group, we call os.setsid().  The process is
        # also guaranteed not to have a controlling terminal.
        os.setsid()

        # Is ignoring SIGHUP necessary?
        #
        # It's often suggested that the SIGHUP signal should be ignored before
        # the second fork to avoid premature termination of the process.  The
        # reason is that when the first child terminates, all processes, e.g.
        # the second child, in the orphaned group will be sent a SIGHUP.
        #
        # "However, as part of the session management system, there are exactly
        # two cases where SIGHUP is sent on the death of a process:
        #
        #   1) When the process that dies is the session leader of a session that
        #      is attached to a terminal device, SIGHUP is sent to all processes
        #      in the foreground process group of that terminal device.
        #   2) When the death of a process causes a process group to become
        #      orphaned, and one or more processes in the orphaned group are
        #      stopped, then SIGHUP and SIGCONT are sent to all members of the
        #      orphaned group." [2]
        #
        # The first case can be ignored since the child is guaranteed not to have
        # a controlling terminal.  The second case isn't so easy to dismiss.
        # The process group is orphaned when the first child terminates and
        # POSIX.1 requires that every STOPPED process in an orphaned process
        # group be sent a SIGHUP signal followed by a SIGCONT signal.  Since the
        # second child is not STOPPED though, we can safely forego ignoring the
        # SIGHUP signal.  In any case, there are no ill-effects if it is ignored.
        #
        # import signal          # Set handlers for asynchronous events.
        # signal.signal(signal.SIGHUP, signal.SIG_IGN)

        try:
            # Fork a second child and exit immediately to prevent zombies.  This
            # causes the second child process to be orphaned, making the init
@@ -48,46 +108,86 @@ def createDaemon(function, logfile):
        except OSError as e:
            raise Exception("%s [%d]" % (e.strerror, e.errno))

        if (pid != 0):
        if (pid == 0):    # The second child.
            # We probably don't want the file mode creation mask inherited from
            # the parent, so we give the child complete control over permissions.
            if UMASK is not None:
                os.umask(UMASK)
        else:
            # Parent (the first child) of the second child.
            # exit() or _exit()?
            # _exit is like exit(), but it doesn't call any functions registered
            # with atexit (and on_exit) or any registered signal handlers.  It also
            # closes any open file descriptors, but doesn't flush any buffered output.
            # Using exit() may cause any temporary files to be unexpectedly
            # removed.  It's therefore recommended that child branches of a fork()
            # and the parent branch(es) of a daemon use _exit().
            os._exit(0)
    else:
        os.waitpid(pid, 0)
        # exit() or _exit()?
        # _exit is like exit(), but it doesn't call any functions registered
        # with atexit (and on_exit) or any registered signal handlers.  It also
        # closes any open file descriptors.  Using exit() may cause all stdio
        # streams to be flushed twice and any temporary files may be unexpectedly
        # removed.  It's therefore recommended that child branches of a fork()
        # and the parent branch(es) of a daemon use _exit().
        return

    # The second child.
    # Close all open file descriptors.  This prevents the child from keeping
    # open any file descriptors inherited from the parent.  There is a variety
    # of methods to accomplish this task.  Three are listed below.
    #
    # Try the system configuration variable, SC_OPEN_MAX, to obtain the maximum
    # number of open file descriptors to close.  If it doesn't exist, use
    # the default value (configurable).
    #
    # try:
    #     maxfd = os.sysconf("SC_OPEN_MAX")
    # except (AttributeError, ValueError):
    #     maxfd = MAXFD
    #
    # OR
    #
    # if (os.sysconf_names.has_key("SC_OPEN_MAX")):
    #     maxfd = os.sysconf("SC_OPEN_MAX")
    # else:
    #     maxfd = MAXFD
    #
    # OR
    #
    # Use the getrlimit method to retrieve the maximum file descriptor number
    # that can be opened by this process.  If there is no limit on the
    # resource, use the default value.
    #
    import resource             # Resource usage information.
    maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    if (maxfd == resource.RLIM_INFINITY):
        maxfd = MAXFD

    # Iterate through and close all file descriptors.
    # for fd in range(0, maxfd):
    #     try:
    #         os.close(fd)
    #     except OSError:    # ERROR, fd wasn't open to begin with (ignored)
    #         pass

    # Replace standard fds with our own
    with open('/dev/null', 'r') as si:
        os.dup2(si.fileno(), sys.stdin.fileno())
    # Redirect the standard I/O file descriptors to the specified file.  Since
    # the daemon has no controlling terminal, most daemons redirect stdin,
    # stdout, and stderr to /dev/null.  This is done to prevent side-effects
    # from reads and writes to the standard I/O file descriptors.

    try:
        so = open(logfile, 'a+')
        os.dup2(so.fileno(), sys.stdout.fileno())
        os.dup2(so.fileno(), sys.stderr.fileno())
    except io.UnsupportedOperation:
        sys.stdout = open(logfile, 'a+')
    # This call to open is guaranteed to return the lowest file descriptor,
    # which will be 0 (stdin), since it was closed above.
    # os.open(REDIRECT_TO, os.O_RDWR)    # standard input (0)

    # Have stdout and stderr be the same so log output matches chronologically
    # and there aren't two separate buffers
    sys.stderr = sys.stdout
    # Duplicate standard input to standard output and standard error.
    # os.dup2(0, 1)    # standard output (1)
    # os.dup2(0, 2)    # standard error (2)

    try:
        function()
    except Exception as e:
        traceback.print_exc()
    finally:
        bb.event.print_ui_queue()
        # os._exit() doesn't flush open files like os.exit() does. Manually flush
        # stdout and stderr so that any logging output will be seen, particularly
        # exception tracebacks.
        sys.stdout.flush()
        sys.stderr.flush()
        os._exit(0)

    si = open('/dev/null', 'r')
    so = open(logfile, 'w')
    se = so


    # Replace those fds with our own
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())

    function()

    os._exit(0)

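createDaemon() above double-forks, detaches from the controlling terminal, points the stdio descriptors at a logfile, runs the supplied callable, and exits via os._exit() in the daemon branch while the parent returns. A minimal usage sketch, with a hypothetical worker function and log path:

    # Sketch only: the parent returns from createDaemon() and continues;
    # the grandchild process runs serve() and never returns.
    import time

    def serve():                              # hypothetical long-running worker
        while True:
            print("still alive")              # ends up in the logfile
            time.sleep(60)

    createDaemon(serve, "/tmp/daemon.log")    # hypothetical log path
    print("parent continues here")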
@@ -38,7 +38,6 @@ the speed is more critical here.
# Based on functions from the base bb module, Copyright 2003 Holger Schurig

import sys, os, re
import hashlib
if sys.argv[0][-5:] == "pydoc":
    path = os.path.dirname(os.path.dirname(sys.argv[1]))
else:
@@ -79,6 +78,59 @@ def initVar(var, d):
    """Non-destructive var init for data structure"""
    d.initVar(var)


def setVar(var, value, d):
    """Set a variable to a given value"""
    d.setVar(var, value)


def getVar(var, d, exp = False):
    """Gets the value of a variable"""
    return d.getVar(var, exp)


def renameVar(key, newkey, d):
    """Renames a variable from key to newkey"""
    d.renameVar(key, newkey)

def delVar(var, d):
    """Removes a variable from the data set"""
    d.delVar(var)

def appendVar(var, value, d):
    """Append additional value to a variable"""
    d.appendVar(var, value)

def setVarFlag(var, flag, flagvalue, d):
    """Set a flag for a given variable to a given value"""
    d.setVarFlag(var, flag, flagvalue)

def getVarFlag(var, flag, d):
    """Gets given flag from given var"""
    return d.getVarFlag(var, flag, False)

def delVarFlag(var, flag, d):
    """Removes a given flag from the variable's flags"""
    d.delVarFlag(var, flag)

def setVarFlags(var, flags, d):
    """Set the flags for a given variable

    Note:
        setVarFlags will not clear previous
        flags. Think of this method as
        addVarFlags
    """
    d.setVarFlags(var, flags)

def getVarFlags(var, d):
    """Gets a variable's flags"""
    return d.getVarFlags(var)

def delVarFlags(var, d):
    """Removes a variable's flags"""
    d.delVarFlags(var)

def keys(d):
    """Return a list of keys in d"""
    return d.keys()
@@ -122,7 +174,7 @@ def inheritFromOS(d, savedenv, permitted):
    for s in savedenv.keys():
        if s in permitted:
            try:
                d.setVar(s, savedenv.getVar(s), op = 'from env')
                d.setVar(s, savedenv.getVar(s, True), op = 'from env')
                if s in exportlist:
                    d.setVarFlag(s, "export", True, op = 'auto env export')
            except TypeError:
@@ -142,7 +194,7 @@ def emit_var(var, o=sys.__stdout__, d = init(), all=False):
    try:
        if all:
            oval = d.getVar(var, False)
        val = d.getVar(var)
        val = d.getVar(var, True)
    except (KeyboardInterrupt, bb.build.FuncFailed):
        raise
    except Exception as exc:
@@ -197,7 +249,7 @@ def emit_env(o=sys.__stdout__, d = init(), all=False):
    keys = sorted((key for key in d.keys() if not key.startswith("__")), key=isfunc)
    grouped = groupby(keys, isfunc)
    for isfunc, keys in grouped:
        for key in sorted(keys):
        for key in keys:
            emit_var(key, o, d, all and not isfunc) and o.write('\n')

def exported_keys(d):
@@ -209,9 +261,9 @@ def exported_vars(d):
    k = list(exported_keys(d))
    for key in k:
        try:
            value = d.getVar(key)
            value = d.getVar(key, True)
        except Exception as err:
            bb.warn("%s: Unable to export ${%s}: %s" % (d.getVar("FILE"), key, err))
            bb.warn("%s: Unable to export ${%s}: %s" % (d.getVar("FILE", True), key, err))
            continue

        if value is not None:
@@ -221,13 +273,13 @@ def emit_func(func, o=sys.__stdout__, d = init()):
    """Emits all items in the data store in a format such that it can be sourced by a shell."""

    keys = (key for key in d.keys() if not key.startswith("__") and not d.getVarFlag(key, "func", False))
    for key in sorted(keys):
    for key in keys:
        emit_var(key, o, d, False)

    o.write('\n')
    emit_var(func, o, d, False) and o.write('\n')
    newdeps = bb.codeparser.ShellParser(func, logger).parse_shell(d.getVar(func))
    newdeps |= set((d.getVarFlag(func, "vardeps") or "").split())
    newdeps = bb.codeparser.ShellParser(func, logger).parse_shell(d.getVar(func, True))
    newdeps |= set((d.getVarFlag(func, "vardeps", True) or "").split())
    seen = set()
    while newdeps:
        deps = newdeps
@@ -236,8 +288,8 @@ def emit_func(func, o=sys.__stdout__, d = init()):
        for dep in deps:
            if d.getVarFlag(dep, "func", False) and not d.getVarFlag(dep, "python", False):
                emit_var(dep, o, d, False) and o.write('\n')
                newdeps |= bb.codeparser.ShellParser(dep, logger).parse_shell(d.getVar(dep))
                newdeps |= set((d.getVarFlag(dep, "vardeps") or "").split())
                newdeps |= bb.codeparser.ShellParser(dep, logger).parse_shell(d.getVar(dep, True))
                newdeps |= set((d.getVarFlag(dep, "vardeps", True) or "").split())
        newdeps -= seen

_functionfmt = """
@@ -260,7 +312,7 @@ def emit_func_python(func, o=sys.__stdout__, d = init()):
    pp = bb.codeparser.PythonParser(func, logger)
    pp.parse_python(d.getVar(func, False))
    newdeps = pp.execs
    newdeps |= set((d.getVarFlag(func, "vardeps") or "").split())
    newdeps |= set((d.getVarFlag(func, "vardeps", True) or "").split())
    seen = set()
    while newdeps:
        deps = newdeps
@@ -272,7 +324,7 @@ def emit_func_python(func, o=sys.__stdout__, d = init()):
            pp = bb.codeparser.PythonParser(dep, logger)
            pp.parse_python(d.getVar(dep, False))
            newdeps |= pp.execs
            newdeps |= set((d.getVarFlag(dep, "vardeps") or "").split())
            newdeps |= set((d.getVarFlag(dep, "vardeps", True) or "").split())
        newdeps -= seen

def update_data(d):
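Both emit_func variants above walk variable dependencies to a fixed point: each pass parses the newly discovered functions and adds their references until nothing new appears. A reduced sketch of that closure loop over a hypothetical dependency map:

    # Sketch only: fixed-point walk over a hypothetical dep graph,
    # mirroring the "while newdeps" loops above.
    graph = {"do_compile": {"do_configure"},
             "do_configure": {"oe_runmake"},
             "oe_runmake": set()}
    newdeps = set(graph["do_compile"])
    seen = set()
    while newdeps:
        deps = newdeps
        seen |= deps
        newdeps = set()
        for dep in deps:
            newdeps |= graph.get(dep, set())
        newdeps -= seen          # keep only never-visited names
    # seen == {"do_configure", "oe_runmake"}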
@@ -284,58 +336,49 @@ def build_dependencies(key, keys, shelldeps, varflagsexcl, d):
    try:
        if key[-1] == ']':
            vf = key[:-1].split('[')
            value, parser = d.getVarFlag(vf[0], vf[1], False, retparser=True)
            value = d.getVarFlag(vf[0], vf[1], False)
            parser = d.expandWithRefs(value, key)
            deps |= parser.references
            deps = deps | (keys & parser.execs)
            return deps, value
        varflags = d.getVarFlags(key, ["vardeps", "vardepvalue", "vardepsexclude", "exports", "postfuncs", "prefuncs", "lineno", "filename"]) or {}
        vardeps = varflags.get("vardeps")
        value = d.getVar(key, False)

        def handle_contains(value, contains, d):
            newvalue = ""
            for k in sorted(contains):
                l = (d.getVar(k) or "").split()
                for item in sorted(contains[k]):
                    for word in item.split():
                        if not word in l:
                            newvalue += "\n%s{%s} = Unset" % (k, item)
                            break
                l = (d.getVar(k, True) or "").split()
                for word in sorted(contains[k]):
                    if word in l:
                        newvalue += "\n%s{%s} = Set" % (k, word)
                    else:
                        newvalue += "\n%s{%s} = Set" % (k, item)
                        newvalue += "\n%s{%s} = Unset" % (k, word)
            if not newvalue:
                return value
            if not value:
                return newvalue
            return value + newvalue

        def handle_remove(value, deps, removes, d):
            for r in sorted(removes):
                r2 = d.expandWithRefs(r, None)
                value += "\n_remove of %s" % r
                deps |= r2.references
                deps = deps | (keys & r2.execs)
            return value

        if "vardepvalue" in varflags:
            value = varflags.get("vardepvalue")
            value = varflags.get("vardepvalue")
        elif varflags.get("func"):
            if varflags.get("python"):
                value = d.getVarFlag(key, "_content", False)
                parser = bb.codeparser.PythonParser(key, logger)
                if value and "\t" in value:
                    logger.warning("Variable %s contains tabs, please remove these (%s)" % (key, d.getVar("FILE", True)))
                parser.parse_python(value, filename=varflags.get("filename"), lineno=varflags.get("lineno"))
                deps = deps | parser.references
                deps = deps | (keys & parser.execs)
                value = handle_contains(value, parser.contains, d)
            else:
                value, parsedvar = d.getVarFlag(key, "_content", False, retparser=True)
                parsedvar = d.expandWithRefs(value, key)
                parser = bb.codeparser.ShellParser(key, logger)
                parser.parse_shell(parsedvar.value)
                deps = deps | shelldeps
                deps = deps | parsedvar.references
                deps = deps | (keys & parser.execs) | (keys & parsedvar.execs)
                value = handle_contains(value, parsedvar.contains, d)
                if hasattr(parsedvar, "removes"):
                    value = handle_remove(value, deps, parsedvar.removes, d)
                if vardeps is None:
                    parser.log.flush()
            if "prefuncs" in varflags:
@@ -345,12 +388,10 @@ def build_dependencies(key, keys, shelldeps, varflagsexcl, d):
            if "exports" in varflags:
                deps = deps | set(varflags["exports"].split())
        else:
            value, parser = d.getVarFlag(key, "_content", False, retparser=True)
            parser = d.expandWithRefs(value, key)
            deps |= parser.references
            deps = deps | (keys & parser.execs)
            value = handle_contains(value, parser.contains, d)
            if hasattr(parser, "removes"):
                value = handle_remove(value, deps, parser.removes, d)

        if "vardepvalueexclude" in varflags:
            exclude = varflags.get("vardepvalueexclude")
@@ -369,8 +410,6 @@ def build_dependencies(key, keys, shelldeps, varflagsexcl, d):

        deps |= set((vardeps or "").split())
        deps -= set(varflags.get("vardepsexclude", "").split())
    except bb.parse.SkipRecipe:
        raise
    except Exception as e:
        bb.warn("Exception during build_dependencies for %s" % key)
        raise
@@ -382,7 +421,7 @@ def generate_dependencies(d):

    keys = set(key for key in d if not key.startswith("__"))
    shelldeps = set(key for key in d.getVar("__exportlist", False) if d.getVarFlag(key, "export", False) and not d.getVarFlag(key, "unexport", False))
    varflagsexcl = d.getVar('BB_SIGNATURE_EXCLUDE_FLAGS')
    varflagsexcl = d.getVar('BB_SIGNATURE_EXCLUDE_FLAGS', True)

    deps = {}
    values = {}
@@ -404,43 +443,6 @@ def generate_dependencies(d):
        #print "For %s: %s" % (task, str(deps[task]))
    return tasklist, deps, values

def generate_dependency_hash(tasklist, gendeps, lookupcache, whitelist, fn):
    taskdeps = {}
    basehash = {}

    for task in tasklist:
        data = lookupcache[task]

        if data is None:
            bb.error("Task %s from %s seems to be empty?!" % (task, fn))
            data = ''

        gendeps[task] -= whitelist
        newdeps = gendeps[task]
        seen = set()
        while newdeps:
            nextdeps = newdeps
            seen |= nextdeps
            newdeps = set()
            for dep in nextdeps:
                if dep in whitelist:
                    continue
                gendeps[dep] -= whitelist
                newdeps |= gendeps[dep]
            newdeps -= seen

        alldeps = sorted(seen)
        for dep in alldeps:
            data = data + dep
            var = lookupcache[dep]
            if var is not None:
                data = data + str(var)
        k = fn + "." + task
        basehash[k] = hashlib.sha256(data.encode("utf-8")).hexdigest()
        taskdeps[task] = alldeps

    return taskdeps, basehash

def inherits_class(klass, d):
    val = d.getVar('__inherit_cache', False) or []
    needle = os.path.join('classes', '%s.bbclass' % klass)
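generate_dependency_hash above folds each task's transitively collected variable names and values into one digest, so any change in a dependency changes the task's basehash. A reduced sketch of the hashing step with hypothetical values:

    # Sketch only: hash a task's name-ordered dependency values, as the
    # loop above does after whitelist filtering.
    import hashlib

    lookupcache = {"CFLAGS": "-O2", "CC": "gcc"}     # hypothetical
    data = "do_compile-script-body"                  # hypothetical task body
    for dep in sorted(lookupcache):
        data += dep
        if lookupcache[dep] is not None:
            data += str(lookupcache[dep])
    basehash = hashlib.sha256(data.encode("utf-8")).hexdigest()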
@@ -39,11 +39,9 @@ from bb.COW import COWDictBase
logger = logging.getLogger("BitBake.Data")

__setvar_keyword__ = ["_append", "_prepend", "_remove"]
__setvar_regexp__ = re.compile(r'(?P<base>.*?)(?P<keyword>_append|_prepend|_remove)(_(?P<add>[^A-Z]*))?$')
__expand_var_regexp__ = re.compile(r"\${[a-zA-Z0-9\-_+./~]+?}")
__setvar_regexp__ = re.compile('(?P<base>.*?)(?P<keyword>_append|_prepend|_remove)(_(?P<add>.*))?$')
__expand_var_regexp__ = re.compile(r"\${[^{}@\n\t :]+}")
__expand_python_regexp__ = re.compile(r"\${@.+?}")
__whitespace_split__ = re.compile(r'(\s)')
__override_regexp__ = re.compile(r'[a-z0-9]+')

def infer_caller_details(loginfo, parent = False, varval = True):
    """Save the caller the trouble of specifying everything."""
@@ -106,7 +104,11 @@ class VariableParse:
        if self.varname and key:
            if self.varname == key:
                raise Exception("variable %s references itself!" % self.varname)
        var = self.d.getVarFlag(key, "_content")
        if key in self.d.expand_cache:
            varparse = self.d.expand_cache[key]
            var = varparse.value
        else:
            var = self.d.getVarFlag(key, "_content", True)
        self.references.add(key)
        if var is not None:
            return var
@@ -114,25 +116,13 @@ class VariableParse:
            return match.group()

    def python_sub(self, match):
        if isinstance(match, str):
            code = match
        else:
            code = match.group()[3:-1]

        if "_remote_data" in self.d:
            connector = self.d["_remote_data"]
            return connector.expandPythonRef(self.varname, code, self.d)

        if self.varname:
            varname = 'Var <%s>' % self.varname
        else:
            varname = '<expansion>'
        codeobj = compile(code.strip(), varname, "eval")
        code = match.group()[3:-1]
        codeobj = compile(code.strip(), self.varname or "<expansion>", "eval")

        parser = bb.codeparser.PythonParser(self.varname, logger)
        parser.parse_python(code)
        if self.varname:
            vardeps = self.d.getVarFlag(self.varname, "vardeps")
            vardeps = self.d.getVarFlag(self.varname, "vardeps", True)
            if vardeps is None:
                parser.log.flush()
        else:
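python_sub above evaluates inline ${@...} expressions during variable expansion. A hedged sketch of what that evaluation amounts to, outside the datastore:

    # Sketch only: a "${@...}" fragment is compiled and eval()ed, which is
    # what makes conf lines like  FOO = "${@'1.2.3'.split('.')[0]}"  work.
    code = "'1.2.3'.split('.')[0]"                # body of a ${@...} fragment
    codeobj = compile(code.strip(), "<expansion>", "eval")
    result = str(eval(codeobj))                   # "1"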
@@ -156,7 +146,7 @@ class DataContext(dict):
        self['d'] = metadata

    def __missing__(self, key):
        value = self.metadata.getVar(key)
        value = self.metadata.getVar(key, True)
        if value is None or self.metadata.getVarFlag(key, 'func', False):
            raise KeyError(key)
        else:
@@ -232,19 +222,6 @@ class VariableHistory(object):
        new.variables = self.variables.copy()
        return new

    def __getstate__(self):
        vardict = {}
        for k, v in self.variables.iteritems():
            vardict[k] = v
        return {'dataroot': self.dataroot,
                'variables': vardict}

    def __setstate__(self, state):
        self.dataroot = state['dataroot']
        self.variables = COWDictBase.copy()
        for k, v in state['variables'].items():
            self.variables[k] = v

    def record(self, *kwonly, **loginfo):
        if not self.dataroot._tracking:
            return
@@ -269,26 +246,11 @@ class VariableHistory(object):
            return
        self.variables[var].append(loginfo.copy())

    def rename_variable_hist(self, oldvar, newvar):
        if not self.dataroot._tracking:
            return
        if oldvar not in self.variables:
            return
        if newvar not in self.variables:
            self.variables[newvar] = []
        for i in self.variables[oldvar]:
            self.variables[newvar].append(i.copy())

    def variable(self, var):
        remote_connector = self.dataroot.getVar('_remote_data', False)
        if remote_connector:
            varhistory = remote_connector.getVarHistory(var)
        else:
            varhistory = []

        if var in self.variables:
            varhistory.extend(self.variables[var])
        return varhistory
            return self.variables[var]
        else:
            return []

    def emit(self, var, oval, val, o, d):
        history = self.variable(var)
@@ -356,7 +318,7 @@ class VariableHistory(object):
        the files in which they were added.
        """
        history = self.variable(var)
        finalitems = (d.getVar(var) or '').split()
        finalitems = (d.getVar(var, True) or '').split()
        filemap = {}
        isset = False
        for event in history:
@@ -413,6 +375,9 @@ class DataSmart(MutableMapping):
        if not isinstance(s, str): # sanity check
            return VariableParse(varname, self, s)

        if varname and varname in self.expand_cache:
            return self.expand_cache[varname]

        varparse = VariableParse(varname, self)

        while s.find('${') != -1:
@@ -432,11 +397,13 @@ class DataSmart(MutableMapping):
        except bb.parse.SkipRecipe:
            raise
        except Exception as exc:
            tb = sys.exc_info()[2]
            raise ExpansionError(varname, s, exc).with_traceback(tb) from exc
            raise ExpansionError(varname, s, exc) from exc

        varparse.value = s

        if varname:
            self.expand_cache[varname] = varparse

        return varparse

    def expand(self, s, varname = None):
@@ -459,11 +426,11 @@ class DataSmart(MutableMapping):
            # Can end up here recursively so setup dummy values
            self.overrides = []
            self.overridesset = set()
            self.overrides = (self.getVar("OVERRIDES") or "").split(":") or []
            self.overrides = (self.getVar("OVERRIDES", True) or "").split(":") or []
            self.overridesset = set(self.overrides)
            self.inoverride = False
            self.expand_cache = {}
            newoverrides = (self.getVar("OVERRIDES") or "").split(":") or []
            newoverrides = (self.getVar("OVERRIDES", True) or "").split(":") or []
            if newoverrides == self.overrides:
                break
            self.overrides = newoverrides
@@ -480,22 +447,17 @@ class DataSmart(MutableMapping):
        dest = self.dict
        while dest:
            if var in dest:
                return dest[var], self.overridedata.get(var, None)

            if "_remote_data" in dest:
                connector = dest["_remote_data"]["_content"]
                return connector.getVar(var)
                return dest[var]

            if "_data" not in dest:
                break
            dest = dest["_data"]
        return None, self.overridedata.get(var, None)

    def _makeShadowCopy(self, var):
        if var in self.dict:
            return

        local_var, _ = self._findVar(var)
        local_var = self._findVar(var)

        if local_var:
            self.dict[var] = copy.copy(local_var)
@@ -505,20 +467,13 @@ class DataSmart(MutableMapping):

    def setVar(self, var, value, **loginfo):
        #print("var=" + str(var) + " val=" + str(value))
        self.expand_cache = {}
        parsing=False
        if 'parsing' in loginfo:
            parsing=True

        if '_remote_data' in self.dict:
            connector = self.dict["_remote_data"]["_content"]
            res = connector.setVar(var, value)
            if not res:
                return

        if 'op' not in loginfo:
            loginfo['op'] = "set"

        self.expand_cache = {}
        match = __setvar_regexp__.match(var)
        if match and match.group("keyword") in __setvar_keyword__:
            base = match.group('base')
@@ -554,8 +509,6 @@ class DataSmart(MutableMapping):
                del self.dict[var]["_append"]
            if "_prepend" in self.dict[var]:
                del self.dict[var]["_prepend"]
            if "_remove" in self.dict[var]:
                del self.dict[var]["_remove"]
            if var in self.overridedata:
                active = []
                self.need_overrides()
@@ -588,7 +541,7 @@ class DataSmart(MutableMapping):
                nextnew = set()
                self.overridevars.update(new)
                for i in new:
                    vardata = self.expandWithRefs(self.getVar(i), i)
                    vardata = self.expandWithRefs(self.getVar(i, True), i)
                    nextnew.update(vardata.references)
                    nextnew.update(vardata.contains.keys())
                new = nextnew
@@ -598,7 +551,7 @@ class DataSmart(MutableMapping):
        # aka pay the cookie monster
        override = var[var.rfind('_')+1:]
        shortvar = var[:var.rfind('_')]
        while override and __override_regexp__.match(override):
        while override and override.islower():
            if shortvar not in self.overridedata:
                self.overridedata[shortvar] = []
            if [var, override] not in self.overridedata[shortvar]:
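The loop above peels override suffixes off a variable name one underscore segment at a time, registering each (name, override) pair for later resolution. A hedged sketch of the peeling with a hypothetical variable name:

    # Sketch only: peel lowercase override suffixes from a hypothetical
    # name, mirroring the "while override" loop above.
    var = "DEPENDS_qemuarm_linux"
    overridedata = {}
    override = var[var.rfind('_') + 1:]      # "linux"
    shortvar = var[:var.rfind('_')]          # "DEPENDS_qemuarm"
    while override and override.islower():
        overridedata.setdefault(shortvar, []).append([var, override])
        override = shortvar[shortvar.rfind('_') + 1:]
        shortvar = shortvar[:shortvar.rfind('_')]
    # overridedata == {"DEPENDS_qemuarm": [["DEPENDS_qemuarm_linux", "linux"]],
    #                  "DEPENDS": [["DEPENDS_qemuarm_linux", "qemuarm"]]}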
@@ -612,22 +565,15 @@ class DataSmart(MutableMapping):
            if len(shortvar) == 0:
                override = None

    def getVar(self, var, expand=True, noweakdefault=False, parsing=False):
    def getVar(self, var, expand, noweakdefault=False, parsing=False):
        return self.getVarFlag(var, "_content", expand, noweakdefault, parsing)

    def renameVar(self, key, newkey, **loginfo):
        """
        Rename the variable key to newkey
        """
        if '_remote_data' in self.dict:
            connector = self.dict["_remote_data"]["_content"]
            res = connector.renameVar(key, newkey)
            if not res:
                return

        val = self.getVar(key, 0, parsing=True)
        if val is not None:
            self.varhistory.rename_variable_hist(key, newkey)
            loginfo['variable'] = newkey
            loginfo['op'] = 'rename from %s' % key
            loginfo['detail'] = val
@@ -669,16 +615,10 @@ class DataSmart(MutableMapping):
        self.setVar(var + "_prepend", value, ignore=True, parsing=True)

    def delVar(self, var, **loginfo):
        self.expand_cache = {}
        if '_remote_data' in self.dict:
            connector = self.dict["_remote_data"]["_content"]
            res = connector.delVar(var)
            if not res:
                return

        loginfo['detail'] = ""
        loginfo['op'] = 'del'
        self.varhistory.record(**loginfo)
        self.expand_cache = {}
        self.dict[var] = {}
        if var in self.overridedata:
            del self.overridedata[var]
@@ -702,12 +642,6 @@ class DataSmart(MutableMapping):

    def setVarFlag(self, var, flag, value, **loginfo):
        self.expand_cache = {}
        if '_remote_data' in self.dict:
            connector = self.dict["_remote_data"]["_content"]
            res = connector.setVarFlag(var, flag, value)
            if not res:
                return

        if 'op' not in loginfo:
            loginfo['op'] = "set"
        loginfo['flag'] = flag
@@ -728,26 +662,14 @@ class DataSmart(MutableMapping):
            self.dict["__exportlist"]["_content"] = set()
        self.dict["__exportlist"]["_content"].add(var)

    def getVarFlag(self, var, flag, expand=True, noweakdefault=False, parsing=False, retparser=False):
        if flag == "_content":
            cachename = var
        else:
            if not flag:
                bb.warn("Calling getVarFlag with flag unset is invalid")
                return None
            cachename = var + "[" + flag + "]"

        if expand and cachename in self.expand_cache:
            return self.expand_cache[cachename].value

        local_var, overridedata = self._findVar(var)
    def getVarFlag(self, var, flag, expand, noweakdefault=False, parsing=False):
        local_var = self._findVar(var)
        value = None
        removes = set()
        if flag == "_content" and overridedata is not None and not parsing:
        if flag == "_content" and var in self.overridedata and not parsing:
            match = False
            active = {}
            self.need_overrides()
            for (r, o) in overridedata:
            for (r, o) in self.overridedata[var]:
                # What about double overrides both with "_" in the name?
                if o in self.overridesset:
                    active[o] = r
@@ -769,11 +691,7 @@ class DataSmart(MutableMapping):
                    match = active[a]
                    del active[a]
            if match:
                value, subparser = self.getVarFlag(match, "_content", False, retparser=True)
                if hasattr(subparser, "removes"):
                    # We have to carry the removes from the overridden variable to apply at the
                    # end of processing
                    removes = subparser.removes
                value = self.getVar(match, False)

        if local_var is not None and value is None:
            if flag in local_var:
@@ -809,13 +727,17 @@ class DataSmart(MutableMapping):
                    if match:
                        value = r + value

        parser = None
        if expand or retparser:
            parser = self.expandWithRefs(value, cachename)
        if expand:
            value = parser.value
        if expand and value:
            # Only getvar (flag == _content) hits the expand cache
            cachename = None
            if flag == "_content":
                cachename = var
            else:
                cachename = var + "[" + flag + "]"
            value = self.expand(value, cachename)

        if value and flag == "_content" and local_var is not None and "_remove" in local_var and not parsing:
        if value and flag == "_content" and local_var is not None and "_remove" in local_var:
            removes = []
            self.need_overrides()
            for (r, o) in local_var["_remove"]:
                match = True
@@ -824,46 +746,21 @@ class DataSmart(MutableMapping):
                    if not o2 in self.overrides:
                        match = False
                if match:
                    removes.add(r)

        if value and flag == "_content" and not parsing:
            if removes and parser:
                expanded_removes = {}
                for r in removes:
                    expanded_removes[r] = self.expand(r).split()

                parser.removes = set()
                val = ""
                for v in __whitespace_split__.split(parser.value):
                    skip = False
                    for r in removes:
                        if v in expanded_removes[r]:
                            parser.removes.add(r)
                            skip = True
                    if skip:
                        continue
                    val = val + v
                parser.value = val
                if expand:
                    value = parser.value

        if parser:
            self.expand_cache[cachename] = parser

        if retparser:
            return value, parser
                    removes.extend(self.expand(r).split())

        if removes:
            filtered = filter(lambda v: v not in removes,
                              value.split())
            value = " ".join(filtered)
            if expand and var in self.expand_cache:
                # We need to ensure the expand cache has the correct value
                # flag == "_content" here
                self.expand_cache[var].value = value
        return value

    def delVarFlag(self, var, flag, **loginfo):
        self.expand_cache = {}
        if '_remote_data' in self.dict:
            connector = self.dict["_remote_data"]["_content"]
            res = connector.delVarFlag(var, flag)
            if not res:
                return

        local_var, _ = self._findVar(var)
        local_var = self._findVar(var)
        if not local_var:
            return
        if not var in self.dict:
@@ -906,7 +803,7 @@ class DataSmart(MutableMapping):
            self.dict[var][i] = flags[i]

    def getVarFlags(self, var, expand = False, internalflags=False):
        local_var, _ = self._findVar(var)
        local_var = self._findVar(var)
        flags = {}

        if local_var:
@@ -948,7 +845,7 @@ class DataSmart(MutableMapping):
        data = DataSmart()
        data.dict["_data"] = self.dict
        data.varhistory = self.varhistory.copy()
        data.varhistory.dataroot = data
        data.varhistory.datasmart = data
        data.inchistory = self.inchistory.copy()

        data._tracking = self._tracking
@@ -979,7 +876,7 @@ class DataSmart(MutableMapping):

    def localkeys(self):
        for key in self.dict:
            if key not in ['_data', '_remote_data']:
            if key != '_data':
                yield key

    def __iter__(self):
@@ -988,7 +885,7 @@ class DataSmart(MutableMapping):
        def keylist(d):
            klist = set()
            for key in d:
                if key in ["_data", "_remote_data"]:
                if key == "_data":
                    continue
                if key in deleted:
                    continue
@@ -1002,13 +899,6 @@ class DataSmart(MutableMapping):
            if "_data" in d:
                klist |= keylist(d["_data"])

            if "_remote_data" in d:
                connector = d["_remote_data"]["_content"]
                for key in connector.getKeys():
                    if key in deleted:
                        continue
                    klist.add(key)

            return klist

        self.need_overrides()
@@ -1046,8 +936,9 @@ class DataSmart(MutableMapping):
        data = {}
        d = self.createCopy()
        bb.data.expandKeys(d)
        bb.data.update_data(d)

        config_whitelist = set((d.getVar("BB_HASHCONFIG_WHITELIST") or "").split())
        config_whitelist = set((d.getVar("BB_HASHCONFIG_WHITELIST", True) or "").split())
        keys = set(key for key in iter(d) if not key.startswith("__"))
        for key in keys:
            if key in config_whitelist:
@@ -1066,6 +957,7 @@ class DataSmart(MutableMapping):

        for key in ["__BBTASKS", "__BBANONFUNCS", "__BBHANDLERS"]:
            bb_list = d.getVar(key, False) or []
            bb_list.sort()
            data.update({key:str(bb_list)})

            if key == "__BBANONFUNCS":
@@ -1074,4 +966,4 @@ class DataSmart(MutableMapping):
                data.update({i:value})

        data_str = str([(k, data[k]) for k in sorted(data.keys())])
        return hashlib.sha256(data_str.encode("utf-8")).hexdigest()
        return hashlib.md5(data_str.encode("utf-8")).hexdigest()
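getConfigHash above serializes the sorted key/value pairs and digests them; the only difference between the two sides is the hash algorithm (sha256 versus md5). A small sketch of the digest step with hypothetical configuration values:

    # Sketch only: digest a sorted key/value snapshot, as getConfigHash does.
    import hashlib

    data = {"MACHINE": "qemux86", "DISTRO": "poky"}      # hypothetical
    data_str = str([(k, data[k]) for k in sorted(data.keys())])
    confighash = hashlib.sha256(data_str.encode("utf-8")).hexdigest()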
@@ -48,16 +48,6 @@ class Event(object):
    def __init__(self):
        self.pid = worker_pid


class HeartbeatEvent(Event):
    """Triggered at regular time intervals of 10 seconds. Other events can fire much more often
       (runQueueTaskStarted when there are many short tasks) or not at all for long periods
       of time (again runQueueTaskStarted, when there is just one long-running task), so this
       event is more suitable for doing some task-independent work occasionally."""
    def __init__(self, time):
        Event.__init__(self)
        self.time = time

Registered = 10
AlreadyRegistered = 14

@@ -141,9 +131,6 @@ def print_ui_queue():
        logger = logging.getLogger("BitBake")
        if not _uiready:
            from bb.msg import BBLogFormatter
            # Flush any existing buffered content
            sys.stdout.flush()
            sys.stderr.flush()
            stdout = logging.StreamHandler(sys.stdout)
            stderr = logging.StreamHandler(sys.stderr)
            formatter = BBLogFormatter("%(levelname)s: %(message)s")
@@ -152,34 +139,23 @@ def print_ui_queue():

        # First check to see if we have any proper messages
        msgprint = False
        msgerrs = False

        # Should we print to stderr?
        for event in ui_queue[:]:
            if isinstance(event, logging.LogRecord) and event.levelno >= logging.WARNING:
                msgerrs = True
                break

        if msgerrs:
            logger.addHandler(stderr)
        else:
            logger.addHandler(stdout)

        for event in ui_queue[:]:
            if isinstance(event, logging.LogRecord):
                if event.levelno > logging.DEBUG:
                    if event.levelno >= logging.WARNING:
                        logger.addHandler(stderr)
                    else:
                        logger.addHandler(stdout)
                    logger.handle(event)
                    msgprint = True
        if msgprint:
            return

        # Nope, so just print all of the messages we have (including debug messages)
        if not msgprint:
            for event in ui_queue[:]:
                if isinstance(event, logging.LogRecord):
                    logger.handle(event)
        if msgerrs:
            logger.removeHandler(stderr)
        else:
            logger.removeHandler(stdout)
        logger.addHandler(stdout)
        for event in ui_queue[:]:
            if isinstance(event, logging.LogRecord):
                logger.handle(event)

def fire_ui_handlers(event, d):
    global _thread_lock
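print_ui_queue drains log records buffered before a UI attached; the newer side routes the whole batch to stderr when any warning-or-worse record exists, instead of picking a handler per record. A reduced sketch of that routing decision:

    # Sketch only: pick one stream for the buffered batch, as the newer
    # print_ui_queue above does.
    import logging
    import sys

    ui_queue = [logging.LogRecord("BitBake", logging.WARNING, __file__, 1,
                                  "something odd", None, None)]   # hypothetical
    msgerrs = any(isinstance(e, logging.LogRecord) and e.levelno >= logging.WARNING
                  for e in ui_queue)
    handler = logging.StreamHandler(sys.stderr if msgerrs else sys.stdout)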
@@ -226,12 +202,6 @@ def fire(event, d):
    if worker_fire:
        worker_fire(event, d)
    else:
        # If messages have been queued up, clear the queue
        global _uiready, ui_queue
        if _uiready and ui_queue:
            for queue_event in ui_queue:
                fire_ui_handlers(queue_event, d)
            ui_queue = []
        fire_ui_handlers(event, d)

def fire_from_worker(event, d):
@@ -284,11 +254,6 @@ def register(name, handler, mask=None, filename=None, lineno=None):
def remove(name, handler):
    """Remove an Event handler"""
    _handlers.pop(name)
    if name in _catchall_handlers:
        _catchall_handlers.pop(name)
    for event in _event_handler_map.keys():
        if name in _event_handler_map[event]:
            _event_handler_map[event].pop(name)

def get_handlers():
    return _handlers
@@ -302,28 +267,20 @@ def set_eventfilter(func):
    _eventfilter = func

def register_UIHhandler(handler, mainui=False):
    if mainui:
        global _uiready
        _uiready = True
    bb.event._ui_handler_seq = bb.event._ui_handler_seq + 1
    _ui_handlers[_ui_handler_seq] = handler
    level, debug_domains = bb.msg.constructLogOptions()
    _ui_logfilters[_ui_handler_seq] = UIEventFilter(level, debug_domains)
    if mainui:
        global _uiready
        _uiready = _ui_handler_seq
    return _ui_handler_seq

def unregister_UIHhandler(handlerNum, mainui=False):
    if mainui:
        global _uiready
        _uiready = False
def unregister_UIHhandler(handlerNum):
    if handlerNum in _ui_handlers:
        del _ui_handlers[handlerNum]
    return

def get_uihandler():
    if _uiready is False:
        return None
    return _uiready

# Class to allow filtering of events and specific filtering of LogRecords *before* we put them over the IPC
class UIEventFilter(object):
    def __init__(self, level, debug_domains):
@@ -386,30 +343,13 @@ class OperationProgress(Event):
class ConfigParsed(Event):
    """Configuration Parsing Complete"""

class MultiConfigParsed(Event):
    """Multi-Config Parsing Complete"""
    def __init__(self, mcdata):
        self.mcdata = mcdata
        Event.__init__(self)

class RecipeEvent(Event):
    def __init__(self, fn):
        self.fn = fn
        Event.__init__(self)

class RecipePreFinalise(RecipeEvent):
    """ Recipe Parsing Complete but not yet finalised"""

class RecipeTaskPreProcess(RecipeEvent):
    """
    Recipe Tasks about to be finalised
    The list of tasks should be final at this point and handlers
    are only able to change interdependencies
    """
    def __init__(self, fn, tasklist):
        self.fn = fn
        self.tasklist = tasklist
        Event.__init__(self)
    """ Recipe Parsing Complete but not yet finalised"""

class RecipeParsed(RecipeEvent):
    """ Recipe Parsing Complete """
@@ -432,7 +372,7 @@ class StampUpdate(Event):
    targets = property(getTargets)

class BuildBase(Event):
    """Base class for bitbake build events"""
    """Base class for bbmake run events"""

    def __init__(self, n, p, failures = 0):
        self._name = n
@@ -452,6 +392,12 @@ class BuildBase(Event):
    def setName(self, name):
        self._name = name

    def getCfg(self):
        return self.data

    def setCfg(self, cfg):
        self.data = cfg

    def getFailures(self):
        """
        Return the number of failed packages
@@ -460,6 +406,9 @@ class BuildBase(Event):

    pkgs = property(getPkgs, setPkgs, None, "pkgs property")
    name = property(getName, setName, None, "name property")
    cfg = property(getCfg, setCfg, None, "cfg property")


class BuildInit(BuildBase):
    """buildFile or buildTargets was invoked"""
@@ -468,13 +417,13 @@ class BuildInit(BuildBase):
        BuildBase.__init__(self, name, p)

class BuildStarted(BuildBase, OperationStarted):
    """Event when builds start"""
    """bbmake build run started"""
    def __init__(self, n, p, failures = 0):
        OperationStarted.__init__(self, "Building Started")
        BuildBase.__init__(self, n, p, failures)

class BuildCompleted(BuildBase, OperationCompleted):
    """Event when builds have completed"""
    """bbmake build run completed"""
    def __init__(self, total, n, p, failures=0, interrupted=0):
        if not failures:
            OperationCompleted.__init__(self, total, "Building Succeeded")
@@ -492,23 +441,6 @@ class DiskFull(Event):
        self._free = freespace
        self._mountpoint = mountpoint

class DiskUsageSample:
    def __init__(self, available_bytes, free_bytes, total_bytes):
        # Number of bytes available to non-root processes.
        self.available_bytes = available_bytes
        # Number of bytes available to root processes.
        self.free_bytes = free_bytes
        # Total capacity of the volume.
        self.total_bytes = total_bytes

class MonitorDiskEvent(Event):
    """If BB_DISKMON_DIRS is set, then this event gets triggered each time disk space is checked.
       Provides information about devices that are getting monitored."""
    def __init__(self, disk_usage):
        Event.__init__(self)
        # hash of device root path -> DiskUsageSample
        self.disk_usage = disk_usage

class NoProvider(Event):
    """No Provider for an Event"""

@@ -526,28 +458,6 @@ class NoProvider(Event):
    def isRuntime(self):
        return self._runtime

    def __str__(self):
        msg = ''
        if self._runtime:
            r = "R"
        else:
            r = ""

        extra = ''
        if not self._reasons:
            if self._close_matches:
                extra = ". Close matches:\n %s" % '\n '.join(self._close_matches)

        if self._dependees:
            msg = "Nothing %sPROVIDES '%s' (but %s %sDEPENDS on or otherwise requires it)%s" % (r, self._item, ", ".join(self._dependees), r, extra)
        else:
            msg = "Nothing %sPROVIDES '%s'%s" % (r, self._item, extra)
        if self._reasons:
            for reason in self._reasons:
                msg += '\n' + reason
        return msg


class MultipleProviders(Event):
    """Multiple Providers"""

@@ -575,16 +485,6 @@ class MultipleProviders(Event):
        """
        return self._candidates

    def __str__(self):
        msg = "Multiple providers are available for %s%s (%s)" % (self._is_runtime and "runtime " or "",
                                                                  self._item,
                                                                  ", ".join(self._candidates))
        rtime = ""
        if self._is_runtime:
            rtime = "R"
        msg += "\nConsider defining a PREFERRED_%sPROVIDER entry to match %s" % (rtime, self._item)
        return msg

class ParseStarted(OperationStarted):
    """Recipe parsing for the runqueue has begun"""
    def __init__(self, total):
@@ -678,6 +578,14 @@ class FilesMatchingFound(Event):
        self._pattern = pattern
        self._matches = matches

class CoreBaseFilesFound(Event):
    """
    Event when a list of appropriate config files has been generated
    """
    def __init__(self, paths):
        Event.__init__(self)
        self._paths = paths

class ConfigFilesFound(Event):
    """
    Event when a list of appropriate config files has been generated
@@ -748,6 +656,19 @@ class LogHandler(logging.Handler):
            record.taskpid = worker_pid
        return True

class RequestPackageInfo(Event):
    """
    Event to request package information
    """

class PackageInfo(Event):
    """
    Package information for GUI
    """
    def __init__(self, pkginfolist):
        Event.__init__(self)
        self._pkginfolist = pkginfolist

class MetadataEvent(Event):
    """
    Generic event that targets OE-Core classes
@@ -825,10 +746,3 @@ class NetworkTestFailed(Event):
    Event to indicate network test has failed
    """

class FindSigInfoResult(Event):
    """
    Event to return results from findSigInfo command
    """
    def __init__(self, result):
        Event.__init__(self)
        self.result = result

@@ -35,11 +35,10 @@ import operator
import collections
import subprocess
import pickle
import errno
import bb.persist_data, bb.utils
import bb.checksum
from bb import data
import bb.process
import bb.event

__version__ = "2"
_checksum_cache = bb.checksum.FileChecksumCache()
@@ -49,11 +48,11 @@ logger = logging.getLogger("BitBake.Fetcher")
class BBFetchException(Exception):
    """Class all fetch exceptions inherit from"""
    def __init__(self, message):
        self.msg = message
        Exception.__init__(self, message)
         self.msg = message
         Exception.__init__(self, message)

    def __str__(self):
        return self.msg
         return self.msg

class UntrustedUrl(BBFetchException):
    """Exception raised when encountering a host not listed in BB_ALLOWED_NETWORKS"""
@@ -69,24 +68,24 @@ class UntrustedUrl(BBFetchException):
class MalformedUrl(BBFetchException):
    """Exception raised when encountering an invalid url"""
    def __init__(self, url, message=''):
        if message:
            msg = message
        else:
            msg = "The URL: '%s' is invalid and cannot be interpreted" % url
        self.url = url
        BBFetchException.__init__(self, msg)
        self.args = (url,)
         if message:
             msg = message
         else:
             msg = "The URL: '%s' is invalid and cannot be interpreted" % url
         self.url = url
         BBFetchException.__init__(self, msg)
         self.args = (url,)

class FetchError(BBFetchException):
    """General fetcher exception when something happens incorrectly"""
    def __init__(self, message, url = None):
        if url:
         if url:
            msg = "Fetcher failure for URL: '%s'. %s" % (url, message)
        else:
         else:
            msg = "Fetcher failure: %s" % message
        self.url = url
        BBFetchException.__init__(self, msg)
        self.args = (message, url)
         self.url = url
         BBFetchException.__init__(self, msg)
         self.args = (message, url)

class ChecksumError(FetchError):
    """Exception when mismatched checksum encountered"""
@@ -100,56 +99,49 @@ class NoChecksumError(FetchError):
class UnpackError(BBFetchException):
    """General fetcher exception when something happens incorrectly when unpacking"""
    def __init__(self, message, url):
        msg = "Unpack failure for URL: '%s'. %s" % (url, message)
        self.url = url
        BBFetchException.__init__(self, msg)
        self.args = (message, url)
         msg = "Unpack failure for URL: '%s'. %s" % (url, message)
         self.url = url
         BBFetchException.__init__(self, msg)
         self.args = (message, url)

class NoMethodError(BBFetchException):
    """Exception raised when there is no method to obtain a supplied url or set of urls"""
    def __init__(self, url):
        msg = "Could not find a fetcher which supports the URL: '%s'" % url
        self.url = url
        BBFetchException.__init__(self, msg)
        self.args = (url,)
         msg = "Could not find a fetcher which supports the URL: '%s'" % url
         self.url = url
         BBFetchException.__init__(self, msg)
         self.args = (url,)

class MissingParameterError(BBFetchException):
    """Exception raised when a fetch method is missing a critical parameter in the url"""
    def __init__(self, missing, url):
        msg = "URL: '%s' is missing the required parameter '%s'" % (url, missing)
        self.url = url
        self.missing = missing
        BBFetchException.__init__(self, msg)
        self.args = (missing, url)
         msg = "URL: '%s' is missing the required parameter '%s'" % (url, missing)
         self.url = url
         self.missing = missing
         BBFetchException.__init__(self, msg)
         self.args = (missing, url)

class ParameterError(BBFetchException):
    """Exception raised when a url cannot be processed due to invalid parameters."""
    def __init__(self, message, url):
        msg = "URL: '%s' has invalid parameters. %s" % (url, message)
        self.url = url
        BBFetchException.__init__(self, msg)
        self.args = (message, url)
         msg = "URL: '%s' has invalid parameters. %s" % (url, message)
         self.url = url
         BBFetchException.__init__(self, msg)
         self.args = (message, url)

class NetworkAccess(BBFetchException):
    """Exception raised when network access is disabled but it is required."""
    def __init__(self, url, cmd):
        msg = "Network access disabled through BB_NO_NETWORK (or set indirectly due to use of BB_FETCH_PREMIRRORONLY) but access requested with command %s (for url %s)" % (cmd, url)
        self.url = url
        self.cmd = cmd
        BBFetchException.__init__(self, msg)
        self.args = (url, cmd)
         msg = "Network access disabled through BB_NO_NETWORK (or set indirectly due to use of BB_FETCH_PREMIRRORONLY) but access requested with command %s (for url %s)" % (cmd, url)
         self.url = url
         self.cmd = cmd
         BBFetchException.__init__(self, msg)
         self.args = (url, cmd)

class NonLocalMethod(Exception):
    def __init__(self):
        Exception.__init__(self)

class MissingChecksumEvent(bb.event.Event):
    def __init__(self, url, md5sum, sha256sum):
        self.url = url
        self.checksums = {'md5sum': md5sum,
                          'sha256sum': sha256sum}
        bb.event.Event.__init__(self)


class URI(object):
    """
@@ -256,7 +248,7 @@ class URI(object):

        # Identify if the URI is relative or not
        if urlp.scheme in self._relative_schemes and \
           re.compile(r"^\w+:(?!//)").match(uri):
           re.compile("^\w+:(?!//)").match(uri):
            self.relative = True

        if not self.relative:
@@ -363,7 +355,7 @@ def decodeurl(url):
    user, password, parameters).
    """

    m = re.compile('(?P<type>[^:]*)://((?P<user>[^/;]+)@)?(?P<location>[^;]+)(;(?P<parm>.*))?').match(url)
    m = re.compile('(?P<type>[^:]*)://((?P<user>[^/]+)@)?(?P<location>[^;]+)(;(?P<parm>.*))?').match(url)
    if not m:
        raise MalformedUrl(url)

@@ -383,7 +375,7 @@ def decodeurl(url):
        path = location
    else:
        host = location
        path = "/"
        path = ""
    if user:
        m = re.compile('(?P<user>[^:]+)(:?(?P<pswd>.*))').match(user)
        if m:
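decodeurl above splits a fetcher URL into its components with a single regular expression. A quick sketch of the decomposition for a typical SRC_URI entry (hypothetical URL):

    # Sketch only: the same top-level split decodeurl() performs.
    import re

    url = "git://git.example.com/repo.git;protocol=https;branch=main"
    m = re.compile(r'(?P<type>[^:]*)://((?P<user>[^/;]+)@)?(?P<location>[^;]+)(;(?P<parm>.*))?').match(url)
    scheme = m.group('type')          # "git"
    location = m.group('location')    # "git.example.com/repo.git"
    params = m.group('parm')          # "protocol=https;branch=main"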
@@ -411,6 +403,8 @@ def encodeurl(decoded):
|
||||
|
||||
type, host, path, user, pswd, p = decoded
|
||||
|
||||
if not path:
|
||||
raise MissingParameterError('path', "encoded from the data %s" % str(decoded))
|
||||
if not type:
|
||||
raise MissingParameterError('type', "encoded from the data %s" % str(decoded))
|
||||
url = '%s://' % type
|
||||
@@ -421,18 +415,17 @@ def encodeurl(decoded):
|
||||
url += "@"
|
||||
if host and type != "file":
|
||||
url += "%s" % host
|
||||
if path:
|
||||
# Standardise path to ensure comparisons work
|
||||
while '//' in path:
|
||||
path = path.replace("//", "/")
|
||||
url += "%s" % urllib.parse.quote(path)
|
||||
# Standardise path to ensure comparisons work
|
||||
while '//' in path:
|
||||
path = path.replace("//", "/")
|
||||
url += "%s" % urllib.parse.quote(path)
|
||||
if p:
|
||||
for parm in p:
|
||||
url += ";%s=%s" % (parm, p[parm])
|
||||
|
||||
return url
|
||||
|
||||
def uri_replace(ud, uri_find, uri_replace, replacements, d, mirrortarball=None):
|
||||
def uri_replace(ud, uri_find, uri_replace, replacements, d):
|
||||
if not ud.url or not uri_find or not uri_replace:
|
||||
logger.error("uri_replace: passed an undefined value, not replacing")
|
||||
return None
|
||||
@@ -452,8 +445,8 @@ def uri_replace(ud, uri_find, uri_replace, replacements, d, mirrortarball=None):
             # Handle URL parameters
             if i:
                 # Any specified URL parameters must match
-                for k in uri_find_decoded[loc]:
-                    if uri_decoded[loc][k] != uri_find_decoded[loc][k]:
+                for k in uri_replace_decoded[loc]:
+                    if uri_decoded[loc][k] != uri_replace_decoded[loc][k]:
                         return None
             # Overwrite any specified replacement parameters
             for k in uri_replace_decoded[loc]:

@@ -462,7 +455,7 @@ def uri_replace(ud, uri_find, uri_replace, replacements, d, mirrortarball=None):
                     result_decoded[loc][k] = uri_replace_decoded[loc][k]
             elif (re.match(regexp, uri_decoded[loc])):
                 if not uri_replace_decoded[loc]:
-                    result_decoded[loc] = ""
+                    result_decoded[loc] = "" 
                 else:
                     for k in replacements:
                         uri_replace_decoded[loc] = uri_replace_decoded[loc].replace(k, replacements[k])

@@ -471,9 +464,9 @@ def uri_replace(ud, uri_find, uri_replace, replacements, d, mirrortarball=None):
                 if loc == 2:
                     # Handle path manipulations
                     basename = None
-                    if uri_decoded[0] != uri_replace_decoded[0] and mirrortarball:
+                    if uri_decoded[0] != uri_replace_decoded[0] and ud.mirrortarball:
                         # If the source and destination url types differ, must be a mirrortarball mapping
-                        basename = os.path.basename(mirrortarball)
+                        basename = os.path.basename(ud.mirrortarball)
                         # Kill parameters, they make no sense for mirror tarballs
                         uri_decoded[5] = {}
                     elif ud.localpath and ud.method.supports_checksum(ud):
@@ -498,7 +491,7 @@ def fetcher_init(d):
     Calls before this must not hit the cache.
     """
     # When to drop SCM head revisions controlled by user policy
-    srcrev_policy = d.getVar('BB_SRCREV_POLICY') or "clear"
+    srcrev_policy = d.getVar('BB_SRCREV_POLICY', True) or "clear"
    if srcrev_policy == "cache":
        logger.debug(1, "Keeping SRCREV cache due to cache policy of: %s", srcrev_policy)
    elif srcrev_policy == "clear":

@@ -524,7 +517,7 @@ def fetcher_parse_save():
 def fetcher_parse_done():
     _checksum_cache.save_merge()

-def fetcher_compare_revisions(d):
+def fetcher_compare_revisions():
     """
     Compare the revisions in the persistant cache with current values and
     return true/false on whether they've changed.
@@ -544,11 +537,7 @@ def fetcher_compare_revisions(d):
     return False

 def mirror_from_string(data):
-    mirrors = (data or "").replace('\\n',' ').split()
-    # Split into pairs
-    if len(mirrors) % 2 != 0:
-        bb.warn('Invalid mirror data %s, should have paired members.' % data)
-    return list(zip(*[iter(mirrors)]*2))
+    return [ i.split() for i in (data or "").replace('\\n','\n').split('\n') if i ]

 def verify_checksum(ud, d, precomputed={}):
     """
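Editor's sketch (standalone, not part of the compare) contrasting the two mirror_from_string implementations above: the warrior side splits on any whitespace and pairs the entries up, warning on an odd count, while the morty side expects one "find replace" pair per line. The mirror entry is hypothetical.

    def mirrors_paired(data):    # warrior-side behaviour
        mirrors = (data or "").replace('\\n', ' ').split()
        return list(zip(*[iter(mirrors)] * 2))

    def mirrors_per_line(data):  # morty-side behaviour
        return [i.split() for i in (data or "").replace('\\n', '\n').split('\n') if i]

    entry = "git://.*/.* http://downloads.example.com/mirror/ \\n"
    mirrors_paired(entry)    # [('git://.*/.*', 'http://downloads.example.com/mirror/')]
    mirrors_per_line(entry)  # [['git://.*/.*', 'http://downloads.example.com/mirror/']]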
@@ -583,7 +572,7 @@ def verify_checksum(ud, d, precomputed={}):

     if ud.method.recommends_checksum(ud) and not ud.md5_expected and not ud.sha256_expected:
         # If strict checking enabled and neither sum defined, raise error
-        strict = d.getVar("BB_STRICT_CHECKSUM") or "0"
+        strict = d.getVar("BB_STRICT_CHECKSUM", True) or "0"
         if strict == "1":
             logger.error('No checksum specified for %s, please add at least one to the recipe:\n'
                          'SRC_URI[%s] = "%s"\nSRC_URI[%s] = "%s"' %

@@ -591,14 +580,6 @@ def verify_checksum(ud, d, precomputed={}):
                          ud.sha256_name, sha256data))
             raise NoChecksumError('Missing SRC_URI checksum', ud.url)

-        bb.event.fire(MissingChecksumEvent(ud.url, md5data, sha256data), d)
-
-        if strict == "ignore":
-            return {
-                _MD5_KEY: md5data,
-                _SHA256_KEY: sha256data
-            }
-
         # Log missing sums so user can more easily add them
         logger.warning('Missing md5 SRC_URI checksum for %s, consider adding to the recipe:\n'
                        'SRC_URI[%s] = "%s"',
@@ -640,28 +621,29 @@ def verify_donestamp(ud, d, origud=None):
     Returns True, if the donestamp exists and is valid, False otherwise. When
     returning False, any existing done stamps are removed.
     """
-    if not ud.needdonestamp or (origud and not origud.needdonestamp):
+    if not ud.needdonestamp:
         return True

-    if not os.path.exists(ud.localpath):
-        # local path does not exist
-        if os.path.exists(ud.donestamp):
-            # done stamp exists, but the downloaded file does not; the done stamp
-            # must be incorrect, re-trigger the download
-            bb.utils.remove(ud.donestamp)
+    if not os.path.exists(ud.donestamp):
         return False

     if (not ud.method.supports_checksum(ud) or
         (origud and not origud.method.supports_checksum(origud))):
-        # if done stamp exists and checksums not supported; assume the local
-        # file is current
-        return os.path.exists(ud.donestamp)
+        # done stamp exists, checksums not supported; assume the local file is
+        # current
+        return True
+
+    if not os.path.exists(ud.localpath):
+        # done stamp exists, but the downloaded file does not; the done stamp
+        # must be incorrect, re-trigger the download
+        bb.utils.remove(ud.donestamp)
+        return False

     precomputed_checksums = {}
     # Only re-use the precomputed checksums if the donestamp is newer than the
     # file. Do not rely on the mtime of directories, though. If ud.localpath is
     # a directory, there will probably not be any checksums anyway.
-    if os.path.exists(ud.donestamp) and (os.path.isdir(ud.localpath) or
+    if (os.path.isdir(ud.localpath) or
         os.path.getmtime(ud.localpath) < os.path.getmtime(ud.donestamp)):
         try:
             with open(ud.donestamp, "rb") as cachefile:
@@ -736,18 +718,18 @@ def subprocess_setup():

 def get_autorev(d):
     # only not cache src rev in autorev case
-    if d.getVar('BB_SRCREV_POLICY') != "cache":
+    if d.getVar('BB_SRCREV_POLICY', True) != "cache":
         d.setVar('BB_DONT_CACHE', '1')
     return "AUTOINC"

 def get_srcrev(d, method_name='sortable_revision'):
     """
-    Return the revision string, usually for use in the version string (PV) of the current package
+    Return the revsion string, usually for use in the version string (PV) of the current package
     Most packages usually only have one SCM so we just pass on the call.
     In the multi SCM case, we build a value based on SRCREV_FORMAT which must
     have been set.

-    The idea here is that we put the string "AUTOINC+" into return value if the revisions are not
+    The idea here is that we put the string "AUTOINC+" into return value if the revisions are not 
     incremental, other code is then responsible for turning that into an increasing value (if needed)

     A method_name can be supplied to retrieve an alternatively formatted revision from a fetcher, if

@@ -755,7 +737,7 @@ def get_srcrev(d, method_name='sortable_revision'):
     """

     scms = []
-    fetcher = Fetch(d.getVar('SRC_URI').split(), d)
+    fetcher = Fetch(d.getVar('SRC_URI', True).split(), d)
     urldata = fetcher.ud
     for u in urldata:
         if urldata[u].method.supports_srcrev():

@@ -775,10 +757,9 @@ def get_srcrev(d, method_name='sortable_revision'):
     #
     # Mutiple SCMs are in SRC_URI so we resort to SRCREV_FORMAT
     #
-    format = d.getVar('SRCREV_FORMAT')
+    format = d.getVar('SRCREV_FORMAT', True)
     if not format:
-        raise FetchError("The SRCREV_FORMAT variable must be set when multiple SCMs are used.\n"\
-                         "The SCMs are:\n%s" % '\n'.join(scms))
+        raise FetchError("The SRCREV_FORMAT variable must be set when multiple SCMs are used.")

     name_to_rev = {}
     seenautoinc = False

@@ -800,7 +781,7 @@ def get_srcrev(d, method_name='sortable_revision'):
     format = re.sub(name_to_rev_re, lambda match: name_to_rev[match.group(0)], format)

     if seenautoinc:
-        format = "AUTOINC+" + format
+        format = "AUTOINC+" + format 

     return format

@@ -828,7 +809,6 @@ def runfetchcmd(cmd, d, quiet=False, cleanup=None, log=None, workdir=None):
                     'NO_PROXY', 'no_proxy',
                     'ALL_PROXY', 'all_proxy',
                     'GIT_PROXY_COMMAND',
-                    'GIT_SSH',
                     'GIT_SSL_CAINFO',
                     'GIT_SMART_HTTP',
                     'SSH_AUTH_SOCK', 'SSH_AGENT_PID',

@@ -839,30 +819,13 @@ def runfetchcmd(cmd, d, quiet=False, cleanup=None, log=None, workdir=None):
     if not cleanup:
         cleanup = []

-    # If PATH contains WORKDIR which contains PV-PR which contains SRCPV we
-    # can end up in circular recursion here so give the option of breaking it
-    # in a data store copy.
-    try:
-        d.getVar("PV")
-        d.getVar("PR")
-    except bb.data_smart.ExpansionError:
-        d = bb.data.createCopy(d)
-        d.setVar("PV", "fetcheravoidrecurse")
-        d.setVar("PR", "fetcheravoidrecurse")
-
     origenv = d.getVar("BB_ORIGENV", False)
     for var in exportvars:
-        val = d.getVar(var) or (origenv and origenv.getVar(var))
+        val = d.getVar(var, True) or (origenv and origenv.getVar(var, True))
         if val:
             cmd = 'export ' + var + '=\"%s\"; %s' % (val, cmd)

-    # Disable pseudo as it may affect ssh, potentially causing it to hang.
-    cmd = 'export PSEUDO_DISABLED=1; ' + cmd
-
-    if workdir:
-        logger.debug(1, "Running '%s' in %s" % (cmd, workdir))
-    else:
-        logger.debug(1, "Running %s", cmd)
+    logger.debug(1, "Running %s", cmd)

     success = False
     error_message = ""
@@ -893,15 +856,12 @@ def runfetchcmd(cmd, d, quiet=False, cleanup=None, log=None, workdir=None):

     return output

-def check_network_access(d, info, url):
+def check_network_access(d, info = "", url = None):
     """
-    log remote network access, and error if BB_NO_NETWORK is set or the given
-    URI is untrusted
+    log remote network access, and error if BB_NO_NETWORK is set
     """
-    if bb.utils.to_boolean(d.getVar("BB_NO_NETWORK")):
+    if d.getVar("BB_NO_NETWORK", True) == "1":
         raise NetworkAccess(url, info)
     elif not trusted_network(d, url):
         raise UntrustedUrl(url, info)
     else:
         logger.debug(1, "Fetcher accessed the network with the command %s" % info)
@@ -916,47 +876,45 @@ def build_mirroruris(origud, mirrors, ld):
     replacements["BASENAME"] = origud.path.split("/")[-1]
     replacements["MIRRORNAME"] = origud.host.replace(':','.') + origud.path.replace('/', '.').replace('*', '.')

-    def adduri(ud, uris, uds, mirrors, tarballs):
+    def adduri(ud, uris, uds, mirrors):
         for line in mirrors:
             try:
                 (find, replace) = line
             except ValueError:
                 continue
+            newuri = uri_replace(ud, find, replace, replacements, ld)
+            if not newuri or newuri in uris or newuri == origud.url:
+                continue

-            for tarball in tarballs:
-                newuri = uri_replace(ud, find, replace, replacements, ld, tarball)
-                if not newuri or newuri in uris or newuri == origud.url:
-                    continue
-
-                if not trusted_network(ld, newuri):
-                    logger.debug(1, "Mirror %s not in the list of trusted networks, skipping" % (newuri))
-                    continue
+            if not trusted_network(ld, newuri):
+                logger.debug(1, "Mirror %s not in the list of trusted networks, skipping" % (newuri))
+                continue

-                # Create a local copy of the mirrors minus the current line
-                # this will prevent us from recursively processing the same line
-                # as well as indirect recursion A -> B -> C -> A
-                localmirrors = list(mirrors)
-                localmirrors.remove(line)
+            # Create a local copy of the mirrors minus the current line
+            # this will prevent us from recursively processing the same line
+            # as well as indirect recursion A -> B -> C -> A
+            localmirrors = list(mirrors)
+            localmirrors.remove(line)

-                try:
-                    newud = FetchData(newuri, ld)
-                    newud.setup_localpath(ld)
-                except bb.fetch2.BBFetchException as e:
-                    logger.debug(1, "Mirror fetch failure for url %s (original url: %s)" % (newuri, origud.url))
-                    logger.debug(1, str(e))
-                    try:
-                        # setup_localpath of file:// urls may fail, we should still see
-                        # if mirrors of the url exist
-                        adduri(newud, uris, uds, localmirrors, tarballs)
-                    except UnboundLocalError:
-                        pass
-                    continue
-                uris.append(newuri)
-                uds.append(newud)
+            try:
+                newud = FetchData(newuri, ld)
+                newud.setup_localpath(ld)
+            except bb.fetch2.BBFetchException as e:
+                logger.debug(1, "Mirror fetch failure for url %s (original url: %s)" % (newuri, origud.url))
+                logger.debug(1, str(e))
+                try:
+                    # setup_localpath of file:// urls may fail, we should still see
+                    # if mirrors of the url exist
+                    adduri(newud, uris, uds, localmirrors)
+                except UnboundLocalError:
+                    pass
+                continue
+            uris.append(newuri)
+            uds.append(newud)

-                adduri(newud, uris, uds, localmirrors, tarballs)
+            adduri(newud, uris, uds, localmirrors)

-    adduri(origud, uris, uds, mirrors, origud.mirrortarballs or [None])
+    adduri(origud, uris, uds, mirrors)

     return uris, uds

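Sketch only (hypothetical values): how the BASENAME and MIRRORNAME replacements computed at the top of this hunk expand for a given fetch URL.

    host = "example.com:443"
    path = "/pub/foo-1.0.tar.gz"
    replacements = {}
    replacements["BASENAME"] = path.split("/")[-1]
    # -> 'foo-1.0.tar.gz'
    replacements["MIRRORNAME"] = host.replace(':', '.') + path.replace('/', '.').replace('*', '.')
    # -> 'example.com.443.pub.foo-1.0.tar.gz'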
@@ -1000,42 +958,34 @@ def try_mirror_url(fetch, origud, ud, ld, check = False):

         # We may be obtaining a mirror tarball which needs further processing by the real fetcher
         # If that tarball is a local file:// we need to provide a symlink to it
-        dldir = ld.getVar("DL_DIR")
-
-        if origud.mirrortarballs and os.path.basename(ud.localpath) in origud.mirrortarballs and os.path.basename(ud.localpath) != os.path.basename(origud.localpath):
+        dldir = ld.getVar("DL_DIR", True)
+        if origud.mirrortarball and os.path.basename(ud.localpath) == os.path.basename(origud.mirrortarball) \
+                and os.path.basename(ud.localpath) != os.path.basename(origud.localpath):
             # Create donestamp in old format to avoid triggering a re-download
             if ud.donestamp:
                 bb.utils.mkdirhier(os.path.dirname(ud.donestamp))
                 open(ud.donestamp, 'w').close()
             dest = os.path.join(dldir, os.path.basename(ud.localpath))
             if not os.path.exists(dest):
-                # In case this is executing without any file locks held (as is
-                # the case for file:// URLs), two tasks may end up here at the
-                # same time, in which case we do not want the second task to
-                # fail when the link has already been created by the first task.
-                try:
-                    os.symlink(ud.localpath, dest)
-                except FileExistsError:
-                    pass
+                os.symlink(ud.localpath, dest)
             if not verify_donestamp(origud, ld) or origud.method.need_update(origud, ld):
                 origud.method.download(origud, ld)
-                if hasattr(origud.method, "build_mirror_data"):
+                if hasattr(origud.method,"build_mirror_data"):
                     origud.method.build_mirror_data(origud, ld)
             return origud.localpath
         # Otherwise the result is a local file:// and we symlink to it
-        ensure_symlink(ud.localpath, origud.localpath)
+        if not os.path.exists(origud.localpath):
+            if os.path.islink(origud.localpath):
+                # Broken symbolic link
+                os.unlink(origud.localpath)
+
+            os.symlink(ud.localpath, origud.localpath)
         update_stamp(origud, ld)
         return ud.localpath

     except bb.fetch2.NetworkAccess:
         raise

-    except IOError as e:
-        if e.errno in [errno.ESTALE]:
-            logger.warning("Stale Error Observed %s." % ud.url)
-            return False
-        raise
-
     except bb.fetch2.BBFetchException as e:
         if isinstance(e, ChecksumError):
             logger.warning("Mirror checksum failure for url %s (original url: %s)\nCleaning and trying again." % (ud.url, origud.url))
@@ -1057,22 +1007,6 @@ def try_mirror_url(fetch, origud, ud, ld, check = False):
             bb.utils.unlockfile(lf)


-def ensure_symlink(target, link_name):
-    if not os.path.exists(link_name):
-        if os.path.islink(link_name):
-            # Broken symbolic link
-            os.unlink(link_name)
-
-        # In case this is executing without any file locks held (as is
-        # the case for file:// URLs), two tasks may end up here at the
-        # same time, in which case we do not want the second task to
-        # fail when the link has already been created by the first task.
-        try:
-            os.symlink(target, link_name)
-        except FileExistsError:
-            pass
-
-
 def try_mirrors(fetch, d, origud, mirrors, check = False):
     """
     Try to use a mirrored version of the sources.
@@ -1098,16 +1032,14 @@ def trusted_network(d, url):
     BB_ALLOWED_NETWORKS is set globally or for a specific recipe.
     Note: modifies SRC_URI & mirrors.
     """
-    if bb.utils.to_boolean(d.getVar("BB_NO_NETWORK")):
+    if d.getVar('BB_NO_NETWORK', True) == "1":
         return True

     pkgname = d.expand(d.getVar('PN', False))
-    trusted_hosts = None
-    if pkgname:
-        trusted_hosts = d.getVarFlag('BB_ALLOWED_NETWORKS', pkgname, False)
+    trusted_hosts = d.getVarFlag('BB_ALLOWED_NETWORKS', pkgname, False)

     if not trusted_hosts:
-        trusted_hosts = d.getVar('BB_ALLOWED_NETWORKS')
+        trusted_hosts = d.getVar('BB_ALLOWED_NETWORKS', True)

     # Not enabled.
     if not trusted_hosts:
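For context (hedged, since the matching code falls outside this hunk): BB_ALLOWED_NETWORKS is a whitespace-separated host list, consulted per recipe first and globally second, as the lookup order above shows. A hypothetical datastore setup:

    # Per-recipe flag takes precedence over the global value (hosts are examples)
    d.setVarFlag('BB_ALLOWED_NETWORKS', 'curl', 'example.com *.kernel.org')
    d.setVar('BB_ALLOWED_NETWORKS', 'git.yoctoproject.org')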
@@ -1139,7 +1071,7 @@ def srcrev_internal_helper(ud, d, name):
     """

     srcrev = None
-    pn = d.getVar("PN")
+    pn = d.getVar("PN", True)
     attempts = []
     if name != '' and pn:
         attempts.append("SRCREV_%s_pn-%s" % (name, pn))

@@ -1150,7 +1082,7 @@ def srcrev_internal_helper(ud, d, name):
     attempts.append("SRCREV")

     for a in attempts:
-        srcrev = d.getVar(a)
+        srcrev = d.getVar(a, True)
         if srcrev and srcrev != "INVALID":
             break

@@ -1165,7 +1097,7 @@ def srcrev_internal_helper(ud, d, name):
         if srcrev == "INVALID" or not srcrev:
             return parmrev
         if srcrev != parmrev:
-            raise FetchError("Conflicting revisions (%s from SRCREV and %s from the url) found, please specify one valid value" % (srcrev, parmrev))
+            raise FetchError("Conflicting revisions (%s from SRCREV and %s from the url) found, please spcify one valid value" % (srcrev, parmrev))
         return parmrev

     if srcrev == "INVALID" or not srcrev:
@@ -1183,7 +1115,7 @@ def get_checksum_file_list(d):
     """
     fetch = Fetch([], d, cache = False, localonly = True)

-    dl_dir = d.getVar('DL_DIR')
+    dl_dir = d.getVar('DL_DIR', True)
     filelist = []
     for u in fetch.urls:
         ud = fetch.ud[u]

@@ -1197,9 +1129,9 @@ def get_checksum_file_list(d):
             if f.startswith(dl_dir):
                 # The local fetcher's behaviour is to return a path under DL_DIR if it couldn't find the file anywhere else
                 if os.path.exists(f):
-                    bb.warn("Getting checksum for %s SRC_URI entry %s: file not found except in DL_DIR" % (d.getVar('PN'), os.path.basename(f)))
+                    bb.warn("Getting checksum for %s SRC_URI entry %s: file not found except in DL_DIR" % (d.getVar('PN', True), os.path.basename(f)))
                 else:
-                    bb.warn("Unable to get checksum for %s SRC_URI entry %s: file could not be found" % (d.getVar('PN'), os.path.basename(f)))
+                    bb.warn("Unable to get checksum for %s SRC_URI entry %s: file could not be found" % (d.getVar('PN', True), os.path.basename(f)))
         filelist.append(f + ":" + str(os.path.exists(f)))

     return " ".join(filelist)
@@ -1225,10 +1157,10 @@ class FetchData(object):
         self.localfile = ""
         self.localpath = None
         self.lockfile = None
-        self.mirrortarballs = []
+        self.mirrortarball = None
         self.basename = None
         self.basepath = None
-        (self.type, self.host, self.path, self.user, self.pswd, self.parm) = decodeurl(d.expand(url))
+        (self.type, self.host, self.path, self.user, self.pswd, self.parm) = decodeurl(data.expand(url, d))
         self.date = self.getSRCDate(d)
         self.url = url
         if not self.user and "user" in self.parm:

@@ -1245,16 +1177,16 @@ class FetchData(object):
         self.sha256_name = "sha256sum"
         if self.md5_name in self.parm:
             self.md5_expected = self.parm[self.md5_name]
-        elif self.type not in ["http", "https", "ftp", "ftps", "sftp", "s3"]:
+        elif self.type not in ["http", "https", "ftp", "ftps", "sftp"]:
             self.md5_expected = None
         else:
-            self.md5_expected = d.getVarFlag("SRC_URI", self.md5_name)
+            self.md5_expected = d.getVarFlag("SRC_URI", self.md5_name, True)
         if self.sha256_name in self.parm:
             self.sha256_expected = self.parm[self.sha256_name]
-        elif self.type not in ["http", "https", "ftp", "ftps", "sftp", "s3"]:
+        elif self.type not in ["http", "https", "ftp", "ftps", "sftp"]:
             self.sha256_expected = None
         else:
-            self.sha256_expected = d.getVarFlag("SRC_URI", self.sha256_name)
+            self.sha256_expected = d.getVarFlag("SRC_URI", self.sha256_name, True)
         self.ignore_checksums = False

         self.names = self.parm.get("name",'default').split(',')

@@ -1263,7 +1195,7 @@ class FetchData(object):
         for m in methods:
             if m.supports(self, d):
                 self.method = m
-                break
+                break 

         if not self.method:
             raise NoMethodError(url)

@@ -1272,7 +1204,7 @@ class FetchData(object):
             raise NonLocalMethod()

         if self.parm.get("proto", None) and "protocol" not in self.parm:
-            logger.warning('Consider updating %s recipe to use "protocol" not "proto" in SRC_URI.', d.getVar('PN'))
+            logger.warning('Consider updating %s recipe to use "protocol" not "proto" in SRC_URI.', d.getVar('PN', True))
             self.parm["protocol"] = self.parm.get("proto", None)

         if hasattr(self.method, "urldata_init"):

@@ -1285,7 +1217,7 @@ class FetchData(object):
         elif self.localfile:
             self.localpath = self.method.localpath(self, d)

-        dldir = d.getVar("DL_DIR")
+        dldir = d.getVar("DL_DIR", True)

         if not self.needdonestamp:
             return

@@ -1298,12 +1230,12 @@ class FetchData(object):
         elif self.basepath or self.basename:
             basepath = dldir + os.sep + (self.basepath or self.basename)
         else:
-            bb.fatal("Can't determine lock path for url %s" % url)
+            bb.fatal("Can't determine lock path for url %s" % url) 

         self.donestamp = basepath + '.done'
         self.lockfile = basepath + '.lock'

-    def setup_revisions(self, d):
+    def setup_revisons(self, d):
         self.revisions = {}
         for name in self.names:
             self.revisions[name] = srcrev_internal_helper(self, d, name)
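Sketch of the checksum-expectation logic above for hypothetical URLs: a checksum supplied as a URL parameter wins over the SRC_URI varflag, and non-network schemes default to no expected checksum.

    url_a = "http://example.com/foo-1.0.tar.gz;md5sum=0123456789abcdef0123456789abcdef"
    # -> md5_expected taken from the URL parameter in self.parm

    url_b = "git://example.com/foo.git;branch=master"
    # -> type not in ["http", "https", "ftp", "ftps", "sftp"], so md5_expected = None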
@@ -1325,12 +1257,12 @@ class FetchData(object):
         if "srcdate" in self.parm:
             return self.parm['srcdate']

-        pn = d.getVar("PN")
+        pn = d.getVar("PN", True)

         if pn:
-            return d.getVar("SRCDATE_%s" % pn) or d.getVar("SRCDATE") or d.getVar("DATE")
+            return d.getVar("SRCDATE_%s" % pn, True) or d.getVar("SRCDATE", True) or d.getVar("DATE", True)

-        return d.getVar("SRCDATE") or d.getVar("DATE")
+        return d.getVar("SRCDATE", True) or d.getVar("DATE", True)

 class FetchMethod(object):
     """Base class for 'fetch'ing data"""

@@ -1350,7 +1282,7 @@ class FetchMethod(object):
         Can also setup variables in urldata for use in go (saving code duplication
         and duplicate code execution)
         """
-        return os.path.join(d.getVar("DL_DIR"), urldata.localfile)
+        return os.path.join(data.getVar("DL_DIR", d, True), urldata.localfile)

     def supports_checksum(self, urldata):
         """

@@ -1361,13 +1293,13 @@ class FetchMethod(object):
         if os.path.isdir(urldata.localpath) == True:
             return False
         if urldata.localpath.find("*") != -1:
-            return False
+             return False

         return True

     def recommends_checksum(self, urldata):
         """
-        Is the backend on where checksumming is recommended (should warnings
+        Is the backend on where checksumming is recommended (should warnings 
         be displayed if there is no checksum)?
         """
         return False
@@ -1407,7 +1339,7 @@ class FetchMethod(object):
         Fetch urls
         Assumes localpath was called first
         """
-        raise NoMethodError(urldata.url)
+        raise NoMethodError(url)

     def unpack(self, urldata, rootdir, data):
         iterate = False

@@ -1442,7 +1374,7 @@ class FetchMethod(object):
             cmd = 'gzip -dc %s > %s' % (file, efile)
         elif file.endswith('.bz2'):
             cmd = 'bzip2 -dc %s > %s' % (file, efile)
-        elif file.endswith('.txz') or file.endswith('.tar.xz'):
+        elif file.endswith('.tar.xz'):
             cmd = 'xz -dc %s | tar x --no-same-owner -f -' % file
         elif file.endswith('.xz'):
             cmd = 'xz -dc %s > %s' % (file, efile)

@@ -1450,10 +1382,6 @@ class FetchMethod(object):
             cmd = 'lzip -dc %s | tar x --no-same-owner -f -' % file
         elif file.endswith('.lz'):
             cmd = 'lzip -dc %s > %s' % (file, efile)
-        elif file.endswith('.tar.7z'):
-            cmd = '7z x -so %s | tar x --no-same-owner -f -' % file
-        elif file.endswith('.7z'):
-            cmd = '7za x -y %s 1>/dev/null' % file
         elif file.endswith('.zip') or file.endswith('.jar'):
             try:
                 dos = bb.utils.to_boolean(urldata.parm.get('dos'), False)

@@ -1473,7 +1401,7 @@ class FetchMethod(object):
             else:
                 cmd = 'rpm2cpio.sh %s | cpio -id' % (file)
         elif file.endswith('.deb') or file.endswith('.ipk'):
-            output = subprocess.check_output(['ar', '-t', file], preexec_fn=subprocess_setup)
+            output = subprocess.check_output('ar -t %s' % file, preexec_fn=subprocess_setup, shell=True)
             datafile = None
             if output:
                 for line in output.decode().splitlines():

@@ -1485,6 +1413,10 @@ class FetchMethod(object):
             else:
                 raise UnpackError("Unable to unpack deb/ipk package - could not list contents", urldata.url)
             cmd = 'ar x %s %s && tar --no-same-owner -xpf %s && rm %s' % (file, datafile, datafile, datafile)
+        elif file.endswith('.tar.7z'):
+            cmd = '7z x -so %s | tar xf - ' % file
+        elif file.endswith('.7z'):
+            cmd = '7za x -y %s 1>/dev/null' % file

         # If 'subdir' param exists, create a dir and use it as destination for unpack cmd
         if 'subdir' in urldata.parm:
@@ -1518,7 +1450,7 @@ class FetchMethod(object):
         if not cmd:
             return

-        path = data.getVar('PATH')
+        path = data.getVar('PATH', True)
         if path:
             cmd = "PATH=\"%s\" %s" % (path, cmd)
         bb.note("Unpacking %s to %s/" % (file, unpackdir))

@@ -1551,7 +1483,7 @@ class FetchMethod(object):
         Check the status of a URL
         Assumes localpath was called first
         """
-        logger.info("URL %s could not be checked for status since no method exists.", urldata.url)
+        logger.info("URL %s could not be checked for status since no method exists.", url)
         return True

     def latest_revision(self, ud, d, name):

@@ -1559,7 +1491,7 @@ class FetchMethod(object):
         Look in the cache for the latest revision, if not present ask the SCM.
         """
         if not hasattr(self, "_latest_revision"):
-            raise ParameterError("The fetcher for this URL does not support _latest_revision", ud.url)
+            raise ParameterError("The fetcher for this URL does not support _latest_revision", url)

         revs = bb.persist_data.persist('BB_URI_HEADREVS', d)
         key = self.generate_revision_key(ud, d, name)

@@ -1575,15 +1507,7 @@ class FetchMethod(object):

     def generate_revision_key(self, ud, d, name):
         key = self._revision_key(ud, d, name)
-        return "%s-%s" % (key, d.getVar("PN") or "")
-
-    def latest_versionstring(self, ud, d):
-        """
-        Compute the latest release name like "x.y.x" in "x.y.x+gitHASH"
-        by searching through the tags output of ls-remote, comparing
-        versions and returning the highest match as a (version, revision) pair.
-        """
-        return ('', '')
+        return "%s-%s" % (key, d.getVar("PN", True) or "")

 class Fetch(object):
     def __init__(self, urls, d, cache = True, localonly = False, connection_cache = None):
@@ -1591,14 +1515,14 @@ class Fetch(object):
             raise Exception("bb.fetch2.Fetch.__init__: cannot set cache and localonly at same time")

         if len(urls) == 0:
-            urls = d.getVar("SRC_URI").split()
+            urls = d.getVar("SRC_URI", True).split()
         self.urls = urls
         self.d = d
         self.ud = {}
         self.connection_cache = connection_cache

-        fn = d.getVar('FILE')
-        mc = d.getVar('__BBMULTICONFIG') or ""
+        fn = d.getVar('FILE', True)
+        mc = d.getVar('__BBMULTICONFIG', True) or ""
         if cache and fn and mc + fn in urldata_cache:
             self.ud = urldata_cache[mc + fn]

@@ -1641,8 +1565,8 @@ class Fetch(object):
         if not urls:
             urls = self.urls

-        network = self.d.getVar("BB_NO_NETWORK")
-        premirroronly = bb.utils.to_boolean(self.d.getVar("BB_FETCH_PREMIRRORONLY"))
+        network = self.d.getVar("BB_NO_NETWORK", True)
+        premirroronly = (self.d.getVar("BB_FETCH_PREMIRRORONLY", True) == "1")

         for u in urls:
             ud = self.ud[u]
@@ -1655,22 +1579,13 @@ class Fetch(object):

             try:
                 self.d.setVar("BB_NO_NETWORK", network)

                 if verify_donestamp(ud, self.d) and not m.need_update(ud, self.d):
                     localpath = ud.localpath
                 elif m.try_premirror(ud, self.d):
                     logger.debug(1, "Trying PREMIRRORS")
-                    mirrors = mirror_from_string(self.d.getVar('PREMIRRORS'))
+                    mirrors = mirror_from_string(self.d.getVar('PREMIRRORS', True))
                     localpath = try_mirrors(self, self.d, ud, mirrors, False)
-                    if localpath:
-                        try:
-                            # early checksum verification so that if the checksum of the premirror
-                            # contents mismatch the fetcher can still try upstream and mirrors
-                            update_stamp(ud, self.d)
-                        except ChecksumError as e:
-                            logger.warning("Checksum failure encountered with premirror download of %s - will attempt other sources." % u)
-                            logger.debug(1, str(e))
-                            localpath = ""

                 if premirroronly:
                     self.d.setVar("BB_NO_NETWORK", "1")

@@ -1709,7 +1624,7 @@ class Fetch(object):
                     if not verified_stamp:
                         m.clean(ud, self.d)
                     logger.debug(1, "Trying MIRRORS")
-                    mirrors = mirror_from_string(self.d.getVar('MIRRORS'))
+                    mirrors = mirror_from_string(self.d.getVar('MIRRORS', True))
                     localpath = try_mirrors(self, self.d, ud, mirrors)

                 if not localpath or ((not os.path.exists(localpath)) and localpath.find("*") == -1):

@@ -1719,11 +1634,6 @@ class Fetch(object):

                 update_stamp(ud, self.d)

-            except IOError as e:
-                if e.errno in [errno.ESTALE]:
-                    logger.error("Stale Error Observed %s." % u)
-                    raise ChecksumError("Stale Error Detected")
-
             except BBFetchException as e:
                 if isinstance(e, ChecksumError):
                     logger.error("Checksum failure fetching %s" % u)
@@ -1747,14 +1657,15 @@ class Fetch(object):
             m = ud.method
             logger.debug(1, "Testing URL %s", u)
             # First try checking uri, u, from PREMIRRORS
-            mirrors = mirror_from_string(self.d.getVar('PREMIRRORS'))
+            mirrors = mirror_from_string(self.d.getVar('PREMIRRORS', True))
             ret = try_mirrors(self, self.d, ud, mirrors, True)
             if not ret:
                 # Next try checking from the original uri, u
-                ret = m.checkstatus(self, ud, self.d)
-                if not ret:
+                try:
+                    ret = m.checkstatus(self, ud, self.d)
+                except:
                     # Finally, try checking uri, u, from MIRRORS
-                    mirrors = mirror_from_string(self.d.getVar('MIRRORS'))
+                    mirrors = mirror_from_string(self.d.getVar('MIRRORS', True))
                     ret = try_mirrors(self, self.d, ud, mirrors, True)

             if not ret:

@@ -1762,7 +1673,7 @@ class Fetch(object):

     def unpack(self, root, urls=None):
         """
-        Unpack urls to root
+        Check all urls exist upstream
         """

         if not urls:

@@ -1790,7 +1701,7 @@ class Fetch(object):

         for url in urls:
             if url not in self.ud:
-                self.ud[url] = FetchData(url, self.d)
+                self.ud[url] = FetchData(url, d)
             ud = self.ud[url]
             ud.setup_localpath(self.d)

@@ -1852,7 +1763,6 @@ from . import svn
 from . import wget
 from . import ssh
 from . import sftp
-from . import s3
 from . import perforce
 from . import bzr
 from . import hg

@@ -1870,7 +1780,6 @@ methods.append(gitannex.GitANNEX())
 methods.append(cvs.Cvs())
 methods.append(ssh.SSH())
 methods.append(sftp.SFTP())
-methods.append(s3.S3())
 methods.append(perforce.Perforce())
 methods.append(bzr.Bzr())
 methods.append(hg.Hg())
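A pattern worth naming once, since it accounts for most single-line changes in this compare: newer BitBake (the warrior side) made getVar expand by default, so the two calls below are equivalent.

    val = d.getVar('DL_DIR')        # warrior side: expand defaults to True
    val = d.getVar('DL_DIR', True)  # morty side: expand passed explicitly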
@@ -27,6 +27,7 @@ import os
 import sys
 import logging
 import bb
+from bb import data
 from bb.fetch2 import FetchMethod
 from bb.fetch2 import FetchError
 from bb.fetch2 import runfetchcmd

@@ -41,16 +42,15 @@ class Bzr(FetchMethod):
         init bzr specific variable within url data
         """
         # Create paths to bzr checkouts
-        bzrdir = d.getVar("BZRDIR") or (d.getVar("DL_DIR") + "/bzr")
         relpath = self._strip_leading_slashes(ud.path)
-        ud.pkgdir = os.path.join(bzrdir, ud.host, relpath)
+        ud.pkgdir = os.path.join(data.expand('${BZRDIR}', d), ud.host, relpath)

-        ud.setup_revisions(d)
+        ud.setup_revisons(d)

         if not ud.revision:
             ud.revision = self.latest_revision(ud, d)

-        ud.localfile = d.expand('bzr_%s_%s_%s.tar.gz' % (ud.host, ud.path.replace('/', '.'), ud.revision))
+        ud.localfile = data.expand('bzr_%s_%s_%s.tar.gz' % (ud.host, ud.path.replace('/', '.'), ud.revision), d)

@@ -58,7 +58,7 @@ class Bzr(FetchMethod):
         command is "fetch", "update", "revno"
         """

-        basecmd = d.getVar("FETCHCMD_bzr") or "/usr/bin/env bzr"
+        basecmd = data.expand('${FETCHCMD_bzr}', d)

         proto = ud.parm.get('protocol', 'http')

@@ -65,10 +65,12 @@ import os
 import sys
 import shutil
 import bb
+from bb import data
 from bb.fetch2 import FetchMethod
 from bb.fetch2 import FetchError
 from bb.fetch2 import runfetchcmd
 from bb.fetch2 import logger
+from distutils import spawn

 class ClearCase(FetchMethod):
     """Class to fetch urls via 'clearcase'"""

@@ -106,13 +108,13 @@ class ClearCase(FetchMethod):
         else:
             ud.module = ""

-        ud.basecmd = d.getVar("FETCHCMD_ccrc") or "/usr/bin/env cleartool || rcleartool"
+        ud.basecmd = d.getVar("FETCHCMD_ccrc", True) or spawn.find_executable("cleartool") or spawn.find_executable("rcleartool")

-        if d.getVar("SRCREV") == "INVALID":
+        if data.getVar("SRCREV", d, True) == "INVALID":
             raise FetchError("Set a valid SRCREV for the clearcase fetcher in your recipe, e.g. SRCREV = \"/main/LATEST\" or any other label of your choice.")

         ud.label = d.getVar("SRCREV", False)
-        ud.customspec = d.getVar("CCASE_CUSTOM_CONFIG_SPEC")
+        ud.customspec = d.getVar("CCASE_CUSTOM_CONFIG_SPEC", True)

         ud.server = "%s://%s%s" % (ud.proto, ud.host, ud.path)

@@ -122,7 +124,7 @@ class ClearCase(FetchMethod):

         ud.viewname = "%s-view%s" % (ud.identifier, d.getVar("DATETIME", d, True))
         ud.csname = "%s-config-spec" % (ud.identifier)
-        ud.ccasedir = os.path.join(d.getVar("DL_DIR"), ud.type)
+        ud.ccasedir = os.path.join(data.getVar("DL_DIR", d, True), ud.type)
         ud.viewdir = os.path.join(ud.ccasedir, ud.viewname)
         ud.configspecfile = os.path.join(ud.ccasedir, ud.csname)
         ud.localfile = "%s.tar.gz" % (ud.identifier)

@@ -142,7 +144,7 @@ class ClearCase(FetchMethod):
         self.debug("configspecfile = %s" % ud.configspecfile)
         self.debug("localfile = %s" % ud.localfile)

-        ud.localfile = os.path.join(d.getVar("DL_DIR"), ud.localfile)
+        ud.localfile = os.path.join(data.getVar("DL_DIR", d, True), ud.localfile)

     def _build_ccase_command(self, ud, command):
         """
@@ -63,7 +63,7 @@ class Cvs(FetchMethod):
         if 'fullpath' in ud.parm:
             fullpath = '_fullpath'

-        ud.localfile = d.expand('%s_%s_%s_%s%s%s.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.tag, ud.date, norecurse, fullpath))
+        ud.localfile = bb.data.expand('%s_%s_%s_%s%s%s.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.tag, ud.date, norecurse, fullpath), d)

     def need_update(self, ud, d):
         if (ud.date == "now"):

@@ -87,10 +87,10 @@ class Cvs(FetchMethod):
             cvsroot = ud.path
         else:
             cvsroot = ":" + method
-            cvsproxyhost = d.getVar('CVS_PROXY_HOST')
+            cvsproxyhost = d.getVar('CVS_PROXY_HOST', True)
             if cvsproxyhost:
                 cvsroot += ";proxy=" + cvsproxyhost
-            cvsproxyport = d.getVar('CVS_PROXY_PORT')
+            cvsproxyport = d.getVar('CVS_PROXY_PORT', True)
             if cvsproxyport:
                 cvsroot += ";proxyport=" + cvsproxyport
             cvsroot += ":" + ud.user

@@ -110,7 +110,7 @@ class Cvs(FetchMethod):
         if ud.tag:
             options.append("-r %s" % ud.tag)

-        cvsbasecmd = d.getVar("FETCHCMD_cvs") or "/usr/bin/env cvs"
+        cvsbasecmd = d.getVar("FETCHCMD_cvs", True)
         cvscmd = cvsbasecmd + " '-d" + cvsroot + "' co " + " ".join(options) + " " + ud.module
         cvsupdatecmd = cvsbasecmd + " '-d" + cvsroot + "' update -d -P " + " ".join(options)

@@ -120,9 +120,8 @@ class Cvs(FetchMethod):

         # create module directory
         logger.debug(2, "Fetch: checking for module directory")
-        pkg = d.getVar('PN')
-        cvsdir = d.getVar("CVSDIR") or (d.getVar("DL_DIR") + "/cvs")
-        pkgdir = os.path.join(cvsdir, pkg)
+        pkg = d.getVar('PN', True)
+        pkgdir = os.path.join(d.getVar('CVSDIR', True), pkg)
         moddir = os.path.join(pkgdir, localdir)
         workdir = None
         if os.access(os.path.join(moddir, 'CVS'), os.R_OK):

@@ -165,8 +164,8 @@ class Cvs(FetchMethod):
     def clean(self, ud, d):
         """ Clean CVS Files and tarballs """

-        pkg = d.getVar('PN')
-        pkgdir = os.path.join(d.getVar("CVSDIR"), pkg)
+        pkg = d.getVar('PN', True)
+        pkgdir = os.path.join(d.getVar("CVSDIR", True), pkg)

         bb.utils.remove(pkgdir, True)
         bb.utils.remove(ud.localpath)
@@ -50,7 +50,7 @@ Supported SRC_URI options are:
    The default is "0", set nobranch=1 if needed.

 - usehead
-   For local git:// urls to use the current branch HEAD as the revision for use with
+   For local git:// urls to use the current branch HEAD as the revsion for use with
    AUTOREV. Implies nobranch.

 """

@@ -70,15 +70,13 @@ Supported SRC_URI options are:
 # with this program; if not, write to the Free Software Foundation, Inc.,
 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

-import collections
-import errno
-import fnmatch
 import os
 import re
-import subprocess
-import tempfile
 import bb
+import errno
 import bb.progress
+from bb import data
 from bb.fetch2 import FetchMethod
 from bb.fetch2 import runfetchcmd
 from bb.fetch2 import logger

@@ -125,9 +123,6 @@ class GitProgressHandler(bb.progress.LineFilterProgressHandler):


 class Git(FetchMethod):
-    bitbake_dir = os.path.abspath(os.path.join(os.path.dirname(os.path.join(os.path.abspath(__file__))), '..', '..', '..'))
-    make_shallow_path = os.path.join(bitbake_dir, 'bin', 'git-make-shallow')
-
     """Class to fetch a module or modules from git repositories"""
     def init(self, d):
         pass
@@ -178,68 +173,20 @@ class Git(FetchMethod):
         branches = ud.parm.get("branch", "master").split(',')
         if len(branches) != len(ud.names):
             raise bb.fetch2.ParameterError("The number of name and branch parameters is not balanced", ud.url)
-
-        ud.cloneflags = "-s -n"
-        if ud.bareclone:
-            ud.cloneflags += " --mirror"
-
-        ud.shallow = d.getVar("BB_GIT_SHALLOW") == "1"
-        ud.shallow_extra_refs = (d.getVar("BB_GIT_SHALLOW_EXTRA_REFS") or "").split()
-
-        depth_default = d.getVar("BB_GIT_SHALLOW_DEPTH")
-        if depth_default is not None:
-            try:
-                depth_default = int(depth_default or 0)
-            except ValueError:
-                raise bb.fetch2.FetchError("Invalid depth for BB_GIT_SHALLOW_DEPTH: %s" % depth_default)
-            else:
-                if depth_default < 0:
-                    raise bb.fetch2.FetchError("Invalid depth for BB_GIT_SHALLOW_DEPTH: %s" % depth_default)
-        else:
-            depth_default = 1
-        ud.shallow_depths = collections.defaultdict(lambda: depth_default)
-
-        revs_default = d.getVar("BB_GIT_SHALLOW_REVS")
-        ud.shallow_revs = []
         ud.branches = {}
-        for pos, name in enumerate(ud.names):
-            branch = branches[pos]
+        for name in ud.names:
+            branch = branches[ud.names.index(name)]
             ud.branches[name] = branch
             ud.unresolvedrev[name] = branch

-            shallow_depth = d.getVar("BB_GIT_SHALLOW_DEPTH_%s" % name)
-            if shallow_depth is not None:
-                try:
-                    shallow_depth = int(shallow_depth or 0)
-                except ValueError:
-                    raise bb.fetch2.FetchError("Invalid depth for BB_GIT_SHALLOW_DEPTH_%s: %s" % (name, shallow_depth))
-                else:
-                    if shallow_depth < 0:
-                        raise bb.fetch2.FetchError("Invalid depth for BB_GIT_SHALLOW_DEPTH_%s: %s" % (name, shallow_depth))
-                ud.shallow_depths[name] = shallow_depth
-
-            revs = d.getVar("BB_GIT_SHALLOW_REVS_%s" % name)
-            if revs is not None:
-                ud.shallow_revs.extend(revs.split())
-            elif revs_default is not None:
-                ud.shallow_revs.extend(revs_default.split())
-
-        if (ud.shallow and
-                not ud.shallow_revs and
-                all(ud.shallow_depths[n] == 0 for n in ud.names)):
-            # Shallow disabled for this URL
-            ud.shallow = False
-
         if ud.usehead:
             ud.unresolvedrev['default'] = 'HEAD'

-        ud.basecmd = d.getVar("FETCHCMD_git") or "git -c core.fsyncobjectfiles=0"
+        ud.basecmd = data.getVar("FETCHCMD_git", d, True) or "git -c core.fsyncobjectfiles=0"

-        write_tarballs = d.getVar("BB_GENERATE_MIRROR_TARBALLS") or "0"
-        ud.write_tarballs = write_tarballs != "0" or ud.rebaseable
-        ud.write_shallow_tarballs = (d.getVar("BB_GENERATE_SHALLOW_TARBALLS") or write_tarballs) != "0"
+        ud.write_tarballs = ((data.getVar("BB_GENERATE_MIRROR_TARBALLS", d, True) or "0") != "0") or ud.rebaseable

-        ud.setup_revisions(d)
+        ud.setup_revisons(d)

         for name in ud.names:
             # Ensure anything that doesn't look like a sha256 checksum/revision is translated into one
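Sketch only: the warrior-side shallow-depth validation above, restated as standalone logic (the function name is hypothetical).

    def parse_depth(value, default=1):
        # None -> shallow clone of the default depth; "0" disables shallow
        if value is None:
            return default
        depth = int(value or 0)   # a non-integer raises, like the fetch error above
        if depth < 0:
            raise ValueError("Invalid depth: %s" % value)
        return depth

    parse_depth(None)  # -> 1
    parse_depth("0")   # -> 0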
@@ -259,66 +206,30 @@ class Git(FetchMethod):
         if ud.rebaseable:
             for name in ud.names:
                 gitsrcname = gitsrcname + '_' + ud.revisions[name]
-
-        dl_dir = d.getVar("DL_DIR")
-        gitdir = d.getVar("GITDIR") or (dl_dir + "/git2")
+        ud.mirrortarball = 'git2_%s.tar.gz' % (gitsrcname)
+        ud.fullmirror = os.path.join(d.getVar("DL_DIR", True), ud.mirrortarball)
+        gitdir = d.getVar("GITDIR", True) or (d.getVar("DL_DIR", True) + "/git2/")
         ud.clonedir = os.path.join(gitdir, gitsrcname)

         ud.localfile = ud.clonedir

-        mirrortarball = 'git2_%s.tar.gz' % gitsrcname
-        ud.fullmirror = os.path.join(dl_dir, mirrortarball)
-        ud.mirrortarballs = [mirrortarball]
-        if ud.shallow:
-            tarballname = gitsrcname
-            if ud.bareclone:
-                tarballname = "%s_bare" % tarballname
-
-            if ud.shallow_revs:
-                tarballname = "%s_%s" % (tarballname, "_".join(sorted(ud.shallow_revs)))
-
-            for name, revision in sorted(ud.revisions.items()):
-                tarballname = "%s_%s" % (tarballname, ud.revisions[name][:7])
-                depth = ud.shallow_depths[name]
-                if depth:
-                    tarballname = "%s-%s" % (tarballname, depth)
-
-            shallow_refs = []
-            if not ud.nobranch:
-                shallow_refs.extend(ud.branches.values())
-            if ud.shallow_extra_refs:
-                shallow_refs.extend(r.replace('refs/heads/', '').replace('*', 'ALL') for r in ud.shallow_extra_refs)
-            if shallow_refs:
-                tarballname = "%s_%s" % (tarballname, "_".join(sorted(shallow_refs)).replace('/', '.'))
-
-            fetcher = self.__class__.__name__.lower()
-            ud.shallowtarball = '%sshallow_%s.tar.gz' % (fetcher, tarballname)
-            ud.fullshallow = os.path.join(dl_dir, ud.shallowtarball)
-            ud.mirrortarballs.insert(0, ud.shallowtarball)
-
     def localpath(self, ud, d):
         return ud.clonedir

     def need_update(self, ud, d):
-        return self.clonedir_need_update(ud, d) or self.shallow_tarball_need_update(ud) or self.tarball_need_update(ud)
-
-    def clonedir_need_update(self, ud, d):
         if not os.path.exists(ud.clonedir):
             return True
         for name in ud.names:
             if not self._contains_ref(ud, d, name, ud.clonedir):
                 return True
+        if ud.write_tarballs and not os.path.exists(ud.fullmirror):
+            return True
         return False

-    def shallow_tarball_need_update(self, ud):
-        return ud.shallow and ud.write_shallow_tarballs and not os.path.exists(ud.fullshallow)
-
-    def tarball_need_update(self, ud):
-        return ud.write_tarballs and not os.path.exists(ud.fullmirror)
-
     def try_premirror(self, ud, d):
         # If we don't do this, updating an existing checkout with only premirrors
         # is not possible
-        if bb.utils.to_boolean(d.getVar("BB_FETCH_PREMIRRORONLY")):
+        if d.getVar("BB_FETCH_PREMIRRORONLY", True) is not None:
             return True
         if os.path.exists(ud.clonedir):
             return False
@@ -327,15 +238,10 @@ class Git(FetchMethod):
     def download(self, ud, d):
         """Fetch url"""

-        # A current clone is preferred to either tarball, a shallow tarball is
-        # preferred to an out of date clone, and a missing clone will use
-        # either tarball.
-        if ud.shallow and os.path.exists(ud.fullshallow) and self.need_update(ud, d):
-            ud.localpath = ud.fullshallow
-            return
-        elif os.path.exists(ud.fullmirror) and not os.path.exists(ud.clonedir):
+        # If the checkout doesn't exist and the mirror tarball does, extract it
+        if not os.path.exists(ud.clonedir) and os.path.exists(ud.fullmirror):
             bb.utils.mkdirhier(ud.clonedir)
-            runfetchcmd("tar -xzf %s" % ud.fullmirror, d, workdir=ud.clonedir)
+            runfetchcmd("tar -xzf %s" % (ud.fullmirror), d, workdir=ud.clonedir)

         repourl = self._get_repo_url(ud)

@@ -346,7 +252,7 @@ class Git(FetchMethod):
             repourl = repourl[7:]
         clone_cmd = "LANG=C %s clone --bare --mirror %s %s --progress" % (ud.basecmd, repourl, ud.clonedir)
         if ud.proto.lower() != 'file':
-            bb.fetch2.check_network_access(d, clone_cmd, ud.url)
+            bb.fetch2.check_network_access(d, clone_cmd)
         progresshandler = GitProgressHandler(d)
         runfetchcmd(clone_cmd, d, log=progresshandler)

@@ -355,12 +261,11 @@ class Git(FetchMethod):
         for name in ud.names:
             if not self._contains_ref(ud, d, name, ud.clonedir):
                 needupdate = True
-                break
-
         if needupdate:
-            output = runfetchcmd("%s remote" % ud.basecmd, d, quiet=True, workdir=ud.clonedir)
-            if "origin" in output:
-                runfetchcmd("%s remote rm origin" % ud.basecmd, d, workdir=ud.clonedir)
+            try:
+                runfetchcmd("%s remote rm origin" % ud.basecmd, d, workdir=ud.clonedir)
+            except bb.fetch2.FetchError:
+                logger.debug(1, "No Origin")

             runfetchcmd("%s remote add --mirror=fetch origin %s" % (ud.basecmd, repourl), d, workdir=ud.clonedir)
             fetch_cmd = "LANG=C %s fetch -f --prune --progress %s refs/*:refs/*" % (ud.basecmd, repourl)
@@ -369,103 +274,33 @@ class Git(FetchMethod):
             progresshandler = GitProgressHandler(d)
             runfetchcmd(fetch_cmd, d, log=progresshandler, workdir=ud.clonedir)
             runfetchcmd("%s prune-packed" % ud.basecmd, d, workdir=ud.clonedir)
-            runfetchcmd("%s pack-refs --all" % ud.basecmd, d, workdir=ud.clonedir)
             runfetchcmd("%s pack-redundant --all | xargs -r rm" % ud.basecmd, d, workdir=ud.clonedir)
             try:
                 os.unlink(ud.fullmirror)
             except OSError as exc:
                 if exc.errno != errno.ENOENT:
                     raise
-
         for name in ud.names:
             if not self._contains_ref(ud, d, name, ud.clonedir):
                 raise bb.fetch2.FetchError("Unable to find revision %s in branch %s even from upstream" % (ud.revisions[name], ud.branches[name]))

     def build_mirror_data(self, ud, d):
-        if ud.shallow and ud.write_shallow_tarballs:
-            if not os.path.exists(ud.fullshallow):
-                if os.path.islink(ud.fullshallow):
-                    os.unlink(ud.fullshallow)
-                tempdir = tempfile.mkdtemp(dir=d.getVar('DL_DIR'))
-                shallowclone = os.path.join(tempdir, 'git')
-                try:
-                    self.clone_shallow_local(ud, shallowclone, d)
-
-                    logger.info("Creating tarball of git repository")
-                    runfetchcmd("tar -czf %s ." % ud.fullshallow, d, workdir=shallowclone)
-                    runfetchcmd("touch %s.done" % ud.fullshallow, d)
-                finally:
-                    bb.utils.remove(tempdir, recurse=True)
-        elif ud.write_tarballs and not os.path.exists(ud.fullmirror):
+        # Generate a mirror tarball if needed
+        if ud.write_tarballs and not os.path.exists(ud.fullmirror):
             # it's possible that this symlink points to read-only filesystem with PREMIRROR
             if os.path.islink(ud.fullmirror):
                 os.unlink(ud.fullmirror)

             logger.info("Creating tarball of git repository")
-            runfetchcmd("tar -czf %s ." % ud.fullmirror, d, workdir=ud.clonedir)
-            runfetchcmd("touch %s.done" % ud.fullmirror, d)
-
-    def clone_shallow_local(self, ud, dest, d):
-        """Clone the repo and make it shallow.
-
-        The upstream url of the new clone isn't set at this time, as it'll be
-        set correctly when unpacked."""
-        runfetchcmd("%s clone %s %s %s" % (ud.basecmd, ud.cloneflags, ud.clonedir, dest), d)
-
-        to_parse, shallow_branches = [], []
-        for name in ud.names:
-            revision = ud.revisions[name]
-            depth = ud.shallow_depths[name]
-            if depth:
-                to_parse.append('%s~%d^{}' % (revision, depth - 1))
-
-            # For nobranch, we need a ref, otherwise the commits will be
-            # removed, and for non-nobranch, we truncate the branch to our
-            # srcrev, to avoid keeping unnecessary history beyond that.
-            branch = ud.branches[name]
-            if ud.nobranch:
-                ref = "refs/shallow/%s" % name
-            elif ud.bareclone:
-                ref = "refs/heads/%s" % branch
-            else:
-                ref = "refs/remotes/origin/%s" % branch
-
-            shallow_branches.append(ref)
-            runfetchcmd("%s update-ref %s %s" % (ud.basecmd, ref, revision), d, workdir=dest)
-
-        # Map srcrev+depths to revisions
-        parsed_depths = runfetchcmd("%s rev-parse %s" % (ud.basecmd, " ".join(to_parse)), d, workdir=dest)
-
-        # Resolve specified revisions
-        parsed_revs = runfetchcmd("%s rev-parse %s" % (ud.basecmd, " ".join('"%s^{}"' % r for r in ud.shallow_revs)), d, workdir=dest)
-        shallow_revisions = parsed_depths.splitlines() + parsed_revs.splitlines()
-
-        # Apply extra ref wildcards
-        all_refs = runfetchcmd('%s for-each-ref "--format=%%(refname)"' % ud.basecmd,
-                               d, workdir=dest).splitlines()
-        for r in ud.shallow_extra_refs:
-            if not ud.bareclone:
-                r = r.replace('refs/heads/', 'refs/remotes/origin/')
-
-            if '*' in r:
-                matches = filter(lambda a: fnmatch.fnmatchcase(a, r), all_refs)
-                shallow_branches.extend(matches)
-            else:
-                shallow_branches.append(r)
-
-        # Make the repository shallow
-        shallow_cmd = [self.make_shallow_path, '-s']
-        for b in shallow_branches:
-            shallow_cmd.append('-r')
-            shallow_cmd.append(b)
-        shallow_cmd.extend(shallow_revisions)
-        runfetchcmd(subprocess.list2cmdline(shallow_cmd), d, workdir=dest)
+            runfetchcmd("tar -czf %s %s" % (ud.fullmirror, os.path.join(".") ), d, workdir=ud.clonedir)
+            runfetchcmd("touch %s.done" % (ud.fullmirror), d, workdir=ud.clonedir)

     def unpack(self, ud, destdir, d):
         """ unpack the downloaded src to destdir"""

         subdir = ud.parm.get("subpath", "")
         if subdir != "":
-            readpathspec = ":%s" % subdir
+            readpathspec = ":%s" % (subdir)
             def_destsuffix = "%s/" % os.path.basename(subdir.rstrip('/'))
         else:
             readpathspec = ""
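A small aside on the warrior-side code above: the shallow command is built as an argument list and serialized with the standard library's subprocess.list2cmdline before being handed to runfetchcmd. The path and revision below are hypothetical.

    import subprocess

    shallow_cmd = ['bin/git-make-shallow', '-s', '-r', 'refs/heads/master', 'a1b2c3d']
    print(subprocess.list2cmdline(shallow_cmd))
    # bin/git-make-shallow -s -r refs/heads/master a1b2c3d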
@@ -476,31 +311,11 @@ class Git(FetchMethod):
         if os.path.exists(destdir):
             bb.utils.prunedir(destdir)

-        source_found = False
-        source_error = []
-
-        if not source_found:
-            clonedir_is_up_to_date = not self.clonedir_need_update(ud, d)
-            if clonedir_is_up_to_date:
-                runfetchcmd("%s clone %s %s/ %s" % (ud.basecmd, ud.cloneflags, ud.clonedir, destdir), d)
-                source_found = True
-            else:
-                source_error.append("clone directory not available or not up to date: " + ud.clonedir)
-
-        if not source_found:
-            if ud.shallow:
-                if os.path.exists(ud.fullshallow):
-                    bb.utils.mkdirhier(destdir)
-                    runfetchcmd("tar -xzf %s" % ud.fullshallow, d, workdir=destdir)
-                    source_found = True
-                else:
-                    source_error.append("shallow clone not available: " + ud.fullshallow)
-            else:
-                source_error.append("shallow clone not enabled")
-
-        if not source_found:
-            raise bb.fetch2.UnpackError("No up to date source found: " + "; ".join(source_error), ud.url)
+        cloneflags = "-s -n"
+        if ud.bareclone:
+            cloneflags += " --mirror"

+        runfetchcmd("%s clone %s %s/ %s" % (ud.basecmd, cloneflags, ud.clonedir, destdir), d)
         repourl = self._get_repo_url(ud)
         runfetchcmd("%s remote set-url origin %s" % (ud.basecmd, repourl), d, workdir=destdir)
         if not ud.nocheckout:

@@ -512,7 +327,7 @@ class Git(FetchMethod):
             branchname = ud.branches[ud.names[0]]
             runfetchcmd("%s checkout -B %s %s" % (ud.basecmd, branchname, \
                         ud.revisions[ud.names[0]]), d, workdir=destdir)
-            runfetchcmd("%s branch %s --set-upstream-to origin/%s" % (ud.basecmd, branchname, \
+            runfetchcmd("%s branch --set-upstream %s origin/%s" % (ud.basecmd, branchname, \
                         branchname), d, workdir=destdir)
         else:
             runfetchcmd("%s checkout %s" % (ud.basecmd, ud.revisions[ud.names[0]]), d, workdir=destdir)
@@ -522,17 +337,9 @@ class Git(FetchMethod):
     def clean(self, ud, d):
         """ clean the git directory """

-        to_remove = [ud.localpath, ud.fullmirror, ud.fullmirror + ".done"]
-        # The localpath is a symlink to clonedir when it is cloned from a
-        # mirror, so remove both of them.
-        if os.path.islink(ud.localpath):
-            clonedir = os.path.realpath(ud.localpath)
-            to_remove.append(clonedir)
-
-        for r in to_remove:
-            if os.path.exists(r):
-                bb.note('Removing %s' % r)
-                bb.utils.remove(r, True)
+        bb.utils.remove(ud.localpath, True)
+        bb.utils.remove(ud.fullmirror)
+        bb.utils.remove(ud.fullmirror + ".done")

     def supports_srcrev(self):
         return True
@@ -573,26 +380,14 @@ class Git(FetchMethod):
         """
         Run git ls-remote with the specified search string
         """
-        # Prevent recursion e.g. in OE if SRCPV is in PV, PV is in WORKDIR,
-        # and WORKDIR is in PATH (as a result of RSS), our call to
-        # runfetchcmd() exports PATH so this function will get called again (!)
-        # In this scenario the return call of the function isn't actually
-        # important - WORKDIR isn't needed in PATH to call git ls-remote
-        # anyway.
-        if d.getVar('_BB_GIT_IN_LSREMOTE', False):
-            return ''
-        d.setVar('_BB_GIT_IN_LSREMOTE', '1')
-        try:
-            repourl = self._get_repo_url(ud)
-            cmd = "%s ls-remote %s %s" % \
-                (ud.basecmd, repourl, search)
-            if ud.proto.lower() != 'file':
-                bb.fetch2.check_network_access(d, cmd, repourl)
-            output = runfetchcmd(cmd, d, True)
-            if not output:
-                raise bb.fetch2.FetchError("The command %s gave empty output unexpectedly" % cmd, ud.url)
-        finally:
-            d.delVar('_BB_GIT_IN_LSREMOTE')
+        repourl = self._get_repo_url(ud)
+        cmd = "%s ls-remote %s %s" % \
+              (ud.basecmd, repourl, search)
+        if ud.proto.lower() != 'file':
+            bb.fetch2.check_network_access(d, cmd)
+        output = runfetchcmd(cmd, d, True)
+        if not output:
+            raise bb.fetch2.FetchError("The command %s gave empty output unexpectedly" % cmd, ud.url)
         return output

     def _latest_revision(self, ud, d, name):
@@ -623,11 +418,10 @@ class Git(FetchMethod):
|
||||
"""
|
||||
pupver = ('', '')
|
||||
|
||||
tagregex = re.compile(d.getVar('UPSTREAM_CHECK_GITTAGREGEX') or r"(?P<pver>([0-9][\.|_]?)+)")
|
||||
tagregex = re.compile(d.getVar('UPSTREAM_CHECK_GITTAGREGEX', True) or "(?P<pver>([0-9][\.|_]?)+)")
|
||||
try:
|
||||
output = self._lsremote(ud, d, "refs/tags/*")
|
||||
except (bb.fetch2.FetchError, bb.fetch2.NetworkAccess) as e:
|
||||
bb.note("Could not list remote: %s" % str(e))
|
||||
except bb.fetch2.FetchError or bb.fetch2.NetworkAccess:
|
||||
return pupver
|
||||
|
||||
verstring = ""
|
||||
@@ -638,7 +432,7 @@ class Git(FetchMethod):
|
||||
|
||||
tag_head = line.split("/")[-1]
|
||||
# Ignore non-released branches
|
||||
m = re.search(r"(alpha|beta|rc|final)+", tag_head)
|
||||
m = re.search("(alpha|beta|rc|final)+", tag_head)
|
||||
if m:
|
||||
continue
|
||||
|
||||
@@ -676,7 +470,7 @@ class Git(FetchMethod):
|
||||
if not os.path.exists(rev_file) or not os.path.getsize(rev_file):
|
||||
from pipes import quote
|
||||
commits = bb.fetch2.runfetchcmd(
|
||||
"git rev-list %s -- | wc -l" % quote(rev),
|
||||
"git rev-list %s -- | wc -l" % (quote(rev)),
|
||||
d, quiet=True).strip().lstrip('0')
|
||||
if commits:
|
||||
open(rev_file, "w").write("%d\n" % int(commits))
|
||||
@@ -691,5 +485,5 @@ class Git(FetchMethod):
|
||||
try:
|
||||
self._lsremote(ud, d, "")
|
||||
return True
|
||||
except bb.fetch2.FetchError:
|
||||
except FetchError:
|
||||
return False
|
||||
|
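The _BB_GIT_IN_LSREMOTE flag in the warrior-side _lsremote() above is a re-entrancy guard: runfetchcmd() exports PATH, so if WORKDIR (which may embed SRCPV and thereby trigger _lsremote) ends up in PATH, the function could call itself. A minimal standalone sketch of the same pattern, with hypothetical names and assuming a BitBake-style datastore d:

    # Re-entrancy guard sketch (hypothetical flag name): a marker in the
    # datastore makes the nested call return a dummy value instead of recursing.
    def guarded_query(d, run_query):
        if d.getVar('_IN_QUERY', False):
            return ''              # nested invocation: the result is not needed
        d.setVar('_IN_QUERY', '1')
        try:
            return run_query()     # the real work, e.g. "git ls-remote <url> <search>"
        finally:
            d.delVar('_IN_QUERY')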
@@ -22,6 +22,7 @@ BitBake 'Fetch' git annex implementation

import os
import bb
from bb import data
from bb.fetch2.git import Git
from bb.fetch2 import runfetchcmd
from bb.fetch2 import logger
@@ -33,11 +34,6 @@ class GitANNEX(Git):
        """
        return ud.type in ['gitannex']

    def urldata_init(self, ud, d):
        super(GitANNEX, self).urldata_init(ud, d)
        if ud.shallow:
            ud.shallow_extra_refs += ['refs/heads/git-annex', 'refs/heads/synced/*']

    def uses_annex(self, ud, d, wd):
        for name in ud.names:
            try:
@@ -60,21 +56,9 @@ class GitANNEX(Git):
    def download(self, ud, d):
        Git.download(self, ud, d)

        if not ud.shallow or ud.localpath != ud.fullshallow:
            if self.uses_annex(ud, d, ud.clonedir):
                self.update_annex(ud, d, ud.clonedir)

    def clone_shallow_local(self, ud, dest, d):
        super(GitANNEX, self).clone_shallow_local(ud, dest, d)

        try:
            runfetchcmd("%s annex init" % ud.basecmd, d, workdir=dest)
        except bb.fetch.FetchError:
            pass

        if self.uses_annex(ud, d, dest):
            runfetchcmd("%s annex get" % ud.basecmd, d, workdir=dest)
            runfetchcmd("chmod u+w -R %s/.git/annex" % (dest), d, quiet=True, workdir=dest)
        annex = self.uses_annex(ud, d, ud.clonedir)
        if annex:
            self.update_annex(ud, d, ud.clonedir)

    def unpack(self, ud, destdir, d):
        Git.unpack(self, ud, destdir, d)

@@ -31,12 +31,10 @@ NOTE: Switching a SRC_URI from "git://" to "gitsm://" requires a clean of your r

import os
import bb
import copy
from bb import data
from bb.fetch2.git import Git
from bb.fetch2 import runfetchcmd
from bb.fetch2 import logger
from bb.fetch2 import Fetch
from bb.fetch2 import BBFetchException

class GitSM(Git):
    def supports(self, ud, d):
@@ -45,181 +43,89 @@ class GitSM(Git):
        """
        return ud.type in ['gitsm']

    def process_submodules(self, ud, workdir, function, d):
        """
        Iterate over all of the submodules in this repository and execute
        the 'function' for each of them.
        """

        submodules = []
        paths = {}
        revision = {}
        uris = {}
        subrevision = {}

        def parse_gitmodules(gitmodules):
            modules = {}
            module = ""
            for line in gitmodules.splitlines():
                if line.startswith('[submodule'):
                    module = line.split('"')[1]
                    modules[module] = {}
                elif module and line.strip().startswith('path'):
                    path = line.split('=')[1].strip()
                    modules[module]['path'] = path
                elif module and line.strip().startswith('url'):
                    url = line.split('=')[1].strip()
                    modules[module]['url'] = url
            return modules

        # Collect the defined submodules, and their attributes
    def uses_submodules(self, ud, d, wd):
        for name in ud.names:
            try:
                gitmodules = runfetchcmd("%s show %s:.gitmodules" % (ud.basecmd, ud.revisions[name]), d, quiet=True, workdir=workdir)
            except:
                # No submodules to update
                continue

            for m, md in parse_gitmodules(gitmodules).items():
                try:
                    module_hash = runfetchcmd("%s ls-tree -z -d %s %s" % (ud.basecmd, ud.revisions[name], md['path']), d, quiet=True, workdir=workdir)
                except:
                    # If the command fails, we don't have a valid file to check. If it doesn't
                    # fail -- it still might be a failure, see next check...
                    module_hash = ""

                if not module_hash:
                    logger.debug(1, "submodule %s is defined, but is not initialized in the repository. Skipping", m)
                    continue

                submodules.append(m)
                paths[m] = md['path']
                revision[m] = ud.revisions[name]
                uris[m] = md['url']
                subrevision[m] = module_hash.split()[2]

                # Convert relative to absolute uri based on parent uri
                if uris[m].startswith('..'):
                    newud = copy.copy(ud)
                    newud.path = os.path.realpath(os.path.join(newud.path, uris[m]))
                    uris[m] = Git._get_repo_url(self, newud)

        for module in submodules:
            # Translate the module url into a SRC_URI

            if "://" in uris[module]:
                # Properly formated URL already
                proto = uris[module].split(':', 1)[0]
                url = uris[module].replace('%s:' % proto, 'gitsm:', 1)
            else:
                if ":" in uris[module]:
                    # Most likely an SSH style reference
                    proto = "ssh"
                    if ":/" in uris[module]:
                        # Absolute reference, easy to convert..
                        url = "gitsm://" + uris[module].replace(':/', '/', 1)
                    else:
                        # Relative reference, no way to know if this is right!
                        logger.warning("Submodule included by %s refers to relative ssh reference %s. References may fail if not absolute." % (ud.url, uris[module]))
                        url = "gitsm://" + uris[module].replace(':', '/', 1)
                else:
                    # This has to be a file reference
                    proto = "file"
                    url = "gitsm://" + uris[module]

            url += ';protocol=%s' % proto
            url += ";name=%s" % module
            url += ";subpath=%s" % paths[module]

            ld = d.createCopy()
            # Not necessary to set SRC_URI, since we're passing the URI to
            # Fetch.
            #ld.setVar('SRC_URI', url)
            ld.setVar('SRCREV_%s' % module, subrevision[module])

            # Workaround for issues with SRCPV/SRCREV_FORMAT errors
            # error refer to 'multiple' repositories. Only the repository
            # in the original SRC_URI actually matters...
            ld.setVar('SRCPV', d.getVar('SRCPV'))
            ld.setVar('SRCREV_FORMAT', module)

            function(ud, url, module, paths[module], ld)

        return submodules != []

    def need_update(self, ud, d):
        if Git.need_update(self, ud, d):
            return True

        try:
            # Check for the nugget dropped by the download operation
            known_srcrevs = runfetchcmd("%s config --get-all bitbake.srcrev" % \
                            (ud.basecmd), d, workdir=ud.clonedir)

            if ud.revisions[ud.names[0]] not in known_srcrevs.split():
                runfetchcmd("%s show %s:.gitmodules" % (ud.basecmd, ud.revisions[name]), d, quiet=True, workdir=wd)
                return True
        except bb.fetch2.FetchError:
            # No srcrev nuggets, so this is new and needs to be updated
            return True

            except bb.fetch.FetchError:
                pass
        return False

    def _set_relative_paths(self, repopath):
        """
        Fix submodule paths to be relative instead of absolute,
        so that when we move the repo it doesn't break
        (In Git 1.7.10+ this is done automatically)
        """
        submodules = []
        with open(os.path.join(repopath, '.gitmodules'), 'r') as f:
            for line in f.readlines():
                if line.startswith('[submodule'):
                    submodules.append(line.split('"')[1])

        for module in submodules:
            repo_conf = os.path.join(repopath, module, '.git')
            if os.path.exists(repo_conf):
                with open(repo_conf, 'r') as f:
                    lines = f.readlines()
                newpath = ''
                for i, line in enumerate(lines):
                    if line.startswith('gitdir:'):
                        oldpath = line.split(': ')[-1].rstrip()
                        if oldpath.startswith('/'):
                            newpath = '../' * (module.count('/') + 1) + '.git/modules/' + module
                            lines[i] = 'gitdir: %s\n' % newpath
                        break
                if newpath:
                    with open(repo_conf, 'w') as f:
                        for line in lines:
                            f.write(line)

            repo_conf2 = os.path.join(repopath, '.git', 'modules', module, 'config')
            if os.path.exists(repo_conf2):
                with open(repo_conf2, 'r') as f:
                    lines = f.readlines()
                newpath = ''
                for i, line in enumerate(lines):
                    if line.lstrip().startswith('worktree = '):
                        oldpath = line.split(' = ')[-1].rstrip()
                        if oldpath.startswith('/'):
                            newpath = '../' * (module.count('/') + 3) + module
                            lines[i] = '\tworktree = %s\n' % newpath
                        break
                if newpath:
                    with open(repo_conf2, 'w') as f:
                        for line in lines:
                            f.write(line)

    def update_submodules(self, ud, d):
        # We have to convert bare -> full repo, do the submodule bit, then convert back
        tmpclonedir = ud.clonedir + ".tmp"
        gitdir = tmpclonedir + os.sep + ".git"
        bb.utils.remove(tmpclonedir, True)
        os.mkdir(tmpclonedir)
        os.rename(ud.clonedir, gitdir)
        runfetchcmd("sed " + gitdir + "/config -i -e 's/bare.*=.*true/bare = false/'", d)
        runfetchcmd(ud.basecmd + " reset --hard", d, workdir=tmpclonedir)
        runfetchcmd(ud.basecmd + " checkout " + ud.revisions[ud.names[0]], d, workdir=tmpclonedir)
        runfetchcmd(ud.basecmd + " submodule update --init --recursive", d, workdir=tmpclonedir)
        self._set_relative_paths(tmpclonedir)
        runfetchcmd("sed " + gitdir + "/config -i -e 's/bare.*=.*false/bare = true/'", d, workdir=tmpclonedir)
        os.rename(gitdir, ud.clonedir,)
        bb.utils.remove(tmpclonedir, True)

    def download(self, ud, d):
        def download_submodule(ud, url, module, modpath, d):
            url += ";bareclone=1;nobranch=1"

            # Is the following still needed?
            #url += ";nocheckout=1"

            try:
                newfetch = Fetch([url], d, cache=False)
                newfetch.download()
                # Drop a nugget to add each of the srcrevs we've fetched (used by need_update)
                runfetchcmd("%s config --add bitbake.srcrev %s" % \
                            (ud.basecmd, ud.revisions[ud.names[0]]), d, workdir=ud.clonedir)
            except Exception as e:
                logger.error('gitsm: submodule download failed: %s %s' % (type(e).__name__, str(e)))
                raise

        Git.download(self, ud, d)
        self.process_submodules(ud, ud.clonedir, download_submodule, d)

        submodules = self.uses_submodules(ud, d, ud.clonedir)
        if submodules:
            self.update_submodules(ud, d)

    def unpack(self, ud, destdir, d):
        def unpack_submodules(ud, url, module, modpath, d):
            url += ";bareclone=1;nobranch=1"

            # Figure out where we clone over the bare submodules...
            if ud.bareclone:
                repo_conf = ud.destdir
            else:
                repo_conf = os.path.join(ud.destdir, '.git')

            try:
                newfetch = Fetch([url], d, cache=False)
                newfetch.unpack(root=os.path.dirname(os.path.join(repo_conf, 'modules', modpath)))
            except Exception as e:
                logger.error('gitsm: submodule unpack failed: %s %s' % (type(e).__name__, str(e)))
                raise

            local_path = newfetch.localpath(url)

            # Correct the submodule references to the local download version...
            runfetchcmd("%(basecmd)s config submodule.%(module)s.url %(url)s" % {'basecmd': ud.basecmd, 'module': module, 'url' : local_path}, d, workdir=ud.destdir)

            if ud.shallow:
                runfetchcmd("%(basecmd)s config submodule.%(module)s.shallow true" % {'basecmd': ud.basecmd, 'module': module}, d, workdir=ud.destdir)

            # Ensure the submodule repository is NOT set to bare, since we're checking it out...
            try:
                runfetchcmd("%s config core.bare false" % (ud.basecmd), d, quiet=True, workdir=os.path.join(repo_conf, 'modules', modpath))
            except:
                logger.error("Unable to set git config core.bare to false for %s" % os.path.join(repo_conf, 'modules', modpath))
                raise

        Git.unpack(self, ud, destdir, d)

        ret = self.process_submodules(ud, ud.destdir, unpack_submodules, d)

        if not ud.bareclone and ret:
            # Run submodule update, this sets up the directories -- without touching the config
            runfetchcmd("%s submodule update --recursive --no-fetch" % (ud.basecmd), d, quiet=True, workdir=ud.destdir)

        submodules = self.uses_submodules(ud, d, ud.destdir)
        if submodules:
            runfetchcmd(ud.basecmd + " checkout " + ud.revisions[ud.names[0]], d, workdir=ud.destdir)
            runfetchcmd(ud.basecmd + " submodule update --init --recursive", d, workdir=ud.destdir)

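For reference, parse_gitmodules() in the warrior-side process_submodules() above is a line-oriented parser keyed by submodule name; a standalone illustration with made-up input:

    # Hypothetical .gitmodules content and the mapping the parser yields.
    gitmodules = '[submodule "libfoo"]\n\tpath = ext/libfoo\n\turl = ../libfoo.git\n'
    # parse_gitmodules(gitmodules) ->
    #     {'libfoo': {'path': 'ext/libfoo', 'url': '../libfoo.git'}}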
@@ -29,6 +29,7 @@ import sys
import logging
import bb
import errno
from bb import data
from bb.fetch2 import FetchMethod
from bb.fetch2 import FetchError
from bb.fetch2 import MissingParameterError
@@ -66,7 +67,7 @@ class Hg(FetchMethod):
        else:
            ud.proto = "hg"

        ud.setup_revisions(d)
        ud.setup_revisons(d)

        if 'rev' in ud.parm:
            ud.revision = ud.parm['rev']
@@ -76,17 +77,16 @@ class Hg(FetchMethod):
        # Create paths to mercurial checkouts
        hgsrcname = '%s_%s_%s' % (ud.module.replace('/', '.'), \
                            ud.host, ud.path.replace('/', '.'))
        mirrortarball = 'hg_%s.tar.gz' % hgsrcname
        ud.fullmirror = os.path.join(d.getVar("DL_DIR"), mirrortarball)
        ud.mirrortarballs = [mirrortarball]
        ud.mirrortarball = 'hg_%s.tar.gz' % hgsrcname
        ud.fullmirror = os.path.join(d.getVar("DL_DIR", True), ud.mirrortarball)

        hgdir = d.getVar("HGDIR") or (d.getVar("DL_DIR") + "/hg")
        hgdir = d.getVar("HGDIR", True) or (d.getVar("DL_DIR", True) + "/hg/")
        ud.pkgdir = os.path.join(hgdir, hgsrcname)
        ud.moddir = os.path.join(ud.pkgdir, ud.module)
        ud.localfile = ud.moddir
        ud.basecmd = d.getVar("FETCHCMD_hg") or "/usr/bin/env hg"
        ud.basecmd = data.getVar("FETCHCMD_hg", d, True) or "/usr/bin/env hg"

        ud.write_tarballs = d.getVar("BB_GENERATE_MIRROR_TARBALLS")
        ud.write_tarballs = d.getVar("BB_GENERATE_MIRROR_TARBALLS", True)

    def need_update(self, ud, d):
        revTag = ud.parm.get('rev', 'tip')
@@ -99,7 +99,7 @@ class Hg(FetchMethod):
    def try_premirror(self, ud, d):
        # If we don't do this, updating an existing checkout with only premirrors
        # is not possible
        if bb.utils.to_boolean(d.getVar("BB_FETCH_PREMIRRORONLY")):
        if d.getVar("BB_FETCH_PREMIRRORONLY", True) is not None:
            return True
        if os.path.exists(ud.moddir):
            return False
@@ -221,7 +221,7 @@ class Hg(FetchMethod):
        """
        Compute tip revision for the url
        """
        bb.fetch2.check_network_access(d, self._buildhgcommand(ud, d, "info"), ud.url)
        bb.fetch2.check_network_access(d, self._buildhgcommand(ud, d, "info"))
        output = runfetchcmd(self._buildhgcommand(ud, d, "info"), d)
        return output.strip()


@@ -29,6 +29,7 @@ import os
import urllib.request, urllib.parse, urllib.error
import bb
import bb.utils
from bb import data
from bb.fetch2 import FetchMethod, FetchError
from bb.fetch2 import logger

@@ -62,11 +63,17 @@ class Local(FetchMethod):
        newpath = path
        if path[0] == "/":
            return [path]
        filespath = d.getVar('FILESPATH')
        filespath = data.getVar('FILESPATH', d, True)
        if filespath:
            logger.debug(2, "Searching for %s in paths:\n    %s" % (path, "\n    ".join(filespath.split(":"))))
            newpath, hist = bb.utils.which(filespath, path, history=True)
            searched.extend(hist)
        if not newpath:
            filesdir = data.getVar('FILESDIR', d, True)
            if filesdir:
                logger.debug(2, "Searching for %s in path: %s" % (path, filesdir))
                newpath = os.path.join(filesdir, path)
                searched.append(newpath)
        if (not newpath or not os.path.exists(newpath)) and path.find("*") != -1:
            # For expressions using '*', best we can do is take the first directory in FILESPATH that exists
            newpath, hist = bb.utils.which(filespath, ".", history=True)
@@ -74,7 +81,7 @@ class Local(FetchMethod):
            logger.debug(2, "Searching for %s in path: %s" % (path, newpath))
            return searched
        if not os.path.exists(newpath):
            dldirfile = os.path.join(d.getVar("DL_DIR"), path)
            dldirfile = os.path.join(d.getVar("DL_DIR", True), path)
            logger.debug(2, "Defaulting to %s for %s" % (dldirfile, path))
            bb.utils.mkdirhier(os.path.dirname(dldirfile))
            searched.append(dldirfile)
@@ -93,10 +100,13 @@ class Local(FetchMethod):
        # no need to fetch local files, we'll deal with them in place.
        if self.supports_checksum(urldata) and not os.path.exists(urldata.localpath):
            locations = []
            filespath = d.getVar('FILESPATH')
            filespath = data.getVar('FILESPATH', d, True)
            if filespath:
                locations = filespath.split(":")
            locations.append(d.getVar("DL_DIR"))
            filesdir = data.getVar('FILESDIR', d, True)
            if filesdir:
                locations.append(filesdir)
            locations.append(d.getVar("DL_DIR", True))

            msg = "Unable to find file " + urldata.url + " anywhere. The paths that were searched were:\n    " + "\n    ".join(locations)
            raise FetchError(msg)

@@ -25,6 +25,7 @@ import json
import subprocess
import signal
import bb
from bb import data
from bb.fetch2 import FetchMethod
from bb.fetch2 import FetchError
from bb.fetch2 import ChecksumError
@@ -32,6 +33,7 @@ from bb.fetch2 import runfetchcmd
from bb.fetch2 import logger
from bb.fetch2 import UnpackError
from bb.fetch2 import ParameterError
from distutils import spawn

def subprocess_setup():
    # Python installs a SIGPIPE handler by default. This is usually not what
@@ -78,7 +80,6 @@ class Npm(FetchMethod):
        if not ud.version:
            raise ParameterError("NPM fetcher requires a version parameter", ud.url)
        ud.bbnpmmanifest = "%s-%s.deps.json" % (ud.pkgname, ud.version)
        ud.bbnpmmanifest = ud.bbnpmmanifest.replace('/', '-')
        ud.registry = "http://%s" % (ud.url.replace('npm://', '', 1).split(';'))[0]
        prefixdir = "npm/%s" % ud.pkgname
        ud.pkgdatadir = d.expand("${DL_DIR}/%s" % prefixdir)
@@ -86,14 +87,12 @@ class Npm(FetchMethod):
        bb.utils.mkdirhier(ud.pkgdatadir)
        ud.localpath = d.expand("${DL_DIR}/npm/%s" % ud.bbnpmmanifest)

        self.basecmd = d.getVar("FETCHCMD_wget") or "/usr/bin/env wget -O -t 2 -T 30 -nv --passive-ftp --no-check-certificate "
        self.basecmd = d.getVar("FETCHCMD_wget", True) or "/usr/bin/env wget -O -t 2 -T 30 -nv --passive-ftp --no-check-certificate "
        ud.prefixdir = prefixdir

        ud.write_tarballs = ((d.getVar("BB_GENERATE_MIRROR_TARBALLS") or "0") != "0")
        mirrortarball = 'npm_%s-%s.tar.xz' % (ud.pkgname, ud.version)
        mirrortarball = mirrortarball.replace('/', '-')
        ud.fullmirror = os.path.join(d.getVar("DL_DIR"), mirrortarball)
        ud.mirrortarballs = [mirrortarball]
        ud.write_tarballs = ((data.getVar("BB_GENERATE_MIRROR_TARBALLS", d, True) or "0") != "0")
        ud.mirrortarball = 'npm_%s-%s.tar.xz' % (ud.pkgname, ud.version)
        ud.fullmirror = os.path.join(d.getVar("DL_DIR", True), ud.mirrortarball)

    def need_update(self, ud, d):
        if os.path.exists(ud.localpath):
@@ -102,8 +101,8 @@ class Npm(FetchMethod):

    def _runwget(self, ud, d, command, quiet):
        logger.debug(2, "Fetching %s using command '%s'" % (ud.url, command))
        bb.fetch2.check_network_access(d, command, ud.url)
        dldir = d.getVar("DL_DIR")
        bb.fetch2.check_network_access(d, command)
        dldir = d.getVar("DL_DIR", True)
        runfetchcmd(command, d, quiet, workdir=dldir)

    def _unpackdep(self, ud, pkg, data, destdir, dldir, d):
@@ -117,7 +116,7 @@ class Npm(FetchMethod):
        # Change to subdir before executing command
        if not os.path.exists(destdir):
            os.makedirs(destdir)
        path = d.getVar('PATH')
        path = d.getVar('PATH', True)
        if path:
            cmd = "PATH=\"%s\" %s" % (path, cmd)
        bb.note("Unpacking %s to %s/" % (file, destdir))
@@ -133,8 +132,9 @@ class Npm(FetchMethod):


    def unpack(self, ud, destdir, d):
        dldir = d.getVar("DL_DIR")
        with open("%s/npm/%s" % (dldir, ud.bbnpmmanifest)) as datafile:
        dldir = d.getVar("DL_DIR", True)
        depdumpfile = "%s-%s.deps.json" % (ud.pkgname, ud.version)
        with open("%s/npm/%s" % (dldir, depdumpfile)) as datafile:
            workobj = json.load(datafile)
        dldir = "%s/%s" % (os.path.dirname(ud.localpath), ud.pkgname)

@@ -182,27 +182,19 @@ class Npm(FetchMethod):
        if pkg_os:
            if not isinstance(pkg_os, list):
                pkg_os = [pkg_os]
            blacklist = False
            for item in pkg_os:
                if item.startswith('!'):
                    blacklist = True
                    break
            if (not blacklist and 'linux' not in pkg_os) or '!linux' in pkg_os:
            if 'linux' not in pkg_os or '!linux' in pkg_os:
                logger.debug(2, "Skipping %s since it's incompatible with Linux" % pkg)
                return
        #logger.debug(2, "Output URL is %s - %s - %s" % (ud.basepath, ud.basename, ud.localfile))
        outputurl = pdata['dist']['tarball']
        data[pkg] = {}
        data[pkg]['tgz'] = os.path.basename(outputurl)
        if outputurl in fetchedlist:
            return

        self._runwget(ud, d, "%s --directory-prefix=%s %s" % (self.basecmd, ud.prefixdir, outputurl), False)
        fetchedlist.append(outputurl)
        if not outputurl in fetchedlist:
            self._runwget(ud, d, "%s --directory-prefix=%s %s" % (self.basecmd, ud.prefixdir, outputurl), False)
            fetchedlist.append(outputurl)

        dependencies = pdata.get('dependencies', {})
        optionalDependencies = pdata.get('optionalDependencies', {})
        dependencies.update(optionalDependencies)
        depsfound = {}
        optdepsfound = {}
        data[pkg]['deps'] = {}
@@ -226,7 +218,7 @@ class Npm(FetchMethod):
            self._getshrinkeddependencies(obj, data['dependencies'][obj], data['dependencies'][obj]['version'], d, ud, lockdown, manifest, False)
            return
        outputurl = "invalid"
        if ('resolved' not in data) or (not data['resolved'].startswith('http://') and not data['resolved'].startswith('https://')):
        if ('resolved' not in data) or (not data['resolved'].startswith('http')):
            # will be the case for ${PN}
            fetchcmd = "npm view %s@%s dist.tarball --registry %s" % (pkg, version, ud.registry)
            logger.debug(2, "Found this matching URL: %s" % str(fetchcmd))
@@ -259,32 +251,25 @@ class Npm(FetchMethod):
        lockdown = {}

        if not os.listdir(ud.pkgdatadir) and os.path.exists(ud.fullmirror):
            dest = d.getVar("DL_DIR")
            dest = d.getVar("DL_DIR", True)
            bb.utils.mkdirhier(dest)
            runfetchcmd("tar -xJf %s" % (ud.fullmirror), d, workdir=dest)
            return

        if ud.parm.get("noverify", None) != '1':
            shwrf = d.getVar('NPM_SHRINKWRAP')
            logger.debug(2, "NPM shrinkwrap file is %s" % shwrf)
            if shwrf:
                try:
                    with open(shwrf) as datafile:
                        shrinkobj = json.load(datafile)
                except Exception as e:
                    raise FetchError('Error loading NPM_SHRINKWRAP file "%s" for %s: %s' % (shwrf, ud.pkgname, str(e)))
            elif not ud.ignore_checksums:
                logger.warning('Missing shrinkwrap file in NPM_SHRINKWRAP for %s, this will lead to unreliable builds!' % ud.pkgname)
            lckdf = d.getVar('NPM_LOCKDOWN')
            logger.debug(2, "NPM lockdown file is %s" % lckdf)
            if lckdf:
                try:
                    with open(lckdf) as datafile:
                        lockdown = json.load(datafile)
                except Exception as e:
                    raise FetchError('Error loading NPM_LOCKDOWN file "%s" for %s: %s' % (lckdf, ud.pkgname, str(e)))
            elif not ud.ignore_checksums:
                logger.warning('Missing lockdown file in NPM_LOCKDOWN for %s, this will lead to unreproducible builds!' % ud.pkgname)
        shwrf = d.getVar('NPM_SHRINKWRAP', True)
        logger.debug(2, "NPM shrinkwrap file is %s" % shwrf)
        try:
            with open(shwrf) as datafile:
                shrinkobj = json.load(datafile)
        except:
            logger.warning('Missing shrinkwrap file in NPM_SHRINKWRAP for %s, this will lead to unreliable builds!' % ud.pkgname)
        lckdf = d.getVar('NPM_LOCKDOWN', True)
        logger.debug(2, "NPM lockdown file is %s" % lckdf)
        try:
            with open(lckdf) as datafile:
                lockdown = json.load(datafile)
        except:
            logger.warning('Missing lockdown file in NPM_LOCKDOWN for %s, this will lead to unreproducible builds!' % ud.pkgname)

        if ('name' not in shrinkobj):
            self._getdependencies(ud.pkgname, jsondepobj, ud.version, d, ud)
@@ -301,7 +286,7 @@ class Npm(FetchMethod):
        if os.path.islink(ud.fullmirror):
            os.unlink(ud.fullmirror)

        dldir = d.getVar("DL_DIR")
        dldir = d.getVar("DL_DIR", True)
        logger.info("Creating tarball of npm data")
        runfetchcmd("tar -cJf %s npm/%s npm/%s" % (ud.fullmirror, ud.bbnpmmanifest, ud.pkgname), d,
                    workdir=dldir)

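On the warrior side of the npm hunk above, a configured NPM_SHRINKWRAP or NPM_LOCKDOWN file that fails to parse raises FetchError instead of merely logging a warning, as the morty code did. A sketch of the stricter pattern, with RuntimeError standing in for bb.fetch2.FetchError:

    import json

    def load_json_or_fail(path, varname, pkgname):
        # Configured-but-unreadable manifests become hard errors.
        try:
            with open(path) as f:
                return json.load(f)
        except Exception as e:
            raise RuntimeError('Error loading %s file "%s" for %s: %s'
                               % (varname, path, pkgname, e))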
@@ -10,6 +10,7 @@ import os
import sys
import logging
import bb
from bb import data
from bb.fetch2 import FetchMethod
from bb.fetch2 import FetchError
from bb.fetch2 import MissingParameterError
@@ -32,9 +33,8 @@ class Osc(FetchMethod):
        ud.module = ud.parm["module"]

        # Create paths to osc checkouts
        oscdir = d.getVar("OSCDIR") or (d.getVar("DL_DIR") + "/osc")
        relpath = self._strip_leading_slashes(ud.path)
        ud.pkgdir = os.path.join(oscdir, ud.host)
        ud.pkgdir = os.path.join(d.getVar('OSCDIR', True), ud.host)
        ud.moddir = os.path.join(ud.pkgdir, relpath, ud.module)

        if 'rev' in ud.parm:
@@ -47,7 +47,7 @@ class Osc(FetchMethod):
        else:
            ud.revision = ""

        ud.localfile = d.expand('%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), ud.path.replace('/', '.'), ud.revision))
        ud.localfile = data.expand('%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), ud.path.replace('/', '.'), ud.revision), d)

    def _buildosccommand(self, ud, d, command):
        """
@@ -55,7 +55,7 @@ class Osc(FetchMethod):
        command is "fetch", "update", "info"
        """

        basecmd = d.getVar("FETCHCMD_osc") or "/usr/bin/env osc"
        basecmd = data.expand('${FETCHCMD_osc}', d)

        proto = ud.parm.get('protocol', 'ocs')

@@ -84,7 +84,7 @@ class Osc(FetchMethod):

        logger.debug(2, "Fetch: checking for module directory '" + ud.moddir + "'")

        if os.access(os.path.join(d.getVar('OSCDIR'), ud.path, ud.module), os.R_OK):
        if os.access(os.path.join(d.getVar('OSCDIR', True), ud.path, ud.module), os.R_OK):
            oscupdatecmd = self._buildosccommand(ud, d, "update")
            logger.info("Update "+ ud.url)
            # update sources there
@@ -112,7 +112,7 @@ class Osc(FetchMethod):
        Generate a .oscrc to be used for this run.
        """

        config_path = os.path.join(d.getVar('OSCDIR'), "oscrc")
        config_path = os.path.join(d.getVar('OSCDIR', True), "oscrc")
        if (os.path.exists(config_path)):
            os.remove(config_path)

@@ -121,8 +121,8 @@ class Osc(FetchMethod):
            f.write("apisrv = %s\n" % ud.host)
            f.write("scheme = http\n")
            f.write("su-wrapper = su -c\n")
            f.write("build-root = %s\n" % d.getVar('WORKDIR'))
            f.write("urllist = %s\n" % d.getVar("OSCURLLIST"))
            f.write("build-root = %s\n" % d.getVar('WORKDIR', True))
            f.write("urllist = %s\n" % d.getVar("OSCURLLIST", True))
            f.write("extra-pkgs = gzip\n")
            f.write("\n")
            f.write("[%s]\n" % ud.host)

@@ -26,6 +26,7 @@ BitBake 'Fetch' implementation for perforce
import os
import logging
import bb
from bb import data
from bb.fetch2 import FetchMethod
from bb.fetch2 import FetchError
from bb.fetch2 import logger
@@ -43,9 +44,13 @@ class Perforce(FetchMethod):
        provided by the env, use it. If P4PORT is specified by the recipe, use
        its values, which may override the settings in P4CONFIG.
        """
        ud.basecmd = d.getVar("FETCHCMD_p4") or "/usr/bin/env p4"
        ud.basecmd = d.getVar('FETCHCMD_p4', True)
        if not ud.basecmd:
            ud.basecmd = "/usr/bin/env p4"

        ud.dldir = d.getVar("P4DIR") or (d.getVar("DL_DIR") + "/p4")
        ud.dldir = d.getVar('P4DIR', True)
        if not ud.dldir:
            ud.dldir = '%s/%s' % (d.getVar('DL_DIR', True), 'p4')

        path = ud.url.split('://')[1]
        path = path.split(';')[0]
@@ -57,7 +62,7 @@ class Perforce(FetchMethod):
        ud.path = path

        ud.usingp4config = False
        p4port = d.getVar('P4PORT')
        p4port = d.getVar('P4PORT', True)

        if p4port:
            logger.debug(1, 'Using recipe provided P4PORT: %s' % p4port)
@@ -66,7 +71,7 @@ class Perforce(FetchMethod):
            logger.debug(1, 'Trying to use P4CONFIG to automatically set P4PORT...')
            ud.usingp4config = True
            p4cmd = '%s info | grep "Server address"' % ud.basecmd
            bb.fetch2.check_network_access(d, p4cmd, ud.url)
            bb.fetch2.check_network_access(d, p4cmd)
            ud.host = runfetchcmd(p4cmd, d, True)
            ud.host = ud.host.split(': ')[1].strip()
            logger.debug(1, 'Determined P4PORT to be: %s' % ud.host)
@@ -82,9 +87,9 @@ class Perforce(FetchMethod):
        cleanedhost = ud.host.replace(':', '.')
        ud.pkgdir = os.path.join(ud.dldir, cleanedhost, cleanedpath)

        ud.setup_revisions(d)
        ud.setup_revisons(d)

        ud.localfile = d.expand('%s_%s_%s.tar.gz' % (cleanedhost, cleanedpath, ud.revision))
        ud.localfile = data.expand('%s_%s_%s.tar.gz' % (cleanedhost, cleanedpath, ud.revision), d)

    def _buildp4command(self, ud, d, command, depot_filename=None):
        """
@@ -135,7 +140,7 @@ class Perforce(FetchMethod):
        'p4 files' command, including trailing '#rev' file revision indicator
        """
        p4cmd = self._buildp4command(ud, d, 'files')
        bb.fetch2.check_network_access(d, p4cmd, ud.url)
        bb.fetch2.check_network_access(d, p4cmd)
        p4fileslist = runfetchcmd(p4cmd, d, True)
        p4fileslist = [f.rstrip() for f in p4fileslist.splitlines()]

@@ -166,7 +171,7 @@ class Perforce(FetchMethod):

        for afile in filelist:
            p4fetchcmd = self._buildp4command(ud, d, 'print', afile)
            bb.fetch2.check_network_access(d, p4fetchcmd, ud.url)
            bb.fetch2.check_network_access(d, p4fetchcmd)
            runfetchcmd(p4fetchcmd, d, workdir=ud.pkgdir)

        runfetchcmd('tar -czf %s p4' % (ud.localpath), d, cleanup=[ud.localpath], workdir=ud.pkgdir)
@@ -186,7 +191,7 @@ class Perforce(FetchMethod):
    def _latest_revision(self, ud, d, name):
        """ Return the latest upstream scm revision number """
        p4cmd = self._buildp4command(ud, d, "changes")
        bb.fetch2.check_network_access(d, p4cmd, ud.url)
        bb.fetch2.check_network_access(d, p4cmd)
        tip = runfetchcmd(p4cmd, d, True)

        if not tip:

@@ -25,9 +25,9 @@ BitBake "Fetch" repo (git) implementation

import os
import bb
from bb import data
from bb.fetch2 import FetchMethod
from bb.fetch2 import runfetchcmd
from bb.fetch2 import logger

class Repo(FetchMethod):
    """Class to fetch a module or modules from repo (git) repositories"""
@@ -45,25 +45,23 @@ class Repo(FetchMethod):
        "master".
        """

        ud.basecmd = d.getVar("FETCHCMD_repo") or "/usr/bin/env repo"

        ud.proto = ud.parm.get('protocol', 'git')
        ud.branch = ud.parm.get('branch', 'master')
        ud.manifest = ud.parm.get('manifest', 'default.xml')
        if not ud.manifest.endswith('.xml'):
            ud.manifest += '.xml'

        ud.localfile = d.expand("repo_%s%s_%s_%s.tar.gz" % (ud.host, ud.path.replace("/", "."), ud.manifest, ud.branch))
        ud.localfile = data.expand("repo_%s%s_%s_%s.tar.gz" % (ud.host, ud.path.replace("/", "."), ud.manifest, ud.branch), d)

    def download(self, ud, d):
        """Fetch url"""

        if os.access(os.path.join(d.getVar("DL_DIR"), ud.localfile), os.R_OK):
        if os.access(os.path.join(data.getVar("DL_DIR", d, True), ud.localfile), os.R_OK):
            logger.debug(1, "%s already exists (or was stashed). Skipping repo init / sync.", ud.localpath)
            return

        repodir = d.getVar("REPODIR") or (d.getVar("DL_DIR") + "/repo")
        gitsrcname = "%s%s" % (ud.host, ud.path.replace("/", "."))
        repodir = data.getVar("REPODIR", d, True) or os.path.join(data.getVar("DL_DIR", d, True), "repo")
        codir = os.path.join(repodir, gitsrcname, ud.manifest)

        if ud.user:
@@ -74,11 +72,11 @@ class Repo(FetchMethod):
        repodir = os.path.join(codir, "repo")
        bb.utils.mkdirhier(repodir)
        if not os.path.exists(os.path.join(repodir, ".repo")):
            bb.fetch2.check_network_access(d, "%s init -m %s -b %s -u %s://%s%s%s" % (ud.basecmd, ud.manifest, ud.branch, ud.proto, username, ud.host, ud.path), ud.url)
            runfetchcmd("%s init -m %s -b %s -u %s://%s%s%s" % (ud.basecmd, ud.manifest, ud.branch, ud.proto, username, ud.host, ud.path), d, workdir=repodir)
            bb.fetch2.check_network_access(d, "repo init -m %s -b %s -u %s://%s%s%s" % (ud.manifest, ud.branch, ud.proto, username, ud.host, ud.path), ud.url)
            runfetchcmd("repo init -m %s -b %s -u %s://%s%s%s" % (ud.manifest, ud.branch, ud.proto, username, ud.host, ud.path), d, workdir=repodir)

        bb.fetch2.check_network_access(d, "%s sync %s" % (ud.basecmd, ud.url), ud.url)
        runfetchcmd("%s sync" % ud.basecmd, d, workdir=repodir)
        bb.fetch2.check_network_access(d, "repo sync %s" % ud.url, ud.url)
        runfetchcmd("repo sync", d, workdir=repodir)

        scmdata = ud.parm.get("scmdata", "")
        if scmdata == "keep":

@@ -1,98 +0,0 @@
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
BitBake 'Fetch' implementation for Amazon AWS S3.

Class for fetching files from Amazon S3 using the AWS Command Line Interface.
The aws tool must be correctly installed and configured prior to use.

"""

# Copyright (C) 2017, Andre McCurdy <armccurdy@gmail.com>
#
# Based in part on bb.fetch2.wget:
#    Copyright (C) 2003, 2004 Chris Larson
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Based on functions from the base bb module, Copyright 2003 Holger Schurig

import os
import bb
import urllib.request, urllib.parse, urllib.error
from bb.fetch2 import FetchMethod
from bb.fetch2 import FetchError
from bb.fetch2 import runfetchcmd

class S3(FetchMethod):
    """Class to fetch urls via 'aws s3'"""

    def supports(self, ud, d):
        """
        Check to see if a given url can be fetched with s3.
        """
        return ud.type in ['s3']

    def recommends_checksum(self, urldata):
        return True

    def urldata_init(self, ud, d):
        if 'downloadfilename' in ud.parm:
            ud.basename = ud.parm['downloadfilename']
        else:
            ud.basename = os.path.basename(ud.path)

        ud.localfile = d.expand(urllib.parse.unquote(ud.basename))

        ud.basecmd = d.getVar("FETCHCMD_s3") or "/usr/bin/env aws s3"

    def download(self, ud, d):
        """
        Fetch urls
        Assumes localpath was called first
        """

        cmd = '%s cp s3://%s%s %s' % (ud.basecmd, ud.host, ud.path, ud.localpath)
        bb.fetch2.check_network_access(d, cmd, ud.url)
        runfetchcmd(cmd, d)

        # Additional sanity checks copied from the wget class (although there
        # are no known issues which mean these are required, treat the aws cli
        # tool with a little healthy suspicion).

        if not os.path.exists(ud.localpath):
            raise FetchError("The aws cp command returned success for s3://%s%s but %s doesn't exist?!" % (ud.host, ud.path, ud.localpath))

        if os.path.getsize(ud.localpath) == 0:
            os.remove(ud.localpath)
            raise FetchError("The aws cp command for s3://%s%s resulted in a zero size file?! Deleting and failing since this isn't right." % (ud.host, ud.path))

        return True

    def checkstatus(self, fetch, ud, d):
        """
        Check the status of a URL
        """

        cmd = '%s ls s3://%s%s' % (ud.basecmd, ud.host, ud.path)
        bb.fetch2.check_network_access(d, cmd, ud.url)
        output = runfetchcmd(cmd, d)

        # "aws s3 ls s3://mybucket/foo" will exit with success even if the file
        # is not found, so check output of the command to confirm success.

        if not output:
            raise FetchError("The aws ls command for s3://%s%s gave empty output" % (ud.host, ud.path))

        return True
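The S3 fetcher above (present on the warrior side, absent in morty) shells out to the aws CLI, so a recipe only needs an s3:// URL plus the usual checksum; a usage example with a made-up bucket and path:

    SRC_URI = "s3://example-bucket/path/to/archive.tar.gz"

which the fetcher turns into roughly: aws s3 cp s3://example-bucket/path/to/archive.tar.gz ${DL_DIR}/archive.tar.gz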
@@ -62,10 +62,12 @@ SRC_URI = "sftp://user@host.example.com/dir/path.file.txt"
import os
import bb
import urllib.request, urllib.parse, urllib.error
from bb import data
from bb.fetch2 import URI
from bb.fetch2 import FetchMethod
from bb.fetch2 import runfetchcmd


class SFTP(FetchMethod):
    """Class to fetch urls via 'sftp'"""

@@ -90,7 +92,7 @@ class SFTP(FetchMethod):
        else:
            ud.basename = os.path.basename(ud.path)

        ud.localfile = d.expand(urllib.parse.unquote(ud.basename))
        ud.localfile = data.expand(urllib.parse.unquote(ud.basename), d)

    def download(self, ud, d):
        """Fetch urls"""
@@ -102,7 +104,7 @@ class SFTP(FetchMethod):
            port = '-P %d' % urlo.port
            urlo.port = None

        dldir = d.getVar('DL_DIR')
        dldir = data.getVar('DL_DIR', d, True)
        lpath = os.path.join(dldir, ud.localfile)

        user = ''

@@ -43,6 +43,7 @@ IETF secsh internet draft:
#    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import re, os
from bb import data
from bb.fetch2 import FetchMethod
from bb.fetch2 import FetchError
from bb.fetch2 import logger
@@ -86,11 +87,11 @@ class SSH(FetchMethod):
        m = __pattern__.match(urldata.url)
        path = m.group('path')
        host = m.group('host')
        urldata.localpath = os.path.join(d.getVar('DL_DIR'),
        urldata.localpath = os.path.join(d.getVar('DL_DIR', True),
                os.path.basename(os.path.normpath(path)))

    def download(self, urldata, d):
        dldir = d.getVar('DL_DIR')
        dldir = d.getVar('DL_DIR', True)

        m = __pattern__.match(urldata.url)
        path = m.group('path')

@@ -28,6 +28,7 @@ import sys
import logging
import bb
import re
from bb import data
from bb.fetch2 import FetchMethod
from bb.fetch2 import FetchError
from bb.fetch2 import MissingParameterError
@@ -49,7 +50,7 @@ class Svn(FetchMethod):
        if not "module" in ud.parm:
            raise MissingParameterError('module', ud.url)

        ud.basecmd = d.getVar("FETCHCMD_svn") or "/usr/bin/env svn --non-interactive --trust-server-cert"
        ud.basecmd = d.getVar('FETCHCMD_svn', True)

        ud.module = ud.parm["module"]

@@ -59,20 +60,16 @@ class Svn(FetchMethod):
            ud.path_spec = ud.parm["path_spec"]

        # Create paths to svn checkouts
        svndir = d.getVar("SVNDIR") or (d.getVar("DL_DIR") + "/svn")
        relpath = self._strip_leading_slashes(ud.path)
        ud.pkgdir = os.path.join(svndir, ud.host, relpath)
        ud.pkgdir = os.path.join(data.expand('${SVNDIR}', d), ud.host, relpath)
        ud.moddir = os.path.join(ud.pkgdir, ud.module)
        # Protects the repository from concurrent updates, e.g. from two
        # recipes fetching different revisions at the same time
        ud.svnlock = os.path.join(ud.pkgdir, "svn.lock")

        ud.setup_revisions(d)
        ud.setup_revisons(d)

        if 'rev' in ud.parm:
            ud.revision = ud.parm['rev']

        ud.localfile = d.expand('%s_%s_%s_%s_.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.path.replace('/', '.'), ud.revision))
        ud.localfile = data.expand('%s_%s_%s_%s_.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.path.replace('/', '.'), ud.revision), d)

    def _buildsvncommand(self, ud, d, command):
        """
@@ -82,9 +79,9 @@ class Svn(FetchMethod):

        proto = ud.parm.get('protocol', 'svn')

        svn_ssh = None
        if proto == "svn+ssh" and "ssh" in ud.parm:
            svn_ssh = ud.parm["ssh"]
        svn_rsh = None
        if proto == "svn+ssh" and "rsh" in ud.parm:
            svn_rsh = ud.parm["rsh"]

        svnroot = ud.host + ud.path

@@ -116,8 +113,8 @@ class Svn(FetchMethod):
        else:
            raise FetchError("Invalid svn command %s" % command, ud.url)

        if svn_ssh:
            svncmd = "SVN_SSH=\"%s\" %s" % (svn_ssh, svncmd)
        if svn_rsh:
            svncmd = "svn_RSH=\"%s\" %s" % (svn_rsh, svncmd)

        return svncmd

@@ -126,40 +123,35 @@ class Svn(FetchMethod):

        logger.debug(2, "Fetch: checking for module directory '" + ud.moddir + "'")

        lf = bb.utils.lockfile(ud.svnlock)
        if os.access(os.path.join(ud.moddir, '.svn'), os.R_OK):
            svnupdatecmd = self._buildsvncommand(ud, d, "update")
            logger.info("Update " + ud.url)
            # We need to attempt to run svn upgrade first in case its an older working format
            try:
                runfetchcmd(ud.basecmd + " upgrade", d, workdir=ud.moddir)
            except FetchError:
                pass
            logger.debug(1, "Running %s", svnupdatecmd)
            bb.fetch2.check_network_access(d, svnupdatecmd, ud.url)
            runfetchcmd(svnupdatecmd, d, workdir=ud.moddir)
        else:
            svnfetchcmd = self._buildsvncommand(ud, d, "fetch")
            logger.info("Fetch " + ud.url)
            # check out sources there
            bb.utils.mkdirhier(ud.pkgdir)
            logger.debug(1, "Running %s", svnfetchcmd)
            bb.fetch2.check_network_access(d, svnfetchcmd, ud.url)
            runfetchcmd(svnfetchcmd, d, workdir=ud.pkgdir)

        try:
            if os.access(os.path.join(ud.moddir, '.svn'), os.R_OK):
                svnupdatecmd = self._buildsvncommand(ud, d, "update")
                logger.info("Update " + ud.url)
                # We need to attempt to run svn upgrade first in case its an older working format
                try:
                    runfetchcmd(ud.basecmd + " upgrade", d, workdir=ud.moddir)
                except FetchError:
                    pass
                logger.debug(1, "Running %s", svnupdatecmd)
                bb.fetch2.check_network_access(d, svnupdatecmd, ud.url)
                runfetchcmd(svnupdatecmd, d, workdir=ud.moddir)
            else:
                svnfetchcmd = self._buildsvncommand(ud, d, "fetch")
                logger.info("Fetch " + ud.url)
                # check out sources there
                bb.utils.mkdirhier(ud.pkgdir)
                logger.debug(1, "Running %s", svnfetchcmd)
                bb.fetch2.check_network_access(d, svnfetchcmd, ud.url)
                runfetchcmd(svnfetchcmd, d, workdir=ud.pkgdir)
            scmdata = ud.parm.get("scmdata", "")
            if scmdata == "keep":
                tar_flags = ""
            else:
                tar_flags = "--exclude='.svn'"

        scmdata = ud.parm.get("scmdata", "")
        if scmdata == "keep":
            tar_flags = ""
        else:
            tar_flags = "--exclude='.svn'"

            # tar them up to a defined filename
            runfetchcmd("tar %s -czf %s %s" % (tar_flags, ud.localpath, ud.path_spec), d,
                        cleanup=[ud.localpath], workdir=ud.pkgdir)
        finally:
            bb.utils.unlockfile(lf)
        # tar them up to a defined filename
        runfetchcmd("tar %s -czf %s %s" % (tar_flags, ud.localpath, ud.path_spec), d,
                    cleanup=[ud.localpath], workdir=ud.pkgdir)

    def clean(self, ud, d):
        """ Clean SVN specific files and dirs """
@@ -181,7 +173,7 @@ class Svn(FetchMethod):
        """
        Return the latest upstream revision number
        """
        bb.fetch2.check_network_access(d, self._buildsvncommand(ud, d, "log1"), ud.url)
        bb.fetch2.check_network_access(d, self._buildsvncommand(ud, d, "log1"))

        output = runfetchcmd("LANG=C LC_ALL=C " + self._buildsvncommand(ud, d, "log1"), d, True)


||||
@@ -30,17 +30,14 @@ import tempfile
|
||||
import subprocess
|
||||
import os
|
||||
import logging
|
||||
import errno
|
||||
import bb
|
||||
import bb.progress
|
||||
import socket
|
||||
import http.client
|
||||
import urllib.request, urllib.parse, urllib.error
|
||||
from bb import data
|
||||
from bb.fetch2 import FetchMethod
|
||||
from bb.fetch2 import FetchError
|
||||
from bb.fetch2 import logger
|
||||
from bb.fetch2 import runfetchcmd
|
||||
from bb.fetch2 import FetchConnectionCache
|
||||
from bb.utils import export_proxies
|
||||
from bs4 import BeautifulSoup
|
||||
from bs4 import SoupStrainer
|
||||
@@ -87,19 +84,19 @@ class Wget(FetchMethod):
|
||||
else:
|
||||
ud.basename = os.path.basename(ud.path)
|
||||
|
||||
ud.localfile = d.expand(urllib.parse.unquote(ud.basename))
|
||||
ud.localfile = data.expand(urllib.parse.unquote(ud.basename), d)
|
||||
if not ud.localfile:
|
||||
ud.localfile = d.expand(urllib.parse.unquote(ud.host + ud.path).replace("/", "."))
|
||||
ud.localfile = data.expand(urllib.parse.unquote(ud.host + ud.path).replace("/", "."), d)
|
||||
|
||||
self.basecmd = d.getVar("FETCHCMD_wget") or "/usr/bin/env wget -t 2 -T 30 --passive-ftp --no-check-certificate"
|
||||
self.basecmd = d.getVar("FETCHCMD_wget", True) or "/usr/bin/env wget -t 2 -T 30 --passive-ftp --no-check-certificate"
|
||||
|
||||
def _runwget(self, ud, d, command, quiet, workdir=None):
|
||||
def _runwget(self, ud, d, command, quiet):
|
||||
|
||||
progresshandler = WgetProgressHandler(d)
|
||||
|
||||
logger.debug(2, "Fetching %s using command '%s'" % (ud.url, command))
|
||||
bb.fetch2.check_network_access(d, command, ud.url)
|
||||
runfetchcmd(command + ' --progress=dot -v', d, quiet, log=progresshandler, workdir=workdir)
|
||||
bb.fetch2.check_network_access(d, command)
|
||||
runfetchcmd(command + ' --progress=dot -v', d, quiet, log=progresshandler)
|
||||
|
||||
def download(self, ud, d):
|
||||
"""Fetch urls"""
|
||||
@@ -107,7 +104,7 @@ class Wget(FetchMethod):
|
||||
fetchcmd = self.basecmd
|
||||
|
||||
if 'downloadfilename' in ud.parm:
|
||||
dldir = d.getVar("DL_DIR")
|
||||
dldir = d.getVar("DL_DIR", True)
|
||||
bb.utils.mkdirhier(os.path.dirname(dldir + os.sep + ud.localfile))
|
||||
fetchcmd += " -O " + dldir + os.sep + ud.localfile
|
||||
|
||||
@@ -135,6 +132,10 @@ class Wget(FetchMethod):
|
||||
return True
|
||||
|
||||
def checkstatus(self, fetch, ud, d, try_again=True):
|
||||
import urllib.request, urllib.error, urllib.parse, socket, http.client
|
||||
from urllib.response import addinfourl
|
||||
from bb.fetch2 import FetchConnectionCache
|
||||
|
||||
class HTTPConnectionCache(http.client.HTTPConnection):
|
||||
if fetch.connection_cache:
|
||||
def connect(self):
|
||||
@@ -167,7 +168,7 @@ class Wget(FetchMethod):
|
||||
"""
|
||||
host = req.host
|
||||
if not host:
|
||||
raise urllib.error.URLError('no host given')
|
||||
raise urlllib2.URLError('no host given')
|
||||
|
||||
h = http_class(host, timeout=req.timeout) # will parse host:port
|
||||
h.set_debuglevel(self._debuglevel)
|
||||
@@ -184,7 +185,7 @@ class Wget(FetchMethod):
|
||||
# request.
|
||||
|
||||
# Don't close connection when connection_cache is enabled,
|
||||
if fetch.connection_cache is None:
|
||||
if fetch.connection_cache is None:
|
||||
headers["Connection"] = "close"
|
||||
else:
|
||||
headers["Connection"] = "Keep-Alive" # Works for HTTP/1.0
|
||||
@@ -206,21 +207,8 @@ class Wget(FetchMethod):
|
||||
h.request(req.get_method(), req.selector, req.data, headers)
|
||||
except socket.error as err: # XXX what error?
|
||||
# Don't close connection when cache is enabled.
|
||||
# Instead, try to detect connections that are no longer
|
||||
# usable (for example, closed unexpectedly) and remove
|
||||
# them from the cache.
|
||||
if fetch.connection_cache is None:
|
||||
h.close()
|
||||
elif isinstance(err, OSError) and err.errno == errno.EBADF:
|
||||
# This happens when the server closes the connection despite the Keep-Alive.
|
||||
# Apparently urllib then uses the file descriptor, expecting it to be
|
||||
# connected, when in reality the connection is already gone.
|
||||
# We let the request fail and expect it to be
|
||||
# tried once more ("try_again" in check_status()),
|
||||
# with the dead connection removed from the cache.
|
||||
# If it still fails, we give up, which can happend for bad
|
||||
# HTTP proxy settings.
|
||||
fetch.connection_cache.remove_connection(h.host, h.port)
|
||||
raise urllib.error.URLError(err)
|
||||
else:
|
||||
try:
|
||||
@@ -249,9 +237,8 @@ class Wget(FetchMethod):
|
||||
return ""
|
||||
def close(self):
|
||||
pass
|
||||
closed = False
|
||||
|
||||
resp = urllib.response.addinfourl(fp_dummy(), r.msg, req.get_full_url())
|
||||
resp = addinfourl(fp_dummy(), r.msg, req.get_full_url())
|
||||
resp.code = r.status
|
||||
resp.msg = r.reason
|
||||
|
||||
@@ -270,18 +257,24 @@ class Wget(FetchMethod):
|
||||
fp.read()
|
||||
fp.close()
|
||||
|
||||
newheaders = dict((k, v) for k, v in list(req.headers.items())
|
||||
newheaders = dict((k,v) for k,v in list(req.headers.items())
|
||||
if k.lower() not in ("content-length", "content-type"))
|
||||
return self.parent.open(urllib.request.Request(req.get_full_url(),
|
||||
headers=newheaders,
|
||||
origin_req_host=req.origin_req_host,
|
||||
unverifiable=True))
|
||||
|
||||
|
||||
# Some servers (e.g. GitHub archives, hosted on Amazon S3) return 403
|
||||
# Forbidden when they actually mean 405 Method Not Allowed.
|
||||
"""
|
||||
Some servers (e.g. GitHub archives, hosted on Amazon S3) return 403
|
||||
Forbidden when they actually mean 405 Method Not Allowed.
|
||||
"""
|
||||
http_error_403 = http_error_405
|
||||
|
||||
"""
|
||||
Some servers (e.g. FusionForge) returns 406 Not Acceptable when they
|
||||
actually mean 405 Method Not Allowed.
|
||||
"""
|
||||
http_error_406 = http_error_405
|
||||
|
||||
class FixedHTTPRedirectHandler(urllib.request.HTTPRedirectHandler):
|
||||
"""
|
||||
@@ -290,15 +283,15 @@ class Wget(FetchMethod):
|
||||
"""
|
||||
def redirect_request(self, req, fp, code, msg, headers, newurl):
|
||||
newreq = urllib.request.HTTPRedirectHandler.redirect_request(self, req, fp, code, msg, headers, newurl)
|
||||
newreq.get_method = req.get_method
|
||||
newreq.get_method = lambda: req.get_method()
|
||||
return newreq
|
||||
exported_proxies = export_proxies(d)
|
||||
|
||||
handlers = [FixedHTTPRedirectHandler, HTTPMethodFallback]
|
||||
if exported_proxies:
|
||||
if export_proxies:
|
||||
handlers.append(urllib.request.ProxyHandler())
|
||||
handlers.append(CacheHTTPHandler())
|
||||
# Since Python 2.7.9 ssl cert validation is enabled by default
|
||||
# XXX: Since Python 2.7.9 ssl cert validation is enabled by default
|
||||
# see PEP-0476, this causes verification errors on some https servers
|
||||
# so disable by default.
|
||||
import ssl
|
||||
@@ -310,29 +303,14 @@ class Wget(FetchMethod):
|
||||
uri = ud.url.split(";")[0]
|
||||
r = urllib.request.Request(uri)
|
||||
r.get_method = lambda: "HEAD"
|
||||
# Some servers (FusionForge, as used on Alioth) require that the
|
||||
# optional Accept header is set.
|
||||
r.add_header("Accept", "*/*")
|
||||
def add_basic_auth(login_str, request):
|
||||
'''Adds Basic auth to http request, pass in login:password as string'''
|
||||
|
||||
if ud.user:
|
||||
import base64
|
||||
encodeuser = base64.b64encode(login_str.encode('utf-8')).decode("utf-8")
|
||||
authheader = "Basic %s" % encodeuser
|
||||
encodeuser = base64.b64encode(ud.user.encode('utf-8')).decode("utf-8")
|
||||
authheader = "Basic %s" % encodeuser
|
||||
r.add_header("Authorization", authheader)
|
||||
|
||||
if ud.user and ud.pswd:
|
||||
add_basic_auth(ud.user + ':' + ud.pswd, r)
|
||||
|
||||
try:
|
||||
import netrc
|
||||
n = netrc.netrc()
|
||||
login, unused, password = n.authenticators(urllib.parse.urlparse(uri).hostname)
|
||||
add_basic_auth("%s:%s" % (login, password), r)
|
||||
except (TypeError, ImportError, IOError, netrc.NetrcParseError):
|
||||
pass
|
||||
|
||||
with opener.open(r) as response:
|
||||
pass
|
||||
opener.open(r)
|
||||
except urllib.error.URLError as e:
|
||||
if try_again:
|
||||
logger.debug(2, "checkstatus: trying again")
|
||||
@@ -394,14 +372,18 @@ class Wget(FetchMethod):
|
||||
(oldpn, oldpv, oldsuffix) = old
|
||||
(newpn, newpv, newsuffix) = new
|
||||
|
||||
# Check for a new suffix type that we have never heard of before
|
||||
if newsuffix:
|
||||
"""
|
||||
Check for a new suffix type that we have never heard of before
|
||||
"""
|
||||
if (newsuffix):
|
||||
m = self.suffix_regex_comp.search(newsuffix)
|
||||
if not m:
|
||||
bb.warn("%s has a possible unknown suffix: %s" % (newpn, newsuffix))
|
||||
return False
|
||||
|
||||
# Not our package so ignore it
|
||||
"""
|
||||
Not our package so ignore it
|
||||
"""
|
||||
if oldpn != newpn:
|
||||
return False
|
||||
|
||||
@@ -415,16 +397,17 @@ class Wget(FetchMethod):
Run fetch checkstatus to get directory information
"""
f = tempfile.NamedTemporaryFile()
with tempfile.TemporaryDirectory(prefix="wget-index-") as workdir, tempfile.NamedTemporaryFile(dir=workdir, prefix="wget-listing-") as f:
agent = "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2.12) Gecko/20101027 Ubuntu/9.10 (karmic) Firefox/3.6.12"
fetchcmd = self.basecmd
fetchcmd += " -O " + f.name + " --user-agent='" + agent + "' '" + uri + "'"
try:
self._runwget(ud, d, fetchcmd, True, workdir=workdir)
fetchresult = f.read()
except bb.fetch2.BBFetchException:
fetchresult = ""

agent = "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2.12) Gecko/20101027 Ubuntu/9.10 (karmic) Firefox/3.6.12"
fetchcmd = self.basecmd
fetchcmd += " -O " + f.name + " --user-agent='" + agent + "' '" + uri + "'"
try:
self._runwget(ud, d, fetchcmd, True)
fetchresult = f.read()
except bb.fetch2.BBFetchException:
fetchresult = ""

f.close()
return fetchresult

def _check_latest_version(self, url, package, package_regex, current_version, ud, d):
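
The rewritten variant of _fetch_index scopes both the scratch directory and the listing file with context managers, so nothing leaks if the fetch raises. The same pattern in isolation; the wget command line here is a stand-in for self._runwget:

import subprocess
import tempfile

def fetch_index(uri):
    """Download a directory listing into a throwaway file, cleaned up automatically."""
    with tempfile.TemporaryDirectory(prefix="wget-index-") as workdir, \
            tempfile.NamedTemporaryFile(dir=workdir, prefix="wget-listing-") as f:
        cmd = ["wget", "-O", f.name, uri]  # stand-in for self._runwget()
        try:
            subprocess.run(cmd, cwd=workdir, check=True)
            return f.read()
        except subprocess.CalledProcessError:
            return b""
    # Both the file and the directory are gone here, even on exceptions.
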
@@ -467,14 +450,15 @@ class Wget(FetchMethod):

return ""

def _check_latest_version_by_dir(self, dirver, package, package_regex, current_version, ud, d):
def _check_latest_version_by_dir(self, dirver, package, package_regex,
current_version, ud, d):
"""
Scan every directory in order to get upstream version.
Scan every directory in order to get upstream version.
"""
version_dir = ['', '', '']
version = ['', '', '']

dirver_regex = re.compile(r"(?P<pfx>\D*)(?P<ver>(\d+[\.\-_])+(\d+))")
dirver_regex = re.compile("(?P<pfx>\D*)(?P<ver>(\d+[\.\-_])+(\d+))")
s = dirver_regex.search(dirver)
if s:
version_dir[1] = s.group('ver')
@@ -534,38 +518,38 @@ class Wget(FetchMethod):
gst-fluendo-mp3
"""
# match most patterns which uses "-" as separator to version digits
pn_prefix1 = r"[a-zA-Z][a-zA-Z0-9]*([-_][a-zA-Z]\w+)*\+?[-_]"
pn_prefix1 = "[a-zA-Z][a-zA-Z0-9]*([-_][a-zA-Z]\w+)*\+?[-_]"
# a loose pattern such as for unzip552.tar.gz
pn_prefix2 = r"[a-zA-Z]+"
pn_prefix2 = "[a-zA-Z]+"
# a loose pattern such as for 80325-quicky-0.4.tar.gz
pn_prefix3 = r"[0-9]+[-]?[a-zA-Z]+"
pn_prefix3 = "[0-9]+[-]?[a-zA-Z]+"
# Save the Package Name (pn) Regex for use later
pn_regex = r"(%s|%s|%s)" % (pn_prefix1, pn_prefix2, pn_prefix3)
pn_regex = "(%s|%s|%s)" % (pn_prefix1, pn_prefix2, pn_prefix3)

# match version
pver_regex = r"(([A-Z]*\d+[a-zA-Z]*[\.\-_]*)+)"
pver_regex = "(([A-Z]*\d+[a-zA-Z]*[\.\-_]*)+)"

# match arch
parch_regex = "-source|_all_"

# src.rpm extension was added only for rpm package. Can be removed if the rpm
# packaged will always be considered as having to be manually upgraded
psuffix_regex = r"(tar\.gz|tgz|tar\.bz2|zip|xz|tar\.lz|rpm|bz2|orig\.tar\.gz|tar\.xz|src\.tar\.gz|src\.tgz|svnr\d+\.tar\.bz2|stable\.tar\.gz|src\.rpm)"
psuffix_regex = "(tar\.gz|tgz|tar\.bz2|zip|xz|rpm|bz2|orig\.tar\.gz|tar\.xz|src\.tar\.gz|src\.tgz|svnr\d+\.tar\.bz2|stable\.tar\.gz|src\.rpm)"

# match name, version and archive type of a package
package_regex_comp = re.compile(r"(?P<name>%s?\.?v?)(?P<pver>%s)(?P<arch>%s)?[\.-](?P<type>%s$)"
package_regex_comp = re.compile("(?P<name>%s?\.?v?)(?P<pver>%s)(?P<arch>%s)?[\.-](?P<type>%s$)"
% (pn_regex, pver_regex, parch_regex, psuffix_regex))
self.suffix_regex_comp = re.compile(psuffix_regex)

# compile regex, can be specific by package or generic regex
pn_regex = d.getVar('UPSTREAM_CHECK_REGEX')
pn_regex = d.getVar('UPSTREAM_CHECK_REGEX', True)
if pn_regex:
package_custom_regex_comp = re.compile(pn_regex)
else:
version = self._parse_path(package_regex_comp, package)
if version:
package_custom_regex_comp = re.compile(
r"(?P<name>%s)(?P<pver>%s)(?P<arch>%s)?[\.-](?P<type>%s)" %
"(?P<name>%s)(?P<pver>%s)(?P<arch>%s)?[\.-](?P<type>%s)" %
(re.escape(version[0]), pver_regex, parch_regex, psuffix_regex))
else:
package_custom_regex_comp = None
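
Many hunks in this file only toggle the r prefix on regex literals. Both spellings currently compile to the same pattern, because sequences like \d are not recognized string escapes and are passed through, but in a plain string they are invalid escapes that newer Python releases warn about. A small demonstration, assuming Python 3.6 or later, where the warning category varies by version:

import re
import warnings

# Both spellings produce the identical pattern today.
assert re.compile(r"\d+").pattern == re.compile("\\d+").pattern

plain = 'x = "\\d+"'   # module text containing a plain-string regex literal
raw = 'x = r"\\d+"'    # the same literal spelled as a raw string

with warnings.catch_warnings():
    warnings.simplefilter("error")
    compile(raw, "<demo>", "exec")  # raw strings compile cleanly
    try:
        compile(plain, "<demo>", "exec")
    except (DeprecationWarning, SyntaxWarning, SyntaxError):
        print("invalid escape sequence '\\d' rejected")
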
@@ -579,10 +563,10 @@ class Wget(FetchMethod):
sanity check to ensure same name and type.
"""
package = ud.path.split("/")[-1]
current_version = ['', d.getVar('PV'), '']
current_version = ['', d.getVar('PV', True), '']

"""possible to have no version in pkg name, such as spectrum-fw"""
if not re.search(r"\d+", package):
if not re.search("\d+", package):
current_version[1] = re.sub('_', '.', current_version[1])
current_version[1] = re.sub('-', '.', current_version[1])
return (current_version[1], '')
@@ -594,19 +578,19 @@ class Wget(FetchMethod):
bb.debug(3, "latest_versionstring, regex: %s" % (package_regex.pattern))

uri = ""
regex_uri = d.getVar("UPSTREAM_CHECK_URI")
regex_uri = d.getVar("UPSTREAM_CHECK_URI", True)
if not regex_uri:
path = ud.path.split(package)[0]

# search for version matches on folders inside the path, like:
# "5.7" in http://download.gnome.org/sources/${PN}/5.7/${PN}-${PV}.tar.gz
dirver_regex = re.compile(r"(?P<dirver>[^/]*(\d+\.)*\d+([-_]r\d+)*)/")
dirver_regex = re.compile("(?P<dirver>[^/]*(\d+\.)*\d+([-_]r\d+)*)/")
m = dirver_regex.search(path)
if m:
pn = d.getVar('PN')
pn = d.getVar('PN', True)
dirver = m.group('dirver')

dirver_pn_regex = re.compile(r"%s\d?" % (re.escape(pn)))
dirver_pn_regex = re.compile("%s\d?" % (re.escape(pn)))
if not dirver_pn_regex.search(dirver):
return (self._check_latest_version_by_dir(dirver,
package, package_regex, current_version, ud, d), '')

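
The other change that repeats throughout this compare is d.getVar('X') versus d.getVar('X', True): older BitBake required the expand flag to be passed explicitly, while later releases made expansion the default. A hypothetical stand-in datastore makes the difference concrete; FakeData below is illustrative only, not BitBake's API.

class FakeData:
    """Toy datastore whose getVar mirrors the newer default-expand signature."""
    def __init__(self, values):
        self.values = values

    def getVar(self, var, expand=True):  # older code had to pass expand=True
        value = self.values.get(var)
        if expand and value is not None:
            # crude ${VAR} substitution, enough for the demo
            for k, v in self.values.items():
                value = value.replace("${%s}" % k, v)
        return value

d = FakeData({"PN": "zlib", "PV": "1.2.8", "P": "${PN}-${PV}"})
assert d.getVar("P") == d.getVar("P", True) == "zlib-1.2.8"
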
@@ -28,8 +28,6 @@ import logging
import optparse
import warnings
import fcntl
import time
import traceback

import bb
from bb import event
@@ -39,17 +37,11 @@ from bb import ui
from bb import server
from bb import cookerdata

import bb.server.process
import bb.server.xmlrpcclient

logger = logging.getLogger("BitBake")

class BBMainException(Exception):
pass

class BBMainFatal(bb.BBHandledException):
pass

def present_options(optionlist):
if len(optionlist) > 1:
return ' or '.join([', '.join(optionlist[:-1]), optionlist[-1]])
@@ -66,6 +58,9 @@ class BitbakeHelpFormatter(optparse.IndentedHelpFormatter):
if option.dest == 'ui':
valid_uis = list_extension_modules(bb.ui, 'main')
option.help = option.help.replace('@CHOICES@', present_options(valid_uis))
elif option.dest == 'servertype':
valid_server_types = list_extension_modules(bb.server, 'BitBakeServer')
option.help = option.help.replace('@CHOICES@', present_options(valid_server_types))

return optparse.IndentedHelpFormatter.format_option(self, option)

@@ -153,6 +148,11 @@ class BitBakeConfigParameters(cookerdata.ConfigParameters):
"failed and anything depending on it cannot be built, as much as "
"possible will be built before stopping.")

parser.add_option("-a", "--tryaltconfigs", action="store_true",
dest="tryaltconfigs", default=False,
help="Continue with builds by trying to use alternative providers "
"where possible.")

parser.add_option("-f", "--force", action="store_true", dest="force", default=False,
help="Force the specified targets/task to run (invalidating any "
"existing stamp file).")
@@ -174,24 +174,13 @@ class BitBakeConfigParameters(cookerdata.ConfigParameters):
help="Read the specified file after bitbake.conf.")

parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False,
help="Enable tracing of shell tasks (with 'set -x'). "
"Also print bb.note(...) messages to stdout (in "
"addition to writing them to ${T}/log.do_<task>).")
help="Output more log message data to the terminal.")

parser.add_option("-D", "--debug", action="count", dest="debug", default=0,
help="Increase the debug level. You can specify this "
"more than once. -D sets the debug level to 1, "
"where only bb.debug(1, ...) messages are printed "
"to stdout; -DD sets the debug level to 2, where "
"both bb.debug(1, ...) and bb.debug(2, ...) "
"messages are printed; etc. Without -D, no debug "
"messages are printed. Note that -D only affects "
"output to stdout. All debug messages are written "
"to ${T}/log.do_taskname, regardless of the debug "
"level.")
help="Increase the debug level. You can specify this more than once.")

parser.add_option("-q", "--quiet", action="count", dest="quiet", default=0,
help="Output less log message data to the terminal. You can specify this more than once.")
parser.add_option("-q", "--quiet", action="store_true", dest="quiet", default=False,
help="Output less log message data to the terminal.")

parser.add_option("-n", "--dry-run", action="store_true", dest="dry_run", default=False,
help="Don't execute, just go through the motions.")
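
The -q change above swaps action="store_true" for action="count", which is how repeatable flags like -qq are expressed in optparse. A minimal standalone demo:

import optparse

parser = optparse.OptionParser()
parser.add_option("-q", "--quiet", action="count", dest="quiet", default=0,
                  help="Output less log data. May be given more than once.")
parser.add_option("-D", "--debug", action="count", dest="debug", default=0,
                  help="Increase the debug level. May be given more than once.")

options, args = parser.parse_args(["-qq", "-D"])
assert options.quiet == 2 and options.debug == 1
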
@@ -238,6 +227,11 @@ class BitBakeConfigParameters(cookerdata.ConfigParameters):
default=os.environ.get('BITBAKE_UI', 'knotty'),
help="The user interface to use (@CHOICES@ - default %default).")

# @CHOICES@ is substituted out by BitbakeHelpFormatter above
parser.add_option("-t", "--servertype", action="store", dest="servertype",
default=["process", "xmlrpc"]["BBSERVER" in os.environ],
help="Choose which server type to use (@CHOICES@ - default %default).")

parser.add_option("", "--token", action="store", dest="xmlrpctoken",
default=os.environ.get("BBTOKEN"),
help="Specify the connection token to be used when connecting "
@@ -253,14 +247,15 @@ class BitBakeConfigParameters(cookerdata.ConfigParameters):
help="Run bitbake without a UI, only starting a server "
"(cooker) process.")

parser.add_option("-B", "--bind", action="store", dest="bind", default=False,
help="The name/address for the bitbake xmlrpc server to bind to.")
parser.add_option("", "--foreground", action="store_true",
help="Run bitbake server in foreground.")

parser.add_option("-T", "--idle-timeout", type=float, dest="server_timeout",
default=os.getenv("BB_SERVER_TIMEOUT"),
help="Set timeout to unload bitbake server due to inactivity, "
"set to -1 means no unload, "
"default: Environment variable BB_SERVER_TIMEOUT.")
parser.add_option("-B", "--bind", action="store", dest="bind", default=False,
help="The name/address for the bitbake server to bind to.")

parser.add_option("-T", "--idle-timeout", type=int,
default=int(os.environ.get("BBTIMEOUT", "0")),
help="Set timeout to unload bitbake server due to inactivity")

parser.add_option("", "--no-setscene", action="store_true",
dest="nosetscene", default=False,
@@ -277,7 +272,7 @@ class BitBakeConfigParameters(cookerdata.ConfigParameters):

parser.add_option("-m", "--kill-server", action="store_true",
dest="kill_server", default=False,
help="Terminate any running bitbake server.")
help="Terminate the remote server.")

parser.add_option("", "--observe-only", action="store_true",
dest="observe_only", default=False,
@@ -292,13 +287,6 @@ class BitBakeConfigParameters(cookerdata.ConfigParameters):
help="Writes the event log of the build to a bitbake event json file. "
"Use '' (empty string) to assign the name automatically.")

parser.add_option("", "--runall", action="append", dest="runall",
help="Run the specified task for any recipe in the taskgraph of the specified target (even if it wouldn't otherwise have run).")

parser.add_option("", "--runonly", action="append", dest="runonly",
help="Run only the specified task within the taskgraph of the specified targets (and any task dependencies those tasks may have).")


options, targets = parser.parse_args(argv)

if options.quiet and options.verbose:
@@ -320,20 +308,69 @@ class BitBakeConfigParameters(cookerdata.ConfigParameters):
eventlog = "bitbake_eventlog_%s.json" % datetime.now().strftime("%Y%m%d%H%M%S")
options.writeeventlog = eventlog

if options.bind:
try:
#Checking that the port is a number and is a ':' delimited value
(host, port) = options.bind.split(':')
# if BBSERVER says to autodetect, let's do that
if options.remote_server:
port = -1
if options.remote_server != 'autostart':
host, port = options.remote_server.split(":", 2)
port = int(port)
except (ValueError,IndexError):
raise BBMainException("FATAL: Malformed host:port bind parameter")
options.xmlrpcinterface = (host, port)
else:
options.xmlrpcinterface = (None, 0)
# use automatic port if port set to -1, means read it from
# the bitbake.lock file; this is a bit tricky, but we always expect
# to be in the base of the build directory if we need to have a
# chance to start the server later, anyway
if port == -1:
lock_location = "./bitbake.lock"
# we try to read the address at all times; if the server is not started,
# we'll try to start it after the first connect fails, below
try:
lf = open(lock_location, 'r')
remotedef = lf.readline()
[host, port] = remotedef.split(":")
port = int(port)
lf.close()
options.remote_server = remotedef
except Exception as e:
if options.remote_server != 'autostart':
raise BBMainException("Failed to read bitbake.lock (%s), invalid port" % str(e))

return options, targets[1:]


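Both branches of the hunk above validate a host:port string the same way: split on the colon and let int() raise if the port is not numeric. A generic version of the idiom:

def parse_bind_address(bind):
    """Parse 'host:port' and fail loudly on malformed input."""
    try:
        host, port = bind.split(":")
        return host, int(port)
    except (ValueError, IndexError):
        raise ValueError("Malformed host:port bind parameter: %r" % bind)

assert parse_bind_address("localhost:0") == ("localhost", 0)
# parse_bind_address("localhost") or parse_bind_address("host:abc") raises.
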
def start_server(servermodule, configParams, configuration, features):
server = servermodule.BitBakeServer()
single_use = not configParams.server_only and os.getenv('BBSERVER') != 'autostart'
if configParams.bind:
(host, port) = configParams.bind.split(':')
server.initServer((host, int(port)), single_use=single_use,
idle_timeout=configParams.idle_timeout)
configuration.interface = [server.serverImpl.host, server.serverImpl.port]
else:
server.initServer(single_use=single_use)
configuration.interface = []

try:
configuration.setServerRegIdleCallback(server.getServerIdleCB())

cooker = bb.cooker.BBCooker(configuration, features)

server.addcooker(cooker)
server.saveConnectionDetails()
except Exception as e:
while hasattr(server, "event_queue"):
import queue
try:
event = server.event_queue.get(block=False)
except (queue.Empty, IOError):
break
if isinstance(event, logging.LogRecord):
logger.handle(event)
raise
if not configParams.foreground:
server.detach()
cooker.lock.close()
return server


def bitbake_main(configParams, configuration):

# Python multiprocessing requires /dev/shm on Linux
@@ -352,17 +389,51 @@ def bitbake_main(configParams, configuration):
except:
pass


configuration.setConfigParameters(configParams)

if configParams.server_only and configParams.remote_server:
ui_module = import_extension_module(bb.ui, configParams.ui, 'main')
servermodule = import_extension_module(bb.server, configParams.servertype, 'BitBakeServer')

if configParams.server_only:
if configParams.servertype != "xmlrpc":
raise BBMainException("FATAL: If '--server-only' is defined, we must set the "
"servertype as 'xmlrpc'.\n")
if not configParams.bind:
raise BBMainException("FATAL: The '--server-only' option requires a name/address "
"to bind to with the -B option.\n")
else:
try:
#Checking that the port is a number
int(configParams.bind.split(":")[1])
except (ValueError,IndexError):
raise BBMainException(
"FATAL: Malformed host:port bind parameter")
if configParams.remote_server:
raise BBMainException("FATAL: The '--server-only' option conflicts with %s.\n" %
("the BBSERVER environment variable" if "BBSERVER" in os.environ \
else "the '--remote-server' option"))

if configParams.observe_only and not (configParams.remote_server or configParams.bind):
elif configParams.foreground:
raise BBMainException("FATAL: The '--foreground' option can only be used "
"with --server-only.\n")

if configParams.bind and configParams.servertype != "xmlrpc":
raise BBMainException("FATAL: If '-B' or '--bind' is defined, we must "
"set the servertype as 'xmlrpc'.\n")

if configParams.remote_server and configParams.servertype != "xmlrpc":
raise BBMainException("FATAL: If '--remote-server' is defined, we must "
"set the servertype as 'xmlrpc'.\n")

if configParams.observe_only and (not configParams.remote_server or configParams.bind):
raise BBMainException("FATAL: '--observe-only' can only be used by UI clients "
"connecting to a server.\n")

if configParams.kill_server and not configParams.remote_server:
raise BBMainException("FATAL: '--kill-server' can only be used to "
"terminate a remote server")

if "BBDEBUG" in os.environ:
level = int(os.environ["BBDEBUG"])
if level > configuration.debug:
|
||||
bb.msg.init_msgconfig(configParams.verbose, configuration.debug,
|
||||
configuration.debug_domains)
|
||||
|
||||
server_connection, ui_module = setup_bitbake(configParams, configuration)
|
||||
# No server connection
|
||||
if server_connection is None:
|
||||
if configParams.status_only:
|
||||
return 1
|
||||
if configParams.kill_server:
|
||||
return 0
|
||||
|
||||
if not configParams.server_only:
|
||||
if configParams.status_only:
|
||||
server_connection.terminate()
|
||||
return 0
|
||||
|
||||
try:
|
||||
for event in bb.event.ui_queue:
|
||||
server_connection.events.queue_event(event)
|
||||
bb.event.ui_queue = []
|
||||
|
||||
return ui_module.main(server_connection.connection, server_connection.events,
|
||||
configParams)
|
||||
finally:
|
||||
server_connection.terminate()
|
||||
else:
|
||||
return 0
|
||||
|
||||
return 1
|
||||
|
||||
def setup_bitbake(configParams, configuration, extrafeatures=None):
|
||||
# Ensure logging messages get sent to the UI as events
|
||||
handler = bb.event.LogHandler()
|
||||
if not configParams.status_only:
|
||||
# In status only mode there are no logs and no UI
|
||||
logger.addHandler(handler)
|
||||
|
||||
if configParams.server_only:
|
||||
featureset = []
|
||||
ui_module = None
|
||||
else:
|
||||
ui_module = import_extension_module(bb.ui, configParams.ui, 'main')
|
||||
# Clear away any spurious environment variables while we stoke up the cooker
|
||||
cleanedvars = bb.utils.clean_environment()
|
||||
|
||||
featureset = []
|
||||
if not configParams.server_only:
|
||||
# Collect the feature set for the UI
|
||||
featureset = getattr(ui_module, "featureSet", [])
|
||||
|
||||
if extrafeatures:
|
||||
for feature in extrafeatures:
|
||||
if not feature in featureset:
|
||||
featureset.append(feature)
|
||||
if configParams.server_only:
|
||||
for param in ('prefile', 'postfile'):
|
||||
value = getattr(configParams, param)
|
||||
if value:
|
||||
setattr(configuration, "%s_server" % param, value)
|
||||
param = "%s_server" % param
|
||||
|
||||
server_connection = None
|
||||
|
||||
# Clear away any spurious environment variables while we stoke up the cooker
|
||||
# (done after import_extension_module() above since for example import gi triggers env var usage)
|
||||
cleanedvars = bb.utils.clean_environment()
|
||||
|
||||
if configParams.remote_server:
|
||||
# Connect to a remote XMLRPC server
|
||||
server_connection = bb.server.xmlrpcclient.connectXMLRPC(configParams.remote_server, featureset,
|
||||
configParams.observe_only, configParams.xmlrpctoken)
|
||||
else:
|
||||
retries = 8
|
||||
while retries:
|
||||
try:
|
||||
topdir, lock = lockBitbake()
|
||||
sockname = topdir + "/bitbake.sock"
|
||||
if lock:
|
||||
if configParams.status_only or configParams.kill_server:
|
||||
logger.info("bitbake server is not running.")
|
||||
lock.close()
|
||||
return None, None
|
||||
# we start a server with a given configuration
|
||||
logger.info("Starting bitbake server...")
|
||||
# Clear the event queue since we already displayed messages
|
||||
bb.event.ui_queue = []
|
||||
server = bb.server.process.BitBakeServer(lock, sockname, configuration, featureset)
|
||||
|
||||
else:
|
||||
logger.info("Reconnecting to bitbake server...")
|
||||
if not os.path.exists(sockname):
|
||||
logger.info("Previous bitbake instance shutting down?, waiting to retry...")
|
||||
i = 0
|
||||
lock = None
|
||||
# Wait for 5s or until we can get the lock
|
||||
while not lock and i < 50:
|
||||
time.sleep(0.1)
|
||||
_, lock = lockBitbake()
|
||||
i += 1
|
||||
if lock:
|
||||
bb.utils.unlockfile(lock)
|
||||
raise bb.server.process.ProcessTimeout("Bitbake still shutting down as socket exists but no lock?")
|
||||
if not configParams.server_only:
|
||||
try:
|
||||
server_connection = bb.server.process.connectProcessServer(sockname, featureset)
|
||||
except EOFError:
|
||||
# The server may have been shutting down but not closed the socket yet. If that happened,
|
||||
# ignore it.
|
||||
pass
|
||||
|
||||
if server_connection or configParams.server_only:
|
||||
break
|
||||
except BBMainFatal:
|
||||
raise
|
||||
except (Exception, bb.server.process.ProcessTimeout) as e:
|
||||
if not retries:
|
||||
raise
|
||||
retries -= 1
|
||||
tryno = 8 - retries
|
||||
if isinstance(e, (bb.server.process.ProcessTimeout, BrokenPipeError)):
|
||||
logger.info("Retrying server connection (#%d)..." % tryno)
|
||||
else:
|
||||
logger.info("Retrying server connection (#%d)... (%s)" % (tryno, traceback.format_exc()))
|
||||
if not retries:
|
||||
bb.fatal("Unable to connect to bitbake server, or start one")
|
||||
if retries < 5:
|
||||
time.sleep(5)
|
||||
|
||||
if configParams.kill_server:
|
||||
server_connection.connection.terminateServer()
|
||||
server_connection.terminate()
|
||||
if not configParams.remote_server:
|
||||
# we start a server with a given configuration
|
||||
server = start_server(servermodule, configParams, configuration, featureset)
|
||||
bb.event.ui_queue = []
|
||||
logger.info("Terminated bitbake server.")
|
||||
return None, None
|
||||
else:
|
||||
if os.getenv('BBSERVER') == 'autostart':
|
||||
if configParams.remote_server == 'autostart' or \
|
||||
not servermodule.check_connection(configParams.remote_server, timeout=2):
|
||||
configParams.bind = 'localhost:0'
|
||||
srv = start_server(servermodule, configParams, configuration, featureset)
|
||||
configParams.remote_server = '%s:%d' % tuple(configuration.interface)
|
||||
bb.event.ui_queue = []
|
||||
|
||||
# Restore the environment in case the UI needs it
|
||||
for k in cleanedvars:
|
||||
os.environ[k] = cleanedvars[k]
|
||||
# we start a stub server that is actually a XMLRPClient that connects to a real server
|
||||
server = servermodule.BitBakeXMLRPCClient(configParams.observe_only,
|
||||
configParams.xmlrpctoken)
|
||||
server.saveConnectionDetails(configParams.remote_server)
|
||||
|
||||
logger.removeHandler(handler)
|
||||
|
||||
return server_connection, ui_module
|
||||
if not configParams.server_only:
|
||||
try:
|
||||
server_connection = server.establishConnection(featureset)
|
||||
except Exception as e:
|
||||
bb.fatal("Could not connect to server %s: %s" % (configParams.remote_server, str(e)))
|
||||
|
||||
def lockBitbake():
topdir = bb.cookerdata.findTopdir()
if not topdir:
bb.error("Unable to find conf/bblayers.conf or conf/bitbake.conf. BBPATH is unset and/or not in a build directory?")
raise BBMainFatal
lockfile = topdir + "/bitbake.lock"
return topdir, bb.utils.lockfile(lockfile, False, False)
if configParams.kill_server:
server_connection.connection.terminateServer()
bb.event.ui_queue = []
return 0

server_connection.setupEventQueue()

# Restore the environment in case the UI needs it
for k in cleanedvars:
os.environ[k] = cleanedvars[k]

logger.removeHandler(handler)


if configParams.status_only:
server_connection.terminate()
return 0

try:
return ui_module.main(server_connection.connection, server_connection.events,
configParams)
finally:
bb.event.ui_queue = []
server_connection.terminate()
else:
print("Bitbake server address: %s, server port: %s" % (server.serverImpl.host,
server.serverImpl.port))
if configParams.foreground:
server.serverImpl.serve_forever()
return 0

return 1

@@ -28,16 +28,16 @@ def convertGMK(unit):

""" Convert the space unit G, M, K, the unit is case-insensitive """

unitG = re.match(r'([1-9][0-9]*)[gG]\s?$', unit)
unitG = re.match('([1-9][0-9]*)[gG]\s?$', unit)
if unitG:
return int(unitG.group(1)) * (1024 ** 3)
unitM = re.match(r'([1-9][0-9]*)[mM]\s?$', unit)
unitM = re.match('([1-9][0-9]*)[mM]\s?$', unit)
if unitM:
return int(unitM.group(1)) * (1024 ** 2)
unitK = re.match(r'([1-9][0-9]*)[kK]\s?$', unit)
unitK = re.match('([1-9][0-9]*)[kK]\s?$', unit)
if unitK:
return int(unitK.group(1)) * 1024
unitN = re.match(r'([1-9][0-9]*)\s?$', unit)
unitN = re.match('([1-9][0-9]*)\s?$', unit)
if unitN:
return int(unitN.group(1))
else:
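
convertGMK is a straightforward unit parser: one anchored regex per suffix, multiplied out to bytes. The same logic condensed into a single pattern for illustration (the structure is mine, the behaviour follows the hunk):

import re

_UNITS = {"g": 1024 ** 3, "m": 1024 ** 2, "k": 1024, "": 1}

def convert_gmk(unit):
    """Convert '10G', '512M', '64K' or a bare number to an integer."""
    m = re.match(r'([1-9][0-9]*)([gGmMkK]?)\s?$', unit)
    if not m:
        return None  # the original reports an error in this case
    return int(m.group(1)) * _UNITS[m.group(2).lower()]

assert convert_gmk("2G") == 2 * 1024 ** 3
assert convert_gmk("512K") == 512 * 1024
assert convert_gmk("100") == 100
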
@@ -83,7 +83,7 @@ def getDiskData(BBDirs, configuration):
for pathSpaceInode in BBDirs.split():
# The input format is: "dir,space,inode", dir is a must, space
# and inode are optional
pathSpaceInodeRe = re.match(r'([^,]*),([^,]*),([^,]*),?(.*)', pathSpaceInode)
pathSpaceInodeRe = re.match('([^,]*),([^,]*),([^,]*),?(.*)', pathSpaceInode)
if not pathSpaceInodeRe:
printErr("Invalid value in BB_DISKMON_DIRS: %s" % pathSpaceInode)
return None
@@ -129,7 +129,7 @@ def getDiskData(BBDirs, configuration):
bb.utils.mkdirhier(path)
dev = getMountedDev(path)
# Use path/action as the key
devDict[(path, action)] = [dev, minSpace, minInode]
devDict[os.path.join(path, action)] = [dev, minSpace, minInode]

return devDict

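The devDict key change above deserves a note: the newer code keys the dictionary on the (path, action) tuple, while the older code joined the parts into one string with os.path.join and split them back apart later with dirname/basename. Tuple keys make the reverse mapping trivial, as a toy comparison shows:

import os

# Tuple keys: unpacking gives both parts back directly.
dev_dict = {("/build/tmp", "STOPTASKS"): ["sda1", 1024, 100]}
for (path, action), (dev, min_space, min_inode) in dev_dict.items():
    assert path == "/build/tmp" and action == "STOPTASKS"

# Joined-string keys need dirname/basename to recover the parts,
# which only works while the action contains no path separator.
key = os.path.join("/build/tmp", "STOPTASKS")
assert os.path.dirname(key) == "/build/tmp"
assert os.path.basename(key) == "STOPTASKS"
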
@@ -141,13 +141,13 @@ def getInterval(configuration):
spaceDefault = 50 * 1024 * 1024
inodeDefault = 5 * 1024

interval = configuration.getVar("BB_DISKMON_WARNINTERVAL")
interval = configuration.getVar("BB_DISKMON_WARNINTERVAL", True)
if not interval:
return spaceDefault, inodeDefault
else:
# The disk space or inode interval is optional, but it should
# have a correct value once it is specified
intervalRe = re.match(r'([^,]*),?\s*(.*)', interval)
intervalRe = re.match('([^,]*),?\s*(.*)', interval)
if intervalRe:
intervalSpace = intervalRe.group(1)
if intervalSpace:
@@ -179,7 +179,7 @@ class diskMonitor:
self.enableMonitor = False
self.configuration = configuration

BBDirs = configuration.getVar("BB_DISKMON_DIRS") or None
BBDirs = configuration.getVar("BB_DISKMON_DIRS", True) or None
if BBDirs:
self.devDict = getDiskData(BBDirs, configuration)
if self.devDict:
@@ -205,21 +205,18 @@ class diskMonitor:
""" Take action for the monitor """

if self.enableMonitor:
diskUsage = {}
for k, attributes in self.devDict.items():
path, action = k
dev, minSpace, minInode = attributes
for k in self.devDict:
path = os.path.dirname(k)
action = os.path.basename(k)
dev = self.devDict[k][0]
minSpace = self.devDict[k][1]
minInode = self.devDict[k][2]

st = os.statvfs(path)

# The available free space, integer number
# The free space, float point number
freeSpace = st.f_bavail * st.f_frsize

# Send all relevant information in the event.
freeSpaceRoot = st.f_bfree * st.f_frsize
totalSpace = st.f_blocks * st.f_frsize
diskUsage[dev] = bb.event.DiskUsageSample(freeSpace, freeSpaceRoot, totalSpace)

if minSpace and freeSpace < minSpace:
# Always show warning, the self.checked would always be False if the action is WARN
if self.preFreeS[k] == 0 or self.preFreeS[k] - freeSpace > self.spaceInterval and not self.checked[k]:
@@ -238,7 +235,7 @@ class diskMonitor:
rq.finish_runqueue(True)
bb.event.fire(bb.event.DiskFull(dev, 'disk', freeSpace, path), self.configuration)

# The free inodes, integer number
# The free inodes, float point number
freeInode = st.f_favail

if minInode and freeInode < minInode:
@@ -263,6 +260,4 @@ class diskMonitor:
self.checked[k] = True
rq.finish_runqueue(True)
bb.event.fire(bb.event.DiskFull(dev, 'inode', freeInode, path), self.configuration)

bb.event.fire(bb.event.MonitorDiskEvent(diskUsage), self.configuration)
return

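The free-space arithmetic in the monitor comes straight from os.statvfs: f_bavail counts blocks available to unprivileged users, f_bfree counts blocks free overall (including the root-reserved portion), and f_frsize is the fragment size in bytes. A minimal standalone reading, POSIX-only:

import os

st = os.statvfs("/tmp")
free_space = st.f_bavail * st.f_frsize       # usable by ordinary processes
free_space_root = st.f_bfree * st.f_frsize   # includes the reserved blocks
total_space = st.f_blocks * st.f_frsize
free_inodes = st.f_favail

print("free: %d MB of %d MB, %d inodes" %
      (free_space // 2**20, total_space // 2**20, free_inodes))
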
@@ -40,7 +40,6 @@ class BBLogFormatter(logging.Formatter):
VERBOSE = logging.INFO - 1
NOTE = logging.INFO
PLAIN = logging.INFO + 1
VERBNOTE = logging.INFO + 2
ERROR = logging.ERROR
WARNING = logging.WARNING
CRITICAL = logging.CRITICAL
@@ -52,7 +51,6 @@ class BBLogFormatter(logging.Formatter):
VERBOSE: 'NOTE',
NOTE : 'NOTE',
PLAIN : '',
VERBNOTE: 'NOTE',
WARNING : 'WARNING',
ERROR : 'ERROR',
CRITICAL: 'ERROR',
@@ -68,7 +66,6 @@ class BBLogFormatter(logging.Formatter):
VERBOSE : BASECOLOR,
NOTE : BASECOLOR,
PLAIN : BASECOLOR,
VERBNOTE: BASECOLOR,
WARNING : YELLOW,
ERROR : RED,
CRITICAL: RED,
@@ -204,25 +201,3 @@ def fatal(msgdomain, msg):
logger = logging.getLogger("BitBake")
logger.critical(msg)
sys.exit(1)

def logger_create(name, output=sys.stderr, level=logging.INFO, preserve_handlers=False, color='auto'):
"""Standalone logger creation function"""
logger = logging.getLogger(name)
console = logging.StreamHandler(output)
format = bb.msg.BBLogFormatter("%(levelname)s: %(message)s")
if color == 'always' or (color == 'auto' and output.isatty()):
format.enable_color()
console.setFormatter(format)
if preserve_handlers:
logger.addHandler(console)
else:
logger.handlers = [console]
logger.setLevel(level)
return logger

def has_console_handler(logger):
for handler in logger.handlers:
if isinstance(handler, logging.StreamHandler):
if handler.stream in [sys.stderr, sys.stdout]:
return True
return False

@@ -84,10 +84,6 @@ def update_cache(f):
logger.debug(1, "Updating mtime cache for %s" % f)
update_mtime(f)

def clear_cache():
global __mtime_cache
__mtime_cache = {}

def mark_dependency(d, f):
if f.startswith('./'):
f = "%s/%s" % (os.getcwd(), f[2:])
@@ -127,16 +123,15 @@ def init_parser(d):

def resolve_file(fn, d):
if not os.path.isabs(fn):
bbpath = d.getVar("BBPATH")
bbpath = d.getVar("BBPATH", True)
newfn, attempts = bb.utils.which(bbpath, fn, history=True)
for af in attempts:
mark_dependency(d, af)
if not newfn:
raise IOError(errno.ENOENT, "file %s not found in %s" % (fn, bbpath))
fn = newfn
else:
mark_dependency(d, fn)

mark_dependency(d, fn)
if not os.path.isfile(fn):
raise IOError(errno.ENOENT, "file %s not found" % fn)


@@ -30,6 +30,8 @@ import itertools
from bb import methodpool
from bb.parse import logger

_bbversions_re = re.compile(r"\[(?P<from>[0-9]+)-(?P<to>[0-9]+)\]")

class StatementGroup(list):
def eval(self, data):
for statement in self:
@@ -130,6 +132,7 @@ class DataNode(AstNode):
val = groupd["value"]
elif "colon" in groupd and groupd["colon"] != None:
e = data.createCopy()
bb.data.update_data(e)
op = "immediate"
val = e.expand(groupd["value"], key + "[:=]")
elif "append" in groupd and groupd["append"] != None:
@@ -178,7 +181,7 @@ class MethodNode(AstNode):
funcname = ("__anon_%s_%s" % (self.lineno, self.filename.translate(MethodNode.tr_tbl)))
self.python = True
text = "def %s(d):\n" % (funcname) + text
bb.methodpool.insert_method(funcname, text, self.filename, self.lineno - len(self.body) - 1)
bb.methodpool.insert_method(funcname, text, self.filename, self.lineno - len(self.body))
anonfuncs = data.getVar('__BBANONFUNCS', False) or []
anonfuncs.append(funcname)
data.setVar('__BBANONFUNCS', anonfuncs)
@@ -335,39 +338,36 @@ def handleInherit(statements, filename, lineno, m):
classes = m.group(1)
statements.append(InheritNode(filename, lineno, classes))

def runAnonFuncs(d):
def finalize(fn, d, variant = None):
saved_handlers = bb.event.get_handlers().copy()

for var in d.getVar('__BBHANDLERS', False) or []:
# try to add the handler
handlerfn = d.getVarFlag(var, "filename", False)
if not handlerfn:
bb.fatal("Undefined event handler function '%s'" % var)
handlerln = int(d.getVarFlag(var, "lineno", False))
bb.event.register(var, d.getVar(var, False), (d.getVarFlag(var, "eventmask", True) or "").split(), handlerfn, handlerln)

bb.event.fire(bb.event.RecipePreFinalise(fn), d)

bb.data.expandKeys(d)
bb.data.update_data(d)
code = []
for funcname in d.getVar("__BBANONFUNCS", False) or []:
code.append("%s(d)" % funcname)
bb.utils.better_exec("\n".join(code), {"d": d})
bb.data.update_data(d)

def finalize(fn, d, variant = None):
saved_handlers = bb.event.get_handlers().copy()
try:
for var in d.getVar('__BBHANDLERS', False) or []:
# try to add the handler
handlerfn = d.getVarFlag(var, "filename", False)
if not handlerfn:
bb.fatal("Undefined event handler function '%s'" % var)
handlerln = int(d.getVarFlag(var, "lineno", False))
bb.event.register(var, d.getVar(var, False), (d.getVarFlag(var, "eventmask") or "").split(), handlerfn, handlerln)
tasklist = d.getVar('__BBTASKS', False) or []
bb.build.add_tasks(tasklist, d)

bb.event.fire(bb.event.RecipePreFinalise(fn), d)
bb.parse.siggen.finalise(fn, d, variant)

bb.data.expandKeys(d)
runAnonFuncs(d)
d.setVar('BBINCLUDED', bb.parse.get_file_depends(d))

tasklist = d.getVar('__BBTASKS', False) or []
bb.event.fire(bb.event.RecipeTaskPreProcess(fn, list(tasklist)), d)
bb.build.add_tasks(tasklist, d)

bb.parse.siggen.finalise(fn, d, variant)

d.setVar('BBINCLUDED', bb.parse.get_file_depends(d))

bb.event.fire(bb.event.RecipeParsed(fn), d)
finally:
bb.event.set_handlers(saved_handlers)
bb.event.fire(bb.event.RecipeParsed(fn), d)
bb.event.set_handlers(saved_handlers)

def _create_variants(datastores, names, function, onlyfinalise):
def create_variant(name, orig_d, arg = None):
@@ -385,8 +385,29 @@ def _create_variants(datastores, names, function, onlyfinalise):
else:
create_variant("%s-%s" % (variant, name), datastores[variant], name)

def _expand_versions(versions):
def expand_one(version, start, end):
for i in range(start, end + 1):
ver = _bbversions_re.sub(str(i), version, 1)
yield ver

versions = iter(versions)
while True:
try:
version = next(versions)
except StopIteration:
break

range_ver = _bbversions_re.search(version)
if not range_ver:
yield version
else:
newversions = expand_one(version, int(range_ver.group("from")),
int(range_ver.group("to")))
versions = itertools.chain(newversions, versions)

def multi_finalize(fn, d):
appends = (d.getVar("__BBAPPEND") or "").split()
appends = (d.getVar("__BBAPPEND", True) or "").split()
for append in appends:
logger.debug(1, "Appending .bbappend file %s to %s", append, fn)
bb.parse.BBHandler.handle(append, d, True)
@@ -401,7 +422,51 @@ def multi_finalize(fn, d):
d.setVar("__SKIPPED", e.args[0])
datastores = {"": safe_d}

extended = d.getVar("BBCLASSEXTEND") or ""
versions = (d.getVar("BBVERSIONS", True) or "").split()
if versions:
pv = orig_pv = d.getVar("PV", True)
baseversions = {}

def verfunc(ver, d, pv_d = None):
if pv_d is None:
pv_d = d

overrides = d.getVar("OVERRIDES", True).split(":")
pv_d.setVar("PV", ver)
overrides.append(ver)
bpv = baseversions.get(ver) or orig_pv
pv_d.setVar("BPV", bpv)
overrides.append(bpv)
d.setVar("OVERRIDES", ":".join(overrides))

versions = list(_expand_versions(versions))
for pos, version in enumerate(list(versions)):
try:
pv, bpv = version.split(":", 2)
except ValueError:
pass
else:
versions[pos] = pv
baseversions[pv] = bpv

if pv in versions and not baseversions.get(pv):
versions.remove(pv)
else:
pv = versions.pop()

# This is necessary because our existing main datastore
# has already been finalized with the old PV, we need one
# that's been finalized with the new PV.
d = bb.data.createCopy(safe_d)
verfunc(pv, d, safe_d)
try:
finalize(fn, d)
except bb.parse.SkipRecipe as e:
d.setVar("__SKIPPED", e.args[0])

_create_variants(datastores, versions, verfunc, onlyfinalise)

extended = d.getVar("BBCLASSEXTEND", True) or ""
if extended:
# the following is to support bbextends with arguments, for e.g. multilib
# an example is as follows:
@@ -419,7 +484,7 @@ def multi_finalize(fn, d):
else:
extendedmap[ext] = ext

pn = d.getVar("PN")
pn = d.getVar("PN", True)
def extendfunc(name, d):
if name != extendedmap[name]:
d.setVar("BBEXTENDCURR", extendedmap[name])

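The _expand_versions generator in this hunk turns BBVERSIONS entries like "1.[0-2]" into "1.0", "1.1", "1.2", pushing expanded entries back onto the front of the stream so a version carrying two ranges gets a second pass. Extracted so it can be run on its own:

import itertools
import re

_bbversions_re = re.compile(r"\[(?P<from>[0-9]+)-(?P<to>[0-9]+)\]")

def expand_versions(versions):
    def expand_one(version, start, end):
        for i in range(start, end + 1):
            yield _bbversions_re.sub(str(i), version, 1)

    versions = iter(versions)
    while True:
        try:
            version = next(versions)
        except StopIteration:
            break
        m = _bbversions_re.search(version)
        if not m:
            yield version
        else:
            # Re-queue the expansions ahead of the remaining input.
            versions = itertools.chain(
                expand_one(version, int(m.group("from")), int(m.group("to"))),
                versions)

assert list(expand_versions(["1.[0-2]", "2.0"])) == ["1.0", "1.1", "1.2", "2.0"]
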
@@ -38,15 +38,14 @@ from .ConfHandler import include, init
# For compatibility
bb.deprecate_import(__name__, "bb.parse", ["vars_from_file"])

__func_start_regexp__ = re.compile(r"(((?P<py>python)|(?P<fr>fakeroot))\s*)*(?P<func>[\w\.\-\+\{\}\$]+)?\s*\(\s*\)\s*{$" )
__inherit_regexp__ = re.compile(r"inherit\s+(.+)" )
__export_func_regexp__ = re.compile(r"EXPORT_FUNCTIONS\s+(.+)" )
__addtask_regexp__ = re.compile(r"addtask\s+(?P<func>\w+)\s*((before\s*(?P<before>((.*(?=after))|(.*))))|(after\s*(?P<after>((.*(?=before))|(.*)))))*")
__deltask_regexp__ = re.compile(r"deltask\s+(?P<func>\w+)")
__addhandler_regexp__ = re.compile(r"addhandler\s+(.+)" )
__def_regexp__ = re.compile(r"def\s+(\w+).*:" )
__python_func_regexp__ = re.compile(r"(\s+.*)|(^$)|(^#)" )
__python_tab_regexp__ = re.compile(r" *\t")
__func_start_regexp__ = re.compile( r"(((?P<py>python)|(?P<fr>fakeroot))\s*)*(?P<func>[\w\.\-\+\{\}\$]+)?\s*\(\s*\)\s*{$" )
__inherit_regexp__ = re.compile( r"inherit\s+(.+)" )
__export_func_regexp__ = re.compile( r"EXPORT_FUNCTIONS\s+(.+)" )
__addtask_regexp__ = re.compile("addtask\s+(?P<func>\w+)\s*((before\s*(?P<before>((.*(?=after))|(.*))))|(after\s*(?P<after>((.*(?=before))|(.*)))))*")
__deltask_regexp__ = re.compile("deltask\s+(?P<func>\w+)")
__addhandler_regexp__ = re.compile( r"addhandler\s+(.+)" )
__def_regexp__ = re.compile( r"def\s+(\w+).*:" )
__python_func_regexp__ = re.compile( r"(\s+.*)|(^$)" )

__infunc__ = []
__inpython__ = False
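
The __addtask_regexp__ pattern above captures the task name plus optional before/after clauses in either order; the lookaheads stop each clause at the start of the other keyword. Its behaviour is easiest to see on a sample line (callers strip the captured whitespace):

import re

addtask_re = re.compile(
    r"addtask\s+(?P<func>\w+)"
    r"\s*((before\s*(?P<before>((.*(?=after))|(.*))))"
    r"|(after\s*(?P<after>((.*(?=before))|(.*)))))*")

m = addtask_re.match("addtask deploy after do_compile before do_build")
assert m.group("func") == "deploy"
assert m.group("after").strip() == "do_compile"
assert m.group("before").strip() == "do_build"
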
@@ -67,7 +66,7 @@ def inherit(files, fn, lineno, d):
file = os.path.join('classes', '%s.bbclass' % file)

if not os.path.isabs(file):
bbpath = d.getVar("BBPATH")
bbpath = d.getVar("BBPATH", True)
abs_fn, attempts = bb.utils.which(bbpath, file, history=True)
for af in attempts:
if af != abs_fn:
@@ -88,17 +87,17 @@ def get_statements(filename, absolute_filename, base_name):
try:
return cached_statements[absolute_filename]
except KeyError:
with open(absolute_filename, 'r') as f:
statements = ast.StatementGroup()

lineno = 0
while True:
lineno = lineno + 1
s = f.readline()
if not s: break
s = s.rstrip()
feeder(lineno, s, filename, base_name, statements)
file = open(absolute_filename, 'r')
statements = ast.StatementGroup()

lineno = 0
while True:
lineno = lineno + 1
s = file.readline()
if not s: break
s = s.rstrip()
feeder(lineno, s, filename, base_name, statements)
file.close()
if __inpython__:
# add a blank line to close out any python definition
feeder(lineno, "", filename, base_name, statements, eof=True)
@@ -132,6 +131,9 @@ def handle(fn, d, include):

abs_fn = resolve_file(fn, d)

if include:
bb.parse.mark_dependency(d, abs_fn)

# actual loading
statements = get_statements(fn, abs_fn, base_name)

@@ -142,7 +144,7 @@ def handle(fn, d, include):
try:
statements.eval(d)
except bb.parse.SkipRecipe:
d.setVar("__SKIPPED", True)
bb.data.setVar("__SKIPPED", True, d)
if include == 0:
return { "" : d }

@@ -161,16 +163,6 @@ def handle(fn, d, include):

def feeder(lineno, s, fn, root, statements, eof=False):
global __func_start_regexp__, __inherit_regexp__, __export_func_regexp__, __addtask_regexp__, __addhandler_regexp__, __def_regexp__, __python_func_regexp__, __inpython__, __infunc__, __body__, bb, __residue__, __classname__

# Check tabs in python functions:
# - def py_funcname(): covered by __inpython__
# - python(): covered by '__anonymous' == __infunc__[0]
# - python funcname(): covered by __infunc__[3]
if __inpython__ or (__infunc__ and ('__anonymous' == __infunc__[0] or __infunc__[3])):
tab = __python_tab_regexp__.match(s)
if tab:
bb.warn('python should use 4 spaces indentation, but found tabs in %s, line %s' % (root, lineno))

if __infunc__:
if s == '}':
__body__.append('')

@@ -32,8 +32,8 @@ from bb.parse import ParseError, resolve_file, ast, logger, handle

__config_regexp__ = re.compile( r"""
^
(?P<exp>export\s+)?
(?P<var>[a-zA-Z0-9\-_+.${}/~]+?)
(?P<exp>export\s*)?
(?P<var>[a-zA-Z0-9\-~_+.${}/]+?)
(\[(?P<flag>[a-zA-Z0-9\-_+.]+)\])?

\s* (
@@ -56,9 +56,9 @@ __config_regexp__ = re.compile( r"""
""", re.X)
__include_regexp__ = re.compile( r"include\s+(.+)" )
__require_regexp__ = re.compile( r"require\s+(.+)" )
__export_regexp__ = re.compile( r"export\s+([a-zA-Z0-9\-_+.${}/~]+)$" )
__unset_regexp__ = re.compile( r"unset\s+([a-zA-Z0-9\-_+.${}/~]+)$" )
__unset_flag_regexp__ = re.compile( r"unset\s+([a-zA-Z0-9\-_+.${}/~]+)\[([a-zA-Z0-9\-_+.]+)\]$" )
__export_regexp__ = re.compile( r"export\s+([a-zA-Z0-9\-_+.${}/]+)$" )
__unset_regexp__ = re.compile( r"unset\s+([a-zA-Z0-9\-_+.${}/]+)$" )
__unset_flag_regexp__ = re.compile( r"unset\s+([a-zA-Z0-9\-_+.${}/]+)\[([a-zA-Z0-9\-_+.${}/]+)\]$" )

def init(data):
topdir = data.getVar('TOPDIR', False)
@@ -69,38 +69,30 @@ def init(data):
def supports(fn, d):
return fn[-5:] == ".conf"

def include(parentfn, fns, lineno, data, error_out):
def include(parentfn, fn, lineno, data, error_out):
"""
error_out: A string indicating the verb (e.g. "include", "inherit") to be
used in a ParseError that will be raised if the file to be included could
not be included. Specify False to avoid raising an error in this case.
"""
fns = data.expand(fns)
parentfn = data.expand(parentfn)

# "include" or "require" accept zero to n space-separated file names to include.
for fn in fns.split():
include_single_file(parentfn, fn, lineno, data, error_out)

def include_single_file(parentfn, fn, lineno, data, error_out):
"""
Helper function for include() which does not expand or split its parameters.
"""
if parentfn == fn: # prevent infinite recursion
return None

fn = data.expand(fn)
parentfn = data.expand(parentfn)

if not os.path.isabs(fn):
dname = os.path.dirname(parentfn)
bbpath = "%s:%s" % (dname, data.getVar("BBPATH"))
bbpath = "%s:%s" % (dname, data.getVar("BBPATH", True))
abs_fn, attempts = bb.utils.which(bbpath, fn, history=True)
if abs_fn and bb.parse.check_dependency(data, abs_fn):
logger.warning("Duplicate inclusion for %s in %s" % (abs_fn, data.getVar('FILE')))
logger.warning("Duplicate inclusion for %s in %s" % (abs_fn, data.getVar('FILE', True)))
for af in attempts:
bb.parse.mark_dependency(data, af)
if abs_fn:
fn = abs_fn
elif bb.parse.check_dependency(data, fn):
logger.warning("Duplicate inclusion for %s in %s" % (fn, data.getVar('FILE')))
logger.warning("Duplicate inclusion for %s in %s" % (fn, data.getVar('FILE', True)))

try:
bb.parse.handle(fn, data, True)
@@ -134,6 +126,9 @@ def handle(fn, data, include):
abs_fn = resolve_file(fn, data)
f = open(abs_fn, 'r')

if include:
bb.parse.mark_dependency(data, abs_fn)

statements = ast.StatementGroup()
lineno = 0
while True:
@@ -147,7 +142,7 @@ def handle(fn, data, include):
continue
s = s.rstrip()
while s[-1] == '\\':
s2 = f.readline().rstrip()
s2 = f.readline().strip()
lineno = lineno + 1
if (not s2 or s2 and s2[0] != "#") and s[0] == "#" :
bb.fatal("There is a confusing multiline, partially commented expression on line %s of file %s (%s).\nPlease clarify whether this is all a comment or should be parsed." % (lineno, fn, s))

@@ -28,8 +28,11 @@ import sys
import warnings
from bb.compat import total_ordering
from collections import Mapping
import sqlite3
import contextlib

try:
import sqlite3
except ImportError:
from pysqlite2 import dbapi2 as sqlite3

sqlversion = sqlite3.sqlite_version_info
if sqlversion[0] < 3 or (sqlversion[0] == 3 and sqlversion[1] < 3):
@@ -37,181 +40,84 @@ if sqlversion[0] < 3 or (sqlversion[0] == 3 and sqlversion[1] < 3):


logger = logging.getLogger("BitBake.PersistData")
if hasattr(sqlite3, 'enable_shared_cache'):
try:
sqlite3.enable_shared_cache(True)
except sqlite3.OperationalError:
pass


@total_ordering
class SQLTable(collections.MutableMapping):
class _Decorators(object):
@staticmethod
def retry(*, reconnect=True):
"""
Decorator that restarts a function if a database locked sqlite
exception occurs. If reconnect is True, the database connection
will be closed and reopened each time a failure occurs
"""
def retry_wrapper(f):
def wrap_func(self, *args, **kwargs):
# Reconnect if necessary
if self.connection is None and reconnect:
self.reconnect()

count = 0
while True:
try:
return f(self, *args, **kwargs)
except sqlite3.OperationalError as exc:
if count < 500 and ('is locked' in str(exc) or 'locking protocol' in str(exc)):
count = count + 1
if reconnect:
self.reconnect()
continue
raise
return wrap_func
return retry_wrapper
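
The retry decorator exists because concurrent BitBake processes share these SQLite caches and can hit "database is locked" errors. sqlite3 raises a generic OperationalError for this, so the filter has to inspect the message text. Reduced to its essentials, the pattern looks like this:

import sqlite3

def retry_on_locked(attempts=500):
    """Re-run a method while sqlite reports the database as locked."""
    def decorator(f):
        def wrapper(self, *args, **kwargs):
            for _ in range(attempts):
                try:
                    return f(self, *args, **kwargs)
                except sqlite3.OperationalError as exc:
                    if 'is locked' not in str(exc) and \
                            'locking protocol' not in str(exc):
                        raise
            return f(self, *args, **kwargs)  # final try; errors propagate
        return wrapper
    return decorator
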
@staticmethod
def transaction(f):
"""
Decorator that starts a database transaction and creates a database
cursor for performing queries. If no exception is thrown, the
database results are commited. If an exception occurs, the database
is rolled back. In all cases, the cursor is closed after the
function ends.

Note that the cursor is passed as an extra argument to the function
after `self` and before any of the normal arguments
"""
def wrap_func(self, *args, **kwargs):
# Context manager will COMMIT the database on success,
# or ROLLBACK on an exception
with self.connection:
# Automatically close the cursor when done
with contextlib.closing(self.connection.cursor()) as cursor:
return f(self, cursor, *args, **kwargs)
return wrap_func
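
The transaction decorator leans on two standard-library behaviours: a sqlite3 Connection used as a context manager commits on success and rolls back on an exception (without closing the connection), and contextlib.closing guarantees the cursor is closed either way. The same shape without the decorator wrapping:

import contextlib
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE kv(key TEXT PRIMARY KEY NOT NULL, value TEXT)")

with conn:  # COMMIT on success, ROLLBACK if the block raises
    with contextlib.closing(conn.cursor()) as cursor:
        cursor.execute("INSERT INTO kv(key, value) VALUES (?, ?)", ("a", "1"))

assert conn.execute("SELECT value FROM kv WHERE key=?", ("a",)).fetchone() == ("1",)
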
"""Object representing a table/domain in the database"""
|
||||
def __init__(self, cachefile, table):
|
||||
self.cachefile = cachefile
|
||||
self.table = table
|
||||
self.cursor = connect(self.cachefile)
|
||||
|
||||
self.connection = None
|
||||
self._execute_single("CREATE TABLE IF NOT EXISTS %s(key TEXT PRIMARY KEY NOT NULL, value TEXT);" % table)
|
||||
self._execute("CREATE TABLE IF NOT EXISTS %s(key TEXT, value TEXT);"
|
||||
% table)
|
||||
|
||||
@_Decorators.retry(reconnect=False)
|
||||
@_Decorators.transaction
|
||||
def _setup_database(self, cursor):
|
||||
cursor.execute("pragma synchronous = off;")
|
||||
# Enable WAL and keep the autocheckpoint length small (the default is
|
||||
# usually 1000). Persistent caches are usually read-mostly, so keeping
|
||||
# this short will keep readers running quickly
|
||||
cursor.execute("pragma journal_mode = WAL;")
|
||||
cursor.execute("pragma wal_autocheckpoint = 100;")
|
||||
|
||||
def reconnect(self):
|
||||
if self.connection is not None:
|
||||
self.connection.close()
|
||||
self.connection = sqlite3.connect(self.cachefile, timeout=5)
|
||||
self.connection.text_factory = str
|
||||
self._setup_database()
|
||||
|
||||
@_Decorators.retry()
|
||||
@_Decorators.transaction
|
||||
def _execute_single(self, cursor, *query):
|
||||
"""
|
||||
Executes a single query and discards the results. This correctly closes
|
||||
the database cursor when finished
|
||||
"""
|
||||
cursor.execute(*query)
|
||||
|
||||
@_Decorators.retry()
|
||||
def _row_iter(self, f, *query):
|
||||
"""
|
||||
Helper function that returns a row iterator. Each time __next__ is
|
||||
called on the iterator, the provided function is evaluated to determine
|
||||
the return value
|
||||
"""
|
||||
class CursorIter(object):
|
||||
def __init__(self, cursor):
|
||||
self.cursor = cursor
|
||||
|
||||
def __iter__(self):
|
||||
return self
|
||||
|
||||
def __next__(self):
|
||||
row = self.cursor.fetchone()
|
||||
if row is None:
|
||||
def _execute(self, *query):
|
||||
"""Execute a query, waiting to acquire a lock if necessary"""
|
||||
count = 0
|
||||
while True:
|
||||
try:
|
||||
return self.cursor.execute(*query)
|
||||
except sqlite3.OperationalError as exc:
|
||||
if 'database is locked' in str(exc) and count < 500:
|
||||
count = count + 1
|
||||
self.cursor.close()
|
||||
raise StopIteration
|
||||
return f(row)
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def __exit__(self, typ, value, traceback):
|
||||
self.cursor.close()
|
||||
return False
|
||||
|
||||
cursor = self.connection.cursor()
|
||||
try:
|
||||
cursor.execute(*query)
|
||||
return CursorIter(cursor)
|
||||
except:
|
||||
cursor.close()
|
||||
self.cursor = connect(self.cachefile)
|
||||
continue
|
||||
raise
|
||||
|
||||
def __enter__(self):
self.connection.__enter__()
self.cursor.__enter__()
return self

def __exit__(self, *excinfo):
self.connection.__exit__(*excinfo)
self.cursor.__exit__(*excinfo)

@_Decorators.retry()
@_Decorators.transaction
def __getitem__(self, cursor, key):
cursor.execute("SELECT * from %s where key=?;" % self.table, [key])
row = cursor.fetchone()
if row is not None:
def __getitem__(self, key):
data = self._execute("SELECT * from %s where key=?;" %
self.table, [key])
for row in data:
return row[1]
raise KeyError(key)

@_Decorators.retry()
@_Decorators.transaction
def __delitem__(self, cursor, key):
def __delitem__(self, key):
if key not in self:
raise KeyError(key)
cursor.execute("DELETE from %s where key=?;" % self.table, [key])
self._execute("DELETE from %s where key=?;" % self.table, [key])

@_Decorators.retry()
@_Decorators.transaction
def __setitem__(self, cursor, key, value):
def __setitem__(self, key, value):
if not isinstance(key, str):
raise TypeError('Only string keys are supported')
elif not isinstance(value, str):
raise TypeError('Only string values are supported')

cursor.execute("SELECT * from %s where key=?;" % self.table, [key])
row = cursor.fetchone()
if row is not None:
cursor.execute("UPDATE %s SET value=? WHERE key=?;" % self.table, [value, key])
data = self._execute("SELECT * from %s where key=?;" %
self.table, [key])
exists = len(list(data))
if exists:
self._execute("UPDATE %s SET value=? WHERE key=?;" % self.table,
[value, key])
else:
cursor.execute("INSERT into %s(key, value) values (?, ?);" % self.table, [key, value])
self._execute("INSERT into %s(key, value) values (?, ?);" %
self.table, [key, value])

@_Decorators.retry()
@_Decorators.transaction
def __contains__(self, cursor, key):
cursor.execute('SELECT * from %s where key=?;' % self.table, [key])
return cursor.fetchone() is not None
def __contains__(self, key):
return key in set(self)

@_Decorators.retry()
@_Decorators.transaction
def __len__(self, cursor):
cursor.execute("SELECT COUNT(key) FROM %s;" % self.table)
row = cursor.fetchone()
if row is not None:
def __len__(self):
data = self._execute("SELECT COUNT(key) FROM %s;" % self.table)
for row in data:
return row[0]

def __iter__(self):
return self._row_iter(lambda row: row[0], "SELECT key from %s;" % self.table)
data = self._execute("SELECT key FROM %s;" % self.table)
return (row[0] for row in data)

def __lt__(self, other):
if not isinstance(other, Mapping):
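
Since the new schema declares key as a PRIMARY KEY, the SELECT-then-UPDATE-or-INSERT sequence in __setitem__ could in principle collapse into a single statement; SQLite has long supported INSERT OR REPLACE. A sketch of that alternative, which is the standard idiom rather than what this patch does:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE cache(key TEXT PRIMARY KEY NOT NULL, value TEXT)")

def set_item(key, value):
    # One round trip: replaces the row if the key exists, inserts otherwise.
    with conn:
        conn.execute("INSERT OR REPLACE INTO cache(key, value) VALUES (?, ?)",
                     (key, value))

set_item("recipe", "1.0")
set_item("recipe", "2.0")
assert conn.execute("SELECT value FROM cache WHERE key='recipe'").fetchone() == ("2.0",)
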
@@ -220,27 +126,25 @@ class SQLTable(collections.MutableMapping):
|
||||
return len(self) < len(other)
|
||||
|
||||
def get_by_pattern(self, pattern):
|
||||
return self._row_iter(lambda row: row[1], "SELECT * FROM %s WHERE key LIKE ?;" %
|
||||
self.table, [pattern])
|
||||
data = self._execute("SELECT * FROM %s WHERE key LIKE ?;" %
|
||||
self.table, [pattern])
|
||||
return [row[1] for row in data]
|
||||
|
||||
def values(self):
|
||||
return list(self.itervalues())
|
||||
|
||||
def itervalues(self):
|
||||
return self._row_iter(lambda row: row[0], "SELECT value FROM %s;" %
|
||||
self.table)
|
||||
data = self._execute("SELECT value FROM %s;" % self.table)
|
||||
return (row[0] for row in data)
|
||||
|
||||
def items(self):
|
||||
return list(self.iteritems())
|
||||
|
||||
def iteritems(self):
|
||||
return self._row_iter(lambda row: (row[0], row[1]), "SELECT * FROM %s;" %
|
||||
self.table)
|
||||
return self._execute("SELECT * FROM %s;" % self.table)
|
||||
|
||||
@_Decorators.retry()
|
||||
@_Decorators.transaction
|
||||
def clear(self, cursor):
|
||||
cursor.execute("DELETE FROM %s;" % self.table)
|
||||
def clear(self):
|
||||
self._execute("DELETE FROM %s;" % self.table)
|
||||
|
||||
def has_key(self, key):
|
||||
return key in self
|
||||
@@ -294,11 +198,17 @@ class PersistData(object):
|
||||
"""
|
||||
del self.data[domain][key]
|
||||
|
||||
def connect(database):
|
||||
connection = sqlite3.connect(database, timeout=5, isolation_level=None)
|
||||
connection.execute("pragma synchronous = off;")
|
||||
connection.text_factory = str
|
||||
return connection
|
||||
|
||||
def persist(domain, d):
|
||||
"""Convenience factory for SQLTable objects based upon metadata"""
|
||||
import bb.utils
|
||||
cachedir = (d.getVar("PERSISTENT_DIR") or
|
||||
d.getVar("CACHE"))
|
||||
cachedir = (d.getVar("PERSISTENT_DIR", True) or
|
||||
d.getVar("CACHE", True))
|
||||
if not cachedir:
|
||||
logger.critical("Please set the 'PERSISTENT_DIR' or 'CACHE' variable")
|
||||
sys.exit(1)
|
||||
|
||||
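The diff above shows the morty-era SQLTable running statements directly through _execute(), while the warrior side wraps each operation in retry/transaction decorators that pass in an explicit cursor. Both versions implement the same dict-like contract over a single key/value SQLite table. As a rough illustration of that contract, here is a minimal self-contained sketch; the class, table, and file names are invented for the example and bitbake's retry/locking logic is deliberately omitted:

import sqlite3

class MiniSQLTable:
    """Minimal dict-like view over one key/value SQLite table (sketch)."""
    def __init__(self, cachefile, table):
        self.table = table
        # Same connection settings as the connect() helper in the diff.
        self.conn = sqlite3.connect(cachefile, timeout=5, isolation_level=None)
        self.conn.text_factory = str
        self.conn.execute("CREATE TABLE IF NOT EXISTS %s(key TEXT PRIMARY KEY NOT NULL, value TEXT);" % table)

    def __getitem__(self, key):
        for row in self.conn.execute("SELECT value FROM %s WHERE key=?;" % self.table, [key]):
            return row[0]
        raise KeyError(key)

    def __setitem__(self, key, value):
        if not isinstance(key, str) or not isinstance(value, str):
            raise TypeError('Only string keys and values are supported')
        # INSERT OR REPLACE collapses the SELECT-then-UPDATE/INSERT dance
        # from the diff into a single statement.
        self.conn.execute("INSERT OR REPLACE INTO %s(key, value) VALUES (?, ?);" % self.table, [key, value])

    def __delitem__(self, key):
        if key not in self:
            raise KeyError(key)
        self.conn.execute("DELETE FROM %s WHERE key=?;" % self.table, [key])

    def __contains__(self, key):
        return any(True for _ in self.conn.execute(
            "SELECT 1 FROM %s WHERE key=?;" % self.table, [key]))

t = MiniSQLTable(":memory:", "demo")
t["pn"] = "busybox"
assert t["pn"] == "busybox"
del t["pn"]
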
@@ -94,53 +94,46 @@ def _logged_communicate(pipe, log, input, extrafiles):
            if data is not None:
                func(data)

    def read_all_pipes(log, rin, outdata, errdata):
        rlist = rin
        stdoutbuf = b""
        stderrbuf = b""

        try:
            r,w,e = select.select (rlist, [], [], 1)
        except OSError as e:
            if e.errno != errno.EINTR:
                raise

        readextras(r)

        if pipe.stdout in r:
            data = stdoutbuf + pipe.stdout.read()
            if data is not None and len(data) > 0:
                try:
                    data = data.decode("utf-8")
                    outdata.append(data)
                    log.write(data)
                    log.flush()
                    stdoutbuf = b""
                except UnicodeDecodeError:
                    stdoutbuf = data

        if pipe.stderr in r:
            data = stderrbuf + pipe.stderr.read()
            if data is not None and len(data) > 0:
                try:
                    data = data.decode("utf-8")
                    errdata.append(data)
                    log.write(data)
                    log.flush()
                    stderrbuf = b""
                except UnicodeDecodeError:
                    stderrbuf = data

    try:
        # Read all pipes while the process is open
        while pipe.poll() is None:
            read_all_pipes(log, rin, outdata, errdata)
            rlist = rin
            stdoutbuf = b""
            stderrbuf = b""
            try:
                r,w,e = select.select (rlist, [], [], 1)
            except OSError as e:
                if e.errno != errno.EINTR:
                    raise

        # Process closed, drain all pipes...
        read_all_pipes(log, rin, outdata, errdata)
    finally:
            if pipe.stdout in r:
                data = stdoutbuf + pipe.stdout.read()
                if data is not None and len(data) > 0:
                    try:
                        data = data.decode("utf-8")
                        outdata.append(data)
                        log.write(data)
                        stdoutbuf = b""
                    except UnicodeDecodeError:
                        stdoutbuf = data

            if pipe.stderr in r:
                data = stderrbuf + pipe.stderr.read()
                if data is not None and len(data) > 0:
                    try:
                        data = data.decode("utf-8")
                        errdata.append(data)
                        log.write(data)
                        stderrbuf = b""
                    except UnicodeDecodeError:
                        stderrbuf = data

            readextras(r)

    finally:
        log.flush()

    readextras([fobj for fobj, _ in extrafiles])

    if pipe.stdout is not None:
        pipe.stdout.close()
    if pipe.stderr is not None:
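Both versions of _logged_communicate follow the same pattern: select() with a timeout, read whatever is available, and keep any bytes that fail UTF-8 decoding in a buffer so that a multi-byte character split across two reads is retried once more data arrives. A minimal standalone sketch of that incremental-decode loop (the command is hypothetical, not from the diff):

import select
import subprocess

def drain(cmd):
    """Read a child's stdout incrementally, tolerating UTF-8 sequences
    that are split across read boundaries (sketch of the pattern used by
    _logged_communicate above)."""
    pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    buf = b""
    out = []
    while pipe.poll() is None:
        r, _, _ = select.select([pipe.stdout], [], [], 1)
        if pipe.stdout in r:
            data = buf + pipe.stdout.read1(4096)
            if data:
                try:
                    out.append(data.decode("utf-8"))
                    buf = b""
                except UnicodeDecodeError:
                    # Incomplete multi-byte sequence: keep the bytes and
                    # retry on the next pass.
                    buf = data
    # Process exited: drain whatever is left in the pipe.
    rest = buf + pipe.stdout.read()
    if rest:
        out.append(rest.decode("utf-8", errors="replace"))
    return "".join(out)

print(drain(["echo", "héllo"]), end="")
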
@@ -169,9 +162,9 @@ def run(cmd, input=None, log=None, extrafiles=None, **options):
        stdout, stderr = _logged_communicate(pipe, log, input, extrafiles)
    else:
        stdout, stderr = pipe.communicate(input)
        if not stdout is None:
        if stdout:
            stdout = stdout.decode("utf-8")
        if not stderr is None:
        if stderr:
            stderr = stderr.decode("utf-8")

    if pipe.returncode != 0:

@@ -48,6 +48,7 @@ def findProviders(cfgData, dataCache, pkg_pn = None):

    # Need to ensure data store is expanded
    localdata = data.createCopy(cfgData)
    bb.data.update_data(localdata)
    bb.data.expandKeys(localdata)

    preferred_versions = {}
@@ -122,14 +123,14 @@ def findPreferredProvider(pn, cfgData, dataCache, pkg_pn = None, item = None):

    # pn can contain '_', e.g. gcc-cross-x86_64 and an override cannot
    # hence we do this manually rather than use OVERRIDES
    preferred_v = cfgData.getVar("PREFERRED_VERSION_pn-%s" % pn)
    preferred_v = cfgData.getVar("PREFERRED_VERSION_pn-%s" % pn, True)
    if not preferred_v:
        preferred_v = cfgData.getVar("PREFERRED_VERSION_%s" % pn)
        preferred_v = cfgData.getVar("PREFERRED_VERSION_%s" % pn, True)
    if not preferred_v:
        preferred_v = cfgData.getVar("PREFERRED_VERSION")
        preferred_v = cfgData.getVar("PREFERRED_VERSION", True)

    if preferred_v:
        m = re.match(r'(\d+:)*(.*)(_.*)*', preferred_v)
        m = re.match('(\d+:)*(.*)(_.*)*', preferred_v)
        if m:
            if m.group(1):
                preferred_e = m.group(1)[:-1]
@@ -244,17 +245,17 @@ def _filterProviders(providers, item, cfgData, dataCache):
            pkg_pn[pn] = []
        pkg_pn[pn].append(p)

    logger.debug(1, "providers for %s are: %s", item, list(sorted(pkg_pn.keys())))
    logger.debug(1, "providers for %s are: %s", item, list(pkg_pn.keys()))

    # First add PREFERRED_VERSIONS
    for pn in sorted(pkg_pn):
    for pn in pkg_pn:
        sortpkg_pn[pn] = sortPriorities(pn, dataCache, pkg_pn)
        preferred_versions[pn] = findPreferredProvider(pn, cfgData, dataCache, sortpkg_pn[pn], item)
        if preferred_versions[pn][1]:
            eligible.append(preferred_versions[pn][1])

    # Now add latest versions
    for pn in sorted(sortpkg_pn):
    for pn in sortpkg_pn:
        if pn in preferred_versions and preferred_versions[pn][1]:
            continue
        preferred_versions[pn] = findLatestProvider(pn, cfgData, dataCache, sortpkg_pn[pn][0])
@@ -288,7 +289,7 @@ def filterProviders(providers, item, cfgData, dataCache):

    eligible = _filterProviders(providers, item, cfgData, dataCache)

    prefervar = cfgData.getVar('PREFERRED_PROVIDER_%s' % item)
    prefervar = cfgData.getVar('PREFERRED_PROVIDER_%s' % item, True)
    if prefervar:
        dataCache.preferred[item] = prefervar

@@ -317,7 +318,7 @@ def filterProvidersRunTime(providers, item, cfgData, dataCache):
    eligible = _filterProviders(providers, item, cfgData, dataCache)

    # First try and match any PREFERRED_RPROVIDER entry
    prefervar = cfgData.getVar('PREFERRED_RPROVIDER_%s' % item)
    prefervar = cfgData.getVar('PREFERRED_RPROVIDER_%s' % item, True)
    foundUnique = False
    if prefervar:
        for p in eligible:
@@ -344,7 +345,7 @@ def filterProvidersRunTime(providers, item, cfgData, dataCache):
            pn = dataCache.pkg_fn[p]
            provides = dataCache.pn_provides[pn]
            for provide in provides:
                prefervar = cfgData.getVar('PREFERRED_PROVIDER_%s' % provide)
                prefervar = cfgData.getVar('PREFERRED_PROVIDER_%s' % provide, True)
                #logger.debug(1, "checking PREFERRED_PROVIDER_%s (value %s) against %s", provide, prefervar, pns.keys())
                if prefervar in pns and pns[prefervar] not in preferred:
                    var = "PREFERRED_PROVIDER_%s = %s" % (provide, prefervar)
@@ -384,7 +385,7 @@ def getRuntimeProviders(dataCache, rdepend):

    # Only search dynamic packages if we can't find anything in other variables
    for pattern in dataCache.packages_dynamic:
        pattern = pattern.replace(r'+', r"\+")
        pattern = pattern.replace('+', "\+")
        if pattern in regexp_cache:
            regexp = regexp_cache[pattern]
        else:

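The PREFERRED_VERSION value is matched with the pattern (\d+:)*(.*)(_.*)* to split an optional epoch prefix from the version proper (the warrior side of the hunk merely makes the pattern a raw string). A small sketch of that split using the same regex; the helper name is invented for the example:

import re

def split_preferred_version(preferred_v):
    # Same pattern as findPreferredProvider(): an optional "epoch:" prefix,
    # then the version. The trailing (_.*)* group is effectively unused
    # because (.*) is greedy.
    m = re.match(r'(\d+:)*(.*)(_.*)*', preferred_v)
    preferred_e = m.group(1)[:-1] if m.group(1) else None
    return preferred_e, m.group(2)

assert split_preferred_version("2:1.25.1") == ("2", "1.25.1")
assert split_preferred_version("5.4%") == (None, "5.4%")
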
710	bitbake/lib/bb/pysh/builtin.py	Normal file
@@ -0,0 +1,710 @@
# builtin.py - builtins and utilities definitions for pysh.
#
# Copyright 2007 Patrick Mezard
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

"""Builtin and internal utilities implementations.

- Beware not to use the python interpreter environment as if it were the shell
environment. For instance, commands working directory must be explicitly handled
through env['PWD'] instead of relying on the python working directory.
"""
import errno
import optparse
import os
import re
import subprocess
import sys
import time

def has_subprocess_bug():
    return getattr(subprocess, 'list2cmdline') and \
           (subprocess.list2cmdline(['']) == '' or \
            subprocess.list2cmdline(['foo|bar']) == 'foo|bar')

# Detect python bug 1634343: "subprocess swallows empty arguments under win32"
# <http://sourceforge.net/tracker/index.php?func=detail&aid=1634343&group_id=5470&atid=105470>
# Also detect: "[ 1710802 ] subprocess must escape redirection characters under win32"
# <http://sourceforge.net/tracker/index.php?func=detail&aid=1710802&group_id=5470&atid=105470>
if has_subprocess_bug():
    import subprocess_fix
    subprocess.list2cmdline = subprocess_fix.list2cmdline

from sherrors import *

class NonExitingParser(optparse.OptionParser):
    """OptionParser default behaviour upon error is to print the error message and
    exit. Raise a utility error instead.
    """
    def error(self, msg):
        raise UtilityError(msg)

#-------------------------------------------------------------------------------
# set special builtin
#-------------------------------------------------------------------------------
OPT_SET = NonExitingParser(usage="set - set or unset options and positional parameters")
OPT_SET.add_option('-f', action='store_true', dest='has_f', default=False,
    help='The shell shall disable pathname expansion.')
OPT_SET.add_option('-e', action='store_true', dest='has_e', default=False,
    help="""When this option is on, if a simple command fails for any of the \
reasons listed in Consequences of Shell Errors or returns an exit status \
value >0, and is not part of the compound list following a while, until, \
or if keyword, and is not a part of an AND or OR list, and is not a \
pipeline preceded by the ! reserved word, then the shell shall immediately \
exit.""")
OPT_SET.add_option('-x', action='store_true', dest='has_x', default=False,
    help="""The shell shall write to standard error a trace for each command \
after it expands the command and before it executes it. It is unspecified \
whether the command that turns tracing off is traced.""")

def builtin_set(name, args, interp, env, stdin, stdout, stderr, debugflags):
    if 'debug-utility' in debugflags:
        print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')

    option, args = OPT_SET.parse_args(args)
    env = interp.get_env()

    if option.has_f:
        env.set_opt('-f')
    if option.has_e:
        env.set_opt('-e')
    if option.has_x:
        env.set_opt('-x')
    return 0

#-------------------------------------------------------------------------------
# shift special builtin
#-------------------------------------------------------------------------------
def builtin_shift(name, args, interp, env, stdin, stdout, stderr, debugflags):
    if 'debug-utility' in debugflags:
        print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')

    params = interp.get_env().get_positional_args()
    if args:
        try:
            n = int(args[0])
            if n > len(params):
                raise ValueError()
        except ValueError:
            return 1
    else:
        n = 1

    params[:n] = []
    interp.get_env().set_positional_args(params)
    return 0

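NonExitingParser above overrides optparse.OptionParser.error() so a bad option raises UtilityError instead of calling sys.exit(), which would otherwise kill the whole embedding interpreter. The same trick works with any optparse parser; a compact standalone sketch, with the exception name chosen for the example:

import optparse

class ParseFailure(Exception):
    """Stand-in for pysh's UtilityError."""

class RaisingParser(optparse.OptionParser):
    # optparse's default error() prints usage and exits the process;
    # raising instead lets the embedding application recover.
    def error(self, msg):
        raise ParseFailure(msg)

parser = RaisingParser(usage="demo")
parser.add_option('-f', action='store_true', dest='has_f', default=False)
try:
    parser.parse_args(['--no-such-option'])
except ParseFailure as e:
    print("recovered from:", e)
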
#-------------------------------------------------------------------------------
# export special builtin
#-------------------------------------------------------------------------------
OPT_EXPORT = NonExitingParser(usage="set - set or unset options and positional parameters")
OPT_EXPORT.add_option('-p', action='store_true', dest='has_p', default=False)

def builtin_export(name, args, interp, env, stdin, stdout, stderr, debugflags):
    if 'debug-utility' in debugflags:
        print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')

    option, args = OPT_EXPORT.parse_args(args)
    if option.has_p:
        raise NotImplementedError()

    for arg in args:
        try:
            name, value = arg.split('=', 1)
        except ValueError:
            name, value = arg, None
        env = interp.get_env().export(name, value)

    return 0

#-------------------------------------------------------------------------------
# return special builtin
#-------------------------------------------------------------------------------
def builtin_return(name, args, interp, env, stdin, stdout, stderr, debugflags):
    if 'debug-utility' in debugflags:
        print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')
    res = 0
    if args:
        try:
            res = int(args[0])
        except ValueError:
            res = 0
        if not 0<=res<=255:
            res = 0

    # BUG: should be last executed command exit code
    raise ReturnSignal(res)

#-------------------------------------------------------------------------------
# trap special builtin
#-------------------------------------------------------------------------------
def builtin_trap(name, args, interp, env, stdin, stdout, stderr, debugflags):
    if 'debug-utility' in debugflags:
        print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')
    if len(args) < 2:
        stderr.write('trap: usage: trap [[arg] signal_spec ...]\n')
        return 2

    action = args[0]
    for sig in args[1:]:
        try:
            env.traps[sig] = action
        except Exception as e:
            stderr.write('trap: %s\n' % str(e))
    return 0

#-------------------------------------------------------------------------------
# unset special builtin
#-------------------------------------------------------------------------------
OPT_UNSET = NonExitingParser("unset - unset values and attributes of variables and functions")
OPT_UNSET.add_option('-f', action='store_true', dest='has_f', default=False)
OPT_UNSET.add_option('-v', action='store_true', dest='has_v', default=False)

def builtin_unset(name, args, interp, env, stdin, stdout, stderr, debugflags):
    if 'debug-utility' in debugflags:
        print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')

    option, args = OPT_UNSET.parse_args(args)

    status = 0
    env = interp.get_env()
    for arg in args:
        try:
            if option.has_f:
                env.remove_function(arg)
            else:
                del env[arg]
        except KeyError:
            pass
        except VarAssignmentError:
            status = 1

    return status

#-------------------------------------------------------------------------------
# wait special builtin
#-------------------------------------------------------------------------------
def builtin_wait(name, args, interp, env, stdin, stdout, stderr, debugflags):
    if 'debug-utility' in debugflags:
        print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')

    return interp.wait([int(arg) for arg in args])

#-------------------------------------------------------------------------------
# cat utility
#-------------------------------------------------------------------------------
def utility_cat(name, args, interp, env, stdin, stdout, stderr, debugflags):
    if 'debug-utility' in debugflags:
        print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')

    if not args:
        args = ['-']

    status = 0
    for arg in args:
        if arg == '-':
            data = stdin.read()
        else:
            path = os.path.join(env['PWD'], arg)
            try:
                f = file(path, 'rb')
                try:
                    data = f.read()
                finally:
                    f.close()
            except IOError as e:
                if e.errno != errno.ENOENT:
                    raise
                status = 1
                continue
        stdout.write(data)
        stdout.flush()
    return status

#-------------------------------------------------------------------------------
# cd utility
#-------------------------------------------------------------------------------
OPT_CD = NonExitingParser("cd - change the working directory")

def utility_cd(name, args, interp, env, stdin, stdout, stderr, debugflags):
    if 'debug-utility' in debugflags:
        print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')

    option, args = OPT_CD.parse_args(args)
    env = interp.get_env()

    directory = None
    printdir = False
    if not args:
        home = env.get('HOME')
        if home:
            # Unspecified, do nothing
            return 0
        else:
            directory = home
    elif len(args)==1:
        directory = args[0]
        if directory=='-':
            if 'OLDPWD' not in env:
                raise UtilityError("OLDPWD not set")
            printdir = True
            directory = env['OLDPWD']
    else:
        raise UtilityError("too many arguments")

    curpath = None
    # Absolute directories will be handled correctly by the os.path.join call.
    if not directory.startswith('.') and not directory.startswith('..'):
        cdpaths = env.get('CDPATH', '.').split(';')
        for cdpath in cdpaths:
            p = os.path.join(cdpath, directory)
            if os.path.isdir(p):
                curpath = p
                break

    if curpath is None:
        curpath = directory
    curpath = os.path.join(env['PWD'], directory)

    env['OLDPWD'] = env['PWD']
    env['PWD'] = curpath
    if printdir:
        stdout.write('%s\n' % curpath)
    return 0

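For a name that is not explicitly relative, utility_cd first tries each CDPATH entry and only then falls back to joining against the shell's tracked PWD. A standalone sketch of that lookup order (note that the pysh code splits CDPATH on ';', a trace of its Windows heritage, where POSIX shells use ':'; the helper name here is invented):

import os

def resolve_cd(directory, pwd, cdpath="."):
    """Return the path `cd` would switch to, trying CDPATH entries first
    for names that are not explicitly relative (sketch of utility_cd)."""
    if not directory.startswith('.'):
        for entry in cdpath.split(os.pathsep):
            candidate = os.path.join(entry, directory)
            if os.path.isdir(candidate):
                return candidate
    # Fall back: resolve against the tracked PWD. Absolute directories are
    # handled correctly because os.path.join discards the first argument
    # when the second one is absolute.
    return os.path.join(pwd, directory)

print(resolve_cd("tmp", pwd="/", cdpath="."))
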
#-------------------------------------------------------------------------------
# colon utility
#-------------------------------------------------------------------------------
def utility_colon(name, args, interp, env, stdin, stdout, stderr, debugflags):
    if 'debug-utility' in debugflags:
        print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')
    return 0

#-------------------------------------------------------------------------------
# echo utility
#-------------------------------------------------------------------------------
def utility_echo(name, args, interp, env, stdin, stdout, stderr, debugflags):
    if 'debug-utility' in debugflags:
        print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')

    # Echo only takes arguments, no options. Use printf if you need fancy stuff.
    output = ' '.join(args) + '\n'
    stdout.write(output)
    stdout.flush()
    return 0

#-------------------------------------------------------------------------------
# egrep utility
#-------------------------------------------------------------------------------
# egrep is usually a shell script.
# Unfortunately, pysh does not support shell scripts *with arguments* right now,
# so the redirection is implemented here, assuming grep is available.
def utility_egrep(name, args, interp, env, stdin, stdout, stderr, debugflags):
    if 'debug-utility' in debugflags:
        print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')

    return run_command('grep', ['-E'] + args, interp, env, stdin, stdout,
        stderr, debugflags)

#-------------------------------------------------------------------------------
# env utility
#-------------------------------------------------------------------------------
def utility_env(name, args, interp, env, stdin, stdout, stderr, debugflags):
    if 'debug-utility' in debugflags:
        print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')

    if args and args[0]=='-i':
        raise NotImplementedError('env: -i option is not implemented')

    i = 0
    for arg in args:
        if '=' not in arg:
            break
        # Update the current environment
        name, value = arg.split('=', 1)
        env[name] = value
        i += 1

    if args[i:]:
        # Find then execute the specified interpreter
        utility = env.find_in_path(args[i])
        if not utility:
            return 127
        args[i:i+1] = utility
        name = args[i]
        args = args[i+1:]
        try:
            return run_command(name, args, interp, env, stdin, stdout, stderr,
                debugflags)
        except UtilityError:
            stderr.write('env: failed to execute %s' % ' '.join([name]+args))
            return 126
    else:
        for pair in env.get_variables().iteritems():
            stdout.write('%s=%s\n' % pair)
        return 0

#-------------------------------------------------------------------------------
# exit utility
#-------------------------------------------------------------------------------
def utility_exit(name, args, interp, env, stdin, stdout, stderr, debugflags):
    if 'debug-utility' in debugflags:
        print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')

    res = None
    if args:
        try:
            res = int(args[0])
        except ValueError:
            res = None
        if not 0<=res<=255:
            res = None

    if res is None:
        # BUG: should be last executed command exit code
        res = 0

    raise ExitSignal(res)

#-------------------------------------------------------------------------------
# fgrep utility
#-------------------------------------------------------------------------------
# see egrep
def utility_fgrep(name, args, interp, env, stdin, stdout, stderr, debugflags):
    if 'debug-utility' in debugflags:
        print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')

    return run_command('grep', ['-F'] + args, interp, env, stdin, stdout,
        stderr, debugflags)

#-------------------------------------------------------------------------------
# gunzip utility
#-------------------------------------------------------------------------------
# see egrep
def utility_gunzip(name, args, interp, env, stdin, stdout, stderr, debugflags):
    if 'debug-utility' in debugflags:
        print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')

    return run_command('gzip', ['-d'] + args, interp, env, stdin, stdout,
        stderr, debugflags)

#-------------------------------------------------------------------------------
# kill utility
#-------------------------------------------------------------------------------
def utility_kill(name, args, interp, env, stdin, stdout, stderr, debugflags):
    if 'debug-utility' in debugflags:
        print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')

    for arg in args:
        pid = int(arg)
        status = subprocess.call(['pskill', '/T', str(pid)],
                                 shell=True,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
        # pskill is asynchronous, hence the stupid polling loop
        while 1:
            p = subprocess.Popen(['pslist', str(pid)],
                                 shell=True,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT)
            output = p.communicate()[0]
            if ('process %d was not' % pid) in output:
                break
            time.sleep(1)
    return status

#-------------------------------------------------------------------------------
# mkdir utility
#-------------------------------------------------------------------------------
OPT_MKDIR = NonExitingParser("mkdir - make directories.")
OPT_MKDIR.add_option('-p', action='store_true', dest='has_p', default=False)

def utility_mkdir(name, args, interp, env, stdin, stdout, stderr, debugflags):
    if 'debug-utility' in debugflags:
        print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')

    # TODO: implement umask
    # TODO: implement proper utility error report
    option, args = OPT_MKDIR.parse_args(args)
    for arg in args:
        path = os.path.join(env['PWD'], arg)
        if option.has_p:
            try:
                os.makedirs(path)
            except IOError as e:
                if e.errno != errno.EEXIST:
                    raise
        else:
            os.mkdir(path)
    return 0

#-------------------------------------------------------------------------------
# netstat utility
#-------------------------------------------------------------------------------
def utility_netstat(name, args, interp, env, stdin, stdout, stderr, debugflags):
    # Do you really expect me to implement netstat ?
    # This empty form is enough for Mercurial tests since it's
    # supposed to generate nothing upon success. Faking this test
    # is not a big deal either.
    if 'debug-utility' in debugflags:
        print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')
    return 0

#-------------------------------------------------------------------------------
# pwd utility
#-------------------------------------------------------------------------------
OPT_PWD = NonExitingParser("pwd - return working directory name")
OPT_PWD.add_option('-L', action='store_true', dest='has_L', default=True,
    help="""If the PWD environment variable contains an absolute pathname of \
the current directory that does not contain the filenames dot or dot-dot, \
pwd shall write this pathname to standard output. Otherwise, the -L option \
shall behave as the -P option.""")
OPT_PWD.add_option('-P', action='store_true', dest='has_L', default=False,
    help="""The absolute pathname written shall not contain filenames that, in \
the context of the pathname, refer to files of type symbolic link.""")

def utility_pwd(name, args, interp, env, stdin, stdout, stderr, debugflags):
    if 'debug-utility' in debugflags:
        print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')

    option, args = OPT_PWD.parse_args(args)
    stdout.write('%s\n' % env['PWD'])
    return 0

#-------------------------------------------------------------------------------
# printf utility
#-------------------------------------------------------------------------------
RE_UNESCAPE = re.compile(r'(\\x[a-zA-Z0-9]{2}|\\[0-7]{1,3}|\\.)')

def utility_printf(name, args, interp, env, stdin, stdout, stderr, debugflags):
    if 'debug-utility' in debugflags:
        print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')

    def replace(m):
        assert m.group()
        g = m.group()[1:]
        if g.startswith('x'):
            return chr(int(g[1:], 16))
        if len(g) <= 3 and len([c for c in g if c in '01234567']) == len(g):
            # Yay, an octal number
            return chr(int(g, 8))
        return {
            'a': '\a',
            'b': '\b',
            'f': '\f',
            'n': '\n',
            'r': '\r',
            't': '\t',
            'v': '\v',
            '\\': '\\',
        }.get(g)

    # Convert escape sequences
    format = re.sub(RE_UNESCAPE, replace, args[0])
    stdout.write(format % tuple(args[1:]))
    return 0

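RE_UNESCAPE captures \xHH hex escapes, one-to-three-digit octal escapes, and single-character escapes in one alternation, and replace() maps each match to the character it denotes before the format string is applied. A quick standalone demonstration of the same substitution (the wrapper function is added for the example):

import re

RE_UNESCAPE = re.compile(r'(\\x[a-zA-Z0-9]{2}|\\[0-7]{1,3}|\\.)')

def unescape(fmt):
    table = {'a': '\a', 'b': '\b', 'f': '\f', 'n': '\n',
             'r': '\r', 't': '\t', 'v': '\v', '\\': '\\'}
    def replace(m):
        g = m.group()[1:]
        if g.startswith('x'):                    # \x41 -> 'A'
            return chr(int(g[1:], 16))
        if all(c in '01234567' for c in g):      # \101 -> 'A'
            return chr(int(g, 8))
        return table.get(g, g)
    return RE_UNESCAPE.sub(replace, fmt)

assert unescape(r'col1\tcol2\n') == 'col1\tcol2\n'
assert unescape(r'\x41\102') == 'AB'
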
#-------------------------------------------------------------------------------
# true utility
#-------------------------------------------------------------------------------
def utility_true(name, args, interp, env, stdin, stdout, stderr, debugflags):
    if 'debug-utility' in debugflags:
        print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')
    return 0

#-------------------------------------------------------------------------------
# sed utility
#-------------------------------------------------------------------------------
RE_SED = re.compile(r'^s(.).*\1[a-zA-Z]*$')

# cygwin sed fails with some expressions when they do not end with a single space.
# See the unit tests for details. Interestingly, the same expressions work perfectly
# in the cygwin shell.
def utility_sed(name, args, interp, env, stdin, stdout, stderr, debugflags):
    if 'debug-utility' in debugflags:
        print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')

    # Scan pattern arguments and append a space if necessary
    for i in range(len(args)):
        if not RE_SED.search(args[i]):
            continue
        args[i] = args[i] + ' '

    return run_command(name, args, interp, env, stdin, stdout,
        stderr, debugflags)

#-------------------------------------------------------------------------------
# sleep utility
#-------------------------------------------------------------------------------
def utility_sleep(name, args, interp, env, stdin, stdout, stderr, debugflags):
    if 'debug-utility' in debugflags:
        print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')
    time.sleep(int(args[0]))
    return 0

#-------------------------------------------------------------------------------
# sort utility
#-------------------------------------------------------------------------------
OPT_SORT = NonExitingParser("sort - sort, merge, or sequence check text files")

def utility_sort(name, args, interp, env, stdin, stdout, stderr, debugflags):

    def sort(path):
        if path == '-':
            lines = stdin.readlines()
        else:
            try:
                f = file(path)
                try:
                    lines = f.readlines()
                finally:
                    f.close()
            except IOError as e:
                stderr.write(str(e) + '\n')
                return 1

        if lines and lines[-1][-1]!='\n':
            lines[-1] = lines[-1] + '\n'
        return lines

    if 'debug-utility' in debugflags:
        print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')

    option, args = OPT_SORT.parse_args(args)
    alllines = []

    if len(args)<=0:
        args += ['-']

    # Load all files lines
    curdir = os.getcwd()
    try:
        os.chdir(env['PWD'])
        for path in args:
            alllines += sort(path)
    finally:
        os.chdir(curdir)

    alllines.sort()
    for line in alllines:
        stdout.write(line)
    return 0

#-------------------------------------------------------------------------------
# hg utility
#-------------------------------------------------------------------------------

hgcommands = [
    'add',
    'addremove',
    'commit', 'ci',
    'debugrename',
    'debugwalk',
    'falabala', # Dummy command used in a mercurial test
    'incoming',
    'locate',
    'pull',
    'push',
    'qinit',
    'remove', 'rm',
    'rename', 'mv',
    'revert',
    'showconfig',
    'status', 'st',
    'strip',
    ]

def rewriteslashes(name, args):
    # Several hg commands output file paths, rewrite the separators
    if len(args) > 1 and name.lower().endswith('python') \
       and args[0].endswith('hg'):
        for cmd in hgcommands:
            if cmd in args[1:]:
                return True

    # svn output contains many paths with OS specific separators.
    # Normalize these to unix paths.
    base = os.path.basename(name)
    if base.startswith('svn'):
        return True

    return False

def rewritehg(output):
    if not output:
        return output
    # Rewrite os specific messages
    output = output.replace(': The system cannot find the file specified',
                            ': No such file or directory')
    output = re.sub(': Access is denied.*$', ': Permission denied', output)
    output = output.replace(': No connection could be made because the target machine actively refused it',
                            ': Connection refused')
    return output


def run_command(name, args, interp, env, stdin, stdout,
                stderr, debugflags):
    # Execute the command
    if 'debug-utility' in debugflags:
        print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')

    hgbin = interp.options().hgbinary
    ishg = hgbin and ('hg' in name or args and 'hg' in args[0])
    unixoutput = 'cygwin' in name or ishg

    exec_env = env.get_variables()
    try:
        # BUG: comparing file descriptors is clearly not a reliable way to tell
        # whether they point to the same underlying object. But in pysh's limited
        # scope this is usually right; we do not expect complicated redirections
        # besides the usual 2>&1.
        # Still, there is one case we cannot deal with: when stdout and stderr
        # are redirected *by the pysh caller*. This is the reason for the
        # --redirect pysh() option.
        # Now, we want to know they are the same because we sometimes need to
        # transform the command output, mostly removing CR-LF to ensure that
        # command output is unix-like. Cygwin utilities are a special case because
        # they explicitly set their output streams to binary mode, so we have
        # nothing to do. For all other commands, we have to guess whether they
        # are sending text data, in which case the transformation must be done.
        # Again, the NUL character test is unreliable but should be enough for
        # the hg tests.
        redirected = stdout.fileno()==stderr.fileno()
        if not redirected:
            p = subprocess.Popen([name] + args, cwd=env['PWD'], env=exec_env,
                stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        else:
            p = subprocess.Popen([name] + args, cwd=env['PWD'], env=exec_env,
                stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        out, err = p.communicate()
    except WindowsError as e:
        raise UtilityError(str(e))

    if not unixoutput:
        def encode(s):
            if '\0' in s:
                return s
            return s.replace('\r\n', '\n')
    else:
        encode = lambda s: s

    if rewriteslashes(name, args):
        encode1_ = encode
        def encode(s):
            s = encode1_(s)
            s = s.replace('\\\\', '\\')
            s = s.replace('\\', '/')
            return s

    if ishg:
        encode2_ = encode
        def encode(s):
            return rewritehg(encode2_(s))

    stdout.write(encode(out))
    if not redirected:
        stderr.write(encode(err))
    return p.returncode

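run_command builds its output filter by successively wrapping encode: CR-LF normalization first, then backslash-to-slash rewriting, then the hg message rewrites, each closure capturing the previous stage. The same composition can be written generically; a small sketch (helper names invented for the example):

def compose(*stages):
    """Chain text filters left-to-right, like run_command's nested
    encode() wrappers (illustrative only)."""
    def apply(s):
        for stage in stages:
            s = stage(s)
        return s
    return apply

crlf = lambda s: s if '\0' in s else s.replace('\r\n', '\n')
slashes = lambda s: s.replace('\\\\', '\\').replace('\\', '/')

encode = compose(crlf, slashes)
print(encode('a\\b\r\nc'))   # prints "a/b", a newline, then "c"
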
1367	bitbake/lib/bb/pysh/interp.py	Normal file
File diff suppressed because it is too large
116	bitbake/lib/bb/pysh/lsprof.py	Normal file
@@ -0,0 +1,116 @@
#! /usr/bin/env python

import sys
from _lsprof import Profiler, profiler_entry

__all__ = ['profile', 'Stats']

def profile(f, *args, **kwds):
    """XXX docstring"""
    p = Profiler()
    p.enable(subcalls=True, builtins=True)
    try:
        f(*args, **kwds)
    finally:
        p.disable()
    return Stats(p.getstats())


class Stats(object):
    """XXX docstring"""

    def __init__(self, data):
        self.data = data

    def sort(self, crit="inlinetime"):
        """XXX docstring"""
        if crit not in profiler_entry.__dict__:
            raise ValueError("Can't sort by %s" % crit)
        self.data.sort(lambda b, a: cmp(getattr(a, crit),
                                        getattr(b, crit)))
        for e in self.data:
            if e.calls:
                e.calls.sort(lambda b, a: cmp(getattr(a, crit),
                                              getattr(b, crit)))

    def pprint(self, top=None, file=None, limit=None, climit=None):
        """XXX docstring"""
        if file is None:
            file = sys.stdout
        d = self.data
        if top is not None:
            d = d[:top]
        cols = "% 12s %12s %11.4f %11.4f %s\n"
        hcols = "% 12s %12s %12s %12s %s\n"
        cols2 = "+%12s %12s %11.4f %11.4f + %s\n"
        file.write(hcols % ("CallCount", "Recursive", "Total(ms)",
                            "Inline(ms)", "module:lineno(function)"))
        count = 0
        for e in d:
            file.write(cols % (e.callcount, e.reccallcount, e.totaltime,
                               e.inlinetime, label(e.code)))
            count += 1
            if limit is not None and count == limit:
                return
            ccount = 0
            if e.calls:
                for se in e.calls:
                    file.write(cols % ("+%s" % se.callcount, se.reccallcount,
                                       se.totaltime, se.inlinetime,
                                       "+%s" % label(se.code)))
                    count += 1
                    ccount += 1
                    if limit is not None and count == limit:
                        return
                    if climit is not None and ccount == climit:
                        break

    def freeze(self):
        """Replace all references to code objects with string
        descriptions; this makes it possible to pickle the instance."""

        # this code is probably rather ickier than it needs to be!
        for i in range(len(self.data)):
            e = self.data[i]
            if not isinstance(e.code, str):
                self.data[i] = type(e)((label(e.code),) + e[1:])
            if e.calls:
                for j in range(len(e.calls)):
                    se = e.calls[j]
                    if not isinstance(se.code, str):
                        e.calls[j] = type(se)((label(se.code),) + se[1:])

_fn2mod = {}

def label(code):
    if isinstance(code, str):
        return code
    try:
        mname = _fn2mod[code.co_filename]
    except KeyError:
        for k, v in sys.modules.items():
            if v is None:
                continue
            if not hasattr(v, '__file__'):
                continue
            if not isinstance(v.__file__, str):
                continue
            if v.__file__.startswith(code.co_filename):
                mname = _fn2mod[code.co_filename] = k
                break
        else:
            mname = _fn2mod[code.co_filename] = '<%s>'%code.co_filename

    return '%s:%d(%s)' % (mname, code.co_firstlineno, code.co_name)


if __name__ == '__main__':
    import os
    sys.argv = sys.argv[1:]
    if not sys.argv:
        print >> sys.stderr, "usage: lsprof.py <script> <arguments...>"
        sys.exit(2)
    sys.path.insert(0, os.path.abspath(os.path.dirname(sys.argv[0])))
    stats = profile(execfile, sys.argv[0], globals(), locals())
    stats.sort()
    stats.pprint()

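lsprof.py above wraps the _lsprof C extension directly (it is Python 2 code, as the print statements show). On a modern Python the same measurement is available through the stdlib cProfile/pstats front end, which is built on the same _lsprof extension; an equivalent usage sketch of the profile()/sort()/pprint() flow, with the profiled function invented for the example:

import cProfile
import io
import pstats

def work():
    return sum(i * i for i in range(100000))

profiler = cProfile.Profile()            # cProfile wraps _lsprof.Profiler
profiler.enable(subcalls=True, builtins=True)
try:
    work()
finally:
    profiler.disable()

out = io.StringIO()
# Roughly Stats.sort(crit="inlinetime") plus pprint(top=10) from lsprof.py.
pstats.Stats(profiler, stream=out).sort_stats("tottime").print_stats(10)
print(out.getvalue())
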
167	bitbake/lib/bb/pysh/pysh.py	Normal file
@@ -0,0 +1,167 @@
# pysh.py - command processing for pysh.
#
# Copyright 2007 Patrick Mezard
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

import optparse
import os
import sys

import interp

SH_OPT = optparse.OptionParser(prog='pysh', usage="%prog [OPTIONS]", version='0.1')
SH_OPT.add_option('-c', action='store_true', dest='command_string', default=None,
    help='A string that shall be interpreted by the shell as one or more commands')
SH_OPT.add_option('--redirect-to', dest='redirect_to', default=None,
    help='Redirect script commands stdout and stderr to the specified file')
# See utility_command in builtin.py about the reason for this flag.
SH_OPT.add_option('--redirected', dest='redirected', action='store_true', default=False,
    help='Tell the interpreter that stdout and stderr are actually the same objects, which is really stdout')
SH_OPT.add_option('--debug-parsing', action='store_true', dest='debug_parsing', default=False,
    help='Trace PLY execution')
SH_OPT.add_option('--debug-tree', action='store_true', dest='debug_tree', default=False,
    help='Display the generated syntax tree.')
SH_OPT.add_option('--debug-cmd', action='store_true', dest='debug_cmd', default=False,
    help='Trace command execution before parameters expansion and exit status.')
SH_OPT.add_option('--debug-utility', action='store_true', dest='debug_utility', default=False,
    help='Trace utility calls, after parameters expansions')
SH_OPT.add_option('--ast', action='store_true', dest='ast', default=False,
    help='Encoded commands to execute in a subprocess')
SH_OPT.add_option('--profile', action='store_true', default=False,
    help='Profile pysh run')


def split_args(args):
    # Separate shell arguments from command ones.
    # Just stop at the first argument not starting with a dash. I know, this is
    # completely broken: it ignores files starting with a dash and may swallow
    # option values meant for the command file. This is not supposed to happen
    # for now.
    command_index = len(args)
    for i,arg in enumerate(args):
        if not arg.startswith('-'):
            command_index = i
            break

    return args[:command_index], args[command_index:]


def fixenv(env):
    path = env.get('PATH')
    if path is not None:
        parts = path.split(os.pathsep)
        # Remove Windows utilities from PATH, they are useless at best and
        # some of them (find) may be confused with other utilities.
        parts = [p for p in parts if 'system32' not in p.lower()]
        env['PATH'] = os.pathsep.join(parts)
    if env.get('HOME') is None:
        # Several utilities, including cvsps, cannot work without
        # a defined HOME directory.
        env['HOME'] = os.path.expanduser('~')
    return env

def _sh(cwd, shargs, cmdargs, options, debugflags=None, env=None):
    if os.environ.get('PYSH_TEXT') != '1':
        import msvcrt
        for fp in (sys.stdin, sys.stdout, sys.stderr):
            msvcrt.setmode(fp.fileno(), os.O_BINARY)

    hgbin = os.environ.get('PYSH_HGTEXT') != '1'

    if debugflags is None:
        debugflags = []
        if options.debug_parsing:    debugflags.append('debug-parsing')
        if options.debug_utility:    debugflags.append('debug-utility')
        if options.debug_cmd:        debugflags.append('debug-cmd')
        if options.debug_tree:       debugflags.append('debug-tree')

    if env is None:
        env = fixenv(dict(os.environ))
    if cwd is None:
        cwd = os.getcwd()

    if not cmdargs:
        # Nothing to do
        return 0

    ast = None
    command_file = None
    if options.command_string:
        input = cmdargs[0]
        if not options.ast:
            input += '\n'
        else:
            args, input = interp.decodeargs(input), None
            env, ast = args
            cwd = env.get('PWD', cwd)
    else:
        command_file = cmdargs[0]
        arguments = cmdargs[1:]

        prefix = interp.resolve_shebang(command_file, ignoreshell=True)
        if prefix:
            input = ' '.join(prefix + [command_file] + arguments)
        else:
            # Read commands from file
            f = file(command_file)
            try:
                # Trailing newline to help the parser
                input = f.read() + '\n'
            finally:
                f.close()

    redirect = None
    try:
        if options.redirected:
            stdout = sys.stdout
            stderr = stdout
        elif options.redirect_to:
            redirect = open(options.redirect_to, 'wb')
            stdout = redirect
            stderr = redirect
        else:
            stdout = sys.stdout
            stderr = sys.stderr

        # TODO: set arguments to environment variables
        opts = interp.Options()
        opts.hgbinary = hgbin
        ip = interp.Interpreter(cwd, debugflags, stdout=stdout, stderr=stderr,
                                opts=opts)
        try:
            # Export given environment in shell object
            for k,v in env.iteritems():
                ip.get_env().export(k,v)
            return ip.execute_script(input, ast, scriptpath=command_file)
        finally:
            ip.close()
    finally:
        if redirect is not None:
            redirect.close()

def sh(cwd=None, args=None, debugflags=None, env=None):
    if args is None:
        args = sys.argv[1:]
    shargs, cmdargs = split_args(args)
    options, shargs = SH_OPT.parse_args(shargs)

    if options.profile:
        import lsprof
        p = lsprof.Profiler()
        p.enable(subcalls=True)
        try:
            return _sh(cwd, shargs, cmdargs, options, debugflags, env)
        finally:
            p.disable()
            stats = lsprof.Stats(p.getstats())
            stats.sort()
            stats.pprint(top=10, file=sys.stderr, climit=5)
    else:
        return _sh(cwd, shargs, cmdargs, options, debugflags, env)

def main():
    sys.exit(sh())

if __name__=='__main__':
    main()

@@ -13,6 +13,11 @@
# PLY in pull mode. It was designed to work incrementally and it would not be
# that hard to enable pull mode.
import re
try:
    s = set()
    del s
except NameError:
    from Set import Set as set

from ply import lex
from bb.pysh.sherrors import *

@@ -636,16 +636,13 @@ def p_empty(p):
def p_error(p):
    msg = []
    w = msg.append
    if p:
        w('%r\n' % p)
        w('followed by:\n')
        for i in range(5):
            n = yacc.token()
            if not n:
                break
            w(' %r\n' % n)
    else:
        w('Unexpected EOF')
    w('%r\n' % p)
    w('followed by:\n')
    for i in range(5):
        n = yacc.token()
        if not n:
            break
        w(' %r\n' % n)
    raise sherrors.ShellSyntaxError(''.join(msg))

# Build the parser

@@ -13,3 +13,29 @@ class ShellError(Exception):

class ShellSyntaxError(ShellError):
    pass

class UtilityError(ShellError):
    """Raised upon utility syntax error (option or operand error)."""
    pass

class ExpansionError(ShellError):
    pass

class CommandNotFound(ShellError):
    """Specified command was not found."""
    pass

class RedirectionError(ShellError):
    pass

class VarAssignmentError(ShellError):
    """Variable assignment error."""
    pass

class ExitSignal(ShellError):
    """Exit signal."""
    pass

class ReturnSignal(ShellError):
    """Return signal."""
    pass

77	bitbake/lib/bb/pysh/subprocess_fix.py	Normal file
@@ -0,0 +1,77 @@
# subprocess - Subprocesses with accessible I/O streams
#
# For more information about this module, see PEP 324.
#
# This module should remain compatible with Python 2.2, see PEP 291.
#
# Copyright (c) 2003-2005 by Peter Astrand <astrand@lysator.liu.se>
#
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.

def list2cmdline(seq):
    """
    Translate a sequence of arguments into a command line
    string, using the same rules as the MS C runtime:

    1) Arguments are delimited by white space, which is either a
       space or a tab.

    2) A string surrounded by double quotation marks is
       interpreted as a single argument, regardless of white space
       contained within. A quoted string can be embedded in an
       argument.

    3) A double quotation mark preceded by a backslash is
       interpreted as a literal double quotation mark.

    4) Backslashes are interpreted literally, unless they
       immediately precede a double quotation mark.

    5) If backslashes immediately precede a double quotation mark,
       every pair of backslashes is interpreted as a literal
       backslash. If the number of backslashes is odd, the last
       backslash escapes the next double quotation mark as
       described in rule 3.
    """

    # See
    # http://msdn.microsoft.com/library/en-us/vccelng/htm/progs_12.asp
    result = []
    needquote = False
    for arg in seq:
        bs_buf = []

        # Add a space to separate this argument from the others
        if result:
            result.append(' ')

        needquote = (" " in arg) or ("\t" in arg) or ("|" in arg) or arg == ""
        if needquote:
            result.append('"')

        for c in arg:
            if c == '\\':
                # Don't know if we need to double yet.
                bs_buf.append(c)
            elif c == '"':
                # Double backslashes.
                result.append('\\' * len(bs_buf)*2)
                bs_buf = []
                result.append('\\"')
            else:
                # Normal char
                if bs_buf:
                    result.extend(bs_buf)
                    bs_buf = []
                result.append(c)

        # Add remaining backslashes, if any.
        if bs_buf:
            result.extend(bs_buf)

        if needquote:
            result.extend(bs_buf)
            result.append('"')

    return ''.join(result)

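The docstring above spells out the MS C runtime quoting rules the function implements, and the behaviour is easiest to see on a few concrete arguments. The following quick check runs against the modern stdlib implementation, which fixed the empty-argument bug this module works around (the pipe-character quoting, by contrast, remains specific to this fixed copy):

from subprocess import list2cmdline

# Rules 1/2: whitespace forces double quotes around the argument.
assert list2cmdline(['a b']) == '"a b"'
# An empty argument survives as "" (the Python 2.4-era bug dropped it).
assert list2cmdline(['']) == '""'
# Rule 5: backslashes before a quote are doubled and the quote escaped.
assert list2cmdline(['a\\"b']) == 'a\\\\\\"b'
print("quoting rules hold")
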
@@ -1,116 +0,0 @@
"""
BitBake 'remotedata' module

Provides support for using a datastore from the bitbake client
"""

# Copyright (C) 2016 Intel Corporation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import bb.data

class RemoteDatastores:
    """Used on the server side to manage references to server-side datastores"""
    def __init__(self, cooker):
        self.cooker = cooker
        self.datastores = {}
        self.locked = []
        self.nextindex = 1

    def __len__(self):
        return len(self.datastores)

    def __getitem__(self, key):
        if key is None:
            return self.cooker.data
        else:
            return self.datastores[key]

    def items(self):
        return self.datastores.items()

    def store(self, d, locked=False):
        """
        Put a datastore into the collection. If locked=True then the datastore
        is understood to be managed externally and cannot be released by calling
        release().
        """
        idx = self.nextindex
        self.datastores[idx] = d
        if locked:
            self.locked.append(idx)
        self.nextindex += 1
        return idx

    def check_store(self, d, locked=False):
        """
        Put a datastore into the collection if it's not already in there;
        in either case return the index
        """
        for key, val in self.datastores.items():
            if val is d:
                idx = key
                break
        else:
            idx = self.store(d, locked)
        return idx

    def release(self, idx):
        """Discard a datastore in the collection"""
        if idx in self.locked:
            raise Exception('Tried to release locked datastore %d' % idx)
        del self.datastores[idx]

    def receive_datastore(self, remote_data):
        """Receive a datastore object sent from the client (as prepared by transmit_datastore())"""
        dct = dict(remote_data)
        d = bb.data_smart.DataSmart()
        d.dict = dct
        while True:
            if '_remote_data' in dct:
                dsindex = dct['_remote_data']['_content']
                del dct['_remote_data']
                if dsindex is None:
                    dct['_data'] = self.cooker.data.dict
                else:
                    dct['_data'] = self.datastores[dsindex].dict
                break
            elif '_data' in dct:
                idct = dict(dct['_data'])
                dct['_data'] = idct
                dct = idct
            else:
                break
        return d

    @staticmethod
    def transmit_datastore(d):
        """Prepare a datastore object for sending over IPC from the client end"""
        # FIXME content might be a dict, need to turn that into a list as well
        def copy_dicts(dct):
            if '_remote_data' in dct:
                dsindex = dct['_remote_data']['_content'].dsindex
                newdct = dct.copy()
                newdct['_remote_data'] = {'_content': dsindex}
                return list(newdct.items())
            elif '_data' in dct:
                newdct = dct.copy()
                newdata = copy_dicts(dct['_data'])
                if newdata:
                    newdct['_data'] = newdata
                return list(newdct.items())
            return None
        main_dict = copy_dicts(d.dict)
        return main_dict

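transmit_datastore() walks the '_data' chain of a DataSmart dict, converting each level to a list of items so that it can cross an IPC boundary, and receive_datastore() reverses the walk, re-linking the bottom level to a server-side datastore. A stripped-down sketch of that flatten/rebuild round trip on plain dicts, with the helper names invented for the example:

def flatten(dct):
    """Turn a chain of dicts linked via '_data' into nested item lists
    (sketch of transmit_datastore's copy_dicts)."""
    newdct = dict(dct)
    if '_data' in newdct:
        newdct['_data'] = flatten(newdct['_data'])
    return list(newdct.items())

def rebuild(items):
    """Reverse the flattening (sketch of receive_datastore)."""
    dct = dict(items)
    if '_data' in dct:
        dct['_data'] = rebuild(dct['_data'])
    return dct

chain = {'FOO': '1', '_data': {'BAR': '2', '_data': {'BAZ': '3'}}}
assert rebuild(flatten(chain)) == chain
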
File diff suppressed because it is too large
@@ -18,4 +18,82 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

""" Base code for Bitbake server process

Having a common base for all Bitbake server classes ensures a consistent
approach to the interface, and minimizes the risks associated with code
duplication.

"""

""" BaseImplServer() the base class for all XXServer() implementations.

    These classes contain the actual code that runs the server side, i.e.
    listens for the commands and executes them. Although these implementations
    contain all the data of the original bitbake command, i.e. the cooker instance,
    they may well run on a different process or even machine.

"""

class BaseImplServer():
    def __init__(self):
        self._idlefuns = {}

    def addcooker(self, cooker):
        self.cooker = cooker

    def register_idle_function(self, function, data):
        """Register a function to be called while the server is idle"""
        assert hasattr(function, '__call__')
        self._idlefuns[function] = data



""" BitBakeBaseServerConnection class is the common ancestor to all
    BitBakeServerConnection classes.

    These classes control the remote server. The only command currently
    implemented is the terminate() command.

"""

class BitBakeBaseServerConnection():
    def __init__(self, serverImpl):
        pass

    def terminate(self):
        pass

    def setupEventQueue(self):
        pass


""" BitBakeBaseServer class is the common ancestor to all Bitbake servers

    Derive this class in order to implement a BitBakeServer which is the
    controlling stub for the actual server implementation

"""
class BitBakeBaseServer(object):
    def initServer(self):
        self.serverImpl = None  # we ensure a runtime crash if not overloaded
        self.connection = None
        return

    def addcooker(self, cooker):
        self.cooker = cooker
        self.serverImpl.addcooker(cooker)

    def getServerIdleCB(self):
        return self.serverImpl.register_idle_function

    def saveConnectionDetails(self):
        return

    def detach(self):
        return

    def establishConnection(self, featureset):
        raise NotImplementedError("Must redefine the %s.establishConnection()" % self.__class__.__name__)

    def endSession(self):
        self.connection.terminate()
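The hunk above defines the abstract server interfaces. As a quick illustration (not part of the diff), a minimal concrete server could plug into these bases roughly as follows; everything beyond the BaseImplServer/BitBakeBaseServer/BitBakeBaseServerConnection interfaces shown above is hypothetical:

# Hypothetical sketch only; assumes just the base classes from the hunk above.
from bb.server import BaseImplServer, BitBakeBaseServer, BitBakeBaseServerConnection

class NullImplServer(BaseImplServer):
    def serve_forever(self):
        # Run each registered idle callback once with abort=False, then stop.
        for function, data in list(self._idlefuns.items()):
            function(self, data, False)

class NullServer(BitBakeBaseServer):
    def initServer(self):
        self.serverImpl = NullImplServer()
        self.connection = None

    def establishConnection(self, featureset):
        self.connection = BitBakeBaseServerConnection(self.serverImpl)
        return self.connection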
@@ -22,261 +22,115 @@

import bb
import bb.event
import itertools
import logging
import multiprocessing
import threading
import array
import os
import signal
import sys
import time
import select
import socket
import subprocess
import errno
import re
import datetime
import bb.server.xmlrpcserver
from bb import daemonize
from multiprocessing import queues
from queue import Empty
from multiprocessing import Event, Process, util, Queue, Pipe, queues, Manager

from . import BitBakeBaseServer, BitBakeBaseServerConnection, BaseImplServer

logger = logging.getLogger('BitBake')

class ProcessTimeout(SystemExit):
    pass
class ServerCommunicator():
    def __init__(self, connection, event_handle, server):
        self.connection = connection
        self.event_handle = event_handle
        self.server = server

class ProcessServer(multiprocessing.Process):
    def runCommand(self, command):
        # @todo try/except
        self.connection.send(command)

        if not self.server.is_alive():
            raise SystemExit

        while True:
            # don't let the user ctrl-c while we're waiting for a response
            try:
                for idx in range(0,4): # 0, 1, 2, 3
                    if self.connection.poll(5):
                        return self.connection.recv()
                    else:
                        bb.warn("Timeout while attempting to communicate with bitbake server")
                bb.fatal("Gave up; Too many tries: timeout while attempting to communicate with bitbake server")
            except KeyboardInterrupt:
                pass

    def getEventHandle(self):
        return self.event_handle.value

class EventAdapter():
    """
    Adapter to wrap our event queue since the caller (bb.event) expects to
    call a send() method, but our actual queue only has put()
    """
    def __init__(self, queue):
        self.queue = queue

    def send(self, event):
        try:
            self.queue.put(event)
        except Exception as err:
            print("EventAdapter puked: %s" % str(err))


class ProcessServer(Process, BaseImplServer):
    profile_filename = "profile.log"
    profile_processed_filename = "profile.log.processed"

    def __init__(self, lock, sock, sockname):
        multiprocessing.Process.__init__(self)
        self.command_channel = False
        self.command_channel_reply = False
    def __init__(self, command_channel, event_queue, featurelist):
        BaseImplServer.__init__(self)
        Process.__init__(self)
        self.command_channel = command_channel
        self.event_queue = event_queue
        self.event = EventAdapter(event_queue)
        self.featurelist = featurelist
        self.quit = False
        self.heartbeat_seconds = 1  # default, BB_HEARTBEAT_EVENT will be checked once we have a datastore.
        self.next_heartbeat = time.time()

        self.event_handle = None
        self.haveui = False
        self.lastui = False
        self.xmlrpc = False

        self._idlefuns = {}

        self.bitbake_lock = lock
        self.sock = sock
        self.sockname = sockname

    def register_idle_function(self, function, data):
        """Register a function to be called while the server is idle"""
        assert hasattr(function, '__call__')
        self._idlefuns[function] = data
        self.quitin, self.quitout = Pipe()
        self.event_handle = multiprocessing.Value("i")

    def run(self):
        for event in bb.event.ui_queue:
            self.event_queue.put(event)
        self.event_handle.value = bb.event.register_UIHhandler(self, True)

        if self.xmlrpcinterface[0]:
            self.xmlrpc = bb.server.xmlrpcserver.BitBakeXMLRPCServer(self.xmlrpcinterface, self.cooker, self)

            print("Bitbake XMLRPC server address: %s, server port: %s" % (self.xmlrpc.host, self.xmlrpc.port))

        heartbeat_event = self.cooker.data.getVar('BB_HEARTBEAT_EVENT')
        if heartbeat_event:
            try:
                self.heartbeat_seconds = float(heartbeat_event)
            except:
                bb.warn('Ignoring invalid BB_HEARTBEAT_EVENT=%s, must be a float specifying seconds.' % heartbeat_event)

        self.timeout = self.server_timeout or self.cooker.data.getVar('BB_SERVER_TIMEOUT')
        try:
            if self.timeout:
                self.timeout = float(self.timeout)
        except:
            bb.warn('Ignoring invalid BB_SERVER_TIMEOUT=%s, must be a float specifying seconds.' % self.timeout)


        try:
            self.bitbake_lock.seek(0)
            self.bitbake_lock.truncate()
            if self.xmlrpc:
                self.bitbake_lock.write("%s %s:%s\n" % (os.getpid(), self.xmlrpc.host, self.xmlrpc.port))
            else:
                self.bitbake_lock.write("%s\n" % (os.getpid()))
            self.bitbake_lock.flush()
        except Exception as e:
            print("Error writing to lock file: %s" % str(e))
            pass

        if self.cooker.configuration.profile:
            try:
                import cProfile as profile
            except:
                import profile
            prof = profile.Profile()

            ret = profile.Profile.runcall(prof, self.main)

            prof.dump_stats("profile.log")
            bb.utils.process_profilelog("profile.log")
            print("Raw profiling information saved to profile.log and processed statistics to profile.log.processed")

        else:
            ret = self.main()

        return ret
        bb.cooker.server_main(self.cooker, self.main)

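run() optionally wraps main() in cProfile and post-processes the stats. The same wrap-and-dump pattern in isolation (illustrative sketch only; work() is a placeholder):

# The profiling wrapper pattern from run(), in isolation.
try:
    import cProfile as profile
except ImportError:
    import profile

def work():
    return sum(range(1000))

prof = profile.Profile()
ret = profile.Profile.runcall(prof, work)   # run under the profiler, keep the return value
prof.dump_stats("profile.log")              # raw stats; bb.utils.process_profilelog() would post-process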
    def main(self):
        self.cooker.pre_serve()

        # Ignore SIGINT within the server, as all SIGINT handling is done by
        # the UI and communicated to us
        self.quitin.close()
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        bb.utils.set_process_name("Cooker")

        ready = []
        newconnections = []

        self.controllersock = False
        fds = [self.sock]
        if self.xmlrpc:
            fds.append(self.xmlrpc)
        print("Entering server connection loop")

        def disconnect_client(self, fds):
            print("Disconnecting Client")
            if self.controllersock:
                fds.remove(self.controllersock)
                self.controllersock.close()
                self.controllersock = False
            if self.haveui:
                fds.remove(self.command_channel)
                bb.event.unregister_UIHhandler(self.event_handle, True)
                self.command_channel_reply.writer.close()
                self.event_writer.writer.close()
                self.command_channel.close()
                self.command_channel = False
                del self.event_writer
                self.lastui = time.time()
                self.cooker.clientComplete()
                self.haveui = False
            ready = select.select(fds,[],[],0)[0]
            if newconnections:
                print("Starting new client")
                conn = newconnections.pop(-1)
                fds.append(conn)
                self.controllersock = conn
            elif self.timeout is None and not ready:
                print("No timeout, exiting.")
                self.quit = True

        while not self.quit:
            if self.sock in ready:
                while select.select([self.sock],[],[],0)[0]:
                    controllersock, address = self.sock.accept()
                    if self.controllersock:
                        print("Queuing %s (%s)" % (str(ready), str(newconnections)))
                        newconnections.append(controllersock)
                    else:
                        print("Accepting %s (%s)" % (str(ready), str(newconnections)))
                        self.controllersock = controllersock
                        fds.append(controllersock)
            if self.controllersock in ready:
                try:
                    print("Processing Client")
                    ui_fds = recvfds(self.controllersock, 3)
                    print("Connecting Client")

                    # Where to write events to
                    writer = ConnectionWriter(ui_fds[0])
                    self.event_handle = bb.event.register_UIHhandler(writer, True)
                    self.event_writer = writer

                    # Where to read commands from
                    reader = ConnectionReader(ui_fds[1])
                    fds.append(reader)
                    self.command_channel = reader

                    # Where to send command return values to
                    writer = ConnectionWriter(ui_fds[2])
                    self.command_channel_reply = writer

                    self.haveui = True

                except (EOFError, OSError):
                    disconnect_client(self, fds)

            if not self.timeout == -1.0 and not self.haveui and self.lastui and self.timeout and \
                    (self.lastui + self.timeout) < time.time():
                print("Server timeout, exiting.")
                self.quit = True

            if self.command_channel in ready:
                try:
                    command = self.command_channel.get()
                except EOFError:
                    # Client connection shutting down
                    ready = []
                    disconnect_client(self, fds)
                    continue
                if command[0] == "terminateServer":
                    try:
                        if self.command_channel.poll():
                            command = self.command_channel.recv()
                            self.runCommand(command)
                        if self.quitout.poll():
                            self.quitout.recv()
                            self.quit = True
                            continue
                        try:
                            print("Running command %s" % command)
                            self.command_channel_reply.send(self.cooker.command.runCommand(command))
                        except Exception as e:
                            logger.exception('Exception in server main event loop running command %s (%s)' % (command, str(e)))

            if self.xmlrpc in ready:
                self.xmlrpc.handle_requests()

            ready = self.idle_commands(.1, fds)

        print("Exiting")
        # Remove the socket file so we don't get any more connections to avoid races
        os.unlink(self.sockname)
        self.sock.close()

        try:
            self.cooker.shutdown(True)
            self.cooker.notifier.stop()
            self.cooker.confignotifier.stop()
        except:
            pass

        self.cooker.post_serve()

        # Finally release the lockfile but warn about other processes holding it open
        lock = self.bitbake_lock
        lockfile = lock.name
        lock.close()
        lock = None

        while not lock:
            with bb.utils.timeout(3):
                lock = bb.utils.lockfile(lockfile, shared=False, retry=False, block=True)
                if lock:
                    # We hold the lock so we can remove the file (hide stale pid data)
                    bb.utils.remove(lockfile)
                    bb.utils.unlockfile(lock)
                    return

            if not lock:
                # Some systems may not have lsof available
                procs = None
                try:
                    procs = subprocess.check_output(["lsof", '-w', lockfile], stderr=subprocess.STDOUT)
                except OSError as e:
                    if e.errno != errno.ENOENT:
                        raise
                if procs is None:
                    # Fall back to fuser if lsof is unavailable
                    try:
                        procs = subprocess.check_output(["fuser", '-v', lockfile], stderr=subprocess.STDOUT)
                    except OSError as e:
                        if e.errno != errno.ENOENT:
                            raise
                        self.runCommand(["stateForceShutdown"])
                    except:
                        pass

                msg = "Delaying shutdown due to active processes which appear to be holding bitbake.lock"
                if procs:
                    msg += ":\n%s" % str(procs)
                print(msg)
                self.idle_commands(.1, [self.command_channel, self.quitout])
            except Exception:
                logger.exception('Running command %s', command)

        self.event_queue.close()
        bb.event.unregister_UIHhandler(self.event_handle.value)
        self.command_channel.close()
        self.cooker.shutdown(True)
        self.quitout.close()

    def idle_commands(self, delay, fds=None):
        nextsleep = delay
@@ -306,361 +160,109 @@ class ProcessServer(multiprocessing.Process):
                    del self._idlefuns[function]
                    self.quit = True

        # Create new heartbeat event?
        now = time.time()
        if now >= self.next_heartbeat:
            # We might have missed heartbeats. Just trigger once in
            # that case and continue after the usual delay.
            self.next_heartbeat += self.heartbeat_seconds
            if self.next_heartbeat <= now:
                self.next_heartbeat = now + self.heartbeat_seconds
            heartbeat = bb.event.HeartbeatEvent(now)
            bb.event.fire(heartbeat, self.cooker.data)
            if nextsleep and now + nextsleep > self.next_heartbeat:
                # Shorten timeout so that we wake up in time for
                # the heartbeat.
                nextsleep = self.next_heartbeat - now

        if nextsleep is not None:
            if self.xmlrpc:
                nextsleep = self.xmlrpc.get_timeout(nextsleep)
            try:
                return select.select(fds,[],[],nextsleep)[0]
            except InterruptedError:
                # Ignore EINTR
                return []
        else:
            return select.select(fds,[],[],0)[0]


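The heartbeat logic above advances next_heartbeat by a fixed period but collapses any missed beats into a single event before resynchronizing. A standalone sketch of the same scheduling idea (hypothetical names, not part of the diff):

# Standalone sketch of the heartbeat catch-up scheduling used above.
import time

heartbeat_seconds = 1.0
next_heartbeat = time.time()

def maybe_heartbeat(now):
    global next_heartbeat
    if now >= next_heartbeat:
        next_heartbeat += heartbeat_seconds
        if next_heartbeat <= now:
            # We fell behind (missed beats): fire once and resync.
            next_heartbeat = now + heartbeat_seconds
        return True    # caller fires exactly one HeartbeatEvent
    return False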
class ServerCommunicator():
    def __init__(self, connection, recv):
        self.connection = connection
        self.recv = recv
            select.select(fds,[],[],nextsleep)

    def runCommand(self, command):
        self.connection.send(command)
        if not self.recv.poll(30):
            raise ProcessTimeout("Timeout while waiting for a reply from the bitbake server")
        return self.recv.get()
        """
        Run a cooker command on the server
        """
        self.command_channel.send(self.cooker.command.runCommand(command))

    def updateFeatureSet(self, featureset):
        _, error = self.runCommand(["setFeatures", featureset])
    def stop(self):
        self.quitin.send("quit")
        self.quitin.close()

class BitBakeProcessServerConnection(BitBakeBaseServerConnection):
    def __init__(self, serverImpl, ui_channel, event_queue):
        self.procserver = serverImpl
        self.ui_channel = ui_channel
        self.event_queue = event_queue
        self.connection = ServerCommunicator(self.ui_channel, self.procserver.event_handle, self.procserver)
        self.events = self.event_queue
        self.terminated = False

    def sigterm_terminate(self):
        bb.error("UI received SIGTERM")
        self.terminate()

    def terminate(self):
        if self.terminated:
            return
        self.terminated = True
        def flushevents():
            while True:
                try:
                    event = self.event_queue.get(block=False)
                except (Empty, IOError):
                    break
                if isinstance(event, logging.LogRecord):
                    logger.handle(event)

        signal.signal(signal.SIGINT, signal.SIG_IGN)
        self.procserver.stop()

        while self.procserver.is_alive():
            flushevents()
            self.procserver.join(0.1)

        self.ui_channel.close()
        self.event_queue.close()
        self.event_queue.setexit()

# Wrap Queue to provide API which isn't server implementation specific
class ProcessEventQueue(multiprocessing.queues.Queue):
    def __init__(self, maxsize):
        multiprocessing.queues.Queue.__init__(self, maxsize, ctx=multiprocessing.get_context())
        self.exit = False
        bb.utils.set_process_name("ProcessEQueue")

    def setexit(self):
        self.exit = True

    def waitEvent(self, timeout):
        if self.exit:
            return self.getEvent()
        try:
            if not self.server.is_alive():
                return self.getEvent()
            return self.get(True, timeout)
        except Empty:
            return None

    def getEvent(self):
        try:
            if not self.server.is_alive():
                self.setexit()
            return self.get(False)
        except Empty:
            if self.exit:
                sys.exit(1)
            return None


class BitBakeServer(BitBakeBaseServer):
    def initServer(self, single_use=True):
        # establish communication channels. We use bidirectional pipes for
        # ui <--> server command/response pairs
        # and a queue for server -> ui event notifications
        #
        self.ui_channel, self.server_channel = Pipe()
        self.event_queue = ProcessEventQueue(0)
        self.serverImpl = ProcessServer(self.server_channel, self.event_queue, None)
        self.event_queue.server = self.serverImpl

    def detach(self):
        self.serverImpl.start()
        return

    def establishConnection(self, featureset):

        self.connection = BitBakeProcessServerConnection(self.serverImpl, self.ui_channel, self.event_queue)

        _, error = self.connection.connection.runCommand(["setFeatures", featureset])
        if error:
            logger.error("Unable to set the cooker to the correct featureset: %s" % error)
            raise BaseException(error)

    def getEventHandle(self):
        handle, error = self.runCommand(["getUIHandlerNum"])
        if error:
            logger.error("Unable to get UI Handler Number: %s" % error)
            raise BaseException(error)

        return handle

    def terminateServer(self):
        self.connection.send(['terminateServer'])
        return

class BitBakeProcessServerConnection(object):
    def __init__(self, ui_channel, recv, eq, sock):
        self.connection = ServerCommunicator(ui_channel, recv)
        self.events = eq
        # Save sock so it doesn't get gc'd for the life of our connection
        self.socket_connection = sock

    def terminate(self):
        self.socket_connection.close()
        self.connection.connection.close()
        self.connection.recv.close()
        return

class BitBakeServer(object):
    start_log_format = '--- Starting bitbake server pid %s at %s ---'
    start_log_datetime_format = '%Y-%m-%d %H:%M:%S.%f'

    def __init__(self, lock, sockname, configuration, featureset):

        self.configuration = configuration
        self.featureset = featureset
        self.sockname = sockname
        self.bitbake_lock = lock
        self.readypipe, self.readypipein = os.pipe()

        # Create server control socket
        if os.path.exists(sockname):
            os.unlink(sockname)

        # Place the log in the build directory alongside the lock file
        logfile = os.path.join(os.path.dirname(self.bitbake_lock.name), "bitbake-cookerdaemon.log")

        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        # AF_UNIX has path length issues so chdir here to workaround
        cwd = os.getcwd()
        try:
            os.chdir(os.path.dirname(sockname))
            self.sock.bind(os.path.basename(sockname))
        finally:
            os.chdir(cwd)
        self.sock.listen(1)

        os.set_inheritable(self.sock.fileno(), True)
        startdatetime = datetime.datetime.now()
        bb.daemonize.createDaemon(self._startServer, logfile)
        self.sock.close()
        self.bitbake_lock.close()
        os.close(self.readypipein)

        ready = ConnectionReader(self.readypipe)
        r = ready.poll(5)
        if not r:
            bb.note("Bitbake server didn't start within 5 seconds, waiting for 90")
            r = ready.poll(90)
        if r:
            try:
                r = ready.get()
            except EOFError:
                # Trap the child exiting/closing the pipe and error out
                r = None
        if not r or r[0] != "r":
            ready.close()
            bb.error("Unable to start bitbake server (%s)" % str(r))
            if os.path.exists(logfile):
                logstart_re = re.compile(self.start_log_format % ('([0-9]+)', '([0-9-]+ [0-9:.]+)'))
                started = False
                lines = []
                lastlines = []
                with open(logfile, "r") as f:
                    for line in f:
                        if started:
                            lines.append(line)
                        else:
                            lastlines.append(line)
                            res = logstart_re.match(line.rstrip())
                            if res:
                                ldatetime = datetime.datetime.strptime(res.group(2), self.start_log_datetime_format)
                                if ldatetime >= startdatetime:
                                    started = True
                                    lines.append(line)
                        if len(lastlines) > 60:
                            lastlines = lastlines[-60:]
                if lines:
                    if len(lines) > 60:
                        bb.error("Last 60 lines of server log for this session (%s):\n%s" % (logfile, "".join(lines[-60:])))
                    else:
                        bb.error("Server log for this session (%s):\n%s" % (logfile, "".join(lines)))
                elif lastlines:
                    bb.error("Server didn't start, last 60 loglines (%s):\n%s" % (logfile, "".join(lastlines)))
            else:
                bb.error("%s doesn't exist" % logfile)

            raise SystemExit(1)

        ready.close()

    def _startServer(self):
        print(self.start_log_format % (os.getpid(), datetime.datetime.now().strftime(self.start_log_datetime_format)))
        sys.stdout.flush()

        server = ProcessServer(self.bitbake_lock, self.sock, self.sockname)
        self.configuration.setServerRegIdleCallback(server.register_idle_function)
        os.close(self.readypipe)
        writer = ConnectionWriter(self.readypipein)
        self.cooker = bb.cooker.BBCooker(self.configuration, self.featureset)
        writer.send("r")
        writer.close()
        server.cooker = self.cooker
        server.server_timeout = self.configuration.server_timeout
        server.xmlrpcinterface = self.configuration.xmlrpcinterface
        print("Started bitbake server pid %d" % os.getpid())
        sys.stdout.flush()

        server.start()

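The daemon start-up above synchronizes with the parent through a plain pipe: the child sends a single "r" once BBCooker is constructed, and the parent polls with an escalating timeout. The same handshake in miniature (illustrative sketch only; assumes the ConnectionReader/ConnectionWriter classes defined later in this file, POSIX fork):

# Minimal sketch of the readiness handshake used above.
import os

readypipe, readypipein = os.pipe()
pid = os.fork()
if pid == 0:
    os.close(readypipe)
    writer = ConnectionWriter(readypipein)
    writer.send("r")           # signal "server is ready"
    writer.close()
    os._exit(0)
os.close(readypipein)
ready = ConnectionReader(readypipe)
if ready.poll(5):
    print(ready.get())         # "r"
ready.close()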
def connectProcessServer(sockname, featureset):
    # Connect to socket
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    # AF_UNIX has path length issues so chdir here to workaround
    cwd = os.getcwd()

    readfd = writefd = readfd1 = writefd1 = readfd2 = writefd2 = None
    eq = command_chan_recv = command_chan = None

    sock.settimeout(10)

    try:
        try:
            os.chdir(os.path.dirname(sockname))
            finished = False
            while not finished:
                try:
                    sock.connect(os.path.basename(sockname))
                    finished = True
                except IOError as e:
                    if e.errno == errno.EWOULDBLOCK:
                        pass
                    raise
        finally:
            os.chdir(cwd)

        # Send an fd for the remote to write events to
        readfd, writefd = os.pipe()
        eq = BBUIEventQueue(readfd)
        # Send an fd for the remote to receive commands from
        readfd1, writefd1 = os.pipe()
        command_chan = ConnectionWriter(writefd1)
        # Send an fd for the remote to write command results to
        readfd2, writefd2 = os.pipe()
        command_chan_recv = ConnectionReader(readfd2)

        sendfds(sock, [writefd, readfd1, writefd2])

        server_connection = BitBakeProcessServerConnection(command_chan, command_chan_recv, eq, sock)

        # Close the ends of the pipes we won't use
        for i in [writefd, readfd1, writefd2]:
            os.close(i)

        server_connection.connection.updateFeatureSet(featureset)

    except (Exception, SystemExit) as e:
        if command_chan_recv:
            command_chan_recv.close()
        if command_chan:
            command_chan.close()
        for i in [writefd, readfd1, writefd2]:
            try:
                if i:
                    os.close(i)
            except OSError:
                pass
        sock.close()
        raise

    return server_connection

def sendfds(sock, fds):
    '''Send an array of fds over an AF_UNIX socket.'''
    fds = array.array('i', fds)
    msg = bytes([len(fds) % 256])
    sock.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)])

def recvfds(sock, size):
    '''Receive an array of fds over an AF_UNIX socket.'''
    a = array.array('i')
    bytes_size = a.itemsize * size
    msg, ancdata, flags, addr = sock.recvmsg(1, socket.CMSG_LEN(bytes_size))
    if not msg and not ancdata:
        raise EOFError
    try:
        if len(ancdata) != 1:
            raise RuntimeError('received %d items of ancdata' %
                               len(ancdata))
        cmsg_level, cmsg_type, cmsg_data = ancdata[0]
        if (cmsg_level == socket.SOL_SOCKET and
            cmsg_type == socket.SCM_RIGHTS):
            if len(cmsg_data) % a.itemsize != 0:
                raise ValueError
            a.frombytes(cmsg_data)
            assert len(a) % 256 == msg[0]
            return list(a)
    except (ValueError, IndexError):
        pass
    raise RuntimeError('Invalid data received')

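sendfds()/recvfds() pass raw file descriptors across the UNIX socket using SCM_RIGHTS ancillary data; connectProcessServer() ships exactly three of them. A minimal round-trip over a socketpair, independent of the diff (illustrative only, assumes the two helpers above are in scope):

# Illustrative round-trip for the fd-passing helpers above.
import os, socket

parent, child = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
r, w = os.pipe()
sendfds(parent, [w])               # ship the write end to the "other process"
[w2] = recvfds(child, 1)           # receive a duplicated descriptor
os.write(w2, b"hello")
print(os.read(r, 5))               # b'hello'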
class BBUIEventQueue:
    def __init__(self, readfd):

        self.eventQueue = []
        self.eventQueueLock = threading.Lock()
        self.eventQueueNotify = threading.Event()

        self.reader = ConnectionReader(readfd)

        self.t = threading.Thread()
        self.t.setDaemon(True)
        self.t.run = self.startCallbackHandler
        self.t.start()

    def getEvent(self):
        self.eventQueueLock.acquire()

        if len(self.eventQueue) == 0:
            self.eventQueueLock.release()
            return None

        item = self.eventQueue.pop(0)

        if len(self.eventQueue) == 0:
            self.eventQueueNotify.clear()

        self.eventQueueLock.release()
        return item

    def waitEvent(self, delay):
        self.eventQueueNotify.wait(delay)
        return self.getEvent()

    def queue_event(self, event):
        self.eventQueueLock.acquire()
        self.eventQueue.append(event)
        self.eventQueueNotify.set()
        self.eventQueueLock.release()

    def send_event(self, event):
        self.queue_event(pickle.loads(event))

    def startCallbackHandler(self):
        bb.utils.set_process_name("UIEventQueue")
        while True:
            try:
                self.reader.wait()
                event = self.reader.get()
                self.queue_event(event)
            except EOFError:
                # Easiest way to exit is to close the file descriptor to cause an exit
                break
        self.reader.close()

class ConnectionReader(object):

    def __init__(self, fd):
        self.reader = multiprocessing.connection.Connection(fd, writable=False)
        self.rlock = multiprocessing.Lock()

    def wait(self, timeout=None):
        return multiprocessing.connection.wait([self.reader], timeout)

    def poll(self, timeout=None):
        return self.reader.poll(timeout)

    def get(self):
        with self.rlock:
            res = self.reader.recv_bytes()
        return multiprocessing.reduction.ForkingPickler.loads(res)

    def fileno(self):
        return self.reader.fileno()

    def close(self):
        return self.reader.close()


class ConnectionWriter(object):

    def __init__(self, fd):
        self.writer = multiprocessing.connection.Connection(fd, readable=False)
        self.wlock = multiprocessing.Lock()
        # Why bb.event needs this I have no idea
        self.event = self

    def send(self, obj):
        obj = multiprocessing.reduction.ForkingPickler.dumps(obj)
        with self.wlock:
            self.writer.send_bytes(obj)

    def fileno(self):
        return self.writer.fileno()

    def close(self):
        return self.writer.close()
        signal.signal(signal.SIGTERM, lambda i, s: self.connection.sigterm_terminate())
        return self.connection

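ConnectionReader/ConnectionWriter wrap a raw fd in a multiprocessing Connection and frame each message as ForkingPickler bytes under a lock. A minimal in-process demonstration over os.pipe() (illustrative only, assumes the two classes above are in scope):

# Illustrative use of the ConnectionReader/ConnectionWriter pair over a pipe.
import os

rfd, wfd = os.pipe()
writer = ConnectionWriter(wfd)
reader = ConnectionReader(rfd)
writer.send({"cmd": "ping", "args": [1, 2]})   # pickled and length-framed
print(reader.get())                            # {'cmd': 'ping', 'args': [1, 2]}
writer.close()
reader.close()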
422	bitbake/lib/bb/server/xmlrpc.py	(new file)
@@ -0,0 +1,422 @@
#
# BitBake XMLRPC Server
#
# Copyright (C) 2006 - 2007  Michael 'Mickey' Lauer
# Copyright (C) 2006 - 2008  Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

"""
This module implements an xmlrpc server for BitBake.

Use this by deriving a class from BitBakeXMLRPCServer and then adding
methods which you want to "export" via XMLRPC. If the methods have the
prefix xmlrpc_, then registering those functions will happen automatically;
if not, you need to call register_function.

Use register_idle_function() to add a function which the xmlrpc server
calls from within serve_forever when no requests are pending. Make sure
that those functions are non-blocking or else you will introduce latency
in the server's main loop.
"""
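Per the docstring, a subclass can export methods either automatically by prefix or explicitly via register_function(). A hypothetical sketch against the XMLRPCServer class added later in this file (PingXMLRPCServer and xmlrpc_ping are made up for illustration):

# Hypothetical sketch of the export convention described above.
class PingXMLRPCServer(XMLRPCServer):
    def __init__(self, interface):
        XMLRPCServer.__init__(self, interface)
        # auto-export every xmlrpc_-prefixed method, minus the prefix
        self.autoregister_all_functions(self, "xmlrpc_")

    def xmlrpc_ping(self):
        # remotely callable as "ping"
        return "pong"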
|
||||
import os
|
||||
import sys
|
||||
|
||||
import hashlib
|
||||
import time
|
||||
import socket
|
||||
import signal
|
||||
import threading
|
||||
import pickle
|
||||
import inspect
|
||||
import select
|
||||
import http.client
|
||||
import xmlrpc.client
|
||||
from xmlrpc.server import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
|
||||
|
||||
import bb
|
||||
from bb import daemonize
|
||||
from bb.ui import uievent
|
||||
from . import BitBakeBaseServer, BitBakeBaseServerConnection, BaseImplServer
|
||||
|
||||
DEBUG = False
|
||||
|
||||
class BBTransport(xmlrpc.client.Transport):
|
||||
def __init__(self, timeout):
|
||||
self.timeout = timeout
|
||||
self.connection_token = None
|
||||
xmlrpc.client.Transport.__init__(self)
|
||||
|
||||
# Modified from default to pass timeout to HTTPConnection
|
||||
def make_connection(self, host):
|
||||
#return an existing connection if possible. This allows
|
||||
#HTTP/1.1 keep-alive.
|
||||
if self._connection and host == self._connection[0]:
|
||||
return self._connection[1]
|
||||
|
||||
# create a HTTP connection object from a host descriptor
|
||||
chost, self._extra_headers, x509 = self.get_host_info(host)
|
||||
#store the host argument along with the connection object
|
||||
self._connection = host, http.client.HTTPConnection(chost, timeout=self.timeout)
|
||||
return self._connection[1]
|
||||
|
||||
def set_connection_token(self, token):
|
||||
self.connection_token = token
|
||||
|
||||
def send_content(self, h, body):
|
||||
if self.connection_token:
|
||||
h.putheader("Bitbake-token", self.connection_token)
|
||||
xmlrpc.client.Transport.send_content(self, h, body)
|
||||
|
||||
def _create_server(host, port, timeout = 60):
|
||||
t = BBTransport(timeout)
|
||||
s = xmlrpc.client.ServerProxy("http://%s:%d/" % (host, port), transport=t, allow_none=True, use_builtin_types=True)
|
||||
return s, t
|
||||
|
||||
def check_connection(remote, timeout):
|
||||
try:
|
||||
host, port = remote.split(":")
|
||||
port = int(port)
|
||||
except Exception as e:
|
||||
bb.warn("Failed to read remote definition (%s)" % str(e))
|
||||
raise e
|
||||
|
||||
server, _transport = _create_server(host, port, timeout)
|
||||
try:
|
||||
ret, err = server.runCommand(['getVariable', 'TOPDIR'])
|
||||
if err or not ret:
|
||||
return False
|
||||
except ConnectionError:
|
||||
return False
|
||||
return True
|
||||
|
||||
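Hypothetical client-side usage of the two helpers above; the host and port are placeholders and assume a server is already listening there:

# Hypothetical usage sketch; localhost:8888 is a placeholder.
if check_connection("localhost:8888", timeout=10):
    server, transport = _create_server("localhost", 8888, timeout=10)
    ret, err = server.runCommand(['getVariable', 'TOPDIR'])
    if not err:
        print("TOPDIR is %s" % ret)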
class BitBakeServerCommands():

    def __init__(self, server):
        self.server = server
        self.has_client = False

    def registerEventHandler(self, host, port):
        """
        Register a remote UI Event Handler
        """
        s, t = _create_server(host, port)

        # we don't allow connections if the cooker is running
        if (self.cooker.state in [bb.cooker.state.parsing, bb.cooker.state.running]):
            return None, "Cooker is busy: %s" % bb.cooker.state.get_name(self.cooker.state)

        self.event_handle = bb.event.register_UIHhandler(s, True)
        return self.event_handle, 'OK'

    def unregisterEventHandler(self, handlerNum):
        """
        Unregister a remote UI Event Handler
        """
        return bb.event.unregister_UIHhandler(handlerNum)

    def runCommand(self, command):
        """
        Run a cooker command on the server
        """
        return self.cooker.command.runCommand(command, self.server.readonly)

    def getEventHandle(self):
        return self.event_handle

    def terminateServer(self):
        """
        Trigger the server to quit
        """
        self.server.quit = True
        print("Server (cooker) exiting")
        return

    def addClient(self):
        if self.has_client:
            return None
        token = hashlib.md5(str(time.time()).encode("utf-8")).hexdigest()
        self.server.set_connection_token(token)
        self.has_client = True
        return token

    def removeClient(self):
        if self.has_client:
            self.server.set_connection_token(None)
            self.has_client = False
            if self.server.single_use:
                self.server.quit = True

# This request handler checks if the request has a "Bitbake-token" header
# field (this comes from the client side) and compares it with its internal
# "Bitbake-token" field (this comes from the server). If the two are not
# equal, it is assumed that a client is trying to connect to the server
# while another client is connected to the server. In this case, a 503 error
# ("service unavailable") is returned to the client.
class BitBakeXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
    def __init__(self, request, client_address, server):
        self.server = server
        SimpleXMLRPCRequestHandler.__init__(self, request, client_address, server)

    def do_POST(self):
        try:
            remote_token = self.headers["Bitbake-token"]
        except:
            remote_token = None
        if remote_token != self.server.connection_token and remote_token != "observer":
            self.report_503()
        else:
            if remote_token == "observer":
                self.server.readonly = True
            else:
                self.server.readonly = False
            SimpleXMLRPCRequestHandler.do_POST(self)

    def report_503(self):
        self.send_response(503)
        response = 'No more client allowed'
        self.send_header("Content-type", "text/plain")
        self.send_header("Content-length", str(len(response)))
        self.end_headers()
        self.wfile.write(bytes(response, 'utf-8'))


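The 503 gate above means a caller must obtain the token from addClient() and attach it to every later request; "observer" is the read-only escape hatch. A hypothetical client-side flow using the transport defined earlier in this file (host/port are placeholders):

# Hypothetical token handshake against the request handler above.
server, transport = _create_server("localhost", 8888)
token = server.addClient()                       # None if another client already holds the server
if token is not None:
    transport.set_connection_token(token)        # sent as the Bitbake-token header from now on
    server.runCommand(['getVariable', 'TOPDIR'])
    server.removeClient()
else:
    transport.set_connection_token("observer")   # fall back to read-only access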
class XMLRPCProxyServer(BaseImplServer):
    """ not a real working server, but a stub for a proxy server connection

    """
    def __init__(self, host, port, use_builtin_types=True):
        self.host = host
        self.port = port

class XMLRPCServer(SimpleXMLRPCServer, BaseImplServer):
    # remove this when you're done with debugging
    # allow_reuse_address = True

    def __init__(self, interface, single_use=False, idle_timeout=0):
        """
        Constructor
        """
        BaseImplServer.__init__(self)
        self.single_use = single_use
        # Use auto port configuration
        if (interface[1] == -1):
            interface = (interface[0], 0)
        SimpleXMLRPCServer.__init__(self, interface,
                                    requestHandler=BitBakeXMLRPCRequestHandler,
                                    logRequests=False, allow_none=True)
        self.host, self.port = self.socket.getsockname()
        self.connection_token = None
        #self.register_introspection_functions()
        self.commands = BitBakeServerCommands(self)
        self.autoregister_all_functions(self.commands, "")
        self.interface = interface
        self.time = time.time()
        self.idle_timeout = idle_timeout
        if idle_timeout:
            self.register_idle_function(self.handle_idle_timeout, self)

    def addcooker(self, cooker):
        BaseImplServer.addcooker(self, cooker)
        self.commands.cooker = cooker

    def autoregister_all_functions(self, context, prefix):
        """
        Convenience method for registering all functions in the scope
        of this class that start with a common prefix
        """
        methodlist = inspect.getmembers(context, inspect.ismethod)
        for name, method in methodlist:
            if name.startswith(prefix):
                self.register_function(method, name[len(prefix):])

    def handle_idle_timeout(self, server, data, abort):
        if not abort:
            if time.time() - server.time > server.idle_timeout:
                server.quit = True
                print("Server idle timeout expired")
        return []

    def serve_forever(self):
        # Start the actual XMLRPC server
        bb.cooker.server_main(self.cooker, self._serve_forever)

    def _serve_forever(self):
        """
        Serve Requests. Overloaded to honor a quit command
        """
        self.quit = False
        while not self.quit:
            fds = [self]
            nextsleep = 0.1
            for function, data in list(self._idlefuns.items()):
                retval = None
                try:
                    retval = function(self, data, False)
                    if retval is False:
                        del self._idlefuns[function]
                    elif retval is True:
                        nextsleep = 0
                    elif isinstance(retval, float):
                        if (retval < nextsleep):
                            nextsleep = retval
                    else:
                        fds = fds + retval
                except SystemExit:
                    raise
                except:
                    import traceback
                    traceback.print_exc()
                    if retval == None:
                        # the function's execution failed; delete it
                        del self._idlefuns[function]
                    pass

            socktimeout = self.socket.gettimeout() or nextsleep
            socktimeout = min(socktimeout, nextsleep)
            # Mirror what BaseServer handle_request would do
            try:
                fd_sets = select.select(fds, [], [], socktimeout)
                if fd_sets[0] and self in fd_sets[0]:
                    if self.idle_timeout:
                        self.time = time.time()
                    self._handle_request_noblock()
            except IOError:
                # we ignore interrupted calls
                pass

        # Tell idle functions we're exiting
        for function, data in list(self._idlefuns.items()):
            try:
                retval = function(self, data, True)
            except:
                pass
        self.server_close()
        return

    def set_connection_token(self, token):
        self.connection_token = token

class BitBakeXMLRPCServerConnection(BitBakeBaseServerConnection):
    def __init__(self, serverImpl, clientinfo=("localhost", 0), observer_only = False, featureset = None):
        self.connection, self.transport = _create_server(serverImpl.host, serverImpl.port)
        self.clientinfo = clientinfo
        self.serverImpl = serverImpl
        self.observer_only = observer_only
        if featureset:
            self.featureset = featureset
        else:
            self.featureset = []

    def connect(self, token = None):
        if token is None:
            if self.observer_only:
                token = "observer"
            else:
                token = self.connection.addClient()

        if token is None:
            return None

        self.transport.set_connection_token(token)
        return self

    def setupEventQueue(self):
        self.events = uievent.BBUIEventQueue(self.connection, self.clientinfo)
        for event in bb.event.ui_queue:
            self.events.queue_event(event)

        _, error = self.connection.runCommand(["setFeatures", self.featureset])
        if error:
            # disconnect the client, we can't make the setFeatures call work
            self.connection.removeClient()
            # no need to log it here, the error shall be sent to the client
            raise BaseException(error)

    def removeClient(self):
        if not self.observer_only:
            self.connection.removeClient()

    def terminate(self):
        # Don't wait for server indefinitely
        import socket
        socket.setdefaulttimeout(2)
        try:
            self.events.system_quit()
        except:
            pass
        try:
            self.connection.removeClient()
        except:
            pass

class BitBakeServer(BitBakeBaseServer):
    def initServer(self, interface = ("localhost", 0),
                   single_use = False, idle_timeout=0):
        self.interface = interface
        self.serverImpl = XMLRPCServer(interface, single_use, idle_timeout)

    def detach(self):
        daemonize.createDaemon(self.serverImpl.serve_forever, "bitbake-cookerdaemon.log")
        del self.cooker

    def establishConnection(self, featureset):
        self.connection = BitBakeXMLRPCServerConnection(self.serverImpl, self.interface, False, featureset)
        return self.connection.connect()

    def set_connection_token(self, token):
        self.connection.transport.set_connection_token(token)

class BitBakeXMLRPCClient(BitBakeBaseServer):

    def __init__(self, observer_only = False, token = None):
        self.token = token

        self.observer_only = observer_only
        # if we need extra caches, just tell the server to load them all
        pass

    def saveConnectionDetails(self, remote):
        self.remote = remote

    def establishConnection(self, featureset):
        # The format of "remote" must be "server:port"
        try:
            [host, port] = self.remote.split(":")
            port = int(port)
        except Exception as e:
            bb.warn("Failed to read remote definition (%s)" % str(e))
            raise e

        # We need our IP for the server connection. We get the IP
        # by trying to connect with the server
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            s.connect((host, port))
            ip = s.getsockname()[0]
            s.close()
        except Exception as e:
            bb.warn("Could not create socket for %s:%s (%s)" % (host, port, str(e)))
            raise e
        try:
            self.serverImpl = XMLRPCProxyServer(host, port, use_builtin_types=True)
            self.connection = BitBakeXMLRPCServerConnection(self.serverImpl, (ip, 0), self.observer_only, featureset)
            return self.connection.connect(self.token)
        except Exception as e:
            bb.warn("Could not connect to server at %s:%s (%s)" % (host, port, str(e)))
            raise e

    def endSession(self):
        self.connection.removeClient()
@@ -1,154 +0,0 @@
#
# BitBake XMLRPC Client Interface
#
# Copyright (C) 2006 - 2007  Michael 'Mickey' Lauer
# Copyright (C) 2006 - 2008  Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import os
import sys

import socket
import http.client
import xmlrpc.client

import bb
from bb.ui import uievent

class BBTransport(xmlrpc.client.Transport):
    def __init__(self, timeout):
        self.timeout = timeout
        self.connection_token = None
        xmlrpc.client.Transport.__init__(self)

    # Modified from default to pass timeout to HTTPConnection
    def make_connection(self, host):
        # return an existing connection if possible. This allows
        # HTTP/1.1 keep-alive.
        if self._connection and host == self._connection[0]:
            return self._connection[1]

        # create a HTTP connection object from a host descriptor
        chost, self._extra_headers, x509 = self.get_host_info(host)
        # store the host argument along with the connection object
        self._connection = host, http.client.HTTPConnection(chost, timeout=self.timeout)
        return self._connection[1]

    def set_connection_token(self, token):
        self.connection_token = token

    def send_content(self, h, body):
        if self.connection_token:
            h.putheader("Bitbake-token", self.connection_token)
        xmlrpc.client.Transport.send_content(self, h, body)

def _create_server(host, port, timeout = 60):
    t = BBTransport(timeout)
    s = xmlrpc.client.ServerProxy("http://%s:%d/" % (host, port), transport=t, allow_none=True, use_builtin_types=True)
    return s, t

def check_connection(remote, timeout):
    try:
        host, port = remote.split(":")
        port = int(port)
    except Exception as e:
        bb.warn("Failed to read remote definition (%s)" % str(e))
        raise e

    server, _transport = _create_server(host, port, timeout)
    try:
        ret, err = server.runCommand(['getVariable', 'TOPDIR'])
        if err or not ret:
            return False
    except ConnectionError:
        return False
    return True

class BitBakeXMLRPCServerConnection(object):
    def __init__(self, host, port, clientinfo=("localhost", 0), observer_only = False, featureset = None):
        self.connection, self.transport = _create_server(host, port)
        self.clientinfo = clientinfo
        self.observer_only = observer_only
        if featureset:
            self.featureset = featureset
        else:
            self.featureset = []

        self.events = uievent.BBUIEventQueue(self.connection, self.clientinfo)

        _, error = self.connection.runCommand(["setFeatures", self.featureset])
        if error:
            # disconnect the client, we can't make the setFeatures call work
            self.connection.removeClient()
            # no need to log it here, the error shall be sent to the client
            raise BaseException(error)

    def connect(self, token = None):
        if token is None:
            if self.observer_only:
                token = "observer"
            else:
                token = self.connection.addClient()

        if token is None:
            return None

        self.transport.set_connection_token(token)
        return self

    def removeClient(self):
        if not self.observer_only:
            self.connection.removeClient()

    def terminate(self):
        # Don't wait for server indefinitely
        socket.setdefaulttimeout(2)
        try:
            self.events.system_quit()
        except:
            pass
        try:
            self.connection.removeClient()
        except:
            pass

def connectXMLRPC(remote, featureset, observer_only = False, token = None):
    # The format of "remote" must be "server:port"
    try:
        [host, port] = remote.split(":")
        port = int(port)
    except Exception as e:
        bb.warn("Failed to parse remote definition %s (%s)" % (remote, str(e)))
        raise e

    # We need our IP for the server connection. We get the IP
    # by trying to connect with the server
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect((host, port))
        ip = s.getsockname()[0]
        s.close()
    except Exception as e:
        bb.warn("Could not create socket for %s:%s (%s)" % (host, port, str(e)))
        raise e
    try:
        connection = BitBakeXMLRPCServerConnection(host, port, (ip, 0), observer_only, featureset)
        return connection.connect(token)
    except Exception as e:
        bb.warn("Could not connect to server at %s:%s (%s)" % (host, port, str(e)))
        raise e


@@ -1,158 +0,0 @@
#
# BitBake XMLRPC Server Interface
#
# Copyright (C) 2006 - 2007  Michael 'Mickey' Lauer
# Copyright (C) 2006 - 2008  Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import os
import sys

import hashlib
import time
import inspect
from xmlrpc.server import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler

import bb

# This request handler checks if the request has a "Bitbake-token" header
# field (this comes from the client side) and compares it with its internal
# "Bitbake-token" field (this comes from the server). If the two are not
# equal, it is assumed that a client is trying to connect to the server
# while another client is connected to the server. In this case, a 503 error
# ("service unavailable") is returned to the client.
class BitBakeXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
    def __init__(self, request, client_address, server):
        self.server = server
        SimpleXMLRPCRequestHandler.__init__(self, request, client_address, server)

    def do_POST(self):
        try:
            remote_token = self.headers["Bitbake-token"]
        except:
            remote_token = None
        if 0 and remote_token != self.server.connection_token and remote_token != "observer":
            self.report_503()
        else:
            if remote_token == "observer":
                self.server.readonly = True
            else:
                self.server.readonly = False
            SimpleXMLRPCRequestHandler.do_POST(self)

    def report_503(self):
        self.send_response(503)
        response = 'No more client allowed'
        self.send_header("Content-type", "text/plain")
        self.send_header("Content-length", str(len(response)))
        self.end_headers()
        self.wfile.write(bytes(response, 'utf-8'))

class BitBakeXMLRPCServer(SimpleXMLRPCServer):
    # remove this when you're done with debugging
    # allow_reuse_address = True

    def __init__(self, interface, cooker, parent):
        # Use auto port configuration
        if (interface[1] == -1):
            interface = (interface[0], 0)
        SimpleXMLRPCServer.__init__(self, interface,
                                    requestHandler=BitBakeXMLRPCRequestHandler,
                                    logRequests=False, allow_none=True)
        self.host, self.port = self.socket.getsockname()
        self.interface = interface

        self.connection_token = None
        self.commands = BitBakeXMLRPCServerCommands(self)
        self.register_functions(self.commands, "")

        self.cooker = cooker
        self.parent = parent


    def register_functions(self, context, prefix):
        """
        Convenience method for registering all functions in the scope
        of this class that start with a common prefix
        """
        methodlist = inspect.getmembers(context, inspect.ismethod)
        for name, method in methodlist:
            if name.startswith(prefix):
                self.register_function(method, name[len(prefix):])

    def get_timeout(self, delay):
        socktimeout = self.socket.gettimeout() or delay
        return min(socktimeout, delay)

    def handle_requests(self):
        self._handle_request_noblock()

class BitBakeXMLRPCServerCommands():

    def __init__(self, server):
        self.server = server
        self.has_client = False

    def registerEventHandler(self, host, port):
        """
        Register a remote UI Event Handler
        """
        s, t = bb.server.xmlrpcclient._create_server(host, port)

        # we don't allow connections if the cooker is running
        if (self.server.cooker.state in [bb.cooker.state.parsing, bb.cooker.state.running]):
            return None, "Cooker is busy: %s" % bb.cooker.state.get_name(self.server.cooker.state)

        self.event_handle = bb.event.register_UIHhandler(s, True)
        return self.event_handle, 'OK'

    def unregisterEventHandler(self, handlerNum):
        """
        Unregister a remote UI Event Handler
        """
        ret = bb.event.unregister_UIHhandler(handlerNum, True)
        self.event_handle = None
        return ret

    def runCommand(self, command):
        """
        Run a cooker command on the server
        """
        return self.server.cooker.command.runCommand(command, self.server.readonly)

    def getEventHandle(self):
        return self.event_handle

    def terminateServer(self):
        """
        Trigger the server to quit
        """
        self.server.parent.quit = True
        print("XMLRPC Server triggering exit")
        return

    def addClient(self):
        if self.server.parent.haveui:
            return None
        token = hashlib.md5(str(time.time()).encode("utf-8")).hexdigest()
        self.server.connection_token = token
        self.server.parent.haveui = True
        return token

    def removeClient(self):
        if self.server.parent.haveui:
            self.server.connection_token = None
            self.server.parent.haveui = False

820	bitbake/lib/bb/shell.py	(new file)
@@ -0,0 +1,820 @@
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
##########################################################################
#
# Copyright (C) 2005-2006 Michael 'Mickey' Lauer <mickey@Vanille.de>
# Copyright (C) 2005-2006 Vanille Media
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
##########################################################################
#
# Thanks to:
# * Holger Freyther <zecke@handhelds.org>
# * Justin Patrin <papercrane@reversefold.com>
#
##########################################################################

"""
BitBake Shell

IDEAS:
    * list defined tasks per package
    * list classes
    * toggle force
    * command to reparse just one (or more) bbfile(s)
    * automatic check if reparsing is necessary (inotify?)
    * frontend for bb file manipulation
    * more shell-like features:
        - output control, i.e. pipe output into grep, sort, etc.
        - job control, i.e. bring running commands into background and foreground
    * start parsing in background right after startup
    * ncurses interface

PROBLEMS:
    * force doesn't always work
    * readline completion for commands with more than one parameter

"""

##########################################################################
# Import and setup global variables
##########################################################################

from __future__ import print_function
from functools import reduce
try:
    set
except NameError:
    from sets import Set as set
import sys, os, readline, socket, httplib, urllib, commands, popen2, shlex, Queue, fnmatch
from bb import data, parse, build, cache, taskdata, runqueue, providers as Providers

__version__ = "0.5.3.1"
__credits__ = """BitBake Shell Version %s (C) 2005 Michael 'Mickey' Lauer <mickey@Vanille.de>
Type 'help' for more information, press CTRL-D to exit.""" % __version__

cmds = {}
leave_mainloop = False
last_exception = None
cooker = None
parsed = False
debug = os.environ.get( "BBSHELL_DEBUG", "" )
##########################################################################
# Class BitBakeShellCommands
##########################################################################

class BitBakeShellCommands:
    """This class contains the valid commands for the shell"""

    def __init__( self, shell ):
        """Register all the commands"""
        self._shell = shell
        for attr in BitBakeShellCommands.__dict__:
            if not attr.startswith( "_" ):
                if attr.endswith( "_" ):
                    command = attr[:-1].lower()
                else:
                    command = attr[:].lower()
                method = getattr( BitBakeShellCommands, attr )
                debugOut( "registering command '%s'" % command )
                # scan number of arguments
                usage = getattr( method, "usage", "" )
                if usage != "<...>":
                    numArgs = len( usage.split() )
                else:
                    numArgs = -1
                shell.registerCommand( command, method, numArgs, "%s %s" % ( command, usage ), method.__doc__ )

    def _checkParsed( self ):
        if not parsed:
            print("SHELL: This command needs to parse bbfiles...")
            self.parse( None )

    def _findProvider( self, item ):
        self._checkParsed()
        # Need to use taskData for this information
        preferred = data.getVar( "PREFERRED_PROVIDER_%s" % item, cooker.configuration.data, 1 )
        if not preferred: preferred = item
        try:
            lv, lf, pv, pf = Providers.findBestProvider(preferred, cooker.configuration.data, cooker.status)
        except KeyError:
            if item in cooker.status.providers:
                pf = cooker.status.providers[item][0]
            else:
                pf = None
        return pf

    def alias( self, params ):
        """Register a new name for a command"""
        new, old = params
        if not old in cmds:
            print("ERROR: Command '%s' not known" % old)
        else:
            cmds[new] = cmds[old]
            print("OK")
    alias.usage = "<alias> <command>"

    def buffer( self, params ):
        """Dump specified output buffer"""
        index = params[0]
        print(self._shell.myout.buffer( int( index ) ))
    buffer.usage = "<index>"

    def buffers( self, params ):
        """Show the available output buffers"""
        commands = self._shell.myout.bufferedCommands()
        if not commands:
            print("SHELL: No buffered commands available yet. Start doing something.")
        else:
            print("="*35, "Available Output Buffers", "="*27)
            for index, cmd in enumerate( commands ):
                print("| %s %s" % ( str( index ).ljust( 3 ), cmd ))
            print("="*88)

    def build( self, params, cmd = "build" ):
        """Build a providee"""
        global last_exception
        globexpr = params[0]
        self._checkParsed()
        names = globfilter( cooker.status.pkg_pn, globexpr )
        if len( names ) == 0: names = [ globexpr ]
        print("SHELL: Building %s" % ' '.join( names ))

        td = taskdata.TaskData(cooker.configuration.abort)
        localdata = data.createCopy(cooker.configuration.data)
        data.update_data(localdata)
        data.expandKeys(localdata)

        try:
            tasks = []
            for name in names:
                td.add_provider(localdata, cooker.status, name)
                providers = td.get_provider(name)

                if len(providers) == 0:
                    raise Providers.NoProvider

                tasks.append([name, "do_%s" % cmd])

            td.add_unresolved(localdata, cooker.status)

            rq = runqueue.RunQueue(cooker, localdata, cooker.status, td, tasks)
            rq.prepare_runqueue()
            rq.execute_runqueue()

        except Providers.NoProvider:
|
||||
print("ERROR: No Provider")
|
||||
last_exception = Providers.NoProvider
|
||||
|
||||
except runqueue.TaskFailure as fnids:
|
||||
last_exception = runqueue.TaskFailure
|
||||
|
||||
except build.FuncFailed as e:
|
||||
print("ERROR: Couldn't build '%s'" % names)
|
||||
last_exception = e
|
||||
|
||||
|
||||
build.usage = "<providee>"
|
||||
|
||||
def clean( self, params ):
|
||||
"""Clean a providee"""
|
||||
self.build( params, "clean" )
|
||||
clean.usage = "<providee>"
|
||||
|
||||
def compile( self, params ):
|
||||
"""Execute 'compile' on a providee"""
|
||||
self.build( params, "compile" )
|
||||
compile.usage = "<providee>"
|
||||
|
||||
def configure( self, params ):
|
||||
"""Execute 'configure' on a providee"""
|
||||
self.build( params, "configure" )
|
||||
configure.usage = "<providee>"
|
||||
|
||||
def install( self, params ):
|
||||
"""Execute 'install' on a providee"""
|
||||
self.build( params, "install" )
|
||||
install.usage = "<providee>"
|
||||
|
||||
def edit( self, params ):
|
||||
"""Call $EDITOR on a providee"""
|
||||
name = params[0]
|
||||
bbfile = self._findProvider( name )
|
||||
if bbfile is not None:
|
||||
os.system( "%s %s" % ( os.environ.get( "EDITOR", "vi" ), bbfile ) )
|
||||
else:
|
||||
print("ERROR: Nothing provides '%s'" % name)
|
||||
edit.usage = "<providee>"
|
||||
|
||||
def environment( self, params ):
|
||||
"""Dump out the outer BitBake environment"""
|
||||
cooker.showEnvironment()
|
||||
|
||||
def exit_( self, params ):
|
||||
"""Leave the BitBake Shell"""
|
||||
debugOut( "setting leave_mainloop to true" )
|
||||
global leave_mainloop
|
||||
leave_mainloop = True
|
||||
|
||||
def fetch( self, params ):
|
||||
"""Fetch a providee"""
|
||||
self.build( params, "fetch" )
|
||||
fetch.usage = "<providee>"
|
||||
|
||||
def fileBuild( self, params, cmd = "build" ):
|
||||
"""Parse and build a .bb file"""
|
||||
global last_exception
|
||||
name = params[0]
|
||||
bf = completeFilePath( name )
|
||||
print("SHELL: Calling '%s' on '%s'" % ( cmd, bf ))
|
||||
|
||||
try:
|
||||
cooker.buildFile(bf, cmd)
|
||||
except parse.ParseError:
|
||||
print("ERROR: Unable to open or parse '%s'" % bf)
|
||||
except build.FuncFailed as e:
|
||||
print("ERROR: Couldn't build '%s'" % name)
|
||||
last_exception = e
|
||||
|
||||
fileBuild.usage = "<bbfile>"
|
||||
|
||||
def fileClean( self, params ):
|
||||
"""Clean a .bb file"""
|
||||
self.fileBuild( params, "clean" )
|
||||
fileClean.usage = "<bbfile>"
|
||||
|
||||
def fileEdit( self, params ):
|
||||
"""Call $EDITOR on a .bb file"""
|
||||
name = params[0]
|
||||
os.system( "%s %s" % ( os.environ.get( "EDITOR", "vi" ), completeFilePath( name ) ) )
|
||||
fileEdit.usage = "<bbfile>"
|
||||
|
||||
def fileRebuild( self, params ):
|
||||
"""Rebuild (clean & build) a .bb file"""
|
||||
self.fileBuild( params, "rebuild" )
|
||||
fileRebuild.usage = "<bbfile>"
|
||||
|
||||
def fileReparse( self, params ):
|
||||
"""(re)Parse a bb file"""
|
||||
bbfile = params[0]
|
||||
print("SHELL: Parsing '%s'" % bbfile)
|
||||
parse.update_mtime( bbfile )
|
||||
cooker.parser.reparse(bbfile)
|
||||
if False: #fromCache:
|
||||
print("SHELL: File has not been updated, not reparsing")
|
||||
else:
|
||||
print("SHELL: Parsed")
|
||||
fileReparse.usage = "<bbfile>"
|
||||
|
||||
def abort( self, params ):
|
||||
"""Toggle abort task execution flag (see bitbake -k)"""
|
||||
cooker.configuration.abort = not cooker.configuration.abort
|
||||
print("SHELL: Abort Flag is now '%s'" % repr( cooker.configuration.abort ))
|
||||
|
||||
def force( self, params ):
|
||||
"""Toggle force task execution flag (see bitbake -f)"""
|
||||
cooker.configuration.force = not cooker.configuration.force
|
||||
print("SHELL: Force Flag is now '%s'" % repr( cooker.configuration.force ))
|
||||
|
||||
def help( self, params ):
|
||||
"""Show a comprehensive list of commands and their purpose"""
|
||||
print("="*30, "Available Commands", "="*30)
|
||||
for cmd in sorted(cmds):
|
||||
function, numparams, usage, helptext = cmds[cmd]
|
||||
print("| %s | %s" % (usage.ljust(30), helptext))
|
||||
print("="*78)
|
||||
|
||||
def lastError( self, params ):
|
||||
"""Show the reason or log that was produced by the last BitBake event exception"""
|
||||
if last_exception is None:
|
||||
print("SHELL: No Errors yet (Phew)...")
|
||||
else:
|
||||
reason, event = last_exception.args
|
||||
print("SHELL: Reason for the last error: '%s'" % reason)
|
||||
if ':' in reason:
|
||||
msg, filename = reason.split( ':' )
|
||||
filename = filename.strip()
|
||||
print("SHELL: Dumping log file for last error:")
|
||||
try:
|
||||
print(open( filename ).read())
|
||||
except IOError:
|
||||
print("ERROR: Couldn't open '%s'" % filename)
|
||||
|
||||
def match( self, params ):
|
||||
"""Dump all files or providers matching a glob expression"""
|
||||
what, globexpr = params
|
||||
if what == "files":
|
||||
self._checkParsed()
|
||||
for key in globfilter( cooker.status.pkg_fn, globexpr ): print(key)
|
||||
elif what == "providers":
|
||||
self._checkParsed()
|
||||
for key in globfilter( cooker.status.pkg_pn, globexpr ): print(key)
|
||||
else:
|
||||
print("Usage: match %s" % self.print_.usage)
|
||||
match.usage = "<files|providers> <glob>"
|
||||
|
||||
def new( self, params ):
|
||||
"""Create a new .bb file and open the editor"""
|
||||
dirname, filename = params
|
||||
packages = '/'.join( data.getVar( "BBFILES", cooker.configuration.data, 1 ).split('/')[:-2] )
|
||||
fulldirname = "%s/%s" % ( packages, dirname )
|
||||
|
||||
if not os.path.exists( fulldirname ):
|
||||
print("SHELL: Creating '%s'" % fulldirname)
|
||||
os.mkdir( fulldirname )
|
||||
if os.path.exists( fulldirname ) and os.path.isdir( fulldirname ):
|
||||
if os.path.exists( "%s/%s" % ( fulldirname, filename ) ):
|
||||
print("SHELL: ERROR: %s/%s already exists" % ( fulldirname, filename ))
|
||||
return False
|
||||
print("SHELL: Creating '%s/%s'" % ( fulldirname, filename ))
|
||||
newpackage = open( "%s/%s" % ( fulldirname, filename ), "w" )
|
||||
print("""DESCRIPTION = ""
|
||||
SECTION = ""
|
||||
AUTHOR = ""
|
||||
HOMEPAGE = ""
|
||||
MAINTAINER = ""
|
||||
LICENSE = "GPL"
|
||||
PR = "r0"
|
||||
|
||||
SRC_URI = ""
|
||||
|
||||
#inherit base
|
||||
|
||||
#do_configure() {
|
||||
#
|
||||
#}
|
||||
|
||||
#do_compile() {
|
||||
#
|
||||
#}
|
||||
|
||||
#do_stage() {
|
||||
#
|
||||
#}
|
||||
|
||||
#do_install() {
|
||||
#
|
||||
#}
|
||||
""", file=newpackage)
|
||||
newpackage.close()
|
||||
os.system( "%s %s/%s" % ( os.environ.get( "EDITOR" ), fulldirname, filename ) )
|
||||
new.usage = "<directory> <filename>"
|
||||
|
||||
def package( self, params ):
|
||||
"""Execute 'package' on a providee"""
|
||||
self.build( params, "package" )
|
||||
package.usage = "<providee>"
|
||||
|
||||
def pasteBin( self, params ):
|
||||
"""Send a command + output buffer to the pastebin at http://rafb.net/paste"""
|
||||
index = params[0]
|
||||
contents = self._shell.myout.buffer( int( index ) )
|
||||
sendToPastebin( "output of " + params[0], contents )
|
||||
pasteBin.usage = "<index>"
|
||||
|
||||
def pasteLog( self, params ):
|
||||
"""Send the last event exception error log (if there is one) to http://rafb.net/paste"""
|
||||
if last_exception is None:
|
||||
print("SHELL: No Errors yet (Phew)...")
|
||||
else:
|
||||
reason, event = last_exception.args
|
||||
print("SHELL: Reason for the last error: '%s'" % reason)
|
||||
if ':' in reason:
|
||||
msg, filename = reason.split( ':' )
|
||||
filename = filename.strip()
|
||||
print("SHELL: Pasting log file to pastebin...")
|
||||
|
||||
file = open( filename ).read()
|
||||
sendToPastebin( "contents of " + filename, file )
|
||||
|
||||
def patch( self, params ):
|
||||
"""Execute 'patch' command on a providee"""
|
||||
self.build( params, "patch" )
|
||||
patch.usage = "<providee>"
|
||||
|
||||
def parse( self, params ):
|
||||
"""(Re-)parse .bb files and calculate the dependency graph"""
|
||||
cooker.status = cache.CacheData(cooker.caches_array)
|
||||
ignore = data.getVar("ASSUME_PROVIDED", cooker.configuration.data, 1) or ""
|
||||
cooker.status.ignored_dependencies = set( ignore.split() )
|
||||
cooker.handleCollections( data.getVar("BBFILE_COLLECTIONS", cooker.configuration.data, 1) )
|
||||
|
||||
(filelist, masked) = cooker.collect_bbfiles()
|
||||
cooker.parse_bbfiles(filelist, masked, cooker.myProgressCallback)
|
||||
cooker.buildDepgraph()
|
||||
global parsed
|
||||
parsed = True
|
||||
print()
|
||||
|
||||
def reparse( self, params ):
|
||||
"""(re)Parse a providee's bb file"""
|
||||
bbfile = self._findProvider( params[0] )
|
||||
if bbfile is not None:
|
||||
print("SHELL: Found bbfile '%s' for '%s'" % ( bbfile, params[0] ))
|
||||
self.fileReparse( [ bbfile ] )
|
||||
else:
|
||||
print("ERROR: Nothing provides '%s'" % params[0])
|
||||
reparse.usage = "<providee>"
|
||||
|
||||
def getvar( self, params ):
|
||||
"""Dump the contents of an outer BitBake environment variable"""
|
||||
var = params[0]
|
||||
value = data.getVar( var, cooker.configuration.data, 1 )
|
||||
print(value)
|
||||
getvar.usage = "<variable>"
|
||||
|
||||
def peek( self, params ):
|
||||
"""Dump contents of variable defined in providee's metadata"""
|
||||
name, var = params
|
||||
bbfile = self._findProvider( name )
|
||||
if bbfile is not None:
|
||||
the_data = cache.Cache.loadDataFull(bbfile, cooker.configuration.data)
|
||||
value = the_data.getVar( var, 1 )
|
||||
print(value)
|
||||
else:
|
||||
print("ERROR: Nothing provides '%s'" % name)
|
||||
peek.usage = "<providee> <variable>"
|
||||
|
||||
def poke( self, params ):
|
||||
"""Set contents of variable defined in providee's metadata"""
|
||||
name, var, value = params
|
||||
bbfile = self._findProvider( name )
|
||||
if bbfile is not None:
|
||||
print("ERROR: Sorry, this functionality is currently broken")
|
||||
#d = cooker.pkgdata[bbfile]
|
||||
#data.setVar( var, value, d )
|
||||
|
||||
# mark the change semi persistant
|
||||
#cooker.pkgdata.setDirty(bbfile, d)
|
||||
#print "OK"
|
||||
else:
|
||||
print("ERROR: Nothing provides '%s'" % name)
|
||||
poke.usage = "<providee> <variable> <value>"
|
||||
|
||||
def print_( self, params ):
|
||||
"""Dump all files or providers"""
|
||||
what = params[0]
|
||||
if what == "files":
|
||||
self._checkParsed()
|
||||
for key in cooker.status.pkg_fn: print(key)
|
||||
elif what == "providers":
|
||||
self._checkParsed()
|
||||
for key in cooker.status.providers: print(key)
|
||||
else:
|
||||
print("Usage: print %s" % self.print_.usage)
|
||||
print_.usage = "<files|providers>"
|
||||
|
||||
def python( self, params ):
|
||||
"""Enter the expert mode - an interactive BitBake Python Interpreter"""
|
||||
sys.ps1 = "EXPERT BB>>> "
|
||||
sys.ps2 = "EXPERT BB... "
|
||||
import code
|
||||
interpreter = code.InteractiveConsole( dict( globals() ) )
|
||||
interpreter.interact( "SHELL: Expert Mode - BitBake Python %s\nType 'help' for more information, press CTRL-D to switch back to BBSHELL." % sys.version )
|
||||
|
||||
def showdata( self, params ):
|
||||
"""Execute 'showdata' on a providee"""
|
||||
cooker.showEnvironment(None, params)
|
||||
showdata.usage = "<providee>"
|
||||
|
||||
def setVar( self, params ):
|
||||
"""Set an outer BitBake environment variable"""
|
||||
var, value = params
|
||||
data.setVar( var, value, cooker.configuration.data )
|
||||
print("OK")
|
||||
setVar.usage = "<variable> <value>"
|
||||
|
||||
def rebuild( self, params ):
|
||||
"""Clean and rebuild a .bb file or a providee"""
|
||||
self.build( params, "clean" )
|
||||
self.build( params, "build" )
|
||||
rebuild.usage = "<providee>"
|
||||
|
||||
def shell( self, params ):
|
||||
"""Execute a shell command and dump the output"""
|
||||
if params != "":
|
||||
print(commands.getoutput( " ".join( params ) ))
|
||||
shell.usage = "<...>"
|
||||
|
||||
def stage( self, params ):
|
||||
"""Execute 'stage' on a providee"""
|
||||
self.build( params, "populate_staging" )
|
||||
stage.usage = "<providee>"
|
||||
|
||||
def status( self, params ):
|
||||
"""<just for testing>"""
|
||||
print("-" * 78)
|
||||
print("building list = '%s'" % cooker.building_list)
|
||||
print("build path = '%s'" % cooker.build_path)
|
||||
print("consider_msgs_cache = '%s'" % cooker.consider_msgs_cache)
|
||||
print("build stats = '%s'" % cooker.stats)
|
||||
if last_exception is not None: print("last_exception = '%s'" % repr( last_exception.args ))
|
||||
print("memory output contents = '%s'" % self._shell.myout._buffer)
|
||||
|
||||
def test( self, params ):
|
||||
"""<just for testing>"""
|
||||
print("testCommand called with '%s'" % params)
|
||||
|
||||
def unpack( self, params ):
|
||||
"""Execute 'unpack' on a providee"""
|
||||
self.build( params, "unpack" )
|
||||
unpack.usage = "<providee>"
|
||||
|
||||
def which( self, params ):
|
||||
"""Computes the providers for a given providee"""
|
||||
# Need to use taskData for this information
|
||||
item = params[0]
|
||||
|
||||
self._checkParsed()
|
||||
|
||||
preferred = data.getVar( "PREFERRED_PROVIDER_%s" % item, cooker.configuration.data, 1 )
|
||||
if not preferred: preferred = item
|
||||
|
||||
try:
|
||||
lv, lf, pv, pf = Providers.findBestProvider(preferred, cooker.configuration.data, cooker.status)
|
||||
except KeyError:
|
||||
lv, lf, pv, pf = (None,)*4
|
||||
|
||||
try:
|
||||
providers = cooker.status.providers[item]
|
||||
except KeyError:
|
||||
print("SHELL: ERROR: Nothing provides", preferred)
|
||||
else:
|
||||
for provider in providers:
|
||||
if provider == pf: provider = " (***) %s" % provider
|
||||
else: provider = " %s" % provider
|
||||
print(provider)
|
||||
which.usage = "<providee>"
|
||||
|
||||
##########################################################################
|
||||
# Common helper functions
|
||||
##########################################################################
|
||||
|
||||
def completeFilePath( bbfile ):
|
||||
"""Get the complete bbfile path"""
|
||||
if not cooker.status: return bbfile
|
||||
if not cooker.status.pkg_fn: return bbfile
|
||||
for key in cooker.status.pkg_fn:
|
||||
if key.endswith( bbfile ):
|
||||
return key
|
||||
return bbfile
|
||||
|
||||
def sendToPastebin( desc, content ):
|
||||
"""Send content to http://oe.pastebin.com"""
|
||||
mydata = {}
|
||||
mydata["lang"] = "Plain Text"
|
||||
mydata["desc"] = desc
|
||||
mydata["cvt_tabs"] = "No"
|
||||
mydata["nick"] = "%s@%s" % ( os.environ.get( "USER", "unknown" ), socket.gethostname() or "unknown" )
|
||||
mydata["text"] = content
|
||||
params = urllib.urlencode( mydata )
|
||||
headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
|
||||
|
||||
host = "rafb.net"
|
||||
conn = httplib.HTTPConnection( "%s:80" % host )
|
||||
conn.request("POST", "/paste/paste.php", params, headers )
|
||||
|
||||
response = conn.getresponse()
|
||||
conn.close()
|
||||
|
||||
if response.status == 302:
|
||||
location = response.getheader( "location" ) or "unknown"
|
||||
print("SHELL: Pasted to http://%s%s" % ( host, location ))
|
||||
else:
|
||||
print("ERROR: %s %s" % ( response.status, response.reason ))
|
||||
|
||||
def completer( text, state ):
|
||||
"""Return a possible readline completion"""
|
||||
debugOut( "completer called with text='%s', state='%d'" % ( text, state ) )
|
||||
|
||||
if state == 0:
|
||||
line = readline.get_line_buffer()
|
||||
if " " in line:
|
||||
line = line.split()
|
||||
# we are in second (or more) argument
|
||||
if line[0] in cmds and hasattr( cmds[line[0]][0], "usage" ): # known command and usage
|
||||
u = getattr( cmds[line[0]][0], "usage" ).split()[0]
|
||||
if u == "<variable>":
|
||||
allmatches = cooker.configuration.data.keys()
|
||||
elif u == "<bbfile>":
|
||||
if cooker.status.pkg_fn is None: allmatches = [ "(No Matches Available. Parsed yet?)" ]
|
||||
else: allmatches = [ x.split("/")[-1] for x in cooker.status.pkg_fn ]
|
||||
elif u == "<providee>":
|
||||
if cooker.status.pkg_fn is None: allmatches = [ "(No Matches Available. Parsed yet?)" ]
|
||||
else: allmatches = cooker.status.providers.iterkeys()
|
||||
else: allmatches = [ "(No tab completion available for this command)" ]
|
||||
else: allmatches = [ "(No tab completion available for this command)" ]
|
||||
else:
|
||||
# we are in first argument
|
||||
allmatches = cmds.iterkeys()
|
||||
|
||||
completer.matches = [ x for x in allmatches if x[:len(text)] == text ]
|
||||
#print "completer.matches = '%s'" % completer.matches
|
||||
if len( completer.matches ) > state:
|
||||
return completer.matches[state]
|
||||
else:
|
||||
return None
|
||||
|
||||
def debugOut( text ):
|
||||
if debug:
|
||||
sys.stderr.write( "( %s )\n" % text )
|
||||
|
||||
def columnize( alist, width = 80 ):
|
||||
"""
|
||||
A word-wrap function that preserves existing line breaks
|
||||
and most spaces in the text. Expects that existing line
|
||||
breaks are posix newlines (\n).
|
||||
"""
|
||||
return reduce(lambda line, word, width=width: '%s%s%s' %
|
||||
(line,
|
||||
' \n'[(len(line[line.rfind('\n')+1:])
|
||||
+ len(word.split('\n', 1)[0]
|
||||
) >= width)],
|
||||
word),
|
||||
alist
|
||||
)
|
||||
|
||||
def globfilter( names, pattern ):
|
||||
return fnmatch.filter( names, pattern )
|
||||
|
||||
##########################################################################
|
||||
# Class MemoryOutput
|
||||
##########################################################################
|
||||
|
||||
class MemoryOutput:
|
||||
"""File-like output class buffering the output of the last 10 commands"""
|
||||
def __init__( self, delegate ):
|
||||
self.delegate = delegate
|
||||
self._buffer = []
|
||||
self.text = []
|
||||
self._command = None
|
||||
|
||||
def startCommand( self, command ):
|
||||
self._command = command
|
||||
self.text = []
|
||||
def endCommand( self ):
|
||||
if self._command is not None:
|
||||
if len( self._buffer ) == 10: del self._buffer[0]
|
||||
self._buffer.append( ( self._command, self.text ) )
|
||||
def removeLast( self ):
|
||||
if self._buffer:
|
||||
del self._buffer[ len( self._buffer ) - 1 ]
|
||||
self.text = []
|
||||
self._command = None
|
||||
def lastBuffer( self ):
|
||||
if self._buffer:
|
||||
return self._buffer[ len( self._buffer ) -1 ][1]
|
||||
def bufferedCommands( self ):
|
||||
return [ cmd for cmd, output in self._buffer ]
|
||||
def buffer( self, i ):
|
||||
if i < len( self._buffer ):
|
||||
return "BB>> %s\n%s" % ( self._buffer[i][0], "".join( self._buffer[i][1] ) )
|
||||
else: return "ERROR: Invalid buffer number. Buffer needs to be in (0, %d)" % ( len( self._buffer ) - 1 )
|
||||
def write( self, text ):
|
||||
if self._command is not None and text != "BB>> ": self.text.append( text )
|
||||
if self.delegate is not None: self.delegate.write( text )
|
||||
def flush( self ):
|
||||
return self.delegate.flush()
|
||||
def fileno( self ):
|
||||
return self.delegate.fileno()
|
||||
def isatty( self ):
|
||||
return self.delegate.isatty()
|
||||
|
||||
##########################################################################
|
||||
# Class BitBakeShell
|
||||
##########################################################################
|
||||
|
||||
class BitBakeShell:
|
||||
|
||||
def __init__( self ):
|
||||
"""Register commands and set up readline"""
|
||||
self.commandQ = Queue.Queue()
|
||||
self.commands = BitBakeShellCommands( self )
|
||||
self.myout = MemoryOutput( sys.stdout )
|
||||
self.historyfilename = os.path.expanduser( "~/.bbsh_history" )
|
||||
self.startupfilename = os.path.expanduser( "~/.bbsh_startup" )
|
||||
|
||||
readline.set_completer( completer )
|
||||
readline.set_completer_delims( " " )
|
||||
readline.parse_and_bind("tab: complete")
|
||||
|
||||
try:
|
||||
readline.read_history_file( self.historyfilename )
|
||||
except IOError:
|
||||
pass # It doesn't exist yet.
|
||||
|
||||
print(__credits__)
|
||||
|
||||
def cleanup( self ):
|
||||
"""Write readline history and clean up resources"""
|
||||
debugOut( "writing command history" )
|
||||
try:
|
||||
readline.write_history_file( self.historyfilename )
|
||||
except:
|
||||
print("SHELL: Unable to save command history")
|
||||
|
||||
def registerCommand( self, command, function, numparams = 0, usage = "", helptext = "" ):
|
||||
"""Register a command"""
|
||||
if usage == "": usage = command
|
||||
if helptext == "": helptext = function.__doc__ or "<not yet documented>"
|
||||
cmds[command] = ( function, numparams, usage, helptext )
|
||||
|
||||
def processCommand( self, command, params ):
|
||||
"""Process a command. Check number of params and print a usage string, if appropriate"""
|
||||
debugOut( "processing command '%s'..." % command )
|
||||
try:
|
||||
function, numparams, usage, helptext = cmds[command]
|
||||
except KeyError:
|
||||
print("SHELL: ERROR: '%s' command is not a valid command." % command)
|
||||
self.myout.removeLast()
|
||||
else:
|
||||
if (numparams != -1) and (not len( params ) == numparams):
|
||||
print("Usage: '%s'" % usage)
|
||||
return
|
||||
|
||||
result = function( self.commands, params )
|
||||
debugOut( "result was '%s'" % result )
|
||||
|
||||
def processStartupFile( self ):
|
||||
"""Read and execute all commands found in $HOME/.bbsh_startup"""
|
||||
if os.path.exists( self.startupfilename ):
|
||||
startupfile = open( self.startupfilename, "r" )
|
||||
for cmdline in startupfile:
|
||||
debugOut( "processing startup line '%s'" % cmdline )
|
||||
if not cmdline:
|
||||
continue
|
||||
if "|" in cmdline:
|
||||
print("ERROR: '|' in startup file is not allowed. Ignoring line")
|
||||
continue
|
||||
self.commandQ.put( cmdline.strip() )
|
||||
|
||||
def main( self ):
|
||||
"""The main command loop"""
|
||||
while not leave_mainloop:
|
||||
try:
|
||||
if self.commandQ.empty():
|
||||
sys.stdout = self.myout.delegate
|
||||
cmdline = raw_input( "BB>> " )
|
||||
sys.stdout = self.myout
|
||||
else:
|
||||
cmdline = self.commandQ.get()
|
||||
if cmdline:
|
||||
allCommands = cmdline.split( ';' )
|
||||
for command in allCommands:
|
||||
pipecmd = None
|
||||
#
|
||||
# special case for expert mode
|
||||
if command == 'python':
|
||||
sys.stdout = self.myout.delegate
|
||||
self.processCommand( command, "" )
|
||||
sys.stdout = self.myout
|
||||
else:
|
||||
self.myout.startCommand( command )
|
||||
if '|' in command: # disable output
|
||||
command, pipecmd = command.split( '|' )
|
||||
delegate = self.myout.delegate
|
||||
self.myout.delegate = None
|
||||
tokens = shlex.split( command, True )
|
||||
self.processCommand( tokens[0], tokens[1:] or "" )
|
||||
self.myout.endCommand()
|
||||
if pipecmd is not None: # restore output
|
||||
self.myout.delegate = delegate
|
||||
|
||||
pipe = popen2.Popen4( pipecmd )
|
||||
pipe.tochild.write( "\n".join( self.myout.lastBuffer() ) )
|
||||
pipe.tochild.close()
|
||||
sys.stdout.write( pipe.fromchild.read() )
|
||||
#
|
||||
except EOFError:
|
||||
print()
|
||||
return
|
||||
except KeyboardInterrupt:
|
||||
print()
|
||||
|
||||
##########################################################################
|
||||
# Start function - called from the BitBake command line utility
|
||||
##########################################################################
|
||||
|
||||
def start( aCooker ):
|
||||
global cooker
|
||||
cooker = aCooker
|
||||
bbshell = BitBakeShell()
|
||||
bbshell.processStartupFile()
|
||||
bbshell.main()
|
||||
bbshell.cleanup()
|
||||
|
||||
if __name__ == "__main__":
|
||||
print("SHELL: Sorry, this program should only be called by BitBake.")
|
||||
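Note: bb/shell.py above builds its command table by reflection rather than an
explicit registry. Every public method of BitBakeShellCommands becomes a shell
command (a trailing underscore is stripped, so exit_ and print_ map to the
commands exit and print), and the expected argument count is derived from the
words of the method's usage string, with "<...>" meaning any number of
arguments. A minimal standalone sketch of that convention follows; the class
and variable names here (Commands, registry) are illustrative, not taken from
the file:

    # Sketch of the registration convention used by BitBakeShellCommands.
    class Commands:
        def exit_(self, params):            # trailing "_" avoids a name clash
            """Leave the shell"""
        exit_.usage = ""

        def alias(self, params):
            """Register a new name for a command"""
        alias.usage = "<alias> <command>"

    registry = {}
    for attr in Commands.__dict__:
        if attr.startswith("_"):
            continue                        # skip private and dunder names
        command = (attr[:-1] if attr.endswith("_") else attr).lower()
        method = getattr(Commands, attr)
        usage = getattr(method, "usage", "")
        # "<...>" encodes "any number of arguments" as -1, as the shell does
        numargs = -1 if usage == "<...>" else len(usage.split())
        registry[command] = (method, numargs, "%s %s" % (command, usage))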
bitbake/lib/bb/siggen.py
@@ -5,8 +5,6 @@ import re
 import tempfile
 import pickle
 import bb.data
-import difflib
-import simplediff
 from bb.checksum import FileChecksumCache
 
 logger = logging.getLogger('BitBake.SigGen')
@@ -15,7 +13,7 @@ def init(d):
     siggens = [obj for obj in globals().values()
                if type(obj) is type and issubclass(obj, SignatureGenerator)]
 
-    desired = d.getVar("BB_SIGNATURE_HANDLER") or "noop"
+    desired = d.getVar("BB_SIGNATURE_HANDLER", True) or "noop"
     for sg in siggens:
         if desired == sg.name:
             return sg(d)
@@ -41,9 +39,6 @@ class SignatureGenerator(object):
     def finalise(self, fn, d, varient):
         return
 
-    def get_unihash(self, task):
-        return self.taskhash[task]
-
     def get_taskhash(self, fn, task, deps, dataCache):
         return "0"
 
@@ -72,10 +67,6 @@ class SignatureGenerator(object):
     def set_taskdata(self, data):
         self.runtaskdeps, self.taskhash, self.file_checksum_values, self.taints, self.basehash = data
 
-    def reset(self, data):
-        self.__init__(data)
-
-
 class SignatureGeneratorBasic(SignatureGenerator):
     """
    """
@@ -90,11 +81,11 @@ class SignatureGeneratorBasic(SignatureGenerator):
         self.taints = {}
         self.gendeps = {}
         self.lookupcache = {}
-        self.pkgnameextract = re.compile(r"(?P<fn>.*)\..*")
-        self.basewhitelist = set((data.getVar("BB_HASHBASE_WHITELIST") or "").split())
+        self.pkgnameextract = re.compile("(?P<fn>.*)\..*")
+        self.basewhitelist = set((data.getVar("BB_HASHBASE_WHITELIST", True) or "").split())
         self.taskwhitelist = None
         self.init_rundepcheck(data)
-        checksum_cache_file = data.getVar("BB_HASH_CHECKSUM_CACHE_FILE")
+        checksum_cache_file = data.getVar("BB_HASH_CHECKSUM_CACHE_FILE", True)
         if checksum_cache_file:
             self.checksum_cache = FileChecksumCache()
             self.checksum_cache.init_cache(data, checksum_cache_file)
@@ -102,7 +93,7 @@ class SignatureGeneratorBasic(SignatureGenerator):
             self.checksum_cache = None
 
     def init_rundepcheck(self, data):
-        self.taskwhitelist = data.getVar("BB_HASHTASK_WHITELIST") or None
+        self.taskwhitelist = data.getVar("BB_HASHTASK_WHITELIST", True) or None
         if self.taskwhitelist:
             self.twl = re.compile(self.taskwhitelist)
         else:
@@ -110,16 +101,44 @@ class SignatureGeneratorBasic(SignatureGenerator):
 
     def _build_data(self, fn, d):
 
-        ignore_mismatch = ((d.getVar("BB_HASH_IGNORE_MISMATCH") or '') == '1')
         tasklist, gendeps, lookupcache = bb.data.generate_dependencies(d)
 
-        taskdeps, basehash = bb.data.generate_dependency_hash(tasklist, gendeps, lookupcache, self.basewhitelist, fn)
+        taskdeps = {}
+        basehash = {}
 
         for task in tasklist:
+            data = lookupcache[task]
+
+            if data is None:
+                bb.error("Task %s from %s seems to be empty?!" % (task, fn))
+                data = ''
+
+            gendeps[task] -= self.basewhitelist
+            newdeps = gendeps[task]
+            seen = set()
+            while newdeps:
+                nextdeps = newdeps
+                seen |= nextdeps
+                newdeps = set()
+                for dep in nextdeps:
+                    if dep in self.basewhitelist:
+                        continue
+                    gendeps[dep] -= self.basewhitelist
+                    newdeps |= gendeps[dep]
+                newdeps -= seen
+
+            alldeps = sorted(seen)
+            for dep in alldeps:
+                data = data + dep
+                var = lookupcache[dep]
+                if var is not None:
+                    data = data + str(var)
+            datahash = hashlib.md5(data.encode("utf-8")).hexdigest()
             k = fn + "." + task
-            if not ignore_mismatch and k in self.basehash and self.basehash[k] != basehash[k]:
-                bb.error("When reparsing %s, the basehash value changed from %s to %s. The metadata is not deterministic and this needs to be fixed." % (k, self.basehash[k], basehash[k]))
-            self.basehash[k] = basehash[k]
+            if k in self.basehash and self.basehash[k] != datahash:
+                bb.error("When reparsing %s, the basehash value changed from %s to %s. The metadata is not deterministic and this needs to be fixed." % (k, self.basehash[k], datahash))
+            self.basehash[k] = datahash
+            taskdeps[task] = alldeps
 
         self.taskdeps[fn] = taskdeps
         self.gendeps[fn] = gendeps
@@ -135,15 +154,13 @@ class SignatureGeneratorBasic(SignatureGenerator):
 
         try:
             taskdeps = self._build_data(fn, d)
-        except bb.parse.SkipRecipe:
-            raise
         except:
             bb.warn("Error during finalise of %s" % fn)
             raise
 
         #Slow but can be useful for debugging mismatched basehashes
         #for task in self.taskdeps[fn]:
-        #    self.dump_sigtask(fn, task, d.getVar("STAMP"), False)
+        #    self.dump_sigtask(fn, task, d.getVar("STAMP", True), False)
 
         for task in taskdeps:
             d.setVar("BB_BASEHASH_task-%s" % task, self.basehash[fn + "." + task])
@@ -167,31 +184,20 @@ class SignatureGeneratorBasic(SignatureGenerator):
         return taint
 
     def get_taskhash(self, fn, task, deps, dataCache):
-
-        mc = ''
-        if fn.startswith('multiconfig:'):
-            mc = fn.split(':')[1]
         k = fn + "." + task
-
         data = dataCache.basetaskhash[k]
         self.basehash[k] = data
         self.runtaskdeps[k] = []
         self.file_checksum_values[k] = []
         recipename = dataCache.pkg_fn[fn]
 
         for dep in sorted(deps, key=clean_basepath):
-            pkgname = self.pkgnameextract.search(dep).group('fn')
-            if mc:
-                depmc = pkgname.split(':')[1]
-                if mc != depmc:
-                    continue
-            if dep.startswith("multiconfig:") and not mc:
-                continue
-            depname = dataCache.pkg_fn[pkgname]
+            depname = dataCache.pkg_fn[self.pkgnameextract.search(dep).group('fn')]
             if not self.rundep_check(fn, recipename, task, dep, depname, dataCache):
                 continue
             if dep not in self.taskhash:
-                bb.fatal("%s is not in taskhash, caller isn't calling in dependency order?" % dep)
-            data = data + self.get_unihash(dep)
+                bb.fatal("%s is not in taskhash, caller isn't calling in dependency order?", dep)
+            data = data + self.taskhash[dep]
             self.runtaskdeps[k].append(dep)
 
         if task in dataCache.file_checksums[fn]:
@@ -218,7 +224,7 @@ class SignatureGeneratorBasic(SignatureGenerator):
             self.taints[k] = taint
             logger.warning("%s is tainted from a forced run" % k)
 
-        h = hashlib.sha256(data.encode("utf-8")).hexdigest()
+        h = hashlib.md5(data.encode("utf-8")).hexdigest()
         self.taskhash[k] = h
         #d.setVar("BB_TASKHASH_task-%s" % task, taskhash[task])
         return h
@@ -266,7 +272,7 @@ class SignatureGeneratorBasic(SignatureGenerator):
         data['file_checksum_values'] = [(os.path.basename(f), cs) for f,cs in self.file_checksum_values[k]]
         data['runtaskhashes'] = {}
         for dep in data['runtaskdeps']:
-            data['runtaskhashes'][dep] = self.get_unihash(dep)
+            data['runtaskhashes'][dep] = self.taskhash[dep]
         data['taskhash'] = self.taskhash[k]
 
         taint = self.read_taint(fn, task, referencestamp)
@@ -300,8 +306,8 @@ class SignatureGeneratorBasic(SignatureGenerator):
                 pass
             raise err
 
-    def dump_sigfn(self, fn, dataCaches, options):
-        if fn in self.taskdeps:
+    def dump_sigs(self, dataCaches, options):
+        for fn in self.taskdeps:
             for task in self.taskdeps[fn]:
                 tid = fn + ":" + task
                 (mc, _, _) = bb.runqueue.split_tid(tid)
@@ -316,13 +322,6 @@ class SignatureGeneratorBasic(SignatureGenerator):
 class SignatureGeneratorBasicHash(SignatureGeneratorBasic):
     name = "basichash"
 
-    def get_stampfile_hash(self, task):
-        if task in self.taskhash:
-            return self.taskhash[task]
-
-        # If task is not in basehash, then error
-        return self.basehash[task]
-
     def stampfile(self, stampbase, fn, taskname, extrainfo, clean=False):
         if taskname != "do_setscene" and taskname.endswith("_setscene"):
             k = fn + "." + taskname[:-9]
@@ -330,81 +329,32 @@ class SignatureGeneratorBasicHash(SignatureGeneratorBasic):
             k = fn + "." + taskname
         if clean:
             h = "*"
+        elif k in self.taskhash:
+            h = self.taskhash[k]
         else:
-            h = self.get_stampfile_hash(k)
-
+            # If k is not in basehash, then error
+            h = self.basehash[k]
         return ("%s.%s.%s.%s" % (stampbase, taskname, h, extrainfo)).rstrip('.')
 
     def stampcleanmask(self, stampbase, fn, taskname, extrainfo):
         return self.stampfile(stampbase, fn, taskname, extrainfo, clean=True)
 
 
    def invalidate_task(self, task, d, fn):
        bb.note("Tainting hash to force rebuild of task %s, %s" % (fn, task))
        bb.build.write_taint(task, d, fn)
 
 def dump_this_task(outfile, d):
     import bb.parse
-    fn = d.getVar("BB_FILENAME")
-    task = "do_" + d.getVar("BB_CURRENTTASK")
+    fn = d.getVar("BB_FILENAME", True)
+    task = "do_" + d.getVar("BB_CURRENTTASK", True)
     referencestamp = bb.build.stamp_internal(task, d, None, True)
     bb.parse.siggen.dump_sigtask(fn, task, outfile, "customfile:" + referencestamp)
 
-def init_colors(enable_color):
-    """Initialise colour dict for passing to compare_sigfiles()"""
-    # First set up the colours
-    colors = {'color_title': '\033[1m',
-              'color_default': '\033[0m',
-              'color_add': '\033[0;32m',
-              'color_remove': '\033[0;31m',
-             }
-    # Leave all keys present but clear the values
-    if not enable_color:
-        for k in colors.keys():
-            colors[k] = ''
-    return colors
-
-def worddiff_str(oldstr, newstr, colors=None):
-    if not colors:
-        colors = init_colors(False)
-    diff = simplediff.diff(oldstr.split(' '), newstr.split(' '))
-    ret = []
-    for change, value in diff:
-        value = ' '.join(value)
-        if change == '=':
-            ret.append(value)
-        elif change == '+':
-            item = '{color_add}{{+{value}+}}{color_default}'.format(value=value, **colors)
-            ret.append(item)
-        elif change == '-':
-            item = '{color_remove}[-{value}-]{color_default}'.format(value=value, **colors)
-            ret.append(item)
-    whitespace_note = ''
-    if oldstr != newstr and ' '.join(oldstr.split()) == ' '.join(newstr.split()):
-        whitespace_note = ' (whitespace changed)'
-    return '"%s"%s' % (' '.join(ret), whitespace_note)
-
-def list_inline_diff(oldlist, newlist, colors=None):
-    if not colors:
-        colors = init_colors(False)
-    diff = simplediff.diff(oldlist, newlist)
-    ret = []
-    for change, value in diff:
-        value = ' '.join(value)
-        if change == '=':
-            ret.append("'%s'" % value)
-        elif change == '+':
-            item = '{color_add}+{value}{color_default}'.format(value=value, **colors)
-            ret.append(item)
-        elif change == '-':
-            item = '{color_remove}-{value}{color_default}'.format(value=value, **colors)
-            ret.append(item)
-    return '[%s]' % (', '.join(ret))
-
 def clean_basepath(a):
-    mc = None
-    if a.startswith("multiconfig:"):
-        _, mc, a = a.split(":", 2)
-    b = a.rsplit("/", 2)[1] + '/' + a.rsplit("/", 2)[2]
+    b = a.rsplit("/", 2)[1] + a.rsplit("/", 2)[2]
     if a.startswith("virtual:"):
         b = b + ":" + a.rsplit(":", 1)[0]
-    if mc:
@@ -423,26 +373,9 @@ def clean_basepaths_list(a):
         b.append(clean_basepath(x))
     return b
 
-def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
+def compare_sigfiles(a, b, recursecb = None):
     output = []
 
-    colors = init_colors(color)
-    def color_format(formatstr, **values):
-        """
-        Return colour formatted string.
-        NOTE: call with the format string, not an already formatted string
-        containing values (otherwise you could have trouble with { and }
-        characters)
-        """
-        if not formatstr.endswith('{color_default}'):
-            formatstr += '{color_default}'
-        # In newer python 3 versions you can pass both of these directly,
-        # but we only require 3.4 at the moment
-        formatparams = {}
-        formatparams.update(colors)
-        formatparams.update(values)
-        return formatstr.format(**formatparams)
-
     with open(a, 'rb') as f:
         p1 = pickle.Unpickler(f)
         a_data = p1.load()
@@ -496,59 +429,39 @@ def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
         return changed, added, removed
 
     if 'basewhitelist' in a_data and a_data['basewhitelist'] != b_data['basewhitelist']:
-        output.append(color_format("{color_title}basewhitelist changed{color_default} from '%s' to '%s'") % (a_data['basewhitelist'], b_data['basewhitelist']))
+        output.append("basewhitelist changed from '%s' to '%s'" % (a_data['basewhitelist'], b_data['basewhitelist']))
         if a_data['basewhitelist'] and b_data['basewhitelist']:
             output.append("changed items: %s" % a_data['basewhitelist'].symmetric_difference(b_data['basewhitelist']))
 
     if 'taskwhitelist' in a_data and a_data['taskwhitelist'] != b_data['taskwhitelist']:
-        output.append(color_format("{color_title}taskwhitelist changed{color_default} from '%s' to '%s'") % (a_data['taskwhitelist'], b_data['taskwhitelist']))
+        output.append("taskwhitelist changed from '%s' to '%s'" % (a_data['taskwhitelist'], b_data['taskwhitelist']))
        if a_data['taskwhitelist'] and b_data['taskwhitelist']:
            output.append("changed items: %s" % a_data['taskwhitelist'].symmetric_difference(b_data['taskwhitelist']))
 
     if a_data['taskdeps'] != b_data['taskdeps']:
-        output.append(color_format("{color_title}Task dependencies changed{color_default} from:\n%s\nto:\n%s") % (sorted(a_data['taskdeps']), sorted(b_data['taskdeps'])))
+        output.append("Task dependencies changed from:\n%s\nto:\n%s" % (sorted(a_data['taskdeps']), sorted(b_data['taskdeps'])))
 
-    if a_data['basehash'] != b_data['basehash'] and not collapsed:
-        output.append(color_format("{color_title}basehash changed{color_default} from %s to %s") % (a_data['basehash'], b_data['basehash']))
+    if a_data['basehash'] != b_data['basehash']:
+        output.append("basehash changed from %s to %s" % (a_data['basehash'], b_data['basehash']))
 
     changed, added, removed = dict_diff(a_data['gendeps'], b_data['gendeps'], a_data['basewhitelist'] & b_data['basewhitelist'])
     if changed:
         for dep in changed:
-            output.append(color_format("{color_title}List of dependencies for variable %s changed from '{color_default}%s{color_title}' to '{color_default}%s{color_title}'") % (dep, a_data['gendeps'][dep], b_data['gendeps'][dep]))
+            output.append("List of dependencies for variable %s changed from '%s' to '%s'" % (dep, a_data['gendeps'][dep], b_data['gendeps'][dep]))
             if a_data['gendeps'][dep] and b_data['gendeps'][dep]:
                 output.append("changed items: %s" % a_data['gendeps'][dep].symmetric_difference(b_data['gendeps'][dep]))
     if added:
         for dep in added:
-            output.append(color_format("{color_title}Dependency on variable %s was added") % (dep))
+            output.append("Dependency on variable %s was added" % (dep))
     if removed:
         for dep in removed:
-            output.append(color_format("{color_title}Dependency on Variable %s was removed") % (dep))
+            output.append("Dependency on Variable %s was removed" % (dep))
 
 
     changed, added, removed = dict_diff(a_data['varvals'], b_data['varvals'])
     if changed:
         for dep in changed:
-            oldval = a_data['varvals'][dep]
-            newval = b_data['varvals'][dep]
-            if newval and oldval and ('\n' in oldval or '\n' in newval):
-                diff = difflib.unified_diff(oldval.splitlines(), newval.splitlines(), lineterm='')
-                # Cut off the first two lines, since we aren't interested in
-                # the old/new filename (they are blank anyway in this case)
-                difflines = list(diff)[2:]
-                if color:
-                    # Add colour to diff output
-                    for i, line in enumerate(difflines):
-                        if line.startswith('+'):
-                            line = color_format('{color_add}{line}', line=line)
-                            difflines[i] = line
-                        elif line.startswith('-'):
-                            line = color_format('{color_remove}{line}', line=line)
-                            difflines[i] = line
-                output.append(color_format("{color_title}Variable {var} value changed:{color_default}\n{diff}", var=dep, diff='\n'.join(difflines)))
-            elif newval and oldval and (' ' in oldval or ' ' in newval):
-                output.append(color_format("{color_title}Variable {var} value changed:{color_default}\n{diff}", var=dep, diff=worddiff_str(oldval, newval, colors)))
-            else:
-                output.append(color_format("{color_title}Variable {var} value changed from '{color_default}{oldval}{color_title}' to '{color_default}{newval}{color_title}'{color_default}", var=dep, oldval=oldval, newval=newval))
+            output.append("Variable %s value changed from '%s' to '%s'" % (dep, a_data['varvals'][dep], b_data['varvals'][dep]))
 
     if not 'file_checksum_values' in a_data:
         a_data['file_checksum_values'] = {}
@@ -558,38 +471,32 @@ def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
     changed, added, removed = file_checksums_diff(a_data['file_checksum_values'], b_data['file_checksum_values'])
     if changed:
         for f, old, new in changed:
-            output.append(color_format("{color_title}Checksum for file %s changed{color_default} from %s to %s") % (f, old, new))
+            output.append("Checksum for file %s changed from %s to %s" % (f, old, new))
     if added:
         for f in added:
-            output.append(color_format("{color_title}Dependency on checksum of file %s was added") % (f))
+            output.append("Dependency on checksum of file %s was added" % (f))
     if removed:
         for f in removed:
-            output.append(color_format("{color_title}Dependency on checksum of file %s was removed") % (f))
+            output.append("Dependency on checksum of file %s was removed" % (f))
 
     if not 'runtaskdeps' in a_data:
         a_data['runtaskdeps'] = {}
     if not 'runtaskdeps' in b_data:
         b_data['runtaskdeps'] = {}
 
-    if not collapsed:
-        if len(a_data['runtaskdeps']) != len(b_data['runtaskdeps']):
-            changed = ["Number of task dependencies changed"]
-        else:
-            changed = []
-            for idx, task in enumerate(a_data['runtaskdeps']):
-                a = a_data['runtaskdeps'][idx]
-                b = b_data['runtaskdeps'][idx]
-                if a_data['runtaskhashes'][a] != b_data['runtaskhashes'][b] and not collapsed:
-                    changed.append("%s with hash %s\n changed to\n%s with hash %s" % (clean_basepath(a), a_data['runtaskhashes'][a], clean_basepath(b), b_data['runtaskhashes'][b]))
+    if len(a_data['runtaskdeps']) != len(b_data['runtaskdeps']):
+        changed = ["Number of task dependencies changed"]
+    else:
+        changed = []
+        for idx, task in enumerate(a_data['runtaskdeps']):
+            a = a_data['runtaskdeps'][idx]
+            b = b_data['runtaskdeps'][idx]
+            if a_data['runtaskhashes'][a] != b_data['runtaskhashes'][b]:
+                changed.append("%s with hash %s\n changed to\n%s with hash %s" % (a, a_data['runtaskhashes'][a], b, b_data['runtaskhashes'][b]))
 
-        if changed:
-            clean_a = clean_basepaths_list(a_data['runtaskdeps'])
-            clean_b = clean_basepaths_list(b_data['runtaskdeps'])
-            if clean_a != clean_b:
-                output.append(color_format("{color_title}runtaskdeps changed:{color_default}\n%s") % list_inline_diff(clean_a, clean_b, colors))
-            else:
-                output.append(color_format("{color_title}runtaskdeps changed:"))
-            output.append("\n".join(changed))
+    if changed:
+        output.append("runtaskdeps changed from %s to %s" % (clean_basepaths_list(a_data['runtaskdeps']), clean_basepaths_list(b_data['runtaskdeps'])))
+        output.append("\n".join(changed))
 
 
     if 'runtaskhashes' in a_data and 'runtaskhashes' in b_data:
@@ -605,7 +512,7 @@ def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
                     #output.append("Dependency on task %s was replaced by %s with same hash" % (dep, bdep))
                     bdep_found = True
             if not bdep_found:
-                output.append(color_format("{color_title}Dependency on task %s was added{color_default} with hash %s") % (clean_basepath(dep), b[dep]))
+                output.append("Dependency on task %s was added with hash %s" % (clean_basepath(dep), b[dep]))
         if removed:
             for dep in removed:
                 adep_found = False
@@ -615,25 +522,21 @@ def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
                         #output.append("Dependency on task %s was replaced by %s with same hash" % (adep, dep))
                         adep_found = True
                 if not adep_found:
-                    output.append(color_format("{color_title}Dependency on task %s was removed{color_default} with hash %s") % (clean_basepath(dep), a[dep]))
+                    output.append("Dependency on task %s was removed with hash %s" % (clean_basepath(dep), a[dep]))
         if changed:
             for dep in changed:
-                if not collapsed:
-                    output.append(color_format("{color_title}Hash for dependent task %s changed{color_default} from %s to %s") % (clean_basepath(dep), a[dep], b[dep]))
+                output.append("Hash for dependent task %s changed from %s to %s" % (clean_basepath(dep), a[dep], b[dep]))
                 if callable(recursecb):
+                    # If a dependent hash changed, might as well print the line above and then defer to the changes in
+                    # that hash since in all likelyhood, they're the same changes this task also saw.
                     recout = recursecb(dep, a[dep], b[dep])
                     if recout:
-                        if collapsed:
-                            output.extend(recout)
-                        else:
-                            # If a dependent hash changed, might as well print the line above and then defer to the changes in
-                            # that hash since in all likelyhood, they're the same changes this task also saw.
-                            output = [output[-1]] + recout
+                        output = [output[-1]] + recout
 
     a_taint = a_data.get('taint', None)
     b_taint = b_data.get('taint', None)
     if a_taint != b_taint:
-        output.append(color_format("{color_title}Taint (by forced/invalidated task) changed{color_default} from %s to %s") % (a_taint, b_taint))
+        output.append("Taint (by forced/invalidated task) changed from %s to %s" % (a_taint, b_taint))
 
     return output
 
@@ -652,7 +555,7 @@ def calc_basehash(sigdata):
         if val is not None:
             basedata = basedata + str(val)
 
-    return hashlib.sha256(basedata.encode("utf-8")).hexdigest()
+    return hashlib.md5(basedata.encode("utf-8")).hexdigest()
 
 def calc_taskhash(sigdata):
     data = sigdata['basehash']
@@ -670,7 +573,7 @@ def calc_taskhash(sigdata):
     else:
         data = data + sigdata['taint']
 
-    return hashlib.sha256(data.encode("utf-8")).hexdigest()
+    return hashlib.md5(data.encode("utf-8")).hexdigest()
 
 
 def dump_sigfile(a):
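Note: the recurring -/+ pairs in the siggen.py hunks above boil down to two
mechanical differences between the branches. Warrior's datastore calls expand
by default (d.getVar("X")) where morty still passes the explicit expand flag
(d.getVar("X", True)), and warrior computes base and task hashes with SHA-256
where morty uses MD5. A small illustration of the digest difference only; the
input string is a made-up stand-in, not a real signature payload:

    import hashlib

    payload = "do_compile:CFLAGS=-O2"  # illustrative signature input
    new_style = hashlib.sha256(payload.encode("utf-8")).hexdigest()  # warrior
    old_style = hashlib.md5(payload.encode("utf-8")).hexdigest()     # morty
    print(len(new_style), len(old_style))  # 64 vs. 32 hex digits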
bitbake/lib/bb/taskdata.py
@@ -47,7 +47,7 @@ class TaskData:
     """
     BitBake Task Data implementation
     """
-    def __init__(self, abort = True, skiplist = None, allowincomplete = False):
+    def __init__(self, abort = True, tryaltconfigs = False, skiplist = None, allowincomplete = False):
         self.build_targets = {}
         self.run_targets = {}
@@ -66,12 +66,11 @@
         self.failed_fns = []
 
         self.abort = abort
+        self.tryaltconfigs = tryaltconfigs
         self.allowincomplete = allowincomplete
 
         self.skiplist = skiplist
 
-        self.mcdepends = []
-
     def add_tasks(self, fn, dataCache):
         """
         Add tasks for a given fn to the database
@@ -90,26 +89,6 @@
 
         self.add_extra_deps(fn, dataCache)
 
-        def add_mcdepends(task):
-            for dep in task_deps['mcdepends'][task].split():
-                if len(dep.split(':')) != 5:
-                    bb.msg.fatal("TaskData", "Error for %s:%s[%s], multiconfig dependency %s does not contain exactly four ':' characters.\n Task '%s' should be specified in the form 'multiconfig:fromMC:toMC:packagename:task'" % (fn, task, 'mcdepends', dep, 'mcdepends'))
-                if dep not in self.mcdepends:
-                    self.mcdepends.append(dep)
-
-        # Common code for dep_name/depends = 'depends'/idepends and 'rdepends'/irdepends
-        def handle_deps(task, dep_name, depends, seen):
-            if dep_name in task_deps and task in task_deps[dep_name]:
-                ids = []
-                for dep in task_deps[dep_name][task].split():
-                    if dep:
-                        parts = dep.split(":")
-                        if len(parts) != 2:
-                            bb.msg.fatal("TaskData", "Error for %s:%s[%s], dependency %s in '%s' does not contain exactly one ':' character.\n Task '%s' should be specified in the form 'packagename:task'" % (fn, task, dep_name, dep, task_deps[dep_name][task], dep_name))
-                        ids.append((parts[0], parts[1]))
-                        seen(parts[0])
-                depends.extend(ids)
-
         for task in task_deps['tasks']:
 
             tid = "%s:%s" % (fn, task)
@@ -119,19 +98,31 @@
             parentids = []
             for dep in task_deps['parents'][task]:
                 if dep not in task_deps['tasks']:
-                    bb.debug(2, "Not adding dependency of %s on %s since %s does not exist" % (task, dep, dep))
+                    bb.debug(2, "Not adding dependeny of %s on %s since %s does not exist" % (task, dep, dep))
                     continue
                 parentid = "%s:%s" % (fn, dep)
                 parentids.append(parentid)
             self.taskentries[tid].tdepends.extend(parentids)
 
 
             # Touch all intertask dependencies
-            handle_deps(task, 'depends', self.taskentries[tid].idepends, self.seen_build_target)
-            handle_deps(task, 'rdepends', self.taskentries[tid].irdepends, self.seen_run_target)
-
-            if 'mcdepends' in task_deps and task in task_deps['mcdepends']:
-                add_mcdepends(task)
+            if 'depends' in task_deps and task in task_deps['depends']:
+                ids = []
+                for dep in task_deps['depends'][task].split():
+                    if dep:
+                        if ":" not in dep:
+                            bb.msg.fatal("TaskData", "Error for %s, dependency %s does not contain ':' character\n. Task 'depends' should be specified in the form 'packagename:task'" % (fn, dep))
+                        ids.append(((dep.split(":")[0]), dep.split(":")[1]))
+                        self.seen_build_target(dep.split(":")[0])
+                self.taskentries[tid].idepends.extend(ids)
+            if 'rdepends' in task_deps and task in task_deps['rdepends']:
+                ids = []
+                for dep in task_deps['rdepends'][task].split():
+                    if dep:
+                        if ":" not in dep:
+                            bb.msg.fatal("TaskData", "Error for %s, dependency %s does not contain ':' character\n. Task 'rdepends' should be specified in the form 'packagename:task'" % (fn, dep))
+                        ids.append(((dep.split(":")[0]), dep.split(":")[1]))
+                        self.seen_run_target(dep.split(":")[0])
+                self.taskentries[tid].irdepends.extend(ids)
 
         # Work out build dependencies
         if not fn in self.depids:
@@ -550,9 +541,6 @@
                 provmap[name] = provider[0]
         return provmap
 
-    def get_mcdepends(self):
-        return self.mcdepends
-
     def dump_data(self):
         """
         Dump some debug information on the internal data structures
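Note: the taskdata.py hunks above also document the dependency syntax that
add_tasks() validates: 'depends'/'rdepends' entries must take the form
'packagename:task' (exactly one ':'), while warrior's 'mcdepends' entries take
'multiconfig:fromMC:toMC:packagename:task' (exactly four ':'). A quick check
of both forms; the dependency strings are made-up examples, not taken from the
diff:

    dep = "virtual/kernel:do_deploy"        # 'depends'/'rdepends' form
    assert len(dep.split(":")) == 2         # one ':' separator

    mcdep = "multiconfig:musl:glibc:busybox:do_build"  # 'mcdepends' form
    assert len(mcdep.split(":")) == 5       # four ':' separators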
@@ -49,9 +49,6 @@ class ReferenceTest(unittest.TestCase):
    def assertExecs(self, execs):
        self.assertEqual(self.execs, execs)

    def assertContains(self, contains):
        self.assertEqual(self.contains, contains)

class VariableReferenceTest(ReferenceTest):

    def parseExpression(self, exp):
@@ -71,7 +68,7 @@ class VariableReferenceTest(ReferenceTest):

    def test_python_reference(self):
        self.setEmptyVars(["BAR"])
        self.parseExpression("${@d.getVar('BAR') + 'foo'}")
        self.parseExpression("${@bb.data.getVar('BAR', d, True) + 'foo'}")
        self.assertReferences(set(["BAR"]))

class ShellReferenceTest(ReferenceTest):
@@ -123,13 +120,6 @@ ${D}${libdir}/pkgconfig/*.pc
        self.parseExpression("sed -i -e 's:IP{:I${:g' $pc")
        self.assertExecs(set(["sed"]))

    def test_parameter_expansion_modifiers(self):
        # - and + are also valid modifiers for parameter expansion, but are
        # valid characters in bitbake variable names, so are not included here
        for i in ('=', ':-', ':=', '?', ':?', ':+', '#', '%', '##', '%%'):
            name = "foo%sbar" % i
            self.parseExpression("${%s}" % name)
            self.assertNotIn(name, self.references)

    def test_until(self):
        self.parseExpression("until false; do echo true; done")
@@ -211,7 +201,6 @@ class PythonReferenceTest(ReferenceTest):

        self.references = parsedvar.references | parser.references
        self.execs = parser.execs
        self.contains = parser.contains

    @staticmethod
    def indent(value):
@@ -220,17 +209,17 @@ be. These unit tests are testing snippets."""
        return " " + value

    def test_getvar_reference(self):
        self.parseExpression("d.getVar('foo')")
        self.parseExpression("bb.data.getVar('foo', d, True)")
        self.assertReferences(set(["foo"]))
        self.assertExecs(set())

    def test_getvar_computed_reference(self):
        self.parseExpression("d.getVar('f' + 'o' + 'o')")
        self.parseExpression("bb.data.getVar('f' + 'o' + 'o', d, True)")
        self.assertReferences(set())
        self.assertExecs(set())

    def test_getvar_exec_reference(self):
        self.parseExpression("eval('d.getVar(\"foo\")')")
        self.parseExpression("eval('bb.data.getVar(\"foo\", d, True)')")
        self.assertReferences(set())
        self.assertExecs(set(["eval"]))

@@ -276,35 +265,15 @@ be. These unit tests are testing snippets."""
        self.assertExecs(set(["testget"]))
        del self.context["testget"]

    def test_contains(self):
        self.parseExpression('bb.utils.contains("TESTVAR", "one", "true", "false", d)')
        self.assertContains({'TESTVAR': {'one'}})

    def test_contains_multi(self):
        self.parseExpression('bb.utils.contains("TESTVAR", "one two", "true", "false", d)')
        self.assertContains({'TESTVAR': {'one two'}})

    def test_contains_any(self):
        self.parseExpression('bb.utils.contains_any("TESTVAR", "hello", "true", "false", d)')
        self.assertContains({'TESTVAR': {'hello'}})

    def test_contains_any_multi(self):
        self.parseExpression('bb.utils.contains_any("TESTVAR", "one two three", "true", "false", d)')
        self.assertContains({'TESTVAR': {'one', 'two', 'three'}})

    def test_contains_filter(self):
        self.parseExpression('bb.utils.filter("TESTVAR", "hello there world", d)')
        self.assertContains({'TESTVAR': {'hello', 'there', 'world'}})
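
The contains tests above record what the parser tracks, not what the functions return; the semantics being tracked are that bb.utils.contains() only takes the true branch when every checkvalue token is present in the variable, while contains_any() needs just one. A pure-Python re-statement of those checks (illustrative only, not the bb.utils implementation):

def contains(value, checkvalues):
    # All checkvalue tokens must appear in the whitespace-split value.
    return set(checkvalues.split()).issubset(value.split())

def contains_any(value, checkvalues):
    # At least one checkvalue token must appear.
    return bool(set(checkvalues.split()) & set(value.split()))

assert contains("one two three", "one two")
assert not contains("one three", "one two")
assert contains_any("one three", "one two")
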
class DependencyReferenceTest(ReferenceTest):

    pydata = """
d.getVar('somevar')
bb.data.getVar('somevar', d, True)
def test(d):
    foo = 'bar %s' % 'foo'
def test2(d):
    d.getVar(foo)
    d.getVar(foo, True)
d.getVar('bar', False)
test2(d)

@@ -316,9 +285,9 @@ def a():

test(d)

d.expand(d.getVar("something", False))
d.expand("${inexpand} somethingelse")
d.getVar(a(), False)
bb.data.expand(bb.data.getVar("something", False, d), d)
bb.data.expand("${inexpand} somethingelse", d)
bb.data.getVar(a(), d, False)
"""

    def test_python(self):
@@ -401,30 +370,6 @@ esac

        self.assertEqual(deps, set(["oe_libinstall"]))

    def test_contains_vardeps(self):
        expr = '${@bb.utils.filter("TESTVAR", "somevalue anothervalue", d)} \
${@bb.utils.contains("TESTVAR", "testval testval2", "yetanothervalue", "", d)} \
${@bb.utils.contains("TESTVAR", "testval2 testval3", "blah", "", d)} \
${@bb.utils.contains_any("TESTVAR", "testval2 testval3", "lastone", "", d)}'
        parsedvar = self.d.expandWithRefs(expr, None)
        # Check contains
        self.assertEqual(parsedvar.contains, {'TESTVAR': {'testval2 testval3', 'anothervalue', 'somevalue', 'testval testval2', 'testval2', 'testval3'}})
        # Check dependencies
        self.d.setVar('ANOTHERVAR', expr)
        self.d.setVar('TESTVAR', 'anothervalue testval testval2')
        deps, values = bb.data.build_dependencies("ANOTHERVAR", set(self.d.keys()), set(), set(), self.d)
        self.assertEqual(sorted(values.splitlines()),
                         sorted([expr,
                                 'TESTVAR{anothervalue} = Set',
                                 'TESTVAR{somevalue} = Unset',
                                 'TESTVAR{testval testval2} = Set',
                                 'TESTVAR{testval2 testval3} = Unset',
                                 'TESTVAR{testval2} = Set',
                                 'TESTVAR{testval3} = Unset'
                                 ]))
        # Check final value
        self.assertEqual(self.d.getVar('ANOTHERVAR').split(), ['anothervalue', 'yetanothervalue', 'lastone'])

    #Currently no wildcard support
    #def test_vardeps_wildcards(self):
    #    self.d.setVar("oe_libinstall", "echo test")
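
The Set/Unset strings expected above follow from plain set membership against the final TESTVAR value ('anothervalue testval testval2'): contains() entries record the whole checkvalues string and are Set only when every token is present, while contains_any() records one entry per token, which is why 'testval2' is Set but 'testval3' is Unset. A quick hand-check of that reasoning, in plain Python independent of bb.utils:

# Reproduces the Set/Unset expectations from test_contains_vardeps.
testvar = "anothervalue testval testval2".split()
for checkvalues, expected in [("anothervalue", "Set"),
                              ("somevalue", "Unset"),
                              ("testval testval2", "Set"),
                              ("testval2 testval3", "Unset"),
                              ("testval2", "Set"),
                              ("testval3", "Unset")]:
    status = "Set" if all(v in testvar for v in checkvalues.split()) else "Unset"
    assert status == expected, checkvalues
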
@@ -1,83 +0,0 @@
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# BitBake Tests for cooker.py
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#

import unittest
import tempfile
import os
import bb, bb.cooker
import re
import logging

# Cooker tests
class CookerTest(unittest.TestCase):
    def setUp(self):
        # At least one variable needs to be set
        self.d = bb.data.init()
        topdir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "testdata/cooker")
        self.d.setVar('TOPDIR', topdir)

    def test_CookerCollectFiles_sublayers(self):
        '''Test that a sublayer of an existing layer does not trigger
           No bb files matched ...'''

        def append_collection(topdir, path, d):
            collection = path.split('/')[-1]
            pattern = "^" + topdir + "/" + path + "/"
            regex = re.compile(pattern)
            priority = 5

            d.setVar('BBFILE_COLLECTIONS', (d.getVar('BBFILE_COLLECTIONS') or "") + " " + collection)
            d.setVar('BBFILE_PATTERN_%s' % (collection), pattern)
            d.setVar('BBFILE_PRIORITY_%s' % (collection), priority)

            return (collection, pattern, regex, priority)

        topdir = self.d.getVar("TOPDIR")

        # Priorities: list of (collection, pattern, regex, priority)
        bbfile_config_priorities = []
        # Order is important for this test, shortest to longest is typical failure case
        bbfile_config_priorities.append( append_collection(topdir, 'first', self.d) )
        bbfile_config_priorities.append( append_collection(topdir, 'second', self.d) )
        bbfile_config_priorities.append( append_collection(topdir, 'second/third', self.d) )

        pkgfns = [ topdir + '/first/recipes/sample1_1.0.bb',
                   topdir + '/second/recipes/sample2_1.0.bb',
                   topdir + '/second/third/recipes/sample3_1.0.bb' ]

        class LogHandler(logging.Handler):
            def __init__(self):
                logging.Handler.__init__(self)
                self.logdata = []

            def emit(self, record):
                self.logdata.append(record.getMessage())

        # Move cooker to use my special logging
        logger = bb.cooker.logger
        log_handler = LogHandler()
        logger.addHandler(log_handler)
        collection = bb.cooker.CookerCollectFiles(bbfile_config_priorities)
        collection.collection_priorities(pkgfns, self.d)
        logger.removeHandler(log_handler)

        # Should be empty (no generated messages)
        expected = []

        self.assertEqual(log_handler.logdata, expected)
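
The sublayer scenario in the deleted test boils down to regex matching: each recipe must match the pattern of at least one collection, and files under 'second/third' also match the shorter 'second' pattern, so no "No bb files matched" warning should be logged. A standalone sketch of that matching condition (illustrative, not the CookerCollectFiles code; paths are made up):

import re

# Check that every recipe path matches at least one collection pattern,
# the condition the test above asserts indirectly via the empty log.
topdir = "/topdir"
patterns = [re.compile("^" + topdir + "/" + p + "/")
            for p in ("first", "second", "second/third")]
recipes = [topdir + "/first/recipes/sample1_1.0.bb",
           topdir + "/second/third/recipes/sample3_1.0.bb"]
for fn in recipes:
    assert any(p.match(fn) for p in patterns), "No bb files matched " + fn
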
@@ -77,13 +77,13 @@ class DataExpansions(unittest.TestCase):
        self.assertEqual(str(val), "boo value_of_foo")

    def test_python_snippet_getvar(self):
        val = self.d.expand("${@d.getVar('foo') + ' ${bar}'}")
        val = self.d.expand("${@d.getVar('foo', True) + ' ${bar}'}")
        self.assertEqual(str(val), "value_of_foo value_of_bar")

    def test_python_unexpanded(self):
        self.d.setVar("bar", "${unsetvar}")
        val = self.d.expand("${@d.getVar('foo') + ' ${bar}'}")
        self.assertEqual(str(val), "${@d.getVar('foo') + ' ${unsetvar}'}")
        val = self.d.expand("${@d.getVar('foo', True) + ' ${bar}'}")
        self.assertEqual(str(val), "${@d.getVar('foo', True) + ' ${unsetvar}'}")

    def test_python_snippet_syntax_error(self):
        self.d.setVar("FOO", "${@foo = 5}")
@@ -99,7 +99,7 @@ class DataExpansions(unittest.TestCase):
        self.assertRaises(bb.data_smart.ExpansionError, self.d.getVar, "FOO", True)

    def test_value_containing_value(self):
        val = self.d.expand("${@d.getVar('foo') + ' ${bar}'}")
        val = self.d.expand("${@d.getVar('foo', True) + ' ${bar}'}")
        self.assertEqual(str(val), "value_of_foo value_of_bar")

    def test_reference_undefined_var(self):
@@ -109,7 +109,7 @@ class DataExpansions(unittest.TestCase):
    def test_double_reference(self):
        self.d.setVar("BAR", "bar value")
        self.d.setVar("FOO", "${BAR} foo ${BAR}")
        val = self.d.getVar("FOO")
        val = self.d.getVar("FOO", True)
        self.assertEqual(str(val), "bar value foo bar value")

    def test_direct_recursion(self):
@@ -129,12 +129,12 @@ class DataExpansions(unittest.TestCase):

    def test_incomplete_varexp_single_quotes(self):
        self.d.setVar("FOO", "sed -i -e 's:IP{:I${:g' $pc")
        val = self.d.getVar("FOO")
        val = self.d.getVar("FOO", True)
        self.assertEqual(str(val), "sed -i -e 's:IP{:I${:g' $pc")

    def test_nonstring(self):
        self.d.setVar("TEST", 5)
        val = self.d.getVar("TEST")
        val = self.d.getVar("TEST", True)
        self.assertEqual(str(val), "5")

    def test_rename(self):
@@ -234,19 +234,19 @@ class TestConcat(unittest.TestCase):
    def test_prepend(self):
        self.d.setVar("TEST", "${VAL}")
        self.d.prependVar("TEST", "${FOO}:")
        self.assertEqual(self.d.getVar("TEST"), "foo:val")
        self.assertEqual(self.d.getVar("TEST", True), "foo:val")

    def test_append(self):
        self.d.setVar("TEST", "${VAL}")
        self.d.appendVar("TEST", ":${BAR}")
        self.assertEqual(self.d.getVar("TEST"), "val:bar")
        self.assertEqual(self.d.getVar("TEST", True), "val:bar")

    def test_multiple_append(self):
        self.d.setVar("TEST", "${VAL}")
        self.d.prependVar("TEST", "${FOO}:")
        self.d.appendVar("TEST", ":val2")
        self.d.appendVar("TEST", ":${BAR}")
        self.assertEqual(self.d.getVar("TEST"), "foo:val:val2:bar")
        self.assertEqual(self.d.getVar("TEST", True), "foo:val:val2:bar")

class TestConcatOverride(unittest.TestCase):
    def setUp(self):
@@ -258,78 +258,62 @@ class TestConcatOverride(unittest.TestCase):
    def test_prepend(self):
        self.d.setVar("TEST", "${VAL}")
        self.d.setVar("TEST_prepend", "${FOO}:")
        self.assertEqual(self.d.getVar("TEST"), "foo:val")
        bb.data.update_data(self.d)
        self.assertEqual(self.d.getVar("TEST", True), "foo:val")

    def test_append(self):
        self.d.setVar("TEST", "${VAL}")
        self.d.setVar("TEST_append", ":${BAR}")
        self.assertEqual(self.d.getVar("TEST"), "val:bar")
        bb.data.update_data(self.d)
        self.assertEqual(self.d.getVar("TEST", True), "val:bar")

    def test_multiple_append(self):
        self.d.setVar("TEST", "${VAL}")
        self.d.setVar("TEST_prepend", "${FOO}:")
        self.d.setVar("TEST_append", ":val2")
        self.d.setVar("TEST_append", ":${BAR}")
        self.assertEqual(self.d.getVar("TEST"), "foo:val:val2:bar")
        bb.data.update_data(self.d)
        self.assertEqual(self.d.getVar("TEST", True), "foo:val:val2:bar")

    def test_append_unset(self):
        self.d.setVar("TEST_prepend", "${FOO}:")
        self.d.setVar("TEST_append", ":val2")
        self.d.setVar("TEST_append", ":${BAR}")
        self.assertEqual(self.d.getVar("TEST"), "foo::val2:bar")
        bb.data.update_data(self.d)
        self.assertEqual(self.d.getVar("TEST", True), "foo::val2:bar")

    def test_remove(self):
        self.d.setVar("TEST", "${VAL} ${BAR}")
        self.d.setVar("TEST_remove", "val")
        self.assertEqual(self.d.getVar("TEST"), " bar")

    def test_remove_cleared(self):
        self.d.setVar("TEST", "${VAL} ${BAR}")
        self.d.setVar("TEST_remove", "val")
        self.d.setVar("TEST", "${VAL} ${BAR}")
        self.assertEqual(self.d.getVar("TEST"), "val bar")

    # Ensure the value is unchanged if we have an inactive remove override
    # (including that whitespace is preserved)
    def test_remove_inactive_override(self):
        self.d.setVar("TEST", "${VAL} ${BAR} 123")
        self.d.setVar("TEST_remove_inactiveoverride", "val")
        self.assertEqual(self.d.getVar("TEST"), "val bar 123")
        bb.data.update_data(self.d)
        self.assertEqual(self.d.getVar("TEST", True), "bar")

    def test_doubleref_remove(self):
        self.d.setVar("TEST", "${VAL} ${BAR}")
        self.d.setVar("TEST_remove", "val")
        self.d.setVar("TEST_TEST", "${TEST} ${TEST}")
        self.assertEqual(self.d.getVar("TEST_TEST"), " bar bar")
        bb.data.update_data(self.d)
        self.assertEqual(self.d.getVar("TEST_TEST", True), "bar bar")

    def test_empty_remove(self):
        self.d.setVar("TEST", "")
        self.d.setVar("TEST_remove", "val")
        self.assertEqual(self.d.getVar("TEST"), "")
        bb.data.update_data(self.d)
        self.assertEqual(self.d.getVar("TEST", True), "")

    def test_remove_expansion(self):
        self.d.setVar("BAR", "Z")
        self.d.setVar("TEST", "${BAR}/X Y")
        self.d.setVar("TEST_remove", "${BAR}/X")
        self.assertEqual(self.d.getVar("TEST"), " Y")
        bb.data.update_data(self.d)
        self.assertEqual(self.d.getVar("TEST", True), "Y")

    def test_remove_expansion_items(self):
        self.d.setVar("TEST", "A B C D")
        self.d.setVar("BAR", "B D")
        self.d.setVar("TEST_remove", "${BAR}")
        self.assertEqual(self.d.getVar("TEST"), "A C ")

    def test_remove_preserve_whitespace(self):
        # When the removal isn't active, the original value should be preserved
        self.d.setVar("TEST", " A B")
        self.d.setVar("TEST_remove", "C")
        self.assertEqual(self.d.getVar("TEST"), " A B")

    def test_remove_preserve_whitespace2(self):
        # When the removal is active preserve the whitespace
        self.d.setVar("TEST", " A B")
        self.d.setVar("TEST_remove", "B")
        self.assertEqual(self.d.getVar("TEST"), " A ")
        bb.data.update_data(self.d)
        self.assertEqual(self.d.getVar("TEST", True), "A C")
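
The _remove expectations above are consistent with a token-blanking model: the value is split on whitespace, matching tokens are dropped, and the separators around them survive, which is why "val bar" minus "val" yields " bar" with a leading space. A minimal sketch of that behaviour (an assumption inferred from the expected values above, not the data_smart implementation):

import re

def apply_remove(value, removals):
    # Blank out removed tokens but keep their whitespace separators.
    removeset = set(removals.split())
    return "".join("" if tok in removeset else tok
                   for tok in re.split(r"(\s)", value))

assert apply_remove("val bar", "val") == " bar"
assert apply_remove(" A B", "B") == " A "
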
class TestOverrides(unittest.TestCase):
    def setUp(self):
@@ -338,71 +322,60 @@ class TestOverrides(unittest.TestCase):
        self.d.setVar("TEST", "testvalue")

    def test_no_override(self):
        self.assertEqual(self.d.getVar("TEST"), "testvalue")
        bb.data.update_data(self.d)
        self.assertEqual(self.d.getVar("TEST", True), "testvalue")

    def test_one_override(self):
        self.d.setVar("TEST_bar", "testvalue2")
        self.assertEqual(self.d.getVar("TEST"), "testvalue2")
        bb.data.update_data(self.d)
        self.assertEqual(self.d.getVar("TEST", True), "testvalue2")

    def test_one_override_unset(self):
        self.d.setVar("TEST2_bar", "testvalue2")

        self.assertEqual(self.d.getVar("TEST2"), "testvalue2")
        bb.data.update_data(self.d)
        self.assertEqual(self.d.getVar("TEST2", True), "testvalue2")
        self.assertCountEqual(list(self.d.keys()), ['TEST', 'TEST2', 'OVERRIDES', 'TEST2_bar'])

    def test_multiple_override(self):
        self.d.setVar("TEST_bar", "testvalue2")
        self.d.setVar("TEST_local", "testvalue3")
        self.d.setVar("TEST_foo", "testvalue4")
        self.assertEqual(self.d.getVar("TEST"), "testvalue3")
        bb.data.update_data(self.d)
        self.assertEqual(self.d.getVar("TEST", True), "testvalue3")
        self.assertCountEqual(list(self.d.keys()), ['TEST', 'TEST_foo', 'OVERRIDES', 'TEST_bar', 'TEST_local'])

    def test_multiple_combined_overrides(self):
        self.d.setVar("TEST_local_foo_bar", "testvalue3")
        self.assertEqual(self.d.getVar("TEST"), "testvalue3")
        bb.data.update_data(self.d)
        self.assertEqual(self.d.getVar("TEST", True), "testvalue3")

    def test_multiple_overrides_unset(self):
        self.d.setVar("TEST2_local_foo_bar", "testvalue3")
        self.assertEqual(self.d.getVar("TEST2"), "testvalue3")
        bb.data.update_data(self.d)
        self.assertEqual(self.d.getVar("TEST2", True), "testvalue3")

    def test_keyexpansion_override(self):
        self.d.setVar("LOCAL", "local")
        self.d.setVar("TEST_bar", "testvalue2")
        self.d.setVar("TEST_${LOCAL}", "testvalue3")
        self.d.setVar("TEST_foo", "testvalue4")
        bb.data.update_data(self.d)
        bb.data.expandKeys(self.d)
        self.assertEqual(self.d.getVar("TEST"), "testvalue3")
        self.assertEqual(self.d.getVar("TEST", True), "testvalue3")

    def test_rename_override(self):
        self.d.setVar("ALTERNATIVE_ncurses-tools_class-target", "a")
        self.d.setVar("OVERRIDES", "class-target")
        bb.data.update_data(self.d)
        self.d.renameVar("ALTERNATIVE_ncurses-tools", "ALTERNATIVE_lib32-ncurses-tools")
        self.assertEqual(self.d.getVar("ALTERNATIVE_lib32-ncurses-tools"), "a")
        self.assertEqual(self.d.getVar("ALTERNATIVE_lib32-ncurses-tools", True), "a")

    def test_underscore_override(self):
        self.d.setVar("TEST_bar", "testvalue2")
        self.d.setVar("TEST_some_val", "testvalue3")
        self.d.setVar("TEST_foo", "testvalue4")
        self.d.setVar("OVERRIDES", "foo:bar:some_val")
        self.assertEqual(self.d.getVar("TEST"), "testvalue3")

    def test_remove_with_override(self):
        self.d.setVar("TEST_bar", "testvalue2")
        self.d.setVar("TEST_some_val", "testvalue3 testvalue5")
        self.d.setVar("TEST_some_val_remove", "testvalue3")
        self.d.setVar("TEST_foo", "testvalue4")
        self.d.setVar("OVERRIDES", "foo:bar:some_val")
        self.assertEqual(self.d.getVar("TEST"), " testvalue5")

    # Test an override with _<numeric> in it based on a real world OE issue
    def test_underscore_override(self):
        self.d.setVar("TARGET_ARCH", "x86_64")
        self.d.setVar("PN", "test-${TARGET_ARCH}")
        self.d.setVar("VERSION", "1")
        self.d.setVar("VERSION_pn-test-${TARGET_ARCH}", "2")
        self.d.setVar("OVERRIDES", "pn-${PN}")
        bb.data.expandKeys(self.d)
        self.assertEqual(self.d.getVar("VERSION"), "2")
        self.assertEqual(self.d.getVar("TEST", True), "testvalue3")
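
test_multiple_override above encodes the precedence rule these tests depend on: when several TEST_<override> variants are applicable, the winner is the one whose override appears last in OVERRIDES ('local' here), not the one that was set last. A small sketch of that resolution order (illustrative, not the data_smart lookup):

# Rightmost applicable entry in OVERRIDES wins, matching
# test_multiple_override ("local" beats "bar" and "foo").
overrides = "foo:bar:local".split(":")
variants = {"bar": "testvalue2", "local": "testvalue3", "foo": "testvalue4"}
value = "testvalue"
for o in overrides:
    if o in variants:
        value = variants[o]
assert value == "testvalue3"
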
class TestKeyExpansion(unittest.TestCase):
    def setUp(self):
@@ -416,7 +389,7 @@ class TestKeyExpansion(unittest.TestCase):
        with LogRecord() as logs:
            bb.data.expandKeys(self.d)
            self.assertTrue(logContains("Variable key VAL_${FOO} (A) replaces original key VAL_foo (B)", logs))
        self.assertEqual(self.d.getVar("VAL_foo"), "A")
        self.assertEqual(self.d.getVar("VAL_foo", True), "A")

class TestFlags(unittest.TestCase):
    def setUp(self):
@@ -471,215 +444,3 @@ class Contains(unittest.TestCase):

        self.assertFalse(bb.utils.contains_any("SOMEFLAG", "x", True, False, self.d))
        self.assertFalse(bb.utils.contains_any("SOMEFLAG", "x y z", True, False, self.d))


class TaskHash(unittest.TestCase):
    def test_taskhashes(self):
        def gettask_bashhash(taskname, d):
            tasklist, gendeps, lookupcache = bb.data.generate_dependencies(d)
            taskdeps, basehash = bb.data.generate_dependency_hash(tasklist, gendeps, lookupcache, set(), "somefile")
            bb.warn(str(lookupcache))
            return basehash["somefile." + taskname]

        d = bb.data.init()
        d.setVar("__BBTASKS", ["mytask"])
        d.setVar("__exportlist", [])
        d.setVar("mytask", "${MYCOMMAND}")
        d.setVar("MYCOMMAND", "${VAR}; foo; bar; exit 0")
        d.setVar("VAR", "val")
        orighash = gettask_bashhash("mytask", d)

        # Changing a variable should change the hash
        d.setVar("VAR", "val2")
        nexthash = gettask_bashhash("mytask", d)
        self.assertNotEqual(orighash, nexthash)

        d.setVar("VAR", "val")
        # Adding an inactive removal shouldn't change the hash
        d.setVar("BAR", "notbar")
        d.setVar("MYCOMMAND_remove", "${BAR}")
        nexthash = gettask_bashhash("mytask", d)
        self.assertEqual(orighash, nexthash)

        # Adding an active removal should change the hash
        d.setVar("BAR", "bar;")
        nexthash = gettask_bashhash("mytask", d)
        self.assertNotEqual(orighash, nexthash)

        # Setup an inactive contains()
        d.setVar("VAR", "${@bb.utils.contains('VAR2', 'A', 'val', '', d)}")
        orighash = gettask_bashhash("mytask", d)

        # Activate the contains() and the hash should change
        d.setVar("VAR2", "A")
        nexthash = gettask_bashhash("mytask", d)
        self.assertNotEqual(orighash, nexthash)

        # The contains should be inactive but even though VAR2 has a
        # different value the hash should match the original
        d.setVar("VAR2", "B")
        nexthash = gettask_bashhash("mytask", d)
        self.assertEqual(orighash, nexthash)
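
The active/inactive removal distinction in TaskHash is purely token-based: MYCOMMAND expands to 'val; foo; bar; exit 0', and a _remove of '${BAR}' only becomes active once BAR expands to a token that actually occurs in that string ('bar;' does, 'notbar' does not), which is exactly when the basehash changes. Re-stated in plain Python:

# Why "notbar" leaves the hash alone but "bar;" changes it: only a
# token present in the whitespace-split command is an active removal.
command = "val; foo; bar; exit 0"
assert "notbar" not in command.split()   # inactive: value and hash unchanged
assert "bar;" in command.split()         # active: value changes, so hash changes
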
class Serialize(unittest.TestCase):

    def test_serialize(self):
        import tempfile
        import pickle
        d = bb.data.init()
        d.enableTracking()
        d.setVar('HELLO', 'world')
        d.setVarFlag('HELLO', 'other', 'planet')
        with tempfile.NamedTemporaryFile(delete=False) as tmpfile:
            tmpfilename = tmpfile.name
            pickle.dump(d, tmpfile)

        with open(tmpfilename, 'rb') as f:
            newd = pickle.load(f)

        os.remove(tmpfilename)

        self.assertEqual(d, newd)
        self.assertEqual(newd.getVar('HELLO'), 'world')
        self.assertEqual(newd.getVarFlag('HELLO', 'other'), 'planet')


# Remote datastore tests
# These really only test the interface, since in actual usage we have a
# tinfoil connector that does everything over RPC, and this doesn't test
# that.

class TestConnector:
    d = None
    def __init__(self, d):
        self.d = d
    def getVar(self, name):
        return self.d._findVar(name)
    def getKeys(self):
        return set(self.d.keys())
    def getVarHistory(self, name):
        return self.d.varhistory.variable(name)
    def expandPythonRef(self, varname, expr, d):
        localdata = self.d.createCopy()
        for key in d.localkeys():
            localdata.setVar(d.getVar(key))
        varparse = bb.data_smart.VariableParse(varname, localdata)
        return varparse.python_sub(expr)
    def setVar(self, name, value):
        self.d.setVar(name, value)
    def setVarFlag(self, name, flag, value):
        self.d.setVarFlag(name, flag, value)
    def delVar(self, name):
        self.d.delVar(name)
        return False
    def delVarFlag(self, name, flag):
        self.d.delVarFlag(name, flag)
        return False
    def renameVar(self, name, newname):
        self.d.renameVar(name, newname)
        return False

class Remote(unittest.TestCase):
    def test_remote(self):

        d1 = bb.data.init()
        d1.enableTracking()
        d2 = bb.data.init()
        d2.enableTracking()
        connector = TestConnector(d1)

        d2.setVar('_remote_data', connector)

        d1.setVar('HELLO', 'world')
        d1.setVarFlag('OTHER', 'flagname', 'flagvalue')
        self.assertEqual(d2.getVar('HELLO'), 'world')
        self.assertEqual(d2.expand('${HELLO}'), 'world')
        self.assertEqual(d2.expand('${@d.getVar("HELLO")}'), 'world')
        self.assertIn('flagname', d2.getVarFlags('OTHER'))
        self.assertEqual(d2.getVarFlag('OTHER', 'flagname'), 'flagvalue')
        self.assertEqual(d1.varhistory.variable('HELLO'), d2.varhistory.variable('HELLO'))
        # Test setVar on client side affects server
        d2.setVar('HELLO', 'other-world')
        self.assertEqual(d1.getVar('HELLO'), 'other-world')
        # Test setVarFlag on client side affects server
        d2.setVarFlag('HELLO', 'flagname', 'flagvalue')
        self.assertEqual(d1.getVarFlag('HELLO', 'flagname'), 'flagvalue')
        # Test client side data is incorporated in python expansion (which is done on server)
        d2.setVar('FOO', 'bar')
        self.assertEqual(d2.expand('${@d.getVar("FOO")}'), 'bar')
        # Test overrides work
        d1.setVar('FOO_test', 'baz')
        d1.appendVar('OVERRIDES', ':test')
        self.assertEqual(d2.getVar('FOO'), 'baz')


# Remote equivalents of local test classes
# Note that these aren't perfect since we only test in one direction

class RemoteDataExpansions(DataExpansions):
    def setUp(self):
        self.d1 = bb.data.init()
        self.d = bb.data.init()
        self.d1["foo"] = "value_of_foo"
        self.d1["bar"] = "value_of_bar"
        self.d1["value_of_foo"] = "value_of_'value_of_foo'"
        connector = TestConnector(self.d1)
        self.d.setVar('_remote_data', connector)

class TestRemoteNestedExpansions(TestNestedExpansions):
    def setUp(self):
        self.d1 = bb.data.init()
        self.d = bb.data.init()
        self.d1["foo"] = "foo"
        self.d1["bar"] = "bar"
        self.d1["value_of_foobar"] = "187"
        connector = TestConnector(self.d1)
        self.d.setVar('_remote_data', connector)

class TestRemoteConcat(TestConcat):
    def setUp(self):
        self.d1 = bb.data.init()
        self.d = bb.data.init()
        self.d1.setVar("FOO", "foo")
        self.d1.setVar("VAL", "val")
        self.d1.setVar("BAR", "bar")
        connector = TestConnector(self.d1)
        self.d.setVar('_remote_data', connector)

class TestRemoteConcatOverride(TestConcatOverride):
    def setUp(self):
        self.d1 = bb.data.init()
        self.d = bb.data.init()
        self.d1.setVar("FOO", "foo")
        self.d1.setVar("VAL", "val")
        self.d1.setVar("BAR", "bar")
        connector = TestConnector(self.d1)
        self.d.setVar('_remote_data', connector)

class TestRemoteOverrides(TestOverrides):
    def setUp(self):
        self.d1 = bb.data.init()
        self.d = bb.data.init()
        self.d1.setVar("OVERRIDES", "foo:bar:local")
        self.d1.setVar("TEST", "testvalue")
        connector = TestConnector(self.d1)
        self.d.setVar('_remote_data', connector)

class TestRemoteKeyExpansion(TestKeyExpansion):
    def setUp(self):
        self.d1 = bb.data.init()
        self.d = bb.data.init()
        self.d1.setVar("FOO", "foo")
        self.d1.setVar("BAR", "foo")
        connector = TestConnector(self.d1)
        self.d.setVar('_remote_data', connector)

class TestRemoteFlags(TestFlags):
    def setUp(self):
        self.d1 = bb.data.init()
        self.d = bb.data.init()
        self.d1.setVar("foo", "value of foo")
        self.d1.setVarFlag("foo", "flag1", "value of flag1")
        self.d1.setVarFlag("foo", "flag2", "value of flag2")
        connector = TestConnector(self.d1)
        self.d.setVar('_remote_data', connector)

@@ -1,986 +0,0 @@
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# BitBake Tests for the Event implementation (event.py)
#
# Copyright (C) 2017 Intel Corporation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#

import unittest
import bb
import logging
import bb.compat
import bb.event
import importlib
import threading
import time
import pickle
from unittest.mock import Mock
from unittest.mock import call
from bb.msg import BBLogFormatter


class EventQueueStubBase(object):
    """ Base class for EventQueueStub classes """
    def __init__(self):
        self.event_calls = []
        return

    def _store_event_data_string(self, event):
        if isinstance(event, logging.LogRecord):
            formatter = BBLogFormatter("%(levelname)s: %(message)s")
            self.event_calls.append(formatter.format(event))
        else:
            self.event_calls.append(bb.event.getName(event))
        return


class EventQueueStub(EventQueueStubBase):
    """ Class used as specification for UI event handler queue stub objects """
    def __init__(self):
        super(EventQueueStub, self).__init__()

    def send(self, event):
        super(EventQueueStub, self)._store_event_data_string(event)


class PickleEventQueueStub(EventQueueStubBase):
    """ Class used as specification for UI event handler queue stub objects
        with sendpickle method """
    def __init__(self):
        super(PickleEventQueueStub, self).__init__()

    def sendpickle(self, pickled_event):
        event = pickle.loads(pickled_event)
        super(PickleEventQueueStub, self)._store_event_data_string(event)


class UIClientStub(object):
    """ Class used as specification for UI event handler stub objects """
    def __init__(self):
        self.event = None


class EventHandlingTest(unittest.TestCase):
    """ Event handling test class """


    def setUp(self):
        self._test_process = Mock()
        ui_client1 = UIClientStub()
        ui_client2 = UIClientStub()
        self._test_ui1 = Mock(wraps=ui_client1)
        self._test_ui2 = Mock(wraps=ui_client2)
        importlib.reload(bb.event)

    def _create_test_handlers(self):
        """ Method used to create a test handler ordered dictionary """
        test_handlers = bb.compat.OrderedDict()
        test_handlers["handler1"] = self._test_process.handler1
        test_handlers["handler2"] = self._test_process.handler2
        return test_handlers

    def test_class_handlers(self):
        """ Test set_class_handlers and get_class_handlers methods """
        test_handlers = self._create_test_handlers()
        bb.event.set_class_handlers(test_handlers)
        self.assertEqual(test_handlers,
                         bb.event.get_class_handlers())

    def test_handlers(self):
        """ Test set_handlers and get_handlers """
        test_handlers = self._create_test_handlers()
        bb.event.set_handlers(test_handlers)
        self.assertEqual(test_handlers,
                         bb.event.get_handlers())

    def test_clean_class_handlers(self):
        """ Test clean_class_handlers method """
        cleanDict = bb.compat.OrderedDict()
        self.assertEqual(cleanDict,
                         bb.event.clean_class_handlers())

    def test_register(self):
        """ Test register method for class handlers """
        result = bb.event.register("handler", self._test_process.handler)
        self.assertEqual(result, bb.event.Registered)
        handlers_dict = bb.event.get_class_handlers()
        self.assertIn("handler", handlers_dict)

    def test_already_registered(self):
        """ Test detection of an already registered class handler """
        bb.event.register("handler", self._test_process.handler)
        handlers_dict = bb.event.get_class_handlers()
        self.assertIn("handler", handlers_dict)
        result = bb.event.register("handler", self._test_process.handler)
        self.assertEqual(result, bb.event.AlreadyRegistered)

    def test_register_from_string(self):
        """ Test register method receiving code in string """
        result = bb.event.register("string_handler", " return True")
        self.assertEqual(result, bb.event.Registered)
        handlers_dict = bb.event.get_class_handlers()
        self.assertIn("string_handler", handlers_dict)

    def test_register_with_mask(self):
        """ Test register method with event masking """
        mask = ["bb.event.OperationStarted",
                "bb.event.OperationCompleted"]
        result = bb.event.register("event_handler",
                                   self._test_process.event_handler,
                                   mask)
        self.assertEqual(result, bb.event.Registered)
        handlers_dict = bb.event.get_class_handlers()
        self.assertIn("event_handler", handlers_dict)

    def test_remove(self):
        """ Test remove method for class handlers """
        test_handlers = self._create_test_handlers()
        bb.event.set_class_handlers(test_handlers)
        count = len(test_handlers)
        bb.event.remove("handler1", None)
        test_handlers = bb.event.get_class_handlers()
        self.assertEqual(len(test_handlers), count - 1)
        with self.assertRaises(KeyError):
            bb.event.remove("handler1", None)

    def test_execute_handler(self):
        """ Test execute_handler method for class handlers """
        mask = ["bb.event.OperationProgress"]
        result = bb.event.register("event_handler",
                                   self._test_process.event_handler,
                                   mask)
        self.assertEqual(result, bb.event.Registered)
        event = bb.event.OperationProgress(current=10, total=100)
        bb.event.execute_handler("event_handler",
                                 self._test_process.event_handler,
                                 event,
                                 None)
        self._test_process.event_handler.assert_called_once_with(event)

    def test_fire_class_handlers(self):
        """ Test fire_class_handlers method """
        mask = ["bb.event.OperationStarted"]
        result = bb.event.register("event_handler1",
                                   self._test_process.event_handler1,
                                   mask)
        self.assertEqual(result, bb.event.Registered)
        result = bb.event.register("event_handler2",
                                   self._test_process.event_handler2,
                                   "*")
        self.assertEqual(result, bb.event.Registered)
        event1 = bb.event.OperationStarted()
        event2 = bb.event.OperationCompleted(total=123)
        bb.event.fire_class_handlers(event1, None)
        bb.event.fire_class_handlers(event2, None)
        bb.event.fire_class_handlers(event2, None)
        expected_event_handler1 = [call(event1)]
        expected_event_handler2 = [call(event1),
                                   call(event2),
                                   call(event2)]
        self.assertEqual(self._test_process.event_handler1.call_args_list,
                         expected_event_handler1)
        self.assertEqual(self._test_process.event_handler2.call_args_list,
                         expected_event_handler2)

    def test_class_handler_filters(self):
        """ Test filters for class handlers """
        mask = ["bb.event.OperationStarted"]
        result = bb.event.register("event_handler1",
                                   self._test_process.event_handler1,
                                   mask)
        self.assertEqual(result, bb.event.Registered)
        result = bb.event.register("event_handler2",
                                   self._test_process.event_handler2,
                                   "*")
        self.assertEqual(result, bb.event.Registered)
        bb.event.set_eventfilter(
            lambda name, handler, event, d :
            name == 'event_handler2' and
            bb.event.getName(event) == "OperationStarted")
        event1 = bb.event.OperationStarted()
        event2 = bb.event.OperationCompleted(total=123)
        bb.event.fire_class_handlers(event1, None)
        bb.event.fire_class_handlers(event2, None)
        bb.event.fire_class_handlers(event2, None)
        expected_event_handler1 = []
        expected_event_handler2 = [call(event1)]
        self.assertEqual(self._test_process.event_handler1.call_args_list,
                         expected_event_handler1)
        self.assertEqual(self._test_process.event_handler2.call_args_list,
                         expected_event_handler2)

    def test_change_handler_event_mapping(self):
        """ Test changing the event mapping for class handlers """
        event1 = bb.event.OperationStarted()
        event2 = bb.event.OperationCompleted(total=123)

        # register handler for all events
        result = bb.event.register("event_handler1",
                                   self._test_process.event_handler1,
                                   "*")
        self.assertEqual(result, bb.event.Registered)
        bb.event.fire_class_handlers(event1, None)
        bb.event.fire_class_handlers(event2, None)
        expected = [call(event1), call(event2)]
        self.assertEqual(self._test_process.event_handler1.call_args_list,
                         expected)

        # unregister handler and register it only for OperationStarted
        bb.event.remove("event_handler1",
                        self._test_process.event_handler1)
        mask = ["bb.event.OperationStarted"]
        result = bb.event.register("event_handler1",
                                   self._test_process.event_handler1,
                                   mask)
        self.assertEqual(result, bb.event.Registered)
        bb.event.fire_class_handlers(event1, None)
        bb.event.fire_class_handlers(event2, None)
        expected = [call(event1), call(event2), call(event1)]
        self.assertEqual(self._test_process.event_handler1.call_args_list,
                         expected)

        # unregister handler and register it only for OperationCompleted
        bb.event.remove("event_handler1",
                        self._test_process.event_handler1)
        mask = ["bb.event.OperationCompleted"]
        result = bb.event.register("event_handler1",
                                   self._test_process.event_handler1,
                                   mask)
        self.assertEqual(result, bb.event.Registered)
        bb.event.fire_class_handlers(event1, None)
        bb.event.fire_class_handlers(event2, None)
        expected = [call(event1), call(event2), call(event1), call(event2)]
        self.assertEqual(self._test_process.event_handler1.call_args_list,
                         expected)

    def test_register_UIHhandler(self):
        """ Test register_UIHhandler method """
        result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
        self.assertEqual(result, 1)

    def test_UIHhandler_already_registered(self):
        """ Test registering a UIHhandler that already exists """
        result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
        self.assertEqual(result, 1)
        result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
        self.assertEqual(result, 2)

    def test_unregister_UIHhandler(self):
        """ Test unregister_UIHhandler method """
        result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
        self.assertEqual(result, 1)
        result = bb.event.unregister_UIHhandler(1)
        self.assertIs(result, None)

    def test_fire_ui_handlers(self):
        """ Test fire_ui_handlers method """
        self._test_ui1.event = Mock(spec_set=EventQueueStub)
        result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
        self.assertEqual(result, 1)
        self._test_ui2.event = Mock(spec_set=PickleEventQueueStub)
        result = bb.event.register_UIHhandler(self._test_ui2, mainui=True)
        self.assertEqual(result, 2)
        event1 = bb.event.OperationStarted()
        bb.event.fire_ui_handlers(event1, None)
        expected = [call(event1)]
        self.assertEqual(self._test_ui1.event.send.call_args_list,
                         expected)
        expected = [call(pickle.dumps(event1))]
        self.assertEqual(self._test_ui2.event.sendpickle.call_args_list,
                         expected)

    def test_ui_handler_mask_filter(self):
        """ Test filters for UI handlers """
        mask = ["bb.event.OperationStarted"]
        debug_domains = {}
        self._test_ui1.event = Mock(spec_set=EventQueueStub)
        result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
        bb.event.set_UIHmask(result, logging.INFO, debug_domains, mask)
        self._test_ui2.event = Mock(spec_set=PickleEventQueueStub)
        result = bb.event.register_UIHhandler(self._test_ui2, mainui=True)
        bb.event.set_UIHmask(result, logging.INFO, debug_domains, mask)

        event1 = bb.event.OperationStarted()
        event2 = bb.event.OperationCompleted(total=1)

        bb.event.fire_ui_handlers(event1, None)
        bb.event.fire_ui_handlers(event2, None)
        expected = [call(event1)]
        self.assertEqual(self._test_ui1.event.send.call_args_list,
                         expected)
        expected = [call(pickle.dumps(event1))]
        self.assertEqual(self._test_ui2.event.sendpickle.call_args_list,
                         expected)

    def test_ui_handler_log_filter(self):
        """ Test log filters for UI handlers """
        mask = ["*"]
        debug_domains = {'BitBake.Foo': logging.WARNING}

        self._test_ui1.event = EventQueueStub()
        result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
        bb.event.set_UIHmask(result, logging.ERROR, debug_domains, mask)
        self._test_ui2.event = PickleEventQueueStub()
        result = bb.event.register_UIHhandler(self._test_ui2, mainui=True)
        bb.event.set_UIHmask(result, logging.ERROR, debug_domains, mask)

        event1 = bb.event.OperationStarted()
        bb.event.fire_ui_handlers(event1, None) # All events match

        event_log_handler = bb.event.LogHandler()
        logger = logging.getLogger("BitBake")
        logger.addHandler(event_log_handler)
        logger1 = logging.getLogger("BitBake.Foo")
        logger1.warning("Test warning LogRecord1") # Matches debug_domains level
        logger1.info("Test info LogRecord") # Filtered out
        logger2 = logging.getLogger("BitBake.Bar")
        logger2.error("Test error LogRecord") # Matches filter base level
        logger2.warning("Test warning LogRecord2") # Filtered out
        logger.removeHandler(event_log_handler)

        expected = ['OperationStarted',
                    'WARNING: Test warning LogRecord1',
                    'ERROR: Test error LogRecord']
        self.assertEqual(self._test_ui1.event.event_calls, expected)
        self.assertEqual(self._test_ui2.event.event_calls, expected)

    def test_fire(self):
        """ Test fire method used to trigger class and ui event handlers """
        mask = ["bb.event.ConfigParsed"]
        result = bb.event.register("event_handler1",
                                   self._test_process.event_handler1,
                                   mask)

        self._test_ui1.event = Mock(spec_set=EventQueueStub)
        result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
        self.assertEqual(result, 1)

        event1 = bb.event.ConfigParsed()
        bb.event.fire(event1, None)
        expected = [call(event1)]
        self.assertEqual(self._test_process.event_handler1.call_args_list,
                         expected)
        self.assertEqual(self._test_ui1.event.send.call_args_list,
                         expected)

    def test_fire_from_worker(self):
        """ Test fire_from_worker method """
        self._test_ui1.event = Mock(spec_set=EventQueueStub)
        result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
        self.assertEqual(result, 1)
        event1 = bb.event.ConfigParsed()
        bb.event.fire_from_worker(event1, None)
        expected = [call(event1)]
        self.assertEqual(self._test_ui1.event.send.call_args_list,
                         expected)

    def test_worker_fire(self):
        """ Test the triggering of bb.event.worker_fire callback """
        bb.event.worker_fire = Mock()
        event = bb.event.Event()
        bb.event.fire(event, None)
        expected = [call(event, None)]
        self.assertEqual(bb.event.worker_fire.call_args_list, expected)

    def test_print_ui_queue(self):
        """ Test print_ui_queue method """
        event1 = bb.event.OperationStarted()
        event2 = bb.event.OperationCompleted(total=123)
        bb.event.fire(event1, None)
        bb.event.fire(event2, None)
        event_log_handler = bb.event.LogHandler()
        logger = logging.getLogger("BitBake")
        logger.addHandler(event_log_handler)
        logger.info("Test info LogRecord")
        logger.warning("Test warning LogRecord")
        with self.assertLogs("BitBake", level="INFO") as cm:
            bb.event.print_ui_queue()
        logger.removeHandler(event_log_handler)
        self.assertEqual(cm.output,
                         ["INFO:BitBake:Test info LogRecord",
                          "WARNING:BitBake:Test warning LogRecord"])

    def _set_threadlock_test_mockups(self):
        """ Create UI event handler mockups used in enable and disable
            threadlock tests """
        def ui1_event_send(event):
            if type(event) is bb.event.ConfigParsed:
                self._threadlock_test_calls.append("w1_ui1")
            if type(event) is bb.event.OperationStarted:
                self._threadlock_test_calls.append("w2_ui1")
            time.sleep(2)

        def ui2_event_send(event):
            if type(event) is bb.event.ConfigParsed:
                self._threadlock_test_calls.append("w1_ui2")
            if type(event) is bb.event.OperationStarted:
                self._threadlock_test_calls.append("w2_ui2")
            time.sleep(2)

        self._threadlock_test_calls = []
        self._test_ui1.event = EventQueueStub()
        self._test_ui1.event.send = ui1_event_send
        result = bb.event.register_UIHhandler(self._test_ui1, mainui=True)
        self.assertEqual(result, 1)
        self._test_ui2.event = EventQueueStub()
        self._test_ui2.event.send = ui2_event_send
        result = bb.event.register_UIHhandler(self._test_ui2, mainui=True)
        self.assertEqual(result, 2)

    def _set_and_run_threadlock_test_workers(self):
        """ Create and run the workers used to trigger events in enable and
            disable threadlock tests """
        worker1 = threading.Thread(target=self._thread_lock_test_worker1)
        worker2 = threading.Thread(target=self._thread_lock_test_worker2)
        worker1.start()
        time.sleep(1)
        worker2.start()
        worker1.join()
        worker2.join()

    def _thread_lock_test_worker1(self):
        """ First worker used to fire the ConfigParsed event for enable and
            disable threadlocks tests """
        bb.event.fire(bb.event.ConfigParsed(), None)

    def _thread_lock_test_worker2(self):
        """ Second worker used to fire the OperationStarted event for enable
            and disable threadlocks tests """
        bb.event.fire(bb.event.OperationStarted(), None)

    def test_enable_threadlock(self):
        """ Test enable_threadlock method """
        self._set_threadlock_test_mockups()
        bb.event.enable_threadlock()
        self._set_and_run_threadlock_test_workers()
        # Calls to UI handlers should be in order, as all the registered
        # handlers for the event coming from the first worker should be
        # called before processing the event from the second worker.
        self.assertEqual(self._threadlock_test_calls,
                         ["w1_ui1", "w1_ui2", "w2_ui1", "w2_ui2"])


    def test_disable_threadlock(self):
        """ Test disable_threadlock method """
        self._set_threadlock_test_mockups()
        bb.event.disable_threadlock()
        self._set_and_run_threadlock_test_workers()
        # Calls to UI handlers should be interleaved: thanks to the
        # delay in the registered handlers for the event coming from the first
        # worker, the event coming from the second worker starts being
        # processed before handling of the first worker's event has finished.
        self.assertEqual(self._threadlock_test_calls,
                         ["w1_ui1", "w2_ui1", "w1_ui2", "w2_ui2"])
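
The two threadlock tests above only differ in whether event delivery is serialised; the underlying idea is a single lock held across the whole fan-out to UI handlers, so the first worker's event reaches both UIs before the second worker's event is processed. A generic sketch of that pattern (illustrative only, not the bb.event internals):

import threading

# One lock around the delivery loop keeps events from different threads
# in order, as in test_enable_threadlock; without it deliveries from
# concurrent workers interleave, as in test_disable_threadlock.
_ui_lock = threading.Lock()

def fire_ui_handlers(event, handlers):
    with _ui_lock:
        for handler in handlers:
            handler(event)
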
class EventClassesTest(unittest.TestCase):
    """ Event classes test class """

    _worker_pid = 54321

    def setUp(self):
        bb.event.worker_pid = EventClassesTest._worker_pid

    def test_Event(self):
        """ Test the Event base class """
        event = bb.event.Event()
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_HeartbeatEvent(self):
        """ Test the HeartbeatEvent class """
        time = 10
        event = bb.event.HeartbeatEvent(time)
        self.assertEqual(event.time, time)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_OperationStarted(self):
        """ Test OperationStarted event class """
        msg = "Foo Bar"
        event = bb.event.OperationStarted(msg)
        self.assertEqual(event.msg, msg)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_OperationCompleted(self):
        """ Test OperationCompleted event class """
        msg = "Foo Bar"
        total = 123
        event = bb.event.OperationCompleted(total, msg)
        self.assertEqual(event.msg, msg)
        self.assertEqual(event.total, total)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_OperationProgress(self):
        """ Test OperationProgress event class """
        msg = "Foo Bar"
        total = 123
        current = 111
        event = bb.event.OperationProgress(current, total, msg)
        self.assertEqual(event.msg, msg + ": %s/%s" % (current, total))
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_ConfigParsed(self):
        """ Test the ConfigParsed class """
        event = bb.event.ConfigParsed()
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_MultiConfigParsed(self):
        """ Test MultiConfigParsed event class """
        mcdata = {"foobar": "Foo Bar"}
        event = bb.event.MultiConfigParsed(mcdata)
        self.assertEqual(event.mcdata, mcdata)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_RecipeEvent(self):
        """ Test RecipeEvent event base class """
        callback = lambda a: 2 * a
        event = bb.event.RecipeEvent(callback)
        self.assertEqual(event.fn(1), callback(1))
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_RecipePreFinalise(self):
        """ Test RecipePreFinalise event class """
        callback = lambda a: 2 * a
        event = bb.event.RecipePreFinalise(callback)
        self.assertEqual(event.fn(1), callback(1))
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_RecipeTaskPreProcess(self):
        """ Test RecipeTaskPreProcess event class """
        callback = lambda a: 2 * a
        tasklist = [("foobar", callback)]
        event = bb.event.RecipeTaskPreProcess(callback, tasklist)
        self.assertEqual(event.fn(1), callback(1))
        self.assertEqual(event.tasklist, tasklist)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_RecipeParsed(self):
        """ Test RecipeParsed event base class """
        callback = lambda a: 2 * a
        event = bb.event.RecipeParsed(callback)
        self.assertEqual(event.fn(1), callback(1))
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_StampUpdate(self):
        targets = ["foo", "bar"]
        stampfns = [lambda: "foobar"]
        event = bb.event.StampUpdate(targets, stampfns)
        self.assertEqual(event.targets, targets)
        self.assertEqual(event.stampPrefix, stampfns)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_BuildBase(self):
        """ Test base class for bitbake build events """
        name = "foo"
        pkgs = ["bar"]
        failures = 123
        event = bb.event.BuildBase(name, pkgs, failures)
        self.assertEqual(event.name, name)
        self.assertEqual(event.pkgs, pkgs)
        self.assertEqual(event.getFailures(), failures)
        name = event.name = "bar"
        pkgs = event.pkgs = ["foo"]
        self.assertEqual(event.name, name)
        self.assertEqual(event.pkgs, pkgs)
        self.assertEqual(event.getFailures(), failures)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_BuildInit(self):
        """ Test class for bitbake build invocation events """
        event = bb.event.BuildInit()
        self.assertEqual(event.name, None)
        self.assertEqual(event.pkgs, [])
        self.assertEqual(event.getFailures(), 0)
        name = event.name = "bar"
        pkgs = event.pkgs = ["foo"]
        self.assertEqual(event.name, name)
        self.assertEqual(event.pkgs, pkgs)
        self.assertEqual(event.getFailures(), 0)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_BuildStarted(self):
        """ Test class for build started events """
        name = "foo"
        pkgs = ["bar"]
        failures = 123
        event = bb.event.BuildStarted(name, pkgs, failures)
        self.assertEqual(event.name, name)
        self.assertEqual(event.pkgs, pkgs)
        self.assertEqual(event.getFailures(), failures)
        self.assertEqual(event.msg, "Building Started")
        name = event.name = "bar"
        pkgs = event.pkgs = ["foo"]
        msg = event.msg = "foobar"
        self.assertEqual(event.name, name)
        self.assertEqual(event.pkgs, pkgs)
        self.assertEqual(event.getFailures(), failures)
        self.assertEqual(event.msg, msg)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_BuildCompleted(self):
        """ Test class for build completed events """
        total = 1000
        name = "foo"
        pkgs = ["bar"]
        failures = 123
        interrupted = 1
        event = bb.event.BuildCompleted(total, name, pkgs, failures,
                                        interrupted)
        self.assertEqual(event.name, name)
        self.assertEqual(event.pkgs, pkgs)
        self.assertEqual(event.getFailures(), failures)
        self.assertEqual(event.msg, "Building Failed")
        event2 = bb.event.BuildCompleted(total, name, pkgs)
        self.assertEqual(event2.name, name)
        self.assertEqual(event2.pkgs, pkgs)
        self.assertEqual(event2.getFailures(), 0)
        self.assertEqual(event2.msg, "Building Succeeded")
        self.assertEqual(event2.pid, EventClassesTest._worker_pid)

    def test_DiskFull(self):
        """ Test DiskFull event class """
        dev = "/dev/foo"
        type = "ext4"
        freespace = "104M"
        mountpoint = "/"
        event = bb.event.DiskFull(dev, type, freespace, mountpoint)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_MonitorDiskEvent(self):
        """ Test MonitorDiskEvent class """
        available_bytes = 10000000
        free_bytes = 90000000
        total_bytes = 1000000000
        du = bb.event.DiskUsageSample(available_bytes, free_bytes,
                                      total_bytes)
        event = bb.event.MonitorDiskEvent(du)
        self.assertEqual(event.disk_usage.available_bytes, available_bytes)
        self.assertEqual(event.disk_usage.free_bytes, free_bytes)
        self.assertEqual(event.disk_usage.total_bytes, total_bytes)
        self.assertEqual(event.pid, EventClassesTest._worker_pid)

    def test_NoProvider(self):
        """ Test NoProvider event class """
        item = "foobar"
        event1 = bb.event.NoProvider(item)
        self.assertEqual(event1.getItem(), item)
        self.assertEqual(event1.isRuntime(), False)
        self.assertEqual(str(event1), "Nothing PROVIDES 'foobar'")
        runtime = True
        dependees = ["foo", "bar"]
        reasons = None
        close_matches = ["foibar", "footbar"]
        event2 = bb.event.NoProvider(item, runtime, dependees, reasons,
                                     close_matches)
        self.assertEqual(event2.isRuntime(), True)
        expected = ("Nothing RPROVIDES 'foobar' (but foo, bar RDEPENDS"
                    " on or otherwise requires it). Close matches:\n"
                    " foibar\n"
                    " footbar")
        self.assertEqual(str(event2), expected)
        reasons = ["Item does not exist on database"]
        close_matches = ["foibar", "footbar"]
        event3 = bb.event.NoProvider(item, runtime, dependees, reasons,
                                     close_matches)
        expected = ("Nothing RPROVIDES 'foobar' (but foo, bar RDEPENDS"
                    " on or otherwise requires it)\n"
                    "Item does not exist on database")
        self.assertEqual(str(event3), expected)
        self.assertEqual(event3.pid, EventClassesTest._worker_pid)

    def test_MultipleProviders(self):
        """ Test MultipleProviders event class """
        item = "foobar"
        candidates = ["foobarv1", "foobars"]
        event1 = bb.event.MultipleProviders(item, candidates)
        self.assertEqual(event1.isRuntime(), False)
        self.assertEqual(event1.getItem(), item)
        self.assertEqual(event1.getCandidates(), candidates)
        expected = ("Multiple providers are available for foobar (foobarv1,"
                    " foobars)\n"
                    "Consider defining a PREFERRED_PROVIDER entry to match "
                    "foobar")
        self.assertEqual(str(event1), expected)
        runtime = True
        event2 = bb.event.MultipleProviders(item, candidates, runtime)
        self.assertEqual(event2.isRuntime(), runtime)
        expected = ("Multiple providers are available for runtime foobar "
                    "(foobarv1, foobars)\n"
                    "Consider defining a PREFERRED_RPROVIDER entry to match "
                    "foobar")
        self.assertEqual(str(event2), expected)
        self.assertEqual(event2.pid, EventClassesTest._worker_pid)

def test_ParseStarted(self):
|
||||
""" Test ParseStarted event class """
|
||||
total = 123
|
||||
event = bb.event.ParseStarted(total)
|
||||
self.assertEqual(event.msg, "Recipe parsing Started")
|
||||
self.assertEqual(event.total, total)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_ParseCompleted(self):
|
||||
""" Test ParseCompleted event class """
|
||||
cached = 10
|
||||
parsed = 13
|
||||
skipped = 7
|
||||
virtuals = 2
|
||||
masked = 1
|
||||
errors = 0
|
||||
total = 23
|
||||
event = bb.event.ParseCompleted(cached, parsed, skipped, masked,
|
||||
virtuals, errors, total)
|
||||
self.assertEqual(event.msg, "Recipe parsing Completed")
|
||||
expected = [cached, parsed, skipped, virtuals, masked, errors,
|
||||
cached + parsed, total]
|
||||
actual = [event.cached, event.parsed, event.skipped, event.virtuals,
|
||||
event.masked, event.errors, event.sofar, event.total]
|
||||
self.assertEqual(str(actual), str(expected))
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_ParseProgress(self):
|
||||
""" Test ParseProgress event class """
|
||||
current = 10
|
||||
total = 100
|
||||
event = bb.event.ParseProgress(current, total)
|
||||
self.assertEqual(event.msg,
|
||||
"Recipe parsing" + ": %s/%s" % (current, total))
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_CacheLoadStarted(self):
|
||||
""" Test CacheLoadStarted event class """
|
||||
total = 123
|
||||
event = bb.event.CacheLoadStarted(total)
|
||||
self.assertEqual(event.msg, "Loading cache Started")
|
||||
self.assertEqual(event.total, total)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_CacheLoadProgress(self):
|
||||
""" Test CacheLoadProgress event class """
|
||||
current = 10
|
||||
total = 100
|
||||
event = bb.event.CacheLoadProgress(current, total)
|
||||
self.assertEqual(event.msg,
|
||||
"Loading cache" + ": %s/%s" % (current, total))
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_CacheLoadCompleted(self):
|
||||
""" Test CacheLoadCompleted event class """
|
||||
total = 23
|
||||
num_entries = 12
|
||||
event = bb.event.CacheLoadCompleted(total, num_entries)
|
||||
self.assertEqual(event.msg, "Loading cache Completed")
|
||||
expected = [total, num_entries]
|
||||
actual = [event.total, event.num_entries]
|
||||
self.assertEqual(str(actual), str(expected))
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_TreeDataPreparationStarted(self):
|
||||
""" Test TreeDataPreparationStarted event class """
|
||||
event = bb.event.TreeDataPreparationStarted()
|
||||
self.assertEqual(event.msg, "Preparing tree data Started")
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_TreeDataPreparationProgress(self):
|
||||
""" Test TreeDataPreparationProgress event class """
|
||||
current = 10
|
||||
total = 100
|
||||
event = bb.event.TreeDataPreparationProgress(current, total)
|
||||
self.assertEqual(event.msg,
|
||||
"Preparing tree data" + ": %s/%s" % (current, total))
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_TreeDataPreparationCompleted(self):
|
||||
""" Test TreeDataPreparationCompleted event class """
|
||||
total = 23
|
||||
event = bb.event.TreeDataPreparationCompleted(total)
|
||||
self.assertEqual(event.msg, "Preparing tree data Completed")
|
||||
self.assertEqual(event.total, total)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_DepTreeGenerated(self):
|
||||
""" Test DepTreeGenerated event class """
|
||||
depgraph = Mock()
|
||||
event = bb.event.DepTreeGenerated(depgraph)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_TargetsTreeGenerated(self):
|
||||
""" Test TargetsTreeGenerated event class """
|
||||
model = Mock()
|
||||
event = bb.event.TargetsTreeGenerated(model)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_ReachableStamps(self):
|
||||
""" Test ReachableStamps event class """
|
||||
stamps = [Mock(), Mock()]
|
||||
event = bb.event.ReachableStamps(stamps)
|
||||
self.assertEqual(event.stamps, stamps)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_FilesMatchingFound(self):
|
||||
""" Test FilesMatchingFound event class """
|
||||
pattern = "foo.*bar"
|
||||
matches = ["foobar"]
|
||||
event = bb.event.FilesMatchingFound(pattern, matches)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_ConfigFilesFound(self):
|
||||
""" Test ConfigFilesFound event class """
|
||||
variable = "FOO_BAR"
|
||||
values = ["foo", "bar"]
|
||||
event = bb.event.ConfigFilesFound(variable, values)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_ConfigFilePathFound(self):
|
||||
""" Test ConfigFilePathFound event class """
|
||||
path = "/foo/bar"
|
||||
event = bb.event.ConfigFilePathFound(path)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_message_classes(self):
|
||||
""" Test message event classes """
|
||||
msg = "foobar foo bar"
|
||||
event = bb.event.MsgBase(msg)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
event = bb.event.MsgDebug(msg)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
event = bb.event.MsgNote(msg)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
event = bb.event.MsgWarn(msg)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
event = bb.event.MsgError(msg)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
event = bb.event.MsgFatal(msg)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
event = bb.event.MsgPlain(msg)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_LogExecTTY(self):
|
||||
""" Test LogExecTTY event class """
|
||||
msg = "foo bar"
|
||||
prog = "foo.sh"
|
||||
sleep_delay = 10
|
||||
retries = 3
|
||||
event = bb.event.LogExecTTY(msg, prog, sleep_delay, retries)
|
||||
self.assertEqual(event.msg, msg)
|
||||
self.assertEqual(event.prog, prog)
|
||||
self.assertEqual(event.sleep_delay, sleep_delay)
|
||||
self.assertEqual(event.retries, retries)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def _throw_zero_division_exception(self):
|
||||
a = 1 / 0
|
||||
return
|
||||
|
||||
def _worker_handler(self, event, d):
|
||||
self._returned_event = event
|
||||
return
|
||||
|
||||
def test_LogHandler(self):
|
||||
""" Test LogHandler class """
|
||||
logger = logging.getLogger("TestEventClasses")
|
||||
logger.propagate = False
|
||||
handler = bb.event.LogHandler(logging.INFO)
|
||||
logger.addHandler(handler)
|
||||
bb.event.worker_fire = self._worker_handler
|
||||
try:
|
||||
self._throw_zero_division_exception()
|
||||
except ZeroDivisionError as ex:
|
||||
logger.exception(ex)
|
||||
event = self._returned_event
|
||||
try:
|
||||
pe = pickle.dumps(event)
|
||||
newevent = pickle.loads(pe)
|
||||
except:
|
||||
self.fail('Logged event is not serializable')
|
||||
self.assertEqual(event.taskpid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_MetadataEvent(self):
|
||||
""" Test MetadataEvent class """
|
||||
eventtype = "footype"
|
||||
eventdata = {"foo": "bar"}
|
||||
event = bb.event.MetadataEvent(eventtype, eventdata)
|
||||
self.assertEqual(event.type, eventtype)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_ProcessStarted(self):
|
||||
""" Test ProcessStarted class """
|
||||
processname = "foo"
|
||||
total = 9783128974
|
||||
event = bb.event.ProcessStarted(processname, total)
|
||||
self.assertEqual(event.processname, processname)
|
||||
self.assertEqual(event.total, total)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_ProcessProgress(self):
|
||||
""" Test ProcessProgress class """
|
||||
processname = "foo"
|
||||
progress = 243224
|
||||
event = bb.event.ProcessProgress(processname, progress)
|
||||
self.assertEqual(event.processname, processname)
|
||||
self.assertEqual(event.progress, progress)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_ProcessFinished(self):
|
||||
""" Test ProcessFinished class """
|
||||
processname = "foo"
|
||||
total = 1242342344
|
||||
event = bb.event.ProcessFinished(processname)
|
||||
self.assertEqual(event.processname, processname)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_SanityCheck(self):
|
||||
""" Test SanityCheck class """
|
||||
event1 = bb.event.SanityCheck()
|
||||
self.assertEqual(event1.generateevents, True)
|
||||
self.assertEqual(event1.pid, EventClassesTest._worker_pid)
|
||||
generateevents = False
|
||||
event2 = bb.event.SanityCheck(generateevents)
|
||||
self.assertEqual(event2.generateevents, generateevents)
|
||||
self.assertEqual(event2.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_SanityCheckPassed(self):
|
||||
""" Test SanityCheckPassed class """
|
||||
event = bb.event.SanityCheckPassed()
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_SanityCheckFailed(self):
|
||||
""" Test SanityCheckFailed class """
|
||||
msg = "The sanity test failed."
|
||||
event1 = bb.event.SanityCheckFailed(msg)
|
||||
self.assertEqual(event1.pid, EventClassesTest._worker_pid)
|
||||
network_error = True
|
||||
event2 = bb.event.SanityCheckFailed(msg, network_error)
|
||||
self.assertEqual(event2.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_network_event_classes(self):
|
||||
""" Test network event classes """
|
||||
event1 = bb.event.NetworkTest()
|
||||
generateevents = False
|
||||
self.assertEqual(event1.pid, EventClassesTest._worker_pid)
|
||||
event2 = bb.event.NetworkTest(generateevents)
|
||||
self.assertEqual(event2.pid, EventClassesTest._worker_pid)
|
||||
event3 = bb.event.NetworkTestPassed()
|
||||
self.assertEqual(event3.pid, EventClassesTest._worker_pid)
|
||||
event4 = bb.event.NetworkTestFailed()
|
||||
self.assertEqual(event4.pid, EventClassesTest._worker_pid)
|
||||
|
||||
def test_FindSigInfoResult(self):
|
||||
""" Test FindSigInfoResult event class """
|
||||
result = [Mock()]
|
||||
event = bb.event.FindSigInfoResult(result)
|
||||
self.assertEqual(event.result, result)
|
||||
self.assertEqual(event.pid, EventClassesTest._worker_pid)
|
||||
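A minimal usage sketch (not part of the diff) of the event classes exercised above. It assumes a configured datastore `d` such as the tests' EventClassesTest fixture provides, and uses only calls the tests themselves rely on:

    import bb.event

    # Construct an event exactly as test_NoProvider does and inspect it
    event = bb.event.NoProvider("foobar")
    print(str(event))   # -> "Nothing PROVIDES 'foobar'"

    # Events are delivered to registered handlers via bb.event.fire(event, d)
    bb.event.fire(bb.event.ParseProgress(10, 100), d)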
[File diff suppressed because it is too large]
@@ -44,13 +44,9 @@ C = "3"
"""

    def setUp(self):
        self.origdir = os.getcwd()
        self.d = bb.data.init()
        bb.parse.siggen = bb.siggen.init(self.d)

    def tearDown(self):
        os.chdir(self.origdir)

    def parsehelper(self, content, suffix = ".bb"):

        f = tempfile.NamedTemporaryFile(suffix = suffix)

@@ -62,9 +58,9 @@ C = "3"
    def test_parse_simple(self):
        f = self.parsehelper(self.testfile)
        d = bb.parse.handle(f.name, self.d)['']
        self.assertEqual(d.getVar("A"), "1")
        self.assertEqual(d.getVar("B"), "2")
        self.assertEqual(d.getVar("C"), "3")
        self.assertEqual(d.getVar("A", True), "1")
        self.assertEqual(d.getVar("B", True), "2")
        self.assertEqual(d.getVar("C", True), "3")

    def test_parse_incomplete_function(self):
        testfileB = self.testfile.replace("}", "")
@@ -84,31 +80,10 @@ unset B[flag]
    def test_parse_unset(self):
        f = self.parsehelper(self.unsettest)
        d = bb.parse.handle(f.name, self.d)['']
        self.assertEqual(d.getVar("A"), None)
        self.assertEqual(d.getVarFlag("A","flag"), None)
        self.assertEqual(d.getVar("B"), "2")

    exporttest = """
A = "a"
export B = "b"
export C
exportD = "d"
"""

    def test_parse_exports(self):
        f = self.parsehelper(self.exporttest)
        d = bb.parse.handle(f.name, self.d)['']
        self.assertEqual(d.getVar("A"), "a")
        self.assertIsNone(d.getVarFlag("A", "export"))
        self.assertEqual(d.getVar("B"), "b")
        self.assertEqual(d.getVarFlag("B", "export"), 1)
        self.assertIsNone(d.getVar("C"))
        self.assertEqual(d.getVarFlag("C", "export"), 1)
        self.assertIsNone(d.getVar("D"))
        self.assertIsNone(d.getVarFlag("D", "export"))
        self.assertEqual(d.getVar("exportD"), "d")
        self.assertIsNone(d.getVarFlag("exportD", "export"))

        self.assertEqual(d.getVar("A", True), None)
        self.assertEqual(d.getVarFlag("A","flag", True), None)
        self.assertEqual(d.getVar("B", True), "2")


    overridetest = """
RRECOMMENDS_${PN} = "a"
@@ -120,11 +95,11 @@ PN = "gtk+"
    def test_parse_overrides(self):
        f = self.parsehelper(self.overridetest)
        d = bb.parse.handle(f.name, self.d)['']
        self.assertEqual(d.getVar("RRECOMMENDS"), "b")
        self.assertEqual(d.getVar("RRECOMMENDS", True), "b")
        bb.data.expandKeys(d)
        self.assertEqual(d.getVar("RRECOMMENDS"), "b")
        self.assertEqual(d.getVar("RRECOMMENDS", True), "b")
        d.setVar("RRECOMMENDS_gtk+", "c")
        self.assertEqual(d.getVar("RRECOMMENDS"), "c")
        self.assertEqual(d.getVar("RRECOMMENDS", True), "c")

    overridetest2 = """
EXTRA_OECONF = ""
@@ -137,7 +112,7 @@ EXTRA_OECONF_append = " c"
        d = bb.parse.handle(f.name, self.d)['']
        d.appendVar("EXTRA_OECONF", " d")
        d.setVar("OVERRIDES", "class-target")
        self.assertEqual(d.getVar("EXTRA_OECONF"), "b c d")
        self.assertEqual(d.getVar("EXTRA_OECONF", True), "b c d")

    overridetest3 = """
DESCRIPTION = "A"
@@ -149,11 +124,11 @@ PN = "bc"
        f = self.parsehelper(self.overridetest3)
        d = bb.parse.handle(f.name, self.d)['']
        bb.data.expandKeys(d)
        self.assertEqual(d.getVar("DESCRIPTION_bc-dev"), "A B")
        self.assertEqual(d.getVar("DESCRIPTION_bc-dev", True), "A B")
        d.setVar("DESCRIPTION", "E")
        d.setVar("DESCRIPTION_bc-dev", "C D")
        d.setVar("OVERRIDES", "bc-dev")
        self.assertEqual(d.getVar("DESCRIPTION"), "C D")
        self.assertEqual(d.getVar("DESCRIPTION", True), "C D")


    classextend = """
@@ -184,6 +159,6 @@ python () {
        alldata = bb.parse.handle(f.name, self.d)
        d1 = alldata['']
        d2 = alldata[cls.name]
        self.assertEqual(d1.getVar("VAR_var"), "B")
        self.assertEqual(d2.getVar("VAR_var"), None)
        self.assertEqual(d1.getVar("VAR_var", True), "B")
        self.assertEqual(d2.getVar("VAR_var", True), None)
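For readers unfamiliar with the override semantics these tests exercise, here is a short, hedged sketch using bb.data directly rather than parsing a file (the variable values are illustrative only):

    import bb.data

    d = bb.data.init()
    d.setVar("EXTRA_OECONF", "a")
    d.setVar("EXTRA_OECONF_class-target", "b")
    d.setVar("OVERRIDES", "class-target")
    # With the override active, the _class-target value wins
    print(d.getVar("EXTRA_OECONF", True))  # -> "b"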
@@ -1,142 +0,0 @@
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# BitBake Test for lib/bb/persist_data/
#
# Copyright (C) 2018 Garmin Ltd.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#

import unittest
import bb.data
import bb.persist_data
import tempfile
import threading

class PersistDataTest(unittest.TestCase):
    def _create_data(self):
        return bb.persist_data.persist('TEST_PERSIST_DATA', self.d)

    def setUp(self):
        self.d = bb.data.init()
        self.tempdir = tempfile.TemporaryDirectory()
        self.d['PERSISTENT_DIR'] = self.tempdir.name
        self.data = self._create_data()
        self.items = {
            'A1': '1',
            'B1': '2',
            'C2': '3'
        }
        self.stress_count = 10000
        self.thread_count = 5

        for k,v in self.items.items():
            self.data[k] = v

    def tearDown(self):
        self.tempdir.cleanup()

    def _iter_helper(self, seen, iterator):
        with iter(iterator):
            for v in iterator:
                self.assertTrue(v in seen)
                seen.remove(v)
        self.assertEqual(len(seen), 0, '%s not seen' % seen)

    def test_get(self):
        for k, v in self.items.items():
            self.assertEqual(self.data[k], v)

        self.assertIsNone(self.data.get('D'))
        with self.assertRaises(KeyError):
            self.data['D']

    def test_set(self):
        for k, v in self.items.items():
            self.data[k] += '-foo'

        for k, v in self.items.items():
            self.assertEqual(self.data[k], v + '-foo')

    def test_delete(self):
        self.data['D'] = '4'
        self.assertEqual(self.data['D'], '4')
        del self.data['D']
        self.assertIsNone(self.data.get('D'))
        with self.assertRaises(KeyError):
            self.data['D']

    def test_contains(self):
        for k in self.items:
            self.assertTrue(k in self.data)
            self.assertTrue(self.data.has_key(k))
        self.assertFalse('NotFound' in self.data)
        self.assertFalse(self.data.has_key('NotFound'))

    def test_len(self):
        self.assertEqual(len(self.data), len(self.items))

    def test_iter(self):
        self._iter_helper(set(self.items.keys()), self.data)

    def test_itervalues(self):
        self._iter_helper(set(self.items.values()), self.data.itervalues())

    def test_iteritems(self):
        self._iter_helper(set(self.items.items()), self.data.iteritems())

    def test_get_by_pattern(self):
        self._iter_helper({'1', '2'}, self.data.get_by_pattern('_1'))

    def _stress_read(self, data):
        for i in range(self.stress_count):
            for k in self.items:
                data[k]

    def _stress_write(self, data):
        for i in range(self.stress_count):
            for k, v in self.items.items():
                data[k] = v + str(i)

    def _validate_stress(self):
        for k, v in self.items.items():
            self.assertEqual(self.data[k], v + str(self.stress_count - 1))

    def test_stress(self):
        self._stress_read(self.data)
        self._stress_write(self.data)
        self._validate_stress()

    def test_stress_threads(self):
        def read_thread():
            data = self._create_data()
            self._stress_read(data)

        def write_thread():
            data = self._create_data()
            self._stress_write(data)

        threads = []
        for i in range(self.thread_count):
            threads.append(threading.Thread(target=read_thread))
            threads.append(threading.Thread(target=write_thread))

        for t in threads:
            t.start()
        self._stress_read(self.data)
        for t in threads:
            t.join()
        self._validate_stress()
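A short, hedged sketch of the bb.persist_data API that the removed test file covered; the domain name here is illustrative, and PERSISTENT_DIR must point at a writable directory:

    import tempfile
    import bb.data
    import bb.persist_data

    d = bb.data.init()
    tmpdir = tempfile.TemporaryDirectory()
    d.setVar('PERSISTENT_DIR', tmpdir.name)
    # persist() returns a dict-like object backed by an SQLite database
    data = bb.persist_data.persist('EXAMPLE_DOMAIN', d)
    data['key'] = 'value'
    assert data['key'] == 'value'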
@@ -42,10 +42,6 @@ class VerCmpString(unittest.TestCase):
        self.assertTrue(result < 0)
        result = bb.utils.vercmp_string('1.1', '1.0+1.1-beta1')
        self.assertTrue(result > 0)
        result = bb.utils.vercmp_string('1.', '1.1')
        self.assertTrue(result < 0)
        result = bb.utils.vercmp_string('1.1', '1.')
        self.assertTrue(result > 0)

    def test_explode_dep_versions(self):
        correctresult = {"foo" : ["= 1.10"]}
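The comparison semantics checked above, as a quick sketch:

    import bb.utils

    assert bb.utils.vercmp_string('1.0', '1.1') < 0   # older < newer
    assert bb.utils.vercmp_string('1.1', '1.0+1.1-beta1') > 0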
@@ -1,8 +1,7 @@
# tinfoil: a simple wrapper around cooker for bitbake-based command-line utilities
#
# Copyright (C) 2012-2017 Intel Corporation
# Copyright (C) 2012 Intel Corporation
# Copyright (C) 2011 Mentor Graphics Corporation
# Copyright (C) 2006-2012 Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
@@ -18,319 +17,50 @@
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import logging
import warnings
import os
import sys
import atexit
import re
from collections import OrderedDict, defaultdict

import bb.cache
import bb.cooker
import bb.providers
import bb.taskdata
import bb.utils
import bb.command
import bb.remotedata
from bb.cooker import state, BBCooker, CookerFeatures
from bb.cookerdata import CookerConfiguration, ConfigParameters
from bb.main import setup_bitbake, BitBakeConfigParameters, BBMainException
import bb.fetch2


# We need this in order to shut down the connection to the bitbake server,
# otherwise the process will never properly exit
_server_connections = []
def _terminate_connections():
    for connection in _server_connections:
        connection.terminate()
atexit.register(_terminate_connections)

class TinfoilUIException(Exception):
    """Exception raised when the UI returns non-zero from its main function"""
    def __init__(self, returncode):
        self.returncode = returncode
    def __repr__(self):
        return 'UI module main returned %d' % self.returncode

class TinfoilCommandFailed(Exception):
    """Exception raised when run_command fails"""

class TinfoilDataStoreConnector:
    """Connector object used to enable access to datastore objects via tinfoil"""

    def __init__(self, tinfoil, dsindex):
        self.tinfoil = tinfoil
        self.dsindex = dsindex
    def getVar(self, name):
        value = self.tinfoil.run_command('dataStoreConnectorFindVar', self.dsindex, name)
        overrides = None
        if isinstance(value, dict):
            if '_connector_origtype' in value:
                value['_content'] = self.tinfoil._reconvert_type(value['_content'], value['_connector_origtype'])
                del value['_connector_origtype']
            if '_connector_overrides' in value:
                overrides = value['_connector_overrides']
                del value['_connector_overrides']
        return value, overrides
    def getKeys(self):
        return set(self.tinfoil.run_command('dataStoreConnectorGetKeys', self.dsindex))
    def getVarHistory(self, name):
        return self.tinfoil.run_command('dataStoreConnectorGetVarHistory', self.dsindex, name)
    def expandPythonRef(self, varname, expr, d):
        ds = bb.remotedata.RemoteDatastores.transmit_datastore(d)
        ret = self.tinfoil.run_command('dataStoreConnectorExpandPythonRef', ds, varname, expr)
        return ret
    def setVar(self, varname, value):
        if self.dsindex is None:
            self.tinfoil.run_command('setVariable', varname, value)
        else:
            # Not currently implemented - indicate that setting should
            # be redirected to local side
            return True
    def setVarFlag(self, varname, flagname, value):
        if self.dsindex is None:
            self.tinfoil.run_command('dataStoreConnectorSetVarFlag', self.dsindex, varname, flagname, value)
        else:
            # Not currently implemented - indicate that setting should
            # be redirected to local side
            return True
    def delVar(self, varname):
        if self.dsindex is None:
            self.tinfoil.run_command('dataStoreConnectorDelVar', self.dsindex, varname)
        else:
            # Not currently implemented - indicate that setting should
            # be redirected to local side
            return True
    def delVarFlag(self, varname, flagname):
        if self.dsindex is None:
            self.tinfoil.run_command('dataStoreConnectorDelVar', self.dsindex, varname, flagname)
        else:
            # Not currently implemented - indicate that setting should
            # be redirected to local side
            return True
    def renameVar(self, name, newname):
        if self.dsindex is None:
            self.tinfoil.run_command('dataStoreConnectorRenameVar', self.dsindex, name, newname)
        else:
            # Not currently implemented - indicate that setting should
            # be redirected to local side
            return True

class TinfoilCookerAdapter:
    """
    Provide an adapter for existing code that expects to access a cooker object via Tinfoil,
    since now Tinfoil is on the client side it no longer has direct access.
    """

    class TinfoilCookerCollectionAdapter:
        """ cooker.collection adapter """
        def __init__(self, tinfoil):
            self.tinfoil = tinfoil
        def get_file_appends(self, fn):
            return self.tinfoil.get_file_appends(fn)
        def __getattr__(self, name):
            if name == 'overlayed':
                return self.tinfoil.get_overlayed_recipes()
            elif name == 'bbappends':
                return self.tinfoil.run_command('getAllAppends')
            else:
                raise AttributeError("%s instance has no attribute '%s'" % (self.__class__.__name__, name))

    class TinfoilRecipeCacheAdapter:
        """ cooker.recipecache adapter """
        def __init__(self, tinfoil):
            self.tinfoil = tinfoil
            self._cache = {}

        def get_pkg_pn_fn(self):
            pkg_pn = defaultdict(list, self.tinfoil.run_command('getRecipes') or [])
            pkg_fn = {}
            for pn, fnlist in pkg_pn.items():
                for fn in fnlist:
                    pkg_fn[fn] = pn
            self._cache['pkg_pn'] = pkg_pn
            self._cache['pkg_fn'] = pkg_fn

        def __getattr__(self, name):
            # Grab these only when they are requested since they aren't always used
            if name in self._cache:
                return self._cache[name]
            elif name == 'pkg_pn':
                self.get_pkg_pn_fn()
                return self._cache[name]
            elif name == 'pkg_fn':
                self.get_pkg_pn_fn()
                return self._cache[name]
            elif name == 'deps':
                attrvalue = defaultdict(list, self.tinfoil.run_command('getRecipeDepends') or [])
            elif name == 'rundeps':
                attrvalue = defaultdict(lambda: defaultdict(list), self.tinfoil.run_command('getRuntimeDepends') or [])
            elif name == 'runrecs':
                attrvalue = defaultdict(lambda: defaultdict(list), self.tinfoil.run_command('getRuntimeRecommends') or [])
            elif name == 'pkg_pepvpr':
                attrvalue = self.tinfoil.run_command('getRecipeVersions') or {}
            elif name == 'inherits':
                attrvalue = self.tinfoil.run_command('getRecipeInherits') or {}
            elif name == 'bbfile_priority':
                attrvalue = self.tinfoil.run_command('getBbFilePriority') or {}
            elif name == 'pkg_dp':
                attrvalue = self.tinfoil.run_command('getDefaultPreference') or {}
            elif name == 'fn_provides':
                attrvalue = self.tinfoil.run_command('getRecipeProvides') or {}
            elif name == 'packages':
                attrvalue = self.tinfoil.run_command('getRecipePackages') or {}
            elif name == 'packages_dynamic':
                attrvalue = self.tinfoil.run_command('getRecipePackagesDynamic') or {}
            elif name == 'rproviders':
                attrvalue = self.tinfoil.run_command('getRProviders') or {}
            else:
                raise AttributeError("%s instance has no attribute '%s'" % (self.__class__.__name__, name))

            self._cache[name] = attrvalue
            return attrvalue

    def __init__(self, tinfoil):
        self.tinfoil = tinfoil
        self.collection = self.TinfoilCookerCollectionAdapter(tinfoil)
        self.recipecaches = {}
        # FIXME all machines
        self.recipecaches[''] = self.TinfoilRecipeCacheAdapter(tinfoil)
        self._cache = {}
    def __getattr__(self, name):
        # Grab these only when they are requested since they aren't always used
        if name in self._cache:
            return self._cache[name]
        elif name == 'skiplist':
            attrvalue = self.tinfoil.get_skipped_recipes()
        elif name == 'bbfile_config_priorities':
            ret = self.tinfoil.run_command('getLayerPriorities')
            bbfile_config_priorities = []
            for collection, pattern, regex, pri in ret:
                bbfile_config_priorities.append((collection, pattern, re.compile(regex), pri))

            attrvalue = bbfile_config_priorities
        else:
            raise AttributeError("%s instance has no attribute '%s'" % (self.__class__.__name__, name))

        self._cache[name] = attrvalue
        return attrvalue

    def findBestProvider(self, pn):
        return self.tinfoil.find_best_provider(pn)

class TinfoilRecipeInfo:
    """
    Provides a convenient representation of the cached information for a single recipe.
    Some attributes are set on construction, others are read on-demand (which internally
    may result in a remote procedure call to the bitbake server the first time).
    Note that only information which is cached is available through this object - if
    you need other variable values you will need to parse the recipe using
    Tinfoil.parse_recipe().
    """
    def __init__(self, recipecache, d, pn, fn, fns):
        self._recipecache = recipecache
        self._d = d
        self.pn = pn
        self.fn = fn
        self.fns = fns
        self.inherit_files = recipecache.inherits[fn]
        self.depends = recipecache.deps[fn]
        (self.pe, self.pv, self.pr) = recipecache.pkg_pepvpr[fn]
        self._cached_packages = None
        self._cached_rprovides = None
        self._cached_packages_dynamic = None

    def __getattr__(self, name):
        if name == 'alternates':
            return [x for x in self.fns if x != self.fn]
        elif name == 'rdepends':
            return self._recipecache.rundeps[self.fn]
        elif name == 'rrecommends':
            return self._recipecache.runrecs[self.fn]
        elif name == 'provides':
            return self._recipecache.fn_provides[self.fn]
        elif name == 'packages':
            if self._cached_packages is None:
                self._cached_packages = []
                for pkg, fns in self._recipecache.packages.items():
                    if self.fn in fns:
                        self._cached_packages.append(pkg)
            return self._cached_packages
        elif name == 'packages_dynamic':
            if self._cached_packages_dynamic is None:
                self._cached_packages_dynamic = []
                for pkg, fns in self._recipecache.packages_dynamic.items():
                    if self.fn in fns:
                        self._cached_packages_dynamic.append(pkg)
            return self._cached_packages_dynamic
        elif name == 'rprovides':
            if self._cached_rprovides is None:
                self._cached_rprovides = []
                for pkg, fns in self._recipecache.rproviders.items():
                    if self.fn in fns:
                        self._cached_rprovides.append(pkg)
            return self._cached_rprovides
        else:
            raise AttributeError("%s instance has no attribute '%s'" % (self.__class__.__name__, name))
    def inherits(self, only_recipe=False):
        """
        Get the inherited classes for a recipe. Returns the class names only.
        Parameters:
            only_recipe: True to return only the classes inherited by the recipe
                         itself, False to return all classes inherited within
                         the context for the recipe (which includes globally
                         inherited classes).
        """
        if only_recipe:
            global_inherit = [x for x in (self._d.getVar('BBINCLUDED') or '').split() if x.endswith('.bbclass')]
        else:
            global_inherit = []
        for clsfile in self.inherit_files:
            if only_recipe and clsfile in global_inherit:
                continue
            clsname = os.path.splitext(os.path.basename(clsfile))[0]
            yield clsname
    def __str__(self):
        return '%s' % self.pn

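A hedged sketch of how a TinfoilRecipeInfo is typically obtained and read; it assumes a Tinfoil instance prepared with config_only=False, and 'busybox' is only an example target:

    info = tinfoil.get_recipe_info('busybox')
    print(info.pn, info.pv)
    print(sorted(info.inherits()))
    print(info.rdepends)   # read on demand via __getattr__ above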
class Tinfoil:
    """
    Tinfoil - an API for scripts and utilities to query
    BitBake internals and perform build operations.
    """
    def __init__(self, output=sys.stdout, tracking=False):
        # Needed to avoid deprecation warnings with python 2.6
        warnings.filterwarnings("ignore", category=DeprecationWarning)

    def __init__(self, output=sys.stdout, tracking=False, setup_logging=True):
        """
        Create a new tinfoil object.
        Parameters:
            output: specifies where console output should be sent. Defaults
                    to sys.stdout.
            tracking: True to enable variable history tracking, False to
                    disable it (default). Enabling this has a minor
                    performance impact so typically it isn't enabled
                    unless you need to query variable history.
            setup_logging: True to setup a logger so that things like
                    bb.warn() will work immediately and timeout warnings
                    are visible; False to let BitBake do this itself.
        """
        # Set up logging
        self.logger = logging.getLogger('BitBake')
        self.config_data = None
        self.cooker = None
        self.tracking = tracking
        self.ui_module = None
        self.server_connection = None
        self.recipes_parsed = False
        self.quiet = 0
        self.oldhandlers = self.logger.handlers[:]
        if setup_logging:
            # This is the *client-side* logger, nothing to do with
            # logging messages from the server
            bb.msg.logger_create('BitBake', output)
            self.localhandlers = []
            for handler in self.logger.handlers:
                if handler not in self.oldhandlers:
                    self.localhandlers.append(handler)
        self._log_hdlr = logging.StreamHandler(output)
        bb.msg.addDefaultlogFilter(self._log_hdlr)
        format = bb.msg.BBLogFormatter("%(levelname)s: %(message)s")
        if output.isatty():
            format.enable_color()
        self._log_hdlr.setFormatter(format)
        self.logger.addHandler(self._log_hdlr)

        self.config = CookerConfiguration()
        configparams = TinfoilConfigParameters(parse_only=True)
        self.config.setConfigParameters(configparams)
        self.config.setServerRegIdleCallback(self.register_idle_function)
        features = []
        if tracking:
            features.append(CookerFeatures.BASEDATASTORE_TRACKING)
        cleanedvars = bb.utils.clean_environment()
        self.cooker = BBCooker(self.config, features)
        self.config_data = self.cooker.data
        bb.providers.logger.setLevel(logging.ERROR)
        self.cooker_data = None
        for k in cleanedvars:
            os.environ[k] = cleanedvars[k]

    def register_idle_function(self, function, data):
        pass

    def __enter__(self):
        return self

@@ -338,290 +68,30 @@ class Tinfoil:
    def __exit__(self, type, value, traceback):
        self.shutdown()

    def prepare(self, config_only=False, config_params=None, quiet=0, extra_features=None):
        """
        Prepares the underlying BitBake system to be used via tinfoil.
        This function must be called prior to calling any of the other
        functions in the API.
        NOTE: if you call prepare() you must absolutely call shutdown()
        before your code terminates. You can use a "with" block to ensure
        this happens e.g.

            with bb.tinfoil.Tinfoil() as tinfoil:
                tinfoil.prepare()
                ...

        Parameters:
            config_only: True to read only the configuration and not load
                         the cache / parse recipes. This is useful if you just
                         want to query the value of a variable at the global
                         level or you want to do anything else that doesn't
                         involve knowing anything about the recipes in the
                         current configuration. False loads the cache / parses
                         recipes.
            config_params: optionally specify your own configuration
                         parameters. If not specified an instance of
                         TinfoilConfigParameters will be created internally.
            quiet: quiet level controlling console output - equivalent
                         to bitbake's -q/--quiet option. Default of 0 gives
                         the same output level as normal bitbake execution.
            extra_features: extra features to be added to the feature
                         set requested from the server. See
                         CookerFeatures._feature_list for possible
                         features.
        """
        self.quiet = quiet

        if self.tracking:
            extrafeatures = [bb.cooker.CookerFeatures.BASEDATASTORE_TRACKING]
        else:
            extrafeatures = []

        if extra_features:
            extrafeatures += extra_features

        if not config_params:
            config_params = TinfoilConfigParameters(config_only=config_only, quiet=quiet)

        cookerconfig = CookerConfiguration()
        cookerconfig.setConfigParameters(config_params)

        if not config_only:
            # Disable local loggers because the UI module is going to set up its own
            for handler in self.localhandlers:
                self.logger.handlers.remove(handler)
            self.localhandlers = []

        self.server_connection, ui_module = setup_bitbake(config_params,
                                                          cookerconfig,
                                                          extrafeatures)

        self.ui_module = ui_module

        # Ensure the path to bitbake's bin directory is in PATH so that things like
        # bitbake-worker can be run (usually this is the case, but it doesn't have to be)
        path = os.getenv('PATH').split(':')
        bitbakebinpath = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', 'bin'))
        for entry in path:
            if entry.endswith(os.sep):
                entry = entry[:-1]
            if os.path.abspath(entry) == bitbakebinpath:
                break
        else:
            path.insert(0, bitbakebinpath)
            os.environ['PATH'] = ':'.join(path)

        if self.server_connection:
            _server_connections.append(self.server_connection)
            if config_only:
                config_params.updateToServer(self.server_connection.connection, os.environ.copy())
                self.run_command('parseConfiguration')
            else:
                self.run_actions(config_params)
                self.recipes_parsed = True

            self.config_data = bb.data.init()
            connector = TinfoilDataStoreConnector(self, None)
            self.config_data.setVar('_remote_data', connector)
            self.cooker = TinfoilCookerAdapter(self)
            self.cooker_data = self.cooker.recipecaches['']
        else:
            raise Exception('Failed to start bitbake server')
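A hedged end-to-end sketch of the prepare() contract documented above:

    import bb.tinfoil

    with bb.tinfoil.Tinfoil() as tinfoil:
        # config_only=True: just the global configuration, no recipe parsing
        tinfoil.prepare(config_only=True)
        print(tinfoil.config_data.getVar('MACHINE'))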
    def run_actions(self, config_params):
        """
        Run the actions specified in config_params through the UI.
        """
        ret = self.ui_module.main(self.server_connection.connection, self.server_connection.events, config_params)
        if ret:
            raise TinfoilUIException(ret)

    def parseRecipes(self):
        """
        Legacy function - use parse_recipes() instead.
        """
        self.parse_recipes()
        sys.stderr.write("Parsing recipes..")
        self.logger.setLevel(logging.WARNING)

    def parse_recipes(self):
        """
        Load information on all recipes. Normally you should specify
        config_only=False when calling prepare() instead of using this
        function; this function is designed for situations where you need
        to initialise Tinfoil and use it with config_only=True first and
        then conditionally call this function to parse recipes later.
        """
        config_params = TinfoilConfigParameters(config_only=False)
        self.run_actions(config_params)
        self.recipes_parsed = True
        try:
            while self.cooker.state in (state.initial, state.parsing):
                self.cooker.updateCache()
        except KeyboardInterrupt:
            self.cooker.shutdown()
            self.cooker.updateCache()
            sys.exit(2)

    def run_command(self, command, *params):
        """
        Run a command on the server (as implemented in bb.command).
        Note that there are two types of command - synchronous and
        asynchronous; in order to receive the results of asynchronous
        commands you will need to set an appropriate event mask
        using set_event_mask() and listen for the result using
        wait_event() - with the correct event mask you'll at least get
        bb.command.CommandCompleted and possibly other events before
        that depending on the command.
        """
        if not self.server_connection:
            raise Exception('Not connected to server (did you call .prepare()?)')
        self.logger.setLevel(logging.INFO)
        sys.stderr.write("done.\n")

        commandline = [command]
        if params:
            commandline.extend(params)
        result = self.server_connection.connection.runCommand(commandline)
        if result[1]:
            raise TinfoilCommandFailed(result[1])
        return result[0]
        self.cooker_data = self.cooker.recipecaches['']
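For instance, a synchronous command can be run directly (a hedged sketch; 'getVariable' is one of the synchronous commands implemented in bb.command):

    value = tinfoil.run_command('getVariable', 'BB_VERSION')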
    def set_event_mask(self, eventlist):
        """Set the event mask which will be applied within wait_event()"""
        if not self.server_connection:
            raise Exception('Not connected to server (did you call .prepare()?)')
        llevel, debug_domains = bb.msg.constructLogOptions()
        ret = self.run_command('setEventMask', self.server_connection.connection.getEventHandle(), llevel, debug_domains, eventlist)
        if not ret:
            raise Exception('setEventMask failed')

    def wait_event(self, timeout=0):
        """
        Wait for an event from the server for the specified time.
        A timeout of 0 means don't wait if there are no events in the queue.
        Returns the next event in the queue or None if the timeout was
        reached. Note that in order to receive any events you will
        first need to set the internal event mask using set_event_mask()
        (otherwise whatever event mask the UI set up will be in effect).
        """
        if not self.server_connection:
            raise Exception('Not connected to server (did you call .prepare()?)')
        return self.server_connection.events.waitEvent(timeout)
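A hedged sketch of the asynchronous-command pattern the two methods above enable ('parseFiles' is used here only as an example of an asynchronous command):

    import bb.command

    tinfoil.set_event_mask(['bb.command.CommandCompleted',
                            'bb.command.CommandFailed'])
    tinfoil.run_command('parseFiles')
    while True:
        event = tinfoil.wait_event(0.25)
        if isinstance(event, bb.command.CommandCompleted):
            break
        if isinstance(event, bb.command.CommandFailed):
            raise Exception(str(event))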
    def get_overlayed_recipes(self):
        """
        Find recipes which are overlayed (i.e. where recipes exist in multiple layers)
        """
        return defaultdict(list, self.run_command('getOverlayedRecipes'))

    def get_skipped_recipes(self):
        """
        Find recipes which were skipped (i.e. SkipRecipe was raised
        during parsing).
        """
        return OrderedDict(self.run_command('getSkippedRecipes'))

    def get_all_providers(self):
        return defaultdict(list, self.run_command('allProviders'))

    def find_providers(self):
        return self.run_command('findProviders')

    def find_best_provider(self, pn):
        return self.run_command('findBestProvider', pn)

    def get_runtime_providers(self, rdep):
        return self.run_command('getRuntimeProviders', rdep)

    def get_recipe_file(self, pn):
        """
        Get the file name for the specified recipe/target. Raises
        bb.providers.NoProvider if there is no match or the recipe was
        skipped.
        """
        best = self.find_best_provider(pn)
        if not best or (len(best) > 3 and not best[3]):
            skiplist = self.get_skipped_recipes()
            taskdata = bb.taskdata.TaskData(None, skiplist=skiplist)
            skipreasons = taskdata.get_reasons(pn)
            if skipreasons:
                raise bb.providers.NoProvider('%s is unavailable:\n %s' % (pn, ' \n'.join(skipreasons)))
    def prepare(self, config_only = False):
        if not self.cooker_data:
            if config_only:
                self.cooker.parseConfiguration()
                self.cooker_data = self.cooker.recipecaches['']
            else:
                raise bb.providers.NoProvider('Unable to find any recipe file matching "%s"' % pn)
        return best[3]

    def get_file_appends(self, fn):
        """
        Find the bbappends for a recipe file
        """
        return self.run_command('getFileAppends', fn)
    def all_recipes(self, mc='', sort=True):
        """
        Enable iterating over all recipes in the current configuration.
        Returns an iterator over TinfoilRecipeInfo objects created on demand.
        Parameters:
            mc: The multiconfig, default of '' uses the main configuration.
            sort: True to sort recipes alphabetically (default), False otherwise
        """
        recipecache = self.cooker.recipecaches[mc]
        if sort:
            recipes = sorted(recipecache.pkg_pn.items())
        else:
            recipes = recipecache.pkg_pn.items()
        for pn, fns in recipes:
            prov = self.find_best_provider(pn)
            recipe = TinfoilRecipeInfo(recipecache,
                                       self.config_data,
                                       pn=pn,
                                       fn=prov[3],
                                       fns=fns)
            yield recipe
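Iterating recipes with the generator above, as a short sketch (assumes a fully prepared Tinfoil instance):

    for recipe in tinfoil.all_recipes():
        print('%s %s' % (recipe.pn, recipe.pv))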
    def all_recipe_files(self, mc='', variants=True, preferred_only=False):
        """
        Enable iterating over all recipe files in the current configuration.
        Returns an iterator over file paths.
        Parameters:
            mc: The multiconfig, default of '' uses the main configuration.
            variants: True to include variants of recipes created through
                      BBCLASSEXTEND (default) or False to exclude them
            preferred_only: True to include only the preferred recipe where
                      multiple exist providing the same PN, False to list
                      all recipes
        """
        recipecache = self.cooker.recipecaches[mc]
        if preferred_only:
            files = []
            for pn in recipecache.pkg_pn.keys():
                prov = self.find_best_provider(pn)
                files.append(prov[3])
        else:
            files = recipecache.pkg_fn.keys()
        for fn in sorted(files):
            if not variants and fn.startswith('virtual:'):
                continue
            yield fn


    def get_recipe_info(self, pn, mc=''):
        """
        Get information on a specific recipe in the current configuration by name (PN).
        Returns a TinfoilRecipeInfo object created on demand.
        Parameters:
            mc: The multiconfig, default of '' uses the main configuration.
        """
        recipecache = self.cooker.recipecaches[mc]
        prov = self.find_best_provider(pn)
        fn = prov[3]
        if fn:
            actual_pn = recipecache.pkg_fn[fn]
            recipe = TinfoilRecipeInfo(recipecache,
                                       self.config_data,
                                       pn=actual_pn,
                                       fn=fn,
                                       fns=recipecache.pkg_pn[actual_pn])
            return recipe
        else:
            return None

    def parse_recipe(self, pn):
        """
        Parse the specified recipe and return a datastore object
        representing the environment for the recipe.
        """
        fn = self.get_recipe_file(pn)
        return self.parse_recipe_file(fn)
        self.parseRecipes()
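A hedged usage sketch: parse_recipe() requires prepare() to have been called with config_only=False, and 'zlib' is only an example recipe name:

    d = tinfoil.parse_recipe('zlib')
    print(d.getVar('SRC_URI'))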
    def parse_recipe_file(self, fn, appends=True, appendlist=None, config_data=None):
        """
@@ -638,263 +108,43 @@ class Tinfoil:
                           specify config_data then you cannot use a virtual
                           specification for fn.
        """
        if self.tracking:
            # Enable history tracking just for the parse operation
            self.run_command('enableDataTracking')
        try:
            if appends and appendlist == []:
                appends = False
            if config_data:
                dctr = bb.remotedata.RemoteDatastores.transmit_datastore(config_data)
                dscon = self.run_command('parseRecipeFile', fn, appends, appendlist, dctr)
        if appends and appendlist == []:
            appends = False
        if appends:
            if appendlist:
                appendfiles = appendlist
            else:
                dscon = self.run_command('parseRecipeFile', fn, appends, appendlist)
            if dscon:
                return self._reconvert_type(dscon, 'DataStoreConnectionHandle')
            else:
                return None
        finally:
            if self.tracking:
                self.run_command('disableDataTracking')

    def build_file(self, buildfile, task, internal=True):
        """
        Runs the specified task for just a single recipe (i.e. no dependencies).
        This is equivalent to bitbake -b, except with the default internal=True
        no warning about dependencies will be produced, normal info messages
        from the runqueue will be silenced and BuildInit, BuildStarted and
        BuildCompleted events will not be fired.
        """
        return self.run_command('buildFile', buildfile, task, internal)

    def build_targets(self, targets, task=None, handle_events=True, extra_events=None, event_callback=None):
        """
        Builds the specified targets. This is equivalent to a normal invocation
        of bitbake. Has built-in event handling which is enabled by default and
        can be extended if needed.
        Parameters:
            targets:
                One or more targets to build. Can be a list or a
                space-separated string.
            task:
                The task to run; if None then the value of BB_DEFAULT_TASK
                will be used. Default None.
            handle_events:
                True to handle events in a similar way to normal bitbake
                invocation with knotty; False to return immediately (on the
                assumption that the caller will handle the events instead).
                Default True.
            extra_events:
                An optional list of events to add to the event mask (if
                handle_events=True). If you add events here you also need
                to specify a callback function in event_callback that will
                handle the additional events. Default None.
            event_callback:
                An optional function taking a single parameter which
                will be called first upon receiving any event (if
                handle_events=True) so that the caller can override or
                extend the event handling. Default None.
        """
        if isinstance(targets, str):
            targets = targets.split()
        if not task:
            task = self.config_data.getVar('BB_DEFAULT_TASK')

        if handle_events:
            # A reasonable set of default events matching up with those we handle below
            eventmask = [
                        'bb.event.BuildStarted',
                        'bb.event.BuildCompleted',
                        'logging.LogRecord',
                        'bb.event.NoProvider',
                        'bb.command.CommandCompleted',
                        'bb.command.CommandFailed',
                        'bb.build.TaskStarted',
                        'bb.build.TaskFailed',
                        'bb.build.TaskSucceeded',
                        'bb.build.TaskFailedSilent',
                        'bb.build.TaskProgress',
                        'bb.runqueue.runQueueTaskStarted',
                        'bb.runqueue.sceneQueueTaskStarted',
                        'bb.event.ProcessStarted',
                        'bb.event.ProcessProgress',
                        'bb.event.ProcessFinished',
                        ]
            if extra_events:
                eventmask.extend(extra_events)
            ret = self.set_event_mask(eventmask)

        includelogs = self.config_data.getVar('BBINCLUDELOGS')
        loglines = self.config_data.getVar('BBINCLUDELOGS_LINES')

        ret = self.run_command('buildTargets', targets, task)
        if handle_events:
            result = False
            # Borrowed from knotty, instead somewhat hackily we use the helper
            # as the object to store "shutdown" on
            helper = bb.ui.uihelper.BBUIHelper()
            # We set up logging optionally in the constructor so now we need to
            # grab the handlers to pass to TerminalFilter
            console = None
            errconsole = None
            for handler in self.logger.handlers:
                if isinstance(handler, logging.StreamHandler):
                    if handler.stream == sys.stdout:
                        console = handler
                    elif handler.stream == sys.stderr:
                        errconsole = handler
            format_str = "%(levelname)s: %(message)s"
            format = bb.msg.BBLogFormatter(format_str)
            helper.shutdown = 0
            parseprogress = None
            termfilter = bb.ui.knotty.TerminalFilter(helper, helper, console, errconsole, format, quiet=self.quiet)
            try:
                while True:
                    try:
                        event = self.wait_event(0.25)
                        if event:
                            if event_callback and event_callback(event):
                                continue
                            if helper.eventHandler(event):
                                if isinstance(event, bb.build.TaskFailedSilent):
                                    self.logger.warning("Logfile for failed setscene task is %s" % event.logfile)
                                elif isinstance(event, bb.build.TaskFailed):
                                    bb.ui.knotty.print_event_log(event, includelogs, loglines, termfilter)
                                continue
                            if isinstance(event, bb.event.ProcessStarted):
                                if self.quiet > 1:
                                    continue
                                parseprogress = bb.ui.knotty.new_progress(event.processname, event.total)
                                parseprogress.start(False)
                                continue
                            if isinstance(event, bb.event.ProcessProgress):
                                if self.quiet > 1:
                                    continue
                                if parseprogress:
                                    parseprogress.update(event.progress)
                                else:
                                    bb.warn("Got ProcessProgress event for something that never started?")
                                continue
                            if isinstance(event, bb.event.ProcessFinished):
                                if self.quiet > 1:
                                    continue
                                if parseprogress:
                                    parseprogress.finish()
                                parseprogress = None
                                continue
                            if isinstance(event, bb.command.CommandCompleted):
                                result = True
                                break
                            if isinstance(event, bb.command.CommandFailed):
                                self.logger.error(str(event))
                                result = False
                                break
                            if isinstance(event, logging.LogRecord):
                                if event.taskpid == 0 or event.levelno > logging.INFO:
                                    self.logger.handle(event)
                                continue
                            if isinstance(event, bb.event.NoProvider):
                                self.logger.error(str(event))
                                result = False
                                break

                        elif helper.shutdown > 1:
                            break
                        termfilter.updateFooter()
                    except KeyboardInterrupt:
                        termfilter.clearFooter()
                        if helper.shutdown == 1:
                            print("\nSecond Keyboard Interrupt, stopping...\n")
                            ret = self.run_command("stateForceShutdown")
                            if ret and ret[2]:
                                self.logger.error("Unable to cleanly stop: %s" % ret[2])
                        elif helper.shutdown == 0:
                            print("\nKeyboard Interrupt, closing down...\n")
                            interrupted = True
                            ret = self.run_command("stateShutdown")
                            if ret and ret[2]:
                                self.logger.error("Unable to cleanly shutdown: %s" % ret[2])
                        helper.shutdown = helper.shutdown + 1
                    termfilter.clearFooter()
            finally:
                termfilter.finish()
            if helper.failed_tasks:
                result = False
            return result
            if not hasattr(self.cooker, 'collection'):
                raise Exception('You must call tinfoil.prepare() with config_only=False in order to get bbappends')
            appendfiles = self.cooker.collection.get_file_appends(fn)
        else:
            return ret
            appendfiles = None
        if config_data:
            # We have to use a different function here if we're passing in a datastore
            localdata = bb.data.createCopy(config_data)
            envdata = bb.cache.parse_recipe(localdata, fn, appendfiles)['']
        else:
            # Use the standard path
            parser = bb.cache.NoCache(self.cooker.databuilder)
            envdata = parser.loadDataFull(fn, appendfiles)
        return envdata
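A hedged invocation sketch for build_targets(); 'quilt-native' is only an example target, and with the default handle_events=True the return value indicates success:

    result = tinfoil.build_targets('quilt-native', task='do_build')
    if not result:
        print('build failed')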
    def shutdown(self):
        """
        Shut down tinfoil. Disconnects from the server and gracefully
        releases any associated resources. You must call this function if
        prepare() has been called, or use a with... block when you create
        the tinfoil object which will ensure that it gets called.
        """
        if self.server_connection:
            self.run_command('clientComplete')
            _server_connections.remove(self.server_connection)
            bb.event.ui_queue = []
            self.server_connection.terminate()
            self.server_connection = None
        self.cooker.shutdown(force=True)
        self.cooker.post_serve()
        self.cooker.unlockBitbake()
        self.logger.removeHandler(self._log_hdlr)

        # Restore logging handlers to how it looked when we started
        if self.oldhandlers:
            for handler in self.logger.handlers:
                if handler not in self.oldhandlers:
                    self.logger.handlers.remove(handler)
class TinfoilConfigParameters(ConfigParameters):

    def _reconvert_type(self, obj, origtypename):
        """
        Convert an object back to the right type, in the case
        that marshalling has changed it (especially with xmlrpc)
        """
        supported_types = {
            'set': set,
            'DataStoreConnectionHandle': bb.command.DataStoreConnectionHandle,
        }

        origtype = supported_types.get(origtypename, None)
        if origtype is None:
            raise Exception('Unsupported type "%s"' % origtypename)
        if type(obj) == origtype:
            newobj = obj
        elif isinstance(obj, dict):
            # New style class
            newobj = origtype()
            for k,v in obj.items():
                setattr(newobj, k, v)
        else:
            # Assume we can coerce the type
            newobj = origtype(obj)

        if isinstance(newobj, bb.command.DataStoreConnectionHandle):
            connector = TinfoilDataStoreConnector(self, newobj.dsindex)
            newobj = bb.data.init()
            newobj.setVar('_remote_data', connector)

        return newobj


class TinfoilConfigParameters(BitBakeConfigParameters):

    def __init__(self, config_only, **options):
    def __init__(self, **options):
        self.initial_options = options
        # Apply some sane defaults
        if not 'parse_only' in options:
            self.initial_options['parse_only'] = not config_only
        #if not 'status_only' in options:
        #    self.initial_options['status_only'] = config_only
        if not 'ui' in options:
            self.initial_options['ui'] = 'knotty'
        if not 'argv' in options:
            self.initial_options['argv'] = []

        super(TinfoilConfigParameters, self).__init__()

    def parseCommandLine(self, argv=None):
        # We don't want any parameters parsed from the command line
        opts = super(TinfoilConfigParameters, self).parseCommandLine([])
        for key, val in self.initial_options.items():
            setattr(opts[0], key, val)
        return opts
    def parseCommandLine(self, argv=sys.argv):
        class DummyOptions:
            def __init__(self, initial_options):
                for key, val in initial_options.items():
                    setattr(self, key, val)

        return DummyOptions(self.initial_options), None
[Some files were not shown because too many files have changed in this diff]