Mirror of https://git.yoctoproject.org/poky, synced 2026-01-30 13:28:43 +01:00
Compare commits
383 Commits
.gitignore | 15 (vendored)

@@ -1,28 +1,23 @@
*.pyc
*.pyo
/*.patch
/build*/
build*/
pyshtables.py
pstage/
scripts/oe-git-proxy-socks
sources/
meta-*/
!meta-skeleton
!meta-selftest
!meta-hob
hob-image-*.bb
*.swp
*.orig
*.rej
*~
!meta-poky
!meta-yocto
!meta-yocto-bsp
!meta-yocto-imported
/documentation/*/eclipse/
/documentation/*/*.html
/documentation/*/*.pdf
/documentation/*/*.tgz
documentation/user-manual/user-manual.html
documentation/user-manual/user-manual.pdf
documentation/user-manual/user-manual.tgz
pull-*/
bitbake/lib/toaster/contrib/tts/backlog.txt
bitbake/lib/toaster/contrib/tts/log/*
bitbake/lib/toaster/contrib/tts/.cache/*
@@ -1,2 +1,2 @@
# Template settings
TEMPLATECONF=${TEMPLATECONF:-meta-poky/conf}
TEMPLATECONF=${TEMPLATECONF:-meta-yocto/conf}
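The TEMPLATECONF value shown in this hunk is what poky's top-level setup script reads to choose the default build configuration templates. A minimal way to exercise it, assuming a standard poky checkout and using the meta-poky value quoted above, is to override the variable when sourcing the environment script:

    $ cd poky
    $ TEMPLATECONF=meta-poky/conf . ./oe-init-build-env build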
README | 49 (new file)

@@ -0,0 +1,49 @@
Poky
====

Poky is an integration of various components to form a complete prepackaged
build system and development environment. It features support for building
customised embedded device style images. There are reference demo images
featuring a X11/Matchbox/GTK themed UI called Sato. The system supports
cross-architecture application development using QEMU emulation and a
standalone toolchain and SDK with IDE integration.

Additional information on the specifics of hardware that Poky supports
is available in README.hardware. Further hardware support can easily be added
in the form of layers which extend the systems capabilities in a modular way.

As an integration layer Poky consists of several upstream projects such as
BitBake, OpenEmbedded-Core, Yocto documentation and various sources of information
e.g. for the hardware support. Poky is in turn a component of the Yocto Project.

The Yocto Project has extensive documentation about the system including a
reference manual which can be found at:
    http://yoctoproject.org/documentation

OpenEmbedded-Core is a layer containing the core metadata for current versions
of OpenEmbedded. It is distro-less (can build a functional image with
DISTRO = "nodistro") and contains only emulated machine support.

For information about OpenEmbedded, see the OpenEmbedded website:
    http://www.openembedded.org/

Where to Send Patches
=====================

As Poky is an integration repository, patches against the various components
should be sent to their respective upstreams.

bitbake:
    bitbake-devel@lists.openembedded.org

meta-yocto:
    poky@yoctoproject.org

Most everything else should be sent to the OpenEmbedded Core mailing list. If
in doubt, check the oe-core git repository for the content you intend to modify.
Before sending, be sure the patches apply cleanly to the current oe-core git
repository.
    openembedded-core@lists.openembedded.org

Note: The scripts directory should be treated with extra care as it is a mix
of oe-core and poky-specific files.
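As a concrete illustration of the patch routing described in the README above, a single commit can be mailed to the appropriate list with git send-email (the addresses are the ones quoted above; the command itself is only a sketch):

    $ git send-email -M -1 --to bitbake-devel@lists.openembedded.org        # bitbake changes
    $ git send-email -M -1 --to poky@yoctoproject.org                       # meta-yocto changes
    $ git send-email -M -1 --to openembedded-core@lists.openembedded.org    # most other changes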
README.LSB | 26

@@ -1,26 +0,0 @@
OE-Core aims to be able to provide basic LSB compatible images. There
are some challenges for OE as LSB isn't always 100% relevant to its
target embedded and IoT audiences.

One challenge is that the LSB spec is no longer being actively
developed [https://github.com/LinuxStandardBase/lsb] and has
components which are end of life or significantly dated. OE
therefore provides compatibility with the following caveats:

* Qt4 is provided by the separate meta-qt4 layer. Its noted that Qt4
  is end of life and this isn't something the core project regularly
  tests any longer. Users are recommended to group together to support
  maintenance of that layer. [http://git.yoctoproject.org/cgit/cgit.cgi/meta-qt4/]

* mailx has been dropped since its no longer being developed upstream
  and there are better, more modern replacements such as s-nail
  (http://sdaoden.eu/code.html) or mailutils (http://mailutils.org/).

* A few perl modules that were required by LSB 4.x aren't provided:
  libclass-isa, libenv, libdumpvalue, libfile-checktree,
  libi18n-collate, libpod-plainer.

* libpng 1.2 isn't provided; oe-core includes the latest release of libpng
  instead.

* pax (POSIX standard archive) tool is not provided.
@@ -1 +0,0 @@
meta-yocto-bsp/README.hardware
README.hardware | 476 (new file)

@@ -0,0 +1,476 @@
Poky Hardware README
====================

This file gives details about using Poky with the reference machines
supported out of the box. A full list of supported reference target machines
can be found by looking in the following directories:

    meta/conf/machine/
    meta-yocto-bsp/conf/machine/

If you are in doubt about using Poky/OpenEmbedded with your hardware, consult
the documentation for your board/device.

Support for additional devices is normally added by creating BSP layers - for
more information please see the Yocto Board Support Package (BSP) Developer's
Guide - documentation source is in documentation/bspguide or download the PDF
from:

    http://yoctoproject.org/documentation

Support for physical reference hardware has now been split out into a
meta-yocto-bsp layer which can be removed separately from other layers if not
needed.


QEMU Emulation Targets
======================

To simplify development, the build system supports building images to
work with the QEMU emulator in system emulation mode. Several architectures
are currently supported:

  * ARM (qemuarm)
  * x86 (qemux86)
  * x86-64 (qemux86-64)
  * PowerPC (qemuppc)
  * MIPS (qemumips)

Use of the QEMU images is covered in the Yocto Project Reference Manual.
The appropriate MACHINE variable value corresponding to the target is given
in brackets.


Hardware Reference Boards
=========================

The following boards are supported by the meta-yocto-bsp layer:

  * Texas Instruments Beaglebone (beaglebone)
  * Freescale MPC8315E-RDB (mpc8315e-rdb)

For more information see the board's section below. The appropriate MACHINE
variable value corresponding to the board is given in brackets.


Consumer Devices
================

The following consumer devices are supported by the meta-yocto-bsp layer:

  * Intel x86 based PCs and devices (genericx86)
  * Ubiquiti Networks EdgeRouter Lite (edgerouter)

For more information see the device's section below. The appropriate MACHINE
variable value corresponding to the device is given in brackets.



Specific Hardware Documentation
===============================


Intel x86 based PCs and devices (genericx86)
==========================================

The genericx86 MACHINE is tested on the following platforms:

Intel Xeon/Core i-Series:
  + Intel Romley Server: Sandy Bridge Xeon processor, C600 PCH (Patsburg), (Canoe Pass CRB)
  + Intel Romley Server: Ivy Bridge Xeon processor, C600 PCH (Patsburg), (Intel SDP S2R3)
  + Intel Crystal Forest Server: Sandy Bridge Xeon processor, DH89xx PCH (Cave Creek), (Stargo CRB)
  + Intel Chief River Mobile: Ivy Bridge Mobile processor, QM77 PCH (Panther Point-M), (Emerald Lake II CRB, Sabino Canyon CRB)
  + Intel Huron River Mobile: Sandy Bridge processor, QM67 PCH (Cougar Point), (Emerald Lake CRB, EVOC EC7-1817LNAR board)
  + Intel Calpella Platform: Core i7 processor, QM57 PCH (Ibex Peak-M), (Red Fort CRB, Emerson MATXM CORE-411-B)
  + Intel Nehalem/Westmere-EP Server: Xeon 56xx/55xx processors, 5520 chipset, ICH10R IOH (82801), (Hanlan Creek CRB)
  + Intel Nehalem Workstation: Xeon 56xx/55xx processors, System SC5650SCWS (Greencity CRB)
  + Intel Picket Post Server: Xeon 56xx/55xx processors (Jasper Forest), 3420 chipset (Ibex Peak), (Osage CRB)
  + Intel Storage Platform: Sandy Bridge Xeon processor, C600 PCH (Patsburg), (Oak Creek Canyon CRB)
  + Intel Shark Bay Client Platform: Haswell processor, LynxPoint PCH, (Walnut Canyon CRB, Lava Canyon CRB, Basking Ridge CRB, Flathead Creek CRB)
  + Intel Shark Bay Ultrabook Platform: Haswell ULT processor, Lynx Point-LP PCH, (WhiteTip Mountain 1 CRB)

Intel Atom platforms:
  + Intel embedded Menlow: Intel Atom Z510/530 CPU, System Controller Hub US15W (Portwell NANO-8044)
  + Intel Luna Pier: Intel Atom N4xx/D5xx series CPU (aka: Pineview-D & -M), 82801HM I/O Hub (ICH8M), (Advantech AIMB-212, Moon Creek CRB)
  + Intel Queens Bay platform: Intel Atom E6xx CPU (aka: Tunnel Creek), Topcliff EG20T I/O Hub (Emerson NITX-315, Crown Bay CRB, Minnow Board)
  + Intel Fish River Island platform: Intel Atom E6xx CPU (aka: Tunnel Creek), Topcliff EG20T I/O Hub (Kontron KM2M806)
  + Intel Cedar Trail platform: Intel Atom N2000 & D2000 series CPU (aka: Cedarview), NM10 Express Chipset (Norco kit BIS-6630, Cedar Rock CRB)

and is likely to work on many unlisted Atom/Core/Xeon based devices. The MACHINE
type supports ethernet, wifi, sound, and Intel/vesa graphics by default in
addition to common PC input devices, busses, and so on. Note that it does not
included the binary-only graphic drivers used on some Atom platforms, for
accelerated graphics on these machines please refer to meta-intel.

Depending on the device, it can boot from a traditional hard-disk, a USB device,
or over the network. Writing generated images to physical media is
straightforward with a caveat for USB devices. The following examples assume the
target boot device is /dev/sdb, be sure to verify this and use the correct
device as the following commands are run as root and are not reversable.

USB Device:
  1. Build a live image. This image type consists of a simple filesystem
     without a partition table, which is suitable for USB keys, and with the
     default setup for the genericx86 machine, this image type is built
     automatically for any image you build. For example:

     $ bitbake core-image-minimal

  2. Use the "dd" utility to write the image to the raw block device. For
     example:

     # dd if=core-image-minimal-genericx86.hddimg of=/dev/sdb

  If the device fails to boot with "Boot error" displayed, or apparently
  stops just after the SYSLINUX version banner, it is likely the BIOS cannot
  understand the physical layout of the disk (or rather it expects a
  particular layout and cannot handle anything else). There are two possible
  solutions to this problem:

  1. Change the BIOS USB Device setting to HDD mode. The label will vary by
     device, but the idea is to force BIOS to read the Cylinder/Head/Sector
     geometry from the device.

  2. Without such an option, the BIOS generally boots the device in USB-ZIP
     mode. To write an image to a USB device that will be bootable in
     USB-ZIP mode, carry out the following actions:

     a. Determine the geometry of your USB device using fdisk:

        # fdisk /dev/sdb
        Command (m for help): p

        Disk /dev/sdb: 4011 MB, 4011491328 bytes
        124 heads, 62 sectors/track, 1019 cylinders, total 7834944 sectors
        ...

        Command (m for help): q

     b. Configure the USB device for USB-ZIP mode:

        # mkdiskimage -4 /dev/sdb 1019 124 62

        Where 1019, 124 and 62 are the cylinder, head and sectors/track counts
        as reported by fdisk (substitute the values reported for your device).
        When the operation has finished and the access LED (if any) on the
        device stops flashing, remove and reinsert the device to allow the
        kernel to detect the new partition layout.

     c. Copy the contents of the image to the USB-ZIP mode device:

        # mkdir /tmp/image
        # mkdir /tmp/usbkey
        # mount -o loop core-image-minimal-genericx86.hddimg /tmp/image
        # mount /dev/sdb4 /tmp/usbkey
        # cp -rf /tmp/image/* /tmp/usbkey

     d. Install the syslinux boot loader:

        # syslinux /dev/sdb4

     e. Unmount everything:

        # umount /tmp/image
        # umount /tmp/usbkey

  Install the boot device in the target board and configure the BIOS to boot
  from it.

  For more details on the USB-ZIP scenario, see the syslinux documentation:
  http://git.kernel.org/?p=boot/syslinux/syslinux.git;a=blob_plain;f=doc/usbkey.txt;hb=HEAD

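The USB-ZIP steps above lend themselves to a small script. This is only a sketch of what the README describes: the device name, image name and CHS values are placeholders and must be replaced with what fdisk reports for your own USB key.

    #!/bin/sh
    # Sketch: prepare a USB key in USB-ZIP mode and copy a live image onto it.
    # DEVICE, IMAGE and the CHS values below are examples - substitute your own.
    DEVICE=/dev/sdb
    IMAGE=core-image-minimal-genericx86.hddimg
    CYLS=1019 HEADS=124 SECTORS=62          # as reported by "fdisk $DEVICE"

    mkdiskimage -4 "$DEVICE" "$CYLS" "$HEADS" "$SECTORS"   # create the USB-ZIP layout (partition 4)
    mkdir -p /tmp/image /tmp/usbkey
    mount -o loop "$IMAGE" /tmp/image                      # loop-mount the live image
    mount "${DEVICE}4" /tmp/usbkey
    cp -rf /tmp/image/* /tmp/usbkey                        # copy the image contents
    syslinux "${DEVICE}4"                                  # install the boot loader
    umount /tmp/image /tmp/usbkey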
Texas Instruments Beaglebone (beaglebone)
=========================================

The Beaglebone is an ARM Cortex-A8 development board with USB, Ethernet, 2D/3D
accelerated graphics, audio, serial, JTAG, and SD/MMC. The Black adds a faster
CPU, more RAM, eMMC flash and a micro HDMI port. The beaglebone MACHINE is
tested on the following platforms:

  o Beaglebone Black A6
  o Beaglebone A6 (the original "White" model)

The Beaglebone Black has eMMC, while the White does not. Pressing the USER/BOOT
button when powering on will temporarily change the boot order. But for the sake
of simplicity, these instructions assume you have erased the eMMC on the Black,
so its boot behavior matches that of the White and boots off of SD card. To do
this, issue the following commands from the u-boot prompt:

    # mmc dev 1
    # mmc erase 0 512

To further tailor these instructions for your board, please refer to the
documentation at http://www.beagleboard.org/bone and http://www.beagleboard.org/black

From a Linux system with access to the image files perform the following steps
as root, replacing mmcblk0* with the SD card device on your machine (such as sdc
if used via a usb card reader):

  1. Partition and format an SD card:
     # fdisk -lu /dev/mmcblk0

     Disk /dev/mmcblk0: 3951 MB, 3951034368 bytes
     255 heads, 63 sectors/track, 480 cylinders, total 7716864 sectors
     Units = sectors of 1 * 512 = 512 bytes

     Device          Boot  Start     End     Blocks  Id  System
     /dev/mmcblk0p1  *        63  144584      72261   c  Win95 FAT32 (LBA)
     /dev/mmcblk0p2       144585  465884     160650  83  Linux

     # mkfs.vfat -F 16 -n "boot" /dev/mmcblk0p1
     # mke2fs -j -L "root" /dev/mmcblk0p2

  The following assumes the SD card partitions 1 and 2 are mounted at
  /media/boot and /media/root respectively. Removing the card and reinserting
  it will do just that on most modern Linux desktop environments.

  The files referenced below are made available after the build in
  build/tmp/deploy/images.

  2. Install the boot loaders
     # cp MLO-beaglebone /media/boot/MLO
     # cp u-boot-beaglebone.img /media/boot/u-boot.img

  3. Install the root filesystem
     # tar x -C /media/root -f core-image-$IMAGE_TYPE-beaglebone.tar.bz2

  4. If using core-image-base or core-image-sato images, the SD card is ready
     and rootfs already contains the kernel, modules and device tree (DTB)
     files necessary to be booted with U-boot's default configuration, so
     skip directly to step 8.
     For core-image-minimal, proceed through next steps.

  5. If using core-image-minimal rootfs, install the modules
     # tar x -C /media/root -f modules-beaglebone.tgz

  6. If using core-image-minimal rootfs, install the kernel uImage into /boot
     directory of rootfs
     # cp uImage-beaglebone.bin /media/root/boot/uImage

  7. If using core-image-minimal rootfs, also install device tree (DTB) files
     into /boot directory of rootfs
     # cp uImage-am335x-bone.dtb /media/root/boot/am335x-bone.dtb
     # cp uImage-am335x-boneblack.dtb /media/root/boot/am335x-boneblack.dtb

  8. Unmount the SD partitions, insert the SD card into the Beaglebone, and
     boot the Beaglebone

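For repeated provisioning, steps 1-8 above can be collected into one script. The sketch below is an illustration only: it assumes the SD card is /dev/mmcblk0 with the two partitions already created as shown, that the artifacts sit in build/tmp/deploy/images, and that a core-image-minimal rootfs is being installed; adjust the names to match your build.

    #!/bin/sh
    # Sketch: populate an already-partitioned SD card for the Beaglebone.
    CARD=/dev/mmcblk0                        # example device - verify before running
    DEPLOY=build/tmp/deploy/images           # example deploy directory
    IMAGE=core-image-minimal-beaglebone.tar.bz2

    mkfs.vfat -F 16 -n "boot" ${CARD}p1
    mke2fs -j -L "root" ${CARD}p2
    mkdir -p /media/boot /media/root
    mount ${CARD}p1 /media/boot
    mount ${CARD}p2 /media/root
    cp $DEPLOY/MLO-beaglebone /media/boot/MLO
    cp $DEPLOY/u-boot-beaglebone.img /media/boot/u-boot.img
    tar x -C /media/root -f $DEPLOY/$IMAGE
    # Extra steps only needed for core-image-minimal:
    tar x -C /media/root -f $DEPLOY/modules-beaglebone.tgz
    cp $DEPLOY/uImage-beaglebone.bin /media/root/boot/uImage
    cp $DEPLOY/uImage-am335x-bone.dtb /media/root/boot/am335x-bone.dtb
    cp $DEPLOY/uImage-am335x-boneblack.dtb /media/root/boot/am335x-boneblack.dtb
    umount /media/boot /media/root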
Freescale MPC8315E-RDB (mpc8315e-rdb)
=====================================

The MPC8315 PowerPC reference platform (MPC8315E-RDB) is aimed at hardware and
software development of network attached storage (NAS) and digital media server
applications. The MPC8315E-RDB features the PowerQUICC II Pro processor, which
includes a built-in security accelerator.

(Note: you may find it easier to order MPC8315E-RDBA; this appears to be the
same board in an enclosure with accessories. In any case it is fully
compatible with the instructions given here.)

Setup instructions
------------------

You will need the following:
  * NFS root setup on your workstation
  * TFTP server installed on your workstation
  * Straight-thru 9-conductor serial cable (DB9, M/F) connected from your
    PC to UART1
  * Ethernet connected to the first ethernet port on the board

--- Preparation ---

Note: if you have altered your board's ethernet MAC address(es) from the
defaults, or you need to do so because you want multiple boards on the same
network, then you will need to change the values in the dts file (patch
linux/arch/powerpc/boot/dts/mpc8315erdb.dts within the kernel source). If
you have left them at the factory default then you shouldn't need to do
anything here.

--- Booting from NFS root ---

Load the kernel and dtb (device tree blob), and boot the system as follows:

  1. Get the kernel (uImage-mpc8315e-rdb.bin) and dtb (uImage-mpc8315e-rdb.dtb)
     files from the tmp/deploy directory, and make them available on your TFTP
     server.

  2. Connect the board's first serial port to your workstation and then start up
     your favourite serial terminal so that you will be able to interact with
     the serial console. If you don't have a favourite, picocom is suggested:

     $ picocom /dev/ttyUSB0 -b 115200

  3. Power up or reset the board and press a key on the terminal when prompted
     to get to the U-Boot command line

  4. Set up the environment in U-Boot:

     => setenv ipaddr <board ip>
     => setenv serverip <tftp server ip>
     => setenv bootargs root=/dev/nfs rw nfsroot=<nfsroot ip>:<rootfs path> ip=<board ip>:<server ip>:<gateway ip>:255.255.255.0:mpc8315e:eth0:off console=ttyS0,115200

  5. Download the kernel and dtb, and boot:

     => tftp 1000000 uImage-mpc8315e-rdb.bin
     => tftp 2000000 uImage-mpc8315e-rdb.dtb
     => bootm 1000000 - 2000000

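The workstation side of the TFTP/NFS setup is assumed but not spelled out above. A rough sketch follows; the /srv/tftp and /srv/nfs paths, the deploy directory layout and the export options are all assumptions that depend on your distribution and build, so treat this as an outline rather than a recipe.

    # Stage the kernel and dtb where the TFTP server can see them (paths are examples)
    cp tmp/deploy/images/mpc8315e-rdb/uImage-mpc8315e-rdb.bin /srv/tftp/
    cp tmp/deploy/images/mpc8315e-rdb/uImage-mpc8315e-rdb.dtb /srv/tftp/

    # Unpack the rootfs tarball and export it over NFS (example path and options)
    mkdir -p /srv/nfs/mpc8315e
    tar -xjpf tmp/deploy/images/mpc8315e-rdb/core-image-minimal-mpc8315e-rdb.tar.bz2 -C /srv/nfs/mpc8315e
    echo '/srv/nfs/mpc8315e *(rw,no_root_squash,sync,no_subtree_check)' >> /etc/exports
    exportfs -ra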
Ubiquiti Networks EdgeRouter Lite (edgerouter)
==============================================

The EdgeRouter Lite is part of the EdgeMax series. It is a MIPS64 router
(based on the Cavium Octeon processor) with 512MB of RAM, which uses an
internal USB pendrive for storage.

Setup instructions
------------------

You will need the following:
  * NFS root setup on your workstation
  * TFTP server installed on your workstation
  * RJ45 -> serial ("rollover") cable connected from your PC to the CONSOLE
    port on the board
  * Ethernet connected to the first ethernet port on the board

--- Preparation ---

Build an image (e.g. core-image-minimal) using "edgerouter" as the MACHINE.
In the following instruction it is based on core-image-minimal. Another target
may be similiar with it.

--- Booting from NFS root ---

Load the kernel, and boot the system as follows:

  1. Get the kernel (vmlinux) file from the tmp/deploy/images/edgerouter
     directory, and make them available on your TFTP server.

  2. Connect the board's first serial port to your workstation and then start up
     your favourite serial terminal so that you will be able to interact with
     the serial console. If you don't have a favourite, picocom is suggested:

     $ picocom /dev/ttyS0 -b 115200

  3. Power up or reset the board and press a key on the terminal when prompted
     to get to the U-Boot command line

  4. Set up the environment in U-Boot:

     => setenv ipaddr <board ip>
     => setenv serverip <tftp server ip>

  5. Download the kernel and boot:

     => tftp tftp $loadaddr vmlinux
     => bootoctlinux $loadaddr coremask=0x3 root=/dev/nfs rw nfsroot=<nfsroot ip>:<rootfs path> ip=<board ip>:<server ip>:<gateway ip>:<netmask>:edgerouter:eth0:off mtdparts=phys_mapped_flash:512k(boot0),512k(boot1),64k@3072k(eeprom)

--- Booting from USB root ---

To boot from the USB disk, you either need to remove it from the edgerouter
box and populate it from another computer, or use a previously booted NFS
image and populate from the edgerouter itself.

Type 1: Mounted USB disk
------------------------

To boot from the USB disk there are two available partitions on the factory
USB storage. The rest of this guide assumes that these partitions are left
intact. If you change the partition scheme, you must update your boot method
appropriately.

The standard partitions are:

  - 1: vfat partition containing factory kernels
  - 2: ext3 partition for the root filesystem.

You can place the kernel on either partition 1, or partition 2, but the roofs
must go on partition 2 (due to its size).

Note: If you place the kernel on the ext3 partition, you must re-create the
ext3 filesystem, since the factory u-boot can only handle 128 byte inodes and
cannot read the partition otherwise.

Steps:

  1. Remove the USB disk from the edgerouter and insert it into a computer
     that has access to your build artifacts.

  2. Copy the kernel image to the USB storage (assuming discovered as 'sdb' on
     the development machine):

     2a) if booting from vfat

         # mount /dev/sdb1 /mnt
         # cp tmp/deploy/images/edgerouter/vmlinux /mnt
         # umount /mnt

     2b) if booting from ext3

         # mkfs.ext3 -I 128 /dev/sdb2
         # mount /dev/sdb2 /mnt
         # mkdir /mnt/boot
         # cp tmp/deploy/images/edgerouter/vmlinux /mnt/boot
         # umount /mnt

  3. Extract the rootfs to the USB storage ext3 partition

     # mount /dev/sdb2 /mnt
     # tar -xvjpf core-image-minimal-XXX.tar.bz2 -C /mnt
     # umount /mnt

  4. Reboot the board and press a key on the terminal when prompted to get to the U-Boot
     command line:

  5. Load the kernel and boot:

     5a) vfat boot

         => fatload usb 0:1 $loadaddr vmlinux

     5b) ext3 boot

         => ext2load usb 0:2 $loadaddr boot/vmlinux

     => bootoctlinux $loadaddr coremask=0x3 root=/dev/sda2 rw rootwait mtdparts=phys_mapped_flash:512k(boot0),512k(boot1),64k@3072k(eeprom)


Type 2: NFS
-----------

Note: If you place the kernel on the ext3 partition, you must re-create the
ext3 filesystem, since the factory u-boot can only handle 128 byte inodes and
cannot read the partition otherwise.

These boot instructions assume that you have recreated the ext3 filesystem with
128 byte inodes, you have an updated uboot or you are running and image capable
of making the filesystem on the board itself.


  1. Boot from NFS root

  2. Mount the USB disk partition 2 and then extract the contents of
     tmp/deploy/core-image-XXXX.tar.bz2 into it.

     Before starting, copy core-image-minimal-xxx.tar.bz2 and vmlinux into
     rootfs path on your workstation.

     and then,

     # mount /dev/sda2 /media/sda2
     # tar -xvjpf core-image-minimal-XXX.tar.bz2 -C /media/sda2
     # cp vmlinux /media/sda2/boot/vmlinux
     # umount /media/sda2
     # reboot

  3. Reboot the board and press a key on the terminal when prompted to get to the U-Boot
     command line:

     # reboot

  4. Load the kernel and boot:

     => ext2load usb 0:2 $loadaddr boot/vmlinux
     => bootoctlinux $loadaddr coremask=0x3 root=/dev/sda2 rw rootwait mtdparts=phys_mapped_flash:512k(boot0),512k(boot1),64k@3072k(eeprom)

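The "ext3 boot" preparation from Type 1 above can also be scripted on the build host. This is a sketch only: the /dev/sdb device name and the image path are examples and must be checked against your own machine and deploy directory before running anything as root.

    #!/bin/sh
    # Sketch: prepare the EdgeRouter's USB disk (seen here as /dev/sdb) for ext3 boot.
    DISK=/dev/sdb                                              # example device - verify first
    IMAGE=tmp/deploy/images/edgerouter/core-image-minimal-edgerouter.tar.bz2   # example name

    mkfs.ext3 -I 128 ${DISK}2                  # factory u-boot needs 128-byte inodes
    mount ${DISK}2 /mnt
    tar -xvjpf "$IMAGE" -C /mnt                # unpack the root filesystem
    mkdir -p /mnt/boot
    cp tmp/deploy/images/edgerouter/vmlinux /mnt/boot
    umount /mnt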
@@ -1 +0,0 @@
meta-poky/README.poky

README.qemu | 15

@@ -1,15 +0,0 @@
QEMU Emulation Targets
======================

To simplify development, the build system supports building images to
work with the QEMU emulator in system emulation mode. Several architectures
are currently supported in 32 and 64 bit variants:

  * ARM (qemuarm + qemuarm64)
  * x86 (qemux86 + qemux86-64)
  * PowerPC (qemuppc only)
  * MIPS (qemumips + qemumips64)

Use of the QEMU images is covered in the Yocto Project Reference Manual.
The appropriate MACHINE variable value corresponding to the target is given
in brackets.
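For any of the MACHINE values listed above, an emulator image can be built and booted with the runqemu helper shipped in poky's scripts directory. A typical sequence looks roughly like the following; the machine and image names are just examples and the exact runqemu arguments vary between releases:

    $ . ./oe-init-build-env
    $ MACHINE=qemux86-64 bitbake core-image-minimal
    $ runqemu qemux86-64 core-image-minimal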
@@ -5,15 +5,6 @@ The following external components are distributed with this software:
* The Toaster Simple UI application is based upon the Django project template, the files of which are covered by the BSD license and are copyright (c) Django Software
  Foundation and individual contributors.

* Twitter Bootstrap (including Glyphicons), redistributed under the MIT license
* Twitter Bootstrap (including Glyphicons), redistributed under the Apache License 2.0.

* jQuery is redistributed under the MIT license.

* Twitter typeahead.js redistributed under the MIT license. Note that the JS source has one small modification, so the full unminified file is currently included to make it obvious where this is.

* jsrender is redistributed under the MIT license.

* QUnit is redistributed under the MIT license.

* Font Awesome fonts redistributed under the SIL Open Font License 1.1

* simplediff is distributed under the zlib license.
@@ -1,35 +0,0 @@
Bitbake
=======

BitBake is a generic task execution engine that allows shell and Python tasks to be run
efficiently and in parallel while working within complex inter-task dependency constraints.
One of BitBake's main users, OpenEmbedded, takes this core and builds embedded Linux software
stacks using a task-oriented approach.

For information about Bitbake, see the OpenEmbedded website:
    http://www.openembedded.org/

Bitbake plain documentation can be found under the doc directory or its integrated
html version at the Yocto Project website:
    http://yoctoproject.org/documentation

Contributing
------------

Please refer to
    http://www.openembedded.org/wiki/How_to_submit_a_patch_to_OpenEmbedded
for guidelines on how to submit patches, just note that the latter documentation is intended
for OpenEmbedded (and its core) not bitbake patches (bitbake-devel@lists.openembedded.org)
but in general main guidelines apply. Once the commit(s) have been created, the way to send
the patch is through git-send-email. For example, to send the last commit (HEAD) on current
branch, type:

    git send-email -M -1 --to bitbake-devel@lists.openembedded.org

Mailing list:

    http://lists.openembedded.org/mailman/listinfo/bitbake-devel

Source code:

    http://git.openembedded.org/bitbake/
@@ -1,4 +1,4 @@
#!/usr/bin/env python3
#!/usr/bin/env python
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
@@ -23,34 +23,340 @@
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import os
import sys

import sys, logging
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)),
                                'lib'))

import optparse
import warnings
from traceback import format_exception
try:
    import bb
except RuntimeError as exc:
    sys.exit(str(exc))

from bb import event
import bb.msg
from bb import cooker
from bb import ui
from bb import server
from bb import cookerdata
from bb.main import bitbake_main, BitBakeConfigParameters, BBMainException

if sys.getfilesystemencoding() != "utf-8":
    sys.exit("Please use a locale setting which supports UTF-8 (such as LANG=en_US.UTF-8).\nPython can't change the filesystem locale after loading so we need a UTF-8 when Python starts or things won't work.")
__version__ = "1.22.0"
logger = logging.getLogger("BitBake")

__version__ = "1.37.0"
# Python multiprocessing requires /dev/shm
if not os.access('/dev/shm', os.W_OK | os.X_OK):
    sys.exit("FATAL: /dev/shm does not exist or is not writable")

# Unbuffer stdout to avoid log truncation in the event
# of an unorderly exit as well as to provide timely
# updates to log files for use with tail
try:
    if sys.stdout.name == '<stdout>':
        sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
except:
    pass


def get_ui(config):
    if not config.ui:
        # modify 'ui' attribute because it is also read by cooker
        config.ui = os.environ.get('BITBAKE_UI', 'knotty')

    interface = config.ui

    try:
        # Dynamically load the UI based on the ui name. Although we
        # suggest a fixed set this allows you to have flexibility in which
        # ones are available.
        module = __import__("bb.ui", fromlist = [interface])
        return getattr(module, interface)
    except AttributeError:
        sys.exit("FATAL: Invalid user interface '%s' specified.\n"
                 "Valid interfaces: depexp, goggle, ncurses, hob, knotty [default]." % interface)


# Display bitbake/OE warnings via the BitBake.Warnings logger, ignoring others"""
warnlog = logging.getLogger("BitBake.Warnings")
_warnings_showwarning = warnings.showwarning
def _showwarning(message, category, filename, lineno, file=None, line=None):
    if file is not None:
        if _warnings_showwarning is not None:
            _warnings_showwarning(message, category, filename, lineno, file, line)
    else:
        s = warnings.formatwarning(message, category, filename, lineno)
        warnlog.warn(s)

warnings.showwarning = _showwarning
warnings.filterwarnings("ignore")
warnings.filterwarnings("default", module="(<string>$|(oe|bb)\.)")
warnings.filterwarnings("ignore", category=PendingDeprecationWarning)
warnings.filterwarnings("ignore", category=ImportWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning, module="<string>$")
warnings.filterwarnings("ignore", message="With-statements now directly support multiple context managers")

class BitBakeConfigParameters(cookerdata.ConfigParameters):

    def parseCommandLine(self):
        parser = optparse.OptionParser(
            version = "BitBake Build Tool Core version %s, %%prog version %s" % (bb.__version__, __version__),
            usage = """%prog [options] [recipename/target ...]

    Executes the specified task (default is 'build') for a given set of target recipes (.bb files).
    It is assumed there is a conf/bblayers.conf available in cwd or in BBPATH which
    will provide the layer, BBFILES and other configuration information.""")

        parser.add_option("-b", "--buildfile", help = "Execute tasks from a specific .bb recipe directly. WARNING: Does not handle any dependencies from other recipes.",
                          action = "store", dest = "buildfile", default = None)

        parser.add_option("-k", "--continue", help = "Continue as much as possible after an error. While the target that failed and anything depending on it cannot be built, as much as possible will be built before stopping.",
                          action = "store_false", dest = "abort", default = True)

        parser.add_option("-a", "--tryaltconfigs", help = "Continue with builds by trying to use alternative providers where possible.",
                          action = "store_true", dest = "tryaltconfigs", default = False)

        parser.add_option("-f", "--force", help = "Force the specified targets/task to run (invalidating any existing stamp file).",
                          action = "store_true", dest = "force", default = False)

        parser.add_option("-c", "--cmd", help = "Specify the task to execute. The exact options available depend on the metadata. Some examples might be 'compile' or 'populate_sysroot' or 'listtasks' may give a list of the tasks available.",
                          action = "store", dest = "cmd")

        parser.add_option("-C", "--clear-stamp", help = "Invalidate the stamp for the specified task such as 'compile' and then run the default task for the specified target(s).",
                          action = "store", dest = "invalidate_stamp")

        parser.add_option("-r", "--read", help = "Read the specified file before bitbake.conf.",
                          action = "append", dest = "prefile", default = [])

        parser.add_option("-R", "--postread", help = "Read the specified file after bitbake.conf.",
                          action = "append", dest = "postfile", default = [])

        parser.add_option("-v", "--verbose", help = "Output more log message data to the terminal.",
                          action = "store_true", dest = "verbose", default = False)

        parser.add_option("-D", "--debug", help = "Increase the debug level. You can specify this more than once.",
                          action = "count", dest="debug", default = 0)

        parser.add_option("-n", "--dry-run", help = "Don't execute, just go through the motions.",
                          action = "store_true", dest = "dry_run", default = False)

        parser.add_option("-S", "--dump-signatures", help = "Dump out the signature construction information, with no task execution. Parameters are passed to the signature handling code, use 'none' if no specific handler is required.",
                          action = "append", dest = "dump_signatures", default = [])

        parser.add_option("-p", "--parse-only", help = "Quit after parsing the BB recipes.",
                          action = "store_true", dest = "parse_only", default = False)

        parser.add_option("-s", "--show-versions", help = "Show current and preferred versions of all recipes.",
                          action = "store_true", dest = "show_versions", default = False)

        parser.add_option("-e", "--environment", help = "Show the global or per-package environment complete with information about where variables were set/changed.",
                          action = "store_true", dest = "show_environment", default = False)

        parser.add_option("-g", "--graphviz", help = "Save dependency tree information for the specified targets in the dot syntax.",
                          action = "store_true", dest = "dot_graph", default = False)

        parser.add_option("-I", "--ignore-deps", help = """Assume these dependencies don't exist and are already provided (equivalent to ASSUME_PROVIDED). Useful to make dependency graphs more appealing""",
                          action = "append", dest = "extra_assume_provided", default = [])

        parser.add_option("-l", "--log-domains", help = """Show debug logging for the specified logging domains""",
                          action = "append", dest = "debug_domains", default = [])

        parser.add_option("-P", "--profile", help = "Profile the command and save reports.",
                          action = "store_true", dest = "profile", default = False)

        parser.add_option("-u", "--ui", help = "The user interface to use (e.g. knotty, hob, depexp).",
                          action = "store", dest = "ui")

        parser.add_option("-t", "--servertype", help = "Choose which server to use, process or xmlrpc.",
                          action = "store", dest = "servertype")

        parser.add_option("", "--revisions-changed", help = "Set the exit code depending on whether upstream floating revisions have changed or not.",
                          action = "store_true", dest = "revisions_changed", default = False)

        parser.add_option("", "--server-only", help = "Run bitbake without a UI, only starting a server (cooker) process.",
                          action = "store_true", dest = "server_only", default = False)

        parser.add_option("-B", "--bind", help = "The name/address for the bitbake server to bind to.",
                          action = "store", dest = "bind", default = False)

        parser.add_option("", "--no-setscene", help = "Do not run any setscene tasks. sstate will be ignored and everything needed, built.",
                          action = "store_true", dest = "nosetscene", default = False)

        parser.add_option("", "--remote-server", help = "Connect to the specified server.",
                          action = "store", dest = "remote_server", default = False)

        parser.add_option("-m", "--kill-server", help = "Terminate the remote server.",
                          action = "store_true", dest = "kill_server", default = False)

        parser.add_option("", "--observe-only", help = "Connect to a server as an observing-only client.",
                          action = "store_true", dest = "observe_only", default = False)

        parser.add_option("", "--status-only", help = "Check the status of the remote bitbake server.",
                          action = "store_true", dest = "status_only", default = False)

        options, targets = parser.parse_args(sys.argv)

        # some environmental variables set also configuration options
        if "BBSERVER" in os.environ:
            options.servertype = "xmlrpc"
            options.remote_server = os.environ["BBSERVER"]

        return options, targets[1:]


def start_server(servermodule, configParams, configuration, features):
    server = servermodule.BitBakeServer()
    if configParams.bind:
        (host, port) = configParams.bind.split(':')
        server.initServer((host, int(port)))
        configuration.interface = [ server.serverImpl.host, server.serverImpl.port ]
    else:
        server.initServer()
        configuration.interface = []

    try:
        configuration.setServerRegIdleCallback(server.getServerIdleCB())

        cooker = bb.cooker.BBCooker(configuration, features)

        server.addcooker(cooker)
        server.saveConnectionDetails()
    except Exception as e:
        exc_info = sys.exc_info()
        while hasattr(server, "event_queue"):
            try:
                import queue
            except ImportError:
                import Queue as queue
            try:
                event = server.event_queue.get(block=False)
            except (queue.Empty, IOError):
                break
            if isinstance(event, logging.LogRecord):
                logger.handle(event)
        raise exc_info[1], None, exc_info[2]
    server.detach()
    return server



def main():

    configParams = BitBakeConfigParameters()
    configuration = cookerdata.CookerConfiguration()
    configuration.setConfigParameters(configParams)

    ui_module = get_ui(configParams)

    # Server type can be xmlrpc or process currently, if nothing is specified,
    # the default server is process
    if configParams.servertype:
        server_type = configParams.servertype
    else:
        server_type = 'process'

    try:
        module = __import__("bb.server", fromlist = [server_type])
        servermodule = getattr(module, server_type)
    except AttributeError:
        sys.exit("FATAL: Invalid server type '%s' specified.\n"
                 "Valid interfaces: xmlrpc, process [default]." % server_type)

    if configParams.server_only:
        if configParams.servertype != "xmlrpc":
            sys.exit("FATAL: If '--server-only' is defined, we must set the servertype as 'xmlrpc'.\n")
        if not configParams.bind:
            sys.exit("FATAL: The '--server-only' option requires a name/address to bind to with the -B option.\n")
        if configParams.remote_server:
            sys.exit("FATAL: The '--server-only' option conflicts with %s.\n" %
                     ("the BBSERVER environment variable" if "BBSERVER" in os.environ else "the '--remote-server' option" ))

    if configParams.bind and configParams.servertype != "xmlrpc":
        sys.exit("FATAL: If '-B' or '--bind' is defined, we must set the servertype as 'xmlrpc'.\n")

    if configParams.remote_server and configParams.servertype != "xmlrpc":
        sys.exit("FATAL: If '--remote-server' is defined, we must set the servertype as 'xmlrpc'.\n")

    if configParams.observe_only and (not configParams.remote_server or configParams.bind):
        sys.exit("FATAL: '--observe-only' can only be used by UI clients connecting to a server.\n")

    if "BBDEBUG" in os.environ:
        level = int(os.environ["BBDEBUG"])
        if level > configuration.debug:
            configuration.debug = level

    bb.msg.init_msgconfig(configParams.verbose, configuration.debug,
                          configuration.debug_domains)

    # Ensure logging messages get sent to the UI as events
    handler = bb.event.LogHandler()
    if not configParams.status_only:
        # In status only mode there are no logs and no UI
        logger.addHandler(handler)

    # Clear away any spurious environment variables while we stoke up the cooker
    cleanedvars = bb.utils.clean_environment()

    featureset = []
    if not configParams.server_only:
        # Collect the feature set for the UI
        featureset = getattr(ui_module, "featureSet", [])

    if not configParams.remote_server:
        # we start a server with a given configuration
        server = start_server(servermodule, configParams, configuration, featureset)
        bb.event.ui_queue = []
    else:
        # we start a stub server that is actually a XMLRPClient that connects to a real server
        server = servermodule.BitBakeXMLRPCClient(configParams.observe_only)
        server.saveConnectionDetails(configParams.remote_server)
        server.saveConnectionConfigParams(configParams)

    if not configParams.server_only:
        if configParams.status_only:
            try:
                server_connection = server.establishConnection(featureset)
            except:
                sys.exit(1)
            if not server_connection:
                sys.exit(1)
            server_connection.terminate()
            sys.exit(0)

        # Setup a connection to the server (cooker)
        server_connection = server.establishConnection(featureset)
        if not server_connection:
            if configParams.kill_server:
                bb.fatal("Server already killed")
            configParams.bind = configParams.remote_server
            start_server(servermodule, configParams, configuration, featureset)
            bb.event.ui_queue = []
            server_connection = server.establishConnection(featureset)

        # Restore the environment in case the UI needs it
        for k in cleanedvars:
            os.environ[k] = cleanedvars[k]

        logger.removeHandler(handler)

        try:
            return ui_module.main(server_connection.connection, server_connection.events, configParams)
        finally:
            bb.event.ui_queue = []
            server_connection.terminate()
    else:
        print("server address: %s, server port: %s" % (server.serverImpl.host, server.serverImpl.port))
        return 0

    return 1

if __name__ == "__main__":
    if __version__ != bb.__version__:
        sys.exit("Bitbake core version and program version mismatch!")
    try:
        sys.exit(bitbake_main(BitBakeConfigParameters(sys.argv),
                              cookerdata.CookerConfiguration()))
    except BBMainException as err:
        sys.exit(err)
        ret = main()
    except bb.BBHandledException:
        sys.exit(1)
        ret = 1
    except Exception:
        ret = 1
        import traceback
        traceback.print_exc()
        sys.exit(1)
    sys.exit(ret)

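The server-related options registered in the old parser above (--server-only, -B/--bind, -t/--servertype, --remote-server, --status-only, -m) can be exercised from a shell roughly as follows. This is only an illustration of how the flags fit together; the host and port are examples:

    # Start a bitbake server (cooker) with no UI, listening on localhost:12345
    $ bitbake --server-only -t xmlrpc -B localhost:12345

    # Point subsequent bitbake invocations (the UI side) at that server
    $ export BBSERVER=localhost:12345
    $ bitbake core-image-minimal
    $ bitbake --status-only && echo "server is alive"

    # Shut the server down when finished
    $ bitbake -m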
@@ -1,9 +1,9 @@
|
||||
#!/usr/bin/env python3
|
||||
#!/usr/bin/env python
|
||||
|
||||
# bitbake-diffsigs
|
||||
# BitBake task signature data comparison utility
|
||||
#
|
||||
# Copyright (C) 2012-2013, 2017 Intel Corporation
|
||||
# Copyright (C) 2012-2013 Intel Corporation
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License version 2 as
|
||||
@@ -22,162 +22,101 @@ import os
|
||||
import sys
|
||||
import warnings
|
||||
import fnmatch
|
||||
import argparse
|
||||
import optparse
|
||||
import logging
|
||||
import pickle
|
||||
|
||||
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), 'lib'))
|
||||
|
||||
import bb.tinfoil
|
||||
import bb.siggen
|
||||
import bb.msg
|
||||
|
||||
logger = bb.msg.logger_create('bitbake-diffsigs')
|
||||
def logger_create(name, output=sys.stderr):
|
||||
logger = logging.getLogger(name)
|
||||
console = logging.StreamHandler(output)
|
||||
format = bb.msg.BBLogFormatter("%(levelname)s: %(message)s")
|
||||
if output.isatty():
|
||||
format.enable_color()
|
||||
console.setFormatter(format)
|
||||
logger.addHandler(console)
|
||||
logger.setLevel(logging.INFO)
|
||||
return logger
|
||||
|
||||
def find_siginfo(tinfoil, pn, taskname, sigs=None):
|
||||
result = None
|
||||
tinfoil.set_event_mask(['bb.event.FindSigInfoResult',
|
||||
'logging.LogRecord',
|
||||
'bb.command.CommandCompleted',
|
||||
'bb.command.CommandFailed'])
|
||||
ret = tinfoil.run_command('findSigInfo', pn, taskname, sigs)
|
||||
if ret:
|
||||
while True:
|
||||
event = tinfoil.wait_event(1)
|
||||
if event:
|
||||
if isinstance(event, bb.command.CommandCompleted):
|
||||
break
|
||||
elif isinstance(event, bb.command.CommandFailed):
|
||||
logger.error(str(event))
|
||||
sys.exit(2)
|
||||
elif isinstance(event, bb.event.FindSigInfoResult):
|
||||
result = event.result
|
||||
elif isinstance(event, logging.LogRecord):
|
||||
logger.handle(event)
|
||||
else:
|
||||
logger.error('No result returned from findSigInfo command')
|
||||
sys.exit(2)
|
||||
return result
|
||||
logger = logger_create('bitbake-diffsigs')
|
||||
|
||||
def find_compare_task(bbhandler, pn, taskname, sig1=None, sig2=None, color=False):
def find_compare_task(bbhandler, pn, taskname):
""" Find the most recent signature files for the specified PN/task and compare them """

if not hasattr(bb.siggen, 'find_siginfo'):
logger.error('Metadata does not support finding signature data files')
sys.exit(1)

if not taskname.startswith('do_'):
taskname = 'do_%s' % taskname

if sig1 and sig2:
sigfiles = find_siginfo(bbhandler, pn, taskname, [sig1, sig2])
if len(sigfiles) == 0:
logger.error('No sigdata files found matching %s %s matching either %s or %s' % (pn, taskname, sig1, sig2))
sys.exit(1)
elif not sig1 in sigfiles:
logger.error('No sigdata files found matching %s %s with signature %s' % (pn, taskname, sig1))
sys.exit(1)
elif not sig2 in sigfiles:
logger.error('No sigdata files found matching %s %s with signature %s' % (pn, taskname, sig2))
sys.exit(1)
latestfiles = [sigfiles[sig1], sigfiles[sig2]]
filedates = bb.siggen.find_siginfo(pn, taskname, None, bbhandler.config_data)
latestfiles = sorted(filedates.keys(), key=lambda f: filedates[f])[-2:]
if not latestfiles:
logger.error('No sigdata files found matching %s %s' % (pn, taskname))
sys.exit(1)
elif len(latestfiles) < 2:
logger.error('Only one matching sigdata file found for the specified task (%s %s)' % (pn, taskname))
sys.exit(1)
else:
filedates = find_siginfo(bbhandler, pn, taskname)
latestfiles = sorted(filedates.keys(), key=lambda f: filedates[f])[-3:]
if not latestfiles:
logger.error('No sigdata files found matching %s %s' % (pn, taskname))
sys.exit(1)
elif len(latestfiles) < 2:
logger.error('Only one matching sigdata file found for the specified task (%s %s)' % (pn, taskname))
sys.exit(1)
# Define recursion callback
def recursecb(key, hash1, hash2):
hashes = [hash1, hash2]
hashfiles = bb.siggen.find_siginfo(key, None, hashes, bbhandler.config_data)

# Define recursion callback
def recursecb(key, hash1, hash2):
hashes = [hash1, hash2]
hashfiles = find_siginfo(bbhandler, key, None, hashes)
recout = []
if len(hashfiles) == 2:
out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb)
recout.extend(list(' ' + l for l in out2))
else:
recout.append("Unable to find matching sigdata for %s with hashes %s or %s" % (key, hash1, hash2))

recout = []
if len(hashfiles) == 0:
recout.append("Unable to find matching sigdata for %s with hashes %s or %s" % (key, hash1, hash2))
elif not hash1 in hashfiles:
recout.append("Unable to find matching sigdata for %s with hash %s" % (key, hash1))
elif not hash2 in hashfiles:
recout.append("Unable to find matching sigdata for %s with hash %s" % (key, hash2))
else:
out2 = bb.siggen.compare_sigfiles(hashfiles[hash1], hashfiles[hash2], recursecb, color=color)
for change in out2:
for line in change.splitlines():
recout.append(' ' + line)
return recout

return recout

# Recurse into signature comparison
logger.debug("Signature file (previous): %s" % latestfiles[-2])
logger.debug("Signature file (latest): %s" % latestfiles[-1])
output = bb.siggen.compare_sigfiles(latestfiles[-2], latestfiles[-1], recursecb, color=color)
if output:
print('\n'.join(output))
# Recurse into signature comparison
output = bb.siggen.compare_sigfiles(latestfiles[0], latestfiles[1], recursecb)
if output:
print '\n'.join(output)
sys.exit(0)

parser = argparse.ArgumentParser(
description="Compares siginfo/sigdata files written out by BitBake")
parser = optparse.OptionParser(
description = "Compares siginfo/sigdata files written out by BitBake",
usage = """
%prog -t recipename taskname
%prog sigdatafile1 sigdatafile2
%prog sigdatafile1""")

parser.add_argument('-d', '--debug',
help='Enable debug output',
action='store_true')
parser.add_option("-t", "--task",
help = "find the signature data files for last two runs of the specified task and compare them",
action="store", dest="taskargs", nargs=2, metavar='recipename taskname')

parser.add_argument('--color',
help='Colorize output (where %(metavar)s is %(choices)s)',
choices=['auto', 'always', 'never'], default='auto', metavar='color')

parser.add_argument("-t", "--task",
help="find the signature data files for last two runs of the specified task and compare them",
action="store", dest="taskargs", nargs=2, metavar=('recipename', 'taskname'))

parser.add_argument("-s", "--signature",
help="With -t/--task, specify the signatures to look for instead of taking the last two",
action="store", dest="sigargs", nargs=2, metavar=('fromsig', 'tosig'))

parser.add_argument("sigdatafile1",
help="First signature file to compare (or signature file to dump, if second not specified). Not used when using -t/--task.",
action="store", nargs='?')

parser.add_argument("sigdatafile2",
help="Second signature file to compare",
action="store", nargs='?')

options = parser.parse_args()

if options.debug:
logger.setLevel(logging.DEBUG)

color = (options.color == 'always' or (options.color == 'auto' and sys.stdout.isatty()))
options, args = parser.parse_args(sys.argv)

if options.taskargs:
with bb.tinfoil.Tinfoil() as tinfoil:
tinfoil.prepare(config_only=True)
if options.sigargs:
find_compare_task(tinfoil, options.taskargs[0], options.taskargs[1], options.sigargs[0], options.sigargs[1], color=color)
else:
find_compare_task(tinfoil, options.taskargs[0], options.taskargs[1], color=color)
tinfoil = bb.tinfoil.Tinfoil()
tinfoil.prepare(config_only = True)
find_compare_task(tinfoil, options.taskargs[0], options.taskargs[1])
else:
if options.sigargs:
logger.error('-s/--signature can only be used together with -t/--task')
sys.exit(1)
try:
if options.sigdatafile1 and options.sigdatafile2:
output = bb.siggen.compare_sigfiles(options.sigdatafile1, options.sigdatafile2, color=color)
elif options.sigdatafile1:
output = bb.siggen.dump_sigfile(options.sigdatafile1)
else:
logger.error('Must specify signature file(s) or -t/--task')
parser.print_help()
if len(args) == 1:
parser.print_help()
else:
import cPickle
try:
if len(args) == 2:
output = bb.siggen.dump_sigfile(sys.argv[1])
else:
output = bb.siggen.compare_sigfiles(sys.argv[1], sys.argv[2])
except IOError as e:
logger.error(str(e))
sys.exit(1)
except cPickle.UnpicklingError, EOFError:
logger.error('Invalid signature data - ensure you are specifying sigdata/siginfo files')
sys.exit(1)
except IOError as e:
logger.error(str(e))
sys.exit(1)
except (pickle.UnpicklingError, EOFError):
logger.error('Invalid signature data - ensure you are specifying sigdata/siginfo files')
sys.exit(1)

if output:
print('\n'.join(output))
if output:
print '\n'.join(output)

@@ -1,4 +1,4 @@
#!/usr/bin/env python3
#!/usr/bin/env python

# bitbake-dumpsig
# BitBake task signature dump utility
@@ -23,72 +23,43 @@ import sys
import warnings
import optparse
import logging
import pickle

sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), 'lib'))

import bb.tinfoil
import bb.siggen
import bb.msg

logger = bb.msg.logger_create('bitbake-dumpsig')
def logger_create(name, output=sys.stderr):
logger = logging.getLogger(name)
console = logging.StreamHandler(output)
format = bb.msg.BBLogFormatter("%(levelname)s: %(message)s")
if output.isatty():
format.enable_color()
console.setFormatter(format)
logger.addHandler(console)
logger.setLevel(logging.INFO)
return logger

def find_siginfo_task(bbhandler, pn, taskname):
""" Find the most recent signature file for the specified PN/task """

if not hasattr(bb.siggen, 'find_siginfo'):
logger.error('Metadata does not support finding signature data files')
sys.exit(1)

if not taskname.startswith('do_'):
taskname = 'do_%s' % taskname

filedates = bb.siggen.find_siginfo(pn, taskname, None, bbhandler.config_data)
latestfiles = sorted(filedates.keys(), key=lambda f: filedates[f])[-1:]
if not latestfiles:
logger.error('No sigdata files found matching %s %s' % (pn, taskname))
sys.exit(1)

return latestfiles[0]
logger = logger_create('bitbake-dumpsig')

parser = optparse.OptionParser(
description = "Dumps siginfo/sigdata files written out by BitBake",
usage = """
%prog -t recipename taskname
%prog sigdatafile""")

parser.add_option("-D", "--debug",
help = "enable debug",
action = "store_true", dest="debug", default = False)

parser.add_option("-t", "--task",
help = "find the signature data file for the specified task",
action="store", dest="taskargs", nargs=2, metavar='recipename taskname')

options, args = parser.parse_args(sys.argv)

if options.debug:
logger.setLevel(logging.DEBUG)

if options.taskargs:
tinfoil = bb.tinfoil.Tinfoil()
tinfoil.prepare(config_only = True)
file = find_siginfo_task(tinfoil, options.taskargs[0], options.taskargs[1])
logger.debug("Signature file: %s" % file)
elif len(args) == 1:
if len(args) == 1:
parser.print_help()
sys.exit(0)
else:
file = args[1]
import cPickle
try:
output = bb.siggen.dump_sigfile(args[1])
except IOError as e:
logger.error(str(e))
sys.exit(1)
except cPickle.UnpicklingError, EOFError:
logger.error('Invalid signature data - ensure you are specifying a sigdata/siginfo file')
sys.exit(1)

try:
output = bb.siggen.dump_sigfile(file)
except IOError as e:
logger.error(str(e))
sys.exit(1)
except (pickle.UnpicklingError, EOFError):
logger.error('Invalid signature data - ensure you are specifying a sigdata/siginfo file')
sys.exit(1)

if output:
print('\n'.join(output))
if output:
print '\n'.join(output)

@@ -1,11 +1,11 @@
#!/usr/bin/env python3
#!/usr/bin/env python

# This script has subcommands which operate against your bitbake layers, either
# displaying useful information, or acting against them.
# See the help output for details on available commands.

# Copyright (C) 2011 Mentor Graphics Corporation
# Copyright (C) 2011-2015 Intel Corporation
# Copyright (C) 2012 Intel Corporation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
@@ -20,91 +20,707 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import cmd
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
import argparse
|
||||
import signal
|
||||
import fnmatch
|
||||
from collections import defaultdict
|
||||
import re
|
||||
|
||||
bindir = os.path.dirname(__file__)
|
||||
topdir = os.path.dirname(bindir)
|
||||
sys.path[0:0] = [os.path.join(topdir, 'lib')]
|
||||
|
||||
import bb.cache
|
||||
import bb.cooker
|
||||
import bb.providers
|
||||
import bb.utils
|
||||
import bb.tinfoil
|
||||
import bb.msg
|
||||
|
||||
logger = bb.msg.logger_create('bitbake-layers', sys.stdout)
|
||||
|
||||
def main():
|
||||
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
|
||||
parser = argparse.ArgumentParser(
|
||||
description="BitBake layers utility",
|
||||
epilog="Use %(prog)s <subcommand> --help to get help on a specific command",
|
||||
add_help=False)
|
||||
parser.add_argument('-d', '--debug', help='Enable debug output', action='store_true')
|
||||
parser.add_argument('-q', '--quiet', help='Print only errors', action='store_true')
|
||||
parser.add_argument('-F', '--force', help='Force add without recipe parse verification', action='store_true')
|
||||
parser.add_argument('--color', choices=['auto', 'always', 'never'], default='auto', help='Colorize output (where %(metavar)s is %(choices)s)', metavar='COLOR')
|
||||
|
||||
global_args, unparsed_args = parser.parse_known_args()
|
||||
|
||||
# Help is added here rather than via add_help=True, as we don't want it to
|
||||
# be handled by parse_known_args()
|
||||
parser.add_argument('-h', '--help', action='help', default=argparse.SUPPRESS,
|
||||
help='show this help message and exit')
|
||||
subparsers = parser.add_subparsers(title='subcommands', metavar='<subcommand>')
|
||||
subparsers.required = True
|
||||
|
||||
if global_args.debug:
|
||||
logger.setLevel(logging.DEBUG)
|
||||
elif global_args.quiet:
|
||||
logger.setLevel(logging.ERROR)
|
||||
|
||||
# Need to re-run logger_create with color argument
|
||||
# (will be the same logger since it has the same name)
|
||||
bb.msg.logger_create('bitbake-layers', output=sys.stdout, color=global_args.color)
|
||||
|
||||
plugins = []
|
||||
tinfoil = bb.tinfoil.Tinfoil(tracking=True)
|
||||
tinfoil.logger.setLevel(logger.getEffectiveLevel())
|
||||
try:
|
||||
tinfoil.prepare(True)
|
||||
for path in ([topdir] +
|
||||
tinfoil.config_data.getVar('BBPATH').split(':')):
|
||||
pluginpath = os.path.join(path, 'lib', 'bblayers')
|
||||
bb.utils.load_plugins(logger, plugins, pluginpath)
|
||||
|
||||
registered = False
|
||||
for plugin in plugins:
|
||||
if hasattr(plugin, 'register_commands'):
|
||||
registered = True
|
||||
plugin.register_commands(subparsers)
|
||||
if hasattr(plugin, 'tinfoil_init'):
|
||||
plugin.tinfoil_init(tinfoil)
|
||||
|
||||
if not registered:
|
||||
logger.error("No commands registered - missing plugins?")
|
||||
sys.exit(1)
|
||||
|
||||
args = parser.parse_args(unparsed_args, namespace=global_args)
|
||||
|
||||
if getattr(args, 'parserecipes', False):
|
||||
tinfoil.config_data.disableTracking()
|
||||
tinfoil.parse_recipes()
|
||||
tinfoil.config_data.enableTracking()
|
||||
|
||||
return args.func(args)
|
||||
finally:
|
||||
tinfoil.shutdown()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
try:
|
||||
ret = main()
|
||||
except bb.BBHandledException:
|
||||
ret = 1
|
||||
except Exception:
|
||||
ret = 1
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
sys.exit(ret)
|
||||
logger = logging.getLogger('BitBake')
|
||||
|
||||
|
||||
def main(args):
|
||||
cmds = Commands()
|
||||
if args:
|
||||
# Allow user to specify e.g. show-layers instead of show_layers
|
||||
args = [args[0].replace('-', '_')] + args[1:]
|
||||
cmds.onecmd(' '.join(args))
|
||||
else:
|
||||
cmds.do_help('')
|
||||
return cmds.returncode
|
||||
|
||||
|
||||
class Commands(cmd.Cmd):
|
||||
def __init__(self):
|
||||
self.bbhandler = None
|
||||
self.returncode = 0
|
||||
self.bblayers = []
|
||||
cmd.Cmd.__init__(self)
|
||||
|
||||
def init_bbhandler(self, config_only = False):
|
||||
if not self.bbhandler:
|
||||
self.bbhandler = bb.tinfoil.Tinfoil()
|
||||
self.bblayers = (self.bbhandler.config_data.getVar('BBLAYERS', True) or "").split()
|
||||
self.bbhandler.prepare(config_only)
|
||||
|
||||
def default(self, line):
|
||||
"""Handle unrecognised commands"""
|
||||
sys.stderr.write("Unrecognised command or option\n")
|
||||
self.do_help('')
|
||||
|
||||
def do_help(self, topic):
|
||||
"""display general help or help on a specified command"""
|
||||
if topic:
|
||||
sys.stdout.write('%s: ' % topic)
|
||||
cmd.Cmd.do_help(self, topic.replace('-', '_'))
|
||||
else:
|
||||
sys.stdout.write("usage: bitbake-layers <command> [arguments]\n\n")
|
||||
sys.stdout.write("Available commands:\n")
|
||||
procnames = list(set(self.get_names()))
|
||||
for procname in procnames:
|
||||
if procname[:3] == 'do_':
|
||||
sys.stdout.write(" %s\n" % procname[3:].replace('_', '-'))
|
||||
doc = getattr(self, procname).__doc__
|
||||
if doc:
|
||||
sys.stdout.write(" %s\n" % doc.splitlines()[0])
|
||||
|
||||
def do_show_layers(self, args):
|
||||
"""show current configured layers"""
|
||||
self.init_bbhandler(config_only = True)
|
||||
logger.plain("%s %s %s" % ("layer".ljust(20), "path".ljust(40), "priority"))
|
||||
logger.plain('=' * 74)
|
||||
for layerdir in self.bblayers:
|
||||
layername = self.get_layer_name(layerdir)
|
||||
layerpri = 0
|
||||
for layer, _, regex, pri in self.bbhandler.cooker.recipecache.bbfile_config_priorities:
|
||||
if regex.match(os.path.join(layerdir, 'test')):
|
||||
layerpri = pri
|
||||
break
|
||||
|
||||
logger.plain("%s %s %d" % (layername.ljust(20), layerdir.ljust(40), layerpri))
|
||||
|
||||
|
||||
def version_str(self, pe, pv, pr = None):
|
||||
verstr = "%s" % pv
|
||||
if pr:
|
||||
verstr = "%s-%s" % (verstr, pr)
|
||||
if pe:
|
||||
verstr = "%s:%s" % (pe, verstr)
|
||||
return verstr
|
||||
|
||||
|
||||
def do_show_overlayed(self, args):
|
||||
"""list overlayed recipes (where the same recipe exists in another layer)
|
||||
|
||||
usage: show-overlayed [-f] [-s]
|
||||
|
||||
Lists the names of overlayed recipes and the available versions in each
|
||||
layer, with the preferred version first. Note that skipped recipes that
|
||||
are overlayed will also be listed, with a " (skipped)" suffix.
|
||||
|
||||
Options:
|
||||
-f instead of the default formatting, list filenames of higher priority
|
||||
recipes with the ones they overlay indented underneath
|
||||
-s only list overlayed recipes where the version is the same
|
||||
"""
|
||||
self.init_bbhandler()
|
||||
|
||||
show_filenames = False
|
||||
show_same_ver_only = False
|
||||
for arg in args.split():
|
||||
if arg == '-f':
|
||||
show_filenames = True
|
||||
elif arg == '-s':
|
||||
show_same_ver_only = True
|
||||
else:
|
||||
sys.stderr.write("show-overlayed: invalid option %s\n" % arg)
|
||||
self.do_help('')
|
||||
return
|
||||
|
||||
items_listed = self.list_recipes('Overlayed recipes', None, True, show_same_ver_only, show_filenames, True)
|
||||
|
||||
# Check for overlayed .bbclass files
|
||||
classes = defaultdict(list)
|
||||
for layerdir in self.bblayers:
|
||||
classdir = os.path.join(layerdir, 'classes')
|
||||
if os.path.exists(classdir):
|
||||
for classfile in os.listdir(classdir):
|
||||
if os.path.splitext(classfile)[1] == '.bbclass':
|
||||
classes[classfile].append(classdir)
|
||||
|
||||
# Locating classes and other files is a bit more complicated than recipes -
|
||||
# layer priority is not a factor; instead BitBake uses the first matching
|
||||
# file in BBPATH, which is manipulated directly by each layer's
|
||||
# conf/layer.conf in turn, thus the order of layers in bblayers.conf is a
|
||||
# factor - however, each layer.conf is free to either prepend or append to
|
||||
# BBPATH (or indeed do crazy stuff with it). Thus the order in BBPATH might
|
||||
# not be exactly the order present in bblayers.conf either.
|
||||
bbpath = str(self.bbhandler.config_data.getVar('BBPATH', True))
|
||||
overlayed_class_found = False
|
||||
for (classfile, classdirs) in classes.items():
|
||||
if len(classdirs) > 1:
|
||||
if not overlayed_class_found:
|
||||
logger.plain('=== Overlayed classes ===')
|
||||
overlayed_class_found = True
|
||||
|
||||
mainfile = bb.utils.which(bbpath, os.path.join('classes', classfile))
|
||||
if show_filenames:
|
||||
logger.plain('%s' % mainfile)
|
||||
else:
|
||||
# We effectively have to guess the layer here
|
||||
logger.plain('%s:' % classfile)
|
||||
mainlayername = '?'
|
||||
for layerdir in self.bblayers:
|
||||
classdir = os.path.join(layerdir, 'classes')
|
||||
if mainfile.startswith(classdir):
|
||||
mainlayername = self.get_layer_name(layerdir)
|
||||
logger.plain(' %s' % mainlayername)
|
||||
for classdir in classdirs:
|
||||
fullpath = os.path.join(classdir, classfile)
|
||||
if fullpath != mainfile:
|
||||
if show_filenames:
|
||||
print(' %s' % fullpath)
|
||||
else:
|
||||
print(' %s' % self.get_layer_name(os.path.dirname(classdir)))
|
||||
|
||||
if overlayed_class_found:
|
||||
items_listed = True;
|
||||
|
||||
if not items_listed:
|
||||
logger.plain('No overlayed files found.')
|
||||
|
||||
|
||||
def do_show_recipes(self, args):
|
||||
"""list available recipes, showing the layer they are provided by
|
||||
|
||||
usage: show-recipes [-f] [-m] [pnspec]
|
||||
|
||||
Lists the names of overlayed recipes and the available versions in each
|
||||
layer, with the preferred version first. Optionally you may specify
|
||||
pnspec to match a specified recipe name (supports wildcards). Note that
|
||||
skipped recipes will also be listed, with a " (skipped)" suffix.
|
||||
|
||||
Options:
|
||||
-f instead of the default formatting, list filenames of higher priority
|
||||
recipes with other available recipes indented underneath
|
||||
-m only list where multiple recipes (in the same layer or different
|
||||
layers) exist for the same recipe name
|
||||
"""
|
||||
self.init_bbhandler()
|
||||
|
||||
show_filenames = False
|
||||
show_multi_provider_only = False
|
||||
pnspec = None
|
||||
title = 'Available recipes:'
|
||||
for arg in args.split():
|
||||
if arg == '-f':
|
||||
show_filenames = True
|
||||
elif arg == '-m':
|
||||
show_multi_provider_only = True
|
||||
elif not arg.startswith('-'):
|
||||
pnspec = arg
|
||||
title = 'Available recipes matching %s:' % pnspec
|
||||
else:
|
||||
sys.stderr.write("show-recipes: invalid option %s\n" % arg)
|
||||
self.do_help('')
|
||||
return
|
||||
self.list_recipes(title, pnspec, False, False, show_filenames, show_multi_provider_only)
|
||||
|
||||
|
||||
def list_recipes(self, title, pnspec, show_overlayed_only, show_same_ver_only, show_filenames, show_multi_provider_only):
|
||||
pkg_pn = self.bbhandler.cooker.recipecache.pkg_pn
|
||||
(latest_versions, preferred_versions) = bb.providers.findProviders(self.bbhandler.config_data, self.bbhandler.cooker.recipecache, pkg_pn)
|
||||
allproviders = bb.providers.allProviders(self.bbhandler.cooker.recipecache)
|
||||
|
||||
# Ensure we list skipped recipes
|
||||
# We are largely guessing about PN, PV and the preferred version here,
|
||||
# but we have no choice since skipped recipes are not fully parsed
|
||||
skiplist = self.bbhandler.cooker.skiplist.keys()
|
||||
skiplist.sort( key=lambda fileitem: self.bbhandler.cooker.collection.calc_bbfile_priority(fileitem) )
|
||||
skiplist.reverse()
|
||||
for fn in skiplist:
|
||||
recipe_parts = os.path.splitext(os.path.basename(fn))[0].split('_')
|
||||
p = recipe_parts[0]
|
||||
if len(recipe_parts) > 1:
|
||||
ver = (None, recipe_parts[1], None)
|
||||
else:
|
||||
ver = (None, 'unknown', None)
|
||||
allproviders[p].append((ver, fn))
|
||||
if not p in pkg_pn:
|
||||
pkg_pn[p] = 'dummy'
|
||||
preferred_versions[p] = (ver, fn)
|
||||
|
||||
def print_item(f, pn, ver, layer, ispref):
|
||||
if f in skiplist:
|
||||
skipped = ' (skipped)'
|
||||
else:
|
||||
skipped = ''
|
||||
if show_filenames:
|
||||
if ispref:
|
||||
logger.plain("%s%s", f, skipped)
|
||||
else:
|
||||
logger.plain(" %s%s", f, skipped)
|
||||
else:
|
||||
if ispref:
|
||||
logger.plain("%s:", pn)
|
||||
logger.plain(" %s %s%s", layer.ljust(20), ver, skipped)
|
||||
|
||||
preffiles = []
|
||||
items_listed = False
|
||||
for p in sorted(pkg_pn):
|
||||
if pnspec:
|
||||
if not fnmatch.fnmatch(p, pnspec):
|
||||
continue
|
||||
|
||||
if len(allproviders[p]) > 1 or not show_multi_provider_only:
|
||||
pref = preferred_versions[p]
|
||||
preffile = bb.cache.Cache.virtualfn2realfn(pref[1])[0]
|
||||
if preffile not in preffiles:
|
||||
preflayer = self.get_file_layer(preffile)
|
||||
multilayer = False
|
||||
same_ver = True
|
||||
provs = []
|
||||
for prov in allproviders[p]:
|
||||
provfile = bb.cache.Cache.virtualfn2realfn(prov[1])[0]
|
||||
provlayer = self.get_file_layer(provfile)
|
||||
provs.append((provfile, provlayer, prov[0]))
|
||||
if provlayer != preflayer:
|
||||
multilayer = True
|
||||
if prov[0] != pref[0]:
|
||||
same_ver = False
|
||||
|
||||
if (multilayer or not show_overlayed_only) and (same_ver or not show_same_ver_only):
|
||||
if not items_listed:
|
||||
logger.plain('=== %s ===' % title)
|
||||
items_listed = True
|
||||
print_item(preffile, p, self.version_str(pref[0][0], pref[0][1]), preflayer, True)
|
||||
for (provfile, provlayer, provver) in provs:
|
||||
if provfile != preffile:
|
||||
print_item(provfile, p, self.version_str(provver[0], provver[1]), provlayer, False)
|
||||
# Ensure we don't show two entries for BBCLASSEXTENDed recipes
|
||||
preffiles.append(preffile)
|
||||
|
||||
return items_listed
|
||||
|
||||
|
||||
def do_flatten(self, args):
|
||||
"""flattens layer configuration into a separate output directory.
|
||||
|
||||
usage: flatten [layer1 layer2 [layer3]...] <outputdir>
|
||||
|
||||
Takes the specified layers (or all layers in the current layer
|
||||
configuration if none are specified) and builds a "flattened" directory
|
||||
containing the contents of all layers, with any overlayed recipes removed
|
||||
and bbappends appended to the corresponding recipes. Note that some manual
|
||||
cleanup may still be necessary afterwards, in particular:
|
||||
|
||||
* where non-recipe files (such as patches) are overwritten (the flatten
|
||||
command will show a warning for these)
|
||||
* where anything beyond the normal layer setup has been added to
|
||||
layer.conf (only the lowest priority number layer's layer.conf is used)
|
||||
* overridden/appended items from bbappends will need to be tidied up
|
||||
* when the flattened layers do not have the same directory structure (the
|
||||
flatten command should show a warning when this will cause a problem)
|
||||
|
||||
Warning: if you flatten several layers where another layer is intended to
|
||||
be used "inbetween" them (in layer priority order) such that recipes /
|
||||
bbappends in the layers interact, and then attempt to use the new output
|
||||
layer together with that other layer, you may no longer get the same
|
||||
build results (as the layer priority order has effectively changed).
|
||||
"""
|
||||
arglist = args.split()
|
||||
if len(arglist) < 1:
|
||||
logger.error('Please specify an output directory')
|
||||
self.do_help('flatten')
|
||||
return
|
||||
|
||||
if len(arglist) == 2:
|
||||
logger.error('If you specify layers to flatten you must specify at least two')
|
||||
self.do_help('flatten')
|
||||
return
|
||||
|
||||
outputdir = arglist[-1]
|
||||
if os.path.exists(outputdir) and os.listdir(outputdir):
|
||||
logger.error('Directory %s exists and is non-empty, please clear it out first' % outputdir)
|
||||
return
|
||||
|
||||
self.init_bbhandler()
|
||||
layers = self.bblayers
|
||||
if len(arglist) > 2:
|
||||
layernames = arglist[:-1]
|
||||
found_layernames = []
|
||||
found_layerdirs = []
|
||||
for layerdir in layers:
|
||||
layername = self.get_layer_name(layerdir)
|
||||
if layername in layernames:
|
||||
found_layerdirs.append(layerdir)
|
||||
found_layernames.append(layername)
|
||||
|
||||
for layername in layernames:
|
||||
if not layername in found_layernames:
|
||||
logger.error('Unable to find layer %s in current configuration, please run "%s show-layers" to list configured layers' % (layername, os.path.basename(sys.argv[0])))
|
||||
return
|
||||
layers = found_layerdirs
|
||||
else:
|
||||
layernames = []
|
||||
|
||||
# Ensure a specified path matches our list of layers
|
||||
def layer_path_match(path):
|
||||
for layerdir in layers:
|
||||
if path.startswith(os.path.join(layerdir, '')):
|
||||
return layerdir
|
||||
return None
|
||||
|
||||
appended_recipes = []
|
||||
for layer in layers:
|
||||
overlayed = []
|
||||
for f in self.bbhandler.cooker.collection.overlayed.iterkeys():
|
||||
for of in self.bbhandler.cooker.collection.overlayed[f]:
|
||||
if of.startswith(layer):
|
||||
overlayed.append(of)
|
||||
|
||||
logger.plain('Copying files from %s...' % layer )
|
||||
for root, dirs, files in os.walk(layer):
|
||||
for f1 in files:
|
||||
f1full = os.sep.join([root, f1])
|
||||
if f1full in overlayed:
|
||||
logger.plain(' Skipping overlayed file %s' % f1full )
|
||||
else:
|
||||
ext = os.path.splitext(f1)[1]
|
||||
if ext != '.bbappend':
|
||||
fdest = f1full[len(layer):]
|
||||
fdest = os.path.normpath(os.sep.join([outputdir,fdest]))
|
||||
bb.utils.mkdirhier(os.path.dirname(fdest))
|
||||
if os.path.exists(fdest):
|
||||
if f1 == 'layer.conf' and root.endswith('/conf'):
|
||||
logger.plain(' Skipping layer config file %s' % f1full )
|
||||
continue
|
||||
else:
|
||||
logger.warn('Overwriting file %s', fdest)
|
||||
bb.utils.copyfile(f1full, fdest)
|
||||
if ext == '.bb':
|
||||
if f1 in self.bbhandler.cooker.collection.appendlist:
|
||||
appends = self.bbhandler.cooker.collection.appendlist[f1]
|
||||
if appends:
|
||||
logger.plain(' Applying appends to %s' % fdest )
|
||||
for appendname in appends:
|
||||
if layer_path_match(appendname):
|
||||
self.apply_append(appendname, fdest)
|
||||
appended_recipes.append(f1)
|
||||
|
||||
# Take care of when some layers are excluded and yet we have included bbappends for those recipes
|
||||
for recipename in self.bbhandler.cooker.collection.appendlist.iterkeys():
|
||||
if recipename not in appended_recipes:
|
||||
appends = self.bbhandler.cooker.collection.appendlist[recipename]
|
||||
first_append = None
|
||||
for appendname in appends:
|
||||
layer = layer_path_match(appendname)
|
||||
if layer:
|
||||
if first_append:
|
||||
self.apply_append(appendname, first_append)
|
||||
else:
|
||||
fdest = appendname[len(layer):]
|
||||
fdest = os.path.normpath(os.sep.join([outputdir,fdest]))
|
||||
bb.utils.mkdirhier(os.path.dirname(fdest))
|
||||
bb.utils.copyfile(appendname, fdest)
|
||||
first_append = fdest
|
||||
|
||||
# Get the regex for the first layer in our list (which is where the conf/layer.conf file will
|
||||
# have come from)
|
||||
first_regex = None
|
||||
layerdir = layers[0]
|
||||
for layername, pattern, regex, _ in self.bbhandler.cooker.recipecache.bbfile_config_priorities:
|
||||
if regex.match(os.path.join(layerdir, 'test')):
|
||||
first_regex = regex
|
||||
break
|
||||
|
||||
if first_regex:
|
||||
# Find the BBFILES entries that match (which will have come from this conf/layer.conf file)
|
||||
bbfiles = str(self.bbhandler.config_data.getVar('BBFILES', True)).split()
|
||||
bbfiles_layer = []
|
||||
for item in bbfiles:
|
||||
if first_regex.match(item):
|
||||
newpath = os.path.join(outputdir, item[len(layerdir)+1:])
|
||||
bbfiles_layer.append(newpath)
|
||||
|
||||
if bbfiles_layer:
|
||||
# Check that all important layer files match BBFILES
|
||||
for root, dirs, files in os.walk(outputdir):
|
||||
for f1 in files:
|
||||
ext = os.path.splitext(f1)[1]
|
||||
if ext in ['.bb', '.bbappend']:
|
||||
f1full = os.sep.join([root, f1])
|
||||
entry_found = False
|
||||
for item in bbfiles_layer:
|
||||
if fnmatch.fnmatch(f1full, item):
|
||||
entry_found = True
|
||||
break
|
||||
if not entry_found:
|
||||
logger.warning("File %s does not match the flattened layer's BBFILES setting, you may need to edit conf/layer.conf or move the file elsewhere" % f1full)
|
||||
|
||||
def get_file_layer(self, filename):
|
||||
for layer, _, regex, _ in self.bbhandler.cooker.recipecache.bbfile_config_priorities:
|
||||
if regex.match(filename):
|
||||
for layerdir in self.bblayers:
|
||||
if regex.match(os.path.join(layerdir, 'test')) and re.match(layerdir, filename):
|
||||
return self.get_layer_name(layerdir)
|
||||
return "?"
|
||||
|
||||
def get_file_layerdir(self, filename):
|
||||
for layer, _, regex, _ in self.bbhandler.cooker.recipecache.bbfile_config_priorities:
|
||||
if regex.match(filename):
|
||||
for layerdir in self.bblayers:
|
||||
if regex.match(os.path.join(layerdir, 'test')) and re.match(layerdir, filename):
|
||||
return layerdir
|
||||
return "?"
|
||||
|
||||
def remove_layer_prefix(self, f):
|
||||
"""Remove the layer_dir prefix, e.g., f = /path/to/layer_dir/foo/blah, the
|
||||
return value will be: layer_dir/foo/blah"""
|
||||
f_layerdir = self.get_file_layerdir(f)
|
||||
prefix = os.path.join(os.path.dirname(f_layerdir), '')
|
||||
return f[len(prefix):] if f.startswith(prefix) else f
|
||||
|
||||
def get_layer_name(self, layerdir):
|
||||
return os.path.basename(layerdir.rstrip(os.sep))
|
||||
|
||||
def apply_append(self, appendname, recipename):
|
||||
appendfile = open(appendname, 'r')
|
||||
recipefile = open(recipename, 'a')
|
||||
recipefile.write('\n')
|
||||
recipefile.write('##### bbappended from %s #####\n' % self.get_file_layer(appendname))
|
||||
recipefile.writelines(appendfile.readlines())
|
||||
recipefile.close()
|
||||
appendfile.close()
|
||||
|
||||
def do_show_appends(self, args):
|
||||
"""list bbappend files and recipe files they apply to
|
||||
|
||||
usage: show-appends
|
||||
|
||||
Recipes are listed with the bbappends that apply to them as subitems.
|
||||
"""
|
||||
self.init_bbhandler()
|
||||
if not self.bbhandler.cooker.collection.appendlist:
|
||||
logger.plain('No append files found')
|
||||
return
|
||||
|
||||
logger.plain('=== Appended recipes ===')
|
||||
|
||||
pnlist = list(self.bbhandler.cooker_data.pkg_pn.keys())
|
||||
pnlist.sort()
|
||||
for pn in pnlist:
|
||||
self.show_appends_for_pn(pn)
|
||||
|
||||
self.show_appends_for_skipped()
|
||||
|
||||
def show_appends_for_pn(self, pn):
|
||||
filenames = self.bbhandler.cooker_data.pkg_pn[pn]
|
||||
|
||||
best = bb.providers.findBestProvider(pn,
|
||||
self.bbhandler.config_data,
|
||||
self.bbhandler.cooker_data,
|
||||
self.bbhandler.cooker_data.pkg_pn)
|
||||
best_filename = os.path.basename(best[3])
|
||||
|
||||
self.show_appends_output(filenames, best_filename)
|
||||
|
||||
def show_appends_for_skipped(self):
|
||||
filenames = [os.path.basename(f)
|
||||
for f in self.bbhandler.cooker.skiplist.iterkeys()]
|
||||
self.show_appends_output(filenames, None, " (skipped)")
|
||||
|
||||
def show_appends_output(self, filenames, best_filename, name_suffix = ''):
|
||||
appended, missing = self.get_appends_for_files(filenames)
|
||||
if appended:
|
||||
for basename, appends in appended:
|
||||
logger.plain('%s%s:', basename, name_suffix)
|
||||
for append in appends:
|
||||
logger.plain(' %s', append)
|
||||
|
||||
if best_filename:
|
||||
if best_filename in missing:
|
||||
logger.warn('%s: missing append for preferred version',
|
||||
best_filename)
|
||||
self.returncode |= 1
|
||||
|
||||
|
||||
def get_appends_for_files(self, filenames):
|
||||
appended, notappended = [], []
|
||||
for filename in filenames:
|
||||
_, cls = bb.cache.Cache.virtualfn2realfn(filename)
|
||||
if cls:
|
||||
continue
|
||||
|
||||
basename = os.path.basename(filename)
|
||||
appends = self.bbhandler.cooker.collection.appendlist.get(basename)
|
||||
if appends:
|
||||
appended.append((basename, list(appends)))
|
||||
else:
|
||||
notappended.append(basename)
|
||||
return appended, notappended
|
||||
|
||||
def do_show_cross_depends(self, args):
|
||||
"""figure out the dependency between recipes that crosses a layer boundary.
|
||||
|
||||
usage: show-cross-depends [-f]
|
||||
|
||||
Figure out the dependency between recipes that crosses a layer boundary.
|
||||
|
||||
Options:
|
||||
-f show full file path
|
||||
|
||||
NOTE:
|
||||
The .bbappend file can impact the dependency.
|
||||
"""
|
||||
self.init_bbhandler()
|
||||
|
||||
show_filenames = False
|
||||
for arg in args.split():
|
||||
if arg == '-f':
|
||||
show_filenames = True
|
||||
else:
|
||||
sys.stderr.write("show-cross-depends: invalid option %s\n" % arg)
|
||||
self.do_help('')
|
||||
return
|
||||
|
||||
pkg_fn = self.bbhandler.cooker_data.pkg_fn
|
||||
bbpath = str(self.bbhandler.config_data.getVar('BBPATH', True))
|
||||
self.require_re = re.compile(r"require\s+(.+)")
|
||||
self.include_re = re.compile(r"include\s+(.+)")
|
||||
self.inherit_re = re.compile(r"inherit\s+(.+)")
|
||||
|
||||
# The bb's DEPENDS and RDEPENDS
|
||||
for f in pkg_fn:
|
||||
f = bb.cache.Cache.virtualfn2realfn(f)[0]
|
||||
# Get the layername that the file is in
|
||||
layername = self.get_file_layer(f)
|
||||
|
||||
# The DEPENDS
|
||||
deps = self.bbhandler.cooker_data.deps[f]
|
||||
for pn in deps:
|
||||
if pn in self.bbhandler.cooker_data.pkg_pn:
|
||||
best = bb.providers.findBestProvider(pn,
|
||||
self.bbhandler.config_data,
|
||||
self.bbhandler.cooker_data,
|
||||
self.bbhandler.cooker_data.pkg_pn)
|
||||
self.check_cross_depends("DEPENDS", layername, f, best[3], show_filenames)
|
||||
|
||||
# The RDPENDS
|
||||
all_rdeps = self.bbhandler.cooker_data.rundeps[f].values()
|
||||
# Remove the duplicated or null one.
|
||||
sorted_rdeps = {}
|
||||
# The all_rdeps is the list in list, so we need two for loops
|
||||
for k1 in all_rdeps:
|
||||
for k2 in k1:
|
||||
sorted_rdeps[k2] = 1
|
||||
all_rdeps = sorted_rdeps.keys()
|
||||
for rdep in all_rdeps:
|
||||
all_p = bb.providers.getRuntimeProviders(self.bbhandler.cooker_data, rdep)
|
||||
if all_p:
|
||||
best = bb.providers.filterProvidersRunTime(all_p, rdep,
|
||||
self.bbhandler.config_data,
|
||||
self.bbhandler.cooker_data)[0][0]
|
||||
self.check_cross_depends("RDEPENDS", layername, f, best, show_filenames)
|
||||
|
||||
# The inherit class
|
||||
cls_re = re.compile('classes/')
|
||||
if f in self.bbhandler.cooker_data.inherits:
|
||||
inherits = self.bbhandler.cooker_data.inherits[f]
|
||||
for cls in inherits:
|
||||
# The inherits' format is [classes/cls, /path/to/classes/cls]
|
||||
# ignore the classes/cls.
|
||||
if not cls_re.match(cls):
|
||||
inherit_layername = self.get_file_layer(cls)
|
||||
if inherit_layername != layername:
|
||||
if not show_filenames:
|
||||
f_short = self.remove_layer_prefix(f)
|
||||
cls = self.remove_layer_prefix(cls)
|
||||
else:
|
||||
f_short = f
|
||||
logger.plain("%s inherits %s" % (f_short, cls))
|
||||
|
||||
# The 'require/include xxx' in the bb file
|
||||
pv_re = re.compile(r"\${PV}")
|
||||
fnfile = open(f, 'r')
|
||||
line = fnfile.readline()
|
||||
while line:
|
||||
m, keyword = self.match_require_include(line)
|
||||
# Found the 'require/include xxxx'
|
||||
if m:
|
||||
needed_file = m.group(1)
|
||||
# Replace the ${PV} with the real PV
|
||||
if pv_re.search(needed_file) and f in self.bbhandler.cooker_data.pkg_pepvpr:
|
||||
pv = self.bbhandler.cooker_data.pkg_pepvpr[f][1]
|
||||
needed_file = re.sub(r"\${PV}", pv, needed_file)
|
||||
self.print_cross_files(bbpath, keyword, layername, f, needed_file, show_filenames)
|
||||
line = fnfile.readline()
|
||||
fnfile.close()
|
||||
|
||||
# The "require/include xxx" in conf/machine/*.conf, .inc and .bbclass
|
||||
conf_re = re.compile(".*/conf/machine/[^\/]*\.conf$")
|
||||
inc_re = re.compile(".*\.inc$")
|
||||
# The "inherit xxx" in .bbclass
|
||||
bbclass_re = re.compile(".*\.bbclass$")
|
||||
for layerdir in self.bblayers:
|
||||
layername = self.get_layer_name(layerdir)
|
||||
for dirpath, dirnames, filenames in os.walk(layerdir):
|
||||
for name in filenames:
|
||||
f = os.path.join(dirpath, name)
|
||||
s = conf_re.match(f) or inc_re.match(f) or bbclass_re.match(f)
|
||||
if s:
|
||||
ffile = open(f, 'r')
|
||||
line = ffile.readline()
|
||||
while line:
|
||||
m, keyword = self.match_require_include(line)
|
||||
# Only bbclass has the "inherit xxx" here.
|
||||
bbclass=""
|
||||
if not m and f.endswith(".bbclass"):
|
||||
m, keyword = self.match_inherit(line)
|
||||
bbclass=".bbclass"
|
||||
# Find a 'require/include xxxx'
|
||||
if m:
|
||||
self.print_cross_files(bbpath, keyword, layername, f, m.group(1) + bbclass, show_filenames)
|
||||
line = ffile.readline()
|
||||
ffile.close()
|
||||
|
||||
def print_cross_files(self, bbpath, keyword, layername, f, needed_filename, show_filenames):
|
||||
"""Print the depends that crosses a layer boundary"""
|
||||
needed_file = bb.utils.which(bbpath, needed_filename)
|
||||
if needed_file:
|
||||
# Which layer is this file from
|
||||
needed_layername = self.get_file_layer(needed_file)
|
||||
if needed_layername != layername:
|
||||
if not show_filenames:
|
||||
f = self.remove_layer_prefix(f)
|
||||
needed_file = self.remove_layer_prefix(needed_file)
|
||||
logger.plain("%s %s %s" %(f, keyword, needed_file))
|
||||
def match_inherit(self, line):
|
||||
"""Match the inherit xxx line"""
|
||||
return (self.inherit_re.match(line), "inherits")
|
||||
|
||||
def match_require_include(self, line):
|
||||
"""Match the require/include xxx line"""
|
||||
m = self.require_re.match(line)
|
||||
keyword = "requires"
|
||||
if not m:
|
||||
m = self.include_re.match(line)
|
||||
keyword = "includes"
|
||||
return (m, keyword)
|
||||
|
||||
def check_cross_depends(self, keyword, layername, f, needed_file, show_filenames):
|
||||
"""Print the DEPENDS/RDEPENDS file that crosses a layer boundary"""
|
||||
best_realfn = bb.cache.Cache.virtualfn2realfn(needed_file)[0]
|
||||
needed_layername = self.get_file_layer(best_realfn)
|
||||
if needed_layername != layername:
|
||||
if not show_filenames:
|
||||
f = self.remove_layer_prefix(f)
|
||||
best_realfn = self.remove_layer_prefix(best_realfn)
|
||||
|
||||
logger.plain("%s %s %s" % (f, keyword, best_realfn))
|
||||
|
||||
if __name__ == '__main__':
|
||||
sys.exit(main(sys.argv[1:]) or 0)
@@ -1,4 +1,4 @@
#!/usr/bin/env python3
#!/usr/bin/env python
import os
import sys,logging
import optparse
@@ -50,6 +50,6 @@ if __name__ == "__main__":
except Exception:
ret = 1
import traceback
traceback.print_exc()
traceback.print_exc(5)
sys.exit(ret)

@@ -1,4 +1,4 @@
#!/usr/bin/env python3
#!/usr/bin/env python
#
# Copyright (C) 2012 Richard Purdie
#
@@ -25,49 +25,25 @@ try:
except RuntimeError as exc:
sys.exit(str(exc))

tests = ["bb.tests.codeparser",
"bb.tests.cow",
"bb.tests.data",
"bb.tests.event",
"bb.tests.fetch",
"bb.tests.parse",
"bb.tests.utils"]
def usage():
print('usage: %s [testname1 [testname2]...]' % os.path.basename(sys.argv[0]))

if len(sys.argv) > 1:
if '--help' in sys.argv[1:]:
usage()
sys.exit(0)

tests = sys.argv[1:]
else:
tests = ["bb.tests.codeparser",
"bb.tests.cow",
"bb.tests.data",
"bb.tests.fetch",
"bb.tests.utils"]

for t in tests:
t = '.'.join(t.split('.')[:3])
__import__(t)

unittest.main(argv=["bitbake-selftest"] + tests)

# Set-up logging
class StdoutStreamHandler(logging.StreamHandler):
"""Special handler so that unittest is able to capture stdout"""
def __init__(self):
# Override __init__() because we don't want to set self.stream here
logging.Handler.__init__(self)

@property
def stream(self):
# We want to dynamically write wherever sys.stdout is pointing to
return sys.stdout

handler = StdoutStreamHandler()
bb.logger.addHandler(handler)
bb.logger.setLevel(logging.DEBUG)

ENV_HELP = """\
Environment variables:
BB_SKIP_NETTESTS set to 'yes' in order to skip tests using network
connection
BB_TMPDIR_NOCLEAN set to 'yes' to preserve test tmp directories
"""

class main(unittest.main):
def _print_help(self, *args, **kwargs):
super(main, self)._print_help(*args, **kwargs)
print(ENV_HELP)

if __name__ == '__main__':
main(defaultTest=tests, buffer=True)

@@ -1,4 +1,4 @@
|
||||
#!/usr/bin/env python3
|
||||
#!/usr/bin/env python
|
||||
|
||||
import os
|
||||
import sys
|
||||
@@ -10,48 +10,23 @@ import bb
|
||||
import select
|
||||
import errno
|
||||
import signal
|
||||
import pickle
|
||||
import traceback
|
||||
import queue
|
||||
from multiprocessing import Lock
|
||||
from threading import Thread
|
||||
|
||||
if sys.getfilesystemencoding() != "utf-8":
|
||||
sys.exit("Please use a locale setting which supports UTF-8 (such as LANG=en_US.UTF-8).\nPython can't change the filesystem locale after loading so we need a UTF-8 when Python starts or things won't work.")
|
||||
|
||||
# Users shouldn't be running this code directly
|
||||
if len(sys.argv) != 2 or not sys.argv[1].startswith("decafbad"):
|
||||
if len(sys.argv) != 2 or sys.argv[1] != "decafbad":
|
||||
print("bitbake-worker is meant for internal execution by bitbake itself, please don't use it standalone.")
|
||||
sys.exit(1)
|
||||
|
||||
profiling = False
|
||||
if sys.argv[1].startswith("decafbadbad"):
|
||||
profiling = True
|
||||
try:
|
||||
import cProfile as profile
|
||||
except:
|
||||
import profile
|
||||
|
||||
# Unbuffer stdout to avoid log truncation in the event
|
||||
# of an unorderly exit as well as to provide timely
|
||||
# updates to log files for use with tail
|
||||
try:
|
||||
if sys.stdout.name == '<stdout>':
|
||||
import fcntl
|
||||
fl = fcntl.fcntl(sys.stdout.fileno(), fcntl.F_GETFL)
|
||||
fl |= os.O_SYNC
|
||||
fcntl.fcntl(sys.stdout.fileno(), fcntl.F_SETFL, fl)
|
||||
#sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
|
||||
except:
|
||||
pass
|
||||
|
||||
logger = logging.getLogger("BitBake")
|
||||
|
||||
try:
|
||||
import cPickle as pickle
|
||||
except ImportError:
|
||||
import pickle
|
||||
bb.msg.note(1, bb.msg.domain.Cache, "Importing cPickle failed. Falling back to a very slow implementation.")
|
||||
|
||||
|
||||
worker_pipe = sys.stdout.fileno()
|
||||
bb.utils.nonblockingfd(worker_pipe)
|
||||
# Need to guard against multiprocessing being used in child processes
|
||||
# and multiple processes trying to write to the parent at the same time
|
||||
worker_pipe_lock = None
|
||||
|
||||
handler = bb.event.LogHandler()
|
||||
logger.addHandler(handler)
|
||||
@@ -66,61 +41,36 @@ if 0:
|
||||
consolelog.setFormatter(conlogformat)
|
||||
logger.addHandler(consolelog)
|
||||
|
||||
worker_queue = queue.Queue()
|
||||
worker_queue = ""
|
||||
|
||||
def worker_fire(event, d):
|
||||
data = b"<event>" + pickle.dumps(event) + b"</event>"
|
||||
data = "<event>" + pickle.dumps(event) + "</event>"
|
||||
worker_fire_prepickled(data)
|
||||
|
||||
def worker_fire_prepickled(event):
|
||||
global worker_queue
|
||||
|
||||
worker_queue.put(event)
|
||||
worker_queue = worker_queue + event
|
||||
worker_flush()
|
||||
|
||||
#
|
||||
# We can end up with write contention with the cooker, it can be trying to send commands
|
||||
# and we can be trying to send event data back. Therefore use a separate thread for writing
|
||||
# back data to cooker.
|
||||
#
|
||||
worker_thread_exit = False
|
||||
def worker_flush():
|
||||
global worker_queue, worker_pipe
|
||||
|
||||
def worker_flush(worker_queue):
|
||||
worker_queue_int = b""
|
||||
global worker_pipe, worker_thread_exit
|
||||
if not worker_queue:
|
||||
return
|
||||
|
||||
while True:
|
||||
try:
|
||||
worker_queue_int = worker_queue_int + worker_queue.get(True, 1)
|
||||
except queue.Empty:
|
||||
pass
|
||||
while (worker_queue_int or not worker_queue.empty()):
|
||||
try:
|
||||
(_, ready, _) = select.select([], [worker_pipe], [], 1)
|
||||
if not worker_queue.empty():
|
||||
worker_queue_int = worker_queue_int + worker_queue.get()
|
||||
written = os.write(worker_pipe, worker_queue_int)
|
||||
worker_queue_int = worker_queue_int[written:]
|
||||
except (IOError, OSError) as e:
|
||||
if e.errno != errno.EAGAIN and e.errno != errno.EPIPE:
|
||||
raise
|
||||
if worker_thread_exit and worker_queue.empty() and not worker_queue_int:
|
||||
return
|
||||
|
||||
worker_thread = Thread(target=worker_flush, args=(worker_queue,))
|
||||
worker_thread.start()
|
||||
try:
|
||||
written = os.write(worker_pipe, worker_queue)
|
||||
worker_queue = worker_queue[written:]
|
||||
except (IOError, OSError) as e:
|
||||
if e.errno != errno.EAGAIN:
|
||||
raise
|
||||
|
||||
def worker_child_fire(event, d):
|
||||
global worker_pipe
|
||||
global worker_pipe_lock
|
||||
|
||||
data = b"<event>" + pickle.dumps(event) + b"</event>"
|
||||
try:
|
||||
worker_pipe_lock.acquire()
|
||||
worker_pipe.write(data)
|
||||
worker_pipe_lock.release()
|
||||
except IOError:
|
||||
sigterm_handler(None, None)
|
||||
raise
|
||||
data = "<event>" + pickle.dumps(event) + "</event>"
|
||||
worker_pipe.write(data)
|
||||
|
||||
bb.event.worker_fire = worker_fire
|
||||
|
||||
@@ -131,12 +81,7 @@ def workerlog_write(msg):
|
||||
lf.write(msg)
|
||||
lf.flush()
|
||||
|
||||
def sigterm_handler(signum, frame):
|
||||
signal.signal(signal.SIGTERM, signal.SIG_DFL)
|
||||
os.killpg(0, signal.SIGTERM)
|
||||
sys.exit()
|
||||
|
||||
def fork_off_task(cfg, data, databuilder, workerdata, fn, task, taskname, appends, taskdepdata, extraconfigdata, quieterrors=False, dry_run_exec=False):
|
||||
def fork_off_task(cfg, data, workerdata, fn, task, taskname, appends, taskdepdata, quieterrors=False):
|
||||
# We need to setup the environment BEFORE the fork, since
|
||||
# a fork() or exec*() activates PSEUDO...
|
||||
|
||||
@@ -152,10 +97,8 @@ def fork_off_task(cfg, data, databuilder, workerdata, fn, task, taskname, append
|
||||
except TypeError:
|
||||
umask = taskdep['umask'][taskname]
|
||||
|
||||
dry_run = cfg.dry_run or dry_run_exec
|
||||
|
||||
# We can't use the fakeroot environment in a dry run as it possibly hasn't been built
|
||||
if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not dry_run:
|
||||
if 'fakeroot' in taskdep and taskname in taskdep['fakeroot'] and not cfg.dry_run:
|
||||
envvars = (workerdata["fakerootenv"][fn] or "").split()
|
||||
for key, value in (var.split('=') for var in envvars):
|
||||
envbackup[key] = os.environ.get(key)
|
||||
@@ -183,32 +126,22 @@ def fork_off_task(cfg, data, databuilder, workerdata, fn, task, taskname, append
|
||||
pipeout = os.fdopen(pipeout, 'wb', 0)
|
||||
pid = os.fork()
|
||||
except OSError as e:
|
||||
logger.critical("fork failed: %d (%s)" % (e.errno, e.strerror))
|
||||
sys.exit(1)
|
||||
bb.msg.fatal("RunQueue", "fork failed: %d (%s)" % (e.errno, e.strerror))
|
||||
|
||||
if pid == 0:
|
||||
def child():
|
||||
global worker_pipe
|
||||
global worker_pipe_lock
|
||||
pipein.close()
|
||||
|
||||
signal.signal(signal.SIGTERM, sigterm_handler)
|
||||
# Let SIGHUP exit as SIGTERM
|
||||
signal.signal(signal.SIGHUP, sigterm_handler)
|
||||
bb.utils.signal_on_parent_exit("SIGTERM")
|
||||
signal.signal(signal.SIGTERM, signal.SIG_DFL)
|
||||
|
||||
# Save out the PID so that the event can include it the
|
||||
# events
|
||||
bb.event.worker_pid = os.getpid()
|
||||
bb.event.worker_fire = worker_child_fire
|
||||
worker_pipe = pipeout
|
||||
worker_pipe_lock = Lock()
|
||||
|
||||
# Make the child the process group leader and ensure no
|
||||
# child process will be controlled by the current terminal
|
||||
# This ensures signals sent to the controlling terminal like Ctrl+C
|
||||
# don't stop the child processes.
|
||||
os.setsid()
|
||||
# Make the child the process group leader
|
||||
os.setpgid(0, 0)
|
||||
# No stdin
|
||||
newsi = os.open(os.devnull, os.O_RDWR)
|
||||
os.dup2(newsi, sys.stdin.fileno())
|
||||
@@ -216,75 +149,47 @@ def fork_off_task(cfg, data, databuilder, workerdata, fn, task, taskname, append
|
||||
if umask:
|
||||
os.umask(umask)
|
||||
|
||||
data.setVar("BB_WORKERCONTEXT", "1")
|
||||
data.setVar("BB_TASKDEPDATA", taskdepdata)
|
||||
data.setVar("BUILDNAME", workerdata["buildname"])
|
||||
data.setVar("DATE", workerdata["date"])
|
||||
data.setVar("TIME", workerdata["time"])
|
||||
bb.parse.siggen.set_taskdata(workerdata["hashes"], workerdata["hash_deps"], workerdata["sigchecksums"])
|
||||
ret = 0
|
||||
try:
|
||||
bb_cache = bb.cache.NoCache(databuilder)
|
||||
(realfn, virtual, mc) = bb.cache.virtualfn2realfn(fn)
|
||||
the_data = databuilder.mcdata[mc]
|
||||
the_data.setVar("BB_WORKERCONTEXT", "1")
|
||||
the_data.setVar("BB_TASKDEPDATA", taskdepdata)
|
||||
if cfg.limited_deps:
|
||||
the_data.setVar("BB_LIMITEDDEPS", "1")
|
||||
the_data.setVar("BUILDNAME", workerdata["buildname"])
|
||||
the_data.setVar("DATE", workerdata["date"])
|
||||
the_data.setVar("TIME", workerdata["time"])
|
||||
for varname, value in extraconfigdata.items():
|
||||
the_data.setVar(varname, value)
|
||||
|
||||
bb.parse.siggen.set_taskdata(workerdata["sigdata"])
|
||||
ret = 0
|
||||
|
||||
the_data = bb_cache.loadDataFull(fn, appends)
|
||||
the_data = bb.cache.Cache.loadDataFull(fn, appends, data)
|
||||
the_data.setVar('BB_TASKHASH', workerdata["runq_hash"][task])
|
||||
|
||||
bb.utils.set_process_name("%s:%s" % (the_data.getVar("PN"), taskname.replace("do_", "")))
|
||||
for h in workerdata["hashes"]:
|
||||
the_data.setVar("BBHASH_%s" % h, workerdata["hashes"][h])
|
||||
for h in workerdata["hash_deps"]:
|
||||
the_data.setVar("BBHASHDEPS_%s" % h, workerdata["hash_deps"][h])
|
||||
|
||||
# exported_vars() returns a generator which *cannot* be passed to os.environ.update()
|
||||
# successfully. We also need to unset anything from the environment which shouldn't be there
|
||||
exports = bb.data.exported_vars(the_data)
|
||||
|
||||
bb.utils.empty_environment()
|
||||
for e, v in exports:
|
||||
os.environ[e] = v
|
||||
|
||||
for e in fakeenv:
|
||||
os.environ[e] = fakeenv[e]
|
||||
the_data.setVar(e, fakeenv[e])
|
||||
the_data.setVarFlag(e, 'export', "1")
|
||||
|
||||
task_exports = the_data.getVarFlag(taskname, 'exports')
|
||||
if task_exports:
|
||||
for e in task_exports.split():
|
||||
the_data.setVarFlag(e, 'export', '1')
|
||||
v = the_data.getVar(e)
|
||||
if v is not None:
|
||||
os.environ[e] = v
|
||||
|
||||
if quieterrors:
|
||||
the_data.setVarFlag(taskname, "quieterrors", "1")
|
||||
|
||||
except Exception:
|
||||
except Exception as exc:
|
||||
if not quieterrors:
|
||||
logger.critical(traceback.format_exc())
|
||||
logger.critical(str(exc))
|
||||
os._exit(1)
|
||||
try:
|
||||
if dry_run:
|
||||
return 0
|
||||
return bb.build.exec_task(fn, taskname, the_data, cfg.profile)
|
||||
if not cfg.dry_run:
|
||||
ret = bb.build.exec_task(fn, taskname, the_data, cfg.profile)
|
||||
os._exit(ret)
|
||||
except:
|
||||
os._exit(1)
|
||||
if not profiling:
|
||||
os._exit(child())
|
||||
else:
|
||||
profname = "profile-%s.log" % (fn.replace("/", "-") + "-" + taskname)
|
||||
prof = profile.Profile()
|
||||
try:
|
||||
ret = profile.Profile.runcall(prof, child)
|
||||
finally:
|
||||
prof.dump_stats(profname)
|
||||
bb.utils.process_profilelog(profname)
|
||||
os._exit(ret)
|
||||
else:
|
||||
for key, value in iter(envbackup.items()):
|
||||
for key, value in envbackup.iteritems():
|
||||
if value is None:
|
||||
del os.environ[key]
|
||||
else:
|
||||
@@ -301,22 +206,22 @@ class runQueueWorkerPipe():
|
||||
if pipeout:
|
||||
pipeout.close()
|
||||
bb.utils.nonblockingfd(self.input)
|
||||
self.queue = b""
|
||||
self.queue = ""
|
||||
|
||||
def read(self):
|
||||
start = len(self.queue)
|
||||
try:
|
||||
self.queue = self.queue + (self.input.read(102400) or b"")
|
||||
self.queue = self.queue + self.input.read(102400)
|
||||
except (OSError, IOError) as e:
|
||||
if e.errno != errno.EAGAIN:
|
||||
raise
|
||||
|
||||
end = len(self.queue)
|
||||
index = self.queue.find(b"</event>")
|
||||
index = self.queue.find("</event>")
|
||||
while index != -1:
|
||||
worker_fire_prepickled(self.queue[:index+8])
|
||||
self.queue = self.queue[index+8:]
|
||||
index = self.queue.find(b"</event>")
|
||||
index = self.queue.find("</event>")
|
||||
return (end > start)
|
||||
|
||||
def close(self):
|
||||
@@ -332,27 +237,17 @@ class BitbakeWorker(object):
|
||||
def __init__(self, din):
|
||||
self.input = din
|
||||
bb.utils.nonblockingfd(self.input)
|
||||
self.queue = b""
|
||||
self.queue = ""
|
||||
self.cookercfg = None
|
||||
self.databuilder = None
|
||||
self.data = None
|
||||
self.extraconfigdata = None
|
||||
self.build_pids = {}
|
||||
self.build_pipes = {}
|
||||
|
||||
signal.signal(signal.SIGTERM, self.sigterm_exception)
|
||||
# Let SIGHUP exit as SIGTERM
|
||||
signal.signal(signal.SIGHUP, self.sigterm_exception)
|
||||
if "beef" in sys.argv[1]:
|
||||
bb.utils.set_process_name("Worker (Fakeroot)")
|
||||
else:
|
||||
bb.utils.set_process_name("Worker")
|
||||
|
||||
def sigterm_exception(self, signum, stackframe):
|
||||
if signum == signal.SIGTERM:
|
||||
bb.warn("Worker received SIGTERM, shutting down...")
|
||||
elif signum == signal.SIGHUP:
|
||||
bb.warn("Worker received SIGHUP, shutting down...")
|
||||
bb.warn("Worker recieved SIGTERM, shutting down...")
|
||||
self.handle_finishnow(None)
|
||||
signal.signal(signal.SIGTERM, signal.SIG_DFL)
|
||||
os.kill(os.getpid(), signal.SIGTERM)
|
||||
@@ -360,39 +255,34 @@ class BitbakeWorker(object):
|
||||
def serve(self):
|
||||
while True:
|
||||
(ready, _, _) = select.select([self.input] + [i.input for i in self.build_pipes.values()], [] , [], 1)
|
||||
if self.input in ready:
|
||||
if self.input in ready or len(self.queue):
|
||||
start = len(self.queue)
|
||||
try:
|
||||
r = self.input.read()
|
||||
if len(r) == 0:
|
||||
# EOF on pipe, server must have terminated
|
||||
self.sigterm_exception(signal.SIGTERM, None)
|
||||
self.queue = self.queue + r
|
||||
self.queue = self.queue + self.input.read()
|
||||
except (OSError, IOError):
|
||||
pass
|
||||
if len(self.queue):
|
||||
self.handle_item(b"cookerconfig", self.handle_cookercfg)
|
||||
self.handle_item(b"extraconfigdata", self.handle_extraconfigdata)
|
||||
self.handle_item(b"workerdata", self.handle_workerdata)
|
||||
self.handle_item(b"runtask", self.handle_runtask)
|
||||
self.handle_item(b"finishnow", self.handle_finishnow)
|
||||
self.handle_item(b"ping", self.handle_ping)
|
||||
self.handle_item(b"quit", self.handle_quit)
|
||||
end = len(self.queue)
|
||||
self.handle_item("cookerconfig", self.handle_cookercfg)
|
||||
self.handle_item("workerdata", self.handle_workerdata)
|
||||
self.handle_item("runtask", self.handle_runtask)
|
||||
self.handle_item("finishnow", self.handle_finishnow)
|
||||
self.handle_item("ping", self.handle_ping)
|
||||
self.handle_item("quit", self.handle_quit)
|
||||
|
||||
for pipe in self.build_pipes:
|
||||
if self.build_pipes[pipe].input in ready:
|
||||
self.build_pipes[pipe].read()
|
||||
self.build_pipes[pipe].read()
|
||||
if len(self.build_pids):
|
||||
while self.process_waitpid():
|
||||
continue
|
||||
self.process_waitpid()
|
||||
worker_flush()
|
||||
|
||||
|
||||
def handle_item(self, item, func):
|
||||
if self.queue.startswith(b"<" + item + b">"):
|
||||
index = self.queue.find(b"</" + item + b">")
|
||||
if self.queue.startswith("<" + item + ">"):
|
||||
index = self.queue.find("</" + item + ">")
|
||||
while index != -1:
|
||||
func(self.queue[(len(item) + 2):index])
|
||||
self.queue = self.queue[(index + len(item) + 3):]
|
||||
index = self.queue.find(b"</" + item + b">")
|
||||
index = self.queue.find("</" + item + ">")
|
||||
|
||||
def handle_cookercfg(self, data):
|
||||
self.cookercfg = pickle.loads(data)
|
||||
@@ -400,22 +290,18 @@ class BitbakeWorker(object):
|
||||
self.databuilder.parseBaseConfiguration()
|
||||
self.data = self.databuilder.data
|
||||
|
||||
def handle_extraconfigdata(self, data):
|
||||
self.extraconfigdata = pickle.loads(data)
|
||||
|
||||
def handle_workerdata(self, data):
|
||||
self.workerdata = pickle.loads(data)
|
||||
bb.msg.loggerDefaultDebugLevel = self.workerdata["logdefaultdebug"]
|
||||
bb.msg.loggerDefaultVerbose = self.workerdata["logdefaultverbose"]
|
||||
bb.msg.loggerVerboseLogs = self.workerdata["logdefaultverboselogs"]
|
||||
bb.msg.loggerDefaultDomains = self.workerdata["logdefaultdomain"]
|
||||
for mc in self.databuilder.mcdata:
|
||||
self.databuilder.mcdata[mc].setVar("PRSERV_HOST", self.workerdata["prhost"])
|
||||
self.data.setVar("PRSERV_HOST", self.workerdata["prhost"])
|
||||
|
||||
def handle_ping(self, _):
|
||||
workerlog_write("Handling ping\n")
|
||||
|
||||
logger.warning("Pong from bitbake-worker!")
|
||||
logger.warn("Pong from bitbake-worker!")
|
||||
|
||||
def handle_quit(self, data):
|
||||
workerlog_write("Handling quit\n")
|
||||
@@ -425,10 +311,10 @@ class BitbakeWorker(object):
|
||||
sys.exit(0)
|
||||
|
||||
def handle_runtask(self, data):
|
||||
fn, task, taskname, quieterrors, appends, taskdepdata, dry_run_exec = pickle.loads(data)
|
||||
fn, task, taskname, quieterrors, appends, taskdepdata = pickle.loads(data)
|
||||
workerlog_write("Handling runtask %s %s %s\n" % (task, fn, taskname))
|
||||
|
||||
pid, pipein, pipeout = fork_off_task(self.cookercfg, self.data, self.databuilder, self.workerdata, fn, task, taskname, appends, taskdepdata, self.extraconfigdata, quieterrors, dry_run_exec)
|
||||
pid, pipein, pipeout = fork_off_task(self.cookercfg, self.data, self.workerdata, fn, task, taskname, appends, taskdepdata, quieterrors)
|
||||
|
||||
self.build_pids[pid] = task
|
||||
self.build_pipes[pid] = runQueueWorkerPipe(pipein, pipeout)
|
||||
@@ -441,9 +327,9 @@ class BitbakeWorker(object):
|
||||
try:
|
||||
pid, status = os.waitpid(-1, os.WNOHANG)
|
||||
if pid == 0 or os.WIFSTOPPED(status):
|
||||
return False
|
||||
return None
|
||||
except OSError:
|
||||
return False
|
||||
return None
|
||||
|
||||
workerlog_write("Exit code of %s for pid %s\n" % (status, pid))
|
||||
|
||||
@@ -460,14 +346,12 @@ class BitbakeWorker(object):
|
||||
self.build_pipes[pid].close()
|
||||
del self.build_pipes[pid]
|
||||
|
||||
worker_fire_prepickled(b"<exitcode>" + pickle.dumps((task, status)) + b"</exitcode>")
|
||||
|
||||
return True
|
||||
worker_fire_prepickled("<exitcode>" + pickle.dumps((task, status)) + "</exitcode>")
|
||||
|
||||
def handle_finishnow(self, _):
|
||||
if self.build_pids:
|
||||
logger.info("Sending SIGTERM to remaining %s tasks", len(self.build_pids))
|
||||
for k, v in iter(self.build_pids.items()):
|
||||
for k, v in self.build_pids.iteritems():
|
||||
try:
|
||||
os.kill(-k, signal.SIGTERM)
|
||||
os.waitpid(-1, 0)
|
||||
@@ -477,25 +361,15 @@ class BitbakeWorker(object):
|
||||
self.build_pipes[pipe].read()
|
||||
|
||||
try:
|
||||
worker = BitbakeWorker(os.fdopen(sys.stdin.fileno(), 'rb'))
|
||||
if not profiling:
|
||||
worker.serve()
|
||||
else:
|
||||
profname = "profile-worker.log"
|
||||
prof = profile.Profile()
|
||||
try:
|
||||
profile.Profile.runcall(prof, worker.serve)
|
||||
finally:
|
||||
prof.dump_stats(profname)
|
||||
bb.utils.process_profilelog(profname)
|
||||
worker = BitbakeWorker(sys.stdin)
|
||||
worker.serve()
|
||||
except BaseException as e:
|
||||
if not normalexit:
|
||||
import traceback
|
||||
sys.stderr.write(traceback.format_exc())
|
||||
sys.stderr.write(str(e))
|
||||
|
||||
worker_thread_exit = True
|
||||
worker_thread.join()
|
||||
|
||||
while len(worker_queue):
|
||||
worker_flush()
|
||||
workerlog_write("exitting")
|
||||
sys.exit(0)
@@ -1,4 +1,4 @@
|
||||
#!/usr/bin/env python3
|
||||
#!/usr/bin/env python
|
||||
# ex:ts=4:sw=4:sts=4:et
|
||||
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
|
||||
#
|
||||
@@ -462,7 +462,7 @@ def main():
|
||||
state_group = 2
|
||||
|
||||
for key in bb.data.keys(documentation):
|
||||
data = documentation.getVarFlag(key, "doc", False)
|
||||
data = documentation.getVarFlag(key, "doc")
|
||||
if not data:
|
||||
continue
|
||||
|
||||
|
||||
@@ -1,165 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""git-make-shallow: make the current git repository shallow
|
||||
|
||||
Remove the history of the specified revisions, then optionally filter the
|
||||
available refs to those specified.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import collections
|
||||
import errno
|
||||
import itertools
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
version = 1.0
|
||||
|
||||
|
||||
def main():
|
||||
if sys.version_info < (3, 4, 0):
|
||||
sys.exit('Python 3.4 or greater is required')
|
||||
|
||||
git_dir = check_output(['git', 'rev-parse', '--git-dir']).rstrip()
|
||||
shallow_file = os.path.join(git_dir, 'shallow')
|
||||
if os.path.exists(shallow_file):
|
||||
try:
|
||||
check_output(['git', 'fetch', '--unshallow'])
|
||||
except subprocess.CalledProcessError:
|
||||
try:
|
||||
os.unlink(shallow_file)
|
||||
except OSError as exc:
|
||||
if exc.errno != errno.ENOENT:
|
||||
raise
|
||||
|
||||
args = process_args()
|
||||
revs = check_output(['git', 'rev-list'] + args.revisions).splitlines()
|
||||
|
||||
make_shallow(shallow_file, args.revisions, args.refs)
|
||||
|
||||
ref_revs = check_output(['git', 'rev-list'] + args.refs).splitlines()
|
||||
remaining_history = set(revs) & set(ref_revs)
|
||||
for rev in remaining_history:
|
||||
if check_output(['git', 'rev-parse', '{}^@'.format(rev)]):
|
||||
sys.exit('Error: %s was not made shallow' % rev)
|
||||
|
||||
filter_refs(args.refs)
|
||||
|
||||
if args.shrink:
|
||||
shrink_repo(git_dir)
|
||||
subprocess.check_call(['git', 'fsck', '--unreachable'])
|
||||
|
||||
|
||||
def process_args():
|
||||
# TODO: add argument to automatically keep local-only refs, since they
|
||||
# can't be easily restored with a git fetch.
|
||||
parser = argparse.ArgumentParser(description='Remove the history of the specified revisions, then optionally filter the available refs to those specified.')
|
||||
parser.add_argument('--ref', '-r', metavar='REF', action='append', dest='refs', help='remove all but the specified refs (cumulative)')
|
||||
parser.add_argument('--shrink', '-s', action='store_true', help='shrink the git repository by repacking and pruning')
|
||||
parser.add_argument('revisions', metavar='REVISION', nargs='+', help='a git revision/commit')
|
||||
if len(sys.argv) < 2:
|
||||
parser.print_help()
|
||||
sys.exit(2)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
if args.refs:
|
||||
args.refs = check_output(['git', 'rev-parse', '--symbolic-full-name'] + args.refs).splitlines()
|
||||
else:
|
||||
args.refs = get_all_refs(lambda r, t, tt: t == 'commit' or tt == 'commit')
|
||||
|
||||
args.refs = list(filter(lambda r: not r.endswith('/HEAD'), args.refs))
|
||||
args.revisions = check_output(['git', 'rev-parse'] + ['%s^{}' % i for i in args.revisions]).splitlines()
|
||||
return args
|
||||
|
||||
|
||||
def check_output(cmd, input=None):
|
||||
return subprocess.check_output(cmd, universal_newlines=True, input=input)
|
||||
|
||||
|
||||
def make_shallow(shallow_file, revisions, refs):
|
||||
"""Remove the history of the specified revisions."""
|
||||
for rev in follow_history_intersections(revisions, refs):
|
||||
print("Processing %s" % rev)
|
||||
with open(shallow_file, 'a') as f:
|
||||
f.write(rev + '\n')
|
||||
|
||||
|
||||
def get_all_refs(ref_filter=None):
|
||||
"""Return all the existing refs in this repository, optionally filtering the refs."""
|
||||
ref_output = check_output(['git', 'for-each-ref', '--format=%(refname)\t%(objecttype)\t%(*objecttype)'])
|
||||
ref_split = [tuple(iter_extend(l.rsplit('\t'), 3)) for l in ref_output.splitlines()]
|
||||
if ref_filter:
|
||||
ref_split = (e for e in ref_split if ref_filter(*e))
|
||||
refs = [r[0] for r in ref_split]
|
||||
return refs
|
||||
|
||||
|
||||
def iter_extend(iterable, length, obj=None):
|
||||
"""Ensure that iterable is the specified length by extending with obj."""
|
||||
return itertools.islice(itertools.chain(iterable, itertools.repeat(obj)), length)
|
||||
|
||||
|
||||
def filter_refs(refs):
|
||||
"""Remove all but the specified refs from the git repository."""
|
||||
all_refs = get_all_refs()
|
||||
to_remove = set(all_refs) - set(refs)
|
||||
if to_remove:
|
||||
check_output(['xargs', '-0', '-n', '1', 'git', 'update-ref', '-d', '--no-deref'],
|
||||
input=''.join(l + '\0' for l in to_remove))
|
||||
|
||||
|
||||
def follow_history_intersections(revisions, refs):
|
||||
"""Determine all the points where the history of the specified revisions intersects the specified refs."""
|
||||
queue = collections.deque(revisions)
|
||||
seen = set()
|
||||
|
||||
for rev in iter_except(queue.popleft, IndexError):
|
||||
if rev in seen:
|
||||
continue
|
||||
|
||||
parents = check_output(['git', 'rev-parse', '%s^@' % rev]).splitlines()
|
||||
|
||||
yield rev
|
||||
seen.add(rev)
|
||||
|
||||
if not parents:
|
||||
continue
|
||||
|
||||
check_refs = check_output(['git', 'merge-base', '--independent'] + sorted(refs)).splitlines()
|
||||
for parent in parents:
|
||||
for ref in check_refs:
|
||||
print("Checking %s vs %s" % (parent, ref))
|
||||
try:
|
||||
merge_base = check_output(['git', 'merge-base', parent, ref]).rstrip()
|
||||
except subprocess.CalledProcessError:
|
||||
continue
|
||||
else:
|
||||
queue.append(merge_base)
|
||||
|
||||
|
||||
def iter_except(func, exception, start=None):
|
||||
"""Yield a function repeatedly until it raises an exception."""
|
||||
try:
|
||||
if start is not None:
|
||||
yield start()
|
||||
while True:
|
||||
yield func()
|
||||
except exception:
|
||||
pass
|
||||
|
||||
|
||||
def shrink_repo(git_dir):
|
||||
"""Shrink the newly shallow repository, removing the unreachable objects."""
|
||||
subprocess.check_call(['git', 'reflog', 'expire', '--expire-unreachable=now', '--all'])
|
||||
subprocess.check_call(['git', 'repack', '-ad'])
|
||||
try:
|
||||
os.unlink(os.path.join(git_dir, 'objects', 'info', 'alternates'))
|
||||
except OSError as exc:
|
||||
if exc.errno != errno.ENOENT:
|
||||
raise
|
||||
subprocess.check_call(['git', 'prune', '--expire', 'now'])
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
bitbake/bin/image-writer (new executable file, 122 lines)
@@ -0,0 +1,122 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Copyright (c) 2012 Wind River Systems, Inc.
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License version 2 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
|
||||
# See the GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
|
||||
import os
|
||||
import sys
|
||||
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname( \
|
||||
os.path.abspath(__file__))), 'lib'))
|
||||
try:
|
||||
import bb
|
||||
except RuntimeError as exc:
|
||||
sys.exit(str(exc))
|
||||
|
||||
import gtk
|
||||
import optparse
|
||||
import pygtk
|
||||
|
||||
from bb.ui.crumbs.hobwidget import HobAltButton, HobButton
|
||||
from bb.ui.crumbs.hig.crumbsmessagedialog import CrumbsMessageDialog
|
||||
from bb.ui.crumbs.hig.deployimagedialog import DeployImageDialog
|
||||
from bb.ui.crumbs.hig.imageselectiondialog import ImageSelectionDialog
|
||||
|
||||
# I put all the fs bitbake supported here. Need more test.
|
||||
DEPLOYABLE_IMAGE_TYPES = ["jffs2", "cramfs", "ext2", "ext3", "btrfs", "squashfs", "ubi", "vmdk"]
|
||||
Title = "USB Image Writer"
|
||||
|
||||
class DeployWindow(gtk.Window):
|
||||
def __init__(self, image_path=''):
|
||||
super(DeployWindow, self).__init__()
|
||||
|
||||
if len(image_path) > 0:
|
||||
valid = True
|
||||
if not os.path.exists(image_path):
|
||||
valid = False
|
||||
lbl = "<b>Invalid image file path: %s.</b>\nPress <b>Select Image</b> to select an image." % image_path
|
||||
else:
|
||||
image_path = os.path.abspath(image_path)
|
||||
extend_name = os.path.splitext(image_path)[1][1:]
|
||||
if extend_name not in DEPLOYABLE_IMAGE_TYPES:
|
||||
valid = False
|
||||
lbl = "<b>Undeployable imge type: %s</b>\nPress <b>Select Image</b> to select an image." % extend_name
|
||||
|
||||
if not valid:
|
||||
image_path = ''
|
||||
crumbs_dialog = CrumbsMessageDialog(self, lbl, gtk.STOCK_DIALOG_INFO)
|
||||
button = crumbs_dialog.add_button("Close", gtk.RESPONSE_OK)
|
||||
HobButton.style_button(button)
|
||||
crumbs_dialog.run()
|
||||
crumbs_dialog.destroy()
|
||||
|
||||
self.deploy_dialog = DeployImageDialog(Title, image_path, self,
|
||||
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT
|
||||
| gtk.DIALOG_NO_SEPARATOR, None, standalone=True)
|
||||
close_button = self.deploy_dialog.add_button("Close", gtk.RESPONSE_NO)
|
||||
HobAltButton.style_button(close_button)
|
||||
close_button.connect('clicked', gtk.main_quit)
|
||||
|
||||
write_button = self.deploy_dialog.add_button("Write USB image", gtk.RESPONSE_YES)
|
||||
HobAltButton.style_button(write_button)
|
||||
|
||||
self.deploy_dialog.connect('select_image_clicked', self.select_image_clicked_cb)
|
||||
self.deploy_dialog.connect('destroy', gtk.main_quit)
|
||||
response = self.deploy_dialog.show()
|
||||
|
||||
def select_image_clicked_cb(self, dialog):
|
||||
cwd = os.getcwd()
|
||||
dialog = ImageSelectionDialog(cwd, DEPLOYABLE_IMAGE_TYPES, Title, self, gtk.FILE_CHOOSER_ACTION_SAVE )
|
||||
button = dialog.add_button("Cancel", gtk.RESPONSE_NO)
|
||||
HobAltButton.style_button(button)
|
||||
button = dialog.add_button("Open", gtk.RESPONSE_YES)
|
||||
HobAltButton.style_button(button)
|
||||
response = dialog.run()
|
||||
|
||||
if response == gtk.RESPONSE_YES:
|
||||
if not dialog.image_names:
|
||||
lbl = "<b>No selections made</b>\nClicked the radio button to select a image."
|
||||
crumbs_dialog = CrumbsMessageDialog(self, lbl, gtk.STOCK_DIALOG_INFO)
|
||||
button = crumbs_dialog.add_button("Close", gtk.RESPONSE_OK)
|
||||
HobButton.style_button(button)
|
||||
crumbs_dialog.run()
|
||||
crumbs_dialog.destroy()
|
||||
dialog.destroy()
|
||||
return
|
||||
|
||||
# get the full path of image
|
||||
image_path = os.path.join(dialog.image_folder, dialog.image_names[0])
|
||||
self.deploy_dialog.set_image_text_buffer(image_path)
|
||||
self.deploy_dialog.set_image_path(image_path)
|
||||
|
||||
dialog.destroy()
|
||||
|
||||
def main():
|
||||
parser = optparse.OptionParser(
|
||||
usage = """%prog [-h] [image_file]
|
||||
|
||||
%prog writes bootable images to USB devices. You can
|
||||
provide the image file on the command line or select it using the GUI.""")
|
||||
|
||||
options, args = parser.parse_args(sys.argv)
|
||||
image_file = args[1] if len(args) > 1 else ''
|
||||
dw = DeployWindow(image_file)
|
||||
|
||||
if __name__ == '__main__':
|
||||
try:
|
||||
main()
|
||||
gtk.main()
|
||||
except Exception:
|
||||
import traceback
|
||||
traceback.print_exc(3)
|
||||
@@ -1,8 +1,5 @@
|
||||
#!/bin/echo ERROR: This script needs to be sourced. Please run as .
|
||||
|
||||
# toaster - shell script to start Toaster
|
||||
|
||||
# Copyright (C) 2013-2015 Intel Corp.
|
||||
#!/bin/bash
|
||||
# (c) 2013 Intel Corp.
|
||||
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
@@ -15,300 +12,203 @@
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see http://www.gnu.org/licenses/.
|
||||
# along with this program; if not, write to the Free Software
|
||||
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
|
||||
HELP="
|
||||
Usage: source toaster start|stop [webport=<address:port>] [noweb] [nobuild]
|
||||
Optional arguments:
|
||||
[nobuild] Setup the environment for capturing builds with toaster but disable managed builds
|
||||
[noweb] Setup the environment for capturing builds with toaster but don't start the web server
|
||||
[webport] Set the development server (default: localhost:8000)
|
||||
"
|
||||
|
||||
custom_extention()
|
||||
# This script enables toaster event logging and
|
||||
# starts bitbake resident server
|
||||
# use as: source toaster [start|stop]
|
||||
|
||||
# Helper function to kill a background toaster development server
|
||||
|
||||
function webserverKillAll()
|
||||
{
|
||||
custom_extension=$BBBASEDIR/lib/toaster/orm/fixtures/custom_toaster_append.sh
|
||||
if [ -f $custom_extension ] ; then
|
||||
$custom_extension $*
|
||||
fi
|
||||
local pidfile
|
||||
for pidfile in ${BUILDDIR}/.toastermain.pid; do
|
||||
if [ -f ${pidfile} ]; then
|
||||
while kill -0 $(< ${pidfile}) 2>/dev/null; do
|
||||
kill -SIGTERM -$(< ${pidfile}) 2>/dev/null
|
||||
sleep 1;
|
||||
done;
|
||||
rm ${pidfile}
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
databaseCheck()
|
||||
function webserverStartAll()
|
||||
{
|
||||
retval=0
|
||||
# you can always add a superuser later via
|
||||
# ../bitbake/lib/toaster/manage.py createsuperuser --username=<ME>
|
||||
$MANAGE migrate --noinput || retval=1
|
||||
|
||||
if [ $retval -eq 1 ]; then
|
||||
echo "Failed migrations, aborting system start" 1>&2
|
||||
return $retval
|
||||
fi
|
||||
# Make sure that checksettings can pick up any value for TEMPLATECONF
|
||||
export TEMPLATECONF
|
||||
$MANAGE checksettings --traceback || retval=1
|
||||
|
||||
if [ $retval -eq 1 ]; then
|
||||
printf "\nError while checking settings; aborting\n"
|
||||
return $retval
|
||||
fi
|
||||
|
||||
return $retval
|
||||
}
|
||||
|
||||
webserverKillAll()
|
||||
{
|
||||
local pidfile
|
||||
if [ -f ${BUILDDIR}/.toastermain.pid ] ; then
|
||||
custom_extention web_stop_postpend
|
||||
else
|
||||
custom_extention noweb_stop_postpend
|
||||
fi
|
||||
for pidfile in ${BUILDDIR}/.toastermain.pid ${BUILDDIR}/.runbuilds.pid; do
|
||||
if [ -f ${pidfile} ]; then
|
||||
pid=`cat ${pidfile}`
|
||||
while kill -0 $pid 2>/dev/null; do
|
||||
kill -SIGTERM -$pid 2>/dev/null
|
||||
sleep 1
|
||||
done
|
||||
rm ${pidfile}
|
||||
retval=0
|
||||
python $BBBASEDIR/lib/toaster/manage.py syncdb || retval=1
|
||||
python $BBBASEDIR/lib/toaster/manage.py migrate orm || retval=2
|
||||
if [ $retval -eq 1 ]; then
|
||||
echo "Failed db sync, stopping system start" 1>&2
|
||||
elif [ $retval -eq 2 ]; then
|
||||
echo -e "\nError on migration, trying to recover... \n"
|
||||
python $BBBASEDIR/lib/toaster/manage.py migrate orm 0001_initial --fake
|
||||
retval=0
|
||||
python $BBBASEDIR/lib/toaster/manage.py migrate orm || retval=1
|
||||
fi
|
||||
done
|
||||
if [ $retval -eq 0 ]; then
|
||||
python $BBBASEDIR/lib/toaster/manage.py runserver 0.0.0.0:8000 </dev/null >${BUILDDIR}/toaster_web.log 2>&1 & echo $! >${BUILDDIR}/.toastermain.pid
|
||||
sleep 1
|
||||
if ! cat "${BUILDDIR}/.toastermain.pid" | xargs -I{} kill -0 {} ; then
|
||||
retval=1
|
||||
rm "${BUILDDIR}/.toastermain.pid"
|
||||
fi
|
||||
fi
|
||||
return $retval
|
||||
}
|
||||
|
||||
webserverStartAll()
|
||||
# Helper functions to add a special configuration file
|
||||
|
||||
function addtoConfiguration()
|
||||
{
|
||||
# do not start if toastermain points to a valid process
|
||||
if ! cat "${BUILDDIR}/.toastermain.pid" 2>/dev/null | xargs -I{} kill -0 {} ; then
|
||||
retval=1
|
||||
rm "${BUILDDIR}/.toastermain.pid"
|
||||
fi
|
||||
|
||||
retval=0
|
||||
|
||||
# check the database
|
||||
databaseCheck || return 1
|
||||
|
||||
echo "Starting webserver..."
|
||||
|
||||
$MANAGE runserver "$ADDR_PORT" \
|
||||
</dev/null >>${BUILDDIR}/toaster_web.log 2>&1 \
|
||||
& echo $! >${BUILDDIR}/.toastermain.pid
|
||||
|
||||
sleep 1
|
||||
|
||||
if ! cat "${BUILDDIR}/.toastermain.pid" | xargs -I{} kill -0 {} ; then
|
||||
retval=1
|
||||
rm "${BUILDDIR}/.toastermain.pid"
|
||||
else
|
||||
echo "Toaster development webserver started at http://$ADDR_PORT"
|
||||
echo -e "\nYou can now run 'bitbake <target>' on the command line and monitor your build in Toaster.\nYou can also use a Toaster project to configure and run a build.\n"
|
||||
custom_extention web_start_postpend $ADDR_PORT
|
||||
fi
|
||||
|
||||
return $retval
|
||||
echo "#Created by toaster start script" > ${BUILDDIR}/conf/$2
|
||||
echo $1 >> ${BUILDDIR}/conf/$2
|
||||
}
|
||||
|
||||
INSTOPSYSTEM=0
|
||||
|
||||
# define the stop command
|
||||
stop_system()
|
||||
function stop_system()
|
||||
{
|
||||
# prevent reentry
|
||||
if [ $INSTOPSYSTEM -eq 1 ]; then return; fi
|
||||
if [ $INSTOPSYSTEM == 1 ]; then return; fi
|
||||
INSTOPSYSTEM=1
|
||||
if [ -f ${BUILDDIR}/.toasterui.pid ]; then
|
||||
kill $(< ${BUILDDIR}/.toasterui.pid ) 2>/dev/null
|
||||
rm ${BUILDDIR}/.toasterui.pid
|
||||
fi
|
||||
BBSERVER=localhost:8200 bitbake -m
|
||||
unset BBSERVER
|
||||
webserverKillAll
|
||||
# unset exported variables
|
||||
unset TOASTER_DIR
|
||||
unset BITBAKE_UI
|
||||
unset BBBASEDIR
|
||||
# force stop any misbehaving bitbake server
|
||||
lsof bitbake.lock | awk '{print $2}' | grep "[0-9]\+" | xargs -n1 -r kill
|
||||
trap - SIGHUP
|
||||
#trap - SIGCHLD
|
||||
INSTOPSYSTEM=0
|
||||
}
|
||||
|
||||
verify_prereq() {
|
||||
# Verify Django version
|
||||
reqfile=$(python3 -c "import os; print(os.path.realpath('$BBBASEDIR/toaster-requirements.txt'))")
|
||||
exp='s/Django\([><=]\+\)\([^,]\+\),\([><=]\+\)\(.\+\)/'
|
||||
# expand version parts to 2 digits to support 1.10.x > 1.8
|
||||
# (note:helper functions hard to insert in-line)
|
||||
exp=$exp'import sys,django;'
|
||||
exp=$exp'version=["%02d" % int(n) for n in django.get_version().split(".")];'
|
||||
exp=$exp'vmin=["%02d" % int(n) for n in "\2".split(".")];'
|
||||
exp=$exp'vmax=["%02d" % int(n) for n in "\4".split(".")];'
|
||||
exp=$exp'sys.exit(not (version \1 vmin and version \3 vmax))'
|
||||
exp=$exp'/p'
|
||||
if ! sed -n "$exp" $reqfile | python3 - ; then
|
||||
req=`grep ^Django $reqfile`
|
||||
echo "This program needs $req"
|
||||
echo "Please install with pip3 install -r $reqfile"
|
||||
return 2
|
||||
fi
|
||||
|
||||
return 0
|
||||
function check_pidbyfile() {
|
||||
[ -e $1 ] && kill -0 $(< $1) 2>/dev/null
|
||||
}
|
||||
|
||||
# read command line parameters
|
||||
if [ -n "$BASH_SOURCE" ] ; then
|
||||
TOASTER=${BASH_SOURCE}
|
||||
elif [ -n "$ZSH_NAME" ] ; then
|
||||
TOASTER=${(%):-%x}
|
||||
else
|
||||
TOASTER=$0
|
||||
fi
|
||||
|
||||
export BBBASEDIR=`dirname $TOASTER`/..
|
||||
MANAGE="python3 $BBBASEDIR/lib/toaster/manage.py"
|
||||
OE_ROOT=`dirname $TOASTER`/../..
|
||||
|
||||
# this is the configuraton file we are using for toaster
|
||||
# we are using the same logic that oe-setup-builddir uses
|
||||
# (based on TEMPLATECONF and .templateconf) to determine
|
||||
# which toasterconf.json to use.
|
||||
# note: There are a number of relative path assumptions
|
||||
# in the local layers that currently make using an arbitrary
|
||||
# toasterconf.json difficult.
|
||||
|
||||
. $OE_ROOT/.templateconf
|
||||
if [ -n "$TEMPLATECONF" ]; then
|
||||
if [ ! -d "$TEMPLATECONF" ]; then
|
||||
# Allow TEMPLATECONF=meta-xyz/conf as a shortcut
|
||||
if [ -d "$OE_ROOT/$TEMPLATECONF" ]; then
|
||||
TEMPLATECONF="$OE_ROOT/$TEMPLATECONF"
|
||||
fi
|
||||
function notify_chldexit() {
|
||||
if [ $NOTOASTERUI == 0 ]; then
|
||||
check_pidbyfile ${BUILDDIR}/.toasterui.pid && return
|
||||
stop_system
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
unset OE_ROOT
|
||||
|
||||
|
||||
WEBSERVER=1
|
||||
export TOASTER_BUILDSERVER=1
|
||||
ADDR_PORT="localhost:8000"
|
||||
unset CMD
|
||||
for param in $*; do
|
||||
case $param in
|
||||
noweb )
|
||||
WEBSERVER=0
|
||||
;;
|
||||
nobuild )
|
||||
TOASTER_BUILDSERVER=0
|
||||
;;
|
||||
start )
|
||||
CMD=$param
|
||||
;;
|
||||
stop )
|
||||
CMD=$param
|
||||
;;
|
||||
webport=*)
|
||||
ADDR_PORT="${param#*=}"
|
||||
# Split the addr:port string
|
||||
ADDR=`echo $ADDR_PORT | cut -f 1 -d ':'`
|
||||
PORT=`echo $ADDR_PORT | cut -f 2 -d ':'`
|
||||
# If only a port has been speified then set address to localhost.
|
||||
if [ $ADDR = $PORT ] ; then
|
||||
ADDR_PORT="localhost:$PORT"
|
||||
fi
|
||||
;;
|
||||
--help)
|
||||
echo "$HELP"
|
||||
return 0
|
||||
;;
|
||||
*)
|
||||
echo "$HELP"
|
||||
return 1
|
||||
;;
|
||||
|
||||
esac
|
||||
done
|
||||
|
||||
if [ `basename \"$0\"` = `basename \"${TOASTER}\"` ]; then
|
||||
echo "Error: This script needs to be sourced. Please run as . $TOASTER"
|
||||
return 1
|
||||
fi
|
||||
|
||||
verify_prereq || return 1
|
||||
|
||||
# We make sure we're running in the current shell and in a good environment
|
||||
if [ -z "$BUILDDIR" ] || ! which bitbake >/dev/null 2>&1 ; then
|
||||
echo "Error: Build environment is not setup or bitbake is not in path." 1>&2
|
||||
|
||||
if [ -z "$ZSH_NAME" ] && [ `basename \"$0\"` = `basename \"$BASH_SOURCE\"` ]; then
|
||||
echo "Error: This script needs to be sourced. Please run as 'source toaster [start|stop]'" 1>&2;
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ -z "$BUILDDIR" ] || [ -z `which bitbake` ]; then
|
||||
echo "Error: Build environment is not setup or bitbake is not in path." 1>&2;
|
||||
return 2
|
||||
fi
|
||||
|
||||
# this defines the dir toaster will use for
|
||||
# 1) clones of layers (in _toaster_clones )
|
||||
# 2) the build dir (in build)
|
||||
# 3) the sqlite db if that is being used.
|
||||
# 4) pid's we need to clean up on exit/shutdown
|
||||
export TOASTER_DIR=`dirname $BUILDDIR`
|
||||
export BB_ENV_EXTRAWHITE="$BB_ENV_EXTRAWHITE TOASTER_DIR"
|
||||
BBBASEDIR=`dirname ${BASH_SOURCE}`/..
|
||||
|
||||
|
||||
# Verify prerequisites
|
||||
|
||||
if ! echo "import django; print (1,5) == django.VERSION[0:2]" | python 2>/dev/null | grep True >/dev/null; then
|
||||
echo -e "This program needs Django 1.5. Please install with\n\nsudo pip install django==1.5"
|
||||
return 2
|
||||
fi
|
||||
|
||||
if ! echo "import south; print [0,8,4] == map(int,south.__version__.split(\".\"))" | python 2>/dev/null | grep True >/dev/null; then
|
||||
echo -e "This program needs South 0.8.4. Please install with\n\nsudo pip install south==0.8.4"
|
||||
return 2
|
||||
fi
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
# Determine the action. If specified by arguments, fine, if not, toggle it
|
||||
if [ "$CMD" = "start" ] ; then
|
||||
if [ -n "$BBSERVER" ]; then
|
||||
echo " Toaster is already running. Exiting..."
|
||||
return 1
|
||||
fi
|
||||
elif [ "$CMD" = "" ]; then
|
||||
echo "No command specified"
|
||||
echo "$HELP"
|
||||
return 1
|
||||
if [ "x$1" == "xstart" ] || [ "x$1" == "xstop" ]; then
|
||||
CMD="$1"
|
||||
else
|
||||
if [ -z "$BBSERVER" ]; then
|
||||
CMD="start"
|
||||
else
|
||||
CMD="stop"
|
||||
fi;
|
||||
fi
|
||||
|
||||
NOTOASTERUI=0
|
||||
for param in $*; do
|
||||
case $param in
|
||||
noui )
|
||||
NOTOASTERUI=1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
echo "The system will $CMD."
|
||||
|
||||
# Make sure it's safe to run by checking bitbake lock
|
||||
|
||||
lock=1
|
||||
if [ -e $BUILDDIR/bitbake.lock ]; then
|
||||
(flock -n 200 ) 200<$BUILDDIR/bitbake.lock || lock=0
|
||||
fi
|
||||
|
||||
if [ ${CMD} == "start" ] && ( [ $lock -eq 0 ] || [ -e $BUILDDIR/.toastermain.pid ] ); then
|
||||
echo "Error: bitbake lock state error. File locks show that the system is on." 2>&1
|
||||
echo "If you see problems, stop and then start the system again." 2>&1
|
||||
return 3
|
||||
fi
|
||||
|
||||
|
||||
# Execute the commands
|
||||
custom_extention toaster_prepend $CMD $ADDR_PORT
|
||||
|
||||
case $CMD in
|
||||
start )
|
||||
# check if addr:port is not in use
|
||||
if [ "$CMD" == 'start' ]; then
|
||||
if [ $WEBSERVER -gt 0 ]; then
|
||||
$MANAGE checksocket "$ADDR_PORT" || return 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Create configuration file
|
||||
conf=${BUILDDIR}/conf/local.conf
|
||||
line='INHERIT+="toaster buildhistory"'
|
||||
grep -q "$line" $conf || echo $line >> $conf
|
||||
|
||||
if [ $WEBSERVER -eq 0 ] ; then
|
||||
# Do not update the database for "noweb" unless
|
||||
# it does not yet exist
|
||||
if [ ! -f "$TOASTER_DIR/toaster.sqlite" ] ; then
|
||||
if ! databaseCheck; then
|
||||
echo "Failed ${CMD}."
|
||||
return 4
|
||||
fi
|
||||
fi
|
||||
custom_extention noweb_start_postpend $ADDR_PORT
|
||||
fi
|
||||
if [ $WEBSERVER -gt 0 ] && ! webserverStartAll; then
|
||||
start_success=1
|
||||
addtoConfiguration "INHERIT+=\"toaster buildhistory\"" toaster.conf
|
||||
if ! webserverStartAll; then
|
||||
echo "Failed ${CMD}."
|
||||
return 4
|
||||
fi
|
||||
export BITBAKE_UI='toasterui'
|
||||
if [ $TOASTER_BUILDSERVER -eq 1 ] ; then
|
||||
$MANAGE runbuilds \
|
||||
</dev/null >>${BUILDDIR}/toaster_runbuilds.log 2>&1 \
|
||||
& echo $! >${BUILDDIR}/.runbuilds.pid
|
||||
unset BBSERVER
|
||||
bitbake --postread conf/toaster.conf --server-only -t xmlrpc -B localhost:8200
|
||||
if [ $? -ne 0 ]; then
|
||||
start_success=0
|
||||
echo "Bitbake server start failed"
|
||||
else
|
||||
echo "Toaster build server not started."
|
||||
export BBSERVER=localhost:8200
|
||||
if [ $NOTOASTERUI == 0 ]; then # we start the TOASTERUI only if not inhibited
|
||||
bitbake --observe-only -u toasterui >${BUILDDIR}/toaster_ui.log 2>&1 & echo $! >${BUILDDIR}/.toasterui.pid
|
||||
fi
|
||||
fi
|
||||
|
||||
# set fail safe stop system on terminal exit
|
||||
if [ $start_success -eq 1 ]; then
|
||||
# set fail safe stop system on terminal exit
|
||||
trap stop_system SIGHUP
|
||||
echo "Successful ${CMD}."
|
||||
else
|
||||
# failed start, do stop
|
||||
stop_system
|
||||
echo "Failed ${CMD}."
|
||||
fi
|
||||
# stop system on terminal exit
|
||||
set -o monitor
|
||||
trap stop_system SIGHUP
|
||||
echo "Successful ${CMD}."
|
||||
custom_extention toaster_postpend $CMD $ADDR_PORT
|
||||
return 0
|
||||
#trap notify_chldexit SIGCHLD
|
||||
;;
|
||||
stop )
|
||||
stop_system
|
||||
echo "Successful ${CMD}."
|
||||
;;
|
||||
esac
|
||||
custom_extention toaster_postpend $CMD $ADDR_PORT
|
||||
|
||||
|
||||
|
||||
@@ -1,126 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
# ex:ts=4:sw=4:sts=4:et
|
||||
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
|
||||
#
|
||||
# Copyright (C) 2014 Alex Damian
|
||||
#
|
||||
# This file re-uses code spread throughout other Bitbake source files.
|
||||
# As such, all other copyrights belong to their own right holders.
|
||||
#
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License version 2 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License along
|
||||
# with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
"""
|
||||
This command takes a filename as a single parameter. The filename is read
|
||||
as a build eventlog, and the ToasterUI is used to process events in the file
|
||||
and log data in the database
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import json
|
||||
import pickle
|
||||
import codecs
|
||||
|
||||
from collections import namedtuple
|
||||
|
||||
# mangle syspath to allow easy import of modules
|
||||
from os.path import join, dirname, abspath
|
||||
sys.path.insert(0, join(dirname(dirname(abspath(__file__))), 'lib'))
|
||||
|
||||
import bb.cooker
|
||||
from bb.ui import toasterui
|
||||
|
||||
class EventPlayer:
|
||||
"""Emulate a connection to a bitbake server."""
|
||||
|
||||
def __init__(self, eventfile, variables):
|
||||
self.eventfile = eventfile
|
||||
self.variables = variables
|
||||
self.eventmask = []
|
||||
|
||||
def waitEvent(self, _timeout):
|
||||
"""Read event from the file."""
|
||||
line = self.eventfile.readline().strip()
|
||||
if not line:
|
||||
return
|
||||
try:
|
||||
event_str = json.loads(line)['vars'].encode('utf-8')
|
||||
event = pickle.loads(codecs.decode(event_str, 'base64'))
|
||||
event_name = "%s.%s" % (event.__module__, event.__class__.__name__)
|
||||
if event_name not in self.eventmask:
|
||||
return
|
||||
return event
|
||||
except ValueError as err:
|
||||
print("Failed loading ", line)
|
||||
raise err
|
||||
|
||||
def runCommand(self, command_line):
|
||||
"""Emulate running a command on the server."""
|
||||
name = command_line[0]
|
||||
|
||||
if name == "getVariable":
|
||||
var_name = command_line[1]
|
||||
variable = self.variables.get(var_name)
|
||||
if variable:
|
||||
return variable['v'], None
|
||||
return None, "Missing variable %s" % var_name
|
||||
|
||||
elif name == "getAllKeysWithFlags":
|
||||
dump = {}
|
||||
flaglist = command_line[1]
|
||||
for key, val in self.variables.items():
|
||||
try:
|
||||
if not key.startswith("__"):
|
||||
dump[key] = {
|
||||
'v': val['v'],
|
||||
'history' : val['history'],
|
||||
}
|
||||
for flag in flaglist:
|
||||
dump[key][flag] = val[flag]
|
||||
except Exception as err:
|
||||
print(err)
|
||||
return (dump, None)
|
||||
|
||||
elif name == 'setEventMask':
|
||||
self.eventmask = command_line[-1]
|
||||
return True, None
|
||||
|
||||
else:
|
||||
raise Exception("Command %s not implemented" % command_line[0])
|
||||
|
||||
def getEventHandle(self):
|
||||
"""
|
||||
This method is called by toasterui.
|
||||
The return value is passed to self.runCommand but not used there.
|
||||
"""
|
||||
pass
|
||||
|
||||
def main(argv):
|
||||
with open(argv[-1]) as eventfile:
|
||||
# load variables from the first line
|
||||
variables = json.loads(eventfile.readline().strip())['allvariables']
|
||||
|
||||
params = namedtuple('ConfigParams', ['observe_only'])(True)
|
||||
player = EventPlayer(eventfile, variables)
|
||||
|
||||
return toasterui.main(player, player, params)
|
||||
|
||||
# run toaster ui on our mock bitbake class
|
||||
if __name__ == "__main__":
|
||||
if len(sys.argv) != 2:
|
||||
print("Usage: %s <event file>" % os.path.basename(sys.argv[0]))
|
||||
sys.exit(1)
|
||||
|
||||
sys.exit(main(sys.argv))
|
||||
@@ -1,8 +1,8 @@
|
||||
#!/usr/bin/env python3
|
||||
#!/usr/bin/env python
|
||||
# ex:ts=4:sw=4:sts=4:et
|
||||
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
|
||||
#
|
||||
# Copyright (C) 2012, 2018 Wind River Systems, Inc.
|
||||
# Copyright (C) 2012 Wind River Systems, Inc.
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License version 2 as
|
||||
@@ -18,68 +18,51 @@
|
||||
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
#
|
||||
# Used for dumping the bb_cache.dat
|
||||
# This is used for dumping the bb_cache.dat, the output format is:
|
||||
# recipe_path PN PV PACKAGES
|
||||
#
|
||||
import os
|
||||
import sys
|
||||
import argparse
|
||||
import warnings
|
||||
|
||||
# For importing bb.cache
|
||||
sys.path.insert(0, os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), '../lib'))
|
||||
from bb.cache import CoreRecipeInfo
|
||||
|
||||
import pickle
|
||||
import cPickle as pickle
|
||||
|
||||
class DumpCache(object):
|
||||
def __init__(self):
|
||||
parser = argparse.ArgumentParser(
|
||||
description="bb_cache.dat's dumper",
|
||||
epilog="Use %(prog)s --help to get help")
|
||||
parser.add_argument("-r", "--recipe",
|
||||
help="specify the recipe, default: all recipes", action="store")
|
||||
parser.add_argument("-m", "--members",
|
||||
help = "specify the member, use comma as separator for multiple ones, default: all members", action="store", default="")
|
||||
parser.add_argument("-s", "--skip",
|
||||
help = "skip skipped recipes", action="store_true")
|
||||
parser.add_argument("cachefile",
|
||||
help = "specify bb_cache.dat", nargs = 1, action="store", default="")
|
||||
def main(argv=None):
|
||||
"""
|
||||
Get the mapping for the target recipe.
|
||||
"""
|
||||
if len(argv) != 1:
|
||||
print >>sys.stderr, "Error, need one argument!"
|
||||
return 2
|
||||
|
||||
self.args = parser.parse_args()
|
||||
cachefile = argv[0]
|
||||
|
||||
def main(self):
|
||||
with open(self.args.cachefile[0], "rb") as cachefile:
|
||||
pickled = pickle.Unpickler(cachefile)
|
||||
while True:
|
||||
try:
|
||||
key = pickled.load()
|
||||
val = pickled.load()
|
||||
except Exception:
|
||||
break
|
||||
if isinstance(val, CoreRecipeInfo):
|
||||
pn = val.pn
|
||||
with open(cachefile, "rb") as cachefile:
|
||||
pickled = pickle.Unpickler(cachefile)
|
||||
while cachefile:
|
||||
try:
|
||||
key = pickled.load()
|
||||
val = pickled.load()
|
||||
except Exception:
|
||||
break
|
||||
if isinstance(val, CoreRecipeInfo) and (not val.skipped):
|
||||
pn = val.pn
|
||||
# Filter out the native recipes.
|
||||
if key.startswith('virtual:native:') or pn.endswith("-native"):
|
||||
continue
|
||||
|
||||
if self.args.recipe and self.args.recipe != pn:
|
||||
continue
|
||||
# 1.0 is the default version for a no PV recipe.
|
||||
if val.__dict__.has_key("pv"):
|
||||
pv = val.pv
|
||||
else:
|
||||
pv = "1.0"
|
||||
|
||||
if self.args.skip and val.skipped:
|
||||
continue
|
||||
|
||||
if self.args.members:
|
||||
out = key
|
||||
for member in self.args.members.split(','):
|
||||
out += ": %s" % val.__dict__.get(member)
|
||||
print("%s" % out)
|
||||
else:
|
||||
print("%s: %s" % (key, val.__dict__))
|
||||
elif not self.args.recipe:
|
||||
print("%s %s" % (key, val))
|
||||
print("%s %s %s %s" % (key, pn, pv, ' '.join(val.packages)))
|
||||
|
||||
if __name__ == "__main__":
|
||||
try:
|
||||
dump = DumpCache()
|
||||
ret = dump.main()
|
||||
except Exception as esc:
|
||||
ret = 1
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
sys.exit(ret)
|
||||
sys.exit(main(sys.argv[1:]))
|
||||
|
||||
|
||||
@@ -53,6 +53,7 @@ fun! NewBBTemplate()
|
||||
put ='LICENSE = \"\"'
|
||||
put ='SECTION = \"\"'
|
||||
put ='DEPENDS = \"\"'
|
||||
put ='PR = \"r0\"'
|
||||
put =''
|
||||
put ='SRC_URI = \"\"'
|
||||
|
||||
|
||||
@@ -11,7 +11,7 @@
|
||||
# validate: validates
|
||||
# clean: removes files
|
||||
#
|
||||
# The Makefile generates an HTML version of every document. The
|
||||
# The Makefile generates an HTML and PDF version of every document. The
|
||||
# variable DOC indicates the folder name for a given manual.
|
||||
#
|
||||
# To build a manual, you must invoke 'make' with the DOC argument.
|
||||
@@ -21,8 +21,8 @@
|
||||
# make DOC=bitbake-user-manual
|
||||
# make pdf DOC=bitbake-user-manual
|
||||
#
|
||||
# The first example generates the HTML version of the User Manual.
|
||||
# The second example generates the PDF version of the User Manual.
|
||||
# The first example generates the HTML and PDF versions of the User Manual.
|
||||
# The second example generates the HTML version only of the User Manual.
|
||||
#
|
||||
|
||||
ifeq ($(DOC),bitbake-user-manual)
|
||||
@@ -31,9 +31,9 @@ XSLTOPTS = --stringparam html.stylesheet bitbake-user-manual-style.css \
|
||||
--stringparam section.autolabel 1 \
|
||||
--stringparam section.label.includes.component.label 1 \
|
||||
--xinclude
|
||||
ALLPREQ = html tarball
|
||||
TARFILES = bitbake-user-manual-style.css bitbake-user-manual.html figures/bitbake-title.png
|
||||
MANUALS = $(DOC)/$(DOC).html
|
||||
ALLPREQ = html pdf tarball
|
||||
TARFILES = bitbake-user-manual-style.css bitbake-user-manual.html bitbake-user-manual.pdf figures/bitbake-title.png
|
||||
MANUALS = $(DOC)/$(DOC).html $(DOC)/$(DOC).pdf
|
||||
FIGURES = figures
|
||||
STYLESHEET = $(DOC)/*.css
|
||||
|
||||
|
||||
@@ -1,25 +1,11 @@
|
||||
<?xml version='1.0'?>
|
||||
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" xmlns="http://www.w3.org/1999/xhtml" xmlns:fo="http://www.w3.org/1999/XSL/Format" version="1.0">
|
||||
|
||||
<xsl:import href="http://downloads.yoctoproject.org/mirror/docbook-mirror/docbook-xsl-1.76.1/xhtml/docbook.xsl" />
|
||||
|
||||
<!--
|
||||
|
||||
<xsl:import href="../template/1.76.1/docbook-xsl-1.76.1/xhtml/docbook.xsl" />
|
||||
|
||||
<xsl:import href="http://docbook.sourceforge.net/release/xsl/1.76.1/xhtml/docbook.xsl" />
|
||||
|
||||
-->
|
||||
|
||||
<xsl:include href="../template/permalinks.xsl"/>
|
||||
<xsl:include href="../template/section.title.xsl"/>
|
||||
<xsl:include href="../template/component.title.xsl"/>
|
||||
<xsl:include href="../template/division.title.xsl"/>
|
||||
<xsl:include href="../template/formal.object.heading.xsl"/>
|
||||
<xsl:include href="../template/gloss-permalinks.xsl"/>
|
||||
<xsl:import href="http://docbook.sourceforge.net/release/xsl/current/xhtml/docbook.xsl" />
|
||||
|
||||
<xsl:param name="html.stylesheet" select="'user-manual-style.css'" />
|
||||
<xsl:param name="chapter.autolabel" select="1" />
|
||||
<!-- <xsl:param name="appendix.autolabel" select="A" /> -->
|
||||
<xsl:param name="section.autolabel" select="1" />
|
||||
<xsl:param name="section.label.includes.component.label" select="1" />
|
||||
<xsl:param name="appendix.autolabel">A</xsl:param>
|
||||
|
||||
@@ -6,9 +6,8 @@
|
||||
|
||||
<para>
|
||||
The primary purpose for running BitBake is to produce some kind
|
||||
of output such as a single installable package, a kernel, a software
|
||||
development kit, or even a full, board-specific bootable Linux image,
|
||||
complete with bootloader, kernel, and root filesystem.
|
||||
of output such as an image, a kernel, or a software development
|
||||
kit.
|
||||
Of course, you can execute the <filename>bitbake</filename>
|
||||
command with options that cause it to execute single tasks,
|
||||
compile single recipe files, capture or clear data, or simply
|
||||
@@ -21,48 +20,29 @@
|
||||
The execution process is launched using the following command
|
||||
form:
|
||||
<literallayout class='monospaced'>
|
||||
$ bitbake <replaceable>target</replaceable>
|
||||
$ bitbake <target>
|
||||
</literallayout>
|
||||
For information on the BitBake command and its options,
|
||||
see
|
||||
"<link linkend='bitbake-user-manual-command'>The BitBake Command</link>"
|
||||
section.
|
||||
<note>
|
||||
<para>
|
||||
Prior to executing BitBake, you should take advantage of available
|
||||
parallel thread execution on your build host by setting the
|
||||
<link linkend='var-BB_NUMBER_THREADS'><filename>BB_NUMBER_THREADS</filename></link>
|
||||
variable in your project's <filename>local.conf</filename>
|
||||
configuration file.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
A common method to determine this value for your build host is to run
|
||||
the following:
|
||||
<literallayout class='monospaced'>
|
||||
$ grep processor /proc/cpuinfo
|
||||
</literallayout>
|
||||
This command returns the number of processors, which takes into
|
||||
account hyper-threading.
|
||||
Thus, a quad-core build host with hyper-threading most likely
|
||||
shows eight processors, which is the value you would then assign to
|
||||
<filename>BB_NUMBER_THREADS</filename>.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
A possibly simpler solution is that some Linux distributions
|
||||
(e.g. Debian and Ubuntu) provide the <filename>ncpus</filename> command.
|
||||
</para>
|
||||
</note>
|
||||
</para>
|
||||
|
||||
<note>
|
||||
Prior to executing BitBake, you should take advantage of parallel
|
||||
thread execution by setting the
|
||||
<link linkend='var-BB_NUMBER_THREADS'><filename>BB_NUMBER_THREADS</filename></link>
|
||||
variable in your <filename>local.conf</filename>
|
||||
configuration file.
|
||||
</note>
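As a rough illustration only (this helper is assumed for illustration and is not part of BitBake or the manual), the logical-processor count discussed above can also be read from Python when picking a starting value for BB_NUMBER_THREADS:

    # Hypothetical helper: suggest a starting BB_NUMBER_THREADS value.
    import multiprocessing

    def suggested_bb_number_threads():
        # cpu_count() reports logical CPUs, matching the number of
        # "processor" entries in /proc/cpuinfo (hyper-threads included).
        return multiprocessing.cpu_count()

    print('BB_NUMBER_THREADS = "%d"' % suggested_bb_number_threads())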
<section id='parsing-the-base-configuration-metadata'>
|
||||
<title>Parsing the Base Configuration Metadata</title>
|
||||
|
||||
<para>
|
||||
The first thing BitBake does is parse base configuration
|
||||
metadata.
|
||||
Base configuration metadata consists of your project's
|
||||
Base configuration metadata consists of the
|
||||
<filename>bblayers.conf</filename> file to determine what
|
||||
layers BitBake needs to recognize, all necessary
|
||||
<filename>layer.conf</filename> files (one from each layer),
|
||||
@@ -91,11 +71,10 @@
|
||||
and
|
||||
<link linkend='var-BBFILES'><filename>BBFILES</filename></link>.
|
||||
<filename>BBPATH</filename> is used to search for
|
||||
configuration and class files under the
|
||||
<filename>conf</filename> and <filename>classes</filename>
|
||||
configuration and class files under
|
||||
<filename>conf/</filename> and <filename>class/</filename>
|
||||
directories, respectively.
|
||||
<filename>BBFILES</filename> is used to locate both recipe
|
||||
and recipe append files
|
||||
<filename>BBFILES</filename> is used to find recipe files
|
||||
(<filename>.bb</filename> and <filename>.bbappend</filename>).
|
||||
If there is no <filename>bblayers.conf</filename> file,
|
||||
it is assumed the user has set the <filename>BBPATH</filename>
|
||||
@@ -103,7 +82,7 @@
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Next, the <filename>bitbake.conf</filename> file is located
|
||||
Next, the <filename>bitbake.conf</filename> file is searched
|
||||
using the <filename>BBPATH</filename> variable that was
|
||||
just constructed.
|
||||
The <filename>bitbake.conf</filename> file may also include other
|
||||
@@ -116,35 +95,16 @@
|
||||
Prior to parsing configuration files, Bitbake looks
|
||||
at certain variables, including:
|
||||
<itemizedlist>
|
||||
<listitem><para>
|
||||
<link linkend='var-BB_ENV_WHITELIST'><filename>BB_ENV_WHITELIST</filename></link>
|
||||
</para></listitem>
|
||||
<listitem><para>
|
||||
<link linkend='var-BB_ENV_EXTRAWHITE'><filename>BB_ENV_EXTRAWHITE</filename></link>
|
||||
</para></listitem>
|
||||
<listitem><para>
|
||||
<link linkend='var-BB_PRESERVE_ENV'><filename>BB_PRESERVE_ENV</filename></link>
|
||||
</para></listitem>
|
||||
<listitem><para>
|
||||
<link linkend='var-BB_ORIGENV'><filename>BB_ORIGENV</filename></link>
|
||||
</para></listitem>
|
||||
<listitem><para><link linkend='var-BB_ENV_WHITELIST'><filename>BB_ENV_WHITELIST</filename></link></para></listitem>
|
||||
<listitem><para><link linkend='var-BB_PRESERVE_ENV'><filename>BB_PRESERVE_ENV</filename></link></para></listitem>
|
||||
<listitem><para><link linkend='var-BB_ENV_EXTRAWHITE'><filename>BB_ENV_EXTRAWHITE</filename></link></para></listitem>
|
||||
<listitem><para>
|
||||
<link linkend='var-BITBAKE_UI'><filename>BITBAKE_UI</filename></link>
|
||||
</para></listitem>
|
||||
</itemizedlist>
|
||||
The first four variables in this list relate to how BitBake treats shell
|
||||
environment variables during task execution.
|
||||
By default, BitBake cleans the environment variables and provides tight
|
||||
control over the shell execution environment.
|
||||
However, through the use of these first four variables, you can
|
||||
apply your control regarding the
|
||||
environment variables allowed to be used by BitBake in the shell
|
||||
during execution of tasks.
|
||||
See the
|
||||
"<link linkend='passing-information-into-the-build-task-environment'>Passing Information Into the Build Task Environment</link>"
|
||||
section and the information about these variables in the
|
||||
variable glossary for more information on how they work and
|
||||
on how to use them.
|
||||
You can find information on how to pass environment variables into the BitBake
|
||||
execution environment in the
|
||||
"<link linkend='passing-information-into-the-build-task-environment'>Passing Information Into the Build Task Environment</link>" section.
|
||||
</para>
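The whitelist idea behind these variables can be sketched in a few lines of Python (illustrative only; this is not BitBake's implementation, and the variable names in the example whitelist are arbitrary):

    import os

    def cleaned_environment(environ, whitelist):
        # Keep only the variables the build is explicitly allowed to see;
        # everything else is dropped before tasks execute.
        return {k: v for k, v in environ.items() if k in whitelist}

    safe_env = cleaned_environment(dict(os.environ), {"PATH", "HOME", "LANG"})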
<para>
|
||||
@@ -157,18 +117,18 @@
|
||||
optional <filename>conf/bblayers.conf</filename> configuration file.
|
||||
This file is expected to contain a
|
||||
<link linkend='var-BBLAYERS'><filename>BBLAYERS</filename></link>
|
||||
variable that is a space-delimited list of 'layer' directories.
|
||||
variable that is a space delimited list of 'layer' directories.
|
||||
Recall that if BitBake cannot find a <filename>bblayers.conf</filename>
|
||||
file, then it is assumed the user has set the <filename>BBPATH</filename>
|
||||
and <filename>BBFILES</filename> variables directly in the environment.
|
||||
file then it is assumed the user has set the <filename>BBPATH</filename>
|
||||
and <filename>BBFILES</filename> directly in the environment.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
For each directory (layer) in this list, a <filename>conf/layer.conf</filename>
|
||||
file is located and parsed with the
|
||||
file is searched for and parsed with the
|
||||
<link linkend='var-LAYERDIR'><filename>LAYERDIR</filename></link>
|
||||
variable being set to the directory where the layer was found.
|
||||
The idea is these files automatically set up
|
||||
The idea is these files automatically setup
|
||||
<link linkend='var-BBPATH'><filename>BBPATH</filename></link>
|
||||
and other variables correctly for a given build directory.
|
||||
</para>
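A rough sketch of that per-layer pass, with made-up paths and far less work than BitBake's real parser performs, could look like:

    import os

    def iterate_layer_confs(bblayers):
        # For each layer directory listed in BBLAYERS, locate conf/layer.conf;
        # the real parser reads it with LAYERDIR set to the layer directory.
        for layerdir in bblayers.split():
            conf = os.path.join(layerdir, "conf", "layer.conf")
            if os.path.exists(conf):
                yield layerdir, conf

    for layerdir, conf in iterate_layer_confs("/path/to/meta /path/to/meta-custom"):
        print("parse %s with LAYERDIR=%s" % (conf, layerdir))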
@@ -183,7 +143,7 @@
|
||||
|
||||
<para>
|
||||
Only variable definitions and include directives are allowed
|
||||
in BitBake <filename>.conf</filename> files.
|
||||
in <filename>.conf</filename> files.
|
||||
Some variables directly influence BitBake's behavior.
|
||||
These variables might have been set from the environment
|
||||
depending on the environment variables previously
|
||||
@@ -206,8 +166,7 @@
|
||||
Other classes that are specified in the configuration using the
|
||||
<link linkend='var-INHERIT'><filename>INHERIT</filename></link>
|
||||
variable are also included.
|
||||
BitBake searches for class files in a
|
||||
<filename>classes</filename> subdirectory under
|
||||
BitBake searches for class files in a "classes" subdirectory under
|
||||
the paths in <filename>BBPATH</filename> in the same way as
|
||||
configuration files.
|
||||
</para>
|
||||
@@ -230,7 +189,7 @@
|
||||
If a recipe uses a closing curly brace within the function and
|
||||
the character has no leading spaces, BitBake produces a parsing
|
||||
error.
|
||||
If you use a pair of curly braces in a shell function, the
|
||||
If you use a pair of curly brace in a shell function, the
|
||||
closing curly brace must not be located at the start of the line
|
||||
without leading spaces.
|
||||
</para>
|
||||
@@ -302,14 +261,14 @@
|
||||
One common convention is to use the recipe filename to define
|
||||
pieces of metadata.
|
||||
For example, in <filename>bitbake.conf</filename> the recipe
|
||||
name and version are used to set the variables
|
||||
name and version set
|
||||
<link linkend='var-PN'><filename>PN</filename></link> and
|
||||
<link linkend='var-PV'><filename>PV</filename></link>:
|
||||
<literallayout class='monospaced'>
|
||||
PN = "${@bb.parse.BBHandler.vars_from_file(d.getVar('FILE', False),d)[0] or 'defaultpkgname'}"
|
||||
PV = "${@bb.parse.BBHandler.vars_from_file(d.getVar('FILE', False),d)[1] or '1.0'}"
|
||||
PV = "${@bb.parse.BBHandler.vars_from_file(d.getVar('FILE'),d)[1] or '1.0'}"
|
||||
PN = "${@bb.parse.BBHandler.vars_from_file(d.getVar('FILE'),d)[0] or 'defaultpkgname'}"
|
||||
</literallayout>
|
||||
In this example, a recipe called "something_1.2.3.bb" would set
|
||||
In this example, a recipe called "something_1.2.3.bb" sets
|
||||
<filename>PN</filename> to "something" and
|
||||
<filename>PV</filename> to "1.2.3".
|
||||
</para>
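A standalone approximation of that filename convention (not the actual bb.parse code, which handles more cases) is:

    import os

    def pn_pv_from_filename(path):
        # "something_1.2.3.bb" -> ("something", "1.2.3"); PV falls back to "1.0".
        stem = os.path.basename(path)[:-len(".bb")]
        if "_" in stem:
            return tuple(stem.split("_", 1))
        return stem, "1.0"

    assert pn_pv_from_filename("something_1.2.3.bb") == ("something", "1.2.3")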
@@ -372,55 +331,38 @@
|
||||
</section>
|
||||
|
||||
<section id='bb-bitbake-providers'>
|
||||
<title>Providers</title>
|
||||
<title>Preferences and Providers</title>
|
||||
|
||||
<para>
|
||||
Assuming BitBake has been instructed to execute a target
|
||||
and that all the recipe files have been parsed, BitBake
|
||||
starts to figure out how to build the target.
|
||||
BitBake looks through the <filename>PROVIDES</filename> list
|
||||
for each of the recipes.
|
||||
A <filename>PROVIDES</filename> list is the list of names by which
|
||||
the recipe can be known.
|
||||
Each recipe's <filename>PROVIDES</filename> list is created
|
||||
implicitly through the recipe's
|
||||
<link linkend='var-PN'><filename>PN</filename></link> variable
|
||||
and explicitly through the recipe's
|
||||
BitBake starts by looking through the
|
||||
<link linkend='var-PROVIDES'><filename>PROVIDES</filename></link>
|
||||
variable, which is optional.
|
||||
set in recipe files.
|
||||
The default <filename>PROVIDES</filename> for a recipe is its name
|
||||
(<link linkend='var-PN'><filename>PN</filename></link>),
|
||||
however, a recipe can provide multiple things.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
When a recipe uses <filename>PROVIDES</filename>, that recipe's
|
||||
functionality can be found under an alternative name or names other
|
||||
than the implicit <filename>PN</filename> name.
|
||||
As an example, suppose a recipe named <filename>keyboard_1.0.bb</filename>
|
||||
contained the following:
|
||||
As an example of adding an extra provider, suppose a recipe named
|
||||
<filename>foo_1.0.bb</filename> contained the following:
|
||||
<literallayout class='monospaced'>
|
||||
PROVIDES += "fullkeyboard"
|
||||
PROVIDES += "virtual/bar_1.0"
|
||||
</literallayout>
|
||||
The <filename>PROVIDES</filename> list for this recipe becomes
|
||||
"keyboard", which is implicit, and "fullkeyboard", which is explicit.
|
||||
Consequently, the functionality found in
|
||||
<filename>keyboard_1.0.bb</filename> can be found under two
|
||||
different names.
|
||||
</para>
|
||||
</section>
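A minimal sketch of how the implicit and explicit parts combine (an illustration, not BitBake's provider resolution code):

    def provides_list(pn, extra_provides=""):
        # The implicit PN entry comes first, then anything added via PROVIDES.
        return [pn] + extra_provides.split()

    assert provides_list("keyboard", "fullkeyboard") == ["keyboard", "fullkeyboard"]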
<section id='bb-bitbake-preferences'>
<title>Preferences</title>

<para>
The <filename>PROVIDES</filename> list is only part of the solution
for figuring out a target's recipes.
Because targets might have multiple providers, BitBake needs
to prioritize providers by determining provider preferences.
</para>

<para>
A common example in which a target has multiple providers
is "virtual/kernel", which is on the
<filename>PROVIDES</filename> list for each kernel recipe.
Each machine often selects the best kernel provider by using a
line similar to the following in the machine configuration file:
<literallayout class='monospaced'>
@@ -435,7 +377,7 @@
<para>
Understanding how providers are chosen is made complicated by the fact
that multiple versions might exist for a given provider.
BitBake defaults to the highest version of a provider.
Version comparisons are made using the same method as Debian.
You can use the
@@ -444,19 +386,13 @@
You can influence the order by using the
<link linkend='var-DEFAULT_PREFERENCE'><filename>DEFAULT_PREFERENCE</filename></link>
variable.
</para>

<para>
By default, files have a preference of "0".
Setting <filename>DEFAULT_PREFERENCE</filename> to "-1" makes the
recipe unlikely to be used unless it is explicitly referenced.
Setting <filename>DEFAULT_PREFERENCE</filename> to "1" makes it
likely the recipe is used.
<filename>PREFERRED_VERSION</filename> overrides any
<filename>DEFAULT_PREFERENCE</filename> setting.
<filename>DEFAULT_PREFERENCE</filename> is often used to mark newer
and more experimental recipe versions until they have undergone
sufficient testing to be considered stable.
</para>

<para>
@@ -465,16 +401,18 @@
version, unless otherwise specified.
If the recipe in question has a
<link linkend='var-DEFAULT_PREFERENCE'><filename>DEFAULT_PREFERENCE</filename></link>
set lower than the other recipes (default is 0), then
it will not be selected.
This allows the person or persons maintaining
the repository of recipe files to specify
their preference for the default selected version.
Additionally, the user can specify their preferred version.
</para>

<para>
If the first recipe is named <filename>a_1.1.bb</filename>, then the
<link linkend='var-PN'><filename>PN</filename></link> variable
will be set to “a”, and the
<link linkend='var-PV'><filename>PV</filename></link>
@@ -482,38 +420,19 @@
</para>

<para>
Thus, if a recipe named <filename>a_1.2.bb</filename> exists, BitBake
will choose 1.2 by default.
However, if you define the following variable in a
<filename>.conf</filename> file that BitBake parses, you
can change that preference:
<literallayout class='monospaced'>
PREFERRED_VERSION_a = "1.1"
</literallayout>
</para>

<note>
<para>
It is common for a recipe to provide two versions -- a stable,
numbered (and preferred) version, and a version that is
automatically checked out from a source code repository that
is considered more "bleeding edge" but can be selected only
explicitly.
</para>

<para>
For example, in the OpenEmbedded codebase, there is a standard,
versioned recipe file for BusyBox,
<filename>busybox_1.22.1.bb</filename>,
but there is also a Git-based version,
<filename>busybox_git.bb</filename>, which explicitly contains the line
<literallayout class='monospaced'>
DEFAULT_PREFERENCE = "-1"
</literallayout>
to ensure that the numbered, stable version is always preferred
unless the developer selects otherwise.
</para>
</note>

<para>
In summary, BitBake has created a list of providers, which is prioritized, for each target.
</para>
</section>

<section id='bb-bitbake-dependencies'>
@@ -576,7 +495,7 @@
As each task completes, a timestamp is written to the directory specified by the
<link linkend='var-STAMP'><filename>STAMP</filename></link> variable.
On subsequent runs, BitBake looks in the build directory within
<filename>tmp/stamps</filename> and does not rerun
tasks that are already completed unless a timestamp is found to be invalid.
Currently, invalid timestamps are only considered on a per
recipe file basis.
@@ -596,7 +515,7 @@
"<link linkend='checksums'>Checksums (Signatures)</link>"
section for information).
It is also possible to append extra metadata to the stamp using
the <filename>[stamp-extra-info]</filename> task flag.
For example, OpenEmbedded uses this flag to make some tasks machine-specific.
</para>
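<para>
As a minimal sketch (the task name is hypothetical), a recipe or class
could make a task's stamp machine-specific as follows:
<literallayout class='monospaced'>
do_mytask[stamp-extra-info] = "${MACHINE}"
</literallayout>
</para>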
@@ -616,7 +535,7 @@
<title>Executing Tasks</title>

<para>
Tasks can be either a shell task or a Python task.
For shell tasks, BitBake writes a shell script to
<filename>${</filename><link linkend='var-T'><filename>T</filename></link><filename>}/run.do_taskname.pid</filename>
and then executes the script.
@@ -653,8 +572,7 @@
</itemizedlist>
It is possible to have functions run before and after a task's main
function.
This is done using the <filename>[prefuncs]</filename>
and <filename>[postfuncs]</filename> flags of the task
that lists the functions to run.
</para>
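<para>
For example, a minimal sketch (the function and task names are
hypothetical) that wraps a task with extra functions looks like this:
<literallayout class='monospaced'>
do_mytask[prefuncs] += "my_setup_function"
do_mytask[postfuncs] += "my_cleanup_function"
</literallayout>
</para>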
</section>
@@ -804,13 +722,13 @@
make some dependency and hash information available to the build.
This information includes:
<itemizedlist>
<listitem><para><filename>BB_BASEHASH_task-</filename><replaceable>taskname</replaceable>:
The base hashes for each task in the recipe.
</para></listitem>
<listitem><para><filename>BB_BASEHASH_</filename><replaceable>filename</replaceable><filename>:</filename><replaceable>taskname</replaceable>:
The base hashes for each dependent task.
</para></listitem>
<listitem><para><filename>BBHASHDEPS_</filename><replaceable>filename</replaceable><filename>:</filename><replaceable>taskname</replaceable>:
The task dependencies for each task.
</para></listitem>
<listitem><para><filename>BB_TASKHASH</filename>:
@@ -828,7 +746,7 @@
itself.
The simplest parameter to pass is "none", which causes a
set of signature information to be written out into
<filename>STAMPS_DIR</filename>
corresponding to the targets specified.
The other currently available parameter is "printdiff",
which causes BitBake to try to establish the closest
@@ -837,9 +755,9 @@
to determine the stamps and delta where these two
stamp trees diverge.
<note>
It is likely that future versions of BitBake will
provide other signature handlers triggered through
additional "-S" parameters.
</note>
</para>
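<para>
As an illustrative invocation (the target name is hypothetical), the
signature handler is selected on the command line:
<literallayout class='monospaced'>
$ bitbake -S printdiff somerecipe
</literallayout>
</para>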
@@ -916,7 +834,7 @@
<para>
Finally, after all the setscene tasks have executed, BitBake calls the
function listed in
<link linkend='var-BB_SETSCENE_VERIFY_FUNCTION2'><filename>BB_SETSCENE_VERIFY_FUNCTION2</filename></link>
with the list of tasks BitBake thinks has been "covered".
The metadata can then ensure that this list is correct and can
inform BitBake that it wants specific tasks to be run regardless
@@ -8,14 +8,14 @@
BitBake's fetch module is a standalone piece of library code
that deals with the intricacies of downloading source code
and files from remote systems.
Fetching source code is one of the cornerstones of building software.
As such, this module forms an important part of BitBake.
</para>

<para>
The current fetch module is called "fetch2" and refers to the
fact that it is the second major version of the API.
The original version is obsolete and has been removed from the codebase.
Thus, in all cases, "fetch" refers to "fetch2" in this
manual.
</para>
@@ -38,7 +38,7 @@
The code to execute the first part of this process, a fetch,
looks something like the following:
<literallayout class='monospaced'>
src_uri = (d.getVar('SRC_URI') or "").split()
fetcher = bb.fetch2.Fetch(src_uri, d)
fetcher.download()
</literallayout>
@@ -52,7 +52,7 @@
<para>
The instantiation of the fetch class is usually followed by:
<literallayout class='monospaced'>
rootdir = l.getVar('WORKDIR')
fetcher.unpack(rootdir)
</literallayout>
This code unpacks the downloaded files to the
@@ -60,19 +60,17 @@
<note>
For convenience, the naming in these examples matches
the variables used by OpenEmbedded.
If you want to see the above code in action, examine
the OpenEmbedded class file <filename>base.bbclass</filename>.
</note>
The <filename>SRC_URI</filename> and <filename>WORKDIR</filename>
variables are not hardcoded into the fetcher, since those fetcher
methods can be (and are) called with different variable names.
In OpenEmbedded for example, the shared state (sstate) code uses
the fetch module to fetch the sstate files.
</para>
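<para>
As a minimal illustrative sketch (the variable name
<filename>MY_MIRROR_URIS</filename> is hypothetical), the same fetcher
methods can be driven by any variable that holds a list of URLs:
<literallayout class='monospaced'>
my_uris = (d.getVar('MY_MIRROR_URIS') or "").split()
fetcher = bb.fetch2.Fetch(my_uris, d)
fetcher.download()
</literallayout>
</para>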
<para>
When the <filename>download()</filename> method is called,
BitBake tries to resolve the URLs by looking for source files
in a specific search order:
<itemizedlist>
<listitem><para><emphasis>Pre-mirror Sites:</emphasis>
@@ -86,7 +84,7 @@
<filename>SRC_URI</filename>).
</para></listitem>
<listitem><para><emphasis>Mirror Sites:</emphasis>
If fetch failures occur, BitBake next uses mirror locations as
defined by the
<link linkend='var-MIRRORS'><filename>MIRRORS</filename></link>
variable.
@@ -151,14 +149,14 @@
<para>
File integrity is of key importance for reproducing builds.
For non-local archive downloads, the fetcher code can verify
SHA-256 and MD5 checksums to ensure the archives have been
downloaded correctly.
You can specify these checksums by using the
<filename>SRC_URI</filename> variable with the appropriate
varflags as follows:
<literallayout class='monospaced'>
SRC_URI[md5sum] = "<replaceable>value</replaceable>"
SRC_URI[sha256sum] = "<replaceable>value</replaceable>"
</literallayout>
You can also specify the checksums as parameters on the
<filename>SRC_URI</filename> as shown below:
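The following form is an illustrative sketch only (the URL is
hypothetical and <replaceable>value</replaceable> stands in for the
real checksum string):
<literallayout class='monospaced'>
SRC_URI = "http://example.com/foobar.tar.bz2;sha256sum=<replaceable>value</replaceable>"
</literallayout>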
@@ -262,12 +260,21 @@
<para>
This submodule handles URLs that begin with
<filename>file://</filename>.
The filename you specify within the URL can be
either an absolute or relative path to a file.
If the filename is relative, the contents of the
<link linkend='var-FILESPATH'><filename>FILESPATH</filename></link>
variable is used in the same way
<filename>PATH</filename> is used to find executables.
Failing that,
<link linkend='var-FILESDIR'><filename>FILESDIR</filename></link>
is used to find the appropriate relative file.
<note>
<filename>FILESDIR</filename> is deprecated and can
be replaced with <filename>FILESPATH</filename>.
Because <filename>FILESDIR</filename> is likely to be
removed, you should not use this variable in any new code.
</note>
If the file cannot be found, it is assumed that it is available in
<link linkend='var-DL_DIR'><filename>DL_DIR</filename></link>
by the time the <filename>download()</filename> method is called.
@@ -279,64 +286,15 @@
</para>

<para>
Here are a couple of example URLs, the first relative and
the second absolute:
<literallayout class='monospaced'>
SRC_URI = "file://relativefile.patch"
SRC_URI = "file:///Users/ich/very_important_software"
</literallayout>
</para>
</section>

<section id='http-ftp-fetcher'>
|
||||
<title>HTTP/FTP wget fetcher (<filename>http://</filename>, <filename>ftp://</filename>, <filename>https://</filename>)</title>
|
||||
|
||||
<para>
|
||||
This fetcher obtains files from web and FTP servers.
|
||||
Internally, the fetcher uses the wget utility.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
The executable and parameters used are specified by the
|
||||
<filename>FETCHCMD_wget</filename> variable, which defaults
|
||||
to sensible values.
|
||||
The fetcher supports a parameter "downloadfilename" that
|
||||
allows the name of the downloaded file to be specified.
|
||||
Specifying the name of the downloaded file is useful
|
||||
for avoiding collisions in
|
||||
<link linkend='var-DL_DIR'><filename>DL_DIR</filename></link>
|
||||
when dealing with multiple files that have the same name.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Some example URLs are as follows:
|
||||
<literallayout class='monospaced'>
|
||||
SRC_URI = "http://oe.handhelds.org/not_there.aac"
|
||||
SRC_URI = "ftp://oe.handhelds.org/not_there_as_well.aac"
|
||||
SRC_URI = "ftp://you@oe.handhelds.org/home/you/secret.plan"
|
||||
</literallayout>
|
||||
</para>
|
||||
<note>
|
||||
Because URL parameters are delimited by semi-colons, this can
|
||||
introduce ambiguity when parsing URLs that also contain semi-colons,
|
||||
for example:
|
||||
<literallayout class='monospaced'>
|
||||
SRC_URI = "http://abc123.org/git/?p=gcc/gcc.git;a=snapshot;h=a5dd47"
|
||||
</literallayout>
|
||||
Such URLs should be modified by replacing semi-colons with '&' characters:
|
||||
<literallayout class='monospaced'>
|
||||
SRC_URI = "http://abc123.org/git/?p=gcc/gcc.git&a=snapshot&h=a5dd47"
|
||||
</literallayout>
|
||||
In most cases this should work. Treating semi-colons and '&' in queries
|
||||
identically is recommended by the World Wide Web Consortium (W3C).
|
||||
Note that due to the nature of the URL, you may have to specify the name
|
||||
of the downloaded file as well:
|
||||
<literallayout class='monospaced'>
|
||||
SRC_URI = "http://abc123.org/git/?p=gcc/gcc.git&a=snapshot&h=a5dd47;downloadfilename=myfile.bz2"
|
||||
</literallayout>
|
||||
</note>
|
||||
</section>
|
||||
|
||||
<section id='cvs-fetcher'>
|
||||
<title>CVS fetcher (<filename>(cvs://</filename>)</title>
|
||||
|
||||
@@ -355,7 +313,7 @@
|
||||
A special value of "now" causes the checkout to
|
||||
be updated on every build.
|
||||
</para></listitem>
|
||||
<listitem><para><emphasis><link linkend='var-CVSDIR'><filename>CVSDIR</filename></link>:</emphasis>
|
||||
<listitem><para><emphasis><filename>CVSDIR</filename>:</emphasis>
|
||||
Specifies where a temporary checkout is saved.
|
||||
The location is often <filename>DL_DIR/cvs</filename>.
|
||||
</para></listitem>
|
||||
@@ -376,8 +334,7 @@
|
||||
The supported parameters are as follows:
|
||||
<itemizedlist>
|
||||
<listitem><para><emphasis>"method":</emphasis>
|
||||
The protocol over which to communicate with the CVS
|
||||
server.
|
||||
The protocol over which to communicate with the cvs server.
|
||||
By default, this protocol is "pserver".
|
||||
If "method" is set to "ext", BitBake examines the
|
||||
"rsh" parameter and sets <filename>CVS_RSH</filename>.
|
||||
@@ -405,8 +362,7 @@
|
||||
Effectively, you are renaming the output directory
|
||||
to which the module is unpacked.
|
||||
You are forcing the module into a special
|
||||
directory relative to
|
||||
<link linkend='var-CVSDIR'><filename>CVSDIR</filename></link>.
|
||||
directory relative to <filename>CVSDIR</filename>.
|
||||
</para></listitem>
|
||||
<listitem><para><emphasis>"rsh"</emphasis>
|
||||
Used in conjunction with the "method" parameter.
|
||||
@@ -438,6 +394,36 @@
|
||||
</para>
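<para>
As an illustrative sketch (the server, repository path, and module
name are hypothetical), a CVS URL in <filename>SRC_URI</filename>
could look like the following:
<literallayout class='monospaced'>
SRC_URI = "cvs://anonymous@cvs.example.org/var/cvsroot;module=mymodule;method=pserver"
</literallayout>
</para>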
|
||||
</section>
|
||||
|
||||
<section id='svn-fetcher'>
|
||||
<title>Subversion (SVN) Fetcher (<filename>svn://</filename>)</title>
|
||||
|
||||
@@ -447,9 +433,9 @@
|
||||
The executable used is specified by
|
||||
<filename>FETCHCMD_svn</filename>, which defaults
|
||||
to "svn".
|
||||
The fetcher's temporary working directory is set by
|
||||
<link linkend='var-SVNDIR'><filename>SVNDIR</filename></link>,
|
||||
which is usually <filename>DL_DIR/svn</filename>.
|
||||
The fetcher's temporary working directory is set
|
||||
by <filename>SVNDIR</filename>, which is usually
|
||||
<filename>DL_DIR/svn</filename>.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
@@ -461,56 +447,43 @@
|
||||
You can think of this parameter as the top-level
|
||||
directory of the repository data you want.
|
||||
</para></listitem>
|
||||
<listitem><para><emphasis>"path_spec":</emphasis>
|
||||
A specific directory in which to checkout the
|
||||
specified svn module.
|
||||
</para></listitem>
|
||||
<listitem><para><emphasis>"protocol":</emphasis>
|
||||
The protocol to use, which defaults to "svn".
|
||||
If "protocol" is set to "svn+ssh", the "ssh"
|
||||
parameter is also used.
|
||||
Other options are "svn+ssh" and "rsh".
|
||||
For "rsh", the "rsh" parameter is also used.
|
||||
</para></listitem>
|
||||
<listitem><para><emphasis>"rev":</emphasis>
|
||||
The revision of the source code to checkout.
|
||||
</para></listitem>
|
||||
<listitem><para><emphasis>"date":</emphasis>
|
||||
The date of the source code to checkout.
|
||||
Specific revisions are generally much safer to checkout
|
||||
rather than by date as they do not involve timezones
|
||||
(e.g. they are much more deterministic).
|
||||
</para></listitem>
|
||||
<listitem><para><emphasis>"scmdata":</emphasis>
|
||||
Causes the “.svn” directories to be available during
|
||||
compile-time when set to "keep".
|
||||
By default, these directories are removed.
|
||||
</para></listitem>
|
||||
<listitem><para><emphasis>"ssh":</emphasis>
|
||||
An optional parameter used when "protocol" is set
|
||||
to "svn+ssh".
|
||||
You can use this parameter to specify the ssh
|
||||
program used by svn.
|
||||
</para></listitem>
|
||||
<listitem><para><emphasis>"transportuser":</emphasis>
|
||||
When required, sets the username for the transport.
|
||||
By default, this parameter is empty.
|
||||
The transport username is different than the username
|
||||
used in the main URL, which is passed to the subversion
|
||||
command.
|
||||
</para></listitem>
|
||||
</itemizedlist>
|
||||
Following are three examples using svn:
<literallayout class='monospaced'>
SRC_URI = "svn://myrepos/proj1;module=vip;protocol=http;rev=667"
SRC_URI = "svn://myrepos/proj1;module=opie;protocol=svn+ssh"
SRC_URI = "svn://myrepos/proj1;module=trunk;protocol=http;path_spec=${MY_DIR}/proj1"
</literallayout>
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section id='git-fetcher'>
|
||||
<title>Git Fetcher (<filename>git://</filename>)</title>
|
||||
<para>
|
||||
This fetcher submodule fetches code from the Git
|
||||
source control system.
|
||||
The fetcher works by creating a bare clone of the
|
||||
remote into
|
||||
<link linkend='var-GITDIR'><filename>GITDIR</filename></link>,
|
||||
which is usually <filename>DL_DIR/git2</filename>.
This bare clone is then cloned into the work directory during the
|
||||
unpack stage when a specific tree is checked out.
|
||||
This is done using alternates and by reference to
|
||||
@@ -578,7 +551,7 @@
|
||||
network.
|
||||
For that reason, tags are often not used.
|
||||
As far as Git is concerned, the "tag" parameter behaves
|
||||
effectively the same as the "rev" parameter.
</para></listitem>
|
||||
<listitem><para><emphasis>"subpath":</emphasis>
|
||||
Limits the checkout to a specific subpath of the tree.
|
||||
@@ -597,223 +570,6 @@
|
||||
</para>
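<para>
A minimal illustrative example (the repository URL is hypothetical;
"protocol" and "branch" are parameters the Git fetcher accepts):
<literallayout class='monospaced'>
SRC_URI = "git://git.example.com/foo/bar.git;protocol=https;branch=master"
SRCREV = "${AUTOREV}"
</literallayout>
</para>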
|
||||
</section>
|
||||
|
||||
<section id='gitsm-fetcher'>
|
||||
<title>Git Submodule Fetcher (<filename>gitsm://</filename>)</title>
|
||||
|
||||
<para>
|
||||
This fetcher submodule inherits from the
|
||||
<link linkend='git-fetcher'>Git fetcher</link> and extends
|
||||
that fetcher's behavior by fetching a repository's submodules.
|
||||
<link linkend='var-SRC_URI'><filename>SRC_URI</filename></link>
|
||||
is passed to the Git fetcher as described in the
|
||||
"<link linkend='git-fetcher'>Git Fetcher (<filename>git://</filename>)</link>"
|
||||
section.
|
||||
<note>
|
||||
<title>Notes and Warnings</title>
|
||||
<para>
|
||||
You must clean a recipe when switching between
|
||||
'<filename>git://</filename>' and
|
||||
'<filename>gitsm://</filename>' URLs.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
The Git Submodules fetcher is not a complete fetcher
|
||||
implementation.
|
||||
The fetcher has known issues where it does not use the
|
||||
normal source mirroring infrastructure properly. Further,
|
||||
the submodule sources it fetches are not visible to the
|
||||
licensing and source archiving infrastructures.
|
||||
</para>
|
||||
</note>
|
||||
</para>
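<para>
For illustration (the repository URL is hypothetical), a
submodule-aware checkout simply swaps the URL scheme:
<literallayout class='monospaced'>
SRC_URI = "gitsm://git.example.com/foo/bar.git;protocol=https;branch=master"
</literallayout>
</para>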
|
||||
</section>
|
||||
|
||||
<section id='clearcase-fetcher'>
|
||||
<title>ClearCase Fetcher (<filename>ccrc://</filename>)</title>
|
||||
|
||||
<para>
|
||||
This fetcher submodule fetches code from a
|
||||
<ulink url='http://en.wikipedia.org/wiki/Rational_ClearCase'>ClearCase</ulink>
|
||||
repository.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
To use this fetcher, make sure your recipe has proper
|
||||
<link linkend='var-SRC_URI'><filename>SRC_URI</filename></link>,
|
||||
<link linkend='var-SRCREV'><filename>SRCREV</filename></link>, and
|
||||
<link linkend='var-PV'><filename>PV</filename></link> settings.
|
||||
Here is an example:
|
||||
<literallayout class='monospaced'>
|
||||
SRC_URI = "ccrc://cc.example.org/ccrc;vob=/example_vob;module=/example_module"
|
||||
SRCREV = "EXAMPLE_CLEARCASE_TAG"
|
||||
PV = "${@d.getVar("SRCREV", False).replace("/", "+")}"
|
||||
</literallayout>
|
||||
The fetcher uses the <filename>rcleartool</filename> or
|
||||
<filename>cleartool</filename> remote client, depending on
|
||||
which one is available.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Following are options for the <filename>SRC_URI</filename>
|
||||
statement:
|
||||
<itemizedlist>
|
||||
<listitem><para><emphasis><filename>vob</filename></emphasis>:
|
||||
The name, which must include the
|
||||
prepending "/" character, of the ClearCase VOB.
|
||||
This option is required.
|
||||
</para></listitem>
|
||||
<listitem><para><emphasis><filename>module</filename></emphasis>:
|
||||
The module, which must include the
|
||||
prepending "/" character, in the selected VOB.
|
||||
<note>
|
||||
The <filename>module</filename> and <filename>vob</filename>
|
||||
options are combined to create the <filename>load</filename> rule in
|
||||
the view config spec.
|
||||
As an example, consider the <filename>vob</filename> and
|
||||
<filename>module</filename> values from the
|
||||
<filename>SRC_URI</filename> statement at the start of this section.
|
||||
Combining those values results in the following:
|
||||
<literallayout class='monospaced'>
|
||||
load /example_vob/example_module
|
||||
</literallayout>
|
||||
</note>
|
||||
</para></listitem>
|
||||
<listitem><para><emphasis><filename>proto</filename></emphasis>:
|
||||
The protocol, which can be either <filename>http</filename> or
|
||||
<filename>https</filename>.
|
||||
</para></listitem>
|
||||
</itemizedlist>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
By default, the fetcher creates a configuration specification.
|
||||
If you want this specification written to an area other than the default,
|
||||
use the <filename>CCASE_CUSTOM_CONFIG_SPEC</filename> variable
|
||||
in your recipe to define where the specification is written.
|
||||
<note>
|
||||
The <filename>SRCREV</filename> variable loses its functionality if you
|
||||
specify this variable.
|
||||
However, <filename>SRCREV</filename> is still used to label the
|
||||
archive after a fetch even though it does not define what is
|
||||
fetched.
|
||||
</note>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Here are a couple of other behaviors worth mentioning:
|
||||
<itemizedlist>
|
||||
<listitem><para>
|
||||
When using <filename>cleartool</filename>, the login of
|
||||
<filename>cleartool</filename> is handled by the system.
|
||||
The login requires no special steps.
|
||||
</para></listitem>
|
||||
<listitem><para>
|
||||
In order to use <filename>rcleartool</filename> with authenticated
|
||||
users, an "rcleartool login" is necessary before using the fetcher.
|
||||
</para></listitem>
|
||||
</itemizedlist>
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section id='perforce-fetcher'>
|
||||
<title>Perforce Fetcher (<filename>p4://</filename>)</title>
|
||||
|
||||
<para>
|
||||
This fetcher submodule fetches code from the
|
||||
<ulink url='https://www.perforce.com/'>Perforce</ulink>
|
||||
source control system.
|
||||
The executable used is specified by
|
||||
<filename>FETCHCMD_p4</filename>, which defaults
|
||||
to "p4".
|
||||
The fetcher's temporary working directory is set by
|
||||
<link linkend='var-P4DIR'><filename>P4DIR</filename></link>,
|
||||
which defaults to "DL_DIR/p4".
|
||||
</para>
|
||||
|
||||
<para>
|
||||
To use this fetcher, make sure your recipe has proper
|
||||
<link linkend='var-SRC_URI'><filename>SRC_URI</filename></link>,
|
||||
<link linkend='var-SRCREV'><filename>SRCREV</filename></link>, and
|
||||
<link linkend='var-PV'><filename>PV</filename></link> values.
|
||||
The p4 executable is able to use the config file defined by your
|
||||
system's <filename>P4CONFIG</filename> environment variable in
|
||||
order to define the Perforce server URL and port, username, and
|
||||
password if you do not wish to keep those values in a recipe
|
||||
itself.
|
||||
If you choose not to use <filename>P4CONFIG</filename>,
|
||||
or to explicitly set variables that <filename>P4CONFIG</filename>
|
||||
can contain, you can specify the <filename>P4PORT</filename> value,
|
||||
which is the server's URL and port number, and you can
|
||||
specify a username and password directly in your recipe within
|
||||
<filename>SRC_URI</filename>.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Here is an example that relies on <filename>P4CONFIG</filename>
|
||||
to specify the server URL and port, username, and password, and
|
||||
fetches the Head Revision:
|
||||
<literallayout class='monospaced'>
|
||||
SRC_URI = "p4://example-depot/main/source/..."
|
||||
SRCREV = "${AUTOREV}"
|
||||
PV = "p4-${SRCPV}"
|
||||
S = "${WORKDIR}/p4"
|
||||
</literallayout>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Here is an example that specifies the server URL and port,
|
||||
username, and password, and fetches a Revision based on a Label:
|
||||
<literallayout class='monospaced'>
|
||||
P4PORT = "tcp:p4server.example.net:1666"
|
||||
SRC_URI = "p4://user:passwd@example-depot/main/source/..."
|
||||
SRCREV = "release-1.0"
|
||||
PV = "p4-${SRCPV}"
|
||||
S = "${WORKDIR}/p4"
|
||||
</literallayout>
|
||||
<note>
|
||||
You should always set <filename>S</filename>
|
||||
to <filename>"${WORKDIR}/p4"</filename> in your recipe.
|
||||
</note>
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section id='repo-fetcher'>
|
||||
<title>Repo Fetcher (<filename>repo://</filename>)</title>
|
||||
|
||||
<para>
|
||||
This fetcher submodule fetches code from
|
||||
the <filename>google-repo</filename> source control system.
|
||||
The fetcher works by initiating and syncing sources of the
|
||||
repository into
|
||||
<link linkend='var-REPODIR'><filename>REPODIR</filename></link>,
|
||||
which is usually
|
||||
<link linkend='var-DL_DIR'><filename>DL_DIR</filename></link><filename>/repo</filename>.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
This fetcher supports the following parameters:
|
||||
<itemizedlist>
|
||||
<listitem><para>
|
||||
<emphasis>"protocol":</emphasis>
|
||||
Protocol to fetch the repository manifest (default: git).
|
||||
</para></listitem>
|
||||
<listitem><para>
|
||||
<emphasis>"branch":</emphasis>
|
||||
Branch or tag of repository to get (default: master).
|
||||
</para></listitem>
|
||||
<listitem><para>
|
||||
<emphasis>"manifest":</emphasis>
|
||||
Name of the manifest file (default: <filename>default.xml</filename>).
|
||||
</para></listitem>
|
||||
</itemizedlist>
|
||||
Here are some example URLs:
|
||||
<literallayout class='monospaced'>
|
||||
SRC_URI = "repo://REPOROOT;protocol=git;branch=some_branch;manifest=my_manifest.xml"
|
||||
SRC_URI = "repo://REPOROOT;protocol=file;branch=some_branch;manifest=my_manifest.xml"
|
||||
</literallayout>
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section id='other-fetchers'>
|
||||
<title>Other Fetchers</title>
|
||||
|
||||
@@ -823,6 +579,12 @@
|
||||
<listitem><para>
|
||||
Bazaar (<filename>bzr://</filename>)
|
||||
</para></listitem>
|
||||
<listitem><para>
|
||||
Perforce (<filename>p4://</filename>)
|
||||
</para></listitem>
|
||||
<listitem><para>
|
||||
Git Submodules (<filename>gitsm://</filename>)
|
||||
</para></listitem>
|
||||
<listitem><para>
|
||||
Trees using Git Annex (<filename>gitannex://</filename>)
|
||||
</para></listitem>
|
||||
@@ -832,6 +594,9 @@
|
||||
<listitem><para>
|
||||
Secure Shell (<filename>ssh://</filename>)
|
||||
</para></listitem>
|
||||
<listitem><para>
|
||||
Repo (<filename>repo://</filename>)
|
||||
</para></listitem>
|
||||
<listitem><para>
|
||||
OSC (<filename>osc://</filename>)
|
||||
</para></listitem>
|
||||
|
||||
@@ -47,6 +47,7 @@
|
||||
-rw-rw-r--. 1 wmat wmat 849 Nov 26 04:55 HEADER
|
||||
drwxrwxr-x. 5 wmat wmat 4096 Jan 31 13:44 lib
|
||||
-rw-rw-r--. 1 wmat wmat 195 Nov 26 04:55 MANIFEST.in
|
||||
-rwxrwxr-x. 1 wmat wmat 3195 Jan 31 11:57 setup.py
|
||||
-rw-rw-r--. 1 wmat wmat 2887 Nov 26 04:55 TODO
|
||||
</literallayout>
|
||||
</para>
|
||||
@@ -128,8 +129,15 @@
|
||||
</para>
|
||||
|
||||
<note>
|
||||
This example was inspired by and drew heavily from
|
||||
<ulink url="http://www.mail-archive.com/yocto@yoctoproject.org/msg09379.html">Mailing List post - The BitBake equivalent of "Hello, World!"</ulink>.
</note>
|
||||
|
||||
<para>
|
||||
@@ -213,7 +221,7 @@
|
||||
<para>From your shell, enter the following commands to set and
|
||||
export the <filename>BBPATH</filename> variable:
|
||||
<literallayout class='monospaced'>
|
||||
$ BBPATH="<replaceable>projectdirectory</replaceable>"
|
||||
$ BBPATH="<projectdirectory>"
|
||||
$ export BBPATH
|
||||
</literallayout>
|
||||
Use your actual project directory in the command.
|
||||
@@ -260,9 +268,9 @@
|
||||
files.
|
||||
For this example, you need to create the file in your project directory
|
||||
and define some key BitBake variables.
|
||||
For more information on the <filename>bitbake.conf</filename> file,
see
|
||||
<ulink url='http://git.openembedded.org/bitbake/tree/conf/bitbake.conf'></ulink>.
</para>
|
||||
<para>Use the following commands to create the <filename>conf</filename>
|
||||
directory in the project directory:
|
||||
@@ -273,32 +281,14 @@
|
||||
some editor to create the <filename>bitbake.conf</filename>
|
||||
so that it contains the following:
|
||||
<literallayout class='monospaced'>
|
||||
<link linkend='var-PN'>PN</link> = "${@bb.parse.BBHandler.vars_from_file(d.getVar('FILE', False),d)[0] or 'defaultpkgname'}"
|
||||
</literallayout>
|
||||
<literallayout class='monospaced'>
|
||||
TMPDIR = "${<link linkend='var-TOPDIR'>TOPDIR</link>}/tmp"
|
||||
<link linkend='var-CACHE'>CACHE</link> = "${TMPDIR}/cache"
|
||||
<link linkend='var-STAMP'>STAMP</link> = "${TMPDIR}/${PN}/stamps"
|
||||
<link linkend='var-T'>T</link> = "${TMPDIR}/${PN}/work"
|
||||
<link linkend='var-B'>B</link> = "${TMPDIR}/${PN}"
|
||||
<link linkend='var-STAMP'>STAMP</link> = "${TMPDIR}/stamps"
|
||||
<link linkend='var-T'>T</link> = "${TMPDIR}/work"
|
||||
<link linkend='var-B'>B</link> = "${TMPDIR}"
|
||||
</literallayout>
|
||||
<note>
|
||||
Without a value for <filename>PN</filename>, the
|
||||
variables <filename>STAMP</filename>,
|
||||
<filename>T</filename>, and <filename>B</filename>,
|
||||
prevent more than one recipe from working. You can fix
|
||||
this by either setting <filename>PN</filename> to have
|
||||
a value similar to what OpenEmbedded and BitBake use
|
||||
in the default <filename>bitbake.conf</filename> file
|
||||
(see previous example). Or, by manually updating each
|
||||
recipe to set <filename>PN</filename>. You will also
|
||||
need to include <filename>PN</filename> as part of the
|
||||
<filename>STAMP</filename>, <filename>T</filename>, and
|
||||
<filename>B</filename> variable definitions in the
|
||||
<filename>local.conf</filename> file.
|
||||
</note>
|
||||
The <filename>TMPDIR</filename> variable establishes a directory
|
||||
that BitBake uses for build output and intermediate files other
than the cached information used by the
|
||||
<link linkend='setscene'>Setscene</link> process.
|
||||
Here, the <filename>TMPDIR</filename> directory is set to
|
||||
@@ -318,19 +308,19 @@
|
||||
file exists, you can run the <filename>bitbake</filename>
|
||||
command again:
|
||||
<literallayout class='monospaced'>
|
||||
$ bitbake
|
||||
ERROR: Traceback (most recent call last):
|
||||
File "/home/scott-lenovo/bitbake/lib/bb/cookerdata.py", line 163, in wrapped
|
||||
return func(fn, *args)
|
||||
File "/home/scott-lenovo/bitbake/lib/bb/cookerdata.py", line 177, in _inherit
|
||||
bb.parse.BBHandler.inherit(bbclass, "configuration INHERITs", 0, data)
|
||||
File "/home/scott-lenovo/bitbake/lib/bb/parse/parse_py/BBHandler.py", line 92, in inherit
|
||||
include(fn, file, lineno, d, "inherit")
|
||||
File "/home/scott-lenovo/bitbake/lib/bb/parse/parse_py/ConfHandler.py", line 100, in include
|
||||
raise ParseError("Could not %(error_out)s file %(fn)s" % vars(), oldfn, lineno)
|
||||
ParseError: ParseError in configuration INHERITs: Could not inherit file classes/base.bbclass
|
||||
ERROR: Unable to parse base: ParseError in configuration INHERITs: Could not inherit file classes/base.bbclass
</literallayout>
|
||||
In the sample output, BitBake could not find the
|
||||
<filename>classes/base.bbclass</filename> file.
|
||||
@@ -363,6 +353,9 @@
|
||||
Of course, the <filename>base.bbclass</filename> can have much
|
||||
more depending on which build environments BitBake is
|
||||
supporting.
|
||||
For more information on the <filename>base.bbclass</filename> file,
|
||||
you can look at
|
||||
<ulink url='http://hambedded.org/blog/2012/11/24/from-bitbake-hello-world-to-an-image/#tasks'></ulink>.
|
||||
</para></listitem>
|
||||
<listitem><para><emphasis>Run Bitbake:</emphasis>
|
||||
After making sure that the <filename>classes/base.bbclass</filename>
|
||||
@@ -383,10 +376,10 @@
|
||||
code separate from the general metadata used by BitBake.
|
||||
Thus, this example creates and uses a layer called "mylayer".
|
||||
<note>
|
||||
You can find additional information on layers in the
|
||||
"<link linkend='layers'>Layers</link>" section.
|
||||
</note></para>
|
||||
|
||||
You can find additional information on adding a layer at
|
||||
<ulink url='http://hambedded.org/blog/2012/11/24/from-bitbake-hello-world-to-an-image/#adding-an-example-layer'></ulink>.
|
||||
</note>
|
||||
</para>
|
||||
<para>Minimally, you need a recipe file and a layer configuration
|
||||
file in your layer.
|
||||
The configuration file needs to be in the <filename>conf</filename>
|
||||
@@ -407,7 +400,7 @@
|
||||
<link linkend='var-BBFILES'>BBFILES</link> += "${LAYERDIR}/*.bb"
|
||||
|
||||
<link linkend='var-BBFILE_COLLECTIONS'>BBFILE_COLLECTIONS</link> += "mylayer"
|
||||
<link linkend='var-BBFILE_PATTERN'>BBFILE_PATTERN_mylayer</link> := "^${LAYERDIR_RE}/"
</literallayout>
|
||||
For information on these variables, click the links
|
||||
to go to the definitions in the glossary.</para>
|
||||
@@ -478,7 +471,7 @@
|
||||
Time: 00:00:00
|
||||
Parsing of 1 .bb files complete (0 cached, 1 parsed). 1 targets, 0 skipped, 0 masked, 0 errors.
|
||||
NOTE: Resolving any missing task queue dependencies
|
||||
NOTE: Preparing RunQueue
NOTE: Executing RunQueue Tasks
|
||||
********************
|
||||
* *
|
||||
|
||||
@@ -8,9 +8,9 @@
|
||||
Welcome to the BitBake User Manual.
|
||||
This manual provides information on the BitBake tool.
|
||||
The information attempts to be as independent as possible regarding
|
||||
systems that use BitBake, such as OpenEmbedded and the
|
||||
Yocto Project.
|
||||
In some cases, scenarios or examples within the context of
a build system are used in the manual to help with understanding.
|
||||
For these cases, the manual clearly states the context.
|
||||
</para>
|
||||
@@ -35,31 +35,28 @@
|
||||
<listitem><para>
|
||||
BitBake executes tasks according to provided
|
||||
metadata that builds up the tasks.
|
||||
Metadata is stored in recipe (<filename>.bb</filename>)
|
||||
and related recipe "append" (<filename>.bbappend</filename>)
|
||||
files, configuration (<filename>.conf</filename>) and
|
||||
underlying include (<filename>.inc</filename>) files, and
|
||||
in class (<filename>.bbclass</filename>) files.
|
||||
The metadata provides
|
||||
Metadata is stored in recipe (<filename>.bb</filename>),
|
||||
configuration (<filename>.conf</filename>), and class
|
||||
(<filename>.bbclass</filename>) files and provides
|
||||
BitBake with instructions on what tasks to run and
|
||||
the dependencies between those tasks.
|
||||
</para></listitem>
|
||||
<listitem><para>
|
||||
BitBake includes a fetcher library for obtaining source
|
||||
code from various places such as local files, source control
|
||||
systems, or websites.
|
||||
code from various places such as source control
|
||||
systems or websites.
|
||||
</para></listitem>
|
||||
<listitem><para>
|
||||
The instructions for each unit to be built (e.g. a piece
|
||||
of software) are known as "recipe" files and
|
||||
of software) are known as recipe files and
|
||||
contain all the information about the unit
|
||||
(dependencies, source file locations, checksums, description
|
||||
and so on).
|
||||
</para></listitem>
|
||||
<listitem><para>
|
||||
BitBake includes a client/server abstraction and can
|
||||
be used from a command line or used as a service over
|
||||
XML-RPC and has several different user interfaces.
|
||||
be used from a command line or used as a service over XMLRPC and
|
||||
has several different user interfaces.
|
||||
</para></listitem>
|
||||
</itemizedlist>
|
||||
</para>
|
||||
@@ -72,7 +69,7 @@
|
||||
BitBake was originally a part of the OpenEmbedded project.
|
||||
It was inspired by the Portage package management system
|
||||
used by the Gentoo Linux distribution.
|
||||
On December 7, 2004, OpenEmbedded project team member
|
||||
On December 7, 2004, OpenEmbedded project team member,
|
||||
Chris Larson split the project into two distinct pieces:
|
||||
<itemizedlist>
|
||||
<listitem><para>BitBake, a generic task executor</para></listitem>
|
||||
@@ -82,11 +79,8 @@
|
||||
Today, BitBake is the primary basis of the
|
||||
<ulink url="http://www.openembedded.org/">OpenEmbedded</ulink>
|
||||
project, which is being used to build and maintain Linux
|
||||
distributions such as the
|
||||
<ulink url='http://www.angstrom-distribution.org/'>Angstrom Distribution</ulink>,
|
||||
and which is also being used as the build tool for Linux projects
|
||||
such as the
|
||||
<ulink url='http://www.yoctoproject.org'>Yocto Project</ulink>.
|
||||
distributions such as the Angstrom Distribution and which is used
|
||||
as the build tool for Linux projects such as the Yocto Project.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
@@ -94,7 +88,7 @@
|
||||
an aspiring embedded Linux distribution.
|
||||
All of the build systems used by traditional desktop Linux
|
||||
distributions lacked important functionality, and none of the
|
||||
ad hoc Buildroot-based systems, prevalent in the
|
||||
ad-hoc Buildroot-based systems, prevalent in the
|
||||
embedded space, were scalable or maintainable.
|
||||
</para>
|
||||
|
||||
@@ -144,7 +138,7 @@
|
||||
projects for their builds.
|
||||
</para></listitem>
|
||||
<listitem><para>
|
||||
Provide an inheritance mechanism to share
common metadata between many packages.
|
||||
</para></listitem>
|
||||
</itemizedlist>
|
||||
@@ -157,7 +151,7 @@
|
||||
</para></listitem>
|
||||
<listitem><para>
|
||||
Split metadata into layers and allow layers
|
||||
to enhance or override other layers.
|
||||
to override each other.
|
||||
</para></listitem>
|
||||
<listitem><para>
|
||||
Allow representation of a given set of input variables
|
||||
@@ -184,14 +178,14 @@
|
||||
what tasks are required to run, and executes those tasks.
|
||||
Similar to GNU Make, BitBake controls how software is
|
||||
built.
|
||||
GNU Make achieves its control through "makefiles", while
|
||||
GNU Make achieves its control through "makefiles".
|
||||
BitBake uses "recipes".
|
||||
</para>
|
||||
|
||||
<para>
|
||||
BitBake extends the capabilities of a simple
|
||||
tool like GNU Make by allowing for the definition of much more
|
||||
complex tasks, such as assembling entire embedded Linux
|
||||
tool like GNU Make by allowing for much more complex tasks
|
||||
to be completed, such as assembling entire embedded Linux
|
||||
distributions.
|
||||
</para>
|
||||
|
||||
@@ -209,20 +203,14 @@
|
||||
<filename>.bb</filename>, are the most basic metadata files.
|
||||
These recipe files provide BitBake with the following:
|
||||
<itemizedlist>
|
||||
<listitem><para>Descriptive information about the
|
||||
package (author, homepage, license, and so on)</para></listitem>
|
||||
<listitem><para>Descriptive information about the package</para></listitem>
|
||||
<listitem><para>The version of the recipe</para></listitem>
|
||||
<listitem><para>Existing dependencies (both build
|
||||
and runtime dependencies)</para></listitem>
|
||||
<listitem><para>Where the source code resides and
|
||||
how to fetch it</para></listitem>
|
||||
<listitem><para>Whether the source code requires
|
||||
any patches, where to find them, and how to apply
|
||||
them</para></listitem>
|
||||
<listitem><para>How to configure and compile the
|
||||
source code</para></listitem>
|
||||
<listitem><para>Existing Dependencies</para></listitem>
|
||||
<listitem><para>Where the source code resides</para></listitem>
|
||||
<listitem><para>Whether the source code requires any patches</para></listitem>
|
||||
<listitem><para>How to compile the source code</para></listitem>
|
||||
<listitem><para>Where on the target machine to install the
|
||||
package or packages created</para></listitem>
|
||||
package being compiled</para></listitem>
|
||||
</itemizedlist>
|
||||
</para>
|
||||
|
||||
@@ -234,11 +222,7 @@
|
||||
The term "package" is also commonly used to describe recipes.
|
||||
However, since the same word is used to describe packaged
|
||||
output from a project, it is best to maintain a single
|
||||
descriptive term - "recipes".
|
||||
Put another way, a single "recipe" file is quite capable
|
||||
of generating a number of related but separately installable
|
||||
"packages".
|
||||
In fact, that ability is fairly common.
|
||||
descriptive term, "recipes".
|
||||
</note>
|
||||
</para>
|
||||
</section>
|
||||
@@ -273,7 +257,7 @@
|
||||
called <filename>base.bbclass</filename>.
|
||||
You can find this file in the
|
||||
<filename>classes</filename> directory.
|
||||
The <filename>base.bbclass</filename> class files is special since it
|
||||
The <filename>base.bbclass</filename> is special since it
|
||||
is always included automatically for all recipes
|
||||
and classes.
|
||||
This class contains definitions for standard basic tasks such
|
||||
@@ -300,8 +284,7 @@
|
||||
To illustrate how you can use layers to keep things modular,
|
||||
consider customizations you might make to support a specific target machine.
|
||||
These types of customizations typically reside in a special layer,
|
||||
rather than a general layer, called a Board Support Package (BSP)
|
||||
Layer.
Furthermore, the machine customizations should be isolated from
|
||||
recipes and metadata that support a new GUI environment, for
|
||||
example.
|
||||
@@ -321,8 +304,9 @@
|
||||
|
||||
<para>
|
||||
Append files, which are files that have the
|
||||
<filename>.bbappend</filename> file extension, extend or
|
||||
override information in an existing recipe file.
|
||||
<filename>.bbappend</filename> file extension, add or
|
||||
extend build information to an existing
|
||||
recipe file.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
@@ -335,9 +319,8 @@
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Information in append files extends or
|
||||
overrides the information in the underlying,
|
||||
similarly-named recipe files.
|
||||
Information in append files overrides the information in the
|
||||
similarly-named recipe file.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
@@ -362,12 +345,6 @@
|
||||
However, if you named the append file
|
||||
<filename>busybox_1.%.bbappend</filename>, then you would have a match.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
In the most general case, you could name the append file something as
|
||||
simple as <filename>busybox_%.bbappend</filename> to be entirely
|
||||
version independent.
|
||||
</para>
|
||||
</section>
|
||||
</section>
|
||||
|
||||
@@ -436,20 +413,6 @@
|
||||
you have a directory entitled
|
||||
<filename>bitbake-1.17.0</filename>.
|
||||
</para></listitem>
|
||||
<listitem><para><emphasis>Using the BitBake that Comes With Your
|
||||
Build Checkout:</emphasis>
|
||||
A final possibility for getting a copy of BitBake is that it
|
||||
already comes with your checkout of a larger Bitbake-based build
|
||||
system, such as Poky.
|
||||
Rather than manually checking out individual layers and
|
||||
gluing them together yourself, you can check
|
||||
out an entire build system.
|
||||
The checkout will already include a version of BitBake that
|
||||
has been thoroughly tested for compatibility with the other
|
||||
components.
|
||||
For information on how to check out a particular BitBake-based
|
||||
build system, consult that build system's supporting documentation.
|
||||
</para></listitem>
|
||||
</itemizedlist>
|
||||
</para>
|
||||
</section>
|
||||
@@ -471,7 +434,7 @@
|
||||
Following is the usage and syntax for BitBake:
|
||||
<literallayout class='monospaced'>
|
||||
$ bitbake -h
|
||||
Usage: bitbake [options] [recipename/target recipe:do_task ...]
|
||||
Usage: bitbake [options] [recipename/target ...]
|
||||
|
||||
Executes the specified task (default is 'build') for a given set of target recipes (.bb files).
|
||||
It is assumed there is a conf/bblayers.conf available in cwd or in BBPATH which
|
||||
@@ -488,6 +451,8 @@
|
||||
target that failed and anything depending on it cannot
|
||||
be built, as much as possible will be built before
|
||||
stopping.
|
||||
-a, --tryaltconfigs Continue with builds by trying to use alternative
|
||||
providers where possible.
|
||||
-f, --force Force the specified targets/task to run (invalidating
|
||||
any existing stamp file).
|
||||
-c CMD, --cmd=CMD Specify the task to execute. The exact options
|
||||
@@ -502,31 +467,18 @@
|
||||
Read the specified file before bitbake.conf.
|
||||
-R POSTFILE, --postread=POSTFILE
|
||||
Read the specified file after bitbake.conf.
|
||||
-v, --verbose Enable tracing of shell tasks (with 'set -x'). Also
|
||||
print bb.note(...) messages to stdout (in addition to
|
||||
writing them to ${T}/log.do_<task>).
|
||||
-v, --verbose Output more log message data to the terminal.
|
||||
-D, --debug Increase the debug level. You can specify this more
|
||||
than once. -D sets the debug level to 1, where only
|
||||
bb.debug(1, ...) messages are printed to stdout; -DD
|
||||
sets the debug level to 2, where both bb.debug(1, ...)
|
||||
and bb.debug(2, ...) messages are printed; etc.
|
||||
Without -D, no debug messages are printed. Note that
|
||||
-D only affects output to stdout. All debug messages
|
||||
are written to ${T}/log.do_taskname, regardless of the
|
||||
debug level.
|
||||
-q, --quiet Output less log message data to the terminal. You can
|
||||
specify this more than once.
|
||||
than once.
|
||||
-n, --dry-run Don't execute, just go through the motions.
|
||||
-S SIGNATURE_HANDLER, --dump-signatures=SIGNATURE_HANDLER
|
||||
-S DUMP_SIGNATURES, --dump-signatures=DUMP_SIGNATURES
|
||||
Dump out the signature construction information, with
|
||||
no task execution. The SIGNATURE_HANDLER parameter is
|
||||
passed to the handler. Two common values are none and
|
||||
printdiff but the handler may define more/less. none
|
||||
means only dump the signature, printdiff means compare
|
||||
the dumped signature with the cached one.
|
||||
no task execution. Parameters are passed to the
|
||||
signature handling code, use 'none' if no specific
|
||||
handler is required.
|
||||
-p, --parse-only Quit after parsing the BB recipes.
|
||||
-s, --show-versions Show current and preferred versions of all recipes.
|
||||
-e, --environment Show the global or per-recipe environment complete
|
||||
-e, --environment Show the global or per-package environment complete
|
||||
with information about where variables were
|
||||
set/changed.
|
||||
-g, --graphviz Save dependency tree information for the specified
|
||||
@@ -538,38 +490,21 @@
|
||||
-l DEBUG_DOMAINS, --log-domains=DEBUG_DOMAINS
|
||||
Show debug logging for the specified logging domains
|
||||
-P, --profile Profile the command and save reports.
|
||||
-u UI, --ui=UI The user interface to use (knotty, ncurses or taskexp
|
||||
- default knotty).
|
||||
--token=XMLRPCTOKEN Specify the connection token to be used when
|
||||
connecting to a remote server.
|
||||
-u UI, --ui=UI The user interface to use (e.g. knotty, hob, depexp).
|
||||
-t SERVERTYPE, --servertype=SERVERTYPE
|
||||
Choose which server to use, process or xmlrpc.
|
||||
--revisions-changed Set the exit code depending on whether upstream
|
||||
floating revisions have changed or not.
|
||||
--server-only Run bitbake without a UI, only starting a server
|
||||
(cooker) process.
|
||||
-B BIND, --bind=BIND The name/address for the bitbake xmlrpc server to bind
|
||||
to.
|
||||
-T SERVER_TIMEOUT, --idle-timeout=SERVER_TIMEOUT
|
||||
Set timeout to unload bitbake server due to
|
||||
inactivity, set to -1 means no unload, default:
|
||||
Environment variable BB_SERVER_TIMEOUT.
|
||||
-B BIND, --bind=BIND The name/address for the bitbake server to bind to.
|
||||
--no-setscene Do not run any setscene tasks. sstate will be ignored
|
||||
and everything needed, built.
|
||||
--setscene-only Only run setscene tasks, don't run any real tasks.
|
||||
--remote-server=REMOTE_SERVER
|
||||
Connect to the specified server.
|
||||
-m, --kill-server Terminate any running bitbake server.
|
||||
-m, --kill-server Terminate the remote server.
|
||||
--observe-only Connect to a server as an observing-only client.
|
||||
--status-only Check the status of the remote bitbake server.
|
||||
-w WRITEEVENTLOG, --write-log=WRITEEVENTLOG
|
||||
Writes the event log of the build to a bitbake event
|
||||
json file. Use '' (empty string) to assign the name
|
||||
automatically.
|
||||
--runall=RUNALL Run the specified task for any recipe in the taskgraph
|
||||
of the specified target (even if it wouldn't otherwise
|
||||
have run).
|
||||
--runonly=RUNONLY Run only the specified task within the taskgraph of
|
||||
the specified targets (and any task dependencies those
|
||||
tasks may have).
|
||||
</literallayout>
|
||||
</para>
|
||||
</section>
|
||||
@@ -624,14 +559,14 @@
|
||||
when one wants to manage multiple <filename>.bb</filename>
|
||||
files.
|
||||
Clearly there needs to be a way to tell BitBake what
|
||||
files are available and, of those, which you
|
||||
files are available, and of those, which you
|
||||
want to execute.
|
||||
There also needs to be a way for each recipe
|
||||
to express its dependencies, both for build-time and
|
||||
runtime.
|
||||
There must be a way for you to express recipe preferences
|
||||
when multiple recipes provide the same functionality, or when
|
||||
there are multiple versions of a recipe.
|
||||
there are multiple versions of a recipe.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
@@ -652,25 +587,6 @@
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section id='executing-a-list-of-task-and-recipe-combinations'>
|
||||
<title>Executing a List of Task and Recipe Combinations</title>
|
||||
|
||||
<para>
|
||||
The BitBake command line supports specifying different
|
||||
tasks for individual targets when you specify multiple
|
||||
targets.
|
||||
For example, suppose you had two targets (or recipes)
|
||||
<filename>myfirstrecipe</filename> and
|
||||
<filename>mysecondrecipe</filename> and you needed
|
||||
BitBake to run <filename>taskA</filename> for the first
|
||||
recipe and <filename>taskB</filename> for the second
|
||||
recipe:
|
||||
<literallayout class='monospaced'>
|
||||
$ bitbake myfirstrecipe:do_taskA mysecondrecipe:do_taskB
|
||||
</literallayout>
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section id='generating-dependency-graphs'>
|
||||
<title>Generating Dependency Graphs</title>
|
||||
|
||||
@@ -683,21 +599,21 @@
|
||||
</para>
|
||||
|
||||
<para>
|
||||
When you generate a dependency graph, BitBake writes three files
|
||||
When you generate a dependency graph, BitBake writes four files
|
||||
to the current working directory:
|
||||
<itemizedlist>
|
||||
<listitem><para>
|
||||
<emphasis><filename>recipe-depends.dot</filename>:</emphasis>
|
||||
Shows dependencies between recipes (i.e. a collapsed version of
|
||||
<filename>task-depends.dot</filename>).
|
||||
<listitem><para><emphasis><filename>package-depends.dot</filename>:</emphasis>
|
||||
Shows BitBake's knowledge of dependencies between
|
||||
runtime targets.
|
||||
</para></listitem>
|
||||
<listitem><para>
|
||||
<emphasis><filename>task-depends.dot</filename>:</emphasis>
|
||||
<listitem><para><emphasis><filename>pn-depends.dot</filename>:</emphasis>
|
||||
Shows dependencies between build-time targets
|
||||
(i.e. recipes).
|
||||
</para></listitem>
|
||||
<listitem><para><emphasis><filename>task-depends.dot</filename>:</emphasis>
|
||||
Shows dependencies between tasks.
|
||||
These dependencies match BitBake's internal task execution list.
|
||||
</para></listitem>
|
||||
<listitem><para>
|
||||
<emphasis><filename>pn-buildlist</filename>:</emphasis>
|
||||
<listitem><para><emphasis><filename>pn-buildlist</filename>:</emphasis>
|
||||
Shows a simple list of targets that are to be built.
|
||||
</para></listitem>
|
||||
</itemizedlist>
|
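The generated graphs are plain Graphviz text, so they are easy to inspect without extra tooling. Below is a minimal, hypothetical Python sketch (file names as listed above, run from the directory where BitBake wrote them) that summarises the build list and the task dependency edges; it is illustrative only and not part of BitBake.

# Illustrative sketch: summarise the files written by "bitbake -g <target>".
# Assumes it runs in the directory containing pn-buildlist and task-depends.dot.

with open("pn-buildlist") as f:
    targets = [line.strip() for line in f if line.strip()]
print("%d targets in the build list" % len(targets))

edges = 0
with open("task-depends.dot") as f:
    for line in f:
        # Graphviz edge lines look roughly like: "recipe.do_task" -> "other.do_task"
        if "->" in line:
            edges += 1
print("%d task dependency edges" % edges)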
||||
|
||||
File diff suppressed because it is too large
@@ -43,8 +43,8 @@
|
||||
<link linkend='var-DEFAULT_PREFERENCE'>D</link>
|
||||
<link linkend='var-EXCLUDE_FROM_WORLD'>E</link>
|
||||
<link linkend='var-FAKEROOT'>F</link>
|
||||
<link linkend='var-GITDIR'>G</link>
|
||||
<link linkend='var-HGDIR'>H</link>
|
||||
<!-- <link linkend='var-GROUPADD_PARAM'>G</link> -->
|
||||
<link linkend='var-HOMEPAGE'>H</link>
|
||||
<!-- <link linkend='var-ICECC_DISABLED'>I</link> -->
|
||||
<!-- <link linkend='var-glossary-j'>J</link> -->
|
||||
<!-- <link linkend='var-KARCH'>K</link> -->
|
||||
@@ -52,7 +52,7 @@
|
||||
<link linkend='var-MIRRORS'>M</link>
|
||||
<!-- <link linkend='var-glossary-n'>N</link> -->
|
||||
<link linkend='var-OVERRIDES'>O</link>
|
||||
<link linkend='var-P4DIR'>P</link>
|
||||
<link linkend='var-PACKAGES'>P</link>
|
||||
<!-- <link linkend='var-QMAKE_PROFILES'>Q</link> -->
|
||||
<link linkend='var-RDEPENDS'>R</link>
|
||||
<link linkend='var-SECTION'>S</link>
|
||||
@@ -102,56 +102,6 @@
|
||||
</glossdef>
|
||||
</glossentry>
|
||||
|
||||
<glossentry id='var-BB_ALLOWED_NETWORKS'><glossterm>BB_ALLOWED_NETWORKS</glossterm>
|
||||
<glossdef>
|
||||
<para>
|
||||
Specifies a space-delimited list of hosts that the fetcher
|
||||
is allowed to use to obtain the required source code.
|
||||
Following are considerations surrounding this variable:
|
||||
<itemizedlist>
|
||||
<listitem><para>
|
||||
This host list is only used if
|
||||
<link linkend='var-BB_NO_NETWORK'><filename>BB_NO_NETWORK</filename></link>
|
||||
is either not set or set to "0".
|
||||
</para></listitem>
|
||||
<listitem><para>
|
||||
Limited support for wildcard matching against the
|
||||
beginning of host names exists.
|
||||
For example, the following setting matches
|
||||
<filename>git.gnu.org</filename>,
|
||||
<filename>ftp.gnu.org</filename>, and
|
||||
<filename>foo.git.gnu.org</filename>.
|
||||
<literallayout class='monospaced'>
|
||||
BB_ALLOWED_NETWORKS = "*.gnu.org"
|
||||
</literallayout>
|
||||
</para></listitem>
|
||||
<listitem><para>
|
||||
Mirrors not in the host list are skipped and
|
||||
logged at the debug level.
|
||||
</para></listitem>
|
||||
<listitem><para>
|
||||
Attempts to access networks not in the host list
|
||||
cause a failure.
|
||||
</para></listitem>
|
||||
</itemizedlist>
|
||||
Using <filename>BB_ALLOWED_NETWORKS</filename> in
|
||||
conjunction with
|
||||
<link linkend='var-PREMIRRORS'><filename>PREMIRRORS</filename></link>
|
||||
is very useful.
|
||||
Adding the host you want to use to
|
||||
<filename>PREMIRRORS</filename> results in the source code
|
||||
being fetched from an allowed location and avoids raising
|
||||
an error when a host that is not allowed is in a
|
||||
<link linkend='var-SRC_URI'><filename>SRC_URI</filename></link>
|
||||
statement.
|
||||
This is because the fetcher does not attempt to use the
|
||||
host listed in <filename>SRC_URI</filename> after a
|
||||
successful fetch from the
|
||||
<filename>PREMIRRORS</filename> occurs.
|
||||
</para>
|
||||
</glossdef>
|
||||
</glossentry>
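To make the wildcard rule above concrete, here is a rough Python illustration of prefix-style host matching. It is only a sketch of the documented behaviour, not BitBake's actual fetcher code, and the helper name is made up.

# Hypothetical illustration of BB_ALLOWED_NETWORKS-style matching;
# not the real fetcher implementation.
def host_allowed(host, allowed_networks):
    for pattern in allowed_networks.split():
        if pattern.startswith("*"):
            # "*.gnu.org" matches git.gnu.org, ftp.gnu.org, foo.git.gnu.org, ...
            if host.endswith(pattern[1:]):
                return True
        elif host == pattern:
            return True
    return False

print(host_allowed("git.gnu.org", "*.gnu.org"))   # True
print(host_allowed("example.com", "*.gnu.org"))   # False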
|
||||
|
||||
<glossentry id='var-BB_CONSOLELOG'><glossterm>BB_CONSOLELOG</glossterm>
|
||||
<glossdef>
|
||||
<para>
|
||||
@@ -716,7 +666,7 @@
|
||||
</glossdef>
|
||||
</glossentry>
|
||||
|
||||
<glossentry id='var-BB_SETSCENE_VERIFY_FUNCTION2'><glossterm>BB_SETSCENE_VERIFY_FUNCTION2</glossterm>
|
||||
<glossentry id='var-BB_SETSCENE_VERIFY_FUNCTION'><glossterm>BB_SETSCENE_VERIFY_FUNCTION</glossterm>
|
||||
<glossdef>
|
||||
<para>
|
||||
Specifies a function to call that verifies the list of
|
||||
@@ -856,56 +806,6 @@
|
||||
</glossdef>
|
||||
</glossentry>
|
||||
|
||||
<glossentry id='var-BB_TASK_IONICE_LEVEL'><glossterm>BB_TASK_IONICE_LEVEL</glossterm>
|
||||
<glossdef>
|
||||
<para>
|
||||
Allows adjustment of a task's Input/Output priority.
|
||||
During Autobuilder testing, random failures can occur
|
||||
for tasks due to I/O starvation.
|
||||
These failures occur during various QEMU runtime timeouts.
|
||||
You can use the <filename>BB_TASK_IONICE_LEVEL</filename>
|
||||
variable to adjust the I/O priority of these tasks.
|
||||
<note>
|
||||
This variable works similarly to the
|
||||
<link linkend='var-BB_TASK_NICE_LEVEL'><filename>BB_TASK_NICE_LEVEL</filename></link>
|
||||
variable, except that it adjusts a task's I/O priorities.
|
||||
</note>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Set the variable as follows:
|
||||
<literallayout class='monospaced'>
|
||||
BB_TASK_IONICE_LEVEL = "<replaceable>class</replaceable>.<replaceable>prio</replaceable>"
|
||||
</literallayout>
|
||||
For <replaceable>class</replaceable>, the default value is
|
||||
"2", which is a best effort.
|
||||
You can use "1" for realtime and "3" for idle.
|
||||
If you want to use realtime, you must have superuser
|
||||
privileges.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
For <replaceable>prio</replaceable>, you can use any
|
||||
value from "0", which is the highest priority, to "7",
|
||||
which is the lowest.
|
||||
The default value is "4".
|
||||
You do not need any special privileges to use this range
|
||||
of priority values.
|
||||
<note>
|
||||
In order for your I/O priority settings to take effect,
|
||||
you need the Completely Fair Queuing (CFQ) Scheduler
|
||||
selected for the backing block device.
|
||||
To select the scheduler, use the following command form
|
||||
where <replaceable>device</replaceable> is the device
|
||||
(e.g. sda, sdb, and so forth):
|
||||
<literallayout class='monospaced'>
|
||||
$ sudo sh -c "echo cfq > /sys/block/<replaceable>device</replaceable>/queue/scheduler"
|
||||
</literallayout>
|
||||
</note>
|
||||
</para>
|
||||
</glossdef>
|
||||
</glossentry>
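As a small illustration of the "class.prio" format described above, the value splits on the first dot into an I/O class and a priority. The snippet below is illustrative only; the value shown is just an example, and actually applying it requires the appropriate I/O-priority support.

# Illustrative only: parse a BB_TASK_IONICE_LEVEL-style value into its fields.
ionice = "2.4"                      # class 2 (best effort), priority 4
io_class, io_prio = ionice.split(".", 1)
print("I/O class %d, priority %d" % (int(io_class), int(io_prio)))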
|
||||
|
||||
<glossentry id='var-BB_TASK_NICE_LEVEL'><glossterm>BB_TASK_NICE_LEVEL</glossterm>
|
||||
<glossdef>
|
||||
<para>
|
||||
@@ -972,7 +872,7 @@
|
||||
that run on the target <filename>MACHINE</filename>;
|
||||
"nativesdk", which targets the SDK machine instead of
|
||||
<filename>MACHINE</filename>; and "multilibs" in the form
|
||||
"<filename>multilib:</filename><replaceable>multilib_name</replaceable>".
|
||||
"<filename>multilib:<multilib_name></filename>".
|
||||
</para>
|
||||
|
||||
<para>
|
||||
@@ -984,31 +884,8 @@
|
||||
metadata:
|
||||
<literallayout class='monospaced'>
|
||||
BBCLASSEXTEND =+ "native nativesdk"
|
||||
BBCLASSEXTEND =+ "multilib:<replaceable>multilib_name</replaceable>"
|
||||
BBCLASSEXTEND =+ "multilib:<multilib_name>"
|
||||
</literallayout>
|
||||
<note>
|
||||
<para>
|
||||
Internally, the <filename>BBCLASSEXTEND</filename>
|
||||
mechanism generates recipe variants by rewriting
|
||||
variable values and applying overrides such as
|
||||
<filename>_class-native</filename>.
|
||||
For example, to generate a native version of a recipe,
|
||||
a
|
||||
<link linkend='var-DEPENDS'><filename>DEPENDS</filename></link>
|
||||
on "foo" is rewritten to a <filename>DEPENDS</filename>
|
||||
on "foo-native".
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Even when using <filename>BBCLASSEXTEND</filename>, the
|
||||
recipe is only parsed once.
|
||||
Parsing once adds some limitations.
|
||||
For example, it is not possible to
|
||||
include a different file depending on the variant,
|
||||
since <filename>include</filename> statements are
|
||||
processed when the recipe is parsed.
|
||||
</para>
|
||||
</note>
|
||||
</para>
|
||||
</glossdef>
|
||||
</glossentry>
|
||||
@@ -1017,7 +894,7 @@
|
||||
<glossdef>
|
||||
<para>
|
||||
Sets the BitBake debug output level to a specific value
|
||||
as incremented by the <filename>-D</filename> command line
|
||||
as incremented by the <filename>-d</filename> command line
|
||||
option.
|
||||
<note>
|
||||
You must set this variable in the external environment
|
||||
@@ -1139,18 +1016,6 @@
|
||||
</glossdef>
|
||||
</glossentry>
|
||||
|
||||
<glossentry id='var-BBLAYERS_FETCH_DIR'><glossterm>BBLAYERS_FETCH_DIR</glossterm>
|
||||
<glossdef>
|
||||
<para>
|
||||
Sets the base location where layers are stored.
|
||||
This setting is used in conjunction with
|
||||
<filename>bitbake-layers layerindex-fetch</filename> and
|
||||
tells <filename>bitbake-layers</filename> where to place
|
||||
the fetched layers.
|
||||
</para>
|
||||
</glossdef>
|
||||
</glossentry>
|
||||
|
||||
<glossentry id='var-BBMASK'><glossterm>BBMASK</glossterm>
|
||||
<glossdef>
|
||||
<para>
|
||||
@@ -1163,14 +1028,14 @@
|
||||
to "hide" these <filename>.bb</filename> and
|
||||
<filename>.bbappend</filename> files.
|
||||
BitBake ignores any recipe or recipe append files that
|
||||
match any of the expressions.
|
||||
match the expression.
|
||||
It is as if BitBake does not see them at all.
|
||||
Consequently, matching files are not parsed or otherwise
|
||||
used by BitBake.</para>
|
||||
<para>
|
||||
The values you provide are passed to Python's regular
|
||||
The value you provide is passed to Python's regular
|
||||
expression compiler.
|
||||
The expressions are compared against the full paths to
|
||||
The expression is compared against the full paths to
|
||||
the files.
|
||||
For complete syntax information, see Python's
|
||||
documentation at
|
||||
@@ -1186,16 +1051,18 @@
|
||||
BBMASK = "meta-ti/recipes-misc/"
|
||||
</literallayout>
|
||||
If you want to mask out multiple directories or recipes,
|
||||
you can specify multiple regular expression fragments.
|
||||
use the vertical bar to separate the regular expression
|
||||
fragments.
|
||||
This next example masks out multiple directories and
|
||||
individual recipes:
|
||||
<literallayout class='monospaced'>
|
||||
BBMASK += "/meta-ti/recipes-misc/ meta-ti/recipes-ti/packagegroup/"
|
||||
BBMASK += "/meta-oe/recipes-support/"
|
||||
BBMASK += "/meta-foo/.*/openldap"
|
||||
BBMASK += "opencv.*\.bbappend"
|
||||
BBMASK += "lzma"
|
||||
BBMASK = "meta-ti/recipes-misc/|meta-ti/recipes-ti/packagegroup/"
|
||||
BBMASK .= "|.*meta-oe/recipes-support/"
|
||||
BBMASK .= "|.*openldap"
|
||||
BBMASK .= "|.*opencv"
|
||||
BBMASK .= "|.*lzma"
|
||||
</literallayout>
|
||||
Notice how the vertical bar is used to append the fragments.
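Because the value is handed to Python's regular expression compiler and compared against the full paths to the files, a bar-separated expression behaves as in this small illustration (the paths are made up for the example; this is not BitBake's own parsing code):

# Illustration of how a vertical-bar separated BBMASK value matches full paths.
import re

bbmask = "meta-ti/recipes-misc/|.*meta-oe/recipes-support/|.*openldap"
mask = re.compile(bbmask)

paths = [
    "/home/user/poky/meta-ti/recipes-misc/foo/foo_1.0.bb",
    "/home/user/poky/meta-oe/recipes-support/bar/bar_2.0.bb",
    "/home/user/poky/meta/recipes-core/base-files/base-files_3.0.bb",
]
for p in paths:
    print(p, "masked" if mask.search(p) else "kept")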
|
||||
<note>
|
||||
When specifying a directory name, use the trailing
|
||||
slash character to ensure you match just that directory
|
||||
@@ -1224,9 +1091,9 @@
|
||||
Set the variable as you would any environment variable
|
||||
and then run BitBake:
|
||||
<literallayout class='monospaced'>
|
||||
$ BBPATH="<replaceable>build_directory</replaceable>"
|
||||
$ BBPATH="<build_directory>"
|
||||
$ export BBPATH
|
||||
$ bitbake <replaceable>target</replaceable>
|
||||
$ bitbake <target>
|
||||
</literallayout>
|
||||
</para>
|
||||
</glossdef>
|
||||
@@ -1242,15 +1109,6 @@
|
||||
</glossdef>
|
||||
</glossentry>
|
||||
|
||||
<glossentry id='var-BBTARGETS'><glossterm>BBTARGETS</glossterm>
|
||||
<glossdef>
|
||||
<para>
|
||||
Allows you to use a configuration file to add to the list
|
||||
of command-line target recipes you want to build.
|
||||
</para>
|
||||
</glossdef>
|
||||
</glossentry>
|
||||
|
||||
<glossentry id='var-BBVERSIONS'><glossterm>BBVERSIONS</glossterm>
|
||||
<glossdef>
|
||||
<para>
|
||||
@@ -1296,15 +1154,6 @@
|
||||
</glossdef>
|
||||
</glossentry>
|
||||
|
||||
<glossentry id='var-BZRDIR'><glossterm>BZRDIR</glossterm>
|
||||
<glossdef>
|
||||
<para>
|
||||
The directory in which files checked out of a Bazaar
|
||||
system are stored.
|
||||
</para>
|
||||
</glossdef>
|
||||
</glossentry>
|
||||
|
||||
</glossdiv>
|
||||
|
||||
<glossdiv id='var-glossary-c'><title>C</title>
|
||||
@@ -1319,15 +1168,6 @@
|
||||
</glossdef>
|
||||
</glossentry>
|
||||
|
||||
<glossentry id='var-CVSDIR'><glossterm>CVSDIR</glossterm>
|
||||
<glossdef>
|
||||
<para>
|
||||
The directory in which files checked out under the
|
||||
CVS system are stored.
|
||||
</para>
|
||||
</glossdef>
|
||||
</glossentry>
|
||||
|
||||
</glossdiv>
|
||||
|
||||
<glossdiv id='var-glossary-d'><title>D</title>
|
||||
@@ -1537,6 +1377,24 @@
|
||||
</glossdef>
|
||||
</glossentry>
|
||||
|
||||
<glossentry id='var-FILESDIR'><glossterm>FILESDIR</glossterm>
|
||||
<glossdef>
|
||||
<para>
|
||||
Specifies directories BitBake uses when searching for
|
||||
patches and files.
|
||||
The "local" fetcher module uses these directories when
|
||||
handling <filename>file://</filename> URLs if the file
|
||||
was not found using
|
||||
<link linkend='var-FILESPATH'><filename>FILESPATH</filename></link>.
|
||||
<note>
|
||||
The <filename>FILESDIR</filename> variable is
|
||||
deprecated and you should use
|
||||
<filename>FILESPATH</filename> in all new code.
|
||||
</note>
|
||||
</para>
|
||||
</glossdef>
|
||||
</glossentry>
|
||||
|
||||
<glossentry id='var-FILESPATH'><glossterm>FILESPATH</glossterm>
|
||||
<glossdef>
|
||||
<para>
|
||||
@@ -1554,32 +1412,13 @@
|
||||
|
||||
</glossdiv>
|
||||
|
||||
|
||||
<!--
|
||||
<glossdiv id='var-glossary-g'><title>G</title>
|
||||
|
||||
<glossentry id='var-GITDIR'><glossterm>GITDIR</glossterm>
|
||||
<glossdef>
|
||||
<para>
|
||||
The directory in which a local copy of a Git repository
|
||||
is stored when it is cloned.
|
||||
</para>
|
||||
</glossdef>
|
||||
</glossentry>
|
||||
|
||||
</glossdiv>
|
||||
|
||||
-->
|
||||
|
||||
<glossdiv id='var-glossary-h'><title>H</title>
|
||||
|
||||
<glossentry id='var-HGDIR'><glossterm>HGDIR</glossterm>
|
||||
<glossdef>
|
||||
<para>
|
||||
The directory in which files checked out of a Mercurial
|
||||
system are stored.
|
||||
</para>
|
||||
</glossdef>
|
||||
</glossentry>
|
||||
|
||||
<glossentry id='var-HOMEPAGE'><glossterm>HOMEPAGE</glossterm>
|
||||
<glossdef>
|
||||
<para>Website where more information about the software the recipe is building
|
||||
@@ -1594,19 +1433,9 @@
|
||||
<glossentry id='var-INHERIT'><glossterm>INHERIT</glossterm>
|
||||
<glossdef>
|
||||
<para>
|
||||
Causes the named class or classes to be inherited globally.
|
||||
Anonymous functions in the class or classes
|
||||
are not executed for the
|
||||
base configuration and in each individual recipe.
|
||||
The OpenEmbedded build system ignores changes to
|
||||
<filename>INHERIT</filename> in individual recipes.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
For more information on <filename>INHERIT</filename>, see
|
||||
the
|
||||
"<link linkend="inherit-configuration-directive"><filename>INHERIT</filename> Configuration Directive</link>"
|
||||
section.
|
||||
Causes the named class to be inherited at
|
||||
this point during parsing.
|
||||
The variable is only valid in configuration files.
|
||||
</para>
|
||||
</glossdef>
|
||||
</glossentry>
|
||||
@@ -1649,17 +1478,6 @@
|
||||
</glossdef>
|
||||
</glossentry>
|
||||
|
||||
<glossentry id='var-LAYERDIR_RE'><glossterm>LAYERDIR_RE</glossterm>
|
||||
<glossdef>
|
||||
<para>When used inside the <filename>layer.conf</filename> configuration
|
||||
file, this variable provides the path of the current layer,
|
||||
escaped for use in a regular expression
|
||||
(<link linkend='var-BBFILE_PATTERN'><filename>BBFILE_PATTERN</filename></link>).
|
||||
This variable is not available outside of <filename>layer.conf</filename>
|
||||
and references are expanded immediately when parsing of the file completes.</para>
|
||||
</glossdef>
|
||||
</glossentry>
|
||||
|
||||
<glossentry id='var-LAYERVERSION'><glossterm>LAYERVERSION</glossterm>
|
||||
<glossdef>
|
||||
<para>Optionally specifies the version of a layer as a single number.
|
||||
@@ -1742,16 +1560,8 @@
|
||||
BitBake uses <filename>OVERRIDES</filename> to control
|
||||
what variables are overridden after BitBake parses
|
||||
recipes and configuration files.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Following is a simple example that uses an overrides
|
||||
list based on machine architectures:
|
||||
<literallayout class='monospaced'>
|
||||
OVERRIDES = "arm:x86:mips:powerpc"
|
||||
</literallayout>
|
||||
You can find information on how to use
|
||||
<filename>OVERRIDES</filename> in the
|
||||
You can find more information on how overrides are handled
|
||||
in the
|
||||
"<link linkend='conditional-syntax-overrides'>Conditional Syntax (Overrides)</link>"
|
||||
section.
|
||||
</para>
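To illustrate what the overrides list means in practice, the sketch below shows a conditional variable assignment being selected when its suffix appears in OVERRIDES. It is a simplified model for explanation only, not BitBake's actual implementation.

# Simplified model of override selection; illustration only.
OVERRIDES = "arm:x86:mips:powerpc"
metadata = {
    "TESTVAR": "default",
    "TESTVAR_arm": "arm-specific value",   # applies because "arm" is in OVERRIDES
    "TESTVAR_mips64": "ignored",           # "mips64" is not an active override
}

value = metadata["TESTVAR"]
for override in OVERRIDES.split(":"):
    conditional = metadata.get("TESTVAR_" + override)
    if conditional is not None:
        value = conditional
print(value)   # "arm-specific value"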
|
||||
@@ -1761,15 +1571,6 @@
|
||||
|
||||
<glossdiv id='var-glossary-p'><title>P</title>
|
||||
|
||||
<glossentry id='var-P4DIR'><glossterm>P4DIR</glossterm>
|
||||
<glossdef>
|
||||
<para>
|
||||
The directory in which a local copy of a Perforce depot
|
||||
is stored when it is fetched.
|
||||
</para>
|
||||
</glossdef>
|
||||
</glossentry>
|
||||
|
||||
<glossentry id='var-PACKAGES'><glossterm>PACKAGES</glossterm>
|
||||
<glossdef>
|
||||
<para>The list of packages the recipe creates.
|
||||
@@ -1901,7 +1702,7 @@
|
||||
Here are two examples:
|
||||
<literallayout class='monospaced'>
|
||||
PREFERRED_VERSION_python = "2.7.3"
|
||||
PREFERRED_VERSION_linux-yocto = "4.12%"
|
||||
PREFERRED_VERSION_linux-yocto = "3.10%"
|
||||
</literallayout>
|
||||
</para>
|
||||
</glossdef>
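In the example above the trailing "%" acts as a wildcard for the remainder of the version string, so "4.12%" selects any available 4.12-based version. A rough illustration of that prefix-style match follows; it is not BitBake's actual version selection code.

# Illustration of a "%"-terminated PREFERRED_VERSION acting as a prefix match.
def version_matches(preferred, candidate):
    if preferred.endswith("%"):
        return candidate.startswith(preferred[:-1])
    return candidate == preferred

print(version_matches("4.12%", "4.12.28"))   # True
print(version_matches("4.12%", "4.14.76"))   # False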
|
||||
@@ -1943,49 +1744,16 @@
|
||||
<glossentry id='var-PROVIDES'><glossterm>PROVIDES</glossterm>
|
||||
<glossdef>
|
||||
<para>
|
||||
A list of aliases by which a particular recipe can be
|
||||
known.
|
||||
By default, a recipe's own
|
||||
<filename><link linkend='var-PN'>PN</link></filename>
|
||||
is implicitly already in its <filename>PROVIDES</filename>
|
||||
list.
|
||||
If a recipe uses <filename>PROVIDES</filename>, the
|
||||
additional aliases are synonyms for the recipe and can
|
||||
be useful for satisfying dependencies of other recipes during
|
||||
the build as specified by
|
||||
<filename><link linkend='var-DEPENDS'>DEPENDS</link></filename>.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Consider the following example
|
||||
<filename>PROVIDES</filename> statement from a recipe
|
||||
file <filename>libav_0.8.11.bb</filename>:
|
||||
<literallayout class='monospaced'>
|
||||
PROVIDES += "libpostproc"
|
||||
</literallayout>
|
||||
The <filename>PROVIDES</filename> statement results in
|
||||
the "libav" recipe also being known as "libpostproc".
|
||||
</para>
|
||||
|
||||
<para>
|
||||
In addition to providing recipes under alternate names,
|
||||
the <filename>PROVIDES</filename> mechanism is also used
|
||||
to implement virtual targets.
|
||||
A virtual target is a name that corresponds to some
|
||||
particular functionality (e.g. a Linux kernel).
|
||||
Recipes that provide the functionality in question list the
|
||||
virtual target in <filename>PROVIDES</filename>.
|
||||
Recipes that depend on the functionality in question can
|
||||
include the virtual target in
|
||||
<link linkend='var-DEPENDS'><filename>DEPENDS</filename></link>
|
||||
to leave the choice of provider open.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Conventionally, virtual targets have names of the form
|
||||
"virtual/function" (e.g. "virtual/kernel").
|
||||
The slash is simply part of the name and has no
|
||||
syntactical significance.
|
||||
A list of aliases that a recipe also provides.
|
||||
These aliases are useful for satisfying dependencies of
|
||||
other recipes during the build (as specified by
|
||||
<filename><link linkend='var-DEPENDS'>DEPENDS</link></filename>).
|
||||
<note>
|
||||
A recipe's own
|
||||
<filename><link linkend='var-PN'>PN</link></filename>
|
||||
is implicitly already in its
|
||||
<filename>PROVIDES</filename> list.
|
||||
</note>
|
||||
</para>
|
||||
</glossdef>
|
||||
</glossentry>
|
||||
@@ -2063,7 +1831,7 @@
|
||||
Here is the general syntax to specify versions with
|
||||
the <filename>RDEPENDS</filename> variable:
|
||||
<literallayout class='monospaced'>
|
||||
RDEPENDS_${PN} = "<replaceable>package</replaceable> (<replaceable>operator</replaceable> <replaceable>version</replaceable>)"
|
||||
RDEPENDS_${PN} = "<package> (<operator> <version>)"
|
||||
</literallayout>
|
||||
For <filename>operator</filename>, you can specify the
|
||||
following:
|
||||
@@ -2089,16 +1857,6 @@
|
||||
</glossdef>
|
||||
</glossentry>
|
||||
|
||||
<glossentry id='var-REPODIR'><glossterm>REPODIR</glossterm>
|
||||
<glossdef>
|
||||
<para>
|
||||
The directory in which a local copy of a
|
||||
<filename>google-repo</filename> directory is stored
|
||||
when it is synced.
|
||||
</para>
|
||||
</glossdef>
|
||||
</glossentry>
|
||||
|
||||
<glossentry id='var-RPROVIDES'><glossterm>RPROVIDES</glossterm>
|
||||
<glossdef>
|
||||
<para>
|
||||
@@ -2139,7 +1897,7 @@
|
||||
Here is the general syntax to specify versions with
|
||||
the <filename>RRECOMMENDS</filename> variable:
|
||||
<literallayout class='monospaced'>
|
||||
RRECOMMENDS_${PN} = "<replaceable>package</replaceable> (<replaceable>operator</replaceable> <replaceable>version</replaceable>)"
|
||||
RRECOMMENDS_${PN} = "<package> (<operator> <version>)"
|
||||
</literallayout>
|
||||
For <filename>operator</filename>, you can specify the
|
||||
following:
|
||||
@@ -2322,15 +2080,6 @@
|
||||
</glossdef>
|
||||
</glossentry>
|
||||
|
||||
<glossentry id='var-SVNDIR'><glossterm>SVNDIR</glossterm>
|
||||
<glossdef>
|
||||
<para>
|
||||
The directory in which files checked out of a Subversion
|
||||
system are stored.
|
||||
</para>
|
||||
</glossdef>
|
||||
</glossentry>
|
||||
|
||||
</glossdiv>
|
||||
|
||||
<glossdiv id='var-glossary-t'><title>T</title>
|
||||
|
||||
@@ -313,13 +313,6 @@ a:hover {
|
||||
/*font-weight: bold;*/
|
||||
}
|
||||
|
||||
/* This style defines how the permalink character
|
||||
appears by itself and when hovered over with
|
||||
the mouse. */
|
||||
|
||||
[alt='Permalink'] { color: #eee; }
|
||||
[alt='Permalink']:hover { color: black; }
|
||||
|
||||
|
||||
div.informalfigure,
|
||||
div.informalexample,
|
||||
@@ -800,6 +793,7 @@ div.sect2 .titlepage .title {
|
||||
|
||||
h1.title {
|
||||
background-color: transparent;
|
||||
background-image: url("figures/yocto-project-bw.png");
|
||||
background-repeat: no-repeat;
|
||||
height: 256px;
|
||||
text-indent: -9000px;
|
||||
|
||||
@@ -56,7 +56,7 @@
|
||||
-->
|
||||
|
||||
<copyright>
|
||||
<year>2004-2017</year>
|
||||
<year>2004-2014</year>
|
||||
<holder>Richard Purdie</holder>
|
||||
<holder>Chris Larson</holder>
|
||||
<holder>and Phil Blundell</holder>
|
||||
|
||||
@@ -89,7 +89,7 @@ quit after parsing the BB files (developers only)
|
||||
show current and preferred versions of all packages
|
||||
.TP
|
||||
.B \-e, \-\-environment
|
||||
show the global or per-recipe environment (this is what used to be bbread)
|
||||
show the global or per-package environment (this is what used to be bbread)
|
||||
.TP
|
||||
.B \-g, \-\-graphviz
|
||||
emit the dependency trees of the specified packages in the dot syntax
|
||||
@@ -105,7 +105,7 @@ Show debug logging for the specified logging domains
|
||||
profile the command and print a report
|
||||
.TP
|
||||
.B \-uUI, \-\-ui=UI
|
||||
User interface to use. Currently, knotty, taskexp or ncurses can be specified as UI.
|
||||
User interface to use. Currently, hob, depexp, goggle or ncurses can be specified as UI.
|
||||
.TP
|
||||
.B \-tSERVERTYPE, \-\-servertype=SERVERTYPE
|
||||
Choose which server to use, none, process or xmlrpc.
|
||||
|
||||
bitbake/doc/template/component.title.xsl (vendored, 39 lines changed)
@@ -1,39 +0,0 @@
|
||||
<xsl:stylesheet version="1.0"
|
||||
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
|
||||
xmlns:d="http://docbook.org/ns/docbook"
|
||||
xmlns="http://www.w3.org/1999/xhtml"
|
||||
exclude-result-prefixes="d">
|
||||
|
||||
<xsl:template name="component.title">
|
||||
<xsl:param name="node" select="."/>
|
||||
|
||||
<xsl:variable name="level">
|
||||
<xsl:choose>
|
||||
<xsl:when test="ancestor::d:section">
|
||||
<xsl:value-of select="count(ancestor::d:section)+1"/>
|
||||
</xsl:when>
|
||||
<xsl:when test="ancestor::d:sect5">6</xsl:when>
|
||||
<xsl:when test="ancestor::d:sect4">5</xsl:when>
|
||||
<xsl:when test="ancestor::d:sect3">4</xsl:when>
|
||||
<xsl:when test="ancestor::d:sect2">3</xsl:when>
|
||||
<xsl:when test="ancestor::d:sect1">2</xsl:when>
|
||||
<xsl:otherwise>1</xsl:otherwise>
|
||||
</xsl:choose>
|
||||
</xsl:variable>
|
||||
<xsl:element name="h{$level+1}" namespace="http://www.w3.org/1999/xhtml">
|
||||
<xsl:attribute name="class">title</xsl:attribute>
|
||||
<xsl:if test="$generate.id.attributes = 0">
|
||||
<xsl:call-template name="anchor">
|
||||
<xsl:with-param name="node" select="$node"/>
|
||||
<xsl:with-param name="conditional" select="0"/>
|
||||
</xsl:call-template>
|
||||
</xsl:if>
|
||||
<xsl:apply-templates select="$node" mode="object.title.markup">
|
||||
<xsl:with-param name="allow-anchors" select="1"/>
|
||||
</xsl:apply-templates>
|
||||
<xsl:call-template name="permalink">
|
||||
<xsl:with-param name="node" select="$node"/>
|
||||
</xsl:call-template>
|
||||
</xsl:element>
|
||||
</xsl:template>
|
||||
</xsl:stylesheet>
|
||||
bitbake/doc/template/division.title.xsl (vendored, 25 lines changed)
@@ -1,25 +0,0 @@
|
||||
<xsl:stylesheet version="1.0"
|
||||
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
|
||||
xmlns:d="http://docbook.org/ns/docbook"
|
||||
xmlns="http://www.w3.org/1999/xhtml"
|
||||
exclude-result-prefixes="d">
|
||||
|
||||
<xsl:template name="division.title">
|
||||
<xsl:param name="node" select="."/>
|
||||
|
||||
<h1>
|
||||
<xsl:attribute name="class">title</xsl:attribute>
|
||||
<xsl:call-template name="anchor">
|
||||
<xsl:with-param name="node" select="$node"/>
|
||||
<xsl:with-param name="conditional" select="0"/>
|
||||
</xsl:call-template>
|
||||
<xsl:apply-templates select="$node" mode="object.title.markup">
|
||||
<xsl:with-param name="allow-anchors" select="1"/>
|
||||
</xsl:apply-templates>
|
||||
<xsl:call-template name="permalink">
|
||||
<xsl:with-param name="node" select="$node"/>
|
||||
</xsl:call-template>
|
||||
</h1>
|
||||
</xsl:template>
|
||||
</xsl:stylesheet>
|
||||
|
||||
bitbake/doc/template/formal.object.heading.xsl (vendored, 21 lines changed)
@@ -1,21 +0,0 @@
|
||||
<xsl:stylesheet version="1.0"
|
||||
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
|
||||
xmlns:d="http://docbook.org/ns/docbook"
|
||||
xmlns="http://www.w3.org/1999/xhtml"
|
||||
exclude-result-prefixes="d">
|
||||
|
||||
<xsl:template name="formal.object.heading">
|
||||
<xsl:param name="object" select="."/>
|
||||
<xsl:param name="title">
|
||||
<xsl:apply-templates select="$object" mode="object.title.markup">
|
||||
<xsl:with-param name="allow-anchors" select="1"/>
|
||||
</xsl:apply-templates>
|
||||
</xsl:param>
|
||||
<p class="title">
|
||||
<b><xsl:copy-of select="$title"/></b>
|
||||
<xsl:call-template name="permalink">
|
||||
<xsl:with-param name="node" select="$object"/>
|
||||
</xsl:call-template>
|
||||
</p>
|
||||
</xsl:template>
|
||||
</xsl:stylesheet>
|
||||
bitbake/doc/template/gloss-permalinks.xsl (vendored, 14 lines changed)
@@ -1,14 +0,0 @@
|
||||
<xsl:stylesheet version="1.0"
|
||||
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
|
||||
xmlns:d="http://docbook.org/ns/docbook"
|
||||
xmlns="http://www.w3.org/1999/xhtml">
|
||||
|
||||
<xsl:template match="glossentry/glossterm">
|
||||
<xsl:apply-imports/>
|
||||
<xsl:if test="$generate.permalink != 0">
|
||||
<xsl:call-template name="permalink">
|
||||
<xsl:with-param name="node" select=".."/>
|
||||
</xsl:call-template>
|
||||
</xsl:if>
|
||||
</xsl:template>
|
||||
</xsl:stylesheet>
|
||||
bitbake/doc/template/permalinks.xsl (vendored, 25 lines changed)
@@ -1,25 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<xsl:stylesheet version="1.0"
|
||||
xmlns="http://www.w3.org/1999/xhtml"
|
||||
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
|
||||
|
||||
<xsl:param name="generate.permalink" select="1"/>
|
||||
<xsl:param name="permalink.text">¶</xsl:param>
|
||||
|
||||
<xsl:template name="permalink">
|
||||
<xsl:param name="node"/>
|
||||
|
||||
<xsl:if test="$generate.permalink != '0'">
|
||||
<span class="permalink">
|
||||
<a alt="Permalink" title="Permalink">
|
||||
<xsl:attribute name="href">
|
||||
<xsl:call-template name="href.target">
|
||||
<xsl:with-param name="object" select="$node"/>
|
||||
</xsl:call-template>
|
||||
</xsl:attribute>
|
||||
<xsl:copy-of select="$permalink.text"/>
|
||||
</a>
|
||||
</span>
|
||||
</xsl:if>
|
||||
</xsl:template>
|
||||
</xsl:stylesheet>
|
||||
bitbake/doc/template/section.title.xsl (vendored, 55 lines changed)
@@ -1,55 +0,0 @@
|
||||
<xsl:stylesheet version="1.0"
|
||||
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
|
||||
xmlns:d="http://docbook.org/ns/docbook"
|
||||
xmlns="http://www.w3.org/1999/xhtml" exclude-result-prefixes="d">
|
||||
|
||||
<xsl:template name="section.title">
|
||||
<xsl:variable name="section"
|
||||
select="(ancestor::section |
|
||||
ancestor::simplesect|
|
||||
ancestor::sect1|
|
||||
ancestor::sect2|
|
||||
ancestor::sect3|
|
||||
ancestor::sect4|
|
||||
ancestor::sect5)[last()]"/>
|
||||
|
||||
<xsl:variable name="renderas">
|
||||
<xsl:choose>
|
||||
<xsl:when test="$section/@renderas = 'sect1'">1</xsl:when>
|
||||
<xsl:when test="$section/@renderas = 'sect2'">2</xsl:when>
|
||||
<xsl:when test="$section/@renderas = 'sect3'">3</xsl:when>
|
||||
<xsl:when test="$section/@renderas = 'sect4'">4</xsl:when>
|
||||
<xsl:when test="$section/@renderas = 'sect5'">5</xsl:when>
|
||||
<xsl:otherwise><xsl:value-of select="''"/></xsl:otherwise>
|
||||
</xsl:choose>
|
||||
</xsl:variable>
|
||||
|
||||
<xsl:variable name="level">
|
||||
<xsl:choose>
|
||||
<xsl:when test="$renderas != ''">
|
||||
<xsl:value-of select="$renderas"/>
|
||||
</xsl:when>
|
||||
<xsl:otherwise>
|
||||
<xsl:call-template name="section.level">
|
||||
<xsl:with-param name="node" select="$section"/>
|
||||
</xsl:call-template>
|
||||
</xsl:otherwise>
|
||||
</xsl:choose>
|
||||
</xsl:variable>
|
||||
|
||||
<xsl:call-template name="section.heading">
|
||||
<xsl:with-param name="section" select="$section"/>
|
||||
<xsl:with-param name="level" select="$level"/>
|
||||
<xsl:with-param name="title">
|
||||
<xsl:apply-templates select="$section" mode="object.title.markup">
|
||||
<xsl:with-param name="allow-anchors" select="1"/>
|
||||
</xsl:apply-templates>
|
||||
<xsl:if test="$level > 0">
|
||||
<xsl:call-template name="permalink">
|
||||
<xsl:with-param name="node" select="$section"/>
|
||||
</xsl:call-template>
|
||||
</xsl:if>
|
||||
</xsl:with-param>
|
||||
</xsl:call-template>
|
||||
</xsl:template>
|
||||
</xsl:stylesheet>
|
||||
@@ -3,7 +3,7 @@
|
||||
#
|
||||
# This is a copy on write dictionary and set which abuses classes to try and be nice and fast.
|
||||
#
|
||||
# Copyright (C) 2006 Tim Ansell
|
||||
# Copyright (C) 2006 Tim Amsell
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License version 2 as
|
||||
@@ -23,17 +23,19 @@
|
||||
# Assign a file to __warn__ to get warnings about slow operations.
|
||||
#
|
||||
|
||||
|
||||
from __future__ import print_function
|
||||
import copy
|
||||
import types
|
||||
ImmutableTypes = (
|
||||
types.NoneType,
|
||||
bool,
|
||||
complex,
|
||||
float,
|
||||
int,
|
||||
long,
|
||||
tuple,
|
||||
frozenset,
|
||||
str
|
||||
basestring
|
||||
)
|
||||
|
||||
MUTABLE = "__mutable__"
|
||||
@@ -59,7 +61,7 @@ class COWDictMeta(COWMeta):
|
||||
__call__ = cow
|
||||
|
||||
def __setitem__(cls, key, value):
|
||||
if value is not None and not isinstance(value, ImmutableTypes):
|
||||
if not isinstance(value, ImmutableTypes):
|
||||
if not isinstance(value, COWMeta):
|
||||
cls.__hasmutable__ = True
|
||||
key += MUTABLE
|
||||
@@ -114,7 +116,7 @@ class COWDictMeta(COWMeta):
|
||||
cls.__setitem__(key, cls.__marker__)
|
||||
|
||||
def __revertitem__(cls, key):
|
||||
if key not in cls.__dict__:
|
||||
if not cls.__dict__.has_key(key):
|
||||
key += MUTABLE
|
||||
delattr(cls, key)
|
||||
|
||||
@@ -181,7 +183,7 @@ class COWSetMeta(COWDictMeta):
|
||||
COWDictMeta.__delitem__(cls, repr(hash(value)))
|
||||
|
||||
def __in__(cls, value):
|
||||
return repr(hash(value)) in COWDictMeta
|
||||
return COWDictMeta.has_key(repr(hash(value)))
|
||||
|
||||
def iterkeys(cls):
|
||||
raise TypeError("sets don't have keys")
|
||||
@@ -190,10 +192,12 @@ class COWSetMeta(COWDictMeta):
|
||||
raise TypeError("sets don't have 'items'")
|
||||
|
||||
# These are the actual classes you use!
|
||||
class COWDictBase(object, metaclass = COWDictMeta):
|
||||
class COWDictBase(object):
|
||||
__metaclass__ = COWDictMeta
|
||||
__count__ = 0
|
||||
|
||||
class COWSetBase(object, metaclass = COWSetMeta):
|
||||
class COWSetBase(object):
|
||||
__metaclass__ = COWSetMeta
|
||||
__count__ = 0
|
||||
|
||||
if __name__ == "__main__":
|
||||
@@ -283,7 +287,7 @@ if __name__ == "__main__":
|
||||
except KeyError:
|
||||
print("Yay! deleted key raises error")
|
||||
|
||||
if 'b' in b:
|
||||
if b.has_key('b'):
|
||||
print("Boo!")
|
||||
else:
|
||||
print("Yay - has_key with delete works!")
|
||||
|
||||
@@ -21,11 +21,11 @@
|
||||
# with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
__version__ = "1.37.0"
|
||||
__version__ = "1.22.0"
|
||||
|
||||
import sys
|
||||
if sys.version_info < (3, 4, 0):
|
||||
raise RuntimeError("Sorry, python 3.4.0 or later is required for this version of bitbake")
|
||||
if sys.version_info < (2, 7, 3):
|
||||
raise RuntimeError("Sorry, python 2.7.3 or later is required for this version of bitbake")
|
||||
|
||||
|
||||
class BBHandledException(Exception):
|
||||
@@ -70,8 +70,6 @@ logger = logging.getLogger("BitBake")
|
||||
logger.addHandler(NullHandler())
|
||||
logger.setLevel(logging.DEBUG - 2)
|
||||
|
||||
mainlogger = logging.getLogger("BitBake.Main")
|
||||
|
||||
# This has to be imported after the setLoggerClass, as the import of bb.msg
|
||||
# can result in construction of the various loggers.
|
||||
import bb.msg
|
||||
@@ -81,31 +79,32 @@ sys.modules['bb.fetch'] = sys.modules['bb.fetch2']
|
||||
|
||||
# Messaging convenience functions
|
||||
def plain(*args):
|
||||
mainlogger.plain(''.join(args))
|
||||
logger.plain(''.join(args))
|
||||
|
||||
def debug(lvl, *args):
|
||||
if isinstance(lvl, str):
|
||||
mainlogger.warning("Passed invalid debug level '%s' to bb.debug", lvl)
|
||||
if isinstance(lvl, basestring):
|
||||
logger.warn("Passed invalid debug level '%s' to bb.debug", lvl)
|
||||
args = (lvl,) + args
|
||||
lvl = 1
|
||||
mainlogger.debug(lvl, ''.join(args))
|
||||
logger.debug(lvl, ''.join(args))
|
||||
|
||||
def note(*args):
|
||||
mainlogger.info(''.join(args))
|
||||
logger.info(''.join(args))
|
||||
|
||||
def warn(*args):
|
||||
mainlogger.warning(''.join(args))
|
||||
logger.warn(''.join(args))
|
||||
|
||||
def error(*args, **kwargs):
|
||||
mainlogger.error(''.join(args), extra=kwargs)
|
||||
def error(*args):
|
||||
logger.error(''.join(args))
|
||||
|
||||
def fatal(*args):
|
||||
logger.critical(''.join(args))
|
||||
sys.exit(1)
|
||||
|
||||
def fatal(*args, **kwargs):
|
||||
mainlogger.critical(''.join(args), extra=kwargs)
|
||||
raise BBHandledException()
|
||||
|
||||
def deprecated(func, name=None, advice=""):
|
||||
"""This is a decorator which can be used to mark functions
|
||||
as deprecated. It will result in a warning being emitted
|
||||
as deprecated. It will result in a warning being emmitted
|
||||
when the function is used."""
|
||||
import warnings
|
||||
|
||||
|
||||
@@ -23,7 +23,7 @@
|
||||
# with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
#
|
||||
# Based on functions from the base bb module, Copyright 2003 Holger Schurig
|
||||
#Based on functions from the base bb module, Copyright 2003 Holger Schurig
|
||||
|
||||
import os
|
||||
import sys
|
||||
@@ -31,43 +31,24 @@ import logging
|
||||
import shlex
|
||||
import glob
|
||||
import time
|
||||
import stat
|
||||
import bb
|
||||
import bb.msg
|
||||
import bb.process
|
||||
import bb.progress
|
||||
from bb import data, event, utils
|
||||
from contextlib import nested
|
||||
from bb import event, utils
|
||||
|
||||
bblogger = logging.getLogger('BitBake')
|
||||
logger = logging.getLogger('BitBake.Build')
|
||||
|
||||
NULL = open(os.devnull, 'r+')
|
||||
|
||||
__mtime_cache = {}
|
||||
|
||||
def cached_mtime_noerror(f):
|
||||
if f not in __mtime_cache:
|
||||
try:
|
||||
__mtime_cache[f] = os.stat(f)[stat.ST_MTIME]
|
||||
except OSError:
|
||||
return 0
|
||||
return __mtime_cache[f]
|
||||
|
||||
def reset_cache():
|
||||
global __mtime_cache
|
||||
__mtime_cache = {}
|
||||
|
||||
# When we execute a Python function, we'd like certain things
|
||||
# in all namespaces, hence we add them to __builtins__.
|
||||
# When we execute a python function we'd like certain things
|
||||
# in all namespaces, hence we add them to __builtins__
|
||||
# If we do not do this and use the exec globals, they will
|
||||
# not be available to subfunctions.
|
||||
if hasattr(__builtins__, '__setitem__'):
|
||||
builtins = __builtins__
|
||||
else:
|
||||
builtins = __builtins__.__dict__
|
||||
|
||||
builtins['bb'] = bb
|
||||
builtins['os'] = os
|
||||
__builtins__['bb'] = bb
|
||||
__builtins__['os'] = os
|
||||
|
||||
class FuncFailed(Exception):
|
||||
def __init__(self, name = None, logfile = None):
|
||||
@@ -91,14 +72,13 @@ class TaskBase(event.Event):
|
||||
|
||||
def __init__(self, t, logfile, d):
|
||||
self._task = t
|
||||
self._package = d.getVar("PF")
|
||||
self._mc = d.getVar("BB_CURRENT_MC")
|
||||
self.taskfile = d.getVar("FILE")
|
||||
self._package = d.getVar("PF", True)
|
||||
self.taskfile = d.getVar("FILE", True)
|
||||
self.taskname = self._task
|
||||
self.logfile = logfile
|
||||
self.time = time.time()
|
||||
event.Event.__init__(self)
|
||||
self._message = "recipe %s: task %s: %s" % (d.getVar("PF"), t, self.getDisplayName())
|
||||
self._message = "recipe %s: task %s: %s" % (d.getVar("PF", True), t, self.getDisplayName())
|
||||
|
||||
def getTask(self):
|
||||
return self._task
|
||||
@@ -139,25 +119,6 @@ class TaskInvalid(TaskBase):
|
||||
super(TaskInvalid, self).__init__(task, None, metadata)
|
||||
self._message = "No such task '%s'" % task
|
||||
|
||||
class TaskProgress(event.Event):
|
||||
"""
|
||||
Task made some progress that could be reported to the user, usually in
|
||||
the form of a progress bar or similar.
|
||||
NOTE: this class does not inherit from TaskBase since it doesn't need
|
||||
to - it's fired within the task context itself, so we don't have any of
|
||||
the context information that you do in the case of the other events.
|
||||
The event PID can be used to determine which task it came from.
|
||||
The progress value is normally 0-100, but can also be negative
|
||||
indicating that progress has been made but we aren't able to determine
|
||||
how much.
|
||||
The rate is optional, this is simply an extra string to display to the
|
||||
user if specified.
|
||||
"""
|
||||
def __init__(self, progress, rate=None):
|
||||
self.progress = progress
|
||||
self.rate = rate
|
||||
event.Event.__init__(self)
|
||||
|
||||
|
||||
class LogTee(object):
|
||||
def __init__(self, logger, outfile):
|
||||
@@ -181,27 +142,23 @@ class LogTee(object):
|
||||
def flush(self):
|
||||
self.outfile.flush()
|
||||
|
||||
#
|
||||
# pythonexception allows the python exceptions generated to be raised
|
||||
# as the real exceptions (not FuncFailed) and without a backtrace at the
|
||||
# origin of the failure.
|
||||
#
|
||||
def exec_func(func, d, dirs = None, pythonexception=False):
|
||||
"""Execute a BB 'function'"""
|
||||
def exec_func(func, d, dirs = None):
|
||||
"""Execute an BB 'function'"""
|
||||
|
||||
try:
|
||||
oldcwd = os.getcwd()
|
||||
except:
|
||||
oldcwd = None
|
||||
body = d.getVar(func)
|
||||
if not body:
|
||||
if body is None:
|
||||
logger.warn("Function %s doesn't exist", func)
|
||||
return
|
||||
|
||||
flags = d.getVarFlags(func)
|
||||
cleandirs = flags.get('cleandirs') if flags else None
|
||||
cleandirs = flags.get('cleandirs')
|
||||
if cleandirs:
|
||||
for cdir in d.expand(cleandirs).split():
|
||||
bb.utils.remove(cdir, True)
|
||||
bb.utils.mkdirhier(cdir)
|
||||
|
||||
if flags and dirs is None:
|
||||
if dirs is None:
|
||||
dirs = flags.get('dirs')
|
||||
if dirs:
|
||||
dirs = d.expand(dirs).split()
|
||||
@@ -211,13 +168,8 @@ def exec_func(func, d, dirs = None, pythonexception=False):
|
||||
bb.utils.mkdirhier(adir)
|
||||
adir = dirs[-1]
|
||||
else:
|
||||
adir = None
|
||||
|
||||
body = d.getVar(func, False)
|
||||
if not body:
|
||||
if body is None:
|
||||
logger.warning("Function %s doesn't exist", func)
|
||||
return
|
||||
adir = d.getVar('B', True)
|
||||
bb.utils.mkdirhier(adir)
|
||||
|
||||
ispython = flags.get('python')
|
||||
|
||||
@@ -227,17 +179,17 @@ def exec_func(func, d, dirs = None, pythonexception=False):
|
||||
else:
|
||||
lockfiles = None
|
||||
|
||||
tempdir = d.getVar('T')
|
||||
tempdir = d.getVar('T', True)
|
||||
|
||||
# or func allows items to be executed outside of the normal
|
||||
# task set, such as buildhistory
|
||||
task = d.getVar('BB_RUNTASK') or func
|
||||
task = d.getVar('BB_RUNTASK', True) or func
|
||||
if task == func:
|
||||
taskfunc = task
|
||||
else:
|
||||
taskfunc = "%s.%s" % (task, func)
|
||||
|
||||
runfmt = d.getVar('BB_RUNFMT') or "run.{func}.{pid}"
|
||||
runfmt = d.getVar('BB_RUNFMT', True) or "run.{func}.{pid}"
|
||||
runfn = runfmt.format(taskfunc=taskfunc, task=task, func=func, pid=os.getpid())
|
||||
runfile = os.path.join(tempdir, runfn)
|
||||
bb.utils.mkdirhier(os.path.dirname(runfile))
|
||||
@@ -258,57 +210,42 @@ def exec_func(func, d, dirs = None, pythonexception=False):
|
||||
|
||||
with bb.utils.fileslocked(lockfiles):
|
||||
if ispython:
|
||||
exec_func_python(func, d, runfile, cwd=adir, pythonexception=pythonexception)
|
||||
exec_func_python(func, d, runfile, cwd=adir)
|
||||
else:
|
||||
exec_func_shell(func, d, runfile, cwd=adir)
|
||||
|
||||
try:
|
||||
curcwd = os.getcwd()
|
||||
except:
|
||||
curcwd = None
|
||||
|
||||
if oldcwd and curcwd != oldcwd:
|
||||
try:
|
||||
bb.warn("Task %s changed cwd to %s" % (func, curcwd))
|
||||
os.chdir(oldcwd)
|
||||
except:
|
||||
pass
|
||||
|
||||
_functionfmt = """
|
||||
def {function}(d):
|
||||
{body}
|
||||
|
||||
{function}(d)
|
||||
"""
|
||||
logformatter = bb.msg.BBLogFormatter("%(levelname)s: %(message)s")
|
||||
def exec_func_python(func, d, runfile, cwd=None, pythonexception=False):
|
||||
def exec_func_python(func, d, runfile, cwd=None):
|
||||
"""Execute a python BB 'function'"""
|
||||
|
||||
code = _functionfmt.format(function=func)
|
||||
bbfile = d.getVar('FILE', True)
|
||||
code = _functionfmt.format(function=func, body=d.getVar(func, True))
|
||||
bb.utils.mkdirhier(os.path.dirname(runfile))
|
||||
with open(runfile, 'w') as script:
|
||||
bb.data.emit_func_python(func, script, d)
|
||||
script.write(code)
|
||||
|
||||
if cwd:
|
||||
try:
|
||||
olddir = os.getcwd()
|
||||
except OSError as e:
|
||||
bb.warn("%s: Cannot get cwd: %s" % (func, e))
|
||||
except OSError:
|
||||
olddir = None
|
||||
os.chdir(cwd)
|
||||
|
||||
bb.debug(2, "Executing python function %s" % func)
|
||||
|
||||
try:
|
||||
text = "def %s(d):\n%s" % (func, d.getVar(func, False))
|
||||
fn = d.getVarFlag(func, "filename", False)
|
||||
lineno = int(d.getVarFlag(func, "lineno", False))
|
||||
bb.methodpool.insert_method(func, text, fn, lineno - 1)
|
||||
|
||||
comp = utils.better_compile(code, func, "exec_python_func() autogenerated")
|
||||
utils.better_exec(comp, {"d": d}, code, "exec_python_func() autogenerated", pythonexception=pythonexception)
|
||||
except (bb.parse.SkipRecipe, bb.build.FuncFailed):
|
||||
raise
|
||||
comp = utils.better_compile(code, func, bbfile)
|
||||
utils.better_exec(comp, {"d": d}, code, bbfile)
|
||||
except:
|
||||
if pythonexception:
|
||||
if sys.exc_info()[0] in (bb.parse.SkipPackage, bb.build.FuncFailed):
|
||||
raise
|
||||
|
||||
raise FuncFailed(func, None)
|
||||
finally:
|
||||
bb.debug(2, "Python function %s finished" % func)
|
||||
@@ -316,8 +253,8 @@ def exec_func_python(func, d, runfile, cwd=None, pythonexception=False):
|
||||
if cwd and olddir:
|
||||
try:
|
||||
os.chdir(olddir)
|
||||
except OSError as e:
|
||||
bb.warn("%s: Cannot restore cwd %s: %s" % (func, olddir, e))
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
def shell_trap_code():
|
||||
return '''#!/bin/sh\n
|
||||
@@ -327,8 +264,9 @@ bb_exit_handler() {
|
||||
case $ret in
|
||||
0) ;;
|
||||
*) case $BASH_VERSION in
|
||||
"") echo "WARNING: exit code $ret from a shell command.";;
|
||||
*) echo "WARNING: ${BASH_SOURCE[0]}:${BASH_LINENO[0]} exit $ret from '$BASH_COMMAND'";;
|
||||
"") echo "WARNING: exit code $ret from a shell command.";;
|
||||
*) echo "WARNING: ${BASH_SOURCE[0]}:${BASH_LINENO[0]} exit $ret from
|
||||
\"$BASH_COMMAND\"";;
|
||||
esac
|
||||
exit $ret
|
||||
esac
|
||||
@@ -362,14 +300,14 @@ def exec_func_shell(func, d, runfile, cwd=None):
|
||||
# cleanup
|
||||
ret=$?
|
||||
trap '' 0
|
||||
exit $ret
|
||||
exit $?
|
||||
''')
|
||||
|
||||
os.chmod(runfile, 0o775)
|
||||
os.chmod(runfile, 0775)
|
||||
|
||||
cmd = runfile
|
||||
if d.getVarFlag(func, 'fakeroot', False):
|
||||
fakerootcmd = d.getVar('FAKEROOT')
|
||||
if d.getVarFlag(func, 'fakeroot'):
|
||||
fakerootcmd = d.getVar('FAKEROOT', True)
|
||||
if fakerootcmd:
|
||||
cmd = [fakerootcmd, runfile]
|
||||
|
||||
@@ -378,75 +316,14 @@ exit $ret
|
||||
else:
|
||||
logfile = sys.stdout
|
||||
|
||||
progress = d.getVarFlag(func, 'progress')
|
||||
if progress:
|
||||
if progress == 'percent':
|
||||
# Use default regex
|
||||
logfile = bb.progress.BasicProgressHandler(d, outfile=logfile)
|
||||
elif progress.startswith('percent:'):
|
||||
# Use specified regex
|
||||
logfile = bb.progress.BasicProgressHandler(d, regex=progress.split(':', 1)[1], outfile=logfile)
|
||||
elif progress.startswith('outof:'):
|
||||
# Use specified regex
|
||||
logfile = bb.progress.OutOfProgressHandler(d, regex=progress.split(':', 1)[1], outfile=logfile)
|
||||
else:
|
||||
bb.warn('%s: invalid task progress varflag value "%s", ignoring' % (func, progress))
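# Illustration (assumed usage, not taken from this file): the 'progress'
# varflag handled above is set on a task in recipe metadata, roughly like
#   do_compile[progress] = "percent"
#   do_fetch[progress]   = "outof:copied (\d+) of (\d+) files"
# The task names and the regular expression are made up for illustration.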
|
||||
bb.debug(2, "Executing shell function %s" % func)
|
||||
|
||||
fifobuffer = bytearray()
|
||||
def readfifo(data):
|
||||
nonlocal fifobuffer
|
||||
fifobuffer.extend(data)
|
||||
while fifobuffer:
|
||||
message, token, nextmsg = fifobuffer.partition(b"\00")
|
||||
if token:
|
||||
splitval = message.split(b' ', 1)
|
||||
cmd = splitval[0].decode("utf-8")
|
||||
if len(splitval) > 1:
|
||||
value = splitval[1].decode("utf-8")
|
||||
else:
|
||||
value = ''
|
||||
if cmd == 'bbplain':
|
||||
bb.plain(value)
|
||||
elif cmd == 'bbnote':
|
||||
bb.note(value)
|
||||
elif cmd == 'bbwarn':
|
||||
bb.warn(value)
|
||||
elif cmd == 'bberror':
|
||||
bb.error(value)
|
||||
elif cmd == 'bbfatal':
|
||||
# The caller will call exit themselves, so bb.error() is
|
||||
# what we want here rather than bb.fatal()
|
||||
bb.error(value)
|
||||
elif cmd == 'bbfatal_log':
|
||||
bb.error(value, forcelog=True)
|
||||
elif cmd == 'bbdebug':
|
||||
splitval = value.split(' ', 1)
|
||||
level = int(splitval[0])
|
||||
value = splitval[1]
|
||||
bb.debug(level, value)
|
||||
else:
|
||||
bb.warn("Unrecognised command '%s' on FIFO" % cmd)
|
||||
fifobuffer = nextmsg
|
||||
else:
|
||||
break
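# Illustration (assumption based on the parsing above, not taken from this
# file): a shell task talks to this reader by writing NUL-terminated
# "<command> <message>" records to the task FIFO, e.g. the equivalent of
#   printf 'bbnote hello from the task\0' > "$fifopath"
# where bbnote/bbwarn/bberror select the logging function invoked above.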
|
||||
|
||||
tempdir = d.getVar('T')
|
||||
fifopath = os.path.join(tempdir, 'fifo.%s' % os.getpid())
|
||||
if os.path.exists(fifopath):
|
||||
os.unlink(fifopath)
|
||||
os.mkfifo(fifopath)
|
||||
with open(fifopath, 'r+b', buffering=0) as fifo:
|
||||
try:
|
||||
bb.debug(2, "Executing shell function %s" % func)
|
||||
|
||||
try:
|
||||
with open(os.devnull, 'r+') as stdin:
|
||||
bb.process.run(cmd, shell=False, stdin=stdin, log=logfile, extrafiles=[(fifo,readfifo)])
|
||||
except bb.process.CmdError:
|
||||
logfn = d.getVar('BB_LOGFILE')
|
||||
raise FuncFailed(func, logfn)
|
||||
finally:
|
||||
os.unlink(fifopath)
|
||||
try:
|
||||
with open(os.devnull, 'r+') as stdin:
|
||||
bb.process.run(cmd, shell=False, stdin=stdin, log=logfile)
|
||||
except bb.process.CmdError:
|
||||
logfn = d.getVar('BB_LOGFILE', True)
|
||||
raise FuncFailed(func, logfn)
|
||||
|
||||
bb.debug(2, "Shell function %s finished" % func)
|
||||
|
||||
@@ -466,7 +343,7 @@ def _exec_task(fn, task, d, quieterr):
|
||||
Execution of a task involves a bit more setup than executing a function,
|
||||
running it with its own local metadata, and with some useful variables set.
|
||||
"""
|
||||
if not d.getVarFlag(task, 'task', False):
|
||||
if not d.getVarFlag(task, 'task'):
|
||||
event.fire(TaskInvalid(task, d), d)
|
||||
logger.error("No such task: %s" % task)
|
||||
return 1
|
||||
@@ -474,29 +351,22 @@ def _exec_task(fn, task, d, quieterr):
|
||||
logger.debug(1, "Executing task %s", task)
|
||||
|
||||
localdata = _task_data(fn, task, d)
|
||||
tempdir = localdata.getVar('T')
|
||||
tempdir = localdata.getVar('T', True)
|
||||
if not tempdir:
|
||||
bb.fatal("T variable not set, unable to build")
|
||||
|
||||
# Change nice level if we're asked to
|
||||
nice = localdata.getVar("BB_TASK_NICE_LEVEL")
|
||||
nice = localdata.getVar("BB_TASK_NICE_LEVEL", True)
|
||||
if nice:
|
||||
curnice = os.nice(0)
|
||||
nice = int(nice) - curnice
|
||||
newnice = os.nice(nice)
|
||||
logger.debug(1, "Renice to %s " % newnice)
|
||||
ionice = localdata.getVar("BB_TASK_IONICE_LEVEL")
|
||||
if ionice:
|
||||
try:
|
||||
cls, prio = ionice.split(".", 1)
|
||||
bb.utils.ioprio_set(os.getpid(), int(cls), int(prio))
|
||||
except:
|
||||
bb.warn("Invalid ionice level %s" % ionice)
|
||||
|
||||
bb.utils.mkdirhier(tempdir)
|
||||
|
||||
# Determine the logfile to generate
|
||||
logfmt = localdata.getVar('BB_LOGFMT') or 'log.{task}.{pid}'
|
||||
logfmt = localdata.getVar('BB_LOGFMT', True) or 'log.{task}.{pid}'
|
||||
logbase = logfmt.format(task=task, pid=os.getpid())
|
||||
|
||||
# Document the order of the tasks...
|
||||
@@ -527,10 +397,7 @@ def _exec_task(fn, task, d, quieterr):
|
||||
self.triggered = False
|
||||
logging.Handler.__init__(self, logging.ERROR)
|
||||
def emit(self, record):
|
||||
if getattr(record, 'forcelog', False):
|
||||
self.triggered = False
|
||||
else:
|
||||
self.triggered = True
|
||||
self.triggered = True
|
||||
|
||||
# Handle logfiles
|
||||
si = open('/dev/null', 'r')
|
||||
@@ -551,7 +418,7 @@ def _exec_task(fn, task, d, quieterr):
|
||||
os.dup2(logfile.fileno(), oso[1])
|
||||
os.dup2(logfile.fileno(), ose[1])
|
||||
|
||||
# Ensure Python logging goes to the logfile
|
||||
# Ensure python logging goes to the logfile
|
||||
handler = logging.StreamHandler(logfile)
|
||||
handler.setFormatter(logformatter)
|
||||
# Always enable full debug output into task logfiles
|
||||
@@ -563,36 +430,24 @@ def _exec_task(fn, task, d, quieterr):
|
||||
|
||||
localdata.setVar('BB_LOGFILE', logfn)
|
||||
localdata.setVar('BB_RUNTASK', task)
|
||||
localdata.setVar('BB_TASK_LOGGER', bblogger)
|
||||
|
||||
flags = localdata.getVarFlags(task)
|
||||
|
||||
event.fire(TaskStarted(task, logfn, flags, localdata), localdata)
|
||||
try:
|
||||
try:
|
||||
event.fire(TaskStarted(task, logfn, flags, localdata), localdata)
|
||||
except (bb.BBHandledException, SystemExit):
|
||||
return 1
|
||||
except FuncFailed as exc:
|
||||
for func in (prefuncs or '').split():
|
||||
exec_func(func, localdata)
|
||||
exec_func(task, localdata)
|
||||
for func in (postfuncs or '').split():
|
||||
exec_func(func, localdata)
|
||||
except FuncFailed as exc:
|
||||
if quieterr:
|
||||
event.fire(TaskFailedSilent(task, logfn, localdata), localdata)
|
||||
else:
|
||||
errprinted = errchk.triggered
|
||||
logger.error(str(exc))
|
||||
return 1
|
||||
|
||||
try:
|
||||
for func in (prefuncs or '').split():
|
||||
exec_func(func, localdata)
|
||||
exec_func(task, localdata)
|
||||
for func in (postfuncs or '').split():
|
||||
exec_func(func, localdata)
|
||||
except FuncFailed as exc:
|
||||
if quieterr:
|
||||
event.fire(TaskFailedSilent(task, logfn, localdata), localdata)
|
||||
else:
|
||||
errprinted = errchk.triggered
|
||||
logger.error(str(exc))
|
||||
event.fire(TaskFailed(task, logfn, localdata, errprinted), localdata)
|
||||
return 1
|
||||
except bb.BBHandledException:
|
||||
event.fire(TaskFailed(task, logfn, localdata, True), localdata)
|
||||
return 1
|
||||
event.fire(TaskFailed(task, logfn, localdata, errprinted), localdata)
|
||||
return 1
|
||||
finally:
|
||||
sys.stdout.flush()
|
||||
sys.stderr.flush()
|
||||
@@ -617,7 +472,7 @@ def _exec_task(fn, task, d, quieterr):
|
||||
bb.utils.remove(loglink)
|
||||
event.fire(TaskSucceeded(task, logfn, localdata), localdata)
|
||||
|
||||
if not localdata.getVarFlag(task, 'nostamp', False) and not localdata.getVarFlag(task, 'selfstamp', False):
|
||||
if not localdata.getVarFlag(task, 'nostamp') and not localdata.getVarFlag(task, 'selfstamp'):
|
||||
make_stamp(task, localdata)
|
||||
|
||||
return 0
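
The prefuncs/postfuncs loops above run helper functions before and after the task body itself, between the TaskStarted and TaskSucceeded/TaskFailed events. A minimal sketch of how such hooks are typically attached, assuming the 'prefuncs' and 'postfuncs' varflag names (not shown in this excerpt) and made-up function names:

# Hypothetical illustration; 'do_example', 'example_prepare' and
# 'example_report' are made-up names, and the varflag names are an assumption.
d.setVarFlag('do_example', 'prefuncs', 'example_prepare')
d.setVarFlag('do_example', 'postfuncs', 'example_report')
# With those flags set, _exec_task() effectively runs:
#   exec_func('example_prepare', localdata)
#   exec_func('do_example', localdata)
#   exec_func('example_report', localdata)
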
|
||||
@@ -625,11 +480,11 @@ def _exec_task(fn, task, d, quieterr):
|
||||
def exec_task(fn, task, d, profile = False):
|
||||
try:
|
||||
quieterr = False
|
||||
if d.getVarFlag(task, "quieterrors", False) is not None:
|
||||
if d.getVarFlag(task, "quieterrors") is not None:
|
||||
quieterr = True
|
||||
|
||||
if profile:
|
||||
profname = "profile-%s.log" % (d.getVar("PN") + "-" + task)
|
||||
profname = "profile-%s.log" % (d.getVar("PN", True) + "-" + task)
|
||||
try:
|
||||
import cProfile as profile
|
||||
except:
|
||||
@@ -652,7 +507,7 @@ def exec_task(fn, task, d, profile = False):
|
||||
event.fire(failedevent, d)
|
||||
return 1
|
||||
|
||||
def stamp_internal(taskname, d, file_name, baseonly=False, noextra=False):
|
||||
def stamp_internal(taskname, d, file_name):
|
||||
"""
|
||||
Internal stamp helper function
|
||||
Makes sure the stamp directory exists
|
||||
@@ -666,17 +521,12 @@ def stamp_internal(taskname, d, file_name, baseonly=False, noextra=False):
|
||||
taskflagname = taskname.replace("_setscene", "")
|
||||
|
||||
if file_name:
|
||||
stamp = d.stamp[file_name]
|
||||
stamp = d.stamp_base[file_name].get(taskflagname) or d.stamp[file_name]
|
||||
extrainfo = d.stamp_extrainfo[file_name].get(taskflagname) or ""
|
||||
else:
|
||||
stamp = d.getVar('STAMP')
|
||||
file_name = d.getVar('BB_FILENAME')
|
||||
extrainfo = d.getVarFlag(taskflagname, 'stamp-extra-info') or ""
|
||||
|
||||
if baseonly:
|
||||
return stamp
|
||||
if noextra:
|
||||
extrainfo = ""
|
||||
stamp = d.getVarFlag(taskflagname, 'stamp-base', True) or d.getVar('STAMP', True)
|
||||
file_name = d.getVar('BB_FILENAME', True)
|
||||
extrainfo = d.getVarFlag(taskflagname, 'stamp-extra-info', True) or ""
|
||||
|
||||
if not stamp:
|
||||
return
|
||||
@@ -684,7 +534,7 @@ def stamp_internal(taskname, d, file_name, baseonly=False, noextra=False):
|
||||
stamp = bb.parse.siggen.stampfile(stamp, file_name, taskname, extrainfo)
|
||||
|
||||
stampdir = os.path.dirname(stamp)
|
||||
if cached_mtime_noerror(stampdir) == 0:
|
||||
if bb.parse.cached_mtime_noerror(stampdir) == 0:
|
||||
bb.utils.mkdirhier(stampdir)
|
||||
|
||||
return stamp
|
||||
@@ -702,12 +552,12 @@ def stamp_cleanmask_internal(taskname, d, file_name):
|
||||
taskflagname = taskname.replace("_setscene", "")
|
||||
|
||||
if file_name:
|
||||
stamp = d.stampclean[file_name]
|
||||
stamp = d.stamp_base_clean[file_name].get(taskflagname) or d.stampclean[file_name]
|
||||
extrainfo = d.stamp_extrainfo[file_name].get(taskflagname) or ""
|
||||
else:
|
||||
stamp = d.getVar('STAMPCLEAN')
|
||||
file_name = d.getVar('BB_FILENAME')
|
||||
extrainfo = d.getVarFlag(taskflagname, 'stamp-extra-info') or ""
|
||||
stamp = d.getVarFlag(taskflagname, 'stamp-base-clean', True) or d.getVar('STAMPCLEAN', True)
|
||||
file_name = d.getVar('BB_FILENAME', True)
|
||||
extrainfo = d.getVarFlag(taskflagname, 'stamp-extra-info', True) or ""
|
||||
|
||||
if not stamp:
|
||||
return []
|
||||
@@ -725,7 +575,7 @@ def make_stamp(task, d, file_name = None):
|
||||
for mask in cleanmask:
|
||||
for name in glob.glob(mask):
|
||||
# Preserve sigdata files in the stamps directory
|
||||
if "sigdata" in name or "sigbasedata" in name:
|
||||
if "sigdata" in name:
|
||||
continue
|
||||
# Preserve taint files in the stamps directory
|
||||
if name.endswith('.taint'):
|
||||
@@ -742,9 +592,8 @@ def make_stamp(task, d, file_name = None):
|
||||
# If we're in task context, write out a signature file for each task
|
||||
# as it completes
|
||||
if not task.endswith("_setscene") and task != "do_setscene" and not file_name:
|
||||
stampbase = stamp_internal(task, d, None, True)
|
||||
file_name = d.getVar('BB_FILENAME')
|
||||
bb.parse.siggen.dump_sigtask(file_name, task, stampbase, True)
|
||||
file_name = d.getVar('BB_FILENAME', True)
|
||||
bb.parse.siggen.dump_sigtask(file_name, task, d.getVar('STAMP', True), True)
|
||||
|
||||
def del_stamp(task, d, file_name = None):
|
||||
"""
|
||||
@@ -765,22 +614,22 @@ def write_taint(task, d, file_name = None):
|
||||
if file_name:
|
||||
taintfn = d.stamp[file_name] + '.' + task + '.taint'
|
||||
else:
|
||||
taintfn = d.getVar('STAMP') + '.' + task + '.taint'
|
||||
taintfn = d.getVar('STAMP', True) + '.' + task + '.taint'
|
||||
bb.utils.mkdirhier(os.path.dirname(taintfn))
|
||||
# The specific content of the taint file is not really important,
|
||||
# we just need it to be random, so a random UUID is used
|
||||
with open(taintfn, 'w') as taintf:
|
||||
taintf.write(str(uuid.uuid4()))
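
write_taint() only needs the file contents to differ from one call to the next: the signature code folds the taint file into the task hash, so the task is treated as out of date and re-executed on the next run. A minimal, self-contained sketch of the same logic, using a made-up stand-in for the datastore and a made-up stamp path:

# Hypothetical illustration of the taint mechanism above; FakeData and the
# stamp path are stand-ins, not BitBake API.
import os
import uuid

class FakeData:
    """Minimal stand-in for the datastore used by write_taint()."""
    def __init__(self, stamp):
        self._vars = {'STAMP': stamp}
    def getVar(self, name, expand=True):
        return self._vars.get(name)

def write_taint_sketch(task, d):
    # Mirrors the logic above: the content only needs to be random,
    # so a fresh UUID is enough to perturb the task signature.
    taintfn = d.getVar('STAMP') + '.' + task + '.taint'
    os.makedirs(os.path.dirname(taintfn), exist_ok=True)
    with open(taintfn, 'w') as taintf:
        taintf.write(str(uuid.uuid4()))

write_taint_sketch('do_compile', FakeData('/tmp/stamps/demo/1.0-r0/stamp'))
# -> /tmp/stamps/demo/1.0-r0/stamp.do_compile.taint now holds a fresh UUID.
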
|
||||
|
||||
def stampfile(taskname, d, file_name = None, noextra=False):
|
||||
def stampfile(taskname, d, file_name = None):
|
||||
"""
|
||||
Return the stamp for a given task
|
||||
(d can be a data dict or dataCache)
|
||||
"""
|
||||
return stamp_internal(taskname, d, file_name, noextra=noextra)
|
||||
return stamp_internal(taskname, d, file_name)
|
||||
|
||||
def add_tasks(tasklist, d):
|
||||
task_deps = d.getVar('_task_deps', False)
|
||||
def add_tasks(tasklist, deltasklist, d):
|
||||
task_deps = d.getVar('_task_deps')
|
||||
if not task_deps:
|
||||
task_deps = {}
|
||||
if not 'tasks' in task_deps:
|
||||
@@ -791,6 +640,9 @@ def add_tasks(tasklist, d):
|
||||
for task in tasklist:
|
||||
task = d.expand(task)
|
||||
|
||||
if task in deltasklist:
|
||||
continue
|
||||
|
||||
d.setVarFlag(task, 'task', 1)
|
||||
|
||||
if not task in task_deps['tasks']:
|
||||
@@ -827,12 +679,12 @@ def addtask(task, before, after, d):
|
||||
task = "do_" + task
|
||||
|
||||
d.setVarFlag(task, "task", 1)
|
||||
bbtasks = d.getVar('__BBTASKS', False) or []
|
||||
if task not in bbtasks:
|
||||
bbtasks = d.getVar('__BBTASKS') or []
|
||||
if not task in bbtasks:
|
||||
bbtasks.append(task)
|
||||
d.setVar('__BBTASKS', bbtasks)
|
||||
|
||||
existing = d.getVarFlag(task, "deps", False) or []
|
||||
existing = d.getVarFlag(task, "deps") or []
|
||||
if after is not None:
|
||||
# set up deps for function
|
||||
for entry in after.split():
|
||||
@@ -842,7 +694,7 @@ def addtask(task, before, after, d):
|
||||
if before is not None:
|
||||
# set up things that depend on this func
|
||||
for entry in before.split():
|
||||
existing = d.getVarFlag(entry, "deps", False) or []
|
||||
existing = d.getVarFlag(entry, "deps") or []
|
||||
if task not in existing:
|
||||
d.setVarFlag(entry, "deps", [task] + existing)
|
||||
|
||||
@@ -850,64 +702,8 @@ def deltask(task, d):
|
||||
if task[:3] != "do_":
|
||||
task = "do_" + task
|
||||
|
||||
bbtasks = d.getVar('__BBTASKS', False) or []
|
||||
if task in bbtasks:
|
||||
bbtasks.remove(task)
|
||||
d.delVarFlag(task, 'task')
|
||||
d.setVar('__BBTASKS', bbtasks)
|
||||
bbtasks = d.getVar('__BBDELTASKS') or []
|
||||
if not task in bbtasks:
|
||||
bbtasks.append(task)
|
||||
d.setVar('__BBDELTASKS', bbtasks)
|
||||
|
||||
d.delVarFlag(task, 'deps')
|
||||
for bbtask in d.getVar('__BBTASKS', False) or []:
|
||||
deps = d.getVarFlag(bbtask, 'deps', False) or []
|
||||
if task in deps:
|
||||
deps.remove(task)
|
||||
d.setVarFlag(bbtask, 'deps', deps)
|
||||
|
||||
def preceedtask(task, with_recrdeptasks, d):
|
||||
"""
|
||||
Returns a set of tasks in the current recipe which were specified as
|
||||
precondition by the task itself ("after") or which listed themselves
|
||||
as precondition ("before"). Preceeding tasks specified via the
|
||||
"recrdeptask" are included in the result only if requested. Beware
|
||||
that this may lead to the task itself being listed.
|
||||
"""
|
||||
preceed = set()
|
||||
|
||||
# Ignore tasks which don't exist
|
||||
tasks = d.getVar('__BBTASKS', False)
|
||||
if task not in tasks:
|
||||
return preceed
|
||||
|
||||
preceed.update(d.getVarFlag(task, 'deps') or [])
|
||||
if with_recrdeptasks:
|
||||
recrdeptask = d.getVarFlag(task, 'recrdeptask')
|
||||
if recrdeptask:
|
||||
preceed.update(recrdeptask.split())
|
||||
return preceed
|
||||
|
||||
def tasksbetween(task_start, task_end, d):
|
||||
"""
|
||||
Return the list of tasks between two tasks in the current recipe,
|
||||
where task_start is to start at and task_end is the task to end at
|
||||
(and task_end has a dependency chain back to task_start).
|
||||
"""
|
||||
outtasks = []
|
||||
tasks = list(filter(lambda k: d.getVarFlag(k, "task"), d.keys()))
|
||||
def follow_chain(task, endtask, chain=None):
|
||||
if not chain:
|
||||
chain = []
|
||||
chain.append(task)
|
||||
for othertask in tasks:
|
||||
if othertask == task:
|
||||
continue
|
||||
if task == endtask:
|
||||
for ctask in chain:
|
||||
if ctask not in outtasks:
|
||||
outtasks.append(ctask)
|
||||
else:
|
||||
deps = d.getVarFlag(othertask, 'deps', False)
|
||||
if task in deps:
|
||||
follow_chain(othertask, endtask, chain)
|
||||
chain.pop()
|
||||
follow_chain(task_start, task_end)
|
||||
return outtasks
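
tasksbetween() discovers such chains by repeatedly following the 'deps' varflags that addtask() records. A small self-contained sketch of the same walk, using a plain dict of made-up tasks in place of the datastore:

# Stand-alone sketch of the dependency walk above; the task names and the
# deps dict are made up and replace the datastore's "deps" varflags.
deps = {
    'do_fetch':   [],
    'do_unpack':  ['do_fetch'],
    'do_compile': ['do_unpack'],
    'do_install': ['do_compile'],
}

def tasks_between(start, end):
    """Return every task on a dependency chain from start to end, inclusive."""
    out = []
    def follow(task, chain):
        chain = chain + [task]
        if task == end:
            out.extend(t for t in chain if t not in out)
            return
        for other, other_deps in deps.items():
            if task in other_deps:
                follow(other, chain)
    follow(start, [])
    return out

print(tasks_between('do_fetch', 'do_install'))
# ['do_fetch', 'do_unpack', 'do_compile', 'do_install']
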
@@ -28,16 +28,22 @@
|
||||
# with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
|
||||
import os
|
||||
import sys
|
||||
import logging
|
||||
import pickle
|
||||
from collections import defaultdict
|
||||
import bb.utils
|
||||
|
||||
logger = logging.getLogger("BitBake.Cache")
|
||||
|
||||
__cache_version__ = "151"
|
||||
try:
|
||||
import cPickle as pickle
|
||||
except ImportError:
|
||||
import pickle
|
||||
logger.info("Importing cPickle failed. "
|
||||
"Falling back to a very slow implementation.")
|
||||
|
||||
__cache_version__ = "147"
|
||||
|
||||
def getCacheFile(path, filename, data_hash):
|
||||
return os.path.join(path, filename + "." + data_hash)
|
||||
@@ -71,29 +77,29 @@ class RecipeInfoCommon(object):
|
||||
|
||||
@classmethod
|
||||
def flaglist(cls, flag, varlist, metadata, squash=False):
|
||||
out_dict = dict((var, metadata.getVarFlag(var, flag))
|
||||
out_dict = dict((var, metadata.getVarFlag(var, flag, True))
|
||||
for var in varlist)
|
||||
if squash:
|
||||
return dict((k,v) for (k,v) in out_dict.items() if v)
|
||||
return dict((k,v) for (k,v) in out_dict.iteritems() if v)
|
||||
else:
|
||||
return out_dict
|
||||
|
||||
@classmethod
|
||||
def getvar(cls, var, metadata, expand = True):
|
||||
return metadata.getVar(var, expand) or ''
|
||||
def getvar(cls, var, metadata):
|
||||
return metadata.getVar(var, True) or ''
|
||||
|
||||
|
||||
class CoreRecipeInfo(RecipeInfoCommon):
|
||||
__slots__ = ()
|
||||
|
||||
cachefile = "bb_cache.dat"
|
||||
cachefile = "bb_cache.dat"
|
||||
|
||||
def __init__(self, filename, metadata):
|
||||
def __init__(self, filename, metadata):
|
||||
self.file_depends = metadata.getVar('__depends', False)
|
||||
self.timestamp = bb.parse.cached_mtime(filename)
|
||||
self.variants = self.listvar('__VARIANTS', metadata) + ['']
|
||||
self.appends = self.listvar('__BBAPPEND', metadata)
|
||||
self.nocache = self.getvar('BB_DONT_CACHE', metadata)
|
||||
self.nocache = self.getvar('__BB_DONT_CACHE', metadata)
|
||||
|
||||
self.skipreason = self.getvar('__SKIPPED', metadata)
|
||||
if self.skipreason:
|
||||
@@ -107,7 +113,7 @@ class CoreRecipeInfo(RecipeInfoCommon):
|
||||
|
||||
self.pn = self.getvar('PN', metadata)
|
||||
self.packages = self.listvar('PACKAGES', metadata)
|
||||
if not self.packages:
|
||||
if not self.pn in self.packages:
|
||||
self.packages.append(self.pn)
|
||||
|
||||
self.basetaskhashes = self.taskvar('BB_BASEHASH', self.tasks, metadata)
|
||||
@@ -122,7 +128,9 @@ class CoreRecipeInfo(RecipeInfoCommon):
|
||||
self.defaultpref = self.intvar('DEFAULT_PREFERENCE', metadata)
|
||||
self.not_world = self.getvar('EXCLUDE_FROM_WORLD', metadata)
|
||||
self.stamp = self.getvar('STAMP', metadata)
|
||||
self.stampclean = self.getvar('STAMPCLEAN', metadata)
|
||||
self.stampclean = self.getvar('STAMPCLEAN', metadata)
|
||||
self.stamp_base = self.flaglist('stamp-base', self.tasks, metadata)
|
||||
self.stamp_base_clean = self.flaglist('stamp-base-clean', self.tasks, metadata)
|
||||
self.stamp_extrainfo = self.flaglist('stamp-extra-info', self.tasks, metadata)
|
||||
self.file_checksums = self.flaglist('file-checksums', self.tasks, metadata, True)
|
||||
self.packages_dynamic = self.listvar('PACKAGES_DYNAMIC', metadata)
|
||||
@@ -134,11 +142,10 @@ class CoreRecipeInfo(RecipeInfoCommon):
|
||||
self.rprovides_pkg = self.pkgvar('RPROVIDES', self.packages, metadata)
|
||||
self.rdepends_pkg = self.pkgvar('RDEPENDS', self.packages, metadata)
|
||||
self.rrecommends_pkg = self.pkgvar('RRECOMMENDS', self.packages, metadata)
|
||||
self.inherits = self.getvar('__inherit_cache', metadata, expand=False)
|
||||
self.inherits = self.getvar('__inherit_cache', metadata)
|
||||
self.fakerootenv = self.getvar('FAKEROOTENV', metadata)
|
||||
self.fakerootdirs = self.getvar('FAKEROOTDIRS', metadata)
|
||||
self.fakerootnoenv = self.getvar('FAKEROOTNOENV', metadata)
|
||||
self.extradepsfunc = self.getvar('calculate_extra_depends', metadata)
|
||||
|
||||
@classmethod
|
||||
def init_cacheData(cls, cachedata):
|
||||
@@ -151,6 +158,8 @@ class CoreRecipeInfo(RecipeInfoCommon):
|
||||
|
||||
cachedata.stamp = {}
|
||||
cachedata.stampclean = {}
|
||||
cachedata.stamp_base = {}
|
||||
cachedata.stamp_base_clean = {}
|
||||
cachedata.stamp_extrainfo = {}
|
||||
cachedata.file_checksums = {}
|
||||
cachedata.fn_provides = {}
|
||||
@@ -174,7 +183,6 @@ class CoreRecipeInfo(RecipeInfoCommon):
|
||||
cachedata.fakerootenv = {}
|
||||
cachedata.fakerootnoenv = {}
|
||||
cachedata.fakerootdirs = {}
|
||||
cachedata.extradepsfunc = {}
|
||||
|
||||
def add_cacheData(self, cachedata, fn):
|
||||
cachedata.task_deps[fn] = self.task_deps
|
||||
@@ -184,6 +192,8 @@ class CoreRecipeInfo(RecipeInfoCommon):
|
||||
cachedata.pkg_dp[fn] = self.defaultpref
|
||||
cachedata.stamp[fn] = self.stamp
|
||||
cachedata.stampclean[fn] = self.stampclean
|
||||
cachedata.stamp_base[fn] = self.stamp_base
|
||||
cachedata.stamp_base_clean[fn] = self.stamp_base_clean
|
||||
cachedata.stamp_extrainfo[fn] = self.stamp_extrainfo
|
||||
cachedata.file_checksums[fn] = self.file_checksums
|
||||
|
||||
@@ -210,22 +220,19 @@ class CoreRecipeInfo(RecipeInfoCommon):
|
||||
rprovides += self.rprovides_pkg[package]
|
||||
|
||||
for rprovide in rprovides:
|
||||
if fn not in cachedata.rproviders[rprovide]:
|
||||
cachedata.rproviders[rprovide].append(fn)
|
||||
cachedata.rproviders[rprovide].append(fn)
|
||||
|
||||
for package in self.packages_dynamic:
|
||||
cachedata.packages_dynamic[package].append(fn)
|
||||
|
||||
# Build hash of runtime depends and recommends
|
||||
for package in self.packages:
|
||||
# Build hash of runtime depends and rececommends
|
||||
for package in self.packages + [self.pn]:
|
||||
cachedata.rundeps[fn][package] = list(self.rdepends) + self.rdepends_pkg[package]
|
||||
cachedata.runrecs[fn][package] = list(self.rrecommends) + self.rrecommends_pkg[package]
|
||||
|
||||
# Collect files we may need for possible world-dep
|
||||
# calculations
|
||||
if self.not_world:
|
||||
logger.debug(1, "EXCLUDE FROM WORLD: %s", fn)
|
||||
else:
|
||||
if not self.not_world:
|
||||
cachedata.possible_world.append(fn)
|
||||
|
||||
# create a collection of all targets for sanity checking
|
||||
@@ -234,7 +241,7 @@ class CoreRecipeInfo(RecipeInfoCommon):
|
||||
cachedata.universe_target.append(self.pn)
|
||||
|
||||
cachedata.hashfn[fn] = self.hashfilename
|
||||
for task, taskhash in self.basetaskhashes.items():
|
||||
for task, taskhash in self.basetaskhashes.iteritems():
|
||||
identifier = '%s.%s' % (fn, task)
|
||||
cachedata.basetaskhash[identifier] = taskhash
|
||||
|
||||
@@ -242,146 +249,24 @@ class CoreRecipeInfo(RecipeInfoCommon):
|
||||
cachedata.fakerootenv[fn] = self.fakerootenv
|
||||
cachedata.fakerootnoenv[fn] = self.fakerootnoenv
|
||||
cachedata.fakerootdirs[fn] = self.fakerootdirs
|
||||
cachedata.extradepsfunc[fn] = self.extradepsfunc
|
||||
|
||||
def virtualfn2realfn(virtualfn):
"""
Convert a virtual file name to a real one + the associated subclass keyword
"""
mc = ""
if virtualfn.startswith('multiconfig:'):
elems = virtualfn.split(':')
mc = elems[1]
virtualfn = ":".join(elems[2:])

fn = virtualfn
cls = ""
if virtualfn.startswith('virtual:'):
elems = virtualfn.split(':')
cls = ":".join(elems[1:-1])
fn = elems[-1]
return (fn, cls, mc)
|
||||
|
||||
def realfn2virtual(realfn, cls, mc):
|
||||
"""
|
||||
Convert a real filename + the associated subclass keyword to a virtual filename
|
||||
"""
|
||||
if cls:
|
||||
realfn = "virtual:" + cls + ":" + realfn
|
||||
if mc:
|
||||
realfn = "multiconfig:" + mc + ":" + realfn
|
||||
return realfn
|
||||
|
||||
def variant2virtual(realfn, variant):
|
||||
"""
|
||||
Convert a real filename + the associated subclass keyword to a virtual filename
|
||||
"""
|
||||
if variant == "":
|
||||
return realfn
|
||||
if variant.startswith("multiconfig:"):
|
||||
elems = variant.split(":")
|
||||
if elems[2]:
|
||||
return "multiconfig:" + elems[1] + ":virtual:" + ":".join(elems[2:]) + ":" + realfn
|
||||
return "multiconfig:" + elems[1] + ":" + realfn
|
||||
return "virtual:" + variant + ":" + realfn
def parse_recipe(bb_data, bbfile, appends, mc=''):
|
||||
"""
|
||||
Parse a recipe
|
||||
"""
|
||||
|
||||
chdir_back = False
|
||||
|
||||
bb_data.setVar("__BBMULTICONFIG", mc)
|
||||
|
||||
# expand tmpdir to include this topdir
|
||||
bb_data.setVar('TMPDIR', bb_data.getVar('TMPDIR') or "")
|
||||
bbfile_loc = os.path.abspath(os.path.dirname(bbfile))
|
||||
oldpath = os.path.abspath(os.getcwd())
|
||||
bb.parse.cached_mtime_noerror(bbfile_loc)
|
||||
|
||||
# The ConfHandler first looks if there is a TOPDIR and if not
|
||||
# then it would call getcwd().
|
||||
# Previously, we chdir()ed to bbfile_loc, called the handler
|
||||
# and finally chdir()ed back, a couple of thousand times. We now
|
||||
# just fill in TOPDIR to point to bbfile_loc if there is no TOPDIR yet.
|
||||
if not bb_data.getVar('TOPDIR', False):
|
||||
chdir_back = True
|
||||
bb_data.setVar('TOPDIR', bbfile_loc)
|
||||
try:
|
||||
if appends:
|
||||
bb_data.setVar('__BBAPPEND', " ".join(appends))
|
||||
bb_data = bb.parse.handle(bbfile, bb_data)
|
||||
if chdir_back:
|
||||
os.chdir(oldpath)
|
||||
return bb_data
|
||||
except:
|
||||
if chdir_back:
|
||||
os.chdir(oldpath)
|
||||
raise
|
||||
|
||||
|
||||
|
||||
class NoCache(object):
|
||||
|
||||
def __init__(self, databuilder):
|
||||
self.databuilder = databuilder
|
||||
self.data = databuilder.data
|
||||
|
||||
def loadDataFull(self, virtualfn, appends):
|
||||
"""
|
||||
Return a complete set of data for fn.
|
||||
To do this, we need to parse the file.
|
||||
"""
|
||||
logger.debug(1, "Parsing %s (full)" % virtualfn)
|
||||
(fn, virtual, mc) = virtualfn2realfn(virtualfn)
|
||||
bb_data = self.load_bbfile(virtualfn, appends, virtonly=True)
|
||||
return bb_data[virtual]
|
||||
|
||||
def load_bbfile(self, bbfile, appends, virtonly = False):
|
||||
"""
|
||||
Load and parse one .bb build file
|
||||
Return the data and whether parsing resulted in the file being skipped
|
||||
"""
|
||||
|
||||
if virtonly:
|
||||
(bbfile, virtual, mc) = virtualfn2realfn(bbfile)
|
||||
bb_data = self.databuilder.mcdata[mc].createCopy()
|
||||
bb_data.setVar("__ONLYFINALISE", virtual or "default")
|
||||
datastores = parse_recipe(bb_data, bbfile, appends, mc)
|
||||
return datastores
|
||||
|
||||
bb_data = self.data.createCopy()
|
||||
datastores = parse_recipe(bb_data, bbfile, appends)
|
||||
|
||||
for mc in self.databuilder.mcdata:
|
||||
if not mc:
|
||||
continue
|
||||
bb_data = self.databuilder.mcdata[mc].createCopy()
|
||||
newstores = parse_recipe(bb_data, bbfile, appends, mc)
|
||||
for ns in newstores:
|
||||
datastores["multiconfig:%s:%s" % (mc, ns)] = newstores[ns]
|
||||
|
||||
return datastores
|
||||
|
||||
class Cache(NoCache):
|
||||
class Cache(object):
|
||||
"""
|
||||
BitBake Cache implementation
|
||||
"""
|
||||
|
||||
def __init__(self, databuilder, data_hash, caches_array):
|
||||
super().__init__(databuilder)
|
||||
data = databuilder.data
|
||||
|
||||
def __init__(self, data, data_hash, caches_array):
|
||||
# Pass caches_array information into Cache Constructor
|
||||
# It will be used later for deciding whether we
|
||||
# need extra cache file dump/load support
|
||||
# It will be used in later for deciding whether we
|
||||
# need extra cache file dump/load support
|
||||
self.caches_array = caches_array
|
||||
self.cachedir = data.getVar("CACHE")
|
||||
self.cachedir = data.getVar("CACHE", True)
|
||||
self.clean = set()
|
||||
self.checked = set()
|
||||
self.depends_cache = {}
|
||||
self.data = None
|
||||
self.data_fn = None
|
||||
self.cacheclean = True
|
||||
self.data_hash = data_hash
|
||||
@@ -395,87 +280,78 @@ class Cache(NoCache):
|
||||
self.has_cache = True
|
||||
self.cachefile = getCacheFile(self.cachedir, "bb_cache.dat", self.data_hash)
|
||||
|
||||
logger.debug(1, "Cache dir: %s", self.cachedir)
|
||||
logger.debug(1, "Using cache in '%s'", self.cachedir)
|
||||
bb.utils.mkdirhier(self.cachedir)
|
||||
|
||||
cache_ok = True
|
||||
if self.caches_array:
|
||||
for cache_class in self.caches_array:
|
||||
cachefile = getCacheFile(self.cachedir, cache_class.cachefile, self.data_hash)
|
||||
cache_ok = cache_ok and os.path.exists(cachefile)
|
||||
cache_class.init_cacheData(self)
|
||||
if type(cache_class) is type and issubclass(cache_class, RecipeInfoCommon):
|
||||
cachefile = getCacheFile(self.cachedir, cache_class.cachefile, self.data_hash)
|
||||
cache_ok = cache_ok and os.path.exists(cachefile)
|
||||
cache_class.init_cacheData(self)
|
||||
if cache_ok:
|
||||
self.load_cachefile()
|
||||
elif os.path.isfile(self.cachefile):
|
||||
logger.info("Out of date cache found, rebuilding...")
|
||||
else:
|
||||
logger.debug(1, "Cache file %s not found, building..." % self.cachefile)
|
||||
|
||||
def load_cachefile(self):
|
||||
# First, use the core cache file information for
# validity checking
|
||||
with open(self.cachefile, "rb") as cachefile:
|
||||
pickled = pickle.Unpickler(cachefile)
|
||||
try:
|
||||
cache_ver = pickled.load()
|
||||
bitbake_ver = pickled.load()
|
||||
except Exception:
|
||||
logger.info('Invalid cache, rebuilding...')
|
||||
return
|
||||
|
||||
if cache_ver != __cache_version__:
|
||||
logger.info('Cache version mismatch, rebuilding...')
|
||||
return
|
||||
elif bitbake_ver != bb.__version__:
|
||||
logger.info('Bitbake version mismatch, rebuilding...')
|
||||
return
|
||||
|
||||
|
||||
cachesize = 0
|
||||
previous_progress = 0
|
||||
previous_percent = 0
|
||||
|
||||
# Calculate the correct cachesize of all those cache files
|
||||
for cache_class in self.caches_array:
|
||||
cachefile = getCacheFile(self.cachedir, cache_class.cachefile, self.data_hash)
|
||||
with open(cachefile, "rb") as cachefile:
|
||||
cachesize += os.fstat(cachefile.fileno()).st_size
|
||||
if type(cache_class) is type and issubclass(cache_class, RecipeInfoCommon):
|
||||
cachefile = getCacheFile(self.cachedir, cache_class.cachefile, self.data_hash)
|
||||
with open(cachefile, "rb") as cachefile:
|
||||
cachesize += os.fstat(cachefile.fileno()).st_size
|
||||
|
||||
bb.event.fire(bb.event.CacheLoadStarted(cachesize), self.data)
|
||||
|
||||
|
||||
for cache_class in self.caches_array:
|
||||
cachefile = getCacheFile(self.cachedir, cache_class.cachefile, self.data_hash)
|
||||
logger.debug(1, 'Loading cache file: %s' % cachefile)
|
||||
with open(cachefile, "rb") as cachefile:
|
||||
pickled = pickle.Unpickler(cachefile)
|
||||
# Check cache version information
|
||||
try:
|
||||
cache_ver = pickled.load()
|
||||
bitbake_ver = pickled.load()
|
||||
except Exception:
|
||||
logger.info('Invalid cache, rebuilding...')
|
||||
return
|
||||
if type(cache_class) is type and issubclass(cache_class, RecipeInfoCommon):
|
||||
cachefile = getCacheFile(self.cachedir, cache_class.cachefile, self.data_hash)
|
||||
with open(cachefile, "rb") as cachefile:
|
||||
pickled = pickle.Unpickler(cachefile)
|
||||
while cachefile:
|
||||
try:
|
||||
key = pickled.load()
|
||||
value = pickled.load()
|
||||
except Exception:
|
||||
break
|
||||
if self.depends_cache.has_key(key):
|
||||
self.depends_cache[key].append(value)
|
||||
else:
|
||||
self.depends_cache[key] = [value]
|
||||
# only fire events on even percentage boundaries
|
||||
current_progress = cachefile.tell() + previous_progress
|
||||
current_percent = 100 * current_progress / cachesize
|
||||
if current_percent > previous_percent:
|
||||
previous_percent = current_percent
|
||||
bb.event.fire(bb.event.CacheLoadProgress(current_progress, cachesize),
|
||||
self.data)
|
||||
|
||||
if cache_ver != __cache_version__:
|
||||
logger.info('Cache version mismatch, rebuilding...')
|
||||
return
|
||||
elif bitbake_ver != bb.__version__:
|
||||
logger.info('Bitbake version mismatch, rebuilding...')
|
||||
return
|
||||
|
||||
# Load the rest of the cache file
|
||||
current_progress = 0
|
||||
while cachefile:
|
||||
try:
|
||||
key = pickled.load()
|
||||
value = pickled.load()
|
||||
except Exception:
|
||||
break
|
||||
if not isinstance(key, str):
|
||||
bb.warn("%s from extras cache is not a string?" % key)
|
||||
break
|
||||
if not isinstance(value, RecipeInfoCommon):
|
||||
bb.warn("%s from extras cache is not a RecipeInfoCommon class?" % value)
|
||||
break
|
||||
|
||||
if key in self.depends_cache:
|
||||
self.depends_cache[key].append(value)
|
||||
else:
|
||||
self.depends_cache[key] = [value]
|
||||
# only fire events on even percentage boundaries
|
||||
current_progress = cachefile.tell() + previous_progress
|
||||
if current_progress > cachesize:
|
||||
# we might have calculated incorrect total size because a file
|
||||
# might've been written out just after we checked its size
|
||||
cachesize = current_progress
|
||||
current_percent = 100 * current_progress / cachesize
|
||||
if current_percent > previous_percent:
|
||||
previous_percent = current_percent
|
||||
bb.event.fire(bb.event.CacheLoadProgress(current_progress, cachesize),
|
||||
self.data)
|
||||
|
||||
previous_progress += current_progress
|
||||
previous_progress += current_progress
|
||||
|
||||
# Note: the number of depends_cache entries corresponds to the number of parsed files.
# The same file may have several caches, but it is still regarded as one item in the cache
|
||||
@@ -483,33 +359,69 @@ class Cache(NoCache):
|
||||
len(self.depends_cache)),
|
||||
self.data)
|
||||
|
||||
def parse(self, filename, appends):
|
||||
|
||||
@staticmethod
|
||||
def virtualfn2realfn(virtualfn):
|
||||
"""
|
||||
Convert a virtual file name to a real one + the associated subclass keyword
|
||||
"""
|
||||
|
||||
fn = virtualfn
|
||||
cls = ""
|
||||
if virtualfn.startswith('virtual:'):
|
||||
elems = virtualfn.split(':')
|
||||
cls = ":".join(elems[1:-1])
|
||||
fn = elems[-1]
|
||||
return (fn, cls)
|
||||
|
||||
@staticmethod
|
||||
def realfn2virtual(realfn, cls):
|
||||
"""
|
||||
Convert a real filename + the associated subclass keyword to a virtual filename
|
||||
"""
|
||||
if cls == "":
|
||||
return realfn
|
||||
return "virtual:" + cls + ":" + realfn
|
||||
|
||||
@classmethod
|
||||
def loadDataFull(cls, virtualfn, appends, cfgData):
|
||||
"""
|
||||
Return a complete set of data for fn.
|
||||
To do this, we need to parse the file.
|
||||
"""
|
||||
|
||||
(fn, virtual) = cls.virtualfn2realfn(virtualfn)
|
||||
|
||||
logger.debug(1, "Parsing %s (full)", fn)
|
||||
|
||||
cfgData.setVar("__ONLYFINALISE", virtual or "default")
|
||||
bb_data = cls.load_bbfile(fn, appends, cfgData)
|
||||
return bb_data[virtual]
|
||||
|
||||
@classmethod
|
||||
def parse(cls, filename, appends, configdata, caches_array):
|
||||
"""Parse the specified filename, returning the recipe information"""
|
||||
logger.debug(1, "Parsing %s", filename)
|
||||
infos = []
|
||||
datastores = self.load_bbfile(filename, appends)
|
||||
datastores = cls.load_bbfile(filename, appends, configdata)
|
||||
depends = []
|
||||
variants = []
|
||||
# Process the "real" fn last so we can store variants list
|
||||
for variant, data in sorted(datastores.items(),
|
||||
for variant, data in sorted(datastores.iteritems(),
|
||||
key=lambda i: i[0],
|
||||
reverse=True):
|
||||
virtualfn = variant2virtual(filename, variant)
|
||||
variants.append(variant)
|
||||
virtualfn = cls.realfn2virtual(filename, variant)
|
||||
depends = depends + (data.getVar("__depends", False) or [])
|
||||
if depends and not variant:
|
||||
data.setVar("__depends", depends)
|
||||
if virtualfn == filename:
|
||||
data.setVar("__VARIANTS", " ".join(variants))
|
||||
|
||||
info_array = []
|
||||
for cache_class in self.caches_array:
|
||||
info = cache_class(filename, data)
|
||||
info_array.append(info)
|
||||
for cache_class in caches_array:
|
||||
if type(cache_class) is type and issubclass(cache_class, RecipeInfoCommon):
|
||||
info = cache_class(filename, data)
|
||||
info_array.append(info)
|
||||
infos.append((virtualfn, info_array))
|
||||
|
||||
return infos
|
||||
|
||||
def load(self, filename, appends):
|
||||
def load(self, filename, appends, configdata):
|
||||
"""Obtain the recipe information for the specified filename,
|
||||
using cached values if available, otherwise parsing.
|
||||
|
||||
@@ -523,20 +435,21 @@ class Cache(NoCache):
|
||||
# info_array item is a list of [CoreRecipeInfo, XXXRecipeInfo]
|
||||
info_array = self.depends_cache[filename]
|
||||
for variant in info_array[0].variants:
|
||||
virtualfn = variant2virtual(filename, variant)
|
||||
virtualfn = self.realfn2virtual(filename, variant)
|
||||
infos.append((virtualfn, self.depends_cache[virtualfn]))
|
||||
else:
|
||||
logger.debug(1, "Parsing %s", filename)
|
||||
return self.parse(filename, appends, configdata, self.caches_array)
|
||||
|
||||
return cached, infos
|
||||
|
||||
def loadData(self, fn, appends, cacheData):
|
||||
def loadData(self, fn, appends, cfgData, cacheData):
|
||||
"""Load the recipe info for the specified filename,
|
||||
parsing and adding to the cache if necessary, and adding
|
||||
the recipe information to the supplied CacheData instance."""
|
||||
skipped, virtuals = 0, 0
|
||||
|
||||
cached, infos = self.load(fn, appends)
|
||||
cached, infos = self.load(fn, appends, cfgData)
|
||||
for virtualfn, info_array in infos:
|
||||
if info_array[0].skipped:
|
||||
logger.debug(1, "Skipping %s: %s", virtualfn, info_array[0].skipreason)
|
||||
@@ -613,25 +526,9 @@ class Cache(NoCache):
|
||||
|
||||
if hasattr(info_array[0], 'file_checksums'):
|
||||
for _, fl in info_array[0].file_checksums.items():
|
||||
fl = fl.strip()
|
||||
while fl:
|
||||
# A .split() would be simpler but means spaces or colons in filenames would break
|
||||
a = fl.find(":True")
|
||||
b = fl.find(":False")
|
||||
if ((a < 0) and b) or ((b > 0) and (b < a)):
|
||||
f = fl[:b+6]
|
||||
fl = fl[b+7:]
|
||||
elif ((b < 0) and a) or ((a > 0) and (a < b)):
|
||||
f = fl[:a+5]
|
||||
fl = fl[a+6:]
|
||||
else:
|
||||
break
|
||||
fl = fl.strip()
|
||||
if "*" in f:
|
||||
continue
|
||||
f, exist = f.split(":")
|
||||
if (exist == "True" and not os.path.exists(f)) or (exist == "False" and os.path.exists(f)):
|
||||
logger.debug(2, "Cache: %s's file checksum list file %s changed",
|
||||
for f in fl.split():
|
||||
if not ('*' in f or os.path.exists(f)):
|
||||
logger.debug(2, "Cache: %s's file checksum list file %s was removed",
|
||||
fn, f)
|
||||
self.remove(fn)
|
||||
return False
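
The loop above scans for the literal ":True"/":False" suffixes rather than splitting on whitespace, so spaces in filenames survive. A small sketch of the same scan on a made-up file-checksums value:

# Sketch mirroring the ":True"/":False" scan above; the paths are made up
# and rpartition() stands in for the final path/existence split.
def split_checksum_entries(fl):
    entries = []
    fl = fl.strip()
    while fl:
        a = fl.find(":True")
        b = fl.find(":False")
        if ((a < 0) and b) or ((b > 0) and (b < a)):
            f, fl = fl[:b + 6], fl[b + 7:]
        elif ((b < 0) and a) or ((a > 0) and (a < b)):
            f, fl = fl[:a + 5], fl[a + 6:]
        else:
            break
        fl = fl.strip()
        path, _, exist = f.rpartition(":")
        entries.append((path, exist == "True"))
    return entries

print(split_checksum_entries("/srv/files/defconfig:True /srv/files/missing patch.diff:False"))
# [('/srv/files/defconfig', True), ('/srv/files/missing patch.diff', False)]
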
@@ -644,19 +541,16 @@ class Cache(NoCache):
|
||||
|
||||
invalid = False
|
||||
for cls in info_array[0].variants:
|
||||
virtualfn = variant2virtual(fn, cls)
|
||||
virtualfn = self.realfn2virtual(fn, cls)
|
||||
self.clean.add(virtualfn)
|
||||
if virtualfn not in self.depends_cache:
|
||||
logger.debug(2, "Cache: %s is not cached", virtualfn)
|
||||
invalid = True
|
||||
elif len(self.depends_cache[virtualfn]) != len(self.caches_array):
|
||||
logger.debug(2, "Cache: Extra caches missing for %s?" % virtualfn)
|
||||
invalid = True
|
||||
|
||||
# If any one of the variants is not present, mark as invalid for all
|
||||
if invalid:
|
||||
for cls in info_array[0].variants:
|
||||
virtualfn = variant2virtual(fn, cls)
|
||||
virtualfn = self.realfn2virtual(fn, cls)
|
||||
if virtualfn in self.clean:
|
||||
logger.debug(2, "Cache: Removing %s from cache", virtualfn)
|
||||
self.clean.remove(virtualfn)
|
||||
@@ -693,19 +587,30 @@ class Cache(NoCache):
|
||||
logger.debug(2, "Cache is clean, not saving.")
|
||||
return
|
||||
|
||||
file_dict = {}
|
||||
pickler_dict = {}
|
||||
for cache_class in self.caches_array:
|
||||
cache_class_name = cache_class.__name__
|
||||
cachefile = getCacheFile(self.cachedir, cache_class.cachefile, self.data_hash)
|
||||
with open(cachefile, "wb") as f:
|
||||
p = pickle.Pickler(f, pickle.HIGHEST_PROTOCOL)
|
||||
p.dump(__cache_version__)
|
||||
p.dump(bb.__version__)
|
||||
if type(cache_class) is type and issubclass(cache_class, RecipeInfoCommon):
|
||||
cache_class_name = cache_class.__name__
|
||||
cachefile = getCacheFile(self.cachedir, cache_class.cachefile, self.data_hash)
|
||||
file_dict[cache_class_name] = open(cachefile, "wb")
|
||||
pickler_dict[cache_class_name] = pickle.Pickler(file_dict[cache_class_name], pickle.HIGHEST_PROTOCOL)
|
||||
|
||||
pickler_dict['CoreRecipeInfo'].dump(__cache_version__)
|
||||
pickler_dict['CoreRecipeInfo'].dump(bb.__version__)
|
||||
|
||||
for key, info_array in self.depends_cache.items():
|
||||
for info in info_array:
|
||||
if isinstance(info, RecipeInfoCommon) and info.__class__.__name__ == cache_class_name:
|
||||
p.dump(key)
|
||||
p.dump(info)
|
||||
try:
|
||||
for key, info_array in self.depends_cache.iteritems():
|
||||
for info in info_array:
|
||||
if isinstance(info, RecipeInfoCommon):
|
||||
cache_class_name = info.__class__.__name__
|
||||
pickler_dict[cache_class_name].dump(key)
|
||||
pickler_dict[cache_class_name].dump(info)
|
||||
finally:
|
||||
for cache_class in self.caches_array:
|
||||
if type(cache_class) is type and issubclass(cache_class, RecipeInfoCommon):
|
||||
cache_class_name = cache_class.__name__
|
||||
file_dict[cache_class_name].close()
|
||||
|
||||
del self.depends_cache
|
||||
|
||||
@@ -713,13 +618,10 @@ class Cache(NoCache):
|
||||
def mtime(cachefile):
|
||||
return bb.parse.cached_mtime_noerror(cachefile)
|
||||
|
||||
def add_info(self, filename, info_array, cacheData, parsed=None, watcher=None):
|
||||
def add_info(self, filename, info_array, cacheData, parsed=None):
|
||||
if isinstance(info_array[0], CoreRecipeInfo) and (not info_array[0].skipped):
|
||||
cacheData.add_from_recipeinfo(filename, info_array)
|
||||
|
||||
if watcher:
|
||||
watcher(info_array[0].file_depends)
|
||||
|
||||
if not self.has_cache:
|
||||
return
|
||||
|
||||
@@ -733,13 +635,50 @@ class Cache(NoCache):
|
||||
Save data we need into the cache
|
||||
"""
|
||||
|
||||
realfn = virtualfn2realfn(file_name)[0]
|
||||
realfn = self.virtualfn2realfn(file_name)[0]
|
||||
|
||||
info_array = []
|
||||
for cache_class in self.caches_array:
|
||||
info_array.append(cache_class(realfn, data))
|
||||
if type(cache_class) is type and issubclass(cache_class, RecipeInfoCommon):
|
||||
info_array.append(cache_class(realfn, data))
|
||||
self.add_info(file_name, info_array, cacheData, parsed)
|
||||
|
||||
@staticmethod
|
||||
def load_bbfile(bbfile, appends, config):
|
||||
"""
|
||||
Load and parse one .bb build file
|
||||
Return the data and whether parsing resulted in the file being skipped
|
||||
"""
|
||||
chdir_back = False
|
||||
|
||||
from bb import data, parse
|
||||
|
||||
# expand tmpdir to include this topdir
|
||||
data.setVar('TMPDIR', data.getVar('TMPDIR', config, 1) or "", config)
|
||||
bbfile_loc = os.path.abspath(os.path.dirname(bbfile))
|
||||
oldpath = os.path.abspath(os.getcwd())
|
||||
parse.cached_mtime_noerror(bbfile_loc)
|
||||
bb_data = data.init_db(config)
|
||||
# The ConfHandler first looks if there is a TOPDIR and if not
|
||||
# then it would call getcwd().
|
||||
# Previously, we chdir()ed to bbfile_loc, called the handler
|
||||
# and finally chdir()ed back, a couple of thousand times. We now
|
||||
# just fill in TOPDIR to point to bbfile_loc if there is no TOPDIR yet.
|
||||
if not data.getVar('TOPDIR', bb_data):
|
||||
chdir_back = True
|
||||
data.setVar('TOPDIR', bbfile_loc, bb_data)
|
||||
try:
|
||||
if appends:
|
||||
data.setVar('__BBAPPEND', " ".join(appends), bb_data)
|
||||
bb_data = parse.handle(bbfile, bb_data)
|
||||
if chdir_back:
|
||||
os.chdir(oldpath)
|
||||
return bb_data
|
||||
except:
|
||||
if chdir_back:
|
||||
os.chdir(oldpath)
|
||||
raise
|
||||
|
||||
|
||||
def init(cooker):
|
||||
"""
|
||||
@@ -753,7 +692,7 @@ def init(cooker):
|
||||
|
||||
* Its mtime
|
||||
* The mtimes of all its dependencies
|
||||
* Whether it caused a parse.SkipRecipe exception
|
||||
* Whether it caused a parse.SkipPackage exception
|
||||
|
||||
Files causing parsing errors are evicted from the cache.
|
||||
|
||||
@@ -769,9 +708,8 @@ class CacheData(object):
|
||||
def __init__(self, caches_array):
|
||||
self.caches_array = caches_array
|
||||
for cache_class in self.caches_array:
|
||||
if not issubclass(cache_class, RecipeInfoCommon):
|
||||
bb.error("Extra cache data class %s should subclass RecipeInfoCommon class" % cache_class)
|
||||
cache_class.init_cacheData(self)
|
||||
if type(cache_class) is type and issubclass(cache_class, RecipeInfoCommon):
|
||||
cache_class.init_cacheData(self)
|
||||
|
||||
# Direct cache variables
|
||||
self.task_queues = {}
|
||||
@@ -798,14 +736,13 @@ class MultiProcessCache(object):
|
||||
self.cachedata = self.create_cachedata()
|
||||
self.cachedata_extras = self.create_cachedata()
|
||||
|
||||
def init_cache(self, d, cache_file_name=None):
|
||||
cachedir = (d.getVar("PERSISTENT_DIR") or
|
||||
d.getVar("CACHE"))
|
||||
def init_cache(self, d):
|
||||
cachedir = (d.getVar("PERSISTENT_DIR", True) or
|
||||
d.getVar("CACHE", True))
|
||||
if cachedir in [None, '']:
|
||||
return
|
||||
bb.utils.mkdirhier(cachedir)
|
||||
self.cachefile = os.path.join(cachedir,
|
||||
cache_file_name or self.__class__.cache_file_name)
|
||||
self.cachefile = os.path.join(cachedir, self.__class__.cache_file_name)
|
||||
logger.debug(1, "Using cache in '%s'", self.cachefile)
|
||||
|
||||
glf = bb.utils.lockfile(self.cachefile + ".lock")
|
||||
@@ -825,11 +762,21 @@ class MultiProcessCache(object):
|
||||
|
||||
self.cachedata = data
|
||||
|
||||
def internSet(self, items):
|
||||
new = set()
|
||||
for i in items:
|
||||
new.add(intern(i))
|
||||
return new
|
||||
|
||||
def compress_keys(self, data):
|
||||
# Override in subclasses if desired
|
||||
return
|
||||
|
||||
def create_cachedata(self):
|
||||
data = [{}]
|
||||
return data
|
||||
|
||||
def save_extras(self):
|
||||
def save_extras(self, d):
|
||||
if not self.cachefile:
|
||||
return
|
||||
|
||||
@@ -859,13 +806,21 @@ class MultiProcessCache(object):
|
||||
if h not in dest[j]:
|
||||
dest[j][h] = source[j][h]
|
||||
|
||||
def save_merge(self):
|
||||
def save_merge(self, d):
|
||||
if not self.cachefile:
|
||||
return
|
||||
|
||||
glf = bb.utils.lockfile(self.cachefile + ".lock")
|
||||
|
||||
data = self.cachedata
|
||||
try:
|
||||
with open(self.cachefile, "rb") as f:
|
||||
p = pickle.Unpickler(f)
|
||||
data, version = p.load()
|
||||
except (IOError, EOFError):
|
||||
data, version = None, None
|
||||
|
||||
if version != self.__class__.CACHE_VERSION:
|
||||
data = self.create_cachedata()
|
||||
|
||||
for f in [y for y in os.listdir(os.path.dirname(self.cachefile)) if y.startswith(os.path.basename(self.cachefile) + '-')]:
|
||||
f = os.path.join(os.path.dirname(self.cachefile), f)
|
||||
@@ -874,18 +829,19 @@ class MultiProcessCache(object):
|
||||
p = pickle.Unpickler(fd)
|
||||
extradata, version = p.load()
|
||||
except (IOError, EOFError):
|
||||
os.unlink(f)
|
||||
continue
|
||||
extradata, version = self.create_cachedata(), None
|
||||
|
||||
if version != self.__class__.CACHE_VERSION:
|
||||
os.unlink(f)
|
||||
continue
|
||||
|
||||
self.merge_data(extradata, data)
|
||||
os.unlink(f)
|
||||
|
||||
self.compress_keys(data)
|
||||
|
||||
with open(self.cachefile, "wb") as f:
|
||||
p = pickle.Pickler(f, -1)
|
||||
p.dump([data, self.__class__.CACHE_VERSION])
|
||||
|
||||
bb.utils.unlockfile(glf)
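
MultiProcessCache is shared between the main process and the parser workers: each process collects new entries in cachedata_extras, save_extras() (not shown in full here) is assumed to write them to a per-process side file next to the main cache, and save_merge() then folds every "<cachefile>-*" side file back into the main pickle under the lock. A rough lifecycle sketch, assuming the newer no-argument save_extras()/save_merge() signatures shown above and a hypothetical subclass:

# Hypothetical subclass and datastore `d`; file names are made up.
from bb.cache import MultiProcessCache

class ExampleCache(MultiProcessCache):
    cache_file_name = "bb_example.dat"
    CACHE_VERSION = 1

cache = ExampleCache()
cache.init_cache(d)                  # d: datastore with PERSISTENT_DIR or CACHE set

# In a parser worker: record new results in the "extras" side dict ...
cache.cachedata_extras[0]['some-key'] = 'some-value'
cache.save_extras()                  # assumed to write a <cachefile>-<suffix> side file

# Back in the main process, once all workers are done:
cache.save_merge()                   # merges every side file into <cachefile>
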
|
||||
|
||||
|
||||
@@ -15,17 +15,22 @@
|
||||
# with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
import glob
|
||||
import operator
|
||||
import os
|
||||
import stat
|
||||
import pickle
|
||||
import bb.utils
|
||||
import logging
|
||||
from bb.cache import MultiProcessCache
|
||||
|
||||
logger = logging.getLogger("BitBake.Cache")
|
||||
|
||||
try:
|
||||
import cPickle as pickle
|
||||
except ImportError:
|
||||
import pickle
|
||||
logger.info("Importing cPickle failed. "
|
||||
"Falling back to a very slow implementation.")
|
||||
|
||||
|
||||
# mtime cache (non-persistent)
|
||||
# based upon the assumption that files do not change during bitbake run
|
||||
class FileMtimeCache(object):
|
||||
@@ -83,52 +88,3 @@ class FileChecksumCache(MultiProcessCache):
|
||||
dest[0][h] = source[0][h]
|
||||
else:
|
||||
dest[0][h] = source[0][h]
|
||||
|
||||
def get_checksums(self, filelist, pn):
|
||||
"""Get checksums for a list of files"""
|
||||
|
||||
def checksum_file(f):
|
||||
try:
|
||||
checksum = self.get_checksum(f)
|
||||
except OSError as e:
|
||||
bb.warn("Unable to get checksum for %s SRC_URI entry %s: %s" % (pn, os.path.basename(f), e))
|
||||
return None
|
||||
return checksum
|
||||
|
||||
def checksum_dir(pth):
|
||||
# Handle directories recursively
|
||||
dirchecksums = []
|
||||
for root, dirs, files in os.walk(pth):
|
||||
for name in files:
|
||||
fullpth = os.path.join(root, name)
|
||||
checksum = checksum_file(fullpth)
|
||||
if checksum:
|
||||
dirchecksums.append((fullpth, checksum))
|
||||
return dirchecksums
|
||||
|
||||
checksums = []
|
||||
for pth in filelist.split():
|
||||
exist = pth.split(":")[1]
|
||||
if exist == "False":
|
||||
continue
|
||||
pth = pth.split(":")[0]
|
||||
if '*' in pth:
|
||||
# Handle globs
|
||||
for f in glob.glob(pth):
|
||||
if os.path.isdir(f):
|
||||
if not os.path.islink(f):
|
||||
checksums.extend(checksum_dir(f))
|
||||
else:
|
||||
checksum = checksum_file(f)
|
||||
if checksum:
|
||||
checksums.append((f, checksum))
|
||||
elif os.path.isdir(pth):
|
||||
if not os.path.islink(pth):
|
||||
checksums.extend(checksum_dir(pth))
|
||||
else:
|
||||
checksum = checksum_file(pth)
|
||||
if checksum:
|
||||
checksums.append((pth, checksum))
|
||||
|
||||
checksums.sort(key=operator.itemgetter(1))
|
||||
return checksums
|
||||
|
||||
@@ -1,39 +1,21 @@
|
||||
"""
|
||||
BitBake code parser
|
||||
|
||||
Parses actual code (i.e. python and shell) for functions and in-line
|
||||
expressions. Used mainly to determine dependencies on other functions
|
||||
and variables within the BitBake metadata. Also provides a cache for
|
||||
this information in order to speed up processing.
|
||||
|
||||
(Not to be confused with the code that parses the metadata itself,
|
||||
see lib/bb/parse/ for that).
|
||||
|
||||
NOTE: if you change how the parsers gather information you will almost
|
||||
certainly need to increment CodeParserCache.CACHE_VERSION below so that
|
||||
any existing codeparser cache gets invalidated. Additionally you'll need
|
||||
to increment __cache_version__ in cache.py in order to ensure that old
|
||||
recipe caches don't trigger "Taskhash mismatch" errors.
|
||||
|
||||
"""
|
||||
|
||||
import ast
|
||||
import sys
|
||||
import codegen
|
||||
import logging
|
||||
import pickle
|
||||
import bb.pysh as pysh
|
||||
import os.path
|
||||
import bb.utils, bb.data
|
||||
import hashlib
|
||||
from itertools import chain
|
||||
from bb.pysh import pyshyacc, pyshlex, sherrors
|
||||
from pysh import pyshyacc, pyshlex, sherrors
|
||||
from bb.cache import MultiProcessCache
|
||||
|
||||
|
||||
logger = logging.getLogger('BitBake.CodeParser')
|
||||
|
||||
def bbhash(s):
|
||||
return hashlib.md5(s.encode("utf-8")).hexdigest()
|
||||
try:
|
||||
import cPickle as pickle
|
||||
except ImportError:
|
||||
import pickle
|
||||
logger.info('Importing cPickle failed. Falling back to a very slow implementation.')
|
||||
|
||||
|
||||
def check_indent(codestr):
|
||||
"""If the code is indented, add a top level piece of code to 'remove' the indentation"""
|
||||
@@ -46,101 +28,14 @@ def check_indent(codestr):
|
||||
return codestr
|
||||
|
||||
if codestr[i-1] == "\t" or codestr[i-1] == " ":
|
||||
if codestr[0] == "\n":
|
||||
# Since we're adding a line, we need to remove one line of any empty padding
|
||||
# to ensure line numbers are correct
|
||||
codestr = codestr[1:]
|
||||
return "if 1:\n" + codestr
|
||||
|
||||
return codestr
|
||||
|
||||
|
||||
# Pickle, in python 2.7.3 at least, handles data duplication badly when
# pickling and unpickling. Combine this with duplicate objects and things
# become a mess.
|
||||
#
|
||||
# When the sets are originally created, python calls intern() on the set keys
|
||||
# which significantly improves memory usage. Sadly the pickle/unpickle process
|
||||
# doesn't call intern() on the keys and results in the same strings being duplicated
|
||||
# in memory. This also means pickle will save the same string multiple times in
|
||||
# the cache file.
|
||||
#
|
||||
# By having shell and python cacheline objects with setstate/getstate, we force
|
||||
# the object creation through our own routine where we can call intern (via internSet).
|
||||
#
|
||||
# We also use hashable frozensets and ensure we use references to these so that
|
||||
# duplicates can be removed, both in memory and in the resulting pickled data.
|
||||
#
|
||||
# By playing these games, the size of the cache file shrinks dramatically
|
||||
# meaning faster load times and the reloaded cache files also consume much less
|
||||
# memory. Smaller cache files, faster load times and lower memory usage is good.
|
||||
#
|
||||
# A custom getstate/setstate using tuples is actually worth 15% cachesize by
|
||||
# avoiding duplication of the attribute names!
|
||||
|
||||
class SetCache(object):
|
||||
def __init__(self):
|
||||
self.setcache = {}
|
||||
|
||||
def internSet(self, items):
|
||||
|
||||
new = []
|
||||
for i in items:
|
||||
new.append(sys.intern(i))
|
||||
s = frozenset(new)
|
||||
h = hash(s)
|
||||
if h in self.setcache:
|
||||
return self.setcache[h]
|
||||
self.setcache[h] = s
|
||||
return s
|
||||
|
||||
codecache = SetCache()
|
||||
|
||||
class pythonCacheLine(object):
|
||||
def __init__(self, refs, execs, contains):
|
||||
self.refs = codecache.internSet(refs)
|
||||
self.execs = codecache.internSet(execs)
|
||||
self.contains = {}
|
||||
for c in contains:
|
||||
self.contains[c] = codecache.internSet(contains[c])
|
||||
|
||||
def __getstate__(self):
|
||||
return (self.refs, self.execs, self.contains)
|
||||
|
||||
def __setstate__(self, state):
|
||||
(refs, execs, contains) = state
|
||||
self.__init__(refs, execs, contains)
|
||||
def __hash__(self):
|
||||
l = (hash(self.refs), hash(self.execs))
|
||||
for c in sorted(self.contains.keys()):
|
||||
l = l + (c, hash(self.contains[c]))
|
||||
return hash(l)
|
||||
def __repr__(self):
|
||||
return " ".join([str(self.refs), str(self.execs), str(self.contains)])
|
||||
|
||||
|
||||
class shellCacheLine(object):
|
||||
def __init__(self, execs):
|
||||
self.execs = codecache.internSet(execs)
|
||||
|
||||
def __getstate__(self):
|
||||
return (self.execs)
|
||||
|
||||
def __setstate__(self, state):
|
||||
(execs) = state
|
||||
self.__init__(execs)
|
||||
def __hash__(self):
|
||||
return hash(self.execs)
|
||||
def __repr__(self):
|
||||
return str(self.execs)
|
||||
|
||||
class CodeParserCache(MultiProcessCache):
|
||||
cache_file_name = "bb_codeparser.dat"
|
||||
# NOTE: you must increment this if you change how the parsers gather information,
|
||||
# so that an existing cache gets invalidated. Additionally you'll need
|
||||
# to increment __cache_version__ in cache.py in order to ensure that old
|
||||
# recipe caches don't trigger "Taskhash mismatch" errors.
|
||||
CACHE_VERSION = 9
|
||||
CACHE_VERSION = 6
|
||||
|
||||
def __init__(self):
|
||||
MultiProcessCache.__init__(self)
|
||||
@@ -149,38 +44,32 @@ class CodeParserCache(MultiProcessCache):
|
||||
self.pythoncacheextras = self.cachedata_extras[0]
|
||||
self.shellcacheextras = self.cachedata_extras[1]
|
||||
|
||||
# To avoid duplication in the codeparser cache, keep
|
||||
# a lookup of hashes of objects we already have
|
||||
self.pythoncachelines = {}
|
||||
self.shellcachelines = {}
|
||||
|
||||
def newPythonCacheLine(self, refs, execs, contains):
|
||||
cacheline = pythonCacheLine(refs, execs, contains)
|
||||
h = hash(cacheline)
|
||||
if h in self.pythoncachelines:
|
||||
return self.pythoncachelines[h]
|
||||
self.pythoncachelines[h] = cacheline
|
||||
return cacheline
|
||||
|
||||
def newShellCacheLine(self, execs):
|
||||
cacheline = shellCacheLine(execs)
|
||||
h = hash(cacheline)
|
||||
if h in self.shellcachelines:
|
||||
return self.shellcachelines[h]
|
||||
self.shellcachelines[h] = cacheline
|
||||
return cacheline
|
||||
|
||||
def init_cache(self, d):
|
||||
# Check if we already have the caches
|
||||
if self.pythoncache:
|
||||
return
|
||||
|
||||
MultiProcessCache.init_cache(self, d)
|
||||
|
||||
# cachedata gets re-assigned in the parent
|
||||
self.pythoncache = self.cachedata[0]
|
||||
self.shellcache = self.cachedata[1]
|
||||
|
||||
def compress_keys(self, data):
|
||||
# When the dicts are originally created, python calls intern() on the set keys
|
||||
# which significantly improves memory usage. Sadly the pickle/unpickle process
|
||||
# doesn't call intern() on the keys and results in the same strings being duplicated
|
||||
# in memory. This also means pickle will save the same string multiple times in
|
||||
# the cache file. By interning the data here, the cache file shrinks dramatically
|
||||
# meaning faster load times and the reloaded cache files also consume much less
|
||||
# memory. This is worth any performance hit from these loops and the use of the
|
||||
# intern() data storage.
|
||||
# Python 3.x may behave better in this area
|
||||
for h in data[0]:
|
||||
data[0][h]["refs"] = self.internSet(data[0][h]["refs"])
|
||||
data[0][h]["execs"] = self.internSet(data[0][h]["execs"])
|
||||
for k in data[0][h]["contains"]:
|
||||
data[0][h]["contains"][k] = self.internSet(data[0][h]["contains"][k])
|
||||
for h in data[1]:
|
||||
data[1][h]["execs"] = self.internSet(data[1][h]["execs"])
|
||||
return
|
||||
|
||||
def create_cachedata(self):
|
||||
data = [{}, {}]
|
||||
return data
|
||||
@@ -190,11 +79,11 @@ codeparsercache = CodeParserCache()
|
||||
def parser_cache_init(d):
|
||||
codeparsercache.init_cache(d)
|
||||
|
||||
def parser_cache_save():
|
||||
codeparsercache.save_extras()
|
||||
def parser_cache_save(d):
|
||||
codeparsercache.save_extras(d)
|
||||
|
||||
def parser_cache_savemerge():
|
||||
codeparsercache.save_merge()
|
||||
def parser_cache_savemerge(d):
|
||||
codeparsercache.save_merge(d)
|
||||
|
||||
Logger = logging.getLoggerClass()
|
||||
class BufferedLogger(Logger):
|
||||
@@ -209,15 +98,12 @@ class BufferedLogger(Logger):
|
||||
|
||||
def flush(self):
|
||||
for record in self.buffer:
|
||||
if self.target.isEnabledFor(record.levelno):
|
||||
self.target.handle(record)
|
||||
self.target.handle(record)
|
||||
self.buffer = []
|
||||
|
||||
class PythonParser():
|
||||
getvars = (".getVar", ".appendVar", ".prependVar")
|
||||
getvarflags = (".getVarFlag", ".appendVarFlag", ".prependVarFlag")
|
||||
containsfuncs = ("bb.utils.contains", "base_contains")
|
||||
containsanyfuncs = ("bb.utils.contains_any", "bb.utils.filter")
|
||||
containsfuncs = ("bb.utils.contains", "base_contains", "oe.utils.contains")
|
||||
execfuncs = ("bb.build.exec_func", "bb.build.exec_task")
|
||||
|
||||
def warn(self, func, arg):
|
||||
@@ -236,37 +122,17 @@ class PythonParser():
|
||||
|
||||
def visit_Call(self, node):
|
||||
name = self.called_node_name(node.func)
|
||||
if name and (name.endswith(self.getvars) or name.endswith(self.getvarflags) or name in self.containsfuncs or name in self.containsanyfuncs):
|
||||
if name and name.endswith(self.getvars) or name in self.containsfuncs:
|
||||
if isinstance(node.args[0], ast.Str):
|
||||
varname = node.args[0].s
|
||||
if name in self.containsfuncs and isinstance(node.args[1], ast.Str):
|
||||
if varname not in self.contains:
|
||||
self.contains[varname] = set()
|
||||
self.contains[varname].add(node.args[1].s)
|
||||
elif name in self.containsanyfuncs and isinstance(node.args[1], ast.Str):
|
||||
if varname not in self.contains:
|
||||
self.contains[varname] = set()
|
||||
self.contains[varname].update(node.args[1].s.split())
|
||||
elif name.endswith(self.getvarflags):
|
||||
if isinstance(node.args[1], ast.Str):
|
||||
self.references.add('%s[%s]' % (varname, node.args[1].s))
|
||||
else:
|
||||
self.warn(node.func, node.args[1])
|
||||
else:
|
||||
self.references.add(varname)
|
||||
else:
|
||||
self.references.add(node.args[0].s)
|
||||
else:
|
||||
self.warn(node.func, node.args[0])
|
||||
elif name and name.endswith(".expand"):
|
||||
if isinstance(node.args[0], ast.Str):
|
||||
value = node.args[0].s
|
||||
d = bb.data.init()
|
||||
parser = d.expandWithRefs(value, self.name)
|
||||
self.references |= parser.references
|
||||
self.execs |= parser.execs
|
||||
for varname in parser.contains:
|
||||
if varname not in self.contains:
|
||||
self.contains[varname] = set()
|
||||
self.contains[varname] |= parser.contains[varname]
|
||||
elif name in self.execfuncs:
|
||||
if isinstance(node.args[0], ast.Str):
|
||||
self.var_execs.add(node.args[0].s)
|
||||
@@ -289,7 +155,6 @@ class PythonParser():
|
||||
break
|
||||
|
||||
def __init__(self, name, log):
|
||||
self.name = name
|
||||
self.var_execs = set()
|
||||
self.contains = {}
|
||||
self.execs = set()
|
||||
@@ -299,31 +164,22 @@ class PythonParser():
|
||||
self.unhandled_message = "in call of %s, argument '%s' is not a string literal"
|
||||
self.unhandled_message = "while parsing %s, %s" % (name, self.unhandled_message)
|
||||
|
||||
def parse_python(self, node, lineno=0, filename="<string>"):
|
||||
if not node or not node.strip():
|
||||
return
|
||||
|
||||
h = bbhash(str(node))
|
||||
def parse_python(self, node):
|
||||
h = hash(str(node))
|
||||
|
||||
if h in codeparsercache.pythoncache:
|
||||
self.references = set(codeparsercache.pythoncache[h].refs)
|
||||
self.execs = set(codeparsercache.pythoncache[h].execs)
|
||||
self.contains = {}
|
||||
for i in codeparsercache.pythoncache[h].contains:
|
||||
self.contains[i] = set(codeparsercache.pythoncache[h].contains[i])
|
||||
self.references = codeparsercache.pythoncache[h]["refs"]
|
||||
self.execs = codeparsercache.pythoncache[h]["execs"]
|
||||
self.contains = codeparsercache.pythoncache[h]["contains"]
|
||||
return
|
||||
|
||||
if h in codeparsercache.pythoncacheextras:
|
||||
self.references = set(codeparsercache.pythoncacheextras[h].refs)
|
||||
self.execs = set(codeparsercache.pythoncacheextras[h].execs)
|
||||
self.contains = {}
|
||||
for i in codeparsercache.pythoncacheextras[h].contains:
|
||||
self.contains[i] = set(codeparsercache.pythoncacheextras[h].contains[i])
|
||||
self.references = codeparsercache.pythoncacheextras[h]["refs"]
|
||||
self.execs = codeparsercache.pythoncacheextras[h]["execs"]
|
||||
self.contains = codeparsercache.pythoncacheextras[h]["contains"]
|
||||
return
|
||||
|
||||
# We can't add to the linenumbers for compile, we can pad to the correct number of blank lines though
|
||||
node = "\n" * int(lineno) + node
|
||||
code = compile(check_indent(str(node)), filename, "exec",
|
||||
code = compile(check_indent(str(node)), "<string>", "exec",
|
||||
ast.PyCF_ONLY_AST)
|
||||
|
||||
for n in ast.walk(code):
|
||||
@@ -332,7 +188,10 @@ class PythonParser():
|
||||
|
||||
self.execs.update(self.var_execs)
|
||||
|
||||
codeparsercache.pythoncacheextras[h] = codeparsercache.newPythonCacheLine(self.references, self.execs, self.contains)
|
||||
codeparsercache.pythoncacheextras[h] = {}
|
||||
codeparsercache.pythoncacheextras[h]["refs"] = self.references
|
||||
codeparsercache.pythoncacheextras[h]["execs"] = self.execs
|
||||
codeparsercache.pythoncacheextras[h]["contains"] = self.contains
|
||||
|
||||
class ShellParser():
|
||||
def __init__(self, name, log):
|
||||
@@ -348,20 +207,21 @@ class ShellParser():
|
||||
commands it executes.
|
||||
"""
|
||||
|
||||
h = bbhash(str(value))
|
||||
h = hash(str(value))
|
||||
|
||||
if h in codeparsercache.shellcache:
|
||||
self.execs = set(codeparsercache.shellcache[h].execs)
|
||||
self.execs = codeparsercache.shellcache[h]["execs"]
|
||||
return self.execs
|
||||
|
||||
if h in codeparsercache.shellcacheextras:
|
||||
self.execs = set(codeparsercache.shellcacheextras[h].execs)
|
||||
self.execs = codeparsercache.shellcacheextras[h]["execs"]
|
||||
return self.execs
|
||||
|
||||
self._parse_shell(value)
|
||||
self.execs = set(cmd for cmd in self.allexecs if cmd not in self.funcdefs)
|
||||
|
||||
codeparsercache.shellcacheextras[h] = codeparsercache.newShellCacheLine(self.execs)
|
||||
codeparsercache.shellcacheextras[h] = {}
|
||||
codeparsercache.shellcacheextras[h]["execs"] = self.execs
|
||||
|
||||
return self.execs
|
||||
|
||||
@@ -371,7 +231,8 @@ class ShellParser():
|
||||
except pyshlex.NeedMore:
|
||||
raise sherrors.ShellSyntaxError("Unexpected EOF")
|
||||
|
||||
self.process_tokens(tokens)
|
||||
for token in tokens:
|
||||
self.process_tokens(token)
|
||||
|
||||
def process_tokens(self, tokens):
|
||||
"""Process a supplied portion of the syntax tree as returned by
|
||||
@@ -417,24 +278,18 @@ class ShellParser():
|
||||
"case_clause": case_clause,
|
||||
}
|
||||
|
||||
def process_token_list(tokens):
|
||||
for token in tokens:
|
||||
if isinstance(token, list):
|
||||
process_token_list(token)
|
||||
continue
|
||||
name, value = token
|
||||
try:
|
||||
more_tokens, words = token_handlers[name](value)
|
||||
except KeyError:
|
||||
raise NotImplementedError("Unsupported token type " + name)
|
||||
for token in tokens:
|
||||
name, value = token
|
||||
try:
|
||||
more_tokens, words = token_handlers[name](value)
|
||||
except KeyError:
|
||||
raise NotImplementedError("Unsupported token type " + name)
|
||||
|
||||
if more_tokens:
|
||||
self.process_tokens(more_tokens)
|
||||
if more_tokens:
|
||||
self.process_tokens(more_tokens)
|
||||
|
||||
if words:
|
||||
self.process_words(words)
|
||||
|
||||
process_token_list(tokens)
|
||||
if words:
|
||||
self.process_words(words)
|
||||
|
||||
def process_words(self, words):
|
||||
"""Process a set of 'words' in pyshyacc parlance, which includes
@@ -28,15 +28,8 @@ and must not trigger events, directly or indirectly.
|
||||
Commands are queued in a CommandQueue
|
||||
"""
|
||||
|
||||
from collections import OrderedDict, defaultdict
|
||||
|
||||
import bb.event
|
||||
import bb.cooker
|
||||
import bb.remotedata
|
||||
|
||||
class DataStoreConnectionHandle(object):
|
||||
def __init__(self, dsindex=0):
|
||||
self.dsindex = dsindex
|
||||
|
||||
class CommandCompleted(bb.event.Event):
|
||||
pass
|
||||
@@ -50,8 +43,6 @@ class CommandFailed(CommandExit):
|
||||
def __init__(self, message):
|
||||
self.error = message
|
||||
CommandExit.__init__(self, 1)
|
||||
def __str__(self):
|
||||
return "Command execution failed: %s" % self.error
|
||||
|
||||
class CommandError(Exception):
|
||||
pass
|
||||
@@ -64,7 +55,6 @@ class Command:
|
||||
self.cooker = cooker
|
||||
self.cmds_sync = CommandsSync()
|
||||
self.cmds_async = CommandsAsync()
|
||||
self.remotedatastores = bb.remotedata.RemoteDatastores(cooker)
|
||||
|
||||
# FIXME Add lock for this
|
||||
self.currentAsyncCommand = None
|
||||
@@ -78,13 +68,10 @@ class Command:
|
||||
if not hasattr(command_method, 'readonly') or False == getattr(command_method, 'readonly'):
|
||||
return None, "Not able to execute not readonly commands in readonly mode"
|
||||
try:
|
||||
self.cooker.process_inotify_updates()
|
||||
if getattr(command_method, 'needconfig', True):
|
||||
self.cooker.updateCacheSync()
|
||||
result = command_method(self, commandline)
|
||||
except CommandError as exc:
|
||||
return None, exc.args[0]
|
||||
except (Exception, SystemExit):
|
||||
except Exception:
|
||||
import traceback
|
||||
return None, traceback.format_exc()
|
||||
else:
|
||||
@@ -99,11 +86,7 @@ class Command:
|
||||
|
||||
def runAsyncCommand(self):
|
||||
try:
|
||||
self.cooker.process_inotify_updates()
|
||||
if self.cooker.state in (bb.cooker.state.error, bb.cooker.state.shutdown, bb.cooker.state.forceshutdown):
|
||||
# updateCache will trigger a shutdown of the parser
|
||||
# and then raise BBHandledException triggering an exit
|
||||
self.cooker.updateCache()
|
||||
if self.cooker.state == bb.cooker.state.error:
|
||||
return False
|
||||
if self.currentAsyncCommand is not None:
|
||||
(command, options) = self.currentAsyncCommand
|
||||
@@ -122,7 +105,7 @@ class Command:
|
||||
return False
|
||||
except SystemExit as exc:
|
||||
arg = exc.args[0]
|
||||
if isinstance(arg, str):
|
||||
if isinstance(arg, basestring):
|
||||
self.finishAsyncCommand(arg)
|
||||
else:
|
||||
self.finishAsyncCommand("Exited with %s" % arg)
|
||||
@@ -137,23 +120,14 @@ class Command:
|
||||
|
||||
def finishAsyncCommand(self, msg=None, code=None):
|
||||
if msg or msg == "":
|
||||
bb.event.fire(CommandFailed(msg), self.cooker.data)
|
||||
bb.event.fire(CommandFailed(msg), self.cooker.event_data)
|
||||
elif code:
|
||||
bb.event.fire(CommandExit(code), self.cooker.data)
|
||||
bb.event.fire(CommandExit(code), self.cooker.event_data)
|
||||
else:
|
||||
bb.event.fire(CommandCompleted(), self.cooker.data)
|
||||
bb.event.fire(CommandCompleted(), self.cooker.event_data)
|
||||
self.currentAsyncCommand = None
|
||||
self.cooker.finishcommand()
|
||||
|
||||
def reset(self):
|
||||
self.remotedatastores = bb.remotedata.RemoteDatastores(self.cooker)
|
||||
|
||||
def split_mc_pn(pn):
|
||||
if pn.startswith("multiconfig:"):
|
||||
_, mc, pn = pn.split(":", 2)
|
||||
return (mc, pn)
|
||||
return ('', pn)
|
||||
|
||||
class CommandsSync:
|
||||
"""
|
||||
A class of synchronous commands
|
||||
@@ -200,19 +174,8 @@ class CommandsSync:
|
||||
"""
|
||||
varname = params[0]
|
||||
value = str(params[1])
|
||||
command.cooker.extraconfigdata[varname] = value
|
||||
command.cooker.data.setVar(varname, value)
|
||||
|
||||
def getSetVariable(self, command, params):
|
||||
"""
|
||||
Read the value of a variable from data and set it into the datastore
|
||||
which effectively expands and locks the value.
|
||||
"""
|
||||
varname = params[0]
|
||||
result = self.getVariable(command, params)
|
||||
command.cooker.data.setVar(varname, result)
|
||||
return result
|
||||
|
||||
def setConfig(self, command, params):
|
||||
"""
|
||||
Set the value of variable in configuration
|
||||
@@ -238,17 +201,54 @@ class CommandsSync:
|
||||
postfiles = params[1].split()
|
||||
command.cooker.configuration.prefile = prefiles
|
||||
command.cooker.configuration.postfile = postfiles
|
||||
setPrePostConfFiles.needconfig = False
|
||||
|
||||
def getCpuCount(self, command, params):
|
||||
"""
|
||||
Get the CPU count on the bitbake server
|
||||
"""
|
||||
return bb.utils.cpu_count()
|
||||
getCpuCount.readonly = True
|
||||
|
||||
def matchFile(self, command, params):
|
||||
fMatch = params[0]
|
||||
return command.cooker.matchFile(fMatch)
|
||||
matchFile.needconfig = False
|
||||
|
||||
def getUIHandlerNum(self, command, params):
|
||||
return bb.event.get_uihandler()
|
||||
getUIHandlerNum.needconfig = False
|
||||
getUIHandlerNum.readonly = True
|
||||
def generateNewImage(self, command, params):
|
||||
image = params[0]
|
||||
base_image = params[1]
|
||||
package_queue = params[2]
|
||||
timestamp = params[3]
|
||||
description = params[4]
|
||||
return command.cooker.generateNewImage(image, base_image,
|
||||
package_queue, timestamp, description)
|
||||
|
||||
def ensureDir(self, command, params):
|
||||
directory = params[0]
|
||||
bb.utils.mkdirhier(directory)
|
||||
|
||||
def setVarFile(self, command, params):
|
||||
"""
|
||||
Save a variable in a file; used for saving in a configuration file
|
||||
"""
|
||||
var = params[0]
|
||||
val = params[1]
|
||||
default_file = params[2]
|
||||
op = params[3]
|
||||
command.cooker.modifyConfigurationVar(var, val, default_file, op)
|
||||
|
||||
def removeVarFile(self, command, params):
|
||||
"""
|
||||
Remove a variable declaration from a file
|
||||
"""
|
||||
var = params[0]
|
||||
command.cooker.removeConfigurationVar(var)
|
||||
|
||||
def createConfigFile(self, command, params):
|
||||
"""
|
||||
Create an extra configuration file
|
||||
"""
|
||||
name = params[0]
|
||||
command.cooker.createConfigFile(name)
|
||||
|
||||
def setEventMask(self, command, params):
|
||||
handlerNum = params[0]
|
||||
@@ -256,8 +256,6 @@ class CommandsSync:
|
||||
debug_domains = params[2]
|
||||
mask = params[3]
|
||||
return bb.event.set_UIHmask(handlerNum, llevel, debug_domains, mask)
|
||||
setEventMask.needconfig = False
|
||||
setEventMask.readonly = True
|
||||
|
||||
def setFeatures(self, command, params):
|
||||
"""
|
||||
@@ -265,315 +263,11 @@ class CommandsSync:
|
||||
"""
|
||||
features = params[0]
|
||||
command.cooker.setFeatures(features)
|
||||
setFeatures.needconfig = False
|
||||
|
||||
# although we change the internal state of the cooker, this is transparent since
|
||||
# we always take and leave the cooker in state.initial
|
||||
setFeatures.readonly = True
|
||||
|
||||
def updateConfig(self, command, params):
|
||||
options = params[0]
|
||||
environment = params[1]
|
||||
cmdline = params[2]
|
||||
command.cooker.updateConfigOpts(options, environment, cmdline)
|
||||
updateConfig.needconfig = False
|
||||
|
||||
def parseConfiguration(self, command, params):
|
||||
"""Instruct bitbake to parse its configuration
|
||||
NOTE: it is only necessary to call this if you aren't calling any normal action
|
||||
(otherwise parsing is taken care of automatically)
|
||||
"""
|
||||
command.cooker.parseConfiguration()
|
||||
parseConfiguration.needconfig = False
|
||||
|
||||
def getLayerPriorities(self, command, params):
|
||||
command.cooker.parseConfiguration()
|
||||
ret = []
|
||||
# regex objects cannot be marshalled by xmlrpc
|
||||
for collection, pattern, regex, pri in command.cooker.bbfile_config_priorities:
|
||||
ret.append((collection, pattern, regex.pattern, pri))
|
||||
return ret
|
||||
getLayerPriorities.readonly = True
|
||||
|
||||
def getRecipes(self, command, params):
|
||||
try:
|
||||
mc = params[0]
|
||||
except IndexError:
|
||||
mc = ''
|
||||
return list(command.cooker.recipecaches[mc].pkg_pn.items())
|
||||
getRecipes.readonly = True
|
||||
|
||||
def getRecipeDepends(self, command, params):
|
||||
try:
|
||||
mc = params[0]
|
||||
except IndexError:
|
||||
mc = ''
|
||||
return list(command.cooker.recipecaches[mc].deps.items())
|
||||
getRecipeDepends.readonly = True
|
||||
|
||||
def getRecipeVersions(self, command, params):
|
||||
try:
|
||||
mc = params[0]
|
||||
except IndexError:
|
||||
mc = ''
|
||||
return command.cooker.recipecaches[mc].pkg_pepvpr
|
||||
getRecipeVersions.readonly = True
|
||||
|
||||
def getRecipeProvides(self, command, params):
|
||||
try:
|
||||
mc = params[0]
|
||||
except IndexError:
|
||||
mc = ''
|
||||
return command.cooker.recipecaches[mc].fn_provides
|
||||
getRecipeProvides.readonly = True
|
||||
|
||||
def getRecipePackages(self, command, params):
|
||||
try:
|
||||
mc = params[0]
|
||||
except IndexError:
|
||||
mc = ''
|
||||
return command.cooker.recipecaches[mc].packages
|
||||
getRecipePackages.readonly = True
|
||||
|
||||
def getRecipePackagesDynamic(self, command, params):
|
||||
try:
|
||||
mc = params[0]
|
||||
except IndexError:
|
||||
mc = ''
|
||||
return command.cooker.recipecaches[mc].packages_dynamic
|
||||
getRecipePackagesDynamic.readonly = True
|
||||
|
||||
def getRProviders(self, command, params):
|
||||
try:
|
||||
mc = params[0]
|
||||
except IndexError:
|
||||
mc = ''
|
||||
return command.cooker.recipecaches[mc].rproviders
|
||||
getRProviders.readonly = True
|
||||
|
||||
def getRuntimeDepends(self, command, params):
|
||||
ret = []
|
||||
try:
|
||||
mc = params[0]
|
||||
except IndexError:
|
||||
mc = ''
|
||||
rundeps = command.cooker.recipecaches[mc].rundeps
|
||||
for key, value in rundeps.items():
|
||||
if isinstance(value, defaultdict):
|
||||
value = dict(value)
|
||||
ret.append((key, value))
|
||||
return ret
|
||||
getRuntimeDepends.readonly = True
|
||||
|
||||
def getRuntimeRecommends(self, command, params):
|
||||
ret = []
|
||||
try:
|
||||
mc = params[0]
|
||||
except IndexError:
|
||||
mc = ''
|
||||
runrecs = command.cooker.recipecaches[mc].runrecs
|
||||
for key, value in runrecs.items():
|
||||
if isinstance(value, defaultdict):
|
||||
value = dict(value)
|
||||
ret.append((key, value))
|
||||
return ret
|
||||
getRuntimeRecommends.readonly = True
|
||||
|
||||
def getRecipeInherits(self, command, params):
|
||||
try:
|
||||
mc = params[0]
|
||||
except IndexError:
|
||||
mc = ''
|
||||
return command.cooker.recipecaches[mc].inherits
|
||||
getRecipeInherits.readonly = True
|
||||
|
||||
def getBbFilePriority(self, command, params):
|
||||
try:
|
||||
mc = params[0]
|
||||
except IndexError:
|
||||
mc = ''
|
||||
return command.cooker.recipecaches[mc].bbfile_priority
|
||||
getBbFilePriority.readonly = True
|
||||
|
||||
def getDefaultPreference(self, command, params):
|
||||
try:
|
||||
mc = params[0]
|
||||
except IndexError:
|
||||
mc = ''
|
||||
return command.cooker.recipecaches[mc].pkg_dp
|
||||
getDefaultPreference.readonly = True
|
||||
|
||||
def getSkippedRecipes(self, command, params):
|
||||
# Return list sorted by reverse priority order
|
||||
import bb.cache
|
||||
skipdict = OrderedDict(sorted(command.cooker.skiplist.items(),
|
||||
key=lambda x: (-command.cooker.collection.calc_bbfile_priority(bb.cache.virtualfn2realfn(x[0])[0]), x[0])))
|
||||
return list(skipdict.items())
|
||||
getSkippedRecipes.readonly = True
|
||||
|
||||
def getOverlayedRecipes(self, command, params):
|
||||
return list(command.cooker.collection.overlayed.items())
|
||||
getOverlayedRecipes.readonly = True
|
||||
|
||||
def getFileAppends(self, command, params):
|
||||
fn = params[0]
|
||||
return command.cooker.collection.get_file_appends(fn)
|
||||
getFileAppends.readonly = True
|
||||
|
||||
def getAllAppends(self, command, params):
|
||||
return command.cooker.collection.bbappends
|
||||
getAllAppends.readonly = True
|
||||
|
||||
def findProviders(self, command, params):
|
||||
return command.cooker.findProviders()
|
||||
findProviders.readonly = True
|
||||
|
||||
def findBestProvider(self, command, params):
|
||||
(mc, pn) = split_mc_pn(params[0])
|
||||
return command.cooker.findBestProvider(pn, mc)
|
||||
findBestProvider.readonly = True
|
||||
|
||||
def allProviders(self, command, params):
|
||||
try:
|
||||
mc = params[0]
|
||||
except IndexError:
|
||||
mc = ''
|
||||
return list(bb.providers.allProviders(command.cooker.recipecaches[mc]).items())
|
||||
allProviders.readonly = True
|
||||
|
||||
def getRuntimeProviders(self, command, params):
|
||||
rprovide = params[0]
|
||||
try:
|
||||
mc = params[1]
|
||||
except IndexError:
|
||||
mc = ''
|
||||
all_p = bb.providers.getRuntimeProviders(command.cooker.recipecaches[mc], rprovide)
|
||||
if all_p:
|
||||
best = bb.providers.filterProvidersRunTime(all_p, rprovide,
|
||||
command.cooker.data,
|
||||
command.cooker.recipecaches[mc])[0][0]
|
||||
else:
|
||||
best = None
|
||||
return all_p, best
|
||||
getRuntimeProviders.readonly = True
|
||||
|
||||
def dataStoreConnectorFindVar(self, command, params):
|
||||
dsindex = params[0]
|
||||
name = params[1]
|
||||
datastore = command.remotedatastores[dsindex]
|
||||
value, overridedata = datastore._findVar(name)
|
||||
|
||||
if value:
|
||||
content = value.get('_content', None)
|
||||
if isinstance(content, bb.data_smart.DataSmart):
|
||||
# Value is a datastore (e.g. BB_ORIGENV) - need to handle this carefully
|
||||
idx = command.remotedatastores.check_store(content, True)
|
||||
return {'_content': DataStoreConnectionHandle(idx),
|
||||
'_connector_origtype': 'DataStoreConnectionHandle',
|
||||
'_connector_overrides': overridedata}
|
||||
elif isinstance(content, set):
|
||||
return {'_content': list(content),
|
||||
'_connector_origtype': 'set',
|
||||
'_connector_overrides': overridedata}
|
||||
else:
|
||||
value['_connector_overrides'] = overridedata
|
||||
else:
|
||||
value = {}
|
||||
value['_connector_overrides'] = overridedata
|
||||
return value
|
||||
dataStoreConnectorFindVar.readonly = True
|
||||
|
||||
def dataStoreConnectorGetKeys(self, command, params):
|
||||
dsindex = params[0]
|
||||
datastore = command.remotedatastores[dsindex]
|
||||
return list(datastore.keys())
|
||||
dataStoreConnectorGetKeys.readonly = True
|
||||
|
||||
def dataStoreConnectorGetVarHistory(self, command, params):
|
||||
dsindex = params[0]
|
||||
name = params[1]
|
||||
datastore = command.remotedatastores[dsindex]
|
||||
return datastore.varhistory.variable(name)
|
||||
dataStoreConnectorGetVarHistory.readonly = True
|
||||
|
||||
def dataStoreConnectorExpandPythonRef(self, command, params):
|
||||
config_data_dict = params[0]
|
||||
varname = params[1]
|
||||
expr = params[2]
|
||||
|
||||
config_data = command.remotedatastores.receive_datastore(config_data_dict)
|
||||
|
||||
varparse = bb.data_smart.VariableParse(varname, config_data)
|
||||
return varparse.python_sub(expr)
|
||||
|
||||
def dataStoreConnectorRelease(self, command, params):
|
||||
dsindex = params[0]
|
||||
if dsindex <= 0:
|
||||
raise CommandError('dataStoreConnectorRelease: invalid index %d' % dsindex)
|
||||
command.remotedatastores.release(dsindex)
|
||||
|
||||
def dataStoreConnectorSetVarFlag(self, command, params):
|
||||
dsindex = params[0]
|
||||
name = params[1]
|
||||
flag = params[2]
|
||||
value = params[3]
|
||||
datastore = command.remotedatastores[dsindex]
|
||||
datastore.setVarFlag(name, flag, value)
|
||||
|
||||
def dataStoreConnectorDelVar(self, command, params):
|
||||
dsindex = params[0]
|
||||
name = params[1]
|
||||
datastore = command.remotedatastores[dsindex]
|
||||
if len(params) > 2:
|
||||
flag = params[2]
|
||||
datastore.delVarFlag(name, flag)
|
||||
else:
|
||||
datastore.delVar(name)
|
||||
|
||||
def dataStoreConnectorRenameVar(self, command, params):
|
||||
dsindex = params[0]
|
||||
name = params[1]
|
||||
newname = params[2]
|
||||
datastore = command.remotedatastores[dsindex]
|
||||
datastore.renameVar(name, newname)
|
||||
|
||||
def parseRecipeFile(self, command, params):
|
||||
"""
|
||||
Parse the specified recipe file (with or without bbappends)
|
||||
and return a datastore object representing the environment
|
||||
for the recipe.
|
||||
"""
|
||||
fn = params[0]
|
||||
appends = params[1]
|
||||
appendlist = params[2]
|
||||
if len(params) > 3:
|
||||
config_data_dict = params[3]
|
||||
config_data = command.remotedatastores.receive_datastore(config_data_dict)
|
||||
else:
|
||||
config_data = None
|
||||
|
||||
if appends:
|
||||
if appendlist is not None:
|
||||
appendfiles = appendlist
|
||||
else:
|
||||
appendfiles = command.cooker.collection.get_file_appends(fn)
|
||||
else:
|
||||
appendfiles = []
|
||||
# We are calling bb.cache locally here rather than on the server,
|
||||
# but that's OK because it doesn't actually need anything from
|
||||
# the server barring the global datastore (which we have a remote
|
||||
# version of)
|
||||
if config_data:
|
||||
# We have to use a different function here if we're passing in a datastore
|
||||
# NOTE: we took a copy above, so we don't do it here again
|
||||
envdata = bb.cache.parse_recipe(config_data, fn, appendfiles)['']
|
||||
else:
|
||||
# Use the standard path
|
||||
parser = bb.cache.NoCache(command.cooker.databuilder)
|
||||
envdata = parser.loadDataFull(fn, appendfiles)
|
||||
idx = command.remotedatastores.store(envdata)
|
||||
return DataStoreConnectionHandle(idx)
|
||||
parseRecipeFile.readonly = True
|
||||
|
||||
class CommandsAsync:
|
||||
"""
|
||||
A class of asynchronous commands
|
||||
@@ -587,15 +281,8 @@ class CommandsAsync:
|
||||
"""
|
||||
bfile = params[0]
|
||||
task = params[1]
|
||||
if len(params) > 2:
|
||||
internal = params[2]
|
||||
else:
|
||||
internal = False
|
||||
|
||||
if internal:
|
||||
command.cooker.buildFileInternal(bfile, task, fireevents=False, quietlog=True)
|
||||
else:
|
||||
command.cooker.buildFile(bfile, task)
|
||||
command.cooker.buildFile(bfile, task)
|
||||
buildFile.needcache = False
|
||||
|
||||
def buildTargets(self, command, params):
|
||||
@@ -645,6 +332,17 @@ class CommandsAsync:
|
||||
command.finishAsyncCommand()
|
||||
generateTargetsTree.needcache = True
|
||||
|
||||
def findCoreBaseFiles(self, command, params):
|
||||
"""
|
||||
Find certain files in COREBASE directory. i.e. Layers
|
||||
"""
|
||||
subdir = params[0]
|
||||
filename = params[1]
|
||||
|
||||
command.cooker.findCoreBaseFiles(subdir, filename)
|
||||
command.finishAsyncCommand()
|
||||
findCoreBaseFiles.needcache = False
|
||||
|
||||
def findConfigFiles(self, command, params):
|
||||
"""
|
||||
Find config files which provide appropriate values
|
||||
@@ -744,22 +442,3 @@ class CommandsAsync:
|
||||
command.finishAsyncCommand()
|
||||
resetCooker.needcache = False
|
||||
|
||||
def clientComplete(self, command, params):
|
||||
"""
|
||||
Do the right thing when the controlling client exits
|
||||
"""
|
||||
command.cooker.clientComplete()
|
||||
command.finishAsyncCommand()
|
||||
clientComplete.needcache = False
|
||||
|
||||
def findSigInfo(self, command, params):
|
||||
"""
|
||||
Find signature info files via the signature generator
|
||||
"""
|
||||
pn = params[0]
|
||||
taskname = params[1]
|
||||
sigs = params[2]
|
||||
res = bb.siggen.find_siginfo(pn, taskname, sigs, command.cooker.data)
|
||||
bb.event.fire(bb.event.FindSigInfoResult(res), command.cooker.data)
|
||||
command.finishAsyncCommand()
|
||||
findSigInfo.needcache = False
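
One pattern worth calling out in the command.py hunks above: per-command behaviour (readonly, needconfig, needcache) is attached as plain function attributes, and the dispatcher checks them with hasattr()/getattr() before running anything. A compressed sketch of that dispatch follows, with a hypothetical CommandsDemo class standing in for CommandsSync/CommandsAsync.

class CommandsDemo:
    """Hypothetical command container; only the attribute flags mirror the diff."""
    def getCpuCount(self, params):
        import multiprocessing
        return multiprocessing.cpu_count()
    getCpuCount.readonly = True            # safe to run while the server is read-only

    def setVariable(self, params):
        name, value = params
        self.store = getattr(self, "store", {})
        self.store[name] = value
    # no .readonly attribute, so this counts as a writing command

def run_command(cmds, name, params, readonly_mode=False):
    method = getattr(cmds, name)
    if readonly_mode and not getattr(method, "readonly", False):
        return None, "Not able to execute not readonly commands in readonly mode"
    try:
        return method(params), None
    except Exception:
        import traceback
        return None, traceback.format_exc()

# usage:
#   cmds = CommandsDemo()
#   run_command(cmds, "getCpuCount", [], readonly_mode=True)            -> (N, None)
#   run_command(cmds, "setVariable", ["FOO", "1"], readonly_mode=True)  -> (None, "Not able ...")
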
File diff suppressed because it is too large
@@ -22,11 +22,9 @@
|
||||
# with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import os, sys
|
||||
from functools import wraps
|
||||
import logging
|
||||
import bb
|
||||
from bb import data
|
||||
import bb.parse
|
||||
@@ -35,16 +33,20 @@ logger = logging.getLogger("BitBake")
|
||||
parselog = logging.getLogger("BitBake.Parsing")
|
||||
|
||||
class ConfigParameters(object):
|
||||
def __init__(self, argv=sys.argv):
|
||||
self.options, targets = self.parseCommandLine(argv)
|
||||
def __init__(self):
|
||||
self.options, targets = self.parseCommandLine()
|
||||
self.environment = self.parseEnvironment()
|
||||
|
||||
self.options.pkgs_to_build = targets or []
|
||||
|
||||
self.options.tracking = False
|
||||
if hasattr(self.options, "show_environment") and self.options.show_environment:
|
||||
self.options.tracking = True
|
||||
|
||||
for key, val in self.options.__dict__.items():
|
||||
setattr(self, key, val)
|
||||
|
||||
def parseCommandLine(self, argv=sys.argv):
|
||||
def parseCommandLine(self):
|
||||
raise Exception("Caller must implement commandline option parsing")
|
||||
|
||||
def parseEnvironment(self):
|
||||
@@ -61,24 +63,12 @@ class ConfigParameters(object):
|
||||
raise Exception("Unable to set configuration option 'cmd' on the server: %s" % error)
|
||||
|
||||
if not self.options.pkgs_to_build:
|
||||
bbpkgs, error = server.runCommand(["getVariable", "BBTARGETS"])
|
||||
bbpkgs, error = server.runCommand(["getVariable", "BBPKGS"])
|
||||
if error:
|
||||
raise Exception("Unable to get the value of BBTARGETS from the server: %s" % error)
|
||||
raise Exception("Unable to get the value of BBPKGS from the server: %s" % error)
|
||||
if bbpkgs:
|
||||
self.options.pkgs_to_build.extend(bbpkgs.split())
|
||||
|
||||
def updateToServer(self, server, environment):
|
||||
options = {}
|
||||
for o in ["abort", "force", "invalidate_stamp",
|
||||
"verbose", "debug", "dry_run", "dump_signatures",
|
||||
"debug_domains", "extra_assume_provided", "profile",
|
||||
"prefile", "postfile", "server_timeout"]:
|
||||
options[o] = getattr(self.options, o)
|
||||
|
||||
ret, error = server.runCommand(["updateConfig", options, environment, sys.argv])
|
||||
if error:
|
||||
raise Exception("Unable to update the server configuration with local parameters: %s" % error)
|
||||
|
||||
def parseActions(self):
|
||||
# Parse any commandline into actions
|
||||
action = {'action':None, 'msg':None}
|
||||
@@ -133,18 +123,11 @@ class CookerConfiguration(object):
|
||||
self.force = False
|
||||
self.profile = False
|
||||
self.nosetscene = False
|
||||
self.setsceneonly = False
|
||||
self.invalidate_stamp = False
|
||||
self.dump_signatures = []
|
||||
self.dry_run = False
|
||||
self.tracking = False
|
||||
self.xmlrpcinterface = []
|
||||
self.server_timeout = None
|
||||
self.writeeventlog = False
|
||||
self.server_only = False
|
||||
self.limited_deps = False
|
||||
self.runall = []
|
||||
self.runonly = []
|
||||
self.interface = []
|
||||
|
||||
self.env = {}
|
||||
|
||||
@@ -153,6 +136,7 @@ class CookerConfiguration(object):
|
||||
if key in parameters.options.__dict__:
|
||||
setattr(self, key, parameters.options.__dict__[key])
|
||||
self.env = parameters.environment.copy()
|
||||
self.tracking = parameters.tracking
|
||||
|
||||
def setServerRegIdleCallback(self, srcb):
|
||||
self.server_register_idlecallback = srcb
|
||||
@@ -168,7 +152,7 @@ class CookerConfiguration(object):
|
||||
|
||||
def __setstate__(self,state):
|
||||
for k in state:
|
||||
setattr(self, k, state[k])
|
||||
setattr(self, k, state[k])
|
||||
|
||||
|
||||
def catch_parse_error(func):
|
||||
@@ -177,26 +161,11 @@ def catch_parse_error(func):
|
||||
def wrapped(fn, *args):
|
||||
try:
|
||||
return func(fn, *args)
|
||||
except IOError as exc:
|
||||
except (IOError, bb.parse.ParseError, bb.data_smart.ExpansionError) as exc:
|
||||
import traceback
|
||||
parselog.critical(traceback.format_exc())
|
||||
parselog.critical( traceback.format_exc())
|
||||
parselog.critical("Unable to parse %s: %s" % (fn, exc))
|
||||
sys.exit(1)
|
||||
except bb.data_smart.ExpansionError as exc:
|
||||
import traceback
|
||||
|
||||
bbdir = os.path.dirname(__file__) + os.sep
|
||||
exc_class, exc, tb = sys.exc_info()
|
||||
for tb in iter(lambda: tb.tb_next, None):
|
||||
# Skip frames in bitbake itself, we only want the metadata
|
||||
fn, _, _, _ = traceback.extract_tb(tb, 1)[0]
|
||||
if not fn.startswith(bbdir):
|
||||
break
|
||||
parselog.critical("Unable to parse %s" % fn, exc_info=(exc_class, exc, tb))
|
||||
sys.exit(1)
|
||||
except bb.parse.ParseError as exc:
|
||||
parselog.critical(str(exc))
|
||||
sys.exit(1)
|
||||
return wrapped
|
||||
|
||||
@catch_parse_error
|
||||
@@ -210,7 +179,7 @@ def _inherit(bbclass, data):
|
||||
|
||||
def findConfigFile(configfile, data):
|
||||
search = []
|
||||
bbpath = data.getVar("BBPATH")
|
||||
bbpath = data.getVar("BBPATH", True)
|
||||
if bbpath:
|
||||
for i in bbpath.split(":"):
|
||||
search.append(os.path.join(i, "conf", configfile))
|
||||
@@ -225,27 +194,6 @@ def findConfigFile(configfile, data):
|
||||
|
||||
return None
|
||||
|
||||
#
|
||||
# We search for a conf/bblayers.conf under an entry in BBPATH or in cwd working
|
||||
# up to /. If that fails, we search for a conf/bitbake.conf in BBPATH.
|
||||
#
|
||||
|
||||
def findTopdir():
|
||||
d = bb.data.init()
|
||||
bbpath = None
|
||||
if 'BBPATH' in os.environ:
|
||||
bbpath = os.environ['BBPATH']
|
||||
d.setVar('BBPATH', bbpath)
|
||||
|
||||
layerconf = findConfigFile("bblayers.conf", d)
|
||||
if layerconf:
|
||||
return os.path.dirname(os.path.dirname(layerconf))
|
||||
if bbpath:
|
||||
bitbakeconf = bb.utils.which(bbpath, "conf/bitbake.conf")
|
||||
if bitbakeconf:
|
||||
return os.path.dirname(os.path.dirname(bitbakeconf))
|
||||
return None
|
||||
|
||||
class CookerDataBuilder(object):
|
||||
|
||||
def __init__(self, cookercfg, worker = False):
|
||||
@@ -256,9 +204,9 @@ class CookerDataBuilder(object):
|
||||
|
||||
bb.utils.set_context(bb.utils.clean_context())
|
||||
bb.event.set_class_handlers(bb.event.clean_class_handlers())
|
||||
self.basedata = bb.data.init()
|
||||
self.data = bb.data.init()
|
||||
if self.tracking:
|
||||
self.basedata.enableTracking()
|
||||
self.data.enableTracking()
|
||||
|
||||
# Keep a datastore of the initial environment variables and their
|
||||
# values from when BitBake was launched to enable child processes
|
||||
@@ -269,77 +217,27 @@ class CookerDataBuilder(object):
|
||||
self.savedenv.setVar(k, cookercfg.env[k])
|
||||
|
||||
filtered_keys = bb.utils.approved_variables()
|
||||
bb.data.inheritFromOS(self.basedata, self.savedenv, filtered_keys)
|
||||
self.basedata.setVar("BB_ORIGENV", self.savedenv)
|
||||
|
||||
bb.data.inheritFromOS(self.data, self.savedenv, filtered_keys)
|
||||
self.data.setVar("BB_ORIGENV", self.savedenv)
|
||||
|
||||
if worker:
|
||||
self.basedata.setVar("BB_WORKERCONTEXT", "1")
|
||||
|
||||
self.data = self.basedata
|
||||
self.mcdata = {}
|
||||
self.data.setVar("BB_WORKERCONTEXT", "1")
|
||||
|
||||
def parseBaseConfiguration(self):
|
||||
try:
|
||||
bb.parse.init_parser(self.basedata)
|
||||
self.data = self.parseConfigurationFiles(self.prefiles, self.postfiles)
|
||||
|
||||
if self.data.getVar("BB_WORKERCONTEXT", False) is None:
|
||||
bb.fetch.fetcher_init(self.data)
|
||||
bb.codeparser.parser_cache_init(self.data)
|
||||
|
||||
bb.event.fire(bb.event.ConfigParsed(), self.data)
|
||||
|
||||
reparse_cnt = 0
|
||||
while self.data.getVar("BB_INVALIDCONF", False) is True:
|
||||
if reparse_cnt > 20:
|
||||
logger.error("Configuration has been re-parsed over 20 times, "
|
||||
"breaking out of the loop...")
|
||||
raise Exception("Too deep config re-parse loop. Check locations where "
|
||||
"BB_INVALIDCONF is being set (ConfigParsed event handlers)")
|
||||
self.data.setVar("BB_INVALIDCONF", False)
|
||||
self.data = self.parseConfigurationFiles(self.prefiles, self.postfiles)
|
||||
reparse_cnt += 1
|
||||
bb.event.fire(bb.event.ConfigParsed(), self.data)
|
||||
|
||||
bb.parse.init_parser(self.data)
|
||||
self.data_hash = self.data.get_hash()
|
||||
self.mcdata[''] = self.data
|
||||
|
||||
multiconfig = (self.data.getVar("BBMULTICONFIG") or "").split()
|
||||
for config in multiconfig:
|
||||
mcdata = self.parseConfigurationFiles(self.prefiles, self.postfiles, config)
|
||||
bb.event.fire(bb.event.ConfigParsed(), mcdata)
|
||||
self.mcdata[config] = mcdata
|
||||
if multiconfig:
|
||||
bb.event.fire(bb.event.MultiConfigParsed(self.mcdata), self.data)
|
||||
|
||||
except (SyntaxError, bb.BBHandledException):
|
||||
raise bb.BBHandledException
|
||||
except bb.data_smart.ExpansionError as e:
|
||||
logger.error(str(e))
|
||||
raise bb.BBHandledException
|
||||
self.parseConfigurationFiles(self.prefiles, self.postfiles)
|
||||
except SyntaxError:
|
||||
sys.exit(1)
|
||||
except Exception:
|
||||
logger.exception("Error parsing configuration files")
|
||||
raise bb.BBHandledException
|
||||
|
||||
# Create a copy so we can reset at a later date when UIs disconnect
|
||||
self.origdata = self.data
|
||||
self.data = bb.data.createCopy(self.origdata)
|
||||
self.mcdata[''] = self.data
|
||||
|
||||
def reset(self):
|
||||
# We may not have run parseBaseConfiguration() yet
|
||||
if not hasattr(self, 'origdata'):
|
||||
return
|
||||
self.data = bb.data.createCopy(self.origdata)
|
||||
self.mcdata[''] = self.data
|
||||
sys.exit(1)
|
||||
|
||||
def _findLayerConf(self, data):
|
||||
return findConfigFile("bblayers.conf", data)
|
||||
|
||||
def parseConfigurationFiles(self, prefiles, postfiles, mc = "default"):
|
||||
data = bb.data.createCopy(self.basedata)
|
||||
data.setVar("BB_CURRENT_MC", mc)
|
||||
def parseConfigurationFiles(self, prefiles, postfiles):
|
||||
data = self.data
|
||||
bb.parse.init_parser(data)
|
||||
|
||||
# Parse files for loading *before* bitbake.conf and any includes
|
||||
for f in prefiles:
|
||||
@@ -353,51 +251,18 @@ class CookerDataBuilder(object):
|
||||
data.setVar("TOPDIR", os.path.dirname(os.path.dirname(layerconf)))
|
||||
data = parse_config_file(layerconf, data)
|
||||
|
||||
layers = (data.getVar('BBLAYERS') or "").split()
|
||||
layers = (data.getVar('BBLAYERS', True) or "").split()
|
||||
|
||||
data = bb.data.createCopy(data)
|
||||
approved = bb.utils.approved_variables()
|
||||
for layer in layers:
|
||||
if not os.path.isdir(layer):
|
||||
parselog.critical("Layer directory '%s' does not exist! "
|
||||
"Please check BBLAYERS in %s" % (layer, layerconf))
|
||||
sys.exit(1)
|
||||
parselog.debug(2, "Adding layer %s", layer)
|
||||
if 'HOME' in approved and '~' in layer:
|
||||
layer = os.path.expanduser(layer)
|
||||
if layer.endswith('/'):
|
||||
layer = layer.rstrip('/')
|
||||
data.setVar('LAYERDIR', layer)
|
||||
data.setVar('LAYERDIR_RE', re.escape(layer))
|
||||
data = parse_config_file(os.path.join(layer, "conf", "layer.conf"), data)
|
||||
data.expandVarref('LAYERDIR')
|
||||
data.expandVarref('LAYERDIR_RE')
|
||||
|
||||
data.delVar('LAYERDIR_RE')
|
||||
data.delVar('LAYERDIR')
|
||||
|
||||
bbfiles_dynamic = (data.getVar('BBFILES_DYNAMIC') or "").split()
|
||||
collections = (data.getVar('BBFILE_COLLECTIONS') or "").split()
|
||||
invalid = []
|
||||
for entry in bbfiles_dynamic:
|
||||
parts = entry.split(":", 1)
|
||||
if len(parts) != 2:
|
||||
invalid.append(entry)
|
||||
continue
|
||||
l, f = parts
|
||||
if l in collections:
|
||||
data.appendVar("BBFILES", " " + f)
|
||||
if invalid:
|
||||
bb.fatal("BBFILES_DYNAMIC entries must be of the form <collection name>:<filename pattern>, not:\n %s" % "\n ".join(invalid))
|
||||
|
||||
layerseries = set((data.getVar("LAYERSERIES_CORENAMES") or "").split())
|
||||
for c in collections:
|
||||
compat = set((data.getVar("LAYERSERIES_COMPAT_%s" % c) or "").split())
|
||||
if compat and not (compat & layerseries):
|
||||
bb.fatal("Layer %s is not compatible with the core layer which only supports these series: %s (layer is compatible with %s)"
|
||||
% (c, " ".join(layerseries), " ".join(compat)))
|
||||
|
||||
if not data.getVar("BBPATH"):
|
||||
if not data.getVar("BBPATH", True):
|
||||
msg = "The BBPATH variable is not set"
|
||||
if not layerconf:
|
||||
msg += (" and bitbake did not find a conf/bblayers.conf file in"
|
||||
@@ -412,21 +277,29 @@ class CookerDataBuilder(object):
|
||||
data = parse_config_file(p, data)
|
||||
|
||||
# Handle any INHERITs and inherit the base class
|
||||
bbclasses = ["base"] + (data.getVar('INHERIT') or "").split()
|
||||
bbclasses = ["base"] + (data.getVar('INHERIT', True) or "").split()
|
||||
for bbclass in bbclasses:
|
||||
data = _inherit(bbclass, data)
|
||||
|
||||
# Nomally we only register event handlers at the end of parsing .bb files
|
||||
# We register any handlers we've found so far here...
|
||||
for var in data.getVar('__BBHANDLERS', False) or []:
|
||||
handlerfn = data.getVarFlag(var, "filename", False)
|
||||
if not handlerfn:
|
||||
parselog.critical("Undefined event handler function '%s'" % var)
|
||||
sys.exit(1)
|
||||
handlerln = int(data.getVarFlag(var, "lineno", False))
|
||||
bb.event.register(var, data.getVar(var, False), (data.getVarFlag(var, "eventmask") or "").split(), handlerfn, handlerln)
|
||||
for var in data.getVar('__BBHANDLERS') or []:
|
||||
bb.event.register(var, data.getVar(var), (data.getVarFlag(var, "eventmask", True) or "").split())
|
||||
|
||||
if data.getVar("BB_WORKERCONTEXT", False) is None:
|
||||
bb.fetch.fetcher_init(data)
|
||||
bb.codeparser.parser_cache_init(data)
|
||||
bb.event.fire(bb.event.ConfigParsed(), data)
|
||||
|
||||
if data.getVar("BB_INVALIDCONF") is True:
|
||||
data.setVar("BB_INVALIDCONF", False)
|
||||
self.parseConfigurationFiles(self.prefiles, self.postfiles)
|
||||
return
|
||||
|
||||
bb.parse.init_parser(data)
|
||||
data.setVar('BBINCLUDED',bb.parse.get_file_depends(data))
|
||||
self.data = data
|
||||
self.data_hash = data.get_hash()
|
||||
|
||||
|
||||
return data
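
The cookerdata.py hunks above keep coming back to one lookup: walk the colon-separated BBPATH and test for conf/<name> under each entry, which is what findConfigFile() and findTopdir() do. A stand-alone sketch of that search is below; find_config_file and find_topdir are simplified stand-ins, and the cwd-upwards fallback mentioned in the comment block is deliberately omitted.

import os

def find_config_file(configfile, bbpath):
    """Return the first conf/<configfile> found under an entry of BBPATH."""
    for entry in (bbpath or "").split(":"):
        if not entry:
            continue
        candidate = os.path.join(entry, "conf", configfile)
        if os.path.exists(candidate):
            return candidate
    return None

def find_topdir(environ=os.environ):
    """TOPDIR sits two levels above conf/bblayers.conf (or conf/bitbake.conf)."""
    bbpath = environ.get("BBPATH")
    for name in ("bblayers.conf", "bitbake.conf"):
        found = find_config_file(name, bbpath)
        if found:
            return os.path.dirname(os.path.dirname(found))
    return None

# usage: find_topdir({"BBPATH": "/home/user/build:/home/user/poky/meta"})
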
@@ -1,14 +1,45 @@
|
||||
"""
|
||||
Python Daemonizing helper
|
||||
Python Deamonizing helper
|
||||
|
||||
Originally based on code Copyright (C) 2005 Chad J. Schroeder but now heavily modified
|
||||
to allow a function to be daemonized and return for bitbake use by Richard Purdie
|
||||
Configurable daemon behaviors:
|
||||
|
||||
1.) The current working directory set to the "/" directory.
|
||||
2.) The current file creation mode mask set to 0.
|
||||
3.) Close all open files (1024).
|
||||
4.) Redirect standard I/O streams to "/dev/null".
|
||||
|
||||
A failed call to fork() now raises an exception.
|
||||
|
||||
References:
|
||||
1) Advanced Programming in the Unix Environment: W. Richard Stevens
|
||||
2) Unix Programming Frequently Asked Questions:
|
||||
http://www.erlenstar.demon.co.uk/unix/faq_toc.html
|
||||
|
||||
Modified to allow a function to be daemonized and return for
|
||||
bitbake use by Richard Purdie
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import io
|
||||
import traceback
|
||||
__author__ = "Chad J. Schroeder"
|
||||
__copyright__ = "Copyright (C) 2005 Chad J. Schroeder"
|
||||
__version__ = "0.2"
|
||||
|
||||
# Standard Python modules.
|
||||
import os # Miscellaneous OS interfaces.
|
||||
import sys # System-specific parameters and functions.
|
||||
|
||||
# Default daemon parameters.
|
||||
# File mode creation mask of the daemon.
|
||||
# For BitBake's children, we do want to inherit the parent umask.
|
||||
UMASK = None
|
||||
|
||||
# Default maximum for the number of available file descriptors.
|
||||
MAXFD = 1024
|
||||
|
||||
# The standard I/O file descriptors are redirected to /dev/null by default.
|
||||
if (hasattr(os, "devnull")):
|
||||
REDIRECT_TO = os.devnull
|
||||
else:
|
||||
REDIRECT_TO = "/dev/null"
|
||||
|
||||
def createDaemon(function, logfile):
|
||||
"""
|
||||
@@ -31,6 +62,36 @@ def createDaemon(function, logfile):
|
||||
# leader of the new process group, we call os.setsid(). The process is
|
||||
# also guaranteed not to have a controlling terminal.
|
||||
os.setsid()
|
||||
|
||||
# Is ignoring SIGHUP necessary?
|
||||
#
|
||||
# It's often suggested that the SIGHUP signal should be ignored before
|
||||
# the second fork to avoid premature termination of the process. The
|
||||
# reason is that when the first child terminates, all processes, e.g.
|
||||
# the second child, in the orphaned group will be sent a SIGHUP.
|
||||
#
|
||||
# "However, as part of the session management system, there are exactly
|
||||
# two cases where SIGHUP is sent on the death of a process:
|
||||
#
|
||||
# 1) When the process that dies is the session leader of a session that
|
||||
# is attached to a terminal device, SIGHUP is sent to all processes
|
||||
# in the foreground process group of that terminal device.
|
||||
# 2) When the death of a process causes a process group to become
|
||||
# orphaned, and one or more processes in the orphaned group are
|
||||
# stopped, then SIGHUP and SIGCONT are sent to all members of the
|
||||
# orphaned group." [2]
|
||||
#
|
||||
# The first case can be ignored since the child is guaranteed not to have
|
||||
# a controlling terminal. The second case isn't so easy to dismiss.
|
||||
# The process group is orphaned when the first child terminates and
|
||||
# POSIX.1 requires that every STOPPED process in an orphaned process
|
||||
# group be sent a SIGHUP signal followed by a SIGCONT signal. Since the
|
||||
# second child is not STOPPED though, we can safely forego ignoring the
|
||||
# SIGHUP signal. In any case, there are no ill-effects if it is ignored.
|
||||
#
|
||||
# import signal # Set handlers for asynchronous events.
|
||||
# signal.signal(signal.SIGHUP, signal.SIG_IGN)
|
||||
|
||||
try:
|
||||
# Fork a second child and exit immediately to prevent zombies. This
|
||||
# causes the second child process to be orphaned, making the init
|
||||
@@ -44,39 +105,86 @@ def createDaemon(function, logfile):
|
||||
except OSError as e:
|
||||
raise Exception("%s [%d]" % (e.strerror, e.errno))
|
||||
|
||||
if (pid != 0):
|
||||
if (pid == 0): # The second child.
|
||||
# We probably don't want the file mode creation mask inherited from
|
||||
# the parent, so we give the child complete control over permissions.
|
||||
if UMASK is not None:
|
||||
os.umask(UMASK)
|
||||
else:
|
||||
# Parent (the first child) of the second child.
|
||||
# exit() or _exit()?
|
||||
# _exit is like exit(), but it doesn't call any functions registered
|
||||
# with atexit (and on_exit) or any registered signal handlers. It also
|
||||
# closes any open file descriptors. Using exit() may cause all stdio
|
||||
# streams to be flushed twice and any temporary files may be unexpectedly
|
||||
# removed. It's therefore recommended that child branches of a fork()
|
||||
# and the parent branch(es) of a daemon use _exit().
|
||||
os._exit(0)
|
||||
else:
|
||||
os.waitpid(pid, 0)
|
||||
# exit() or _exit()?
|
||||
# _exit is like exit(), but it doesn't call any functions registered
|
||||
# with atexit (and on_exit) or any registered signal handlers. It also
|
||||
# closes any open file descriptors. Using exit() may cause all stdio
|
||||
# streams to be flushed twice and any temporary files may be unexpectedly
|
||||
# removed. It's therefore recommended that child branches of a fork()
|
||||
# and the parent branch(es) of a daemon use _exit().
|
||||
return
|
||||
|
||||
# The second child.
|
||||
# Close all open file descriptors. This prevents the child from keeping
|
||||
# open any file descriptors inherited from the parent. There is a variety
|
||||
# of methods to accomplish this task. Three are listed below.
|
||||
#
|
||||
# Try the system configuration variable, SC_OPEN_MAX, to obtain the maximum
|
||||
# number of open file descriptors to close. If it doesn't exists, use
|
||||
# the default value (configurable).
|
||||
#
|
||||
# try:
|
||||
# maxfd = os.sysconf("SC_OPEN_MAX")
|
||||
# except (AttributeError, ValueError):
|
||||
# maxfd = MAXFD
|
||||
#
|
||||
# OR
|
||||
#
|
||||
# if (os.sysconf_names.has_key("SC_OPEN_MAX")):
|
||||
# maxfd = os.sysconf("SC_OPEN_MAX")
|
||||
# else:
|
||||
# maxfd = MAXFD
|
||||
#
|
||||
# OR
|
||||
#
|
||||
# Use the getrlimit method to retrieve the maximum file descriptor number
|
||||
# that can be opened by this process. If there is not limit on the
|
||||
# resource, use the default value.
|
||||
#
|
||||
import resource # Resource usage information.
|
||||
maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
|
||||
if (maxfd == resource.RLIM_INFINITY):
|
||||
maxfd = MAXFD
|
||||
|
||||
# Iterate through and close all file descriptors.
|
||||
# for fd in range(0, maxfd):
|
||||
# try:
|
||||
# os.close(fd)
|
||||
# except OSError: # ERROR, fd wasn't open to begin with (ignored)
|
||||
# pass
|
||||
|
||||
# Replace standard fds with our own
|
||||
si = open('/dev/null', 'r')
|
||||
# Redirect the standard I/O file descriptors to the specified file. Since
|
||||
# the daemon has no controlling terminal, most daemons redirect stdin,
|
||||
# stdout, and stderr to /dev/null. This is done to prevent side-effects
|
||||
# from reads and writes to the standard I/O file descriptors.
|
||||
|
||||
# This call to open is guaranteed to return the lowest file descriptor,
|
||||
# which will be 0 (stdin), since it was closed above.
|
||||
# os.open(REDIRECT_TO, os.O_RDWR) # standard input (0)
|
||||
|
||||
# Duplicate standard input to standard output and standard error.
|
||||
# os.dup2(0, 1) # standard output (1)
|
||||
# os.dup2(0, 2) # standard error (2)
|
||||
|
||||
|
||||
si = file('/dev/null', 'r')
|
||||
so = file(logfile, 'w')
|
||||
se = so
|
||||
|
||||
|
||||
# Replace those fds with our own
|
||||
os.dup2(si.fileno(), sys.stdin.fileno())
|
||||
os.dup2(so.fileno(), sys.stdout.fileno())
|
||||
os.dup2(se.fileno(), sys.stderr.fileno())
|
||||
|
||||
try:
|
||||
so = open(logfile, 'a+')
|
||||
se = so
|
||||
os.dup2(so.fileno(), sys.stdout.fileno())
|
||||
os.dup2(se.fileno(), sys.stderr.fileno())
|
||||
except io.UnsupportedOperation:
|
||||
sys.stdout = open(logfile, 'a+')
|
||||
sys.stderr = sys.stdout
|
||||
function()
|
||||
|
||||
try:
|
||||
function()
|
||||
except Exception as e:
|
||||
traceback.print_exc()
|
||||
finally:
|
||||
bb.event.print_ui_queue()
|
||||
os._exit(0)
|
||||
os._exit(0)
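
Both versions of daemonize.py above implement the same recipe: fork, have the first child call os.setsid() and fork again so the worker is re-parented to init, then point stdin at /dev/null and stdout/stderr at the logfile before invoking the payload. A condensed sketch of that flow follows, with error handling and the close-all-descriptors step trimmed; it is an illustration under those simplifications, not the exact BitBake implementation.

import os
import sys

def create_daemon(function, logfile):
    pid = os.fork()
    if pid != 0:
        os.waitpid(pid, 0)          # reap the short-lived first child
        return                      # the caller carries on; the daemon runs detached
    os.setsid()                     # new session, no controlling terminal
    if os.fork() != 0:
        os._exit(0)                 # first child exits; the grandchild is orphaned to init

    # Redirect the standard streams: stdin from /dev/null, stdout/stderr to the log.
    si = open("/dev/null", "r")
    so = open(logfile, "a+")
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(so.fileno(), sys.stderr.fileno())

    try:
        function()
    finally:
        os._exit(0)                 # never fall back into the caller's stack
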
@@ -6,7 +6,7 @@ BitBake 'Data' implementations
|
||||
Functions for interacting with the data structure used by the
|
||||
BitBake build tools.
|
||||
|
||||
The expandKeys and update_data are the most expensive
|
||||
The expandData and update_data are the most expensive
|
||||
operations. At night the cookie monster came by and
|
||||
suggested 'give me cookies on setting the variables and
|
||||
things will work out'. Taking this suggestion into account
|
||||
@@ -15,7 +15,7 @@ Analyse von Algorithmen' lecture and the cookie
|
||||
monster seems to be right. We will track setVar more carefully
|
||||
to have faster update_data and expandKeys operations.
|
||||
|
||||
This is a trade-off between speed and memory again but
|
||||
This is a treade-off between speed and memory again but
|
||||
the speed is more critical here.
|
||||
"""
|
||||
|
||||
@@ -35,7 +35,7 @@ the speed is more critical here.
|
||||
# with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
#
|
||||
# Based on functions from the base bb module, Copyright 2003 Holger Schurig
|
||||
#Based on functions from the base bb module, Copyright 2003 Holger Schurig
|
||||
|
||||
import sys, os, re
|
||||
if sys.argv[0][-5:] == "pydoc":
|
||||
@@ -78,6 +78,59 @@ def initVar(var, d):
|
||||
"""Non-destructive var init for data structure"""
|
||||
d.initVar(var)
|
||||
|
||||
|
||||
def setVar(var, value, d):
|
||||
"""Set a variable to a given value"""
|
||||
d.setVar(var, value)
|
||||
|
||||
|
||||
def getVar(var, d, exp = 0):
|
||||
"""Gets the value of a variable"""
|
||||
return d.getVar(var, exp)
|
||||
|
||||
|
||||
def renameVar(key, newkey, d):
|
||||
"""Renames a variable from key to newkey"""
|
||||
d.renameVar(key, newkey)
|
||||
|
||||
def delVar(var, d):
|
||||
"""Removes a variable from the data set"""
|
||||
d.delVar(var)
|
||||
|
||||
def appendVar(var, value, d):
|
||||
"""Append additional value to a variable"""
|
||||
d.appendVar(var, value)
|
||||
|
||||
def setVarFlag(var, flag, flagvalue, d):
|
||||
"""Set a flag for a given variable to a given value"""
|
||||
d.setVarFlag(var, flag, flagvalue)
|
||||
|
||||
def getVarFlag(var, flag, d):
|
||||
"""Gets given flag from given var"""
|
||||
return d.getVarFlag(var, flag)
|
||||
|
||||
def delVarFlag(var, flag, d):
|
||||
"""Removes a given flag from the variable's flags"""
|
||||
d.delVarFlag(var, flag)
|
||||
|
||||
def setVarFlags(var, flags, d):
|
||||
"""Set the flags for a given variable
|
||||
|
||||
Note:
|
||||
setVarFlags will not clear previous
|
||||
flags. Think of this method as
|
||||
addVarFlags
|
||||
"""
|
||||
d.setVarFlags(var, flags)
|
||||
|
||||
def getVarFlags(var, d):
|
||||
"""Gets a variable's flags"""
|
||||
return d.getVarFlags(var)
|
||||
|
||||
def delVarFlags(var, d):
|
||||
"""Removes a variable's flags"""
|
||||
d.delVarFlags(var)
|
||||
|
||||
def keys(d):
|
||||
"""Return a list of keys in d"""
|
||||
return d.keys()
|
||||
@@ -106,12 +159,13 @@ def expandKeys(alterdata, readdata = None):
|
||||
|
||||
# These two for loops are split for performance to maximise the
|
||||
# usefulness of the expand cache
|
||||
for key in sorted(todolist):
|
||||
|
||||
for key in todolist:
|
||||
ekey = todolist[key]
|
||||
newval = alterdata.getVar(ekey, False)
|
||||
if newval is not None:
|
||||
val = alterdata.getVar(key, False)
|
||||
if val is not None:
|
||||
newval = alterdata.getVar(ekey, 0)
|
||||
if newval:
|
||||
val = alterdata.getVar(key, 0)
|
||||
if val is not None and newval is not None:
|
||||
bb.warn("Variable key %s (%s) replaces original key %s (%s)." % (key, val, ekey, newval))
|
||||
alterdata.renameVar(key, ekey)
|
||||
|
||||
@@ -121,7 +175,7 @@ def inheritFromOS(d, savedenv, permitted):
|
||||
for s in savedenv.keys():
|
||||
if s in permitted:
|
||||
try:
|
||||
d.setVar(s, savedenv.getVar(s), op = 'from env')
|
||||
d.setVar(s, getVar(s, savedenv, True), op = 'from env')
|
||||
if s in exportlist:
|
||||
d.setVarFlag(s, "export", True, op = 'auto env export')
|
||||
except TypeError:
|
||||
@@ -129,52 +183,44 @@ def inheritFromOS(d, savedenv, permitted):
|
||||
|
||||
def emit_var(var, o=sys.__stdout__, d = init(), all=False):
|
||||
"""Emit a variable to be sourced by a shell."""
|
||||
func = d.getVarFlag(var, "func", False)
|
||||
if d.getVarFlag(var, 'python', False) and func:
|
||||
return False
|
||||
if getVarFlag(var, "python", d):
|
||||
return 0
|
||||
|
||||
export = d.getVarFlag(var, "export", False)
|
||||
unexport = d.getVarFlag(var, "unexport", False)
|
||||
export = getVarFlag(var, "export", d)
|
||||
unexport = getVarFlag(var, "unexport", d)
|
||||
func = getVarFlag(var, "func", d)
|
||||
if not all and not export and not unexport and not func:
|
||||
return False
|
||||
return 0
|
||||
|
||||
try:
|
||||
if all:
|
||||
oval = d.getVar(var, False)
|
||||
val = d.getVar(var)
|
||||
oval = getVar(var, d, 0)
|
||||
val = getVar(var, d, 1)
|
||||
except (KeyboardInterrupt, bb.build.FuncFailed):
|
||||
raise
|
||||
except Exception as exc:
|
||||
o.write('# expansion of %s threw %s: %s\n' % (var, exc.__class__.__name__, str(exc)))
|
||||
return False
|
||||
return 0
|
||||
|
||||
if all:
|
||||
d.varhistory.emit(var, oval, val, o, d)
|
||||
d.varhistory.emit(var, oval, val, o)
|
||||
|
||||
if (var.find("-") != -1 or var.find(".") != -1 or var.find('{') != -1 or var.find('}') != -1 or var.find('+') != -1) and not all:
|
||||
return False
|
||||
return 0
|
||||
|
||||
varExpanded = d.expand(var)
|
||||
varExpanded = expand(var, d)
|
||||
|
||||
if unexport:
|
||||
o.write('unset %s\n' % varExpanded)
|
||||
return False
|
||||
return 0
|
||||
|
||||
if val is None:
|
||||
return False
|
||||
return 0
|
||||
|
||||
val = str(val)
|
||||
|
||||
if varExpanded.startswith("BASH_FUNC_"):
|
||||
varExpanded = varExpanded[10:-2]
|
||||
val = val[3:] # Strip off "() "
|
||||
o.write("%s() %s\n" % (varExpanded, val))
|
||||
o.write("export -f %s\n" % (varExpanded))
|
||||
return True
|
||||
|
||||
if func:
|
||||
# NOTE: should probably check for unbalanced {} within the var
|
||||
val = val.rstrip('\n')
|
||||
o.write("%s() {\n%s\n}\n" % (varExpanded, val))
|
||||
return 1
|
||||
|
||||
@@ -187,31 +233,29 @@ def emit_var(var, o=sys.__stdout__, d = init(), all=False):
|
||||
alter = re.sub('\n', ' \\\n', alter)
|
||||
alter = re.sub('\\$', '\\\\$', alter)
|
||||
o.write('%s="%s"\n' % (varExpanded, alter))
|
||||
return False
|
||||
return 0
|
||||
|
||||
def emit_env(o=sys.__stdout__, d = init(), all=False):
|
||||
"""Emits all items in the data store in a format such that it can be sourced by a shell."""
|
||||
|
||||
isfunc = lambda key: bool(d.getVarFlag(key, "func", False))
|
||||
isfunc = lambda key: bool(d.getVarFlag(key, "func"))
|
||||
keys = sorted((key for key in d.keys() if not key.startswith("__")), key=isfunc)
|
||||
grouped = groupby(keys, isfunc)
|
||||
for isfunc, keys in grouped:
|
||||
for key in sorted(keys):
|
||||
for key in keys:
|
||||
emit_var(key, o, d, all and not isfunc) and o.write('\n')
|
||||
|
||||
def exported_keys(d):
|
||||
return (key for key in d.keys() if not key.startswith('__') and
|
||||
d.getVarFlag(key, 'export', False) and
|
||||
not d.getVarFlag(key, 'unexport', False))
|
||||
d.getVarFlag(key, 'export') and
|
||||
not d.getVarFlag(key, 'unexport'))
|
||||
|
||||
def exported_vars(d):
|
||||
k = list(exported_keys(d))
|
||||
for key in k:
|
||||
for key in exported_keys(d):
|
||||
try:
|
||||
value = d.getVar(key)
|
||||
except Exception as err:
|
||||
bb.warn("%s: Unable to export ${%s}: %s" % (d.getVar("FILE"), key, err))
|
||||
continue
|
||||
value = d.getVar(key, True)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
if value is not None:
|
||||
yield key, str(value)
|
||||
@@ -219,59 +263,23 @@ def exported_vars(d):
|
||||
def emit_func(func, o=sys.__stdout__, d = init()):
|
||||
"""Emits all items in the data store in a format such that it can be sourced by a shell."""
|
||||
|
||||
keys = (key for key in d.keys() if not key.startswith("__") and not d.getVarFlag(key, "func", False))
|
||||
for key in sorted(keys):
|
||||
emit_var(key, o, d, False)
|
||||
keys = (key for key in d.keys() if not key.startswith("__") and not d.getVarFlag(key, "func"))
|
||||
for key in keys:
|
||||
emit_var(key, o, d, False) and o.write('\n')
|
||||
|
||||
o.write('\n')
|
||||
emit_var(func, o, d, False) and o.write('\n')
|
||||
newdeps = bb.codeparser.ShellParser(func, logger).parse_shell(d.getVar(func))
|
||||
newdeps |= set((d.getVarFlag(func, "vardeps") or "").split())
|
||||
newdeps = bb.codeparser.ShellParser(func, logger).parse_shell(d.getVar(func, True))
|
||||
newdeps |= set((d.getVarFlag(func, "vardeps", True) or "").split())
|
||||
seen = set()
|
||||
while newdeps:
|
||||
deps = newdeps
|
||||
seen |= deps
|
||||
newdeps = set()
|
||||
for dep in deps:
|
||||
if d.getVarFlag(dep, "func", False) and not d.getVarFlag(dep, "python", False):
|
||||
if d.getVarFlag(dep, "func") and not d.getVarFlag(dep, "python"):
|
||||
emit_var(dep, o, d, False) and o.write('\n')
|
||||
newdeps |= bb.codeparser.ShellParser(dep, logger).parse_shell(d.getVar(dep))
|
||||
newdeps |= set((d.getVarFlag(dep, "vardeps") or "").split())
|
||||
newdeps -= seen
|
||||
|
||||
_functionfmt = """
|
||||
def {function}(d):
|
||||
{body}"""
|
||||
|
||||
def emit_func_python(func, o=sys.__stdout__, d = init()):
|
||||
"""Emits all items in the data store in a format such that it can be sourced by a shell."""
|
||||
|
||||
def write_func(func, o, call = False):
|
||||
body = d.getVar(func, False)
|
||||
if not body.startswith("def"):
|
||||
body = _functionfmt.format(function=func, body=body)
|
||||
|
||||
o.write(body.strip() + "\n\n")
|
||||
if call:
|
||||
o.write(func + "(d)" + "\n\n")
|
||||
|
||||
write_func(func, o, True)
|
||||
pp = bb.codeparser.PythonParser(func, logger)
|
||||
pp.parse_python(d.getVar(func, False))
|
||||
newdeps = pp.execs
|
||||
newdeps |= set((d.getVarFlag(func, "vardeps") or "").split())
|
||||
seen = set()
|
||||
while newdeps:
|
||||
deps = newdeps
|
||||
seen |= deps
|
||||
newdeps = set()
|
||||
for dep in deps:
|
||||
if d.getVarFlag(dep, "func", False) and d.getVarFlag(dep, "python", False):
|
||||
write_func(dep, o)
|
||||
pp = bb.codeparser.PythonParser(dep, logger)
|
||||
pp.parse_python(d.getVar(dep, False))
|
||||
newdeps |= pp.execs
|
||||
newdeps |= set((d.getVarFlag(dep, "vardeps") or "").split())
|
||||
newdeps |= bb.codeparser.ShellParser(dep, logger).parse_shell(d.getVar(dep, True))
|
||||
newdeps |= set((d.getVarFlag(dep, "vardeps", True) or "").split())
|
||||
newdeps -= seen
|
||||
|
||||
def update_data(d):
|
||||
@@ -288,21 +296,19 @@ def build_dependencies(key, keys, shelldeps, varflagsexcl, d):
|
||||
deps |= parser.references
|
||||
deps = deps | (keys & parser.execs)
|
||||
return deps, value
|
||||
varflags = d.getVarFlags(key, ["vardeps", "vardepvalue", "vardepsexclude", "exports", "postfuncs", "prefuncs", "lineno", "filename"]) or {}
|
||||
varflags = d.getVarFlags(key, ["vardeps", "vardepvalue", "vardepsexclude", "vardepvalueexclude", "postfuncs", "prefuncs"]) or {}
|
||||
vardeps = varflags.get("vardeps")
|
||||
value = d.getVarFlag(key, "_content", False)
|
||||
value = d.getVar(key, False)
|
||||
|
||||
def handle_contains(value, contains, d):
|
||||
newvalue = ""
|
||||
for k in sorted(contains):
|
||||
l = (d.getVar(k) or "").split()
|
||||
for item in sorted(contains[k]):
|
||||
for word in item.split():
|
||||
if not word in l:
|
||||
newvalue += "\n%s{%s} = Unset" % (k, item)
|
||||
break
|
||||
l = (d.getVar(k, True) or "").split()
|
||||
for word in sorted(contains[k]):
|
||||
if word in l:
|
||||
newvalue += "\n%s{%s} = Set" % (k, word)
|
||||
else:
|
||||
newvalue += "\n%s{%s} = Set" % (k, item)
|
||||
newvalue += "\n%s{%s} = Unset" % (k, word)
|
||||
if not newvalue:
|
||||
return value
|
||||
if not value:
|
||||
@@ -313,29 +319,27 @@ def build_dependencies(key, keys, shelldeps, varflagsexcl, d):
|
||||
value = varflags.get("vardepvalue")
|
||||
elif varflags.get("func"):
|
||||
if varflags.get("python"):
|
||||
parsedvar = d.expandWithRefs(value, key)
|
||||
parser = bb.codeparser.PythonParser(key, logger)
|
||||
if value and "\t" in value:
|
||||
logger.warning("Variable %s contains tabs, please remove these (%s)" % (key, d.getVar("FILE")))
|
||||
parser.parse_python(value, filename=varflags.get("filename"), lineno=varflags.get("lineno"))
|
||||
if parsedvar.value and "\t" in parsedvar.value:
|
||||
logger.warn("Variable %s contains tabs, please remove these (%s)" % (key, d.getVar("FILE", True)))
|
||||
parser.parse_python(parsedvar.value)
|
||||
deps = deps | parser.references
|
||||
deps = deps | (keys & parser.execs)
|
||||
value = handle_contains(value, parser.contains, d)
|
||||
else:
|
||||
parsedvar = d.expandWithRefs(value, key)
|
||||
parser = bb.codeparser.ShellParser(key, logger)
|
||||
parser.parse_shell(parsedvar.value)
|
||||
deps = deps | shelldeps
|
||||
deps = deps | parsedvar.references
|
||||
deps = deps | (keys & parser.execs) | (keys & parsedvar.execs)
|
||||
value = handle_contains(value, parsedvar.contains, d)
|
||||
if vardeps is None:
|
||||
parser.log.flush()
|
||||
if "prefuncs" in varflags:
|
||||
deps = deps | set(varflags["prefuncs"].split())
|
||||
if "postfuncs" in varflags:
|
||||
deps = deps | set(varflags["postfuncs"].split())
|
||||
if "exports" in varflags:
|
||||
deps = deps | set(varflags["exports"].split())
|
||||
deps = deps | parsedvar.references
|
||||
deps = deps | (keys & parser.execs) | (keys & parsedvar.execs)
|
||||
value = handle_contains(value, parsedvar.contains, d)
|
||||
else:
|
||||
parser = d.expandWithRefs(value, key)
|
||||
deps |= parser.references
|
||||
@@ -359,11 +363,8 @@ def build_dependencies(key, keys, shelldeps, varflagsexcl, d):
|
||||
|
||||
deps |= set((vardeps or "").split())
|
||||
deps -= set(varflags.get("vardepsexclude", "").split())
|
||||
except bb.parse.SkipRecipe:
|
||||
raise
|
||||
except Exception as e:
|
||||
bb.warn("Exception during build_dependencies for %s" % key)
|
||||
raise
|
||||
raise bb.data_smart.ExpansionError(key, None, e)
|
||||
return deps, value
|
||||
#bb.note("Variable %s references %s and calls %s" % (key, str(deps), str(execs)))
|
||||
#d.setVarFlag(key, "vardeps", deps)
|
||||
@@ -371,13 +372,13 @@ def build_dependencies(key, keys, shelldeps, varflagsexcl, d):
|
||||
def generate_dependencies(d):
|
||||
|
||||
keys = set(key for key in d if not key.startswith("__"))
|
||||
shelldeps = set(key for key in d.getVar("__exportlist", False) if d.getVarFlag(key, "export", False) and not d.getVarFlag(key, "unexport", False))
|
||||
varflagsexcl = d.getVar('BB_SIGNATURE_EXCLUDE_FLAGS')
|
||||
shelldeps = set(key for key in d.getVar("__exportlist", False) if d.getVarFlag(key, "export") and not d.getVarFlag(key, "unexport"))
|
||||
varflagsexcl = d.getVar('BB_SIGNATURE_EXCLUDE_FLAGS', True)
|
||||
|
||||
deps = {}
|
||||
values = {}
|
||||
|
||||
tasklist = d.getVar('__BBTASKS', False) or []
|
||||
tasklist = d.getVar('__BBTASKS') or []
|
||||
for task in tasklist:
|
||||
deps[task], values[task] = build_dependencies(task, keys, shelldeps, varflagsexcl, d)
|
||||
newdeps = deps[task]
|
||||
@@ -395,7 +396,7 @@ def generate_dependencies(d):
|
||||
return tasklist, deps, values
|
||||
|
||||
def inherits_class(klass, d):
|
||||
val = d.getVar('__inherit_cache', False) or []
|
||||
val = getVar('__inherit_cache', d) or []
|
||||
needle = os.path.join('classes', '%s.bbclass' % klass)
|
||||
for v in val:
|
||||
if v.endswith(needle):
|
||||
|
||||
@@ -39,8 +39,8 @@ from bb.COW import COWDictBase
|
||||
logger = logging.getLogger("BitBake.Data")
|
||||
|
||||
__setvar_keyword__ = ["_append", "_prepend", "_remove"]
__setvar_regexp__ = re.compile('(?P<base>.*?)(?P<keyword>_append|_prepend|_remove)(_(?P<add>[^A-Z]*))?$')
__expand_var_regexp__ = re.compile(r"\${[^{}@\n\t :]+}")
__setvar_regexp__ = re.compile('(?P<base>.*?)(?P<keyword>_append|_prepend|_remove)(_(?P<add>.*))?$')
__expand_var_regexp__ = re.compile(r"\${[^{}@\n\t ]+}")
__expand_python_regexp__ = re.compile(r"\${@.+?}")

def infer_caller_details(loginfo, parent = False, varval = True):
|
||||
@@ -54,36 +54,27 @@ def infer_caller_details(loginfo, parent = False, varval = True):
|
||||
return
|
||||
# Infer caller's likely values for variable (var) and value (value),
|
||||
# to reduce clutter in the rest of the code.
|
||||
above = None
|
||||
def set_above():
|
||||
if varval and ('variable' not in loginfo or 'detail' not in loginfo):
|
||||
try:
|
||||
raise Exception
|
||||
except Exception:
|
||||
tb = sys.exc_info()[2]
|
||||
if parent:
|
||||
return tb.tb_frame.f_back.f_back.f_back
|
||||
above = tb.tb_frame.f_back.f_back
|
||||
else:
|
||||
return tb.tb_frame.f_back.f_back
|
||||
|
||||
if varval and ('variable' not in loginfo or 'detail' not in loginfo):
|
||||
if not above:
|
||||
above = set_above()
|
||||
lcls = above.f_locals.items()
|
||||
above = tb.tb_frame.f_back
|
||||
lcls = above.f_locals.items()
|
||||
for k, v in lcls:
|
||||
if k == 'value' and 'detail' not in loginfo:
|
||||
loginfo['detail'] = v
|
||||
if k == 'var' and 'variable' not in loginfo:
|
||||
loginfo['variable'] = v
|
||||
# Infer file/line/function from traceback
|
||||
# Don't use traceback.extract_stack() since it fills the line contents which
|
||||
# we don't need and that hits stat syscalls
|
||||
if 'file' not in loginfo:
|
||||
if not above:
|
||||
above = set_above()
|
||||
f = above.f_back
|
||||
line = f.f_lineno
|
||||
file = f.f_code.co_filename
|
||||
func = f.f_code.co_name
|
||||
depth = 3
|
||||
if parent:
|
||||
depth = 4
|
||||
file, line, func, text = traceback.extract_stack(limit = depth)[0]
|
||||
loginfo['file'] = file
|
||||
loginfo['line'] = line
|
||||
if func not in loginfo:
|
||||
@@ -108,7 +99,7 @@ class VariableParse:
|
||||
varparse = self.d.expand_cache[key]
|
||||
var = varparse.value
|
||||
else:
|
||||
var = self.d.getVarFlag(key, "_content")
|
||||
var = self.d.getVarFlag(key, "_content", True)
|
||||
self.references.add(key)
|
||||
if var is not None:
|
||||
return var
|
||||
@@ -116,21 +107,13 @@ class VariableParse:
|
||||
return match.group()
|
||||
|
||||
def python_sub(self, match):
|
||||
if isinstance(match, str):
|
||||
code = match
|
||||
else:
|
||||
code = match.group()[3:-1]
|
||||
|
||||
if "_remote_data" in self.d:
|
||||
connector = self.d["_remote_data"]
|
||||
return connector.expandPythonRef(self.varname, code, self.d)
|
||||
|
||||
code = match.group()[3:-1]
|
||||
codeobj = compile(code.strip(), self.varname or "<expansion>", "eval")
|
||||
|
||||
parser = bb.codeparser.PythonParser(self.varname, logger)
|
||||
parser.parse_python(code)
|
||||
if self.varname:
|
||||
vardeps = self.d.getVarFlag(self.varname, "vardeps")
|
||||
vardeps = self.d.getVarFlag(self.varname, "vardeps", True)
|
||||
if vardeps is None:
|
||||
parser.log.flush()
|
||||
else:
|
||||
@@ -143,7 +126,7 @@ class VariableParse:
|
||||
self.contains[k] = parser.contains[k].copy()
|
||||
else:
|
||||
self.contains[k].update(parser.contains[k])
|
||||
value = utils.better_eval(codeobj, DataContext(self.d), {'d' : self.d})
|
||||
value = utils.better_eval(codeobj, DataContext(self.d))
|
||||
return str(value)
|
||||
|
||||
|
||||
@@ -154,8 +137,8 @@ class DataContext(dict):
|
||||
self['d'] = metadata
|
||||
|
||||
def __missing__(self, key):
|
||||
value = self.metadata.getVar(key)
|
||||
if value is None or self.metadata.getVarFlag(key, 'func', False):
|
||||
value = self.metadata.getVar(key, True)
|
||||
if value is None or self.metadata.getVarFlag(key, 'func'):
|
||||
raise KeyError(key)
|
||||
else:
|
||||
return value
|
||||
@@ -230,19 +213,6 @@ class VariableHistory(object):
|
||||
new.variables = self.variables.copy()
|
||||
return new
|
||||
|
||||
def __getstate__(self):
|
||||
vardict = {}
|
||||
for k, v in self.variables.iteritems():
|
||||
vardict[k] = v
|
||||
return {'dataroot': self.dataroot,
|
||||
'variables': vardict}
|
||||
|
||||
def __setstate__(self, state):
|
||||
self.dataroot = state['dataroot']
|
||||
self.variables = COWDictBase.copy()
|
||||
for k, v in state['variables'].items():
|
||||
self.variables[k] = v
|
||||
|
||||
def record(self, *kwonly, **loginfo):
|
||||
if not self.dataroot._tracking:
|
||||
return
|
||||
@@ -261,37 +231,16 @@ class VariableHistory(object):
|
||||
|
||||
if var not in self.variables:
|
||||
self.variables[var] = []
|
||||
if not isinstance(self.variables[var], list):
|
||||
return
|
||||
if 'nodups' in loginfo and loginfo in self.variables[var]:
|
||||
return
|
||||
self.variables[var].append(loginfo.copy())
|
||||
|
||||
def variable(self, var):
|
||||
remote_connector = self.dataroot.getVar('_remote_data', False)
|
||||
if remote_connector:
|
||||
varhistory = remote_connector.getVarHistory(var)
|
||||
else:
|
||||
varhistory = []
|
||||
|
||||
if var in self.variables:
|
||||
varhistory.extend(self.variables[var])
|
||||
return varhistory
|
||||
return self.variables[var]
|
||||
else:
|
||||
return []
|
||||
|
||||
def emit(self, var, oval, val, o, d):
|
||||
def emit(self, var, oval, val, o):
|
||||
history = self.variable(var)
|
||||
|
||||
# Append override history
|
||||
if var in d.overridedata:
|
||||
for (r, override) in d.overridedata[var]:
|
||||
for event in self.variable(r):
|
||||
loginfo = event.copy()
|
||||
if 'flag' in loginfo and not loginfo['flag'].startswith("_"):
|
||||
continue
|
||||
loginfo['variable'] = var
|
||||
loginfo['op'] = 'override[%s]:%s' % (override, loginfo['op'])
|
||||
history.append(loginfo)
|
||||
|
||||
commentVal = re.sub('\n', '\n#', str(oval))
|
||||
if history:
|
||||
if len(history) == 1:
|
||||
@@ -314,7 +263,7 @@ class VariableHistory(object):
|
||||
flag = ''
|
||||
o.write("# %s %s:%s%s\n# %s\"%s\"\n" % (event['op'], event['file'], event['line'], display_func, flag, re.sub('\n', '\n# ', event['detail'])))
|
||||
if len(history) > 1:
|
||||
o.write("# pre-expansion value:\n")
|
||||
o.write("# computed:\n")
|
||||
o.write('# "%s"\n' % (commentVal))
|
||||
else:
|
||||
o.write("#\n# $%s\n# [no history recorded]\n#\n" % var)
|
||||
@@ -338,31 +287,6 @@ class VariableHistory(object):
|
||||
lines.append(line)
|
||||
return lines
|
||||
|
||||
def get_variable_items_files(self, var, d):
|
||||
"""
|
||||
Use variable history to map items added to a list variable and
|
||||
the files in which they were added.
|
||||
"""
|
||||
history = self.variable(var)
|
||||
finalitems = (d.getVar(var) or '').split()
|
||||
filemap = {}
|
||||
isset = False
|
||||
for event in history:
|
||||
if 'flag' in event:
|
||||
continue
|
||||
if event['op'] == '_remove':
|
||||
continue
|
||||
if isset and event['op'] == 'set?':
|
||||
continue
|
||||
isset = True
|
||||
items = d.expand(event['detail']).split()
|
||||
for item in items:
|
||||
# This is a little crude but is belt-and-braces to avoid us
|
||||
# having to handle every possible operation type specifically
|
||||
if item in finalitems and not item in filemap:
|
||||
filemap[item] = event['file']
|
||||
return filemap
|
||||
|
||||
def del_var_history(self, var, f=None, line=None):
|
||||
"""If file f and line are not given, the entire history of var is deleted"""
|
||||
if var in self.variables:
|
||||
@@ -372,23 +296,18 @@ class VariableHistory(object):
|
||||
self.variables[var] = []
|
||||
|
||||
class DataSmart(MutableMapping):
|
||||
def __init__(self):
|
||||
def __init__(self, special = COWDictBase.copy(), seen = COWDictBase.copy() ):
|
||||
self.dict = {}
|
||||
|
||||
self.inchistory = IncludeHistory()
|
||||
self.varhistory = VariableHistory(self)
|
||||
self._tracking = False
|
||||
|
||||
self.expand_cache = {}
|
||||
|
||||
# cookie monster tribute
|
||||
# Need to be careful about writes to overridedata as
|
||||
# its only a shallow copy, could influence other data store
|
||||
# copies!
|
||||
self.overridedata = {}
|
||||
self.overrides = None
|
||||
self.overridevars = set(["OVERRIDES", "FILE"])
|
||||
self.inoverride = False
|
||||
self._special_values = special
|
||||
self._seen_overrides = seen
|
||||
|
||||
self.expand_cache = {}
|
||||
|
||||
def enableTracking(self):
|
||||
self._tracking = True
|
||||
@@ -398,7 +317,7 @@ class DataSmart(MutableMapping):
|
||||
|
||||
def expandWithRefs(self, s, varname):
|
||||
|
||||
if not isinstance(s, str): # sanity check
|
||||
if not isinstance(s, basestring): # sanity check
|
||||
return VariableParse(varname, self, s)
|
||||
|
||||
if varname and varname in self.expand_cache:
|
||||
@@ -410,20 +329,15 @@ class DataSmart(MutableMapping):
|
||||
olds = s
|
||||
try:
|
||||
s = __expand_var_regexp__.sub(varparse.var_sub, s)
|
||||
try:
|
||||
s = __expand_python_regexp__.sub(varparse.python_sub, s)
|
||||
except SyntaxError as e:
|
||||
# Likely unmatched brackets, just don't expand the expression
|
||||
if e.msg != "EOL while scanning string literal":
|
||||
raise
|
||||
s = __expand_python_regexp__.sub(varparse.python_sub, s)
|
||||
if s == olds:
|
||||
break
|
||||
except ExpansionError:
|
||||
raise
|
||||
except bb.parse.SkipRecipe:
|
||||
except bb.parse.SkipPackage:
|
||||
raise
|
||||
except Exception as exc:
|
||||
raise ExpansionError(varname, s, exc) from exc
|
||||
raise ExpansionError(varname, s, exc)
|
||||
|
||||
varparse.value = s
|
||||
|
||||
@@ -435,34 +349,97 @@ class DataSmart(MutableMapping):
|
||||
def expand(self, s, varname = None):
return self.expandWithRefs(s, varname).value


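As an aside, a small illustration (not from the patch; the variable names are invented) of what expandWithRefs() returns and why it matters for dependency tracking:

import bb.data

d = bb.data.init()
d.setVar("TOPDIR", "/srv/build")
d.setVar("PN", "zlib")
d.setVar("FILES_DIR", "${TOPDIR}/files/${PN}")

parsed = d.expandWithRefs(d.getVar("FILES_DIR", False), "FILES_DIR")
print(parsed.value)       # "/srv/build/files/zlib"
print(parsed.references)  # the names the value referenced: {'TOPDIR', 'PN'}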
def finalize(self, parent = False):
|
||||
return
|
||||
|
||||
def internal_finalize(self, parent = False):
|
||||
"""Performs final steps upon the datastore, including application of overrides"""
|
||||
self.overrides = None
|
||||
|
||||
def need_overrides(self):
|
||||
if self.overrides is not None:
|
||||
return
|
||||
if self.inoverride:
|
||||
return
|
||||
for count in range(5):
|
||||
self.inoverride = True
|
||||
# Can end up here recursively so setup dummy values
|
||||
self.overrides = []
|
||||
self.overridesset = set()
|
||||
self.overrides = (self.getVar("OVERRIDES") or "").split(":") or []
|
||||
self.overridesset = set(self.overrides)
|
||||
self.inoverride = False
|
||||
self.expand_cache = {}
|
||||
newoverrides = (self.getVar("OVERRIDES") or "").split(":") or []
|
||||
if newoverrides == self.overrides:
|
||||
break
|
||||
self.overrides = newoverrides
|
||||
self.overridesset = set(self.overrides)
|
||||
else:
|
||||
bb.fatal("Overrides could not be expanded into a stable state after 5 iterations, overrides must be being referenced by other overridden variables in some recursive fashion. Please provide your configuration to bitbake-devel so we can laugh, er, I mean try and understand how to make it work.")
|
||||
overrides = (self.getVar("OVERRIDES", True) or "").split(":") or []
|
||||
finalize_caller = {
|
||||
'op': 'finalize',
|
||||
}
|
||||
infer_caller_details(finalize_caller, parent = parent, varval = False)
|
||||
|
||||
#
|
||||
# Well let us see what breaks here. We used to iterate
|
||||
# over each variable and apply the override and then
|
||||
# do the line expanding.
|
||||
# If we have bad luck - which we will have - the keys
|
||||
# where in some order that is so important for this
|
||||
# method which we don't have anymore.
|
||||
# Anyway we will fix that and write test cases this
|
||||
# time.
|
||||
|
||||
#
|
||||
# First we apply all overrides
|
||||
# Then we will handle _append and _prepend and store the _remove
|
||||
# information for later.
|
||||
#
|
||||
|
||||
# We only want to report finalization once per variable overridden.
|
||||
finalizes_reported = {}
|
||||
|
||||
for o in overrides:
|
||||
# calculate '_'+override
|
||||
l = len(o) + 1
|
||||
|
||||
# see if one should even try
|
||||
if o not in self._seen_overrides:
|
||||
continue
|
||||
|
||||
vars = self._seen_overrides[o].copy()
|
||||
for var in vars:
|
||||
name = var[:-l]
|
||||
try:
|
||||
# Report only once, even if multiple changes.
|
||||
if name not in finalizes_reported:
|
||||
finalizes_reported[name] = True
|
||||
finalize_caller['variable'] = name
|
||||
finalize_caller['detail'] = 'was: ' + str(self.getVar(name, False))
|
||||
self.varhistory.record(**finalize_caller)
|
||||
# Copy history of the override over.
|
||||
for event in self.varhistory.variable(var):
|
||||
loginfo = event.copy()
|
||||
loginfo['variable'] = name
|
||||
loginfo['op'] = 'override[%s]:%s' % (o, loginfo['op'])
|
||||
self.varhistory.record(**loginfo)
|
||||
self.setVar(name, self.getVar(var, False), op = 'finalize', file = 'override[%s]' % o, line = '')
|
||||
self.delVar(var)
|
||||
except Exception:
|
||||
logger.info("Untracked delVar")
|
||||
|
||||
# now on to the appends and prepends, and stashing the removes
|
||||
for op in __setvar_keyword__:
|
||||
if op in self._special_values:
|
||||
appends = self._special_values[op] or []
|
||||
for append in appends:
|
||||
keep = []
|
||||
for (a, o) in self.getVarFlag(append, op) or []:
|
||||
match = True
|
||||
if o:
|
||||
for o2 in o.split("_"):
|
||||
if not o2 in overrides:
|
||||
match = False
|
||||
if not match:
|
||||
keep.append((a ,o))
|
||||
continue
|
||||
|
||||
if op == "_append":
|
||||
sval = self.getVar(append, False) or ""
|
||||
sval += a
|
||||
self.setVar(append, sval)
|
||||
elif op == "_prepend":
|
||||
sval = a + (self.getVar(append, False) or "")
|
||||
self.setVar(append, sval)
|
||||
elif op == "_remove":
|
||||
removes = self.getVarFlag(append, "_removeactive", False) or []
|
||||
removes.extend(a.split())
|
||||
self.setVarFlag(append, "_removeactive", removes, ignore=True)
|
||||
|
||||
# We save overrides that may be applied at some later stage
|
||||
if keep:
|
||||
self.setVarFlag(append, op, keep, ignore=True)
|
||||
else:
|
||||
self.delVarFlag(append, op, ignore=True)
|
||||
|
||||
def initVar(self, var):
|
||||
self.expand_cache = {}
|
||||
@@ -473,22 +450,17 @@ class DataSmart(MutableMapping):
|
||||
dest = self.dict
|
||||
while dest:
|
||||
if var in dest:
|
||||
return dest[var], self.overridedata.get(var, None)
|
||||
|
||||
if "_remote_data" in dest:
|
||||
connector = dest["_remote_data"]["_content"]
|
||||
return connector.getVar(var)
|
||||
return dest[var]
|
||||
|
||||
if "_data" not in dest:
|
||||
break
|
||||
dest = dest["_data"]
|
||||
return None, self.overridedata.get(var, None)
|
||||
|
||||
def _makeShadowCopy(self, var):
|
||||
if var in self.dict:
|
||||
return
|
||||
|
||||
local_var, _ = self._findVar(var)
|
||||
local_var = self._findVar(var)
|
||||
|
||||
if local_var:
|
||||
self.dict[var] = copy.copy(local_var)
|
||||
@@ -498,16 +470,6 @@ class DataSmart(MutableMapping):
|
||||
|
||||
def setVar(self, var, value, **loginfo):
|
||||
#print("var=" + str(var) + " val=" + str(value))
|
||||
parsing=False
|
||||
if 'parsing' in loginfo:
|
||||
parsing=True
|
||||
|
||||
if '_remote_data' in self.dict:
|
||||
connector = self.dict["_remote_data"]["_content"]
|
||||
res = connector.setVar(var, value)
|
||||
if not res:
|
||||
return
|
||||
|
||||
if 'op' not in loginfo:
|
||||
loginfo['op'] = "set"
|
||||
self.expand_cache = {}
|
||||
@@ -516,7 +478,7 @@ class DataSmart(MutableMapping):
|
||||
base = match.group('base')
|
||||
keyword = match.group("keyword")
|
||||
override = match.group('add')
|
||||
l = self.getVarFlag(base, keyword, False) or []
|
||||
l = self.getVarFlag(base, keyword) or []
|
||||
l.append([value, override])
|
||||
self.setVarFlag(base, keyword, l, ignore=True)
|
||||
# And cause that to be recorded:
|
||||
@@ -529,119 +491,60 @@ class DataSmart(MutableMapping):
|
||||
self.varhistory.record(**loginfo)
|
||||
# todo make sure keyword is not __doc__ or __module__
|
||||
# pay the cookie monster
|
||||
try:
|
||||
self._special_values[keyword].add(base)
|
||||
except KeyError:
|
||||
self._special_values[keyword] = set()
|
||||
self._special_values[keyword].add(base)
|
||||
|
||||
# more cookies for the cookie monster
|
||||
if '_' in var:
|
||||
self._setvar_update_overrides(base, **loginfo)
|
||||
|
||||
if base in self.overridevars:
|
||||
self._setvar_update_overridevars(var, value)
|
||||
return
|
||||
|
||||
if not var in self.dict:
|
||||
self._makeShadowCopy(var)
|
||||
|
||||
if not parsing:
|
||||
if "_append" in self.dict[var]:
|
||||
del self.dict[var]["_append"]
|
||||
if "_prepend" in self.dict[var]:
|
||||
del self.dict[var]["_prepend"]
|
||||
if "_remove" in self.dict[var]:
|
||||
del self.dict[var]["_remove"]
|
||||
if var in self.overridedata:
|
||||
active = []
|
||||
self.need_overrides()
|
||||
for (r, o) in self.overridedata[var]:
|
||||
if o in self.overridesset:
|
||||
active.append(r)
|
||||
elif "_" in o:
|
||||
if set(o.split("_")).issubset(self.overridesset):
|
||||
active.append(r)
|
||||
for a in active:
|
||||
self.delVar(a)
|
||||
del self.overridedata[var]
|
||||
|
||||
# more cookies for the cookie monster
|
||||
if '_' in var:
|
||||
self._setvar_update_overrides(var, **loginfo)
|
||||
self._setvar_update_overrides(var)
|
||||
|
||||
# setting var
|
||||
self.dict[var]["_content"] = value
|
||||
self.varhistory.record(**loginfo)
|
||||
|
||||
if var in self.overridevars:
|
||||
self._setvar_update_overridevars(var, value)
|
||||
|
||||
def _setvar_update_overridevars(self, var, value):
|
||||
vardata = self.expandWithRefs(value, var)
|
||||
new = vardata.references
|
||||
new.update(vardata.contains.keys())
|
||||
while not new.issubset(self.overridevars):
|
||||
nextnew = set()
|
||||
self.overridevars.update(new)
|
||||
for i in new:
|
||||
vardata = self.expandWithRefs(self.getVar(i), i)
|
||||
nextnew.update(vardata.references)
|
||||
nextnew.update(vardata.contains.keys())
|
||||
new = nextnew
|
||||
self.internal_finalize(True)
|
||||
|
||||
def _setvar_update_overrides(self, var, **loginfo):
|
||||
def _setvar_update_overrides(self, var):
|
||||
# aka pay the cookie monster
|
||||
override = var[var.rfind('_')+1:]
|
||||
shortvar = var[:var.rfind('_')]
|
||||
while override and override.islower():
|
||||
if shortvar not in self.overridedata:
|
||||
self.overridedata[shortvar] = []
|
||||
if [var, override] not in self.overridedata[shortvar]:
|
||||
# Force CoW by recreating the list first
|
||||
self.overridedata[shortvar] = list(self.overridedata[shortvar])
|
||||
self.overridedata[shortvar].append([var, override])
|
||||
override = None
|
||||
if "_" in shortvar:
|
||||
override = var[shortvar.rfind('_')+1:]
|
||||
shortvar = var[:shortvar.rfind('_')]
|
||||
if len(shortvar) == 0:
|
||||
override = None
|
||||
if len(override) > 0:
|
||||
if override not in self._seen_overrides:
|
||||
self._seen_overrides[override] = set()
|
||||
self._seen_overrides[override].add( var )
|
||||
|
||||
def getVar(self, var, expand=True, noweakdefault=False, parsing=False):
return self.getVarFlag(var, "_content", expand, noweakdefault, parsing)
def getVar(self, var, expand=False, noweakdefault=False):
return self.getVarFlag(var, "_content", expand, noweakdefault)

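To make the signature change concrete, a short sketch assuming the newer default (expand=True) introduced in this series; the variable names and values are invented:

import bb.data

d = bb.data.init()
d.setVar("WORKDIR", "/tmp/work")
d.setVar("PN", "zlib")
d.setVar("S", "${WORKDIR}/${PN}-1.2.11")

print(d.getVar("S"))         # newer style: expanded by default
print(d.getVar("S", True))   # older style: expansion had to be requested
print(d.getVar("S", False))  # unexpanded: "${WORKDIR}/${PN}-1.2.11"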
def renameVar(self, key, newkey, **loginfo):
|
||||
"""
|
||||
Rename the variable key to newkey
|
||||
"""
|
||||
if '_remote_data' in self.dict:
|
||||
connector = self.dict["_remote_data"]["_content"]
|
||||
res = connector.renameVar(key, newkey)
|
||||
if not res:
|
||||
return
|
||||
|
||||
val = self.getVar(key, 0, parsing=True)
|
||||
val = self.getVar(key, 0)
|
||||
if val is not None:
|
||||
loginfo['variable'] = newkey
|
||||
loginfo['op'] = 'rename from %s' % key
|
||||
loginfo['detail'] = val
|
||||
self.varhistory.record(**loginfo)
|
||||
self.setVar(newkey, val, ignore=True, parsing=True)
|
||||
self.setVar(newkey, val, ignore=True)
|
||||
|
||||
for i in (__setvar_keyword__):
|
||||
src = self.getVarFlag(key, i, False)
|
||||
src = self.getVarFlag(key, i)
|
||||
if src is None:
|
||||
continue
|
||||
|
||||
dest = self.getVarFlag(newkey, i, False) or []
|
||||
dest = self.getVarFlag(newkey, i) or []
|
||||
dest.extend(src)
|
||||
self.setVarFlag(newkey, i, dest, ignore=True)
|
||||
|
||||
if key in self.overridedata:
|
||||
self.overridedata[newkey] = []
|
||||
for (v, o) in self.overridedata[key]:
|
||||
self.overridedata[newkey].append([v.replace(key, newkey), o])
|
||||
self.renameVar(v, v.replace(key, newkey))
|
||||
|
||||
if '_' in newkey and val is None:
|
||||
self._setvar_update_overrides(newkey, **loginfo)
|
||||
if i in self._special_values and key in self._special_values[i]:
|
||||
self._special_values[i].remove(key)
|
||||
self._special_values[i].add(newkey)
|
||||
|
||||
loginfo['variable'] = key
|
||||
loginfo['op'] = 'rename (to)'
|
||||
@@ -652,53 +555,27 @@ class DataSmart(MutableMapping):
|
||||
def appendVar(self, var, value, **loginfo):
|
||||
loginfo['op'] = 'append'
|
||||
self.varhistory.record(**loginfo)
|
||||
self.setVar(var + "_append", value, ignore=True, parsing=True)
|
||||
newvalue = (self.getVar(var, False) or "") + value
|
||||
self.setVar(var, newvalue, ignore=True)
|
||||
|
||||
def prependVar(self, var, value, **loginfo):
|
||||
loginfo['op'] = 'prepend'
|
||||
self.varhistory.record(**loginfo)
|
||||
self.setVar(var + "_prepend", value, ignore=True, parsing=True)
|
||||
newvalue = value + (self.getVar(var, False) or "")
|
||||
self.setVar(var, newvalue, ignore=True)
|
||||
|
||||
def delVar(self, var, **loginfo):
|
||||
if '_remote_data' in self.dict:
|
||||
connector = self.dict["_remote_data"]["_content"]
|
||||
res = connector.delVar(var)
|
||||
if not res:
|
||||
return
|
||||
|
||||
loginfo['detail'] = ""
|
||||
loginfo['op'] = 'del'
|
||||
self.varhistory.record(**loginfo)
|
||||
self.expand_cache = {}
|
||||
self.dict[var] = {}
|
||||
if var in self.overridedata:
|
||||
del self.overridedata[var]
|
||||
if '_' in var:
|
||||
override = var[var.rfind('_')+1:]
|
||||
shortvar = var[:var.rfind('_')]
|
||||
while override and override.islower():
|
||||
try:
|
||||
if shortvar in self.overridedata:
|
||||
# Force CoW by recreating the list first
|
||||
self.overridedata[shortvar] = list(self.overridedata[shortvar])
|
||||
self.overridedata[shortvar].remove([var, override])
|
||||
except ValueError as e:
|
||||
pass
|
||||
override = None
|
||||
if "_" in shortvar:
|
||||
override = var[shortvar.rfind('_')+1:]
|
||||
shortvar = var[:shortvar.rfind('_')]
|
||||
if len(shortvar) == 0:
|
||||
override = None
|
||||
if override and override in self._seen_overrides and var in self._seen_overrides[override]:
|
||||
self._seen_overrides[override].remove(var)
|
||||
|
||||
def setVarFlag(self, var, flag, value, **loginfo):
|
||||
if '_remote_data' in self.dict:
|
||||
connector = self.dict["_remote_data"]["_content"]
|
||||
res = connector.setVarFlag(var, flag, value)
|
||||
if not res:
|
||||
return
|
||||
|
||||
self.expand_cache = {}
|
||||
if 'op' not in loginfo:
|
||||
loginfo['op'] = "set"
|
||||
loginfo['flag'] = flag
|
||||
@@ -707,10 +584,8 @@ class DataSmart(MutableMapping):
|
||||
self._makeShadowCopy(var)
|
||||
self.dict[var][flag] = value
|
||||
|
||||
if flag == "_defaultval" and '_' in var:
|
||||
self._setvar_update_overrides(var, **loginfo)
|
||||
if flag == "_defaultval" and var in self.overridevars:
|
||||
self._setvar_update_overridevars(var, value)
|
||||
if flag == "defaultval" and '_' in var:
|
||||
self._setvar_update_overrides(var)
|
||||
|
||||
if flag == "unexport" or flag == "export":
|
||||
if not "__exportlist" in self.dict:
|
||||
@@ -719,71 +594,14 @@ class DataSmart(MutableMapping):
|
||||
self.dict["__exportlist"]["_content"] = set()
|
||||
self.dict["__exportlist"]["_content"].add(var)
|
||||
|
||||
def getVarFlag(self, var, flag, expand=True, noweakdefault=False, parsing=False):
|
||||
local_var, overridedata = self._findVar(var)
|
||||
def getVarFlag(self, var, flag, expand=False, noweakdefault=False):
|
||||
local_var = self._findVar(var)
|
||||
value = None
|
||||
if flag == "_content" and overridedata is not None and not parsing:
|
||||
match = False
|
||||
active = {}
|
||||
self.need_overrides()
|
||||
for (r, o) in overridedata:
|
||||
# What about double overrides both with "_" in the name?
|
||||
if o in self.overridesset:
|
||||
active[o] = r
|
||||
elif "_" in o:
|
||||
if set(o.split("_")).issubset(self.overridesset):
|
||||
active[o] = r
|
||||
|
||||
mod = True
|
||||
while mod:
|
||||
mod = False
|
||||
for o in self.overrides:
|
||||
for a in active.copy():
|
||||
if a.endswith("_" + o):
|
||||
t = active[a]
|
||||
del active[a]
|
||||
active[a.replace("_" + o, "")] = t
|
||||
mod = True
|
||||
elif a == o:
|
||||
match = active[a]
|
||||
del active[a]
|
||||
if match:
|
||||
value = self.getVar(match, False)
|
||||
|
||||
if local_var is not None and value is None:
|
||||
if local_var is not None:
|
||||
if flag in local_var:
|
||||
value = copy.copy(local_var[flag])
|
||||
elif flag == "_content" and "_defaultval" in local_var and not noweakdefault:
|
||||
value = copy.copy(local_var["_defaultval"])
|
||||
|
||||
|
||||
if flag == "_content" and local_var is not None and "_append" in local_var and not parsing:
|
||||
if not value:
|
||||
value = ""
|
||||
self.need_overrides()
|
||||
for (r, o) in local_var["_append"]:
|
||||
match = True
|
||||
if o:
|
||||
for o2 in o.split("_"):
|
||||
if not o2 in self.overrides:
|
||||
match = False
|
||||
if match:
|
||||
value = value + r
|
||||
|
||||
if flag == "_content" and local_var is not None and "_prepend" in local_var and not parsing:
|
||||
if not value:
|
||||
value = ""
|
||||
self.need_overrides()
|
||||
for (r, o) in local_var["_prepend"]:
|
||||
|
||||
match = True
|
||||
if o:
|
||||
for o2 in o.split("_"):
|
||||
if not o2 in self.overrides:
|
||||
match = False
|
||||
if match:
|
||||
value = r + value
|
||||
|
||||
elif flag == "_content" and "defaultval" in local_var and not noweakdefault:
|
||||
value = copy.copy(local_var["defaultval"])
|
||||
if expand and value:
|
||||
# Only getvar (flag == _content) hits the expand cache
|
||||
cachename = None
|
||||
@@ -792,38 +610,18 @@ class DataSmart(MutableMapping):
|
||||
else:
|
||||
cachename = var + "[" + flag + "]"
|
||||
value = self.expand(value, cachename)
|
||||
|
||||
if value and flag == "_content" and local_var is not None and "_remove" in local_var:
|
||||
removes = []
|
||||
self.need_overrides()
|
||||
for (r, o) in local_var["_remove"]:
|
||||
match = True
|
||||
if o:
|
||||
for o2 in o.split("_"):
|
||||
if not o2 in self.overrides:
|
||||
match = False
|
||||
if match:
|
||||
removes.extend(self.expand(r).split())
|
||||
|
||||
if removes:
|
||||
filtered = filter(lambda v: v not in removes,
|
||||
value.split())
|
||||
value = " ".join(filtered)
|
||||
if expand and var in self.expand_cache:
|
||||
# We need to ensure the expand cache has the correct value
|
||||
# flag == "_content" here
|
||||
self.expand_cache[var].value = value
|
||||
if value and flag == "_content" and local_var is not None and "_removeactive" in local_var:
|
||||
filtered = filter(lambda v: v not in local_var["_removeactive"],
|
||||
value.split(" "))
|
||||
value = " ".join(filtered)
|
||||
if expand:
|
||||
# We need to ensure the expand cache has the correct value
|
||||
# flag == "_content" here
|
||||
self.expand_cache[var].value = value
|
||||
return value

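For reference, a sketch of the override resolution that getVarFlag() performs for "_content" in the newer code above (values invented; with the older finalize-based code path, bb.data.update_data(d) would have to run first):

import bb.data

d = bb.data.init()
d.setVar("OVERRIDES", "arm:qemuarm")
d.setVar("CFLAGS", "-O2")
d.setVar("CFLAGS_arm", "-O2 -mthumb")    # active, "arm" is in OVERRIDES
d.setVar("CFLAGS_mips", "-O2 -mips16")   # inactive, "mips" is not

print(d.getVar("CFLAGS"))  # "-O2 -mthumb" once overrides are applied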
def delVarFlag(self, var, flag, **loginfo):
|
||||
if '_remote_data' in self.dict:
|
||||
connector = self.dict["_remote_data"]["_content"]
|
||||
res = connector.delVarFlag(var, flag)
|
||||
if not res:
|
||||
return
|
||||
|
||||
self.expand_cache = {}
|
||||
local_var, _ = self._findVar(var)
|
||||
local_var = self._findVar(var)
|
||||
if not local_var:
|
||||
return
|
||||
if not var in self.dict:
|
||||
@@ -852,7 +650,6 @@ class DataSmart(MutableMapping):
|
||||
self.setVarFlag(var, flag, newvalue, ignore=True)
|
||||
|
||||
def setVarFlags(self, var, flags, **loginfo):
|
||||
self.expand_cache = {}
|
||||
infer_caller_details(loginfo)
|
||||
if not var in self.dict:
|
||||
self._makeShadowCopy(var)
|
||||
@@ -866,7 +663,7 @@ class DataSmart(MutableMapping):
|
||||
self.dict[var][i] = flags[i]
|
||||
|
||||
def getVarFlags(self, var, expand = False, internalflags=False):
|
||||
local_var, _ = self._findVar(var)
|
||||
local_var = self._findVar(var)
|
||||
flags = {}
|
||||
|
||||
if local_var:
|
||||
@@ -882,7 +679,6 @@ class DataSmart(MutableMapping):
|
||||
|
||||
|
||||
def delVarFlags(self, var, **loginfo):
|
||||
self.expand_cache = {}
|
||||
if not var in self.dict:
|
||||
self._makeShadowCopy(var)
|
||||
|
||||
@@ -900,25 +696,20 @@ class DataSmart(MutableMapping):
|
||||
else:
|
||||
del self.dict[var]
|
||||
|
||||
|
||||
def createCopy(self):
|
||||
"""
|
||||
Create a copy of self by setting _data to self
|
||||
"""
|
||||
# we really want this to be a DataSmart...
|
||||
data = DataSmart()
|
||||
data = DataSmart(seen=self._seen_overrides.copy(), special=self._special_values.copy())
|
||||
data.dict["_data"] = self.dict
|
||||
data.varhistory = self.varhistory.copy()
|
||||
data.varhistory.dataroot = data
|
||||
data.varhistory.datasmart = data
|
||||
data.inchistory = self.inchistory.copy()
|
||||
|
||||
data._tracking = self._tracking
|
||||
|
||||
data.overrides = None
|
||||
data.overridevars = copy.copy(self.overridevars)
|
||||
# Should really be a deepcopy but has heavy overhead.
|
||||
# Instead, we're careful with writes.
|
||||
data.overridedata = copy.copy(self.overridedata)
|
||||
|
||||
return data
|
||||
|
||||
def expandVarref(self, variable, parents=False):
|
||||
@@ -939,55 +730,29 @@ class DataSmart(MutableMapping):
|
||||
|
||||
def localkeys(self):
|
||||
for key in self.dict:
|
||||
if key not in ['_data', '_remote_data']:
|
||||
if key != '_data':
|
||||
yield key
|
||||
|
||||
def __iter__(self):
|
||||
deleted = set()
|
||||
overrides = set()
|
||||
def keylist(d):
|
||||
klist = set()
|
||||
for key in d:
|
||||
if key in ["_data", "_remote_data"]:
|
||||
continue
|
||||
if key in deleted:
|
||||
continue
|
||||
if key in overrides:
|
||||
if key == "_data":
|
||||
continue
|
||||
if not d[key]:
|
||||
deleted.add(key)
|
||||
continue
|
||||
klist.add(key)
|
||||
|
||||
if "_data" in d:
|
||||
klist |= keylist(d["_data"])
|
||||
|
||||
if "_remote_data" in d:
|
||||
connector = d["_remote_data"]["_content"]
|
||||
for key in connector.getKeys():
|
||||
if key in deleted:
|
||||
continue
|
||||
klist.add(key)
|
||||
|
||||
return klist
|
||||
|
||||
self.need_overrides()
|
||||
for var in self.overridedata:
|
||||
for (r, o) in self.overridedata[var]:
|
||||
if o in self.overridesset:
|
||||
overrides.add(var)
|
||||
elif "_" in o:
|
||||
if set(o.split("_")).issubset(self.overridesset):
|
||||
overrides.add(var)
|
||||
|
||||
for k in keylist(self.dict):
|
||||
yield k
|
||||
|
||||
for k in overrides:
|
||||
yield k
|
||||
|
||||
def __len__(self):
|
||||
return len(frozenset(iter(self)))
|
||||
return len(frozenset(self))
|
||||
|
||||
def __getitem__(self, item):
|
||||
value = self.getVar(item, False)
|
||||
@@ -1006,8 +771,9 @@ class DataSmart(MutableMapping):
|
||||
data = {}
|
||||
d = self.createCopy()
|
||||
bb.data.expandKeys(d)
|
||||
bb.data.update_data(d)
|
||||
|
||||
config_whitelist = set((d.getVar("BB_HASHCONFIG_WHITELIST") or "").split())
|
||||
config_whitelist = set((d.getVar("BB_HASHCONFIG_WHITELIST", True) or "").split())
|
||||
keys = set(key for key in iter(d) if not key.startswith("__"))
|
||||
for key in keys:
|
||||
if key in config_whitelist:
|
||||
@@ -1026,12 +792,13 @@ class DataSmart(MutableMapping):
|
||||
|
||||
for key in ["__BBTASKS", "__BBANONFUNCS", "__BBHANDLERS"]:
|
||||
bb_list = d.getVar(key, False) or []
|
||||
bb_list.sort()
|
||||
data.update({key:str(bb_list)})
|
||||
|
||||
if key == "__BBANONFUNCS":
|
||||
for i in bb_list:
|
||||
value = d.getVar(i, False) or ""
|
||||
value = d.getVar(i, True) or ""
|
||||
data.update({i:value})
|
||||
|
||||
data_str = str([(k, data[k]) for k in sorted(data.keys())])
return hashlib.md5(data_str.encode("utf-8")).hexdigest()
return hashlib.md5(data_str).hexdigest()

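A brief aside on the .encode() change above: on Python 3, hashlib only accepts bytes, so the joined string must be encoded before hashing (the sample data is invented):

import hashlib

data_str = str([("PN", "zlib"), ("PV", "1.2.11")])
print(hashlib.md5(data_str.encode("utf-8")).hexdigest())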
@@ -24,13 +24,13 @@ BitBake build tools.
|
||||
|
||||
import os, sys
|
||||
import warnings
|
||||
import pickle
|
||||
try:
|
||||
import cPickle as pickle
|
||||
except ImportError:
|
||||
import pickle
|
||||
import logging
|
||||
import atexit
|
||||
import traceback
|
||||
import ast
|
||||
import threading
|
||||
|
||||
import bb.utils
|
||||
import bb.compat
|
||||
import bb.exceptions
|
||||
@@ -48,16 +48,6 @@ class Event(object):
|
||||
def __init__(self):
|
||||
self.pid = worker_pid
|
||||
|
||||
|
||||
class HeartbeatEvent(Event):
"""Triggered at regular time intervals of 10 seconds. Other events can fire much more often
(runQueueTaskStarted when there are many short tasks) or not at all for long periods
of time (again runQueueTaskStarted, when there is just one long-running task), so this
event is more suitable for doing some task-independent work occasionally."""
def __init__(self, time):
Event.__init__(self)
self.time = time

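As an illustration of how such events are consumed (not part of the patch; the handler name is invented), a class handler can be registered for HeartbeatEvent through bb.event.register():

import bb.event

def heartbeat_watcher(e):
    # e.time carries the timestamp stored by HeartbeatEvent.__init__()
    print("heartbeat at %s" % e.time)

bb.event.register("heartbeat_watcher", heartbeat_watcher,
                  mask=["bb.event.HeartbeatEvent"])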
Registered = 10
|
||||
AlreadyRegistered = 14
|
||||
|
||||
@@ -65,7 +55,6 @@ def get_class_handlers():
|
||||
return _handlers
|
||||
|
||||
def set_class_handlers(h):
|
||||
global _handlers
|
||||
_handlers = h
|
||||
|
||||
def clean_class_handlers():
|
||||
@@ -78,33 +67,12 @@ _ui_logfilters = {}
|
||||
_ui_handler_seq = 0
|
||||
_event_handler_map = {}
|
||||
_catchall_handlers = {}
|
||||
_eventfilter = None
|
||||
_uiready = False
|
||||
_thread_lock = threading.Lock()
|
||||
_thread_lock_enabled = False
|
||||
|
||||
if hasattr(__builtins__, '__setitem__'):
|
||||
builtins = __builtins__
|
||||
else:
|
||||
builtins = __builtins__.__dict__
|
||||
|
||||
def enable_threadlock():
|
||||
global _thread_lock_enabled
|
||||
_thread_lock_enabled = True
|
||||
|
||||
def disable_threadlock():
|
||||
global _thread_lock_enabled
|
||||
_thread_lock_enabled = False
|
||||
|
||||
def execute_handler(name, handler, event, d):
|
||||
event.data = d
|
||||
addedd = False
|
||||
if 'd' not in builtins:
|
||||
builtins['d'] = d
|
||||
addedd = True
|
||||
try:
|
||||
ret = handler(event)
|
||||
except (bb.parse.SkipRecipe, bb.BBHandledException):
|
||||
except bb.parse.SkipPackage:
|
||||
raise
|
||||
except Exception:
|
||||
etype, value, tb = sys.exc_info()
|
||||
@@ -117,8 +85,6 @@ def execute_handler(name, handler, event, d):
|
||||
raise
|
||||
finally:
|
||||
del event.data
|
||||
if addedd:
|
||||
del builtins['d']
|
||||
|
||||
def fire_class_handlers(event, d):
|
||||
if isinstance(event, logging.LogRecord):
|
||||
@@ -126,12 +92,12 @@ def fire_class_handlers(event, d):
|
||||
|
||||
eid = str(event.__class__)[8:-2]
|
||||
evt_hmap = _event_handler_map.get(eid, {})
|
||||
for name, handler in list(_handlers.items()):
|
||||
for name, handler in _handlers.iteritems():
|
||||
if name in _catchall_handlers or name in evt_hmap:
|
||||
if _eventfilter:
|
||||
if not _eventfilter(name, handler, event, d):
|
||||
continue
|
||||
execute_handler(name, handler, event, d)
|
||||
try:
|
||||
execute_handler(name, handler, event, d)
|
||||
except Exception:
|
||||
continue
|
||||
|
||||
ui_queue = []
|
||||
@atexit.register
|
||||
@@ -139,57 +105,33 @@ def print_ui_queue():
|
||||
"""If we're exiting before a UI has been spawned, display any queued
|
||||
LogRecords to the console."""
|
||||
logger = logging.getLogger("BitBake")
|
||||
if not _uiready:
|
||||
if not _ui_handlers:
|
||||
from bb.msg import BBLogFormatter
|
||||
stdout = logging.StreamHandler(sys.stdout)
|
||||
stderr = logging.StreamHandler(sys.stderr)
|
||||
formatter = BBLogFormatter("%(levelname)s: %(message)s")
|
||||
stdout.setFormatter(formatter)
|
||||
stderr.setFormatter(formatter)
|
||||
console = logging.StreamHandler(sys.stdout)
|
||||
console.setFormatter(BBLogFormatter("%(levelname)s: %(message)s"))
|
||||
logger.handlers = [console]
|
||||
|
||||
# First check to see if we have any proper messages
|
||||
msgprint = False
|
||||
msgerrs = False
|
||||
|
||||
# Should we print to stderr?
|
||||
for event in ui_queue[:]:
|
||||
if isinstance(event, logging.LogRecord) and event.levelno >= logging.WARNING:
|
||||
msgerrs = True
|
||||
break
|
||||
|
||||
if msgerrs:
|
||||
logger.addHandler(stderr)
|
||||
else:
|
||||
logger.addHandler(stdout)
|
||||
|
||||
for event in ui_queue[:]:
|
||||
for event in ui_queue:
|
||||
if isinstance(event, logging.LogRecord):
|
||||
if event.levelno > logging.DEBUG:
|
||||
logger.handle(event)
|
||||
msgprint = True
|
||||
if msgprint:
|
||||
return
|
||||
|
||||
# Nope, so just print all of the messages we have (including debug messages)
|
||||
if not msgprint:
|
||||
for event in ui_queue[:]:
|
||||
if isinstance(event, logging.LogRecord):
|
||||
logger.handle(event)
|
||||
if msgerrs:
|
||||
logger.removeHandler(stderr)
|
||||
else:
|
||||
logger.removeHandler(stdout)
|
||||
for event in ui_queue:
|
||||
if isinstance(event, logging.LogRecord):
|
||||
logger.handle(event)
|
||||
|
||||
def fire_ui_handlers(event, d):
|
||||
global _thread_lock
|
||||
global _thread_lock_enabled
|
||||
|
||||
if not _uiready:
|
||||
if not _ui_handlers:
|
||||
# No UI handlers registered yet, queue up the messages
|
||||
ui_queue.append(event)
|
||||
return
|
||||
|
||||
if _thread_lock_enabled:
|
||||
_thread_lock.acquire()
|
||||
|
||||
errors = []
|
||||
for h in _ui_handlers:
|
||||
#print "Sending event %s" % event
|
||||
@@ -208,9 +150,6 @@ def fire_ui_handlers(event, d):
|
||||
for h in errors:
|
||||
del _ui_handlers[h]
|
||||
|
||||
if _thread_lock_enabled:
|
||||
_thread_lock.release()
|
||||
|
||||
def fire(event, d):
|
||||
"""Fire off an Event"""
|
||||
|
||||
@@ -223,19 +162,13 @@ def fire(event, d):
|
||||
if worker_fire:
|
||||
worker_fire(event, d)
|
||||
else:
|
||||
# If messages have been queued up, clear the queue
|
||||
global _uiready, ui_queue
|
||||
if _uiready and ui_queue:
|
||||
for queue_event in ui_queue:
|
||||
fire_ui_handlers(queue_event, d)
|
||||
ui_queue = []
|
||||
fire_ui_handlers(event, d)
|
||||
|
||||
def fire_from_worker(event, d):
|
||||
fire_ui_handlers(event, d)
|
||||
|
||||
noop = lambda _: None
|
||||
def register(name, handler, mask=None, filename=None, lineno=None):
def register(name, handler, mask=[]):
"""Register an Event handler"""

# already registered
|
||||
@@ -244,18 +177,10 @@ def register(name, handler, mask=None, filename=None, lineno=None):
|
||||
|
||||
if handler is not None:
|
||||
# handle string containing python code
|
||||
if isinstance(handler, str):
|
||||
if isinstance(handler, basestring):
|
||||
tmp = "def %s(e):\n%s" % (name, handler)
|
||||
try:
|
||||
code = bb.methodpool.compile_cache(tmp)
|
||||
if not code:
|
||||
if filename is None:
|
||||
filename = "%s(e)" % name
|
||||
code = compile(tmp, filename, "exec", ast.PyCF_ONLY_AST)
|
||||
if lineno is not None:
|
||||
ast.increment_lineno(code, lineno-1)
|
||||
code = compile(code, filename, "exec")
|
||||
bb.methodpool.compile_cache_add(tmp, code)
|
||||
code = compile(tmp, "%s(e)" % name, "exec")
|
||||
except SyntaxError:
|
||||
logger.error("Unable to register event handler '%s':\n%s", name,
|
||||
''.join(traceback.format_exc(limit=0)))
|
||||
@@ -281,46 +206,19 @@ def register(name, handler, mask=None, filename=None, lineno=None):
|
||||
def remove(name, handler):
|
||||
"""Remove an Event handler"""
|
||||
_handlers.pop(name)
|
||||
if name in _catchall_handlers:
|
||||
_catchall_handlers.pop(name)
|
||||
for event in _event_handler_map.keys():
|
||||
if name in _event_handler_map[event]:
|
||||
_event_handler_map[event].pop(name)
|
||||
|
||||
def get_handlers():
|
||||
return _handlers
|
||||
|
||||
def set_handlers(handlers):
|
||||
global _handlers
|
||||
_handlers = handlers
|
||||
|
||||
def set_eventfilter(func):
|
||||
global _eventfilter
|
||||
_eventfilter = func
|
||||
|
||||
def register_UIHhandler(handler, mainui=False):
|
||||
def register_UIHhandler(handler):
|
||||
bb.event._ui_handler_seq = bb.event._ui_handler_seq + 1
|
||||
_ui_handlers[_ui_handler_seq] = handler
|
||||
level, debug_domains = bb.msg.constructLogOptions()
|
||||
_ui_logfilters[_ui_handler_seq] = UIEventFilter(level, debug_domains)
|
||||
if mainui:
|
||||
global _uiready
|
||||
_uiready = _ui_handler_seq
|
||||
return _ui_handler_seq
|
||||
|
||||
def unregister_UIHhandler(handlerNum, mainui=False):
|
||||
if mainui:
|
||||
global _uiready
|
||||
_uiready = False
|
||||
def unregister_UIHhandler(handlerNum):
|
||||
if handlerNum in _ui_handlers:
|
||||
del _ui_handlers[handlerNum]
|
||||
return
|
||||
|
||||
def get_uihandler():
|
||||
if _uiready is False:
|
||||
return None
|
||||
return _uiready
|
||||
|
||||
# Class to allow filtering of events and specific filtering of LogRecords *before* we put them over the IPC
|
||||
class UIEventFilter(object):
|
||||
def __init__(self, level, debug_domains):
|
||||
@@ -383,12 +281,6 @@ class OperationProgress(Event):
|
||||
class ConfigParsed(Event):
|
||||
"""Configuration Parsing Complete"""
|
||||
|
||||
class MultiConfigParsed(Event):
|
||||
"""Multi-Config Parsing Complete"""
|
||||
def __init__(self, mcdata):
|
||||
self.mcdata = mcdata
|
||||
Event.__init__(self)
|
||||
|
||||
class RecipeEvent(Event):
|
||||
def __init__(self, fn):
|
||||
self.fn = fn
|
||||
@@ -397,17 +289,6 @@ class RecipeEvent(Event):
|
||||
class RecipePreFinalise(RecipeEvent):
|
||||
""" Recipe Parsing Complete but not yet finialised"""
|
||||
|
||||
class RecipeTaskPreProcess(RecipeEvent):
|
||||
"""
|
||||
Recipe Tasks about to be finalised
|
||||
The list of tasks should be final at this point and handlers
|
||||
are only able to change interdependencies
|
||||
"""
|
||||
def __init__(self, fn, tasklist):
|
||||
self.fn = fn
|
||||
self.tasklist = tasklist
|
||||
Event.__init__(self)
|
||||
|
||||
class RecipeParsed(RecipeEvent):
|
||||
""" Recipe Parsing Complete """
|
||||
|
||||
@@ -429,7 +310,7 @@ class StampUpdate(Event):
|
||||
targets = property(getTargets)
|
||||
|
||||
class BuildBase(Event):
|
||||
"""Base class for bitbake build events"""
|
||||
"""Base class for bbmake run events"""
|
||||
|
||||
def __init__(self, n, p, failures = 0):
|
||||
self._name = n
|
||||
@@ -449,6 +330,12 @@ class BuildBase(Event):
|
||||
def setName(self, name):
|
||||
self._name = name
|
||||
|
||||
def getCfg(self):
|
||||
return self.data
|
||||
|
||||
def setCfg(self, cfg):
|
||||
self.data = cfg
|
||||
|
||||
def getFailures(self):
|
||||
"""
|
||||
Return the number of failed packages
|
||||
@@ -457,27 +344,25 @@ class BuildBase(Event):
|
||||
|
||||
pkgs = property(getPkgs, setPkgs, None, "pkgs property")
|
||||
name = property(getName, setName, None, "name property")
|
||||
cfg = property(getCfg, setCfg, None, "cfg property")
|
||||
|
||||
|
||||
|
||||
|
||||
class BuildInit(BuildBase):
|
||||
"""buildFile or buildTargets was invoked"""
|
||||
def __init__(self, p=[]):
|
||||
name = None
|
||||
BuildBase.__init__(self, name, p)
|
||||
|
||||
class BuildStarted(BuildBase, OperationStarted):
|
||||
"""Event when builds start"""
|
||||
"""bbmake build run started"""
|
||||
def __init__(self, n, p, failures = 0):
|
||||
OperationStarted.__init__(self, "Building Started")
|
||||
BuildBase.__init__(self, n, p, failures)
|
||||
|
||||
class BuildCompleted(BuildBase, OperationCompleted):
|
||||
"""Event when builds have completed"""
|
||||
def __init__(self, total, n, p, failures=0, interrupted=0):
|
||||
"""bbmake build run completed"""
|
||||
def __init__(self, total, n, p, failures = 0):
|
||||
if not failures:
|
||||
OperationCompleted.__init__(self, total, "Building Succeeded")
|
||||
else:
|
||||
OperationCompleted.__init__(self, total, "Building Failed")
|
||||
self._interrupted = interrupted
|
||||
BuildBase.__init__(self, n, p, failures)
|
||||
|
||||
class DiskFull(Event):
|
||||
@@ -489,27 +374,10 @@ class DiskFull(Event):
|
||||
self._free = freespace
|
||||
self._mountpoint = mountpoint
|
||||
|
||||
class DiskUsageSample:
|
||||
def __init__(self, available_bytes, free_bytes, total_bytes):
|
||||
# Number of bytes available to non-root processes.
|
||||
self.available_bytes = available_bytes
|
||||
# Number of bytes available to root processes.
|
||||
self.free_bytes = free_bytes
|
||||
# Total capacity of the volume.
|
||||
self.total_bytes = total_bytes
|
||||
|
||||
class MonitorDiskEvent(Event):
|
||||
"""If BB_DISKMON_DIRS is set, then this event gets triggered each time disk space is checked.
|
||||
Provides information about devices that are getting monitored."""
|
||||
def __init__(self, disk_usage):
|
||||
Event.__init__(self)
|
||||
# hash of device root path -> DiskUsageSample
|
||||
self.disk_usage = disk_usage
|
||||
|
||||
class NoProvider(Event):
|
||||
"""No Provider for an Event"""
|
||||
|
||||
def __init__(self, item, runtime=False, dependees=None, reasons=None, close_matches=None):
|
||||
def __init__(self, item, runtime=False, dependees=None, reasons=[], close_matches=[]):
|
||||
Event.__init__(self)
|
||||
self._item = item
|
||||
self._runtime = runtime
|
||||
@@ -523,28 +391,6 @@ class NoProvider(Event):
|
||||
def isRuntime(self):
|
||||
return self._runtime
|
||||
|
||||
def __str__(self):
|
||||
msg = ''
|
||||
if self._runtime:
|
||||
r = "R"
|
||||
else:
|
||||
r = ""
|
||||
|
||||
extra = ''
|
||||
if not self._reasons:
|
||||
if self._close_matches:
|
||||
extra = ". Close matches:\n %s" % '\n '.join(self._close_matches)
|
||||
|
||||
if self._dependees:
|
||||
msg = "Nothing %sPROVIDES '%s' (but %s %sDEPENDS on or otherwise requires it)%s" % (r, self._item, ", ".join(self._dependees), r, extra)
|
||||
else:
|
||||
msg = "Nothing %sPROVIDES '%s'%s" % (r, self._item, extra)
|
||||
if self._reasons:
|
||||
for reason in self._reasons:
|
||||
msg += '\n' + reason
|
||||
return msg
|
||||
|
||||
|
||||
class MultipleProviders(Event):
|
||||
"""Multiple Providers"""
|
||||
|
||||
@@ -572,16 +418,6 @@ class MultipleProviders(Event):
|
||||
"""
|
||||
return self._candidates
|
||||
|
||||
def __str__(self):
|
||||
msg = "Multiple providers are available for %s%s (%s)" % (self._is_runtime and "runtime " or "",
|
||||
self._item,
|
||||
", ".join(self._candidates))
|
||||
rtime = ""
|
||||
if self._is_runtime:
|
||||
rtime = "R"
|
||||
msg += "\nConsider defining a PREFERRED_%sPROVIDER entry to match %s" % (rtime, self._item)
|
||||
return msg
|
||||
|
||||
class ParseStarted(OperationStarted):
|
||||
"""Recipe parsing for the runqueue has begun"""
|
||||
def __init__(self, total):
|
||||
@@ -655,16 +491,6 @@ class TargetsTreeGenerated(Event):
|
||||
Event.__init__(self)
|
||||
self._model = model
|
||||
|
||||
class ReachableStamps(Event):
|
||||
"""
|
||||
An event listing all stamps reachable after parsing
|
||||
which the metadata may use to clean up stale data
|
||||
"""
|
||||
|
||||
def __init__(self, stamps):
|
||||
Event.__init__(self)
|
||||
self.stamps = stamps
|
||||
|
||||
class FilesMatchingFound(Event):
|
||||
"""
|
||||
Event when a list of files matching the supplied pattern has
|
||||
@@ -675,6 +501,14 @@ class FilesMatchingFound(Event):
|
||||
self._pattern = pattern
|
||||
self._matches = matches
|
||||
|
||||
class CoreBaseFilesFound(Event):
|
||||
"""
|
||||
Event when a list of appropriate config files has been generated
|
||||
"""
|
||||
def __init__(self, paths):
|
||||
Event.__init__(self)
|
||||
self._paths = paths
|
||||
|
||||
class ConfigFilesFound(Event):
|
||||
"""
|
||||
Event when a list of appropriate config files has been generated
|
||||
@@ -734,10 +568,7 @@ class LogHandler(logging.Handler):
|
||||
etype, value, tb = record.exc_info
|
||||
if hasattr(tb, 'tb_next'):
|
||||
tb = list(bb.exceptions.extract_traceback(tb, context=3))
|
||||
# Need to turn the value into something the logging system can pickle
|
||||
record.bb_exc_info = (etype, value, tb)
|
||||
record.bb_exc_formatted = bb.exceptions.format_exception(etype, value, tb, limit=5)
|
||||
value = str(value)
|
||||
record.exc_info = None
|
||||
fire(record, None)
|
||||
|
||||
@@ -745,6 +576,19 @@ class LogHandler(logging.Handler):
|
||||
record.taskpid = worker_pid
|
||||
return True
|
||||
|
||||
class RequestPackageInfo(Event):
|
||||
"""
|
||||
Event to request package information
|
||||
"""
|
||||
|
||||
class PackageInfo(Event):
|
||||
"""
|
||||
Package information for GUI
|
||||
"""
|
||||
def __init__(self, pkginfolist):
|
||||
Event.__init__(self)
|
||||
self._pkginfolist = pkginfolist
|
||||
|
||||
class MetadataEvent(Event):
|
||||
"""
|
||||
Generic event that target for OE-Core classes
|
||||
@@ -753,38 +597,11 @@ class MetadataEvent(Event):
|
||||
def __init__(self, eventtype, eventdata):
|
||||
Event.__init__(self)
|
||||
self.type = eventtype
|
||||
self._localdata = eventdata
|
||||
|
||||
class ProcessStarted(Event):
|
||||
"""
|
||||
Generic process started event (usually part of the initial startup)
|
||||
where further progress events will be delivered
|
||||
"""
|
||||
def __init__(self, processname, total):
|
||||
Event.__init__(self)
|
||||
self.processname = processname
|
||||
self.total = total
|
||||
|
||||
class ProcessProgress(Event):
|
||||
"""
|
||||
Generic process progress event (usually part of the initial startup)
|
||||
"""
|
||||
def __init__(self, processname, progress):
|
||||
Event.__init__(self)
|
||||
self.processname = processname
|
||||
self.progress = progress
|
||||
|
||||
class ProcessFinished(Event):
|
||||
"""
|
||||
Generic process finished event (usually part of the initial startup)
|
||||
"""
|
||||
def __init__(self, processname):
|
||||
Event.__init__(self)
|
||||
self.processname = processname
|
||||
self.data = eventdata
|
||||
|
||||
class SanityCheck(Event):
|
||||
"""
|
||||
Event to run sanity checks, either raise errors or generate events as return status.
|
||||
Event to runs sanity checks, either raise errors or generate events as return status.
|
||||
"""
|
||||
def __init__(self, generateevents = True):
|
||||
Event.__init__(self)
|
||||
@@ -792,7 +609,7 @@ class SanityCheck(Event):
|
||||
|
||||
class SanityCheckPassed(Event):
|
||||
"""
|
||||
Event to indicate sanity check has passed
|
||||
Event to indicate sanity check is passed
|
||||
"""
|
||||
|
||||
class SanityCheckFailed(Event):
|
||||
@@ -822,10 +639,3 @@ class NetworkTestFailed(Event):
|
||||
Event to indicate network test has failed
|
||||
"""
|
||||
|
||||
class FindSigInfoResult(Event):
|
||||
"""
|
||||
Event to return results from findSigInfo command
|
||||
"""
|
||||
def __init__(self, result):
|
||||
Event.__init__(self)
|
||||
self.result = result
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
|
||||
from __future__ import absolute_import
|
||||
import inspect
|
||||
import traceback
|
||||
import bb.namedtuple_with_abc
|
||||
@@ -86,6 +86,6 @@ def format_exception(etype, value, tb, context=1, limit=None, formatter=None):
|
||||
|
||||
def to_string(exc):
|
||||
if isinstance(exc, SystemExit):
|
||||
if not isinstance(exc.code, str):
|
||||
if not isinstance(exc.code, basestring):
|
||||
return 'Exited with "%d"' % exc.code
|
||||
return str(exc)
|
||||
|
||||
File diff suppressed because it is too large
@@ -27,6 +27,7 @@ import os
import sys
import logging
import bb
from bb import data
from bb.fetch2 import FetchMethod
from bb.fetch2 import FetchError
from bb.fetch2 import runfetchcmd
@@ -42,14 +43,14 @@ class Bzr(FetchMethod):
"""
# Create paths to bzr checkouts
relpath = self._strip_leading_slashes(ud.path)
ud.pkgdir = os.path.join(d.expand('${BZRDIR}'), ud.host, relpath)
ud.pkgdir = os.path.join(data.expand('${BZRDIR}', d), ud.host, relpath)

ud.setup_revisions(d)
ud.setup_revisons(d)

if not ud.revision:
ud.revision = self.latest_revision(ud, d)

ud.localfile = d.expand('bzr_%s_%s_%s.tar.gz' % (ud.host, ud.path.replace('/', '.'), ud.revision))
ud.localfile = data.expand('bzr_%s_%s_%s.tar.gz' % (ud.host, ud.path.replace('/', '.'), ud.revision), d)

def _buildbzrcommand(self, ud, d, command):
"""
@@ -57,7 +58,7 @@ class Bzr(FetchMethod):
command is "fetch", "update", "revno"
"""

basecmd = d.expand('${FETCHCMD_bzr}')
basecmd = data.expand('${FETCHCMD_bzr}', d)

proto = ud.parm.get('protocol', 'http')

@@ -87,25 +88,28 @@ class Bzr(FetchMethod):
bzrcmd = self._buildbzrcommand(ud, d, "update")
logger.debug(1, "BZR Update %s", ud.url)
bb.fetch2.check_network_access(d, bzrcmd, ud.url)
runfetchcmd(bzrcmd, d, workdir=os.path.join(ud.pkgdir, os.path.basename(ud.path)))
os.chdir(os.path.join (ud.pkgdir, os.path.basename(ud.path)))
runfetchcmd(bzrcmd, d)
else:
bb.utils.remove(os.path.join(ud.pkgdir, os.path.basename(ud.pkgdir)), True)
bzrcmd = self._buildbzrcommand(ud, d, "fetch")
bb.fetch2.check_network_access(d, bzrcmd, ud.url)
logger.debug(1, "BZR Checkout %s", ud.url)
bb.utils.mkdirhier(ud.pkgdir)
os.chdir(ud.pkgdir)
logger.debug(1, "Running %s", bzrcmd)
runfetchcmd(bzrcmd, d, workdir=ud.pkgdir)
runfetchcmd(bzrcmd, d)

os.chdir(ud.pkgdir)

scmdata = ud.parm.get("scmdata", "")
if scmdata == "keep":
tar_flags = ""
else:
tar_flags = "--exclude='.bzr' --exclude='.bzrtags'"
tar_flags = "--exclude '.bzr' --exclude '.bzrtags'"

# tar them up to a defined filename
runfetchcmd("tar %s -czf %s %s" % (tar_flags, ud.localpath, os.path.basename(ud.pkgdir)),
d, cleanup=[ud.localpath], workdir=ud.pkgdir)
runfetchcmd("tar %s -czf %s %s" % (tar_flags, ud.localpath, os.path.basename(ud.pkgdir)), d, cleanup = [ud.localpath])

def supports_srcrev(self):
return True
@@ -1,260 +0,0 @@
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
BitBake 'Fetch' clearcase implementation

The clearcase fetcher is used to retrieve files from a ClearCase repository.

Usage in the recipe:

SRC_URI = "ccrc://cc.example.org/ccrc;vob=/example_vob;module=/example_module"
SRCREV = "EXAMPLE_CLEARCASE_TAG"
PV = "${@d.getVar("SRCREV", False).replace("/", "+")}"

The fetcher uses the rcleartool or cleartool remote client, depending on which one is available.

Supported SRC_URI options are:

- vob
(required) The name of the clearcase VOB (with prepending "/")

- module
The module in the selected VOB (with prepending "/")

The module and vob parameters are combined to create
the following load rule in the view config spec:
load <vob><module>

- proto
http or https

Related variables:

CCASE_CUSTOM_CONFIG_SPEC
Write a config spec to this variable in your recipe to use it instead
of the default config spec generated by this fetcher.
Please note that the SRCREV loses its functionality if you specify
this variable. SRCREV is still used to label the archive after a fetch,
but it doesn't define what's fetched.

User credentials:
cleartool:
The login of cleartool is handled by the system. No special steps needed.

rcleartool:
In order to use rcleartool with authenticated users an `rcleartool login` is
necessary before using the fetcher.
"""
# Copyright (C) 2014 Siemens AG
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License version 2 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License along
|
||||
# with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
#
|
||||
|
||||
import os
|
||||
import sys
|
||||
import shutil
|
||||
import bb
|
||||
from bb.fetch2 import FetchMethod
|
||||
from bb.fetch2 import FetchError
|
||||
from bb.fetch2 import runfetchcmd
|
||||
from bb.fetch2 import logger
|
||||
from distutils import spawn
|
||||
|
||||
class ClearCase(FetchMethod):
|
||||
"""Class to fetch urls via 'clearcase'"""
|
||||
def init(self, d):
|
||||
pass
|
||||
|
||||
def supports(self, ud, d):
|
||||
"""
|
||||
Check to see if a given url can be fetched with Clearcase.
|
||||
"""
|
||||
return ud.type in ['ccrc']
|
||||
|
||||
def debug(self, msg):
|
||||
logger.debug(1, "ClearCase: %s", msg)
|
||||
|
||||
def urldata_init(self, ud, d):
|
||||
"""
|
||||
init ClearCase specific variable within url data
|
||||
"""
|
||||
ud.proto = "https"
|
||||
if 'protocol' in ud.parm:
|
||||
ud.proto = ud.parm['protocol']
|
||||
if not ud.proto in ('http', 'https'):
|
||||
raise fetch2.ParameterError("Invalid protocol type", ud.url)
|
||||
|
||||
ud.vob = ''
|
||||
if 'vob' in ud.parm:
|
||||
ud.vob = ud.parm['vob']
|
||||
else:
|
||||
msg = ud.url+": vob must be defined so the fetcher knows what to get."
|
||||
raise MissingParameterError('vob', msg)
|
||||
|
||||
if 'module' in ud.parm:
|
||||
ud.module = ud.parm['module']
|
||||
else:
|
||||
ud.module = ""
|
||||
|
||||
ud.basecmd = d.getVar("FETCHCMD_ccrc") or spawn.find_executable("cleartool") or spawn.find_executable("rcleartool")
|
||||
|
||||
if d.getVar("SRCREV") == "INVALID":
|
||||
raise FetchError("Set a valid SRCREV for the clearcase fetcher in your recipe, e.g. SRCREV = \"/main/LATEST\" or any other label of your choice.")
|
||||
|
||||
ud.label = d.getVar("SRCREV", False)
|
||||
ud.customspec = d.getVar("CCASE_CUSTOM_CONFIG_SPEC")
|
||||
|
||||
ud.server = "%s://%s%s" % (ud.proto, ud.host, ud.path)
|
||||
|
||||
ud.identifier = "clearcase-%s%s-%s" % ( ud.vob.replace("/", ""),
|
||||
ud.module.replace("/", "."),
|
||||
ud.label.replace("/", "."))
|
||||
|
||||
ud.viewname = "%s-view%s" % (ud.identifier, d.getVar("DATETIME", d, True))
|
||||
ud.csname = "%s-config-spec" % (ud.identifier)
|
||||
ud.ccasedir = os.path.join(d.getVar("DL_DIR"), ud.type)
|
||||
ud.viewdir = os.path.join(ud.ccasedir, ud.viewname)
|
||||
ud.configspecfile = os.path.join(ud.ccasedir, ud.csname)
|
||||
ud.localfile = "%s.tar.gz" % (ud.identifier)
|
||||
|
||||
self.debug("host = %s" % ud.host)
|
||||
self.debug("path = %s" % ud.path)
|
||||
self.debug("server = %s" % ud.server)
|
||||
self.debug("proto = %s" % ud.proto)
|
||||
self.debug("type = %s" % ud.type)
|
||||
self.debug("vob = %s" % ud.vob)
|
||||
self.debug("module = %s" % ud.module)
|
||||
self.debug("basecmd = %s" % ud.basecmd)
|
||||
self.debug("label = %s" % ud.label)
|
||||
self.debug("ccasedir = %s" % ud.ccasedir)
|
||||
self.debug("viewdir = %s" % ud.viewdir)
|
||||
self.debug("viewname = %s" % ud.viewname)
|
||||
self.debug("configspecfile = %s" % ud.configspecfile)
|
||||
self.debug("localfile = %s" % ud.localfile)
|
||||
|
||||
ud.localfile = os.path.join(d.getVar("DL_DIR"), ud.localfile)
|
||||
|
||||
def _build_ccase_command(self, ud, command):
|
||||
"""
|
||||
Build up a commandline based on ud
|
||||
command is: mkview, setcs, rmview
|
||||
"""
|
||||
options = []
|
||||
|
||||
if "rcleartool" in ud.basecmd:
|
||||
options.append("-server %s" % ud.server)
|
||||
|
||||
basecmd = "%s %s" % (ud.basecmd, command)
|
||||
|
||||
if command is 'mkview':
|
||||
if not "rcleartool" in ud.basecmd:
|
||||
# Cleartool needs a -snapshot view
|
||||
options.append("-snapshot")
|
||||
options.append("-tag %s" % ud.viewname)
|
||||
options.append(ud.viewdir)
|
||||
|
||||
elif command is 'rmview':
|
||||
options.append("-force")
|
||||
options.append("%s" % ud.viewdir)
|
||||
|
||||
elif command is 'setcs':
|
||||
options.append("-overwrite")
|
||||
options.append(ud.configspecfile)
|
||||
|
||||
else:
|
||||
raise FetchError("Invalid ccase command %s" % command)
|
||||
|
||||
ccasecmd = "%s %s" % (basecmd, " ".join(options))
|
||||
self.debug("ccasecmd = %s" % ccasecmd)
|
||||
return ccasecmd
|
||||
|
||||
def _write_configspec(self, ud, d):
|
||||
"""
|
||||
Create config spec file (ud.configspecfile) for ccase view
|
||||
"""
|
||||
config_spec = ""
|
||||
custom_config_spec = d.getVar("CCASE_CUSTOM_CONFIG_SPEC", d)
|
||||
if custom_config_spec is not None:
|
||||
for line in custom_config_spec.split("\\n"):
|
||||
config_spec += line+"\n"
|
||||
bb.warn("A custom config spec has been set, SRCREV is only relevant for the tarball name.")
|
||||
else:
|
||||
config_spec += "element * CHECKEDOUT\n"
|
||||
config_spec += "element * %s\n" % ud.label
|
||||
config_spec += "load %s%s\n" % (ud.vob, ud.module)
|
||||
|
||||
logger.info("Using config spec: \n%s" % config_spec)
|
||||
|
||||
with open(ud.configspecfile, 'w') as f:
|
||||
f.write(config_spec)
|
||||
|
||||
def _remove_view(self, ud, d):
|
||||
if os.path.exists(ud.viewdir):
|
||||
cmd = self._build_ccase_command(ud, 'rmview');
|
||||
logger.info("cleaning up [VOB=%s label=%s view=%s]", ud.vob, ud.label, ud.viewname)
|
||||
bb.fetch2.check_network_access(d, cmd, ud.url)
|
||||
output = runfetchcmd(cmd, d, workdir=ud.ccasedir)
|
||||
logger.info("rmview output: %s", output)
|
||||
|
||||
def need_update(self, ud, d):
|
||||
if ("LATEST" in ud.label) or (ud.customspec and "LATEST" in ud.customspec):
|
||||
ud.identifier += "-%s" % d.getVar("DATETIME",d, True)
|
||||
return True
|
||||
if os.path.exists(ud.localpath):
|
||||
return False
|
||||
return True
|
||||
|
||||
def supports_srcrev(self):
|
||||
return True
|
||||
|
||||
def sortable_revision(self, ud, d, name):
|
||||
return False, ud.identifier
|
||||
|
||||
def download(self, ud, d):
|
||||
"""Fetch url"""
|
||||
|
||||
# Make a fresh view
|
||||
bb.utils.mkdirhier(ud.ccasedir)
|
||||
self._write_configspec(ud, d)
|
||||
cmd = self._build_ccase_command(ud, 'mkview')
|
||||
logger.info("creating view [VOB=%s label=%s view=%s]", ud.vob, ud.label, ud.viewname)
|
||||
bb.fetch2.check_network_access(d, cmd, ud.url)
|
||||
try:
|
||||
runfetchcmd(cmd, d)
|
||||
except FetchError as e:
|
||||
if "CRCLI2008E" in e.msg:
|
||||
raise FetchError("%s\n%s\n" % (e.msg, "Call `rcleartool login` in your console to authenticate to the clearcase server before running bitbake."))
|
||||
else:
|
||||
raise e
|
||||
|
||||
# Set configspec: Setting the configspec effectively fetches the files as defined in the configspec
|
||||
cmd = self._build_ccase_command(ud, 'setcs');
|
||||
logger.info("fetching data [VOB=%s label=%s view=%s]", ud.vob, ud.label, ud.viewname)
|
||||
bb.fetch2.check_network_access(d, cmd, ud.url)
|
||||
output = runfetchcmd(cmd, d, workdir=ud.viewdir)
|
||||
logger.info("%s", output)
|
||||
|
||||
# Copy the configspec to the viewdir so we have it in our source tarball later
|
||||
shutil.copyfile(ud.configspecfile, os.path.join(ud.viewdir, ud.csname))
|
||||
|
||||
# Clean clearcase meta-data before tar
|
||||
|
||||
runfetchcmd('tar -czf "%s" .' % (ud.localpath), d, cleanup = [ud.localpath])
|
||||
|
||||
# Clean up so we can create a new view next time
|
||||
self.clean(ud, d);
|
||||
|
||||
def clean(self, ud, d):
|
||||
self._remove_view(ud, d)
|
||||
bb.utils.remove(ud.configspecfile)
|
||||
@@ -63,7 +63,7 @@ class Cvs(FetchMethod):
if 'fullpath' in ud.parm:
fullpath = '_fullpath'

ud.localfile = d.expand('%s_%s_%s_%s%s%s.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.tag, ud.date, norecurse, fullpath))
ud.localfile = bb.data.expand('%s_%s_%s_%s%s%s.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.tag, ud.date, norecurse, fullpath), d)

def need_update(self, ud, d):
if (ud.date == "now"):
@@ -87,10 +87,10 @@ class Cvs(FetchMethod):
cvsroot = ud.path
else:
cvsroot = ":" + method
cvsproxyhost = d.getVar('CVS_PROXY_HOST')
cvsproxyhost = d.getVar('CVS_PROXY_HOST', True)
if cvsproxyhost:
cvsroot += ";proxy=" + cvsproxyhost
cvsproxyport = d.getVar('CVS_PROXY_PORT')
cvsproxyport = d.getVar('CVS_PROXY_PORT', True)
if cvsproxyport:
cvsroot += ";proxyport=" + cvsproxyport
cvsroot += ":" + ud.user
@@ -110,7 +110,7 @@ class Cvs(FetchMethod):
if ud.tag:
options.append("-r %s" % ud.tag)

cvsbasecmd = d.getVar("FETCHCMD_cvs")
cvsbasecmd = d.getVar("FETCHCMD_cvs", True)
cvscmd = cvsbasecmd + " '-d" + cvsroot + "' co " + " ".join(options) + " " + ud.module
cvsupdatecmd = cvsbasecmd + " '-d" + cvsroot + "' update -d -P " + " ".join(options)

@@ -120,26 +120,25 @@ class Cvs(FetchMethod):

# create module directory
logger.debug(2, "Fetch: checking for module directory")
pkg = d.getVar('PN')
pkgdir = os.path.join(d.getVar('CVSDIR'), pkg)
pkg = d.getVar('PN', True)
pkgdir = os.path.join(d.getVar('CVSDIR', True), pkg)
moddir = os.path.join(pkgdir, localdir)
workdir = None
if os.access(os.path.join(moddir, 'CVS'), os.R_OK):
logger.info("Update " + ud.url)
bb.fetch2.check_network_access(d, cvsupdatecmd, ud.url)
# update sources there
workdir = moddir
os.chdir(moddir)
cmd = cvsupdatecmd
else:
logger.info("Fetch " + ud.url)
# check out sources there
bb.utils.mkdirhier(pkgdir)
workdir = pkgdir
os.chdir(pkgdir)
logger.debug(1, "Running %s", cvscmd)
bb.fetch2.check_network_access(d, cvscmd, ud.url)
cmd = cvscmd

runfetchcmd(cmd, d, cleanup=[moddir], workdir=workdir)
runfetchcmd(cmd, d, cleanup = [moddir])

if not os.access(moddir, os.R_OK):
raise FetchError("Directory %s was not readable despite sucessful fetch?!" % moddir, ud.url)
@@ -148,24 +147,24 @@ class Cvs(FetchMethod):
if scmdata == "keep":
tar_flags = ""
else:
tar_flags = "--exclude='CVS'"
tar_flags = "--exclude 'CVS'"

# tar them up to a defined filename
workdir = None
if 'fullpath' in ud.parm:
workdir = pkgdir
os.chdir(pkgdir)
cmd = "tar %s -czf %s %s" % (tar_flags, ud.localpath, localdir)
else:
workdir = os.path.dirname(os.path.realpath(moddir))
os.chdir(moddir)
os.chdir('..')
cmd = "tar %s -czf %s %s" % (tar_flags, ud.localpath, os.path.basename(moddir))

runfetchcmd(cmd, d, cleanup=[ud.localpath], workdir=workdir)
runfetchcmd(cmd, d, cleanup = [ud.localpath])

def clean(self, ud, d):
""" Clean CVS Files and tarballs """

pkg = d.getVar('PN')
pkgdir = os.path.join(d.getVar("CVSDIR"), pkg)
pkg = d.getVar('PN', True)
pkgdir = os.path.join(d.getVar("CVSDIR", True), pkg)

bb.utils.remove(pkgdir, True)
bb.utils.remove(ud.localpath)
@@ -49,10 +49,6 @@ Supported SRC_URI options are:
referring to commit which is valid in tag instead of branch.
The default is "0", set nobranch=1 if needed.

- usehead
For local git:// urls to use the current branch HEAD as the revision for use with
AUTOREV. Implies nobranch.

"""

#Copyright (C) 2005 Richard Purdie
@@ -70,64 +66,14 @@ Supported SRC_URI options are:
|
||||
# with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
import collections
|
||||
import errno
|
||||
import fnmatch
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
import tempfile
|
||||
import bb
|
||||
import bb.progress
|
||||
from bb import data
|
||||
from bb.fetch2 import FetchMethod
|
||||
from bb.fetch2 import runfetchcmd
|
||||
from bb.fetch2 import logger
|
||||
|
||||
|
||||
class GitProgressHandler(bb.progress.LineFilterProgressHandler):
|
||||
"""Extract progress information from git output"""
|
||||
def __init__(self, d):
|
||||
self._buffer = ''
|
||||
self._count = 0
|
||||
super(GitProgressHandler, self).__init__(d)
|
||||
# Send an initial progress event so the bar gets shown
|
||||
self._fire_progress(-1)
|
||||
|
||||
def write(self, string):
|
||||
self._buffer += string
|
||||
stages = ['Counting objects', 'Compressing objects', 'Receiving objects', 'Resolving deltas']
|
||||
stage_weights = [0.2, 0.05, 0.5, 0.25]
|
||||
stagenum = 0
|
||||
for i, stage in reversed(list(enumerate(stages))):
|
||||
if stage in self._buffer:
|
||||
stagenum = i
|
||||
self._buffer = ''
|
||||
break
|
||||
self._status = stages[stagenum]
|
||||
percs = re.findall(r'(\d+)%', string)
|
||||
if percs:
|
||||
progress = int(round((int(percs[-1]) * stage_weights[stagenum]) + (sum(stage_weights[:stagenum]) * 100)))
|
||||
rates = re.findall(r'([\d.]+ [a-zA-Z]*/s+)', string)
|
||||
if rates:
|
||||
rate = rates[-1]
|
||||
else:
|
||||
rate = None
|
||||
self.update(progress, rate)
|
||||
else:
|
||||
if stagenum == 0:
|
||||
percs = re.findall(r': (\d+)', string)
|
||||
if percs:
|
||||
count = int(percs[-1])
|
||||
if count > self._count:
|
||||
self._count = count
|
||||
self._fire_progress(-count)
|
||||
super(GitProgressHandler, self).write(string)
|
||||
|
||||
|
||||
class Git(FetchMethod):
|
||||
bitbake_dir = os.path.abspath(os.path.join(os.path.dirname(os.path.join(os.path.abspath(__file__))), '..', '..', '..'))
|
||||
make_shallow_path = os.path.join(bitbake_dir, 'bin', 'git-make-shallow')
|
||||
|
||||
"""Class to fetch a module or modules from git repositories"""
|
||||
def init(self, d):
|
||||
pass
|
||||
@@ -162,13 +108,6 @@ class Git(FetchMethod):
|
||||
|
||||
ud.nobranch = ud.parm.get("nobranch","0") == "1"
|
||||
|
||||
# usehead implies nobranch
|
||||
ud.usehead = ud.parm.get("usehead","0") == "1"
|
||||
if ud.usehead:
|
||||
if ud.proto != "file":
|
||||
raise bb.fetch2.ParameterError("The usehead option is only for use with local ('protocol=file') git repositories", ud.url)
|
||||
ud.nobranch = 1
|
||||
|
||||
# bareclone implies nocheckout
|
||||
ud.bareclone = ud.parm.get("bareclone","0") == "1"
|
||||
if ud.bareclone:
|
||||
@@ -178,68 +117,17 @@ class Git(FetchMethod):
|
||||
branches = ud.parm.get("branch", "master").split(',')
|
||||
if len(branches) != len(ud.names):
|
||||
raise bb.fetch2.ParameterError("The number of name and branch parameters is not balanced", ud.url)
|
||||
|
||||
ud.cloneflags = "-s -n"
|
||||
if ud.bareclone:
|
||||
ud.cloneflags += " --mirror"
|
||||
|
||||
ud.shallow = d.getVar("BB_GIT_SHALLOW") == "1"
|
||||
ud.shallow_extra_refs = (d.getVar("BB_GIT_SHALLOW_EXTRA_REFS") or "").split()
|
||||
|
||||
depth_default = d.getVar("BB_GIT_SHALLOW_DEPTH")
|
||||
if depth_default is not None:
|
||||
try:
|
||||
depth_default = int(depth_default or 0)
|
||||
except ValueError:
|
||||
raise bb.fetch2.FetchError("Invalid depth for BB_GIT_SHALLOW_DEPTH: %s" % depth_default)
|
||||
else:
|
||||
if depth_default < 0:
|
||||
raise bb.fetch2.FetchError("Invalid depth for BB_GIT_SHALLOW_DEPTH: %s" % depth_default)
|
||||
else:
|
||||
depth_default = 1
|
||||
ud.shallow_depths = collections.defaultdict(lambda: depth_default)
|
||||
|
||||
revs_default = d.getVar("BB_GIT_SHALLOW_REVS", True)
|
||||
ud.shallow_revs = []
|
||||
ud.branches = {}
|
||||
for pos, name in enumerate(ud.names):
|
||||
branch = branches[pos]
|
||||
for name in ud.names:
|
||||
branch = branches[ud.names.index(name)]
|
||||
ud.branches[name] = branch
|
||||
ud.unresolvedrev[name] = branch
|
||||
|
||||
shallow_depth = d.getVar("BB_GIT_SHALLOW_DEPTH_%s" % name)
|
||||
if shallow_depth is not None:
|
||||
try:
|
||||
shallow_depth = int(shallow_depth or 0)
|
||||
except ValueError:
|
||||
raise bb.fetch2.FetchError("Invalid depth for BB_GIT_SHALLOW_DEPTH_%s: %s" % (name, shallow_depth))
|
||||
else:
|
||||
if shallow_depth < 0:
|
||||
raise bb.fetch2.FetchError("Invalid depth for BB_GIT_SHALLOW_DEPTH_%s: %s" % (name, shallow_depth))
|
||||
ud.shallow_depths[name] = shallow_depth
|
||||
ud.basecmd = data.getVar("FETCHCMD_git", d, True) or "git"
|
||||
|
||||
revs = d.getVar("BB_GIT_SHALLOW_REVS_%s" % name)
|
||||
if revs is not None:
|
||||
ud.shallow_revs.extend(revs.split())
|
||||
elif revs_default is not None:
|
||||
ud.shallow_revs.extend(revs_default.split())
|
||||
ud.write_tarballs = ((data.getVar("BB_GENERATE_MIRROR_TARBALLS", d, True) or "0") != "0") or ud.rebaseable
|
||||
|
||||
if (ud.shallow and
|
||||
not ud.shallow_revs and
|
||||
all(ud.shallow_depths[n] == 0 for n in ud.names)):
|
||||
# Shallow disabled for this URL
|
||||
ud.shallow = False
|
||||
|
||||
if ud.usehead:
|
||||
ud.unresolvedrev['default'] = 'HEAD'
|
||||
|
||||
ud.basecmd = d.getVar("FETCHCMD_git") or "git -c core.fsyncobjectfiles=0"
|
||||
|
||||
write_tarballs = d.getVar("BB_GENERATE_MIRROR_TARBALLS") or "0"
|
||||
ud.write_tarballs = write_tarballs != "0" or ud.rebaseable
|
||||
ud.write_shallow_tarballs = (d.getVar("BB_GENERATE_SHALLOW_TARBALLS") or write_tarballs) != "0"
|
||||
|
||||
ud.setup_revisions(d)
|
||||
ud.setup_revisons(d)
|
||||
|
||||
for name in ud.names:
|
||||
# Ensure anything that doesn't look like a sha256 checksum/revision is translated into one
|
||||
@@ -248,10 +136,7 @@ class Git(FetchMethod):
|
||||
ud.unresolvedrev[name] = ud.revisions[name]
|
||||
ud.revisions[name] = self.latest_revision(ud, d, name)
|
||||
|
||||
gitsrcname = '%s%s' % (ud.host.replace(':', '.'), ud.path.replace('/', '.').replace('*', '.'))
|
||||
if gitsrcname.startswith('.'):
|
||||
gitsrcname = gitsrcname[1:]
|
||||
|
||||
gitsrcname = '%s%s' % (ud.host.replace(':','.'), ud.path.replace('/', '.').replace('*', '.'))
|
||||
# for rebaseable git repo, it is necessary to keep mirror tar ball
|
||||
# per revision, so that even the revision disappears from the
|
||||
# upstream repo in the future, the mirror will remain intact and still
|
||||
@@ -259,53 +144,23 @@ class Git(FetchMethod):
|
||||
if ud.rebaseable:
|
||||
for name in ud.names:
|
||||
gitsrcname = gitsrcname + '_' + ud.revisions[name]
|
||||
|
||||
dl_dir = d.getVar("DL_DIR")
|
||||
gitdir = d.getVar("GITDIR") or (dl_dir + "/git2/")
|
||||
ud.mirrortarball = 'git2_%s.tar.gz' % (gitsrcname)
|
||||
ud.fullmirror = os.path.join(d.getVar("DL_DIR", True), ud.mirrortarball)
|
||||
gitdir = d.getVar("GITDIR", True) or (d.getVar("DL_DIR", True) + "/git2/")
|
||||
ud.clonedir = os.path.join(gitdir, gitsrcname)
|
||||
|
||||
ud.localfile = ud.clonedir
|
||||
|
||||
mirrortarball = 'git2_%s.tar.gz' % gitsrcname
|
||||
ud.fullmirror = os.path.join(dl_dir, mirrortarball)
|
||||
ud.mirrortarballs = [mirrortarball]
|
||||
if ud.shallow:
|
||||
tarballname = gitsrcname
|
||||
if ud.bareclone:
|
||||
tarballname = "%s_bare" % tarballname
|
||||
|
||||
if ud.shallow_revs:
|
||||
tarballname = "%s_%s" % (tarballname, "_".join(sorted(ud.shallow_revs)))
|
||||
|
||||
for name, revision in sorted(ud.revisions.items()):
|
||||
tarballname = "%s_%s" % (tarballname, ud.revisions[name][:7])
|
||||
depth = ud.shallow_depths[name]
|
||||
if depth:
|
||||
tarballname = "%s-%s" % (tarballname, depth)
|
||||
|
||||
shallow_refs = []
|
||||
if not ud.nobranch:
|
||||
shallow_refs.extend(ud.branches.values())
|
||||
if ud.shallow_extra_refs:
|
||||
shallow_refs.extend(r.replace('refs/heads/', '').replace('*', 'ALL') for r in ud.shallow_extra_refs)
|
||||
if shallow_refs:
|
||||
tarballname = "%s_%s" % (tarballname, "_".join(sorted(shallow_refs)).replace('/', '.'))
|
||||
|
||||
fetcher = self.__class__.__name__.lower()
|
||||
ud.shallowtarball = '%sshallow_%s.tar.gz' % (fetcher, tarballname)
|
||||
ud.fullshallow = os.path.join(dl_dir, ud.shallowtarball)
|
||||
ud.mirrortarballs.insert(0, ud.shallowtarball)
|
||||
|
||||
def localpath(self, ud, d):
|
||||
return ud.clonedir
|
||||
|
||||
def need_update(self, ud, d):
|
||||
if not os.path.exists(ud.clonedir):
|
||||
return True
|
||||
os.chdir(ud.clonedir)
|
||||
for name in ud.names:
|
||||
if not self._contains_ref(ud, d, name, ud.clonedir):
|
||||
if not self._contains_ref(ud, d, name):
|
||||
return True
|
||||
if ud.shallow and ud.write_shallow_tarballs and not os.path.exists(ud.fullshallow):
|
||||
return True
|
||||
if ud.write_tarballs and not os.path.exists(ud.fullmirror):
|
||||
return True
|
||||
return False
|
||||
@@ -313,7 +168,7 @@ class Git(FetchMethod):
|
||||
def try_premirror(self, ud, d):
|
||||
# If we don't do this, updating an existing checkout with only premirrors
|
||||
# is not possible
|
||||
if d.getVar("BB_FETCH_PREMIRRORONLY") is not None:
|
||||
if d.getVar("BB_FETCH_PREMIRRORONLY", True) is not None:
|
||||
return True
|
||||
if os.path.exists(ud.clonedir):
|
||||
return False
|
||||
@@ -322,146 +177,74 @@ class Git(FetchMethod):
|
||||
def download(self, ud, d):
|
||||
"""Fetch url"""
|
||||
|
||||
no_clone = not os.path.exists(ud.clonedir)
|
||||
need_update = no_clone or self.need_update(ud, d)
|
||||
if ud.user:
|
||||
username = ud.user + '@'
|
||||
else:
|
||||
username = ""
|
||||
|
||||
# A current clone is preferred to either tarball, a shallow tarball is
|
||||
# preferred to an out of date clone, and a missing clone will use
|
||||
# either tarball.
|
||||
if ud.shallow and os.path.exists(ud.fullshallow) and need_update:
|
||||
ud.localpath = ud.fullshallow
|
||||
return
|
||||
elif os.path.exists(ud.fullmirror) and no_clone:
|
||||
ud.repochanged = not os.path.exists(ud.fullmirror)
|
||||
|
||||
# If the checkout doesn't exist and the mirror tarball does, extract it
|
||||
if not os.path.exists(ud.clonedir) and os.path.exists(ud.fullmirror):
|
||||
bb.utils.mkdirhier(ud.clonedir)
|
||||
runfetchcmd("tar -xzf %s" % ud.fullmirror, d, workdir=ud.clonedir)
|
||||
os.chdir(ud.clonedir)
|
||||
runfetchcmd("tar -xzf %s" % (ud.fullmirror), d)
|
||||
|
||||
repourl = self._get_repo_url(ud)
|
||||
repourl = "%s://%s%s%s" % (ud.proto, username, ud.host, ud.path)
|
||||
|
||||
# If the repo still doesn't exist, fallback to cloning it
|
||||
if not os.path.exists(ud.clonedir):
|
||||
# We do this since git will use a "-l" option automatically for local urls where possible
|
||||
if repourl.startswith("file://"):
|
||||
repourl = repourl[7:]
|
||||
clone_cmd = "LANG=C %s clone --bare --mirror %s %s --progress" % (ud.basecmd, repourl, ud.clonedir)
|
||||
clone_cmd = "%s clone --bare --mirror %s %s" % (ud.basecmd, repourl, ud.clonedir)
|
||||
if ud.proto.lower() != 'file':
|
||||
bb.fetch2.check_network_access(d, clone_cmd, ud.url)
|
||||
progresshandler = GitProgressHandler(d)
|
||||
runfetchcmd(clone_cmd, d, log=progresshandler)
|
||||
bb.fetch2.check_network_access(d, clone_cmd)
|
||||
runfetchcmd(clone_cmd, d)
|
||||
|
||||
os.chdir(ud.clonedir)
|
||||
# Update the checkout if needed
|
||||
needupdate = False
|
||||
for name in ud.names:
|
||||
if not self._contains_ref(ud, d, name, ud.clonedir):
|
||||
if not self._contains_ref(ud, d, name):
|
||||
needupdate = True
|
||||
if needupdate:
|
||||
try:
|
||||
runfetchcmd("%s remote rm origin" % ud.basecmd, d, workdir=ud.clonedir)
|
||||
runfetchcmd("%s remote rm origin" % ud.basecmd, d)
|
||||
except bb.fetch2.FetchError:
|
||||
logger.debug(1, "No Origin")
|
||||
|
||||
runfetchcmd("%s remote add --mirror=fetch origin %s" % (ud.basecmd, repourl), d, workdir=ud.clonedir)
|
||||
fetch_cmd = "LANG=C %s fetch -f --prune --progress %s refs/*:refs/*" % (ud.basecmd, repourl)
|
||||
runfetchcmd("%s remote add --mirror=fetch origin %s" % (ud.basecmd, repourl), d)
|
||||
fetch_cmd = "%s fetch -f --prune %s refs/*:refs/*" % (ud.basecmd, repourl)
|
||||
if ud.proto.lower() != 'file':
|
||||
bb.fetch2.check_network_access(d, fetch_cmd, ud.url)
|
||||
progresshandler = GitProgressHandler(d)
|
||||
runfetchcmd(fetch_cmd, d, log=progresshandler, workdir=ud.clonedir)
|
||||
runfetchcmd("%s prune-packed" % ud.basecmd, d, workdir=ud.clonedir)
|
||||
runfetchcmd("%s pack-refs --all" % ud.basecmd, d, workdir=ud.clonedir)
|
||||
runfetchcmd("%s pack-redundant --all | xargs -r rm" % ud.basecmd, d, workdir=ud.clonedir)
|
||||
try:
|
||||
os.unlink(ud.fullmirror)
|
||||
except OSError as exc:
|
||||
if exc.errno != errno.ENOENT:
|
||||
raise
|
||||
runfetchcmd(fetch_cmd, d)
|
||||
runfetchcmd("%s prune-packed" % ud.basecmd, d)
|
||||
runfetchcmd("%s pack-redundant --all | xargs -r rm" % ud.basecmd, d)
|
||||
ud.repochanged = True
|
||||
os.chdir(ud.clonedir)
|
||||
for name in ud.names:
|
||||
if not self._contains_ref(ud, d, name, ud.clonedir):
|
||||
if not self._contains_ref(ud, d, name):
|
||||
raise bb.fetch2.FetchError("Unable to find revision %s in branch %s even from upstream" % (ud.revisions[name], ud.branches[name]))
|
||||
|
||||
def build_mirror_data(self, ud, d):
|
||||
if ud.shallow and ud.write_shallow_tarballs:
|
||||
if not os.path.exists(ud.fullshallow):
|
||||
if os.path.islink(ud.fullshallow):
|
||||
os.unlink(ud.fullshallow)
|
||||
tempdir = tempfile.mkdtemp(dir=d.getVar('DL_DIR'))
|
||||
shallowclone = os.path.join(tempdir, 'git')
|
||||
try:
|
||||
self.clone_shallow_local(ud, shallowclone, d)
|
||||
|
||||
logger.info("Creating tarball of git repository")
|
||||
runfetchcmd("tar -czf %s ." % ud.fullshallow, d, workdir=shallowclone)
|
||||
runfetchcmd("touch %s.done" % ud.fullshallow, d)
|
||||
finally:
|
||||
bb.utils.remove(tempdir, recurse=True)
|
||||
elif ud.write_tarballs and not os.path.exists(ud.fullmirror):
|
||||
# Generate a mirror tarball if needed
|
||||
if ud.write_tarballs and (ud.repochanged or not os.path.exists(ud.fullmirror)):
|
||||
# it's possible that this symlink points to read-only filesystem with PREMIRROR
|
||||
if os.path.islink(ud.fullmirror):
|
||||
os.unlink(ud.fullmirror)
|
||||
|
||||
os.chdir(ud.clonedir)
|
||||
logger.info("Creating tarball of git repository")
|
||||
runfetchcmd("tar -czf %s ." % ud.fullmirror, d, workdir=ud.clonedir)
|
||||
runfetchcmd("touch %s.done" % ud.fullmirror, d)
|
||||
|
||||
def clone_shallow_local(self, ud, dest, d):
|
||||
"""Clone the repo and make it shallow.
|
||||
|
||||
The upstream url of the new clone isn't set at this time, as it'll be
|
||||
set correctly when unpacked."""
|
||||
runfetchcmd("%s clone %s %s %s" % (ud.basecmd, ud.cloneflags, ud.clonedir, dest), d)
|
||||
|
||||
to_parse, shallow_branches = [], []
|
||||
for name in ud.names:
|
||||
revision = ud.revisions[name]
|
||||
depth = ud.shallow_depths[name]
|
||||
if depth:
|
||||
to_parse.append('%s~%d^{}' % (revision, depth - 1))
|
||||
|
||||
# For nobranch, we need a ref, otherwise the commits will be
|
||||
# removed, and for non-nobranch, we truncate the branch to our
|
||||
# srcrev, to avoid keeping unnecessary history beyond that.
|
||||
branch = ud.branches[name]
|
||||
if ud.nobranch:
|
||||
ref = "refs/shallow/%s" % name
|
||||
elif ud.bareclone:
|
||||
ref = "refs/heads/%s" % branch
|
||||
else:
|
||||
ref = "refs/remotes/origin/%s" % branch
|
||||
|
||||
shallow_branches.append(ref)
|
||||
runfetchcmd("%s update-ref %s %s" % (ud.basecmd, ref, revision), d, workdir=dest)
|
||||
|
||||
# Map srcrev+depths to revisions
|
||||
parsed_depths = runfetchcmd("%s rev-parse %s" % (ud.basecmd, " ".join(to_parse)), d, workdir=dest)
|
||||
|
||||
# Resolve specified revisions
|
||||
parsed_revs = runfetchcmd("%s rev-parse %s" % (ud.basecmd, " ".join('"%s^{}"' % r for r in ud.shallow_revs)), d, workdir=dest)
|
||||
shallow_revisions = parsed_depths.splitlines() + parsed_revs.splitlines()
|
||||
|
||||
# Apply extra ref wildcards
|
||||
all_refs = runfetchcmd('%s for-each-ref "--format=%%(refname)"' % ud.basecmd,
|
||||
d, workdir=dest).splitlines()
|
||||
for r in ud.shallow_extra_refs:
|
||||
if not ud.bareclone:
|
||||
r = r.replace('refs/heads/', 'refs/remotes/origin/')
|
||||
|
||||
if '*' in r:
|
||||
matches = filter(lambda a: fnmatch.fnmatchcase(a, r), all_refs)
|
||||
shallow_branches.extend(matches)
|
||||
else:
|
||||
shallow_branches.append(r)
|
||||
|
||||
# Make the repository shallow
|
||||
shallow_cmd = [self.make_shallow_path, '-s']
|
||||
for b in shallow_branches:
|
||||
shallow_cmd.append('-r')
|
||||
shallow_cmd.append(b)
|
||||
shallow_cmd.extend(shallow_revisions)
|
||||
runfetchcmd(subprocess.list2cmdline(shallow_cmd), d, workdir=dest)
|
||||
runfetchcmd("tar -czf %s %s" % (ud.fullmirror, os.path.join(".") ), d)
|
||||
runfetchcmd("touch %s.done" % (ud.fullmirror), d)
|
||||
|
||||
def unpack(self, ud, destdir, d):
|
||||
""" unpack the downloaded src to destdir"""
|
||||
|
||||
subdir = ud.parm.get("subpath", "")
|
||||
if subdir != "":
|
||||
readpathspec = ":%s" % subdir
|
||||
readpathspec = ":%s" % (subdir)
|
||||
def_destsuffix = "%s/" % os.path.basename(subdir.rstrip('/'))
|
||||
else:
|
||||
readpathspec = ""
|
||||
@@ -472,28 +255,34 @@ class Git(FetchMethod):
|
||||
if os.path.exists(destdir):
|
||||
bb.utils.prunedir(destdir)
|
||||
|
||||
if ud.shallow and (not os.path.exists(ud.clonedir) or self.need_update(ud, d)):
|
||||
bb.utils.mkdirhier(destdir)
|
||||
runfetchcmd("tar -xzf %s" % ud.fullshallow, d, workdir=destdir)
|
||||
else:
|
||||
runfetchcmd("%s clone %s %s/ %s" % (ud.basecmd, ud.cloneflags, ud.clonedir, destdir), d)
|
||||
cloneflags = "-s -n"
|
||||
if ud.bareclone:
|
||||
cloneflags += " --mirror"
|
||||
|
||||
repourl = self._get_repo_url(ud)
|
||||
runfetchcmd("%s remote set-url origin %s" % (ud.basecmd, repourl), d, workdir=destdir)
|
||||
# Versions of git prior to 1.7.9.2 have issues where foo.git and foo get confused
|
||||
# and you end up with some horrible union of the two when you attempt to clone it
|
||||
# The least invasive workaround seems to be a symlink to the real directory to
|
||||
# fool git into ignoring any .git version that may also be present.
|
||||
#
|
||||
# The issue is fixed in more recent versions of git so we can drop this hack in future
|
||||
# when that version becomes common enough.
|
||||
clonedir = ud.clonedir
|
||||
if not ud.path.endswith(".git"):
|
||||
indirectiondir = destdir[:-1] + ".indirectionsymlink"
|
||||
if os.path.exists(indirectiondir):
|
||||
os.remove(indirectiondir)
|
||||
bb.utils.mkdirhier(os.path.dirname(indirectiondir))
|
||||
os.symlink(ud.clonedir, indirectiondir)
|
||||
clonedir = indirectiondir
|
||||
|
||||
runfetchcmd("git clone %s %s/ %s" % (cloneflags, clonedir, destdir), d)
|
||||
if not ud.nocheckout:
|
||||
os.chdir(destdir)
|
||||
if subdir != "":
|
||||
runfetchcmd("%s read-tree %s%s" % (ud.basecmd, ud.revisions[ud.names[0]], readpathspec), d,
|
||||
workdir=destdir)
|
||||
runfetchcmd("%s checkout-index -q -f -a" % ud.basecmd, d, workdir=destdir)
|
||||
elif not ud.nobranch:
|
||||
branchname = ud.branches[ud.names[0]]
|
||||
runfetchcmd("%s checkout -B %s %s" % (ud.basecmd, branchname, \
|
||||
ud.revisions[ud.names[0]]), d, workdir=destdir)
|
||||
runfetchcmd("%s branch %s --set-upstream-to origin/%s" % (ud.basecmd, branchname, \
|
||||
branchname), d, workdir=destdir)
|
||||
runfetchcmd("%s read-tree %s%s" % (ud.basecmd, ud.revisions[ud.names[0]], readpathspec), d)
|
||||
runfetchcmd("%s checkout-index -q -f -a" % ud.basecmd, d)
|
||||
else:
|
||||
runfetchcmd("%s checkout %s" % (ud.basecmd, ud.revisions[ud.names[0]]), d, workdir=destdir)
|
||||
|
||||
runfetchcmd("%s checkout %s" % (ud.basecmd, ud.revisions[ud.names[0]]), d)
|
||||
return True
|
||||
|
||||
def clean(self, ud, d):
|
||||
@@ -506,7 +295,7 @@ class Git(FetchMethod):
|
||||
def supports_srcrev(self):
|
||||
return True
|
||||
|
||||
def _contains_ref(self, ud, d, name, wd):
|
||||
def _contains_ref(self, ud, d, name):
|
||||
cmd = ""
|
||||
if ud.nobranch:
|
||||
cmd = "%s log --pretty=oneline -n 1 %s -- 2> /dev/null | wc -l" % (
|
||||
@@ -515,23 +304,13 @@ class Git(FetchMethod):
|
||||
cmd = "%s branch --contains %s --list %s 2> /dev/null | wc -l" % (
|
||||
ud.basecmd, ud.revisions[name], ud.branches[name])
|
||||
try:
|
||||
output = runfetchcmd(cmd, d, quiet=True, workdir=wd)
|
||||
output = runfetchcmd(cmd, d, quiet=True)
|
||||
except bb.fetch2.FetchError:
|
||||
return False
|
||||
if len(output.split()) > 1:
|
||||
raise bb.fetch2.FetchError("The command '%s' gave output with more then 1 line unexpectedly, output: '%s'" % (cmd, output))
|
||||
return output.split()[0] != "0"
|
||||
|
||||
def _get_repo_url(self, ud):
|
||||
"""
|
||||
Return the repository URL
|
||||
"""
|
||||
if ud.user:
|
||||
username = ud.user + '@'
|
||||
else:
|
||||
username = ""
|
||||
return "%s://%s%s%s" % (ud.proto, username, ud.host, ud.path)
|
||||
|
||||
def _revision_key(self, ud, d, name):
|
||||
"""
|
||||
Return a unique key for the url
|
||||
@@ -542,122 +321,35 @@ class Git(FetchMethod):
|
||||
"""
|
||||
Run git ls-remote with the specified search string
|
||||
"""
|
||||
# Prevent recursion e.g. in OE if SRCPV is in PV, PV is in WORKDIR,
|
||||
# and WORKDIR is in PATH (as a result of RSS), our call to
|
||||
# runfetchcmd() exports PATH so this function will get called again (!)
|
||||
# In this scenario the return call of the function isn't actually
|
||||
# important - WORKDIR isn't needed in PATH to call git ls-remote
|
||||
# anyway.
|
||||
if d.getVar('_BB_GIT_IN_LSREMOTE', False):
|
||||
return ''
|
||||
d.setVar('_BB_GIT_IN_LSREMOTE', '1')
|
||||
try:
|
||||
repourl = self._get_repo_url(ud)
|
||||
cmd = "%s ls-remote %s %s" % \
|
||||
(ud.basecmd, repourl, search)
|
||||
if ud.proto.lower() != 'file':
|
||||
bb.fetch2.check_network_access(d, cmd, repourl)
|
||||
output = runfetchcmd(cmd, d, True)
|
||||
if not output:
|
||||
raise bb.fetch2.FetchError("The command %s gave empty output unexpectedly" % cmd, ud.url)
|
||||
finally:
|
||||
d.delVar('_BB_GIT_IN_LSREMOTE')
|
||||
if ud.user:
|
||||
username = ud.user + '@'
|
||||
else:
|
||||
username = ""
|
||||
|
||||
cmd = "%s ls-remote %s://%s%s%s %s" % \
|
||||
(ud.basecmd, ud.proto, username, ud.host, ud.path, search)
|
||||
if ud.proto.lower() != 'file':
|
||||
bb.fetch2.check_network_access(d, cmd)
|
||||
output = runfetchcmd(cmd, d, True)
|
||||
if not output:
|
||||
raise bb.fetch2.FetchError("The command %s gave empty output unexpectedly" % cmd, ud.url)
|
||||
return output
|
||||
|
||||
def _latest_revision(self, ud, d, name):
|
||||
"""
|
||||
Compute the HEAD revision for the url
|
||||
"""
|
||||
output = self._lsremote(ud, d, "")
|
||||
# Tags of the form ^{} may not work, need to fallback to other form
|
||||
if ud.unresolvedrev[name][:5] == "refs/" or ud.usehead:
|
||||
head = ud.unresolvedrev[name]
|
||||
tag = ud.unresolvedrev[name]
|
||||
else:
|
||||
head = "refs/heads/%s" % ud.unresolvedrev[name]
|
||||
tag = "refs/tags/%s" % ud.unresolvedrev[name]
|
||||
for s in [head, tag + "^{}", tag]:
|
||||
for l in output.strip().split('\n'):
|
||||
sha1, ref = l.split()
|
||||
if s == ref:
|
||||
return sha1
|
||||
raise bb.fetch2.FetchError("Unable to resolve '%s' in upstream git repository in git ls-remote output for %s" % \
|
||||
(ud.unresolvedrev[name], ud.host+ud.path))
|
||||
|
||||
def latest_versionstring(self, ud, d):
|
||||
"""
|
||||
Compute the latest release name like "x.y.x" in "x.y.x+gitHASH"
|
||||
by searching through the tags output of ls-remote, comparing
|
||||
versions and returning the highest match.
|
||||
"""
|
||||
pupver = ('', '')
|
||||
|
||||
tagregex = re.compile(d.getVar('UPSTREAM_CHECK_GITTAGREGEX') or "(?P<pver>([0-9][\.|_]?)+)")
|
||||
try:
|
||||
output = self._lsremote(ud, d, "refs/tags/*")
|
||||
except bb.fetch2.FetchError or bb.fetch2.NetworkAccess:
|
||||
return pupver
|
||||
|
||||
verstring = ""
|
||||
revision = ""
|
||||
for line in output.split("\n"):
|
||||
if not line:
|
||||
break
|
||||
|
||||
tag_head = line.split("/")[-1]
|
||||
# Ignore non-released branches
|
||||
m = re.search("(alpha|beta|rc|final)+", tag_head)
|
||||
if m:
|
||||
continue
|
||||
|
||||
# search for version in the line
|
||||
tag = tagregex.search(tag_head)
|
||||
if tag == None:
|
||||
continue
|
||||
|
||||
tag = tag.group('pver')
|
||||
tag = tag.replace("_", ".")
|
||||
|
||||
if verstring and bb.utils.vercmp(("0", tag, ""), ("0", verstring, "")) < 0:
|
||||
continue
|
||||
|
||||
verstring = tag
|
||||
revision = line.split()[0]
|
||||
pupver = (verstring, revision)
|
||||
|
||||
return pupver
|
||||
search = "refs/heads/%s refs/tags/%s^{}" % (ud.unresolvedrev[name], ud.unresolvedrev[name])
|
||||
output = self._lsremote(ud, d, search)
|
||||
return output.split()[0]
|
||||
|
||||
def _build_revision(self, ud, d, name):
|
||||
return ud.revisions[name]
|
||||
|
||||
def gitpkgv_revision(self, ud, d, name):
|
||||
"""
|
||||
Return a sortable revision number by counting commits in the history
|
||||
Based on gitpkgv.bblass in meta-openembedded
|
||||
"""
|
||||
rev = self._build_revision(ud, d, name)
|
||||
localpath = ud.localpath
|
||||
rev_file = os.path.join(localpath, "oe-gitpkgv_" + rev)
|
||||
if not os.path.exists(localpath):
|
||||
commits = None
|
||||
else:
|
||||
if not os.path.exists(rev_file) or not os.path.getsize(rev_file):
|
||||
from pipes import quote
|
||||
commits = bb.fetch2.runfetchcmd(
|
||||
"git rev-list %s -- | wc -l" % quote(rev),
|
||||
d, quiet=True).strip().lstrip('0')
|
||||
if commits:
|
||||
open(rev_file, "w").write("%d\n" % int(commits))
|
||||
else:
|
||||
commits = open(rev_file, "r").readline(128).strip()
|
||||
if commits:
|
||||
return False, "%s+%s" % (commits, rev[:7])
|
||||
else:
|
||||
return True, str(rev)
|
||||
|
||||
def checkstatus(self, fetch, ud, d):
|
||||
def checkstatus(self, ud, d):
|
||||
fetchcmd = "%s ls-remote %s" % (ud.basecmd, ud.url)
|
||||
try:
|
||||
self._lsremote(ud, d, "")
|
||||
runfetchcmd(fetchcmd, d, quiet=True)
|
||||
return True
|
||||
except bb.fetch2.FetchError:
|
||||
except FetchError:
|
||||
return False
|
||||
|
||||
@@ -22,6 +22,7 @@ BitBake 'Fetch' git annex implementation
|
||||
|
||||
import os
|
||||
import bb
|
||||
from bb import data
|
||||
from bb.fetch2.git import Git
|
||||
from bb.fetch2 import runfetchcmd
|
||||
from bb.fetch2 import logger
|
||||
@@ -33,59 +34,43 @@ class GitANNEX(Git):
|
||||
"""
|
||||
return ud.type in ['gitannex']
|
||||
|
||||
def urldata_init(self, ud, d):
|
||||
super(GitANNEX, self).urldata_init(ud, d)
|
||||
if ud.shallow:
|
||||
ud.shallow_extra_refs += ['refs/heads/git-annex', 'refs/heads/synced/*']
|
||||
|
||||
def uses_annex(self, ud, d, wd):
|
||||
def uses_annex(self, ud, d):
|
||||
for name in ud.names:
|
||||
try:
|
||||
runfetchcmd("%s rev-list git-annex" % (ud.basecmd), d, quiet=True, workdir=wd)
|
||||
runfetchcmd("%s rev-list git-annex" % (ud.basecmd), d, quiet=True)
|
||||
return True
|
||||
except bb.fetch.FetchError:
|
||||
pass
|
||||
|
||||
return False
|
||||
|
||||
def update_annex(self, ud, d, wd):
|
||||
def update_annex(self, ud, d):
|
||||
try:
|
||||
runfetchcmd("%s annex get --all" % (ud.basecmd), d, quiet=True, workdir=wd)
|
||||
runfetchcmd("%s annex get --all" % (ud.basecmd), d, quiet=True)
|
||||
except bb.fetch.FetchError:
|
||||
return False
|
||||
runfetchcmd("chmod u+w -R %s/annex" % (ud.clonedir), d, quiet=True, workdir=wd)
|
||||
runfetchcmd("chmod u+w -R %s/annex" % (ud.clonedir), d, quiet=True)
|
||||
|
||||
return True
|
||||
|
||||
def download(self, ud, d):
|
||||
Git.download(self, ud, d)
|
||||
|
||||
if not ud.shallow or ud.localpath != ud.fullshallow:
|
||||
if self.uses_annex(ud, d, ud.clonedir):
|
||||
self.update_annex(ud, d, ud.clonedir)
|
||||
|
||||
def clone_shallow_local(self, ud, dest, d):
|
||||
super(GitANNEX, self).clone_shallow_local(ud, dest, d)
|
||||
|
||||
try:
|
||||
runfetchcmd("%s annex init" % ud.basecmd, d, workdir=dest)
|
||||
except bb.fetch.FetchError:
|
||||
pass
|
||||
|
||||
if self.uses_annex(ud, d, dest):
|
||||
runfetchcmd("%s annex get" % ud.basecmd, d, workdir=dest)
|
||||
runfetchcmd("chmod u+w -R %s/.git/annex" % (dest), d, quiet=True, workdir=dest)
|
||||
os.chdir(ud.clonedir)
|
||||
annex = self.uses_annex(ud, d)
|
||||
if annex:
|
||||
self.update_annex(ud, d)
|
||||
|
||||
def unpack(self, ud, destdir, d):
|
||||
Git.unpack(self, ud, destdir, d)
|
||||
|
||||
os.chdir(ud.destdir)
|
||||
try:
|
||||
runfetchcmd("%s annex init" % (ud.basecmd), d, workdir=ud.destdir)
|
||||
runfetchcmd("%s annex sync" % (ud.basecmd), d)
|
||||
except bb.fetch.FetchError:
|
||||
pass
|
||||
|
||||
annex = self.uses_annex(ud, d, ud.destdir)
|
||||
annex = self.uses_annex(ud, d)
|
||||
if annex:
|
||||
runfetchcmd("%s annex get" % (ud.basecmd), d, workdir=ud.destdir)
|
||||
runfetchcmd("chmod u+w -R %s/.git/annex" % (ud.destdir), d, quiet=True, workdir=ud.destdir)
|
||||
|
||||
runfetchcmd("%s annex get" % (ud.basecmd), d)
|
||||
runfetchcmd("chmod u+w -R %s/.git/annex" % (ud.destdir), d, quiet=True)
|
||||
|
||||
@@ -2,16 +2,6 @@
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
BitBake 'Fetch' git submodules implementation

Inherits from and extends the Git fetcher to retrieve submodules of a git repository
after cloning.

SRC_URI = "gitsm://<see Git fetcher for syntax>"

See the Git fetcher, git://, for usage documentation.

NOTE: Switching a SRC_URI from "git://" to "gitsm://" requires a clean of your recipe.

"""

# Copyright (C) 2013 Richard Purdie
@@ -31,6 +21,7 @@ NOTE: Switching a SRC_URI from "git://" to "gitsm://" requires a clean of your r
|
||||
|
||||
import os
|
||||
import bb
|
||||
from bb import data
|
||||
from bb.fetch2.git import Git
|
||||
from bb.fetch2 import runfetchcmd
|
||||
from bb.fetch2 import logger
|
||||
@@ -42,10 +33,10 @@ class GitSM(Git):
|
||||
"""
|
||||
return ud.type in ['gitsm']
|
||||
|
||||
def uses_submodules(self, ud, d, wd):
|
||||
def uses_submodules(self, ud, d):
|
||||
for name in ud.names:
|
||||
try:
|
||||
runfetchcmd("%s show %s:.gitmodules" % (ud.basecmd, ud.revisions[name]), d, quiet=True, workdir=wd)
|
||||
runfetchcmd("%s show %s:.gitmodules" % (ud.basecmd, ud.revisions[name]), d, quiet=True)
|
||||
return True
|
||||
except bb.fetch.FetchError:
|
||||
pass
|
||||
@@ -106,30 +97,30 @@ class GitSM(Git):
|
||||
os.mkdir(tmpclonedir)
|
||||
os.rename(ud.clonedir, gitdir)
|
||||
runfetchcmd("sed " + gitdir + "/config -i -e 's/bare.*=.*true/bare = false/'", d)
|
||||
runfetchcmd(ud.basecmd + " reset --hard", d, workdir=tmpclonedir)
|
||||
runfetchcmd(ud.basecmd + " checkout -f " + ud.revisions[ud.names[0]], d, workdir=tmpclonedir)
|
||||
runfetchcmd(ud.basecmd + " submodule update --init --recursive", d, workdir=tmpclonedir)
|
||||
os.chdir(tmpclonedir)
|
||||
runfetchcmd(ud.basecmd + " reset --hard", d)
|
||||
runfetchcmd(ud.basecmd + " submodule init", d)
|
||||
runfetchcmd(ud.basecmd + " submodule update", d)
|
||||
self._set_relative_paths(tmpclonedir)
|
||||
runfetchcmd("sed " + gitdir + "/config -i -e 's/bare.*=.*false/bare = true/'", d, workdir=tmpclonedir)
|
||||
runfetchcmd("sed " + gitdir + "/config -i -e 's/bare.*=.*false/bare = true/'", d)
|
||||
os.rename(gitdir, ud.clonedir,)
|
||||
bb.utils.remove(tmpclonedir, True)
|
||||
|
||||
def download(self, ud, d):
|
||||
Git.download(self, ud, d)
|
||||
|
||||
if not ud.shallow or ud.localpath != ud.fullshallow:
|
||||
submodules = self.uses_submodules(ud, d, ud.clonedir)
|
||||
if submodules:
|
||||
self.update_submodules(ud, d)
|
||||
|
||||
def clone_shallow_local(self, ud, dest, d):
|
||||
super(GitSM, self).clone_shallow_local(ud, dest, d)
|
||||
|
||||
runfetchcmd('cp -fpPRH "%s/modules" "%s/"' % (ud.clonedir, os.path.join(dest, '.git')), d)
|
||||
os.chdir(ud.clonedir)
|
||||
submodules = self.uses_submodules(ud, d)
|
||||
if submodules:
|
||||
self.update_submodules(ud, d)
|
||||
|
||||
def unpack(self, ud, destdir, d):
|
||||
Git.unpack(self, ud, destdir, d)
|
||||
|
||||
os.chdir(ud.destdir)
|
||||
submodules = self.uses_submodules(ud, d)
|
||||
if submodules:
|
||||
runfetchcmd("cp -r " + ud.clonedir + "/modules " + ud.destdir + "/.git/", d)
|
||||
runfetchcmd(ud.basecmd + " submodule init", d)
|
||||
runfetchcmd(ud.basecmd + " submodule update", d)
|
||||
|
||||
if self.uses_submodules(ud, d, ud.destdir):
|
||||
runfetchcmd(ud.basecmd + " checkout " + ud.revisions[ud.names[0]], d, workdir=ud.destdir)
|
||||
runfetchcmd(ud.basecmd + " submodule update --init --recursive", d, workdir=ud.destdir)
|
||||
|
||||
@@ -28,7 +28,7 @@ import os
|
||||
import sys
|
||||
import logging
|
||||
import bb
|
||||
import errno
|
||||
from bb import data
|
||||
from bb.fetch2 import FetchMethod
|
||||
from bb.fetch2 import FetchError
|
||||
from bb.fetch2 import MissingParameterError
|
||||
@@ -43,13 +43,6 @@ class Hg(FetchMethod):
|
||||
"""
|
||||
return ud.type in ['hg']
|
||||
|
||||
def supports_checksum(self, urldata):
|
||||
"""
|
||||
Don't require checksums for local archives created from
|
||||
repository checkouts.
|
||||
"""
|
||||
return False
|
||||
|
||||
def urldata_init(self, ud, d):
|
||||
"""
|
||||
init hg specific variable within url data
|
||||
@@ -59,34 +52,19 @@ class Hg(FetchMethod):
|
||||
|
||||
ud.module = ud.parm["module"]
|
||||
|
||||
if 'protocol' in ud.parm:
|
||||
ud.proto = ud.parm['protocol']
|
||||
elif not ud.host:
|
||||
ud.proto = 'file'
|
||||
else:
|
||||
ud.proto = "hg"
|
||||
# Create paths to mercurial checkouts
|
||||
relpath = self._strip_leading_slashes(ud.path)
|
||||
ud.pkgdir = os.path.join(data.expand('${HGDIR}', d), ud.host, relpath)
|
||||
ud.moddir = os.path.join(ud.pkgdir, ud.module)
|
||||
|
||||
ud.setup_revisions(d)
|
||||
ud.setup_revisons(d)
|
||||
|
||||
if 'rev' in ud.parm:
|
||||
ud.revision = ud.parm['rev']
|
||||
elif not ud.revision:
|
||||
ud.revision = self.latest_revision(ud, d)
|
||||
|
||||
# Create paths to mercurial checkouts
|
||||
hgsrcname = '%s_%s_%s' % (ud.module.replace('/', '.'), \
|
||||
ud.host, ud.path.replace('/', '.'))
|
||||
mirrortarball = 'hg_%s.tar.gz' % hgsrcname
|
||||
ud.fullmirror = os.path.join(d.getVar("DL_DIR"), mirrortarball)
|
||||
ud.mirrortarballs = [mirrortarball]
|
||||
|
||||
hgdir = d.getVar("HGDIR") or (d.getVar("DL_DIR") + "/hg/")
|
||||
ud.pkgdir = os.path.join(hgdir, hgsrcname)
|
||||
ud.moddir = os.path.join(ud.pkgdir, ud.module)
|
||||
ud.localfile = ud.moddir
|
||||
ud.basecmd = d.getVar("FETCHCMD_hg") or "/usr/bin/env hg"
|
||||
|
||||
ud.write_tarballs = d.getVar("BB_GENERATE_MIRROR_TARBALLS")
|
||||
ud.localfile = data.expand('%s_%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.path.replace('/', '.'), ud.revision), d)
|
||||
|
||||
def need_update(self, ud, d):
|
||||
revTag = ud.parm.get('rev', 'tip')
|
||||
@@ -96,21 +74,14 @@ class Hg(FetchMethod):
|
||||
return True
|
||||
return False
|
||||
|
||||
def try_premirror(self, ud, d):
|
||||
# If we don't do this, updating an existing checkout with only premirrors
|
||||
# is not possible
|
||||
if d.getVar("BB_FETCH_PREMIRRORONLY") is not None:
|
||||
return True
|
||||
if os.path.exists(ud.moddir):
|
||||
return False
|
||||
return True
|
||||
|
||||
def _buildhgcommand(self, ud, d, command):
|
||||
"""
|
||||
Build up an hg commandline based on ud
|
||||
command is "fetch", "update", "info"
|
||||
"""
|
||||
|
||||
basecmd = data.expand('${FETCHCMD_hg}', d)
|
||||
|
||||
proto = ud.parm.get('protocol', 'http')
|
||||
|
||||
host = ud.host
|
||||
@@ -127,7 +98,7 @@ class Hg(FetchMethod):
|
||||
hgroot = ud.user + "@" + host + ud.path
|
||||
|
||||
if command == "info":
|
||||
return "%s identify -i %s://%s/%s" % (ud.basecmd, proto, hgroot, ud.module)
|
||||
return "%s identify -i %s://%s/%s" % (basecmd, proto, hgroot, ud.module)
|
||||
|
||||
options = [];
|
||||
|
||||
@@ -140,22 +111,19 @@ class Hg(FetchMethod):
|
||||
|
||||
if command == "fetch":
|
||||
if ud.user and ud.pswd:
|
||||
cmd = "%s --config auth.default.prefix=* --config auth.default.username=%s --config auth.default.password=%s --config \"auth.default.schemes=%s\" clone %s %s://%s/%s %s" % (ud.basecmd, ud.user, ud.pswd, proto, " ".join(options), proto, hgroot, ud.module, ud.module)
|
||||
cmd = "%s --config auth.default.prefix=* --config auth.default.username=%s --config auth.default.password=%s --config \"auth.default.schemes=%s\" clone %s %s://%s/%s %s" % (basecmd, ud.user, ud.pswd, proto, " ".join(options), proto, hgroot, ud.module, ud.module)
|
||||
else:
|
||||
cmd = "%s clone %s %s://%s/%s %s" % (ud.basecmd, " ".join(options), proto, hgroot, ud.module, ud.module)
|
||||
cmd = "%s clone %s %s://%s/%s %s" % (basecmd, " ".join(options), proto, hgroot, ud.module, ud.module)
|
||||
elif command == "pull":
|
||||
# do not pass options list; limiting pull to rev causes the local
|
||||
# repo not to contain it and immediately following "update" command
|
||||
# will crash
|
||||
if ud.user and ud.pswd:
|
||||
cmd = "%s --config auth.default.prefix=* --config auth.default.username=%s --config auth.default.password=%s --config \"auth.default.schemes=%s\" pull" % (ud.basecmd, ud.user, ud.pswd, proto)
|
||||
cmd = "%s --config auth.default.prefix=* --config auth.default.username=%s --config auth.default.password=%s --config \"auth.default.schemes=%s\" pull" % (basecmd, ud.user, ud.pswd, proto)
|
||||
else:
|
||||
cmd = "%s pull" % (ud.basecmd)
|
||||
cmd = "%s pull" % (basecmd)
|
||||
elif command == "update":
|
||||
if ud.user and ud.pswd:
|
||||
cmd = "%s --config auth.default.prefix=* --config auth.default.username=%s --config auth.default.password=%s --config \"auth.default.schemes=%s\" update -C %s" % (ud.basecmd, ud.user, ud.pswd, proto, " ".join(options))
|
||||
else:
|
||||
cmd = "%s update -C %s" % (ud.basecmd, " ".join(options))
|
||||
cmd = "%s update --config auth.default.prefix=* --config auth.default.username=%s --config auth.default.password=%s --config \"auth.default.schemes=%s\" -C %s" % (basecmd, ud.user, ud.pswd, proto, " ".join(options))
|
||||
else:
|
||||
raise FetchError("Invalid hg command %s" % command, ud.url)
|
||||
|
||||
@@ -166,53 +134,40 @@ class Hg(FetchMethod):
|
||||
|
||||
logger.debug(2, "Fetch: checking for module directory '" + ud.moddir + "'")
|
||||
|
||||
# If the checkout doesn't exist and the mirror tarball does, extract it
|
||||
if not os.path.exists(ud.pkgdir) and os.path.exists(ud.fullmirror):
|
||||
bb.utils.mkdirhier(ud.pkgdir)
|
||||
runfetchcmd("tar -xzf %s" % (ud.fullmirror), d, workdir=ud.pkgdir)
|
||||
|
||||
if os.access(os.path.join(ud.moddir, '.hg'), os.R_OK):
|
||||
# Found the source, check whether need pull
|
||||
updatecmd = self._buildhgcommand(ud, d, "update")
|
||||
updatecmd = self._buildhgcommand(ud, d, "pull")
|
||||
logger.info("Update " + ud.url)
|
||||
# update sources there
|
||||
os.chdir(ud.moddir)
|
||||
logger.debug(1, "Running %s", updatecmd)
|
||||
try:
|
||||
runfetchcmd(updatecmd, d, workdir=ud.moddir)
|
||||
except bb.fetch2.FetchError:
|
||||
# Runnning pull in the repo
|
||||
pullcmd = self._buildhgcommand(ud, d, "pull")
|
||||
logger.info("Pulling " + ud.url)
|
||||
# update sources there
|
||||
logger.debug(1, "Running %s", pullcmd)
|
||||
bb.fetch2.check_network_access(d, pullcmd, ud.url)
|
||||
runfetchcmd(pullcmd, d, workdir=ud.moddir)
|
||||
try:
|
||||
os.unlink(ud.fullmirror)
|
||||
except OSError as exc:
|
||||
if exc.errno != errno.ENOENT:
|
||||
raise
|
||||
bb.fetch2.check_network_access(d, updatecmd, ud.url)
|
||||
runfetchcmd(updatecmd, d)
|
||||
|
||||
# No source found, clone it.
if not os.path.exists(ud.moddir):
else:
fetchcmd = self._buildhgcommand(ud, d, "fetch")
logger.info("Fetch " + ud.url)
# check out sources there
bb.utils.mkdirhier(ud.pkgdir)
os.chdir(ud.pkgdir)
logger.debug(1, "Running %s", fetchcmd)
bb.fetch2.check_network_access(d, fetchcmd, ud.url)
runfetchcmd(fetchcmd, d, workdir=ud.pkgdir)
runfetchcmd(fetchcmd, d)

# Even when we clone (fetch), we still need to update as hg's clone
# won't checkout the specified revision if its on a branch
updatecmd = self._buildhgcommand(ud, d, "update")
os.chdir(ud.moddir)
logger.debug(1, "Running %s", updatecmd)
runfetchcmd(updatecmd, d, workdir=ud.moddir)
runfetchcmd(updatecmd, d)

def clean(self, ud, d):
""" Clean the hg dir """
scmdata = ud.parm.get("scmdata", "")
if scmdata == "keep":
tar_flags = ""
else:
tar_flags = "--exclude '.hg' --exclude '.hgrags'"

bb.utils.remove(ud.localpath, True)
bb.utils.remove(ud.fullmirror)
bb.utils.remove(ud.fullmirror + ".done")
os.chdir(ud.pkgdir)
runfetchcmd("tar %s -czf %s %s" % (tar_flags, ud.localpath, ud.module), d, cleanup = [ud.localpath])

def supports_srcrev(self):
return True
@@ -221,7 +176,7 @@ class Hg(FetchMethod):
"""
Compute tip revision for the url
"""
bb.fetch2.check_network_access(d, self._buildhgcommand(ud, d, "info"), ud.url)
bb.fetch2.check_network_access(d, self._buildhgcommand(ud, d, "info"))
output = runfetchcmd(self._buildhgcommand(ud, d, "info"), d)
return output.strip()

@@ -233,38 +188,3 @@ class Hg(FetchMethod):
Return a unique key for the url
"""
return "hg:" + ud.moddir

def build_mirror_data(self, ud, d):
# Generate a mirror tarball if needed
if ud.write_tarballs == "1" and not os.path.exists(ud.fullmirror):
# it's possible that this symlink points to read-only filesystem with PREMIRROR
if os.path.islink(ud.fullmirror):
os.unlink(ud.fullmirror)

logger.info("Creating tarball of hg repository")
runfetchcmd("tar -czf %s %s" % (ud.fullmirror, ud.module), d, workdir=ud.pkgdir)
runfetchcmd("touch %s.done" % (ud.fullmirror), d, workdir=ud.pkgdir)

def localpath(self, ud, d):
return ud.pkgdir

def unpack(self, ud, destdir, d):
"""
Make a local clone or export for the url
"""

revflag = "-r %s" % ud.revision
subdir = ud.parm.get("destsuffix", ud.module)
codir = "%s/%s" % (destdir, subdir)

scmdata = ud.parm.get("scmdata", "")
if scmdata != "nokeep":
if not os.access(os.path.join(codir, '.hg'), os.R_OK):
logger.debug(2, "Unpack: creating new hg repository in '" + codir + "'")
runfetchcmd("%s init %s" % (ud.basecmd, codir), d)
logger.debug(2, "Unpack: updating source in '" + codir + "'")
runfetchcmd("%s pull %s" % (ud.basecmd, ud.moddir), d, workdir=codir)
runfetchcmd("%s up -C %s" % (ud.basecmd, revflag), d, workdir=codir)
else:
logger.debug(2, "Unpack: extracting source to '" + codir + "'")
runfetchcmd("%s archive -t files %s %s" % (ud.basecmd, revflag, codir), d, workdir=ud.moddir)

@@ -26,9 +26,10 @@ BitBake build tools.
# Based on functions from the base bb module, Copyright 2003 Holger Schurig

import os
import urllib.request, urllib.parse, urllib.error
import urllib
import bb
import bb.utils
from bb import data
from bb.fetch2 import FetchMethod, FetchError
from bb.fetch2 import logger

@@ -41,45 +42,38 @@ class Local(FetchMethod):

def urldata_init(self, ud, d):
# We don't set localfile as for this fetcher the file is already local!
ud.decodedurl = urllib.parse.unquote(ud.url.split("://")[1].split(";")[0])
ud.decodedurl = urllib.unquote(ud.url.split("://")[1].split(";")[0])
ud.basename = os.path.basename(ud.decodedurl)
ud.basepath = ud.decodedurl
ud.needdonestamp = False
return

def localpath(self, urldata, d):
"""
Return the local filename of a given url assuming a successful fetch.
"""
return self.localpaths(urldata, d)[-1]

def localpaths(self, urldata, d):
"""
Return the local filename of a given url assuming a successful fetch.
"""
searched = []
path = urldata.decodedurl
newpath = path
if path[0] == "/":
return [path]
filespath = d.getVar('FILESPATH')
if filespath:
logger.debug(2, "Searching for %s in paths:\n %s" % (path, "\n ".join(filespath.split(":"))))
newpath, hist = bb.utils.which(filespath, path, history=True)
searched.extend(hist)
if (not newpath or not os.path.exists(newpath)) and path.find("*") != -1:
# For expressions using '*', best we can do is take the first directory in FILESPATH that exists
newpath, hist = bb.utils.which(filespath, ".", history=True)
searched.extend(hist)
logger.debug(2, "Searching for %s in path: %s" % (path, newpath))
return searched
if not os.path.exists(newpath):
dldirfile = os.path.join(d.getVar("DL_DIR"), path)
logger.debug(2, "Defaulting to %s for %s" % (dldirfile, path))
bb.utils.mkdirhier(os.path.dirname(dldirfile))
searched.append(dldirfile)
return searched
return searched
if path[0] != "/":
filespath = data.getVar('FILESPATH', d, True)
if filespath:
logger.debug(2, "Searching for %s in paths: \n%s" % (path, "\n ".join(filespath.split(":"))))
newpath = bb.utils.which(filespath, path)
if not newpath:
filesdir = data.getVar('FILESDIR', d, True)
if filesdir:
logger.debug(2, "Searching for %s in path: %s" % (path, filesdir))
newpath = os.path.join(filesdir, path)
if (not newpath or not os.path.exists(newpath)) and path.find("*") != -1:
# For expressions using '*', best we can do is take the first directory in FILESPATH that exists
newpath = bb.utils.which(filespath, ".")
logger.debug(2, "Searching for %s in path: %s" % (path, newpath))
return newpath
if not os.path.exists(newpath):
dldirfile = os.path.join(d.getVar("DL_DIR", True), path)
logger.debug(2, "Defaulting to %s for %s" % (dldirfile, path))
bb.utils.mkdirhier(os.path.dirname(dldirfile))
return dldirfile
return newpath

def need_update(self, ud, d):
if ud.url.find("*") != -1:
@@ -93,17 +87,20 @@ class Local(FetchMethod):
# no need to fetch local files, we'll deal with them in place.
if self.supports_checksum(urldata) and not os.path.exists(urldata.localpath):
locations = []
filespath = d.getVar('FILESPATH')
filespath = data.getVar('FILESPATH', d, True)
if filespath:
locations = filespath.split(":")
locations.append(d.getVar("DL_DIR"))
filesdir = data.getVar('FILESDIR', d, True)
if filesdir:
locations.append(filesdir)
locations.append(d.getVar("DL_DIR", True))

msg = "Unable to find file " + urldata.url + " anywhere. The paths that were searched were:\n " + "\n ".join(locations)
raise FetchError(msg)

return True

def checkstatus(self, fetch, urldata, d):
def checkstatus(self, urldata, d):
"""
Check the status of the url
"""

@@ -1,309 +0,0 @@
|
||||
# ex:ts=4:sw=4:sts=4:et
|
||||
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
|
||||
"""
|
||||
BitBake 'Fetch' NPM implementation
|
||||
|
||||
The NPM fetcher is used to retrieve files from the npmjs repository
|
||||
|
||||
Usage in the recipe:
|
||||
|
||||
SRC_URI = "npm://registry.npmjs.org/;name=${PN};version=${PV}"
|
||||
Suported SRC_URI options are:
|
||||
|
||||
- name
|
||||
- version
|
||||
|
||||
npm://registry.npmjs.org/${PN}/-/${PN}-${PV}.tgz would become npm://registry.npmjs.org;name=${PN};version=${PV}
|
||||
The fetcher all triggers off the existence of ud.localpath. If that exists and has the ".done" stamp, its assumed the fetch is good/done
|
||||
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import urllib.request, urllib.parse, urllib.error
|
||||
import json
|
||||
import subprocess
|
||||
import signal
|
||||
import bb
|
||||
from bb.fetch2 import FetchMethod
|
||||
from bb.fetch2 import FetchError
|
||||
from bb.fetch2 import ChecksumError
|
||||
from bb.fetch2 import runfetchcmd
|
||||
from bb.fetch2 import logger
|
||||
from bb.fetch2 import UnpackError
|
||||
from bb.fetch2 import ParameterError
|
||||
from distutils import spawn
|
||||
|
||||
def subprocess_setup():
|
||||
# Python installs a SIGPIPE handler by default. This is usually not what
|
||||
# non-Python subprocesses expect.
|
||||
# SIGPIPE errors are known issues with gzip/bash
|
||||
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
|
||||
|
||||
class Npm(FetchMethod):
|
||||
|
||||
"""Class to fetch urls via 'npm'"""
|
||||
def init(self, d):
|
||||
pass
|
||||
|
||||
def supports(self, ud, d):
|
||||
"""
|
||||
Check to see if a given url can be fetched with npm
|
||||
"""
|
||||
return ud.type in ['npm']
|
||||
|
||||
def debug(self, msg):
|
||||
logger.debug(1, "NpmFetch: %s", msg)
|
||||
|
||||
def clean(self, ud, d):
|
||||
logger.debug(2, "Calling cleanup %s" % ud.pkgname)
|
||||
bb.utils.remove(ud.localpath, False)
|
||||
bb.utils.remove(ud.pkgdatadir, True)
|
||||
bb.utils.remove(ud.fullmirror, False)
|
||||
|
||||
def urldata_init(self, ud, d):
|
||||
"""
|
||||
init NPM specific variable within url data
|
||||
"""
|
||||
if 'downloadfilename' in ud.parm:
|
||||
ud.basename = ud.parm['downloadfilename']
|
||||
else:
|
||||
ud.basename = os.path.basename(ud.path)
|
||||
|
||||
# can't call it ud.name otherwise fetcher base class will start doing sha1stuff
|
||||
# TODO: find a way to get an sha1/sha256 manifest of pkg & all deps
|
||||
ud.pkgname = ud.parm.get("name", None)
|
||||
if not ud.pkgname:
|
||||
raise ParameterError("NPM fetcher requires a name parameter", ud.url)
|
||||
ud.version = ud.parm.get("version", None)
|
||||
if not ud.version:
|
||||
raise ParameterError("NPM fetcher requires a version parameter", ud.url)
|
||||
ud.bbnpmmanifest = "%s-%s.deps.json" % (ud.pkgname, ud.version)
|
||||
ud.bbnpmmanifest = ud.bbnpmmanifest.replace('/', '-')
|
||||
ud.registry = "http://%s" % (ud.url.replace('npm://', '', 1).split(';'))[0]
|
||||
prefixdir = "npm/%s" % ud.pkgname
|
||||
ud.pkgdatadir = d.expand("${DL_DIR}/%s" % prefixdir)
|
||||
if not os.path.exists(ud.pkgdatadir):
|
||||
bb.utils.mkdirhier(ud.pkgdatadir)
|
||||
ud.localpath = d.expand("${DL_DIR}/npm/%s" % ud.bbnpmmanifest)
|
||||
|
||||
self.basecmd = d.getVar("FETCHCMD_wget") or "/usr/bin/env wget -O -t 2 -T 30 -nv --passive-ftp --no-check-certificate "
|
||||
ud.prefixdir = prefixdir
|
||||
|
||||
ud.write_tarballs = ((d.getVar("BB_GENERATE_MIRROR_TARBALLS") or "0") != "0")
|
||||
mirrortarball = 'npm_%s-%s.tar.xz' % (ud.pkgname, ud.version)
|
||||
mirrortarball = mirrortarball.replace('/', '-')
|
||||
ud.fullmirror = os.path.join(d.getVar("DL_DIR"), mirrortarball)
|
||||
ud.mirrortarballs = [mirrortarball]
|
||||
|
||||
def need_update(self, ud, d):
|
||||
if os.path.exists(ud.localpath):
|
||||
return False
|
||||
return True
|
||||
|
||||
def _runwget(self, ud, d, command, quiet):
|
||||
logger.debug(2, "Fetching %s using command '%s'" % (ud.url, command))
|
||||
bb.fetch2.check_network_access(d, command, ud.url)
|
||||
dldir = d.getVar("DL_DIR")
|
||||
runfetchcmd(command, d, quiet, workdir=dldir)
|
||||
|
||||
def _unpackdep(self, ud, pkg, data, destdir, dldir, d):
|
||||
file = data[pkg]['tgz']
|
||||
logger.debug(2, "file to extract is %s" % file)
|
||||
if file.endswith('.tgz') or file.endswith('.tar.gz') or file.endswith('.tar.Z'):
|
||||
cmd = 'tar xz --strip 1 --no-same-owner --warning=no-unknown-keyword -f %s/%s' % (dldir, file)
|
||||
else:
|
||||
bb.fatal("NPM package %s downloaded not a tarball!" % file)
|
||||
|
||||
# Change to subdir before executing command
|
||||
if not os.path.exists(destdir):
|
||||
os.makedirs(destdir)
|
||||
path = d.getVar('PATH')
|
||||
if path:
|
||||
cmd = "PATH=\"%s\" %s" % (path, cmd)
|
||||
bb.note("Unpacking %s to %s/" % (file, destdir))
|
||||
ret = subprocess.call(cmd, preexec_fn=subprocess_setup, shell=True, cwd=destdir)
|
||||
|
||||
if ret != 0:
|
||||
raise UnpackError("Unpack command %s failed with return value %s" % (cmd, ret), ud.url)
|
||||
|
||||
if 'deps' not in data[pkg]:
|
||||
return
|
||||
for dep in data[pkg]['deps']:
|
||||
self._unpackdep(ud, dep, data[pkg]['deps'], "%s/node_modules/%s" % (destdir, dep), dldir, d)
|
||||
|
||||
|
||||
def unpack(self, ud, destdir, d):
|
||||
dldir = d.getVar("DL_DIR")
|
||||
with open("%s/npm/%s" % (dldir, ud.bbnpmmanifest)) as datafile:
|
||||
workobj = json.load(datafile)
|
||||
dldir = "%s/%s" % (os.path.dirname(ud.localpath), ud.pkgname)
|
||||
|
||||
if 'subdir' in ud.parm:
|
||||
unpackdir = '%s/%s' % (destdir, ud.parm.get('subdir'))
|
||||
else:
|
||||
unpackdir = '%s/npmpkg' % destdir
|
||||
|
||||
self._unpackdep(ud, ud.pkgname, workobj, unpackdir, dldir, d)
|
||||
|
||||
def _parse_view(self, output):
|
||||
'''
|
||||
Parse the output of npm view --json; the last JSON result
|
||||
is assumed to be the one that we're interested in.
|
||||
'''
|
||||
pdata = None
|
||||
outdeps = {}
|
||||
datalines = []
|
||||
bracelevel = 0
|
||||
for line in output.splitlines():
|
||||
if bracelevel:
|
||||
datalines.append(line)
|
||||
elif '{' in line:
|
||||
datalines = []
|
||||
datalines.append(line)
|
||||
bracelevel = bracelevel + line.count('{') - line.count('}')
|
||||
if datalines:
|
||||
pdata = json.loads('\n'.join(datalines))
|
||||
return pdata
|
||||
|
||||
def _getdependencies(self, pkg, data, version, d, ud, optional=False, fetchedlist=None):
|
||||
if fetchedlist is None:
|
||||
fetchedlist = []
|
||||
pkgfullname = pkg
|
||||
if version != '*' and not '/' in version:
|
||||
pkgfullname += "@'%s'" % version
|
||||
logger.debug(2, "Calling getdeps on %s" % pkg)
|
||||
fetchcmd = "npm view %s --json --registry %s" % (pkgfullname, ud.registry)
|
||||
output = runfetchcmd(fetchcmd, d, True)
|
||||
pdata = self._parse_view(output)
|
||||
if not pdata:
|
||||
raise FetchError("The command '%s' returned no output" % fetchcmd)
|
||||
if optional:
|
||||
pkg_os = pdata.get('os', None)
|
||||
if pkg_os:
|
||||
if not isinstance(pkg_os, list):
|
||||
pkg_os = [pkg_os]
|
||||
blacklist = False
|
||||
for item in pkg_os:
|
||||
if item.startswith('!'):
|
||||
blacklist = True
|
||||
break
|
||||
if (not blacklist and 'linux' not in pkg_os) or '!linux' in pkg_os:
|
||||
logger.debug(2, "Skipping %s since it's incompatible with Linux" % pkg)
|
||||
return
|
||||
#logger.debug(2, "Output URL is %s - %s - %s" % (ud.basepath, ud.basename, ud.localfile))
|
||||
outputurl = pdata['dist']['tarball']
|
||||
data[pkg] = {}
|
||||
data[pkg]['tgz'] = os.path.basename(outputurl)
|
||||
if outputurl in fetchedlist:
|
||||
return
|
||||
|
||||
self._runwget(ud, d, "%s --directory-prefix=%s %s" % (self.basecmd, ud.prefixdir, outputurl), False)
|
||||
fetchedlist.append(outputurl)
|
||||
|
||||
dependencies = pdata.get('dependencies', {})
|
||||
optionalDependencies = pdata.get('optionalDependencies', {})
|
||||
dependencies.update(optionalDependencies)
|
||||
depsfound = {}
|
||||
optdepsfound = {}
|
||||
data[pkg]['deps'] = {}
|
||||
for dep in dependencies:
|
||||
if dep in optionalDependencies:
|
||||
optdepsfound[dep] = dependencies[dep]
|
||||
else:
|
||||
depsfound[dep] = dependencies[dep]
|
||||
for dep, version in optdepsfound.items():
|
||||
self._getdependencies(dep, data[pkg]['deps'], version, d, ud, optional=True, fetchedlist=fetchedlist)
|
||||
for dep, version in depsfound.items():
|
||||
self._getdependencies(dep, data[pkg]['deps'], version, d, ud, fetchedlist=fetchedlist)
|
||||
|
||||
def _getshrinkeddependencies(self, pkg, data, version, d, ud, lockdown, manifest, toplevel=True):
|
||||
logger.debug(2, "NPM shrinkwrap file is %s" % data)
|
||||
if toplevel:
|
||||
name = data.get('name', None)
|
||||
if name and name != pkg:
|
||||
for obj in data.get('dependencies', []):
|
||||
if obj == pkg:
|
||||
self._getshrinkeddependencies(obj, data['dependencies'][obj], data['dependencies'][obj]['version'], d, ud, lockdown, manifest, False)
|
||||
return
|
||||
outputurl = "invalid"
|
||||
if ('resolved' not in data) or (not data['resolved'].startswith('http')):
|
||||
# will be the case for ${PN}
|
||||
fetchcmd = "npm view %s@%s dist.tarball --registry %s" % (pkg, version, ud.registry)
|
||||
logger.debug(2, "Found this matching URL: %s" % str(fetchcmd))
|
||||
outputurl = runfetchcmd(fetchcmd, d, True)
|
||||
else:
|
||||
outputurl = data['resolved']
|
||||
self._runwget(ud, d, "%s --directory-prefix=%s %s" % (self.basecmd, ud.prefixdir, outputurl), False)
|
||||
manifest[pkg] = {}
|
||||
manifest[pkg]['tgz'] = os.path.basename(outputurl).rstrip()
|
||||
manifest[pkg]['deps'] = {}
|
||||
|
||||
if pkg in lockdown:
|
||||
sha1_expected = lockdown[pkg][version]
|
||||
sha1_data = bb.utils.sha1_file("npm/%s/%s" % (ud.pkgname, manifest[pkg]['tgz']))
|
||||
if sha1_expected != sha1_data:
|
||||
msg = "\nFile: '%s' has %s checksum %s when %s was expected" % (manifest[pkg]['tgz'], 'sha1', sha1_data, sha1_expected)
|
||||
raise ChecksumError('Checksum mismatch!%s' % msg)
|
||||
else:
|
||||
logger.debug(2, "No lockdown data for %s@%s" % (pkg, version))
|
||||
|
||||
if 'dependencies' in data:
|
||||
for obj in data['dependencies']:
|
||||
logger.debug(2, "Found dep is %s" % str(obj))
|
||||
self._getshrinkeddependencies(obj, data['dependencies'][obj], data['dependencies'][obj]['version'], d, ud, lockdown, manifest[pkg]['deps'], False)
|
||||
|
||||
def download(self, ud, d):
|
||||
"""Fetch url"""
|
||||
jsondepobj = {}
|
||||
shrinkobj = {}
|
||||
lockdown = {}
|
||||
|
||||
if not os.listdir(ud.pkgdatadir) and os.path.exists(ud.fullmirror):
|
||||
dest = d.getVar("DL_DIR")
|
||||
bb.utils.mkdirhier(dest)
|
||||
runfetchcmd("tar -xJf %s" % (ud.fullmirror), d, workdir=dest)
|
||||
return
|
||||
|
||||
if ud.parm.get("noverify", None) != '1':
|
||||
shwrf = d.getVar('NPM_SHRINKWRAP')
|
||||
logger.debug(2, "NPM shrinkwrap file is %s" % shwrf)
|
||||
if shwrf:
|
||||
try:
|
||||
with open(shwrf) as datafile:
|
||||
shrinkobj = json.load(datafile)
|
||||
except Exception as e:
|
||||
raise FetchError('Error loading NPM_SHRINKWRAP file "%s" for %s: %s' % (shwrf, ud.pkgname, str(e)))
|
||||
elif not ud.ignore_checksums:
|
||||
logger.warning('Missing shrinkwrap file in NPM_SHRINKWRAP for %s, this will lead to unreliable builds!' % ud.pkgname)
|
||||
lckdf = d.getVar('NPM_LOCKDOWN')
|
||||
logger.debug(2, "NPM lockdown file is %s" % lckdf)
|
||||
if lckdf:
|
||||
try:
|
||||
with open(lckdf) as datafile:
|
||||
lockdown = json.load(datafile)
|
||||
except Exception as e:
|
||||
raise FetchError('Error loading NPM_LOCKDOWN file "%s" for %s: %s' % (lckdf, ud.pkgname, str(e)))
|
||||
elif not ud.ignore_checksums:
|
||||
logger.warning('Missing lockdown file in NPM_LOCKDOWN for %s, this will lead to unreproducible builds!' % ud.pkgname)
|
||||
|
||||
if ('name' not in shrinkobj):
|
||||
self._getdependencies(ud.pkgname, jsondepobj, ud.version, d, ud)
|
||||
else:
|
||||
self._getshrinkeddependencies(ud.pkgname, shrinkobj, ud.version, d, ud, lockdown, jsondepobj)
|
||||
|
||||
with open(ud.localpath, 'w') as outfile:
|
||||
json.dump(jsondepobj, outfile)
|
||||
|
||||
def build_mirror_data(self, ud, d):
|
||||
# Generate a mirror tarball if needed
|
||||
if ud.write_tarballs and not os.path.exists(ud.fullmirror):
|
||||
# it's possible that this symlink points to read-only filesystem with PREMIRROR
|
||||
if os.path.islink(ud.fullmirror):
|
||||
os.unlink(ud.fullmirror)
|
||||
|
||||
dldir = d.getVar("DL_DIR")
|
||||
logger.info("Creating tarball of npm data")
|
||||
runfetchcmd("tar -cJf %s npm/%s npm/%s" % (ud.fullmirror, ud.bbnpmmanifest, ud.pkgname), d,
|
||||
workdir=dldir)
|
||||
runfetchcmd("touch %s.done" % (ud.fullmirror), d, workdir=dldir)
|
||||
@@ -10,6 +10,7 @@ import os
|
||||
import sys
|
||||
import logging
|
||||
import bb
|
||||
from bb import data
|
||||
from bb.fetch2 import FetchMethod
|
||||
from bb.fetch2 import FetchError
|
||||
from bb.fetch2 import MissingParameterError
|
||||
@@ -33,20 +34,20 @@ class Osc(FetchMethod):
|
||||
|
||||
# Create paths to osc checkouts
|
||||
relpath = self._strip_leading_slashes(ud.path)
|
||||
ud.pkgdir = os.path.join(d.getVar('OSCDIR'), ud.host)
|
||||
ud.pkgdir = os.path.join(data.expand('${OSCDIR}', d), ud.host)
|
||||
ud.moddir = os.path.join(ud.pkgdir, relpath, ud.module)
|
||||
|
||||
if 'rev' in ud.parm:
|
||||
ud.revision = ud.parm['rev']
|
||||
else:
|
||||
pv = d.getVar("PV", False)
|
||||
pv = data.getVar("PV", d, 0)
|
||||
rev = bb.fetch2.srcrev_internal_helper(ud, d)
|
||||
if rev and rev != True:
|
||||
ud.revision = rev
|
||||
else:
|
||||
ud.revision = ""
|
||||
|
||||
ud.localfile = d.expand('%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), ud.path.replace('/', '.'), ud.revision))
|
||||
ud.localfile = data.expand('%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), ud.path.replace('/', '.'), ud.revision), d)
|
||||
|
||||
def _buildosccommand(self, ud, d, command):
|
||||
"""
|
||||
@@ -54,7 +55,7 @@ class Osc(FetchMethod):
|
||||
command is "fetch", "update", "info"
|
||||
"""
|
||||
|
||||
basecmd = d.expand('${FETCHCMD_osc}')
|
||||
basecmd = data.expand('${FETCHCMD_osc}', d)
|
||||
|
||||
proto = ud.parm.get('protocol', 'ocs')
|
||||
|
||||
@@ -83,25 +84,27 @@ class Osc(FetchMethod):
|
||||
|
||||
logger.debug(2, "Fetch: checking for module directory '" + ud.moddir + "'")
|
||||
|
||||
if os.access(os.path.join(d.getVar('OSCDIR'), ud.path, ud.module), os.R_OK):
|
||||
if os.access(os.path.join(data.expand('${OSCDIR}', d), ud.path, ud.module), os.R_OK):
|
||||
oscupdatecmd = self._buildosccommand(ud, d, "update")
|
||||
logger.info("Update "+ ud.url)
|
||||
# update sources there
|
||||
os.chdir(ud.moddir)
|
||||
logger.debug(1, "Running %s", oscupdatecmd)
|
||||
bb.fetch2.check_network_access(d, oscupdatecmd, ud.url)
|
||||
runfetchcmd(oscupdatecmd, d, workdir=ud.moddir)
|
||||
runfetchcmd(oscupdatecmd, d)
|
||||
else:
|
||||
oscfetchcmd = self._buildosccommand(ud, d, "fetch")
|
||||
logger.info("Fetch " + ud.url)
|
||||
# check out sources there
|
||||
bb.utils.mkdirhier(ud.pkgdir)
|
||||
os.chdir(ud.pkgdir)
|
||||
logger.debug(1, "Running %s", oscfetchcmd)
|
||||
bb.fetch2.check_network_access(d, oscfetchcmd, ud.url)
|
||||
runfetchcmd(oscfetchcmd, d, workdir=ud.pkgdir)
|
||||
runfetchcmd(oscfetchcmd, d)
|
||||
|
||||
os.chdir(os.path.join(ud.pkgdir + ud.path))
|
||||
# tar them up to a defined filename
|
||||
runfetchcmd("tar -czf %s %s" % (ud.localpath, ud.module), d,
|
||||
cleanup=[ud.localpath], workdir=os.path.join(ud.pkgdir + ud.path))
|
||||
runfetchcmd("tar -czf %s %s" % (ud.localpath, ud.module), d, cleanup = [ud.localpath])
|
||||
|
||||
def supports_srcrev(self):
|
||||
return False
|
||||
@@ -111,7 +114,7 @@ class Osc(FetchMethod):
|
||||
Generate a .oscrc to be used for this run.
|
||||
"""
|
||||
|
||||
config_path = os.path.join(d.getVar('OSCDIR'), "oscrc")
|
||||
config_path = os.path.join(data.expand('${OSCDIR}', d), "oscrc")
|
||||
if (os.path.exists(config_path)):
|
||||
os.remove(config_path)
|
||||
|
||||
@@ -120,8 +123,8 @@ class Osc(FetchMethod):
|
||||
f.write("apisrv = %s\n" % ud.host)
|
||||
f.write("scheme = http\n")
|
||||
f.write("su-wrapper = su -c\n")
|
||||
f.write("build-root = %s\n" % d.getVar('WORKDIR'))
|
||||
f.write("urllist = %s\n" % d.getVar("OSCURLLIST"))
|
||||
f.write("build-root = %s\n" % data.expand('${WORKDIR}', d))
|
||||
f.write("urllist = http://moblin-obs.jf.intel.com:8888/build/%(project)s/%(repository)s/%(buildarch)s/:full/%(name)s.rpm\n")
|
||||
f.write("extra-pkgs = gzip\n")
|
||||
f.write("\n")
|
||||
f.write("[%s]\n" % ud.host)
|
||||
|
||||
@@ -1,12 +1,14 @@
|
||||
# ex:ts=4:sw=4:sts=4:et
|
||||
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
|
||||
"""
|
||||
BitBake 'Fetch' implementation for perforce
|
||||
BitBake 'Fetch' implementations
|
||||
|
||||
Classes for obtaining upstream sources for the
|
||||
BitBake build tools.
|
||||
|
||||
"""
|
||||
|
||||
# Copyright (C) 2003, 2004 Chris Larson
|
||||
# Copyright (C) 2016 Kodak Alaris, Inc.
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License version 2 as
|
||||
@@ -23,187 +25,170 @@ BitBake 'Fetch' implementation for perforce
|
||||
#
|
||||
# Based on functions from the base bb module, Copyright 2003 Holger Schurig
|
||||
|
||||
from future_builtins import zip
|
||||
import os
|
||||
import subprocess
|
||||
import logging
|
||||
import bb
|
||||
from bb import data
|
||||
from bb.fetch2 import FetchMethod
|
||||
from bb.fetch2 import FetchError
|
||||
from bb.fetch2 import logger
|
||||
from bb.fetch2 import runfetchcmd
|
||||
|
||||
class Perforce(FetchMethod):
|
||||
""" Class to fetch from perforce repositories """
|
||||
def supports(self, ud, d):
|
||||
""" Check to see if a given url can be fetched with perforce. """
|
||||
return ud.type in ['p4']
|
||||
|
||||
def urldata_init(self, ud, d):
|
||||
"""
|
||||
Initialize perforce specific variables within url data. If P4CONFIG is
|
||||
provided by the env, use it. If P4PORT is specified by the recipe, use
|
||||
its values, which may override the settings in P4CONFIG.
|
||||
"""
|
||||
ud.basecmd = d.getVar('FETCHCMD_p4')
|
||||
if not ud.basecmd:
|
||||
ud.basecmd = "/usr/bin/env p4"
|
||||
|
||||
ud.dldir = d.getVar('P4DIR')
|
||||
if not ud.dldir:
|
||||
ud.dldir = '%s/%s' % (d.getVar('DL_DIR'), 'p4')
|
||||
|
||||
path = ud.url.split('://')[1]
|
||||
path = path.split(';')[0]
|
||||
delim = path.find('@');
|
||||
def doparse(url, d):
|
||||
parm = {}
|
||||
path = url.split("://")[1]
|
||||
delim = path.find("@");
|
||||
if delim != -1:
|
||||
(ud.user, ud.pswd) = path.split('@')[0].split(':')
|
||||
ud.path = path.split('@')[1]
|
||||
(user, pswd, host, port) = path.split('@')[0].split(":")
|
||||
path = path.split('@')[1]
|
||||
else:
|
||||
ud.path = path
|
||||
(host, port) = d.getVar('P4PORT').split(':')
|
||||
user = ""
|
||||
pswd = ""
|
||||
|
||||
ud.usingp4config = False
|
||||
p4port = d.getVar('P4PORT')
|
||||
if path.find(";") != -1:
|
||||
keys=[]
|
||||
values=[]
|
||||
plist = path.split(';')
|
||||
for item in plist:
|
||||
if item.count('='):
|
||||
(key, value) = item.split('=')
|
||||
keys.append(key)
|
||||
values.append(value)
|
||||
|
||||
if p4port:
|
||||
logger.debug(1, 'Using recipe provided P4PORT: %s' % p4port)
|
||||
ud.host = p4port
|
||||
else:
|
||||
logger.debug(1, 'Trying to use P4CONFIG to automatically set P4PORT...')
|
||||
ud.usingp4config = True
|
||||
p4cmd = '%s info | grep "Server address"' % ud.basecmd
|
||||
bb.fetch2.check_network_access(d, p4cmd, ud.url)
|
||||
ud.host = runfetchcmd(p4cmd, d, True)
|
||||
ud.host = ud.host.split(': ')[1].strip()
|
||||
logger.debug(1, 'Determined P4PORT to be: %s' % ud.host)
|
||||
if not ud.host:
|
||||
raise FetchError('Could not determine P4PORT from P4CONFIG')
|
||||
|
||||
if ud.path.find('/...') >= 0:
|
||||
ud.pathisdir = True
|
||||
else:
|
||||
ud.pathisdir = False
|
||||
parm = dict(zip(keys, values))
|
||||
path = "//" + path.split(';')[0]
|
||||
host += ":%s" % (port)
|
||||
parm["cset"] = Perforce.getcset(d, path, host, user, pswd, parm)
|
||||
|
||||
cleanedpath = ud.path.replace('/...', '').replace('/', '.')
|
||||
cleanedhost = ud.host.replace(':', '.')
|
||||
ud.pkgdir = os.path.join(ud.dldir, cleanedhost, cleanedpath)
|
||||
return host, path, user, pswd, parm
|
||||
doparse = staticmethod(doparse)
|
||||
|
||||
ud.setup_revisions(d)
|
||||
|
||||
ud.localfile = d.expand('%s_%s_%s.tar.gz' % (cleanedhost, cleanedpath, ud.revision))
|
||||
|
||||
def _buildp4command(self, ud, d, command, depot_filename=None):
|
||||
"""
|
||||
Build a p4 commandline. Valid commands are "changes", "print", and
|
||||
"files". depot_filename is the full path to the file in the depot
|
||||
including the trailing '#rev' value.
|
||||
"""
|
||||
def getcset(d, depot, host, user, pswd, parm):
|
||||
p4opt = ""
|
||||
if "cset" in parm:
|
||||
return parm["cset"];
|
||||
if user:
|
||||
p4opt += " -u %s" % (user)
|
||||
if pswd:
|
||||
p4opt += " -P %s" % (pswd)
|
||||
if host:
|
||||
p4opt += " -p %s" % (host)
|
||||
|
||||
if ud.user:
|
||||
p4opt += ' -u "%s"' % (ud.user)
|
||||
p4date = d.getVar("P4DATE", True)
|
||||
if "revision" in parm:
|
||||
depot += "#%s" % (parm["revision"])
|
||||
elif "label" in parm:
|
||||
depot += "@%s" % (parm["label"])
|
||||
elif p4date:
|
||||
depot += "@%s" % (p4date)
|
||||
|
||||
if ud.pswd:
|
||||
p4opt += ' -P "%s"' % (ud.pswd)
|
||||
p4cmd = d.getVar('FETCHCMD_p4', True) or "p4"
|
||||
logger.debug(1, "Running %s%s changes -m 1 %s", p4cmd, p4opt, depot)
|
||||
p4file, errors = bb.process.run("%s%s changes -m 1 %s" % (p4cmd, p4opt, depot))
|
||||
cset = p4file.strip()
|
||||
logger.debug(1, "READ %s", cset)
|
||||
if not cset:
|
||||
return -1
|
||||
|
||||
if ud.host and not ud.usingp4config:
|
||||
p4opt += ' -p %s' % (ud.host)
|
||||
return cset.split(' ')[1]
|
||||
getcset = staticmethod(getcset)
|
||||
|
||||
if hasattr(ud, 'revision') and ud.revision:
|
||||
pathnrev = '%s@%s' % (ud.path, ud.revision)
|
||||
else:
|
||||
pathnrev = '%s' % (ud.path)
|
||||
def urldata_init(self, ud, d):
|
||||
(host, path, user, pswd, parm) = Perforce.doparse(ud.url, d)
|
||||
|
||||
if depot_filename:
|
||||
if ud.pathisdir: # Remove leading path to obtain filename
|
||||
filename = depot_filename[len(ud.path)-1:]
|
||||
else:
|
||||
filename = depot_filename[depot_filename.rfind('/'):]
|
||||
filename = filename[:filename.find('#')] # Remove trailing '#rev'
|
||||
# If a label is specified, we use that as our filename
|
||||
|
||||
if command == 'changes':
|
||||
p4cmd = '%s%s changes -m 1 //%s' % (ud.basecmd, p4opt, pathnrev)
|
||||
elif command == 'print':
|
||||
if depot_filename != None:
|
||||
p4cmd = '%s%s print -o "p4/%s" "%s"' % (ud.basecmd, p4opt, filename, depot_filename)
|
||||
else:
|
||||
raise FetchError('No depot file name provided to p4 %s' % command, ud.url)
|
||||
elif command == 'files':
|
||||
p4cmd = '%s%s files //%s' % (ud.basecmd, p4opt, pathnrev)
|
||||
else:
|
||||
raise FetchError('Invalid p4 command %s' % command, ud.url)
|
||||
if "label" in parm:
|
||||
ud.localfile = "%s.tar.gz" % (parm["label"])
|
||||
return
|
||||
|
||||
return p4cmd
|
||||
base = path
|
||||
which = path.find('/...')
|
||||
if which != -1:
|
||||
base = path[:which-1]
|
||||
|
||||
def _p4listfiles(self, ud, d):
|
||||
"""
|
||||
Return a list of the file names which are present in the depot using the
|
||||
'p4 files' command, including trailing '#rev' file revision indicator
|
||||
"""
|
||||
p4cmd = self._buildp4command(ud, d, 'files')
|
||||
bb.fetch2.check_network_access(d, p4cmd, ud.url)
|
||||
p4fileslist = runfetchcmd(p4cmd, d, True)
|
||||
p4fileslist = [f.rstrip() for f in p4fileslist.splitlines()]
|
||||
base = self._strip_leading_slashes(base)
|
||||
|
||||
if not p4fileslist:
|
||||
raise FetchError('Unable to fetch listing of p4 files from %s@%s' % (ud.host, ud.path))
|
||||
cset = Perforce.getcset(d, path, host, user, pswd, parm)
|
||||
|
||||
count = 0
|
||||
filelist = []
|
||||
|
||||
for filename in p4fileslist:
|
||||
item = filename.split(' - ')
|
||||
lastaction = item[1].split()
|
||||
logger.debug(1, 'File: %s Last Action: %s' % (item[0], lastaction[0]))
|
||||
if lastaction[0] == 'delete':
|
||||
continue
|
||||
filelist.append(item[0])
|
||||
|
||||
return filelist
|
||||
ud.localfile = data.expand('%s+%s+%s.tar.gz' % (host, base.replace('/', '.'), cset), d)
|
||||
|
||||
def download(self, ud, d):
|
||||
""" Get the list of files, fetch each one """
|
||||
filelist = self._p4listfiles(ud, d)
|
||||
if not filelist:
|
||||
raise FetchError('No files found in depot %s@%s' % (ud.host, ud.path))
|
||||
"""
|
||||
Fetch urls
|
||||
"""
|
||||
|
||||
bb.utils.remove(ud.pkgdir, True)
|
||||
bb.utils.mkdirhier(ud.pkgdir)
|
||||
(host, depot, user, pswd, parm) = Perforce.doparse(ud.url, d)
|
||||
|
||||
for afile in filelist:
|
||||
p4fetchcmd = self._buildp4command(ud, d, 'print', afile)
|
||||
bb.fetch2.check_network_access(d, p4fetchcmd, ud.url)
|
||||
runfetchcmd(p4fetchcmd, d, workdir=ud.pkgdir)
|
||||
if depot.find('/...') != -1:
|
||||
path = depot[:depot.find('/...')]
|
||||
else:
|
||||
path = depot
|
||||
|
||||
runfetchcmd('tar -czf %s p4' % (ud.localpath), d, cleanup=[ud.localpath], workdir=ud.pkgdir)
|
||||
module = parm.get('module', os.path.basename(path))
|
||||
|
||||
def clean(self, ud, d):
|
||||
""" Cleanup p4 specific files and dirs"""
|
||||
bb.utils.remove(ud.localpath)
|
||||
bb.utils.remove(ud.pkgdir, True)
|
||||
# Get the p4 command
|
||||
p4opt = ""
|
||||
if user:
|
||||
p4opt += " -u %s" % (user)
|
||||
|
||||
def supports_srcrev(self):
|
||||
return True
|
||||
if pswd:
|
||||
p4opt += " -P %s" % (pswd)
|
||||
|
||||
def _revision_key(self, ud, d, name):
|
||||
""" Return a unique key for the url """
|
||||
return 'p4:%s' % ud.pkgdir
|
||||
if host:
|
||||
p4opt += " -p %s" % (host)
|
||||
|
||||
def _latest_revision(self, ud, d, name):
|
||||
""" Return the latest upstream scm revision number """
|
||||
p4cmd = self._buildp4command(ud, d, "changes")
|
||||
bb.fetch2.check_network_access(d, p4cmd, ud.url)
|
||||
tip = runfetchcmd(p4cmd, d, True)
|
||||
p4cmd = d.getVar('FETCHCMD_p4', True) or "p4"
|
||||
|
||||
if not tip:
|
||||
raise FetchError('Could not determine the latest perforce changelist')
|
||||
# create temp directory
|
||||
logger.debug(2, "Fetch: creating temporary directory")
|
||||
bb.utils.mkdirhier(d.expand('${WORKDIR}'))
|
||||
mktemp = d.getVar("FETCHCMD_p4mktemp", True) or d.expand("mktemp -d -q '${WORKDIR}/oep4.XXXXXX'")
|
||||
tmpfile, errors = bb.process.run(mktemp)
|
||||
tmpfile = tmpfile.strip()
|
||||
if not tmpfile:
|
||||
raise FetchError("Fetch: unable to create temporary directory.. make sure 'mktemp' is in the PATH.", ud.url)
|
||||
|
||||
tipcset = tip.split(' ')[1]
|
||||
logger.debug(1, 'p4 tip found to be changelist %s' % tipcset)
|
||||
return tipcset
|
||||
if "label" in parm:
|
||||
depot = "%s@%s" % (depot, parm["label"])
|
||||
else:
|
||||
cset = Perforce.getcset(d, depot, host, user, pswd, parm)
|
||||
depot = "%s@%s" % (depot, cset)
|
||||
|
||||
def sortable_revision(self, ud, d, name):
|
||||
""" Return a sortable revision number """
|
||||
return False, self._build_revision(ud, d)
|
||||
os.chdir(tmpfile)
|
||||
logger.info("Fetch " + ud.url)
|
||||
logger.info("%s%s files %s", p4cmd, p4opt, depot)
|
||||
p4file, errors = bb.process.run("%s%s files %s" % (p4cmd, p4opt, depot))
|
||||
p4file = [f.rstrip() for f in p4file.splitlines()]
|
||||
|
||||
def _build_revision(self, ud, d):
|
||||
return ud.revision
|
||||
if not p4file:
|
||||
raise FetchError("Fetch: unable to get the P4 files from %s" % depot, ud.url)
|
||||
|
||||
count = 0
|
||||
|
||||
for file in p4file:
|
||||
list = file.split()
|
||||
|
||||
if list[2] == "delete":
|
||||
continue
|
||||
|
||||
dest = list[0][len(path)+1:]
|
||||
where = dest.find("#")
|
||||
|
||||
subprocess.call("%s%s print -o %s/%s %s" % (p4cmd, p4opt, module, dest[:where], list[0]), shell=True)
|
||||
count = count + 1
|
||||
|
||||
if count == 0:
|
||||
logger.error()
|
||||
raise FetchError("Fetch: No files gathered from the P4 fetch", ud.url)
|
||||
|
||||
runfetchcmd("tar -czf %s %s" % (ud.localpath, module), d, cleanup = [ud.localpath])
|
||||
# cleanup
|
||||
bb.utils.prunedir(tmpfile)
|
||||
|
||||
@@ -25,9 +25,9 @@ BitBake "Fetch" repo (git) implementation
|
||||
|
||||
import os
|
||||
import bb
|
||||
from bb import data
|
||||
from bb.fetch2 import FetchMethod
|
||||
from bb.fetch2 import runfetchcmd
|
||||
from bb.fetch2 import logger
|
||||
|
||||
class Repo(FetchMethod):
|
||||
"""Class to fetch a module or modules from repo (git) repositories"""
|
||||
@@ -51,17 +51,17 @@ class Repo(FetchMethod):
|
||||
if not ud.manifest.endswith('.xml'):
|
||||
ud.manifest += '.xml'
|
||||
|
||||
ud.localfile = d.expand("repo_%s%s_%s_%s.tar.gz" % (ud.host, ud.path.replace("/", "."), ud.manifest, ud.branch))
|
||||
ud.localfile = data.expand("repo_%s%s_%s_%s.tar.gz" % (ud.host, ud.path.replace("/", "."), ud.manifest, ud.branch), d)
|
||||
|
||||
def download(self, ud, d):
|
||||
"""Fetch url"""
|
||||
|
||||
if os.access(os.path.join(d.getVar("DL_DIR"), ud.localfile), os.R_OK):
|
||||
if os.access(os.path.join(data.getVar("DL_DIR", d, True), ud.localfile), os.R_OK):
|
||||
logger.debug(1, "%s already exists (or was stashed). Skipping repo init / sync.", ud.localpath)
|
||||
return
|
||||
|
||||
gitsrcname = "%s%s" % (ud.host, ud.path.replace("/", "."))
|
||||
repodir = d.getVar("REPODIR") or os.path.join(d.getVar("DL_DIR"), "repo")
|
||||
repodir = data.getVar("REPODIR", d, True) or os.path.join(data.getVar("DL_DIR", d, True), "repo")
|
||||
codir = os.path.join(repodir, gitsrcname, ud.manifest)
|
||||
|
||||
if ud.user:
|
||||
@@ -69,23 +69,24 @@ class Repo(FetchMethod):
|
||||
else:
|
||||
username = ""
|
||||
|
||||
repodir = os.path.join(codir, "repo")
|
||||
bb.utils.mkdirhier(repodir)
|
||||
if not os.path.exists(os.path.join(repodir, ".repo")):
|
||||
bb.utils.mkdirhier(os.path.join(codir, "repo"))
|
||||
os.chdir(os.path.join(codir, "repo"))
|
||||
if not os.path.exists(os.path.join(codir, "repo", ".repo")):
|
||||
bb.fetch2.check_network_access(d, "repo init -m %s -b %s -u %s://%s%s%s" % (ud.manifest, ud.branch, ud.proto, username, ud.host, ud.path), ud.url)
|
||||
runfetchcmd("repo init -m %s -b %s -u %s://%s%s%s" % (ud.manifest, ud.branch, ud.proto, username, ud.host, ud.path), d, workdir=repodir)
|
||||
runfetchcmd("repo init -m %s -b %s -u %s://%s%s%s" % (ud.manifest, ud.branch, ud.proto, username, ud.host, ud.path), d)
|
||||
|
||||
bb.fetch2.check_network_access(d, "repo sync %s" % ud.url, ud.url)
|
||||
runfetchcmd("repo sync", d, workdir=repodir)
|
||||
runfetchcmd("repo sync", d)
|
||||
os.chdir(codir)
|
||||
|
||||
scmdata = ud.parm.get("scmdata", "")
|
||||
if scmdata == "keep":
|
||||
tar_flags = ""
|
||||
else:
|
||||
tar_flags = "--exclude='.repo' --exclude='.git'"
|
||||
tar_flags = "--exclude '.repo' --exclude '.git'"
|
||||
|
||||
# Create a cache
|
||||
runfetchcmd("tar %s -czf %s %s" % (tar_flags, ud.localpath, os.path.join(".", "*") ), d, workdir=codir)
|
||||
runfetchcmd("tar %s -czf %s %s" % (tar_flags, ud.localpath, os.path.join(".", "*") ), d)
|
||||
|
||||
def supports_srcrev(self):
|
||||
return False
|
||||
|
||||
@@ -1,98 +0,0 @@
|
||||
# ex:ts=4:sw=4:sts=4:et
|
||||
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
|
||||
"""
|
||||
BitBake 'Fetch' implementation for Amazon AWS S3.
|
||||
|
||||
Class for fetching files from Amazon S3 using the AWS Command Line Interface.
|
||||
The aws tool must be correctly installed and configured prior to use.
|
||||
|
||||
"""
|
||||
|
||||
# Copyright (C) 2017, Andre McCurdy <armccurdy@gmail.com>
|
||||
#
|
||||
# Based in part on bb.fetch2.wget:
|
||||
# Copyright (C) 2003, 2004 Chris Larson
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License version 2 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License along
|
||||
# with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
#
|
||||
# Based on functions from the base bb module, Copyright 2003 Holger Schurig
|
||||
|
||||
import os
|
||||
import bb
|
||||
import urllib.request, urllib.parse, urllib.error
|
||||
from bb.fetch2 import FetchMethod
|
||||
from bb.fetch2 import FetchError
|
||||
from bb.fetch2 import runfetchcmd
|
||||
|
||||
class S3(FetchMethod):
|
||||
"""Class to fetch urls via 'aws s3'"""
|
||||
|
||||
def supports(self, ud, d):
|
||||
"""
|
||||
Check to see if a given url can be fetched with s3.
|
||||
"""
|
||||
return ud.type in ['s3']
|
||||
|
||||
def recommends_checksum(self, urldata):
|
||||
return True
|
||||
|
||||
def urldata_init(self, ud, d):
|
||||
if 'downloadfilename' in ud.parm:
|
||||
ud.basename = ud.parm['downloadfilename']
|
||||
else:
|
||||
ud.basename = os.path.basename(ud.path)
|
||||
|
||||
ud.localfile = d.expand(urllib.parse.unquote(ud.basename))
|
||||
|
||||
ud.basecmd = d.getVar("FETCHCMD_s3") or "/usr/bin/env aws s3"
|
||||
|
||||
def download(self, ud, d):
|
||||
"""
|
||||
Fetch urls
|
||||
Assumes localpath was called first
|
||||
"""
|
||||
|
||||
cmd = '%s cp s3://%s%s %s' % (ud.basecmd, ud.host, ud.path, ud.localpath)
|
||||
bb.fetch2.check_network_access(d, cmd, ud.url)
|
||||
runfetchcmd(cmd, d)
|
||||
|
||||
# Additional sanity checks copied from the wget class (although there
|
||||
# are no known issues which mean these are required, treat the aws cli
|
||||
# tool with a little healthy suspicion).
|
||||
|
||||
if not os.path.exists(ud.localpath):
|
||||
raise FetchError("The aws cp command returned success for s3://%s%s but %s doesn't exist?!" % (ud.host, ud.path, ud.localpath))
|
||||
|
||||
if os.path.getsize(ud.localpath) == 0:
|
||||
os.remove(ud.localpath)
|
||||
raise FetchError("The aws cp command for s3://%s%s resulted in a zero size file?! Deleting and failing since this isn't right." % (ud.host, ud.path))
|
||||
|
||||
return True
|
||||
|
||||
def checkstatus(self, fetch, ud, d):
|
||||
"""
|
||||
Check the status of a URL
|
||||
"""
|
||||
|
||||
cmd = '%s ls s3://%s%s' % (ud.basecmd, ud.host, ud.path)
|
||||
bb.fetch2.check_network_access(d, cmd, ud.url)
|
||||
output = runfetchcmd(cmd, d)
|
||||
|
||||
# "aws s3 ls s3://mybucket/foo" will exit with success even if the file
|
||||
# is not found, so check output of the command to confirm success.
|
||||
|
||||
if not output:
|
||||
raise FetchError("The aws ls command for s3://%s%s gave empty output" % (ud.host, ud.path))
|
||||
|
||||
return True
|
||||
@@ -61,11 +61,14 @@ SRC_URI = "sftp://user@host.example.com/dir/path.file.txt"
|
||||
|
||||
import os
|
||||
import bb
|
||||
import urllib.request, urllib.parse, urllib.error
|
||||
import urllib
|
||||
import commands
|
||||
from bb import data
|
||||
from bb.fetch2 import URI
|
||||
from bb.fetch2 import FetchMethod
|
||||
from bb.fetch2 import runfetchcmd
|
||||
|
||||
|
||||
class SFTP(FetchMethod):
|
||||
"""Class to fetch urls via 'sftp'"""
|
||||
|
||||
@@ -90,19 +93,19 @@ class SFTP(FetchMethod):
|
||||
else:
|
||||
ud.basename = os.path.basename(ud.path)
|
||||
|
||||
ud.localfile = d.expand(urllib.parse.unquote(ud.basename))
|
||||
ud.localfile = data.expand(urllib.unquote(ud.basename), d)
|
||||
|
||||
def download(self, ud, d):
|
||||
"""Fetch urls"""
|
||||
|
||||
urlo = URI(ud.url)
|
||||
basecmd = 'sftp -oBatchMode=yes'
|
||||
basecmd = 'sftp -oPasswordAuthentication=no'
|
||||
port = ''
|
||||
if urlo.port:
|
||||
port = '-P %d' % urlo.port
|
||||
urlo.port = None
|
||||
|
||||
dldir = d.getVar('DL_DIR')
|
||||
dldir = data.getVar('DL_DIR', d, True)
|
||||
lpath = os.path.join(dldir, ud.localfile)
|
||||
|
||||
user = ''
|
||||
@@ -118,7 +121,8 @@ class SFTP(FetchMethod):
|
||||
|
||||
remote = '%s%s:%s' % (user, urlo.hostname, path)
|
||||
|
||||
cmd = '%s %s %s %s' % (basecmd, port, remote, lpath)
|
||||
cmd = '%s %s %s %s' % (basecmd, port, commands.mkarg(remote),
|
||||
commands.mkarg(lpath))
|
||||
|
||||
bb.fetch2.check_network_access(d, cmd, ud.url)
|
||||
runfetchcmd(cmd, d)
|
||||
|
||||
@@ -43,6 +43,7 @@ IETF secsh internet draft:
|
||||
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
import re, os
|
||||
from bb import data
|
||||
from bb.fetch2 import FetchMethod
|
||||
from bb.fetch2 import FetchError
|
||||
from bb.fetch2 import logger
|
||||
@@ -86,11 +87,10 @@ class SSH(FetchMethod):
|
||||
m = __pattern__.match(urldata.url)
|
||||
path = m.group('path')
|
||||
host = m.group('host')
|
||||
urldata.localpath = os.path.join(d.getVar('DL_DIR'),
|
||||
os.path.basename(os.path.normpath(path)))
|
||||
urldata.localpath = os.path.join(d.getVar('DL_DIR', True), os.path.basename(path))
|
||||
|
||||
def download(self, urldata, d):
|
||||
dldir = d.getVar('DL_DIR')
|
||||
dldir = d.getVar('DL_DIR', True)
|
||||
|
||||
m = __pattern__.match(urldata.url)
|
||||
path = m.group('path')
|
||||
@@ -113,10 +113,12 @@ class SSH(FetchMethod):
|
||||
fr = host
|
||||
fr += ':%s' % path
|
||||
|
||||
|
||||
import commands
|
||||
cmd = 'scp -B -r %s %s %s/' % (
|
||||
portarg,
|
||||
fr,
|
||||
dldir
|
||||
commands.mkarg(fr),
|
||||
commands.mkarg(dldir)
|
||||
)
|
||||
|
||||
bb.fetch2.check_network_access(d, cmd, urldata.url)
|
||||
|
||||
@@ -28,6 +28,7 @@ import sys
|
||||
import logging
|
||||
import bb
|
||||
import re
|
||||
from bb import data
|
||||
from bb.fetch2 import FetchMethod
|
||||
from bb.fetch2 import FetchError
|
||||
from bb.fetch2 import MissingParameterError
|
||||
@@ -49,26 +50,21 @@ class Svn(FetchMethod):
|
||||
if not "module" in ud.parm:
|
||||
raise MissingParameterError('module', ud.url)
|
||||
|
||||
ud.basecmd = d.getVar('FETCHCMD_svn')
|
||||
ud.basecmd = d.getVar('FETCHCMD_svn', True)
|
||||
|
||||
ud.module = ud.parm["module"]
|
||||
|
||||
if not "path_spec" in ud.parm:
|
||||
ud.path_spec = ud.module
|
||||
else:
|
||||
ud.path_spec = ud.parm["path_spec"]
|
||||
|
||||
# Create paths to svn checkouts
|
||||
relpath = self._strip_leading_slashes(ud.path)
|
||||
ud.pkgdir = os.path.join(d.expand('${SVNDIR}'), ud.host, relpath)
|
||||
ud.pkgdir = os.path.join(data.expand('${SVNDIR}', d), ud.host, relpath)
|
||||
ud.moddir = os.path.join(ud.pkgdir, ud.module)
|
||||
|
||||
ud.setup_revisions(d)
|
||||
ud.setup_revisons(d)
|
||||
|
||||
if 'rev' in ud.parm:
|
||||
ud.revision = ud.parm['rev']
|
||||
|
||||
ud.localfile = d.expand('%s_%s_%s_%s_.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.path.replace('/', '.'), ud.revision))
|
||||
ud.localfile = data.expand('%s_%s_%s_%s_.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.path.replace('/', '.'), ud.revision), d)
|
||||
|
||||
def _buildsvncommand(self, ud, d, command):
|
||||
"""
|
||||
@@ -78,9 +74,9 @@ class Svn(FetchMethod):
|
||||
|
||||
proto = ud.parm.get('protocol', 'svn')
|
||||
|
||||
svn_ssh = None
|
||||
if proto == "svn+ssh" and "ssh" in ud.parm:
|
||||
svn_ssh = ud.parm["ssh"]
|
||||
svn_rsh = None
|
||||
if proto == "svn+ssh" and "rsh" in ud.parm:
|
||||
svn_rsh = ud.parm["rsh"]
|
||||
|
||||
svnroot = ud.host + ud.path
|
||||
|
||||
@@ -105,15 +101,14 @@ class Svn(FetchMethod):
|
||||
suffix = "@%s" % (ud.revision)
|
||||
|
||||
if command == "fetch":
|
||||
transportuser = ud.parm.get("transportuser", "")
|
||||
svncmd = "%s co %s %s://%s%s/%s%s %s" % (ud.basecmd, " ".join(options), proto, transportuser, svnroot, ud.module, suffix, ud.path_spec)
|
||||
svncmd = "%s co %s %s://%s/%s%s %s" % (ud.basecmd, " ".join(options), proto, svnroot, ud.module, suffix, ud.module)
|
||||
elif command == "update":
|
||||
svncmd = "%s update %s" % (ud.basecmd, " ".join(options))
|
||||
else:
|
||||
raise FetchError("Invalid svn command %s" % command, ud.url)
|
||||
|
||||
if svn_ssh:
|
||||
svncmd = "SVN_SSH=\"%s\" %s" % (svn_ssh, svncmd)
|
||||
if svn_rsh:
|
||||
svncmd = "svn_RSH=\"%s\" %s" % (svn_rsh, svncmd)
|
||||
|
||||
return svncmd
|
||||
|
||||
@@ -125,32 +120,35 @@ class Svn(FetchMethod):
|
||||
if os.access(os.path.join(ud.moddir, '.svn'), os.R_OK):
|
||||
svnupdatecmd = self._buildsvncommand(ud, d, "update")
|
||||
logger.info("Update " + ud.url)
|
||||
# update sources there
|
||||
os.chdir(ud.moddir)
|
||||
# We need to attempt to run svn upgrade first in case its an older working format
|
||||
try:
|
||||
runfetchcmd(ud.basecmd + " upgrade", d, workdir=ud.moddir)
|
||||
runfetchcmd(ud.basecmd + " upgrade", d)
|
||||
except FetchError:
|
||||
pass
|
||||
logger.debug(1, "Running %s", svnupdatecmd)
|
||||
bb.fetch2.check_network_access(d, svnupdatecmd, ud.url)
|
||||
runfetchcmd(svnupdatecmd, d, workdir=ud.moddir)
|
||||
runfetchcmd(svnupdatecmd, d)
|
||||
else:
|
||||
svnfetchcmd = self._buildsvncommand(ud, d, "fetch")
|
||||
logger.info("Fetch " + ud.url)
|
||||
# check out sources there
|
||||
bb.utils.mkdirhier(ud.pkgdir)
|
||||
os.chdir(ud.pkgdir)
|
||||
logger.debug(1, "Running %s", svnfetchcmd)
|
||||
bb.fetch2.check_network_access(d, svnfetchcmd, ud.url)
|
||||
runfetchcmd(svnfetchcmd, d, workdir=ud.pkgdir)
|
||||
runfetchcmd(svnfetchcmd, d)
|
||||
|
||||
scmdata = ud.parm.get("scmdata", "")
|
||||
if scmdata == "keep":
|
||||
tar_flags = ""
|
||||
else:
|
||||
tar_flags = "--exclude='.svn'"
|
||||
tar_flags = "--exclude '.svn'"
|
||||
|
||||
os.chdir(ud.pkgdir)
|
||||
# tar them up to a defined filename
|
||||
runfetchcmd("tar %s -czf %s %s" % (tar_flags, ud.localpath, ud.path_spec), d,
|
||||
cleanup=[ud.localpath], workdir=ud.pkgdir)
|
||||
runfetchcmd("tar %s -czf %s %s" % (tar_flags, ud.localpath, ud.module), d, cleanup = [ud.localpath])
|
||||
|
||||
def clean(self, ud, d):
|
||||
""" Clean SVN specific files and dirs """
|
||||
@@ -172,7 +170,7 @@ class Svn(FetchMethod):
|
||||
"""
|
||||
Return the latest upstream revision number
|
||||
"""
|
||||
bb.fetch2.check_network_access(d, self._buildsvncommand(ud, d, "log1"), ud.url)
|
||||
bb.fetch2.check_network_access(d, self._buildsvncommand(ud, d, "log1"))
|
||||
|
||||
output = runfetchcmd("LANG=C LC_ALL=C " + self._buildsvncommand(ud, d, "log1"), d, True)
|
||||
|
||||
|
||||
@@ -25,43 +25,15 @@ BitBake build tools.
|
||||
#
|
||||
# Based on functions from the base bb module, Copyright 2003 Holger Schurig
|
||||
|
||||
import re
|
||||
import tempfile
|
||||
import subprocess
|
||||
import os
|
||||
import logging
|
||||
import errno
|
||||
import bb
|
||||
import bb.progress
|
||||
import urllib.request, urllib.parse, urllib.error
|
||||
import urllib
|
||||
from bb import data
|
||||
from bb.fetch2 import FetchMethod
|
||||
from bb.fetch2 import FetchError
|
||||
from bb.fetch2 import logger
|
||||
from bb.fetch2 import runfetchcmd
|
||||
from bb.utils import export_proxies
|
||||
from bs4 import BeautifulSoup
|
||||
from bs4 import SoupStrainer
|
||||
|
||||
class WgetProgressHandler(bb.progress.LineFilterProgressHandler):
|
||||
"""
|
||||
Extract progress information from wget output.
|
||||
Note: relies on --progress=dot (with -v or without -q/-nv) being
|
||||
specified on the wget command line.
|
||||
"""
|
||||
def __init__(self, d):
|
||||
super(WgetProgressHandler, self).__init__(d)
|
||||
# Send an initial progress event so the bar gets shown
|
||||
self._fire_progress(0)
|
||||
|
||||
def writeline(self, line):
|
||||
percs = re.findall(r'(\d+)%\s+([\d.]+[A-Z])', line)
|
||||
if percs:
|
||||
progress = int(percs[-1][0])
|
||||
rate = percs[-1][1] + '/s'
|
||||
self.update(progress, rate)
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
class Wget(FetchMethod):
|
||||
"""Class to fetch urls via 'wget'"""
|
||||
@@ -84,19 +56,15 @@ class Wget(FetchMethod):
|
||||
else:
|
||||
ud.basename = os.path.basename(ud.path)
|
||||
|
||||
ud.localfile = d.expand(urllib.parse.unquote(ud.basename))
|
||||
if not ud.localfile:
|
||||
ud.localfile = d.expand(urllib.parse.unquote(ud.host + ud.path).replace("/", "."))
|
||||
ud.localfile = data.expand(urllib.unquote(ud.basename), d)
|
||||
|
||||
self.basecmd = d.getVar("FETCHCMD_wget") or "/usr/bin/env wget -t 2 -T 30 --passive-ftp --no-check-certificate"
|
||||
self.basecmd = d.getVar("FETCHCMD_wget", True) or "/usr/bin/env wget -t 2 -T 30 -nv --passive-ftp --no-check-certificate"
|
||||
|
||||
def _runwget(self, ud, d, command, quiet, workdir=None):
|
||||
|
||||
progresshandler = WgetProgressHandler(d)
|
||||
def _runwget(self, ud, d, command, quiet):
|
||||
|
||||
logger.debug(2, "Fetching %s using command '%s'" % (ud.url, command))
|
||||
bb.fetch2.check_network_access(d, command, ud.url)
|
||||
runfetchcmd(command + ' --progress=dot -v', d, quiet, log=progresshandler, workdir=workdir)
|
||||
bb.fetch2.check_network_access(d, command)
|
||||
runfetchcmd(command, d, quiet)
|
||||
|
||||
def download(self, ud, d):
|
||||
"""Fetch urls"""
|
||||
@@ -104,13 +72,10 @@ class Wget(FetchMethod):
|
||||
fetchcmd = self.basecmd
|
||||
|
||||
if 'downloadfilename' in ud.parm:
|
||||
dldir = d.getVar("DL_DIR")
|
||||
dldir = d.getVar("DL_DIR", True)
|
||||
bb.utils.mkdirhier(os.path.dirname(dldir + os.sep + ud.localfile))
|
||||
fetchcmd += " -O " + dldir + os.sep + ud.localfile
|
||||
|
||||
if ud.user and ud.pswd:
|
||||
fetchcmd += " --user=%s --password=%s --auth-no-challenge" % (ud.user, ud.pswd)
|
||||
|
||||
uri = ud.url.split(";")[0]
|
||||
if os.path.exists(ud.localpath):
|
||||
# file exists, but we didnt complete it.. trying again..
|
||||
@@ -131,496 +96,11 @@ class Wget(FetchMethod):
|
||||
|
||||
return True
|
||||
|
||||
def checkstatus(self, fetch, ud, d, try_again=True):
|
||||
import urllib.request, urllib.error, urllib.parse, socket, http.client
|
||||
from urllib.response import addinfourl
|
||||
from bb.fetch2 import FetchConnectionCache
|
||||
def checkstatus(self, ud, d):
|
||||
|
||||
class HTTPConnectionCache(http.client.HTTPConnection):
|
||||
if fetch.connection_cache:
|
||||
def connect(self):
|
||||
"""Connect to the host and port specified in __init__."""
|
||||
uri = ud.url.split(";")[0]
|
||||
fetchcmd = self.basecmd + " --spider '%s'" % uri
|
||||
|
||||
sock = fetch.connection_cache.get_connection(self.host, self.port)
|
||||
if sock:
|
||||
self.sock = sock
|
||||
else:
|
||||
self.sock = socket.create_connection((self.host, self.port),
|
||||
self.timeout, self.source_address)
|
||||
fetch.connection_cache.add_connection(self.host, self.port, self.sock)
|
||||
self._runwget(ud, d, fetchcmd, True)
|
||||
|
||||
if self._tunnel_host:
|
||||
self._tunnel()
|
||||
|
||||
class CacheHTTPHandler(urllib.request.HTTPHandler):
|
||||
def http_open(self, req):
|
||||
return self.do_open(HTTPConnectionCache, req)
|
||||
|
||||
def do_open(self, http_class, req):
|
||||
"""Return an addinfourl object for the request, using http_class.
|
||||
|
||||
http_class must implement the HTTPConnection API from httplib.
|
||||
The addinfourl return value is a file-like object. It also
|
||||
has methods and attributes including:
|
||||
- info(): return a mimetools.Message object for the headers
|
||||
- geturl(): return the original request URL
|
||||
- code: HTTP status code
|
||||
"""
|
||||
host = req.host
|
||||
if not host:
|
||||
raise urlllib2.URLError('no host given')
|
||||
|
||||
h = http_class(host, timeout=req.timeout) # will parse host:port
|
||||
h.set_debuglevel(self._debuglevel)
|
||||
|
||||
headers = dict(req.unredirected_hdrs)
|
||||
headers.update(dict((k, v) for k, v in list(req.headers.items())
|
||||
if k not in headers))
|
||||
|
||||
# We want to make an HTTP/1.1 request, but the addinfourl
|
||||
# class isn't prepared to deal with a persistent connection.
|
||||
# It will try to read all remaining data from the socket,
|
||||
# which will block while the server waits for the next request.
|
||||
# So make sure the connection gets closed after the (only)
|
||||
# request.
|
||||
|
||||
# Don't close connection when connection_cache is enabled,
|
||||
if fetch.connection_cache is None:
|
||||
headers["Connection"] = "close"
|
||||
else:
|
||||
headers["Connection"] = "Keep-Alive" # Works for HTTP/1.0
|
||||
|
||||
headers = dict(
|
||||
(name.title(), val) for name, val in list(headers.items()))
|
||||
|
||||
if req._tunnel_host:
|
||||
tunnel_headers = {}
|
||||
proxy_auth_hdr = "Proxy-Authorization"
|
||||
if proxy_auth_hdr in headers:
|
||||
tunnel_headers[proxy_auth_hdr] = headers[proxy_auth_hdr]
|
||||
# Proxy-Authorization should not be sent to origin
|
||||
# server.
|
||||
del headers[proxy_auth_hdr]
|
||||
h.set_tunnel(req._tunnel_host, headers=tunnel_headers)
|
||||
|
||||
try:
|
||||
h.request(req.get_method(), req.selector, req.data, headers)
|
||||
except socket.error as err: # XXX what error?
|
||||
# Don't close connection when cache is enabled.
|
||||
# Instead, try to detect connections that are no longer
|
||||
# usable (for example, closed unexpectedly) and remove
|
||||
# them from the cache.
|
||||
if fetch.connection_cache is None:
|
||||
h.close()
|
||||
elif isinstance(err, OSError) and err.errno == errno.EBADF:
|
||||
# This happens when the server closes the connection despite the Keep-Alive.
|
||||
# Apparently urllib then uses the file descriptor, expecting it to be
|
||||
# connected, when in reality the connection is already gone.
|
||||
# We let the request fail and expect it to be
|
||||
# tried once more ("try_again" in check_status()),
|
||||
# with the dead connection removed from the cache.
|
||||
# If it still fails, we give up, which can happen for bad
|
||||
# HTTP proxy settings.
|
||||
fetch.connection_cache.remove_connection(h.host, h.port)
|
||||
raise urllib.error.URLError(err)
|
||||
else:
|
||||
try:
|
||||
r = h.getresponse(buffering=True)
|
||||
except TypeError: # buffering kw not supported
|
||||
r = h.getresponse()
|
||||
|
||||
# Pick apart the HTTPResponse object to get the addinfourl
|
||||
# object initialized properly.
|
||||
|
||||
# Wrap the HTTPResponse object in socket's file object adapter
|
||||
# for Windows. That adapter calls recv(), so delegate recv()
|
||||
# to read(). This weird wrapping allows the returned object to
|
||||
# have readline() and readlines() methods.
|
||||
|
||||
# XXX It might be better to extract the read buffering code
|
||||
# out of socket._fileobject() and into a base class.
|
||||
r.recv = r.read
|
||||
|
||||
# no data, just have to read
|
||||
r.read()
|
||||
class fp_dummy(object):
|
||||
def read(self):
|
||||
return ""
|
||||
def readline(self):
|
||||
return ""
|
||||
def close(self):
|
||||
pass
|
||||
closed = False
|
||||
|
||||
resp = addinfourl(fp_dummy(), r.msg, req.get_full_url())
|
||||
resp.code = r.status
|
||||
resp.msg = r.reason
|
||||
|
||||
# Close the connection when the server requests it.
|
||||
if fetch.connection_cache is not None:
|
||||
if 'Connection' in r.msg and r.msg['Connection'] == 'close':
|
||||
fetch.connection_cache.remove_connection(h.host, h.port)
|
||||
|
||||
return resp
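# Illustrative sketch, not part of the diff above: the fetch.connection_cache used
# throughout do_open() is assumed to expose get_connection()/add_connection()/
# remove_connection() keyed by (host, port). A minimal stand-in could look like this:
class SimpleConnectionCache(object):
    """Hypothetical cache of open connections keyed by (host, port)."""
    def __init__(self):
        self.cache = {}

    def get_connection(self, host, port):
        # Return a cached connection, or None if this endpoint has not been seen yet
        return self.cache.get((host, port))

    def add_connection(self, host, port, connection):
        self.cache[(host, port)] = connection

    def remove_connection(self, host, port):
        # Drop (and close) a connection that turned out to be dead
        connection = self.cache.pop((host, port), None)
        if connection is not None:
            connection.close()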
|
||||
|
||||
class HTTPMethodFallback(urllib.request.BaseHandler):
|
||||
"""
|
||||
Fallback to GET if HEAD is not allowed (405 HTTP error)
|
||||
"""
|
||||
def http_error_405(self, req, fp, code, msg, headers):
|
||||
fp.read()
|
||||
fp.close()
|
||||
|
||||
newheaders = dict((k,v) for k,v in list(req.headers.items())
|
||||
if k.lower() not in ("content-length", "content-type"))
|
||||
return self.parent.open(urllib.request.Request(req.get_full_url(),
|
||||
headers=newheaders,
|
||||
origin_req_host=req.origin_req_host,
|
||||
unverifiable=True))
|
||||
|
||||
"""
|
||||
Some servers (e.g. GitHub archives, hosted on Amazon S3) return 403
|
||||
Forbidden when they actually mean 405 Method Not Allowed.
|
||||
"""
|
||||
http_error_403 = http_error_405
|
||||
|
||||
|
||||
class FixedHTTPRedirectHandler(urllib.request.HTTPRedirectHandler):
|
||||
"""
|
||||
urllib2.HTTPRedirectHandler resets the method to GET on redirect,
|
||||
when we want to follow redirects using the original method.
|
||||
"""
|
||||
def redirect_request(self, req, fp, code, msg, headers, newurl):
|
||||
newreq = urllib.request.HTTPRedirectHandler.redirect_request(self, req, fp, code, msg, headers, newurl)
|
||||
newreq.get_method = lambda: req.get_method()
|
||||
return newreq
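# Usage sketch (illustrative, not part of the diff): with the two handlers above
# installed, a HEAD request keeps its method across 301/302 redirects and falls
# back to GET on a 405/403 response.
import urllib.request

def head_status(url, timeout=30):
    opener = urllib.request.build_opener(FixedHTTPRedirectHandler, HTTPMethodFallback)
    req = urllib.request.Request(url)
    req.get_method = lambda: "HEAD"
    with opener.open(req, timeout=timeout) as resp:
        return resp.getcode()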
|
||||
exported_proxies = export_proxies(d)
|
||||
|
||||
handlers = [FixedHTTPRedirectHandler, HTTPMethodFallback]
|
||||
if exported_proxies:
|
||||
handlers.append(urllib.request.ProxyHandler())
|
||||
handlers.append(CacheHTTPHandler())
|
||||
# XXX: Since Python 2.7.9 ssl cert validation is enabled by default
# (see PEP-0476); this causes verification errors on some https servers,
# so disable it by default.
|
||||
import ssl
|
||||
if hasattr(ssl, '_create_unverified_context'):
|
||||
handlers.append(urllib.request.HTTPSHandler(context=ssl._create_unverified_context()))
|
||||
opener = urllib.request.build_opener(*handlers)
|
||||
|
||||
try:
|
||||
uri = ud.url.split(";")[0]
|
||||
r = urllib.request.Request(uri)
|
||||
r.get_method = lambda: "HEAD"
|
||||
# Some servers (FusionForge, as used on Alioth) require that the
|
||||
# optional Accept header is set.
|
||||
r.add_header("Accept", "*/*")
|
||||
def add_basic_auth(login_str, request):
|
||||
'''Adds Basic auth to http request, pass in login:password as string'''
|
||||
import base64
|
||||
encodeuser = base64.b64encode(login_str.encode('utf-8')).decode("utf-8")
|
||||
authheader = "Basic %s" % encodeuser
|
||||
r.add_header("Authorization", authheader)
|
||||
|
||||
if ud.user:
|
||||
add_basic_auth(ud.user, r)
|
||||
|
||||
try:
|
||||
import netrc, urllib.parse
|
||||
n = netrc.netrc()
|
||||
login, unused, password = n.authenticators(urllib.parse.urlparse(uri).hostname)
|
||||
add_basic_auth("%s:%s" % (login, password), r)
|
||||
except (TypeError, ImportError, IOError, netrc.NetrcParseError):
|
||||
pass
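# Illustrative ~/.netrc entry that the netrc lookup above would pick up
# (hostname and credentials are hypothetical):
#
#   machine downloads.example.com
#   login builder
#   password s3cret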
|
||||
|
||||
with opener.open(r) as response:
|
||||
pass
|
||||
except urllib.error.URLError as e:
|
||||
if try_again:
|
||||
logger.debug(2, "checkstatus: trying again")
|
||||
return self.checkstatus(fetch, ud, d, False)
|
||||
else:
|
||||
# debug for now to avoid spamming the logs in e.g. remote sstate searches
|
||||
logger.debug(2, "checkstatus() urlopen failed: %s" % e)
|
||||
return False
|
||||
return True
|
||||
|
||||
def _parse_path(self, regex, s):
|
||||
"""
|
||||
Find and group name, version and archive type in the given string s
|
||||
"""
|
||||
|
||||
m = regex.search(s)
|
||||
if m:
|
||||
pname = ''
|
||||
pver = ''
|
||||
ptype = ''
|
||||
|
||||
mdict = m.groupdict()
|
||||
if 'name' in mdict.keys():
|
||||
pname = mdict['name']
|
||||
if 'pver' in mdict.keys():
|
||||
pver = mdict['pver']
|
||||
if 'type' in mdict.keys():
|
||||
ptype = mdict['type']
|
||||
|
||||
bb.debug(3, "_parse_path: %s, %s, %s" % (pname, pver, ptype))
|
||||
|
||||
return (pname, pver, ptype)
|
||||
|
||||
return None
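# Worked example (illustrative): a regex with the named groups expected above
# splits a tarball name into its parts; the real patterns are built later in
# _init_regexes().
import re

example_regex = re.compile(r"(?P<name>[a-z-]+-)(?P<pver>\d+(\.\d+)+)\.(?P<type>tar\.gz)")
m = example_regex.search("gnome-common-2.20.0.tar.gz")
assert m and m.group("name") == "gnome-common-" and m.group("pver") == "2.20.0" and m.group("type") == "tar.gz"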
|
||||
|
||||
def _modelate_version(self, version):
|
||||
if version[0] in ['.', '-']:
|
||||
if version[1].isdigit():
|
||||
version = version[1] + version[0] + version[2:len(version)]
|
||||
else:
|
||||
version = version[1:len(version)]
|
||||
|
||||
version = re.sub('-', '.', version)
|
||||
version = re.sub('_', '.', version)
|
||||
version = re.sub('(rc)+', '.1000.', version)
|
||||
version = re.sub('(beta)+', '.100.', version)
|
||||
version = re.sub('(alpha)+', '.10.', version)
|
||||
if version[0] == 'v':
|
||||
version = version[1:len(version)]
|
||||
return version
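# Worked examples (illustrative) of the normalisation above, so that pre-release
# suffixes end up as comparable dotted numbers:
#   "1.0-rc3"    -> "1.0..1000.3"
#   "v2.1_beta1" -> "2.1..100.1"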
|
||||
|
||||
def _vercmp(self, old, new):
|
||||
"""
|
||||
Check whether 'new' is newer than the 'old' version. We use the existing vercmp()
for that purpose. PE is cleared in the comparison as it's not used for the build, and
PR is cleared too for simplicity, as it is difficult to derive from the various upstream formats
|
||||
"""
|
||||
|
||||
(oldpn, oldpv, oldsuffix) = old
|
||||
(newpn, newpv, newsuffix) = new
|
||||
|
||||
"""
|
||||
Check for a new suffix type that we have never heard of before
|
||||
"""
|
||||
if (newsuffix):
|
||||
m = self.suffix_regex_comp.search(newsuffix)
|
||||
if not m:
|
||||
bb.warn("%s has a possible unknown suffix: %s" % (newpn, newsuffix))
|
||||
return False
|
||||
|
||||
"""
|
||||
Not our package so ignore it
|
||||
"""
|
||||
if oldpn != newpn:
|
||||
return False
|
||||
|
||||
oldpv = self._modelate_version(oldpv)
|
||||
newpv = self._modelate_version(newpv)
|
||||
|
||||
return bb.utils.vercmp(("0", oldpv, ""), ("0", newpv, ""))
|
||||
|
||||
def _fetch_index(self, uri, ud, d):
|
||||
"""
|
||||
Run fetch checkstatus to get directory information
|
||||
"""
|
||||
f = tempfile.NamedTemporaryFile()
|
||||
with tempfile.TemporaryDirectory(prefix="wget-index-") as workdir, tempfile.NamedTemporaryFile(dir=workdir, prefix="wget-listing-") as f:
|
||||
agent = "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2.12) Gecko/20101027 Ubuntu/9.10 (karmic) Firefox/3.6.12"
|
||||
fetchcmd = self.basecmd
|
||||
fetchcmd += " -O " + f.name + " --user-agent='" + agent + "' '" + uri + "'"
|
||||
try:
|
||||
self._runwget(ud, d, fetchcmd, True, workdir=workdir)
|
||||
fetchresult = f.read()
|
||||
except bb.fetch2.BBFetchException:
|
||||
fetchresult = ""
|
||||
|
||||
return fetchresult
|
||||
|
||||
def _check_latest_version(self, url, package, package_regex, current_version, ud, d):
|
||||
"""
|
||||
Return the latest version of a package inside a given directory path
|
||||
If error or no version, return ""
|
||||
"""
|
||||
valid = 0
|
||||
version = ['', '', '']
|
||||
|
||||
bb.debug(3, "VersionURL: %s" % (url))
|
||||
soup = BeautifulSoup(self._fetch_index(url, ud, d), "html.parser", parse_only=SoupStrainer("a"))
|
||||
if not soup:
|
||||
bb.debug(3, "*** %s NO SOUP" % (url))
|
||||
return ""
|
||||
|
||||
for line in soup.find_all('a', href=True):
|
||||
bb.debug(3, "line['href'] = '%s'" % (line['href']))
|
||||
bb.debug(3, "line = '%s'" % (str(line)))
|
||||
|
||||
newver = self._parse_path(package_regex, line['href'])
|
||||
if not newver:
|
||||
newver = self._parse_path(package_regex, str(line))
|
||||
|
||||
if newver:
|
||||
bb.debug(3, "Upstream version found: %s" % newver[1])
|
||||
if valid == 0:
|
||||
version = newver
|
||||
valid = 1
|
||||
elif self._vercmp(version, newver) < 0:
|
||||
version = newver
|
||||
|
||||
pupver = re.sub('_', '.', version[1])
|
||||
|
||||
bb.debug(3, "*** %s -> UpstreamVersion = %s (CurrentVersion = %s)" %
|
||||
(package, pupver or "N/A", current_version[1]))
|
||||
|
||||
if valid:
|
||||
return pupver
|
||||
|
||||
return ""
|
||||
|
||||
def _check_latest_version_by_dir(self, dirver, package, package_regex,
|
||||
current_version, ud, d):
|
||||
"""
|
||||
Scan every directory in order to get upstream version.
|
||||
"""
|
||||
version_dir = ['', '', '']
|
||||
version = ['', '', '']
|
||||
|
||||
dirver_regex = re.compile("(?P<pfx>\D*)(?P<ver>(\d+[\.\-_])+(\d+))")
|
||||
s = dirver_regex.search(dirver)
|
||||
if s:
|
||||
version_dir[1] = s.group('ver')
|
||||
else:
|
||||
version_dir[1] = dirver
|
||||
|
||||
dirs_uri = bb.fetch.encodeurl([ud.type, ud.host,
|
||||
ud.path.split(dirver)[0], ud.user, ud.pswd, {}])
|
||||
bb.debug(3, "DirURL: %s, %s" % (dirs_uri, package))
|
||||
|
||||
soup = BeautifulSoup(self._fetch_index(dirs_uri, ud, d), "html.parser", parse_only=SoupStrainer("a"))
|
||||
if not soup:
|
||||
return version[1]
|
||||
|
||||
for line in soup.find_all('a', href=True):
|
||||
s = dirver_regex.search(line['href'].strip("/"))
|
||||
if s:
|
||||
sver = s.group('ver')
|
||||
|
||||
# When the prefix is part of the version directory we need to
# make sure that only the version directory is used, so remove
# any preceding directories if they exist.
|
||||
#
|
||||
# Example: pfx = '/dir1/dir2/v' and version = '2.5' the expected
|
||||
# result is v2.5.
|
||||
spfx = s.group('pfx').split('/')[-1]
|
||||
|
||||
version_dir_new = ['', sver, '']
|
||||
if self._vercmp(version_dir, version_dir_new) <= 0:
|
||||
dirver_new = spfx + sver
|
||||
path = ud.path.replace(dirver, dirver_new, True) \
|
||||
.split(package)[0]
|
||||
uri = bb.fetch.encodeurl([ud.type, ud.host, path,
|
||||
ud.user, ud.pswd, {}])
|
||||
|
||||
pupver = self._check_latest_version(uri,
|
||||
package, package_regex, current_version, ud, d)
|
||||
if pupver:
|
||||
version[1] = pupver
|
||||
|
||||
version_dir = version_dir_new
|
||||
|
||||
return version[1]
|
||||
|
||||
def _init_regexes(self, package, ud, d):
|
||||
"""
|
||||
Match as many patterns as possible such as:
|
||||
gnome-common-2.20.0.tar.gz (most common format)
|
||||
gtk+-2.90.1.tar.gz
|
||||
xf86-input-synaptics-12.6.9.tar.gz
|
||||
dri2proto-2.3.tar.gz
|
||||
blktool_4.orig.tar.gz
|
||||
libid3tag-0.15.1b.tar.gz
|
||||
unzip552.tar.gz
|
||||
icu4c-3_6-src.tgz
|
||||
genext2fs_1.3.orig.tar.gz
|
||||
gst-fluendo-mp3
|
||||
"""
|
||||
# match most patterns which use "-" as the separator before the version digits
|
||||
pn_prefix1 = "[a-zA-Z][a-zA-Z0-9]*([-_][a-zA-Z]\w+)*\+?[-_]"
|
||||
# a loose pattern such as for unzip552.tar.gz
|
||||
pn_prefix2 = "[a-zA-Z]+"
|
||||
# a loose pattern such as for 80325-quicky-0.4.tar.gz
|
||||
pn_prefix3 = "[0-9]+[-]?[a-zA-Z]+"
|
||||
# Save the Package Name (pn) Regex for use later
|
||||
pn_regex = "(%s|%s|%s)" % (pn_prefix1, pn_prefix2, pn_prefix3)
|
||||
|
||||
# match version
|
||||
pver_regex = "(([A-Z]*\d+[a-zA-Z]*[\.\-_]*)+)"
|
||||
|
||||
# match arch
|
||||
parch_regex = "-source|_all_"
|
||||
|
||||
# the src.rpm extension was added only for rpm packages. It can be removed if rpm
# packages will always be considered as having to be manually upgraded
|
||||
psuffix_regex = "(tar\.gz|tgz|tar\.bz2|zip|xz|tar\.lz|rpm|bz2|orig\.tar\.gz|tar\.xz|src\.tar\.gz|src\.tgz|svnr\d+\.tar\.bz2|stable\.tar\.gz|src\.rpm)"
|
||||
|
||||
# match name, version and archive type of a package
|
||||
package_regex_comp = re.compile("(?P<name>%s?\.?v?)(?P<pver>%s)(?P<arch>%s)?[\.-](?P<type>%s$)"
|
||||
% (pn_regex, pver_regex, parch_regex, psuffix_regex))
|
||||
self.suffix_regex_comp = re.compile(psuffix_regex)
|
||||
|
||||
# compile the regex, which can be specific to the package or a generic regex
|
||||
pn_regex = d.getVar('UPSTREAM_CHECK_REGEX')
|
||||
if pn_regex:
|
||||
package_custom_regex_comp = re.compile(pn_regex)
|
||||
else:
|
||||
version = self._parse_path(package_regex_comp, package)
|
||||
if version:
|
||||
package_custom_regex_comp = re.compile(
|
||||
"(?P<name>%s)(?P<pver>%s)(?P<arch>%s)?[\.-](?P<type>%s)" %
|
||||
(re.escape(version[0]), pver_regex, parch_regex, psuffix_regex))
|
||||
else:
|
||||
package_custom_regex_comp = None
|
||||
|
||||
return package_custom_regex_comp
|
||||
|
||||
def latest_versionstring(self, ud, d):
|
||||
"""
|
||||
Manipulate the URL and try to obtain the latest package version
|
||||
|
||||
Sanity check to ensure the name and type match.
|
||||
"""
|
||||
package = ud.path.split("/")[-1]
|
||||
current_version = ['', d.getVar('PV'), '']
|
||||
|
||||
"""possible to have no version in pkg name, such as spectrum-fw"""
|
||||
if not re.search("\d+", package):
|
||||
current_version[1] = re.sub('_', '.', current_version[1])
|
||||
current_version[1] = re.sub('-', '.', current_version[1])
|
||||
return (current_version[1], '')
|
||||
|
||||
package_regex = self._init_regexes(package, ud, d)
|
||||
if package_regex is None:
|
||||
bb.warn("latest_versionstring: package %s don't match pattern" % (package))
|
||||
return ('', '')
|
||||
bb.debug(3, "latest_versionstring, regex: %s" % (package_regex.pattern))
|
||||
|
||||
uri = ""
|
||||
regex_uri = d.getVar("UPSTREAM_CHECK_URI")
|
||||
if not regex_uri:
|
||||
path = ud.path.split(package)[0]
|
||||
|
||||
# search for version matches on folders inside the path, like:
|
||||
# "5.7" in http://download.gnome.org/sources/${PN}/5.7/${PN}-${PV}.tar.gz
|
||||
dirver_regex = re.compile("(?P<dirver>[^/]*(\d+\.)*\d+([-_]r\d+)*)/")
|
||||
m = dirver_regex.search(path)
|
||||
if m:
|
||||
pn = d.getVar('PN')
|
||||
dirver = m.group('dirver')
|
||||
|
||||
dirver_pn_regex = re.compile("%s\d?" % (re.escape(pn)))
|
||||
if not dirver_pn_regex.search(dirver):
|
||||
return (self._check_latest_version_by_dir(dirver,
|
||||
package, package_regex, current_version, ud, d), '')
|
||||
|
||||
uri = bb.fetch.encodeurl([ud.type, ud.host, path, ud.user, ud.pswd, {}])
|
||||
else:
|
||||
uri = regex_uri
|
||||
|
||||
return (self._check_latest_version(uri, package, package_regex,
|
||||
current_version, ud, d), '')
|
||||
|
||||
@@ -1,508 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# ex:ts=4:sw=4:sts=4:et
|
||||
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
|
||||
#
|
||||
# Copyright (C) 2003, 2004 Chris Larson
|
||||
# Copyright (C) 2003, 2004 Phil Blundell
|
||||
# Copyright (C) 2003 - 2005 Michael 'Mickey' Lauer
|
||||
# Copyright (C) 2005 Holger Hans Peter Freyther
|
||||
# Copyright (C) 2005 ROAD GmbH
|
||||
# Copyright (C) 2006 Richard Purdie
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License version 2 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License along
|
||||
# with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
import os
|
||||
import sys
|
||||
import logging
|
||||
import optparse
|
||||
import warnings
|
||||
import fcntl
|
||||
import time
|
||||
import traceback
|
||||
|
||||
import bb
|
||||
from bb import event
|
||||
import bb.msg
|
||||
from bb import cooker
|
||||
from bb import ui
|
||||
from bb import server
|
||||
from bb import cookerdata
|
||||
|
||||
import bb.server.process
|
||||
import bb.server.xmlrpcclient
|
||||
|
||||
logger = logging.getLogger("BitBake")
|
||||
|
||||
class BBMainException(Exception):
|
||||
pass
|
||||
|
||||
class BBMainFatal(bb.BBHandledException):
|
||||
pass
|
||||
|
||||
def present_options(optionlist):
|
||||
if len(optionlist) > 1:
|
||||
return ' or '.join([', '.join(optionlist[:-1]), optionlist[-1]])
|
||||
else:
|
||||
return optionlist[0]
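# Illustrative, not part of the original file: the helper joins all but the last
# option with commas and the last one with "or", e.g.
#   present_options(["knotty", "ncurses", "taskexp"]) == "knotty, ncurses or taskexp"
#   present_options(["knotty"]) == "knotty"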
|
||||
|
||||
class BitbakeHelpFormatter(optparse.IndentedHelpFormatter):
|
||||
def format_option(self, option):
|
||||
# We need to do this here rather than in the text we supply to
|
||||
# add_option() because we don't want to call list_extension_modules()
|
||||
# on every execution (since it imports all of the modules)
|
||||
# Note also that we modify option.help rather than the returned text
|
||||
# - this is so that we don't have to re-format the text ourselves
|
||||
if option.dest == 'ui':
|
||||
valid_uis = list_extension_modules(bb.ui, 'main')
|
||||
option.help = option.help.replace('@CHOICES@', present_options(valid_uis))
|
||||
|
||||
return optparse.IndentedHelpFormatter.format_option(self, option)
|
||||
|
||||
def list_extension_modules(pkg, checkattr):
|
||||
"""
|
||||
Lists extension modules in a specific Python package
|
||||
(e.g. UIs, servers). NOTE: Calling this function will import all of the
|
||||
submodules of the specified module in order to check for the specified
|
||||
attribute; this can have unusual side-effects. As a result, this should
|
||||
only be called when displaying help text or error messages.
|
||||
Parameters:
|
||||
pkg: previously imported Python package to list
|
||||
checkattr: attribute to look for in module to determine if it's valid
|
||||
as the type of extension you are looking for
|
||||
"""
|
||||
import pkgutil
|
||||
pkgdir = os.path.dirname(pkg.__file__)
|
||||
|
||||
modules = []
|
||||
for _, modulename, _ in pkgutil.iter_modules([pkgdir]):
|
||||
if os.path.isdir(os.path.join(pkgdir, modulename)):
|
||||
# ignore directories
|
||||
continue
|
||||
try:
|
||||
module = __import__(pkg.__name__, fromlist=[modulename])
|
||||
except:
|
||||
# If we can't import it, it's not valid
|
||||
continue
|
||||
module_if = getattr(module, modulename)
|
||||
if getattr(module_if, 'hidden_extension', False):
|
||||
continue
|
||||
if not checkattr or hasattr(module_if, checkattr):
|
||||
modules.append(modulename)
|
||||
return modules
|
||||
|
||||
def import_extension_module(pkg, modulename, checkattr):
|
||||
try:
|
||||
# Dynamically load the UI based on the ui name. Although we
|
||||
# suggest a fixed set this allows you to have flexibility in which
|
||||
# ones are available.
|
||||
module = __import__(pkg.__name__, fromlist=[modulename])
|
||||
return getattr(module, modulename)
|
||||
except AttributeError:
|
||||
modules = present_options(list_extension_modules(pkg, checkattr))
|
||||
raise BBMainException('FATAL: Unable to import extension module "%s" from %s. '
|
||||
'Valid extension modules: %s' % (modulename, pkg.__name__, modules))
|
||||
|
||||
# Display bitbake/OE warnings via the BitBake.Warnings logger, ignoring others
|
||||
warnlog = logging.getLogger("BitBake.Warnings")
|
||||
_warnings_showwarning = warnings.showwarning
|
||||
def _showwarning(message, category, filename, lineno, file=None, line=None):
|
||||
if file is not None:
|
||||
if _warnings_showwarning is not None:
|
||||
_warnings_showwarning(message, category, filename, lineno, file, line)
|
||||
else:
|
||||
s = warnings.formatwarning(message, category, filename, lineno)
|
||||
warnlog.warning(s)
|
||||
|
||||
warnings.showwarning = _showwarning
|
||||
warnings.filterwarnings("ignore")
|
||||
warnings.filterwarnings("default", module="(<string>$|(oe|bb)\.)")
|
||||
warnings.filterwarnings("ignore", category=PendingDeprecationWarning)
|
||||
warnings.filterwarnings("ignore", category=ImportWarning)
|
||||
warnings.filterwarnings("ignore", category=DeprecationWarning, module="<string>$")
|
||||
warnings.filterwarnings("ignore", message="With-statements now directly support multiple context managers")
|
||||
|
||||
class BitBakeConfigParameters(cookerdata.ConfigParameters):
|
||||
|
||||
def parseCommandLine(self, argv=sys.argv):
|
||||
parser = optparse.OptionParser(
|
||||
formatter=BitbakeHelpFormatter(),
|
||||
version="BitBake Build Tool Core version %s" % bb.__version__,
|
||||
usage="""%prog [options] [recipename/target recipe:do_task ...]
|
||||
|
||||
Executes the specified task (default is 'build') for a given set of target recipes (.bb files).
|
||||
It is assumed there is a conf/bblayers.conf available in cwd or in BBPATH which
|
||||
will provide the layer, BBFILES and other configuration information.""")
|
||||
|
||||
parser.add_option("-b", "--buildfile", action="store", dest="buildfile", default=None,
|
||||
help="Execute tasks from a specific .bb recipe directly. WARNING: Does "
|
||||
"not handle any dependencies from other recipes.")
|
||||
|
||||
parser.add_option("-k", "--continue", action="store_false", dest="abort", default=True,
|
||||
help="Continue as much as possible after an error. While the target that "
|
||||
"failed and anything depending on it cannot be built, as much as "
|
||||
"possible will be built before stopping.")
|
||||
|
||||
parser.add_option("-f", "--force", action="store_true", dest="force", default=False,
|
||||
help="Force the specified targets/task to run (invalidating any "
|
||||
"existing stamp file).")
|
||||
|
||||
parser.add_option("-c", "--cmd", action="store", dest="cmd",
|
||||
help="Specify the task to execute. The exact options available "
|
||||
"depend on the metadata. Some examples might be 'compile'"
|
||||
" or 'populate_sysroot' or 'listtasks' may give a list of "
|
||||
"the tasks available.")
|
||||
|
||||
parser.add_option("-C", "--clear-stamp", action="store", dest="invalidate_stamp",
|
||||
help="Invalidate the stamp for the specified task such as 'compile' "
|
||||
"and then run the default task for the specified target(s).")
|
||||
|
||||
parser.add_option("-r", "--read", action="append", dest="prefile", default=[],
|
||||
help="Read the specified file before bitbake.conf.")
|
||||
|
||||
parser.add_option("-R", "--postread", action="append", dest="postfile", default=[],
|
||||
help="Read the specified file after bitbake.conf.")
|
||||
|
||||
parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False,
|
||||
help="Enable tracing of shell tasks (with 'set -x'). "
|
||||
"Also print bb.note(...) messages to stdout (in "
|
||||
"addition to writing them to ${T}/log.do_<task>).")
|
||||
|
||||
parser.add_option("-D", "--debug", action="count", dest="debug", default=0,
|
||||
help="Increase the debug level. You can specify this "
|
||||
"more than once. -D sets the debug level to 1, "
|
||||
"where only bb.debug(1, ...) messages are printed "
|
||||
"to stdout; -DD sets the debug level to 2, where "
|
||||
"both bb.debug(1, ...) and bb.debug(2, ...) "
|
||||
"messages are printed; etc. Without -D, no debug "
|
||||
"messages are printed. Note that -D only affects "
|
||||
"output to stdout. All debug messages are written "
|
||||
"to ${T}/log.do_taskname, regardless of the debug "
|
||||
"level.")
|
||||
|
||||
parser.add_option("-q", "--quiet", action="count", dest="quiet", default=0,
|
||||
help="Output less log message data to the terminal. You can specify this more than once.")
|
||||
|
||||
parser.add_option("-n", "--dry-run", action="store_true", dest="dry_run", default=False,
|
||||
help="Don't execute, just go through the motions.")
|
||||
|
||||
parser.add_option("-S", "--dump-signatures", action="append", dest="dump_signatures",
|
||||
default=[], metavar="SIGNATURE_HANDLER",
|
||||
help="Dump out the signature construction information, with no task "
|
||||
"execution. The SIGNATURE_HANDLER parameter is passed to the "
|
||||
"handler. Two common values are none and printdiff but the handler "
|
||||
"may define more/less. none means only dump the signature, printdiff"
|
||||
" means compare the dumped signature with the cached one.")
|
||||
|
||||
parser.add_option("-p", "--parse-only", action="store_true",
|
||||
dest="parse_only", default=False,
|
||||
help="Quit after parsing the BB recipes.")
|
||||
|
||||
parser.add_option("-s", "--show-versions", action="store_true",
|
||||
dest="show_versions", default=False,
|
||||
help="Show current and preferred versions of all recipes.")
|
||||
|
||||
parser.add_option("-e", "--environment", action="store_true",
|
||||
dest="show_environment", default=False,
|
||||
help="Show the global or per-recipe environment complete with information"
|
||||
" about where variables were set/changed.")
|
||||
|
||||
parser.add_option("-g", "--graphviz", action="store_true", dest="dot_graph", default=False,
|
||||
help="Save dependency tree information for the specified "
|
||||
"targets in the dot syntax.")
|
||||
|
||||
parser.add_option("-I", "--ignore-deps", action="append",
|
||||
dest="extra_assume_provided", default=[],
|
||||
help="Assume these dependencies don't exist and are already provided "
|
||||
"(equivalent to ASSUME_PROVIDED). Useful to make dependency "
|
||||
"graphs more appealing")
|
||||
|
||||
parser.add_option("-l", "--log-domains", action="append", dest="debug_domains", default=[],
|
||||
help="Show debug logging for the specified logging domains")
|
||||
|
||||
parser.add_option("-P", "--profile", action="store_true", dest="profile", default=False,
|
||||
help="Profile the command and save reports.")
|
||||
|
||||
# @CHOICES@ is substituted out by BitbakeHelpFormatter above
|
||||
parser.add_option("-u", "--ui", action="store", dest="ui",
|
||||
default=os.environ.get('BITBAKE_UI', 'knotty'),
|
||||
help="The user interface to use (@CHOICES@ - default %default).")
|
||||
|
||||
parser.add_option("", "--token", action="store", dest="xmlrpctoken",
|
||||
default=os.environ.get("BBTOKEN"),
|
||||
help="Specify the connection token to be used when connecting "
|
||||
"to a remote server.")
|
||||
|
||||
parser.add_option("", "--revisions-changed", action="store_true",
|
||||
dest="revisions_changed", default=False,
|
||||
help="Set the exit code depending on whether upstream floating "
|
||||
"revisions have changed or not.")
|
||||
|
||||
parser.add_option("", "--server-only", action="store_true",
|
||||
dest="server_only", default=False,
|
||||
help="Run bitbake without a UI, only starting a server "
|
||||
"(cooker) process.")
|
||||
|
||||
parser.add_option("-B", "--bind", action="store", dest="bind", default=False,
|
||||
help="The name/address for the bitbake xmlrpc server to bind to.")
|
||||
|
||||
parser.add_option("-T", "--idle-timeout", type=float, dest="server_timeout",
|
||||
default=os.getenv("BB_SERVER_TIMEOUT"),
|
||||
help="Set timeout to unload bitbake server due to inactivity, "
|
||||
"set to -1 means no unload, "
|
||||
"default: Environment variable BB_SERVER_TIMEOUT.")
|
||||
|
||||
parser.add_option("", "--no-setscene", action="store_true",
|
||||
dest="nosetscene", default=False,
|
||||
help="Do not run any setscene tasks. sstate will be ignored and "
|
||||
"everything needed, built.")
|
||||
|
||||
parser.add_option("", "--setscene-only", action="store_true",
|
||||
dest="setsceneonly", default=False,
|
||||
help="Only run setscene tasks, don't run any real tasks.")
|
||||
|
||||
parser.add_option("", "--remote-server", action="store", dest="remote_server",
|
||||
default=os.environ.get("BBSERVER"),
|
||||
help="Connect to the specified server.")
|
||||
|
||||
parser.add_option("-m", "--kill-server", action="store_true",
|
||||
dest="kill_server", default=False,
|
||||
help="Terminate any running bitbake server.")
|
||||
|
||||
parser.add_option("", "--observe-only", action="store_true",
|
||||
dest="observe_only", default=False,
|
||||
help="Connect to a server as an observing-only client.")
|
||||
|
||||
parser.add_option("", "--status-only", action="store_true",
|
||||
dest="status_only", default=False,
|
||||
help="Check the status of the remote bitbake server.")
|
||||
|
||||
parser.add_option("-w", "--write-log", action="store", dest="writeeventlog",
|
||||
default=os.environ.get("BBEVENTLOG"),
|
||||
help="Writes the event log of the build to a bitbake event json file. "
|
||||
"Use '' (empty string) to assign the name automatically.")
|
||||
|
||||
parser.add_option("", "--runall", action="append", dest="runall",
|
||||
help="Run the specified task for any recipe in the taskgraph of the specified target (even if it wouldn't otherwise have run).")
|
||||
|
||||
parser.add_option("", "--runonly", action="append", dest="runonly",
|
||||
help="Run only the specified task within the taskgraph of the specified targets (and any task dependencies those tasks may have).")
|
||||
|
||||
|
||||
options, targets = parser.parse_args(argv)
|
||||
|
||||
if options.quiet and options.verbose:
|
||||
parser.error("options --quiet and --verbose are mutually exclusive")
|
||||
|
||||
if options.quiet and options.debug:
|
||||
parser.error("options --quiet and --debug are mutually exclusive")
|
||||
|
||||
# use configuration files from environment variables
|
||||
if "BBPRECONF" in os.environ:
|
||||
options.prefile.append(os.environ["BBPRECONF"])
|
||||
|
||||
if "BBPOSTCONF" in os.environ:
|
||||
options.postfile.append(os.environ["BBPOSTCONF"])
|
||||
|
||||
# fill in proper log name if not supplied
|
||||
if options.writeeventlog is not None and len(options.writeeventlog) == 0:
|
||||
from datetime import datetime
|
||||
eventlog = "bitbake_eventlog_%s.json" % datetime.now().strftime("%Y%m%d%H%M%S")
|
||||
options.writeeventlog = eventlog
|
||||
|
||||
if options.bind:
|
||||
try:
|
||||
#Checking that the port is a number and is a ':' delimited value
|
||||
(host, port) = options.bind.split(':')
|
||||
port = int(port)
|
||||
except (ValueError,IndexError):
|
||||
raise BBMainException("FATAL: Malformed host:port bind parameter")
|
||||
options.xmlrpcinterface = (host, port)
|
||||
else:
|
||||
options.xmlrpcinterface = (None, 0)
|
||||
|
||||
return options, targets[1:]
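# Illustrative: "-B localhost:12345" on the command line ends up as
# options.xmlrpcinterface == ("localhost", 12345); a --bind value without a ':'
# or with a non-numeric port raises the BBMainException above instead.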
|
||||
|
||||
|
||||
def bitbake_main(configParams, configuration):
|
||||
|
||||
# Python multiprocessing requires /dev/shm on Linux
|
||||
if sys.platform.startswith('linux') and not os.access('/dev/shm', os.W_OK | os.X_OK):
|
||||
raise BBMainException("FATAL: /dev/shm does not exist or is not writable")
|
||||
|
||||
# Unbuffer stdout to avoid log truncation in the event
|
||||
# of a disorderly exit as well as to provide timely
|
||||
# updates to log files for use with tail
|
||||
try:
|
||||
if sys.stdout.name == '<stdout>':
|
||||
# Reopen with O_SYNC (unbuffered)
|
||||
fl = fcntl.fcntl(sys.stdout.fileno(), fcntl.F_GETFL)
|
||||
fl |= os.O_SYNC
|
||||
fcntl.fcntl(sys.stdout.fileno(), fcntl.F_SETFL, fl)
|
||||
except:
|
||||
pass
|
||||
|
||||
configuration.setConfigParameters(configParams)
|
||||
|
||||
if configParams.server_only and configParams.remote_server:
|
||||
raise BBMainException("FATAL: The '--server-only' option conflicts with %s.\n" %
|
||||
("the BBSERVER environment variable" if "BBSERVER" in os.environ \
|
||||
else "the '--remote-server' option"))
|
||||
|
||||
if configParams.observe_only and not (configParams.remote_server or configParams.bind):
|
||||
raise BBMainException("FATAL: '--observe-only' can only be used by UI clients "
|
||||
"connecting to a server.\n")
|
||||
|
||||
if "BBDEBUG" in os.environ:
|
||||
level = int(os.environ["BBDEBUG"])
|
||||
if level > configuration.debug:
|
||||
configuration.debug = level
|
||||
|
||||
bb.msg.init_msgconfig(configParams.verbose, configuration.debug,
|
||||
configuration.debug_domains)
|
||||
|
||||
server_connection, ui_module = setup_bitbake(configParams, configuration)
|
||||
# No server connection
|
||||
if server_connection is None:
|
||||
if configParams.status_only:
|
||||
return 1
|
||||
if configParams.kill_server:
|
||||
return 0
|
||||
|
||||
if not configParams.server_only:
|
||||
if configParams.status_only:
|
||||
server_connection.terminate()
|
||||
return 0
|
||||
|
||||
try:
|
||||
for event in bb.event.ui_queue:
|
||||
server_connection.events.queue_event(event)
|
||||
bb.event.ui_queue = []
|
||||
|
||||
return ui_module.main(server_connection.connection, server_connection.events,
|
||||
configParams)
|
||||
finally:
|
||||
server_connection.terminate()
|
||||
else:
|
||||
return 0
|
||||
|
||||
return 1
|
||||
|
||||
def setup_bitbake(configParams, configuration, extrafeatures=None):
|
||||
# Ensure logging messages get sent to the UI as events
|
||||
handler = bb.event.LogHandler()
|
||||
if not configParams.status_only:
|
||||
# In status only mode there are no logs and no UI
|
||||
logger.addHandler(handler)
|
||||
|
||||
# Clear away any spurious environment variables while we stoke up the cooker
|
||||
cleanedvars = bb.utils.clean_environment()
|
||||
|
||||
if configParams.server_only:
|
||||
featureset = []
|
||||
ui_module = None
|
||||
else:
|
||||
ui_module = import_extension_module(bb.ui, configParams.ui, 'main')
|
||||
# Collect the feature set for the UI
|
||||
featureset = getattr(ui_module, "featureSet", [])
|
||||
|
||||
if extrafeatures:
|
||||
for feature in extrafeatures:
|
||||
if not feature in featureset:
|
||||
featureset.append(feature)
|
||||
|
||||
server_connection = None
|
||||
|
||||
if configParams.remote_server:
|
||||
# Connect to a remote XMLRPC server
|
||||
server_connection = bb.server.xmlrpcclient.connectXMLRPC(configParams.remote_server, featureset,
|
||||
configParams.observe_only, configParams.xmlrpctoken)
|
||||
else:
|
||||
retries = 8
|
||||
while retries:
|
||||
try:
|
||||
topdir, lock = lockBitbake()
|
||||
sockname = topdir + "/bitbake.sock"
|
||||
if lock:
|
||||
if configParams.status_only or configParams.kill_server:
|
||||
logger.info("bitbake server is not running.")
|
||||
lock.close()
|
||||
return None, None
|
||||
# we start a server with a given configuration
|
||||
logger.info("Starting bitbake server...")
|
||||
# Clear the event queue since we already displayed messages
|
||||
bb.event.ui_queue = []
|
||||
server = bb.server.process.BitBakeServer(lock, sockname, configuration, featureset)
|
||||
|
||||
else:
|
||||
logger.info("Reconnecting to bitbake server...")
|
||||
if not os.path.exists(sockname):
|
||||
print("Previous bitbake instance shutting down?, waiting to retry...")
|
||||
i = 0
|
||||
lock = None
|
||||
# Wait for 5s or until we can get the lock
|
||||
while not lock and i < 50:
|
||||
time.sleep(0.1)
|
||||
_, lock = lockBitbake()
|
||||
i += 1
|
||||
if lock:
|
||||
bb.utils.unlockfile(lock)
|
||||
raise bb.server.process.ProcessTimeout("Bitbake still shutting down as socket exists but no lock?")
|
||||
if not configParams.server_only:
|
||||
try:
|
||||
server_connection = bb.server.process.connectProcessServer(sockname, featureset)
|
||||
except EOFError:
|
||||
# The server may have been shutting down but not closed the socket yet. If that happened,
|
||||
# ignore it.
|
||||
pass
|
||||
|
||||
if server_connection or configParams.server_only:
|
||||
break
|
||||
except BBMainFatal:
|
||||
raise
|
||||
except (Exception, bb.server.process.ProcessTimeout) as e:
|
||||
if not retries:
|
||||
raise
|
||||
retries -= 1
|
||||
if isinstance(e, (bb.server.process.ProcessTimeout, BrokenPipeError)):
|
||||
logger.info("Retrying server connection...")
|
||||
else:
|
||||
logger.info("Retrying server connection... (%s)" % traceback.format_exc())
|
||||
if not retries:
|
||||
bb.fatal("Unable to connect to bitbake server, or start one")
|
||||
if retries < 5:
|
||||
time.sleep(5)
|
||||
|
||||
if configParams.kill_server:
|
||||
server_connection.connection.terminateServer()
|
||||
server_connection.terminate()
|
||||
bb.event.ui_queue = []
|
||||
logger.info("Terminated bitbake server.")
|
||||
return None, None
|
||||
|
||||
# Restore the environment in case the UI needs it
|
||||
for k in cleanedvars:
|
||||
os.environ[k] = cleanedvars[k]
|
||||
|
||||
logger.removeHandler(handler)
|
||||
|
||||
return server_connection, ui_module
|
||||
|
||||
def lockBitbake():
|
||||
topdir = bb.cookerdata.findTopdir()
|
||||
if not topdir:
|
||||
bb.error("Unable to find conf/bblayers.conf or conf/bitbake.conf. BBAPTH is unset and/or not in a build directory?")
|
||||
raise BBMainFatal
|
||||
lockfile = topdir + "/bitbake.lock"
|
||||
return topdir, bb.utils.lockfile(lockfile, False, False)
|
||||
|
||||
@@ -19,22 +19,11 @@
|
||||
|
||||
from bb.utils import better_compile, better_exec
|
||||
|
||||
def insert_method(modulename, code, fn, lineno):
|
||||
def insert_method(modulename, code, fn):
|
||||
"""
|
||||
Add the code of a module. The methods
will simply be added; no checking will be done
|
||||
"""
|
||||
comp = better_compile(code, modulename, fn, lineno=lineno)
|
||||
comp = better_compile(code, modulename, fn )
|
||||
better_exec(comp, None, code, fn)
|
||||
|
||||
compilecache = {}
|
||||
|
||||
def compile_cache(code):
|
||||
h = hash(code)
|
||||
if h in compilecache:
|
||||
return compilecache[h]
|
||||
return None
|
||||
|
||||
def compile_cache_add(code, compileobj):
|
||||
h = hash(code)
|
||||
compilecache[h] = compileobj
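# Hypothetical illustration (not taken from the original callers): a hash-keyed
# cache like the compile_cache()/compile_cache_add() helpers above could be
# consulted before compiling, roughly along these lines.
def compile_with_cache(code, modulename, fn):
    comp = compile_cache(code)
    if comp is None:
        comp = better_compile(code, modulename, fn)
        compile_cache_add(code, comp)
    return comp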
|
||||
|
||||
@@ -52,10 +52,10 @@ def getMountedDev(path):
|
||||
parentDev = os.stat(path).st_dev
|
||||
currentDev = parentDev
|
||||
# When the current directory's device is different from the
|
||||
# parent's, then the current directory is a mount point
|
||||
# parrent's, then the current directory is a mount point
|
||||
while parentDev == currentDev:
|
||||
mountPoint = path
|
||||
# Use dirname to get the parent's directory
|
||||
# Use dirname to get the parrent's directory
|
||||
path = os.path.dirname(path)
|
||||
# Reach the "/"
|
||||
if path == mountPoint:
|
||||
@@ -77,7 +77,7 @@ def getDiskData(BBDirs, configuration):
|
||||
"""Prepare disk data for disk space monitor"""
|
||||
|
||||
# Save the device IDs, need the ID to be unique (the dictionary's key is
|
||||
# unique), so that when more than one directory is located on the same
|
||||
# unique), so that when more than one directories are located in the same
|
||||
# device, we just monitor it once
|
||||
devDict = {}
|
||||
for pathSpaceInode in BBDirs.split():
|
||||
@@ -129,7 +129,7 @@ def getDiskData(BBDirs, configuration):
|
||||
bb.utils.mkdirhier(path)
|
||||
dev = getMountedDev(path)
|
||||
# Use path/action as the key
|
||||
devDict[(path, action)] = [dev, minSpace, minInode]
|
||||
devDict[os.path.join(path, action)] = [dev, minSpace, minInode]
|
||||
|
||||
return devDict
|
||||
|
||||
@@ -141,7 +141,7 @@ def getInterval(configuration):
|
||||
spaceDefault = 50 * 1024 * 1024
|
||||
inodeDefault = 5 * 1024
|
||||
|
||||
interval = configuration.getVar("BB_DISKMON_WARNINTERVAL")
|
||||
interval = configuration.getVar("BB_DISKMON_WARNINTERVAL", True)
|
||||
if not interval:
|
||||
return spaceDefault, inodeDefault
|
||||
else:
|
||||
@@ -179,7 +179,7 @@ class diskMonitor:
|
||||
self.enableMonitor = False
|
||||
self.configuration = configuration
|
||||
|
||||
BBDirs = configuration.getVar("BB_DISKMON_DIRS") or None
|
||||
BBDirs = configuration.getVar("BB_DISKMON_DIRS", True) or None
|
||||
if BBDirs:
|
||||
self.devDict = getDiskData(BBDirs, configuration)
|
||||
if self.devDict:
|
||||
@@ -187,11 +187,11 @@ class diskMonitor:
|
||||
if self.spaceInterval and self.inodeInterval:
|
||||
self.enableMonitor = True
|
||||
# These are for saving the previous disk free space and inode, we
|
||||
# use them to avoid printing too many warning messages
|
||||
# use them to avoid print too many warning messages
|
||||
self.preFreeS = {}
|
||||
self.preFreeI = {}
|
||||
# This is for STOPTASKS and ABORT, to avoid printing the message
|
||||
# repeatedly while waiting for the tasks to finish
|
||||
# This is for STOPTASKS and ABORT, to avoid print the message repeatly
|
||||
# during waiting the tasks to finish
|
||||
self.checked = {}
|
||||
for k in self.devDict:
|
||||
self.preFreeS[k] = 0
|
||||
@@ -205,25 +205,22 @@ class diskMonitor:
|
||||
""" Take action for the monitor """
|
||||
|
||||
if self.enableMonitor:
|
||||
diskUsage = {}
|
||||
for k, attributes in self.devDict.items():
|
||||
path, action = k
|
||||
dev, minSpace, minInode = attributes
|
||||
for k in self.devDict:
|
||||
path = os.path.dirname(k)
|
||||
action = os.path.basename(k)
|
||||
dev = self.devDict[k][0]
|
||||
minSpace = self.devDict[k][1]
|
||||
minInode = self.devDict[k][2]
|
||||
|
||||
st = os.statvfs(path)
|
||||
|
||||
# The available free space, integer number
|
||||
# The free space, float point number
|
||||
freeSpace = st.f_bavail * st.f_frsize
|
||||
|
||||
# Send all relevant information in the event.
|
||||
freeSpaceRoot = st.f_bfree * st.f_frsize
|
||||
totalSpace = st.f_blocks * st.f_frsize
|
||||
diskUsage[dev] = bb.event.DiskUsageSample(freeSpace, freeSpaceRoot, totalSpace)
|
||||
|
||||
if minSpace and freeSpace < minSpace:
|
||||
# Always show warning, the self.checked would always be False if the action is WARN
|
||||
if self.preFreeS[k] == 0 or self.preFreeS[k] - freeSpace > self.spaceInterval and not self.checked[k]:
|
||||
logger.warning("The free space of %s (%s) is running low (%.3fGB left)" % \
|
||||
logger.warn("The free space of %s (%s) is running low (%.3fGB left)" % \
|
||||
(path, dev, freeSpace / 1024 / 1024 / 1024.0))
|
||||
self.preFreeS[k] = freeSpace
|
||||
|
||||
@@ -238,18 +235,20 @@ class diskMonitor:
|
||||
rq.finish_runqueue(True)
|
||||
bb.event.fire(bb.event.DiskFull(dev, 'disk', freeSpace, path), self.configuration)
|
||||
|
||||
# The free inodes, integer number
|
||||
# The free inodes, float point number
|
||||
freeInode = st.f_favail
|
||||
|
||||
if minInode and freeInode < minInode:
|
||||
# Some filesystems use dynamic inodes so can't run out
|
||||
# (e.g. btrfs). This is reported by the inode count being 0.
|
||||
# Some fs formats' (e.g., btrfs) statvfs.f_files (inodes) is
|
||||
# zero, this is a feature of the fs, we disable the inode
|
||||
# checking for such a fs.
|
||||
if st.f_files == 0:
|
||||
logger.info("Inode check for %s is unavaliable, will remove it from disk monitor" % path)
|
||||
self.devDict[k][2] = None
|
||||
continue
|
||||
# Always show warning, the self.checked would always be False if the action is WARN
|
||||
if self.preFreeI[k] == 0 or self.preFreeI[k] - freeInode > self.inodeInterval and not self.checked[k]:
|
||||
logger.warning("The free inode of %s (%s) is running low (%.3fK left)" % \
|
||||
logger.warn("The free inode of %s (%s) is running low (%.3fK left)" % \
|
||||
(path, dev, freeInode / 1024.0))
|
||||
self.preFreeI[k] = freeInode
|
||||
|
||||
@@ -263,6 +262,4 @@ class diskMonitor:
|
||||
self.checked[k] = True
|
||||
rq.finish_runqueue(True)
|
||||
bb.event.fire(bb.event.DiskFull(dev, 'inode', freeInode, path), self.configuration)
|
||||
|
||||
bb.event.fire(bb.event.MonitorDiskEvent(diskUsage), self.configuration)
|
||||
return
|
||||
|
||||
@@ -57,7 +57,7 @@ class BBLogFormatter(logging.Formatter):
|
||||
}
|
||||
|
||||
color_enabled = False
|
||||
BASECOLOR, BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = list(range(29,38))
|
||||
BASECOLOR, BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(29,38)
|
||||
|
||||
COLORS = {
|
||||
DEBUG3 : CYAN,
|
||||
@@ -90,9 +90,8 @@ class BBLogFormatter(logging.Formatter):
|
||||
if self.color_enabled:
|
||||
record = self.colorize(record)
|
||||
msg = logging.Formatter.format(self, record)
|
||||
if hasattr(record, 'bb_exc_formatted'):
|
||||
msg += '\n' + ''.join(record.bb_exc_formatted)
|
||||
elif hasattr(record, 'bb_exc_info'):
|
||||
|
||||
if hasattr(record, 'bb_exc_info'):
|
||||
etype, value, tb = record.bb_exc_info
|
||||
formatted = bb.exceptions.format_exception(etype, value, tb, limit=5)
|
||||
msg += '\n' + ''.join(formatted)
|
||||
@@ -151,7 +150,7 @@ loggerDefaultVerbose = False
|
||||
loggerVerboseLogs = False
|
||||
loggerDefaultDomains = []
|
||||
|
||||
def init_msgconfig(verbose, debug, debug_domains=None):
|
||||
def init_msgconfig(verbose, debug, debug_domains = []):
|
||||
"""
|
||||
Set the default verbosity and debug levels and configure the logger
|
||||
"""
|
||||
@@ -159,10 +158,7 @@ def init_msgconfig(verbose, debug, debug_domains=None):
|
||||
bb.msg.loggerDefaultVerbose = verbose
|
||||
if verbose:
|
||||
bb.msg.loggerVerboseLogs = True
|
||||
if debug_domains:
|
||||
bb.msg.loggerDefaultDomains = debug_domains
|
||||
else:
|
||||
bb.msg.loggerDefaultDomains = []
|
||||
bb.msg.loggerDefaultDomains = debug_domains
|
||||
|
||||
def constructLogOptions():
|
||||
debug = loggerDefaultDebugLevel
|
||||
@@ -182,12 +178,9 @@ def constructLogOptions():
|
||||
debug_domains["BitBake.%s" % domainarg] = logging.DEBUG - dlevel + 1
|
||||
return level, debug_domains
|
||||
|
||||
def addDefaultlogFilter(handler, cls = BBLogFilter, forcelevel=None):
|
||||
def addDefaultlogFilter(handler, cls = BBLogFilter):
|
||||
level, debug_domains = constructLogOptions()
|
||||
|
||||
if forcelevel is not None:
|
||||
level = forcelevel
|
||||
|
||||
cls(handler, level, debug_domains)
|
||||
|
||||
#
|
||||
@@ -201,25 +194,3 @@ def fatal(msgdomain, msg):
|
||||
logger = logging.getLogger("BitBake")
|
||||
logger.critical(msg)
|
||||
sys.exit(1)
|
||||
|
||||
def logger_create(name, output=sys.stderr, level=logging.INFO, preserve_handlers=False, color='auto'):
|
||||
"""Standalone logger creation function"""
|
||||
logger = logging.getLogger(name)
|
||||
console = logging.StreamHandler(output)
|
||||
format = bb.msg.BBLogFormatter("%(levelname)s: %(message)s")
|
||||
if color == 'always' or (color == 'auto' and output.isatty()):
|
||||
format.enable_color()
|
||||
console.setFormatter(format)
|
||||
if preserve_handlers:
|
||||
logger.addHandler(console)
|
||||
else:
|
||||
logger.handlers = [console]
|
||||
logger.setLevel(level)
|
||||
return logger
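# Usage sketch (illustrative): standalone tools can build their own logger with
# colour auto-detection via this helper, e.g.
#   log = logger_create('bitbake-layers', output=sys.stdout, level=logging.DEBUG)
#   log.info("hello")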
|
||||
|
||||
def has_console_handler(logger):
|
||||
for handler in logger.handlers:
|
||||
if isinstance(handler, logging.StreamHandler):
|
||||
if handler.stream in [sys.stderr, sys.stdout]:
|
||||
return True
|
||||
return False
|
||||
|
||||
@@ -202,8 +202,8 @@ if __name__ == '__main__':
|
||||
print(rec5._replace(k=222)._my_custom_method()) # MyMixIn's
|
||||
print(rec5._replace(k=222).count(2)) # MyMixIn's
|
||||
|
||||
# Note that behavior: the standard namedtuple methods cannot be
|
||||
# overridden by a foreign mix-in -- even if the mix-in is declared
|
||||
# None that behavior: the standard namedtuple methods cannot be
|
||||
# overriden by a foreign mix-in -- even if the mix-in is declared
|
||||
# as the leftmost base class (but, obviously, you can override them
|
||||
# in the defined class or its subclasses):
|
||||
|
||||
|
||||
@@ -26,10 +26,9 @@ File parsers for the BitBake build tools.
|
||||
|
||||
handlers = []
|
||||
|
||||
import errno
|
||||
import logging
|
||||
import os
|
||||
import stat
|
||||
import logging
|
||||
import bb
|
||||
import bb.utils
|
||||
import bb.siggen
|
||||
@@ -50,11 +49,8 @@ class ParseError(Exception):
|
||||
else:
|
||||
return "ParseError in %s: %s" % (self.filename, self.msg)
|
||||
|
||||
class SkipRecipe(Exception):
|
||||
"""Exception raised to skip this recipe"""
|
||||
|
||||
class SkipPackage(SkipRecipe):
|
||||
"""Exception raised to skip this recipe (use SkipRecipe in new code)"""
|
||||
class SkipPackage(Exception):
|
||||
"""Exception raised to skip this package"""
|
||||
|
||||
__mtime_cache = {}
|
||||
def cached_mtime(f):
|
||||
@@ -71,27 +67,13 @@ def cached_mtime_noerror(f):
|
||||
return __mtime_cache[f]
|
||||
|
||||
def update_mtime(f):
|
||||
try:
|
||||
__mtime_cache[f] = os.stat(f)[stat.ST_MTIME]
|
||||
except OSError:
|
||||
if f in __mtime_cache:
|
||||
del __mtime_cache[f]
|
||||
return 0
|
||||
__mtime_cache[f] = os.stat(f)[stat.ST_MTIME]
|
||||
return __mtime_cache[f]
|
||||
|
||||
def update_cache(f):
|
||||
if f in __mtime_cache:
|
||||
logger.debug(1, "Updating mtime cache for %s" % f)
|
||||
update_mtime(f)
|
||||
|
||||
def clear_cache():
|
||||
global __mtime_cache
|
||||
__mtime_cache = {}
|
||||
|
||||
def mark_dependency(d, f):
|
||||
if f.startswith('./'):
|
||||
f = "%s/%s" % (os.getcwd(), f[2:])
|
||||
deps = (d.getVar('__depends', False) or [])
|
||||
deps = (d.getVar('__depends') or [])
|
||||
s = (f, cached_mtime_noerror(f))
|
||||
if s not in deps:
|
||||
deps.append(s)
|
||||
@@ -99,7 +81,7 @@ def mark_dependency(d, f):
|
||||
|
||||
def check_dependency(d, f):
|
||||
s = (f, cached_mtime_noerror(f))
|
||||
deps = (d.getVar('__depends', False) or [])
|
||||
deps = (d.getVar('__depends') or [])
|
||||
return s in deps
|
||||
|
||||
def supports(fn, data):
|
||||
@@ -127,19 +109,19 @@ def init_parser(d):
|
||||
|
||||
def resolve_file(fn, d):
|
||||
if not os.path.isabs(fn):
|
||||
bbpath = d.getVar("BBPATH")
|
||||
bbpath = d.getVar("BBPATH", True)
|
||||
newfn, attempts = bb.utils.which(bbpath, fn, history=True)
|
||||
for af in attempts:
|
||||
mark_dependency(d, af)
|
||||
if not newfn:
|
||||
raise IOError(errno.ENOENT, "file %s not found in %s" % (fn, bbpath))
|
||||
raise IOError("file %s not found in %s" % (fn, bbpath))
|
||||
fn = newfn
|
||||
else:
|
||||
mark_dependency(d, fn)
|
||||
|
||||
mark_dependency(d, fn)
|
||||
if not os.path.isfile(fn):
|
||||
raise IOError(errno.ENOENT, "file %s not found" % fn)
|
||||
raise IOError("file %s not found" % fn)
|
||||
|
||||
logger.debug(2, "LOAD %s", fn)
|
||||
return fn
|
||||
|
||||
# Used by OpenEmbedded metadata
|
||||
@@ -166,8 +148,8 @@ def vars_from_file(mypkg, d):
|
||||
def get_file_depends(d):
|
||||
'''Return the dependent files'''
|
||||
dep_files = []
|
||||
depends = d.getVar('__base_depends', False) or []
|
||||
depends = depends + (d.getVar('__depends', False) or [])
|
||||
depends = d.getVar('__base_depends', True) or []
|
||||
depends = depends + (d.getVar('__depends', True) or [])
|
||||
for (fn, _) in depends:
|
||||
dep_files.append(os.path.abspath(fn))
|
||||
return " ".join(dep_files)
|
||||
|
||||
@@ -21,7 +21,8 @@
|
||||
# with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
|
||||
from __future__ import absolute_import
|
||||
from future_builtins import filter
|
||||
import re
|
||||
import string
|
||||
import logging
|
||||
@@ -30,6 +31,8 @@ import itertools
|
||||
from bb import methodpool
|
||||
from bb.parse import logger
|
||||
|
||||
_bbversions_re = re.compile(r"\[(?P<from>[0-9]+)-(?P<to>[0-9]+)\]")
|
||||
|
||||
class StatementGroup(list):
|
||||
def eval(self, data):
|
||||
for statement in self:
|
||||
@@ -67,33 +70,6 @@ class ExportNode(AstNode):
|
||||
def eval(self, data):
|
||||
data.setVarFlag(self.var, "export", 1, op = 'exported')
|
||||
|
||||
class UnsetNode(AstNode):
|
||||
def __init__(self, filename, lineno, var):
|
||||
AstNode.__init__(self, filename, lineno)
|
||||
self.var = var
|
||||
|
||||
def eval(self, data):
|
||||
loginfo = {
|
||||
'variable': self.var,
|
||||
'file': self.filename,
|
||||
'line': self.lineno,
|
||||
}
|
||||
data.delVar(self.var,**loginfo)
|
||||
|
||||
class UnsetFlagNode(AstNode):
|
||||
def __init__(self, filename, lineno, var, flag):
|
||||
AstNode.__init__(self, filename, lineno)
|
||||
self.var = var
|
||||
self.flag = flag
|
||||
|
||||
def eval(self, data):
|
||||
loginfo = {
|
||||
'variable': self.var,
|
||||
'file': self.filename,
|
||||
'line': self.lineno,
|
||||
}
|
||||
data.delVarFlag(self.var, self.flag, **loginfo)
|
||||
|
||||
class DataNode(AstNode):
|
||||
"""
|
||||
Various data related updates. For the sake of sanity
|
||||
@@ -107,9 +83,9 @@ class DataNode(AstNode):
|
||||
|
||||
def getFunc(self, key, data):
|
||||
if 'flag' in self.groupd and self.groupd['flag'] != None:
|
||||
return data.getVarFlag(key, self.groupd['flag'], expand=False, noweakdefault=True)
|
||||
return data.getVarFlag(key, self.groupd['flag'], noweakdefault=True)
|
||||
else:
|
||||
return data.getVar(key, False, noweakdefault=True, parsing=True)
|
||||
return data.getVar(key, noweakdefault=True)
|
||||
|
||||
def eval(self, data):
|
||||
groupd = self.groupd
|
||||
@@ -130,6 +106,7 @@ class DataNode(AstNode):
|
||||
val = groupd["value"]
|
||||
elif "colon" in groupd and groupd["colon"] != None:
|
||||
e = data.createCopy()
|
||||
bb.data.update_data(e)
|
||||
op = "immediate"
|
||||
val = e.expand(groupd["value"], key + "[:=]")
|
||||
elif "append" in groupd and groupd["append"] != None:
|
||||
@@ -151,7 +128,7 @@ class DataNode(AstNode):
|
||||
if 'flag' in groupd and groupd['flag'] != None:
|
||||
flag = groupd['flag']
|
||||
elif groupd["lazyques"]:
|
||||
flag = "_defaultval"
|
||||
flag = "defaultval"
|
||||
|
||||
loginfo['op'] = op
|
||||
loginfo['detail'] = groupd["value"]
|
||||
@@ -159,42 +136,29 @@ class DataNode(AstNode):
|
||||
if flag:
|
||||
data.setVarFlag(key, flag, val, **loginfo)
|
||||
else:
|
||||
data.setVar(key, val, parsing=True, **loginfo)
|
||||
data.setVar(key, val, **loginfo)
|
||||
|
||||
class MethodNode(AstNode):
|
||||
tr_tbl = str.maketrans('/.+-@%&', '_______')
|
||||
tr_tbl = string.maketrans('/.+-@%', '______')
|
||||
|
||||
def __init__(self, filename, lineno, func_name, body, python, fakeroot):
|
||||
def __init__(self, filename, lineno, func_name, body):
|
||||
AstNode.__init__(self, filename, lineno)
|
||||
self.func_name = func_name
|
||||
self.body = body
|
||||
self.python = python
|
||||
self.fakeroot = fakeroot
|
||||
|
||||
def eval(self, data):
|
||||
text = '\n'.join(self.body)
|
||||
funcname = self.func_name
|
||||
if self.func_name == "__anonymous":
|
||||
funcname = ("__anon_%s_%s" % (self.lineno, self.filename.translate(MethodNode.tr_tbl)))
|
||||
self.python = True
|
||||
text = "def %s(d):\n" % (funcname) + text
|
||||
bb.methodpool.insert_method(funcname, text, self.filename, self.lineno - len(self.body))
|
||||
anonfuncs = data.getVar('__BBANONFUNCS', False) or []
|
||||
bb.methodpool.insert_method(funcname, text, self.filename)
|
||||
anonfuncs = data.getVar('__BBANONFUNCS') or []
|
||||
anonfuncs.append(funcname)
|
||||
data.setVar('__BBANONFUNCS', anonfuncs)
|
||||
if data.getVar(funcname, False):
|
||||
# clean up old version of this piece of metadata, as its
|
||||
# flags could cause problems
|
||||
data.delVarFlag(funcname, 'python')
|
||||
data.delVarFlag(funcname, 'fakeroot')
|
||||
if self.python:
|
||||
data.setVarFlag(funcname, "python", "1")
|
||||
if self.fakeroot:
|
||||
data.setVarFlag(funcname, "fakeroot", "1")
|
||||
data.setVarFlag(funcname, "func", 1)
|
||||
data.setVar(funcname, text, parsing=True)
|
||||
data.setVarFlag(funcname, 'filename', self.filename)
|
||||
data.setVarFlag(funcname, 'lineno', str(self.lineno - len(self.body)))
|
||||
data.setVar(funcname, text)
|
||||
else:
|
||||
data.setVarFlag(self.func_name, "func", 1)
|
||||
data.setVar(self.func_name, text)
|
||||
|
||||
class PythonMethodNode(AstNode):
|
||||
def __init__(self, filename, lineno, function, modulename, body):
|
||||
@@ -208,12 +172,31 @@ class PythonMethodNode(AstNode):
|
||||
# 'this' file. This means we will not parse methods from
|
||||
# bb classes twice
|
||||
text = '\n'.join(self.body)
|
||||
bb.methodpool.insert_method(self.modulename, text, self.filename, self.lineno - len(self.body) - 1)
|
||||
bb.methodpool.insert_method(self.modulename, text, self.filename)
|
||||
data.setVarFlag(self.function, "func", 1)
|
||||
data.setVarFlag(self.function, "python", 1)
|
||||
data.setVar(self.function, text, parsing=True)
|
||||
data.setVarFlag(self.function, 'filename', self.filename)
|
||||
data.setVarFlag(self.function, 'lineno', str(self.lineno - len(self.body) - 1))
|
||||
data.setVar(self.function, text)
|
||||
|
||||
class MethodFlagsNode(AstNode):
|
||||
def __init__(self, filename, lineno, key, m):
|
||||
AstNode.__init__(self, filename, lineno)
|
||||
self.key = key
|
||||
self.m = m
|
||||
|
||||
def eval(self, data):
|
||||
if data.getVar(self.key):
|
||||
# clean up old version of this piece of metadata, as its
|
||||
# flags could cause problems
|
||||
data.setVarFlag(self.key, 'python', None)
|
||||
data.setVarFlag(self.key, 'fakeroot', None)
|
||||
if self.m.group("py") is not None:
|
||||
data.setVarFlag(self.key, "python", "1")
|
||||
else:
|
||||
data.delVarFlag(self.key, "python")
|
||||
if self.m.group("fr") is not None:
|
||||
data.setVarFlag(self.key, "fakeroot", "1")
|
||||
else:
|
||||
data.delVarFlag(self.key, "fakeroot")
|
||||
|
||||
class ExportFuncsNode(AstNode):
|
||||
def __init__(self, filename, lineno, fns, classname):
|
||||
@@ -226,28 +209,24 @@ class ExportFuncsNode(AstNode):
|
||||
for func in self.n:
|
||||
calledfunc = self.classname + "_" + func
|
||||
|
||||
if data.getVar(func, False) and not data.getVarFlag(func, 'export_func', False):
|
||||
if data.getVar(func) and not data.getVarFlag(func, 'export_func'):
|
||||
continue
|
||||
|
||||
if data.getVar(func, False):
|
||||
if data.getVar(func):
|
||||
data.setVarFlag(func, 'python', None)
|
||||
data.setVarFlag(func, 'func', None)
|
||||
|
||||
for flag in [ "func", "python" ]:
|
||||
if data.getVarFlag(calledfunc, flag, False):
|
||||
data.setVarFlag(func, flag, data.getVarFlag(calledfunc, flag, False))
|
||||
if data.getVarFlag(calledfunc, flag):
|
||||
data.setVarFlag(func, flag, data.getVarFlag(calledfunc, flag))
|
||||
for flag in [ "dirs" ]:
|
||||
if data.getVarFlag(func, flag, False):
|
||||
data.setVarFlag(calledfunc, flag, data.getVarFlag(func, flag, False))
|
||||
data.setVarFlag(func, "filename", "autogenerated")
|
||||
data.setVarFlag(func, "lineno", 1)
|
||||
if data.getVarFlag(func, flag):
|
||||
data.setVarFlag(calledfunc, flag, data.getVarFlag(func, flag))
|
||||
|
||||
if data.getVarFlag(calledfunc, "python", False):
|
||||
data.setVar(func, " bb.build.exec_func('" + calledfunc + "', d)\n", parsing=True)
|
||||
if data.getVarFlag(calledfunc, "python"):
|
||||
data.setVar(func, " bb.build.exec_func('" + calledfunc + "', d)\n")
|
||||
else:
|
||||
if "-" in self.classname:
|
||||
bb.fatal("The classname %s contains a dash character and is calling an sh function %s using EXPORT_FUNCTIONS. Since a dash is illegal in sh function names, this cannot work, please rename the class or don't use EXPORT_FUNCTIONS." % (self.classname, calledfunc))
|
||||
data.setVar(func, " " + calledfunc + "\n", parsing=True)
|
||||
data.setVar(func, " " + calledfunc + "\n")
|
||||
data.setVarFlag(func, 'export_func', '1')
|
||||
|
||||
class AddTaskNode(AstNode):
|
||||
@@ -274,7 +253,7 @@ class BBHandlerNode(AstNode):
|
||||
self.hs = fns.split()
|
||||
|
||||
def eval(self, data):
|
||||
bbhands = data.getVar('__BBHANDLERS', False) or []
|
||||
bbhands = data.getVar('__BBHANDLERS') or []
|
||||
for h in self.hs:
|
||||
bbhands.append(h)
|
||||
data.setVarFlag(h, "handler", 1)
|
||||
@@ -294,21 +273,18 @@ def handleInclude(statements, filename, lineno, m, force):
|
||||
def handleExport(statements, filename, lineno, m):
|
||||
statements.append(ExportNode(filename, lineno, m.group(1)))
|
||||
|
||||
def handleUnset(statements, filename, lineno, m):
|
||||
statements.append(UnsetNode(filename, lineno, m.group(1)))
|
||||
|
||||
def handleUnsetFlag(statements, filename, lineno, m):
|
||||
statements.append(UnsetFlagNode(filename, lineno, m.group(1), m.group(2)))
|
||||
|
||||
def handleData(statements, filename, lineno, groupd):
|
||||
statements.append(DataNode(filename, lineno, groupd))
|
||||
|
||||
def handleMethod(statements, filename, lineno, func_name, body, python, fakeroot):
|
||||
statements.append(MethodNode(filename, lineno, func_name, body, python, fakeroot))
|
||||
def handleMethod(statements, filename, lineno, func_name, body):
|
||||
statements.append(MethodNode(filename, lineno, func_name, body))
|
||||
|
||||
def handlePythonMethod(statements, filename, lineno, funcname, modulename, body):
|
||||
statements.append(PythonMethodNode(filename, lineno, funcname, modulename, body))
|
||||
|
||||
def handleMethodFlags(statements, filename, lineno, key, m):
|
||||
statements.append(MethodFlagsNode(filename, lineno, key, m))
|
||||
|
||||
def handleExportFuncs(statements, filename, lineno, m, classname):
|
||||
statements.append(ExportFuncsNode(filename, lineno, m.group(1), classname))
|
||||
|
||||
@@ -335,59 +311,71 @@ def handleInherit(statements, filename, lineno, m):
|
||||
classes = m.group(1)
|
||||
statements.append(InheritNode(filename, lineno, classes))
|
||||
|
||||
def runAnonFuncs(d):
|
||||
code = []
|
||||
for funcname in d.getVar("__BBANONFUNCS", False) or []:
|
||||
code.append("%s(d)" % funcname)
|
||||
bb.utils.better_exec("\n".join(code), {"d": d})
|
||||
|
||||
def finalize(fn, d, variant = None):
|
||||
saved_handlers = bb.event.get_handlers().copy()
|
||||
|
||||
for var in d.getVar('__BBHANDLERS', False) or []:
|
||||
all_handlers = {}
|
||||
for var in d.getVar('__BBHANDLERS') or []:
|
||||
# try to add the handler
|
||||
handlerfn = d.getVarFlag(var, "filename", False)
|
||||
if not handlerfn:
|
||||
bb.fatal("Undefined event handler function '%s'" % var)
|
||||
handlerln = int(d.getVarFlag(var, "lineno", False))
|
||||
bb.event.register(var, d.getVar(var, False), (d.getVarFlag(var, "eventmask") or "").split(), handlerfn, handlerln)
|
||||
bb.event.register(var, d.getVar(var), (d.getVarFlag(var, "eventmask", True) or "").split())
|
||||
|
||||
bb.event.fire(bb.event.RecipePreFinalise(fn), d)
|
||||
|
||||
bb.data.expandKeys(d)
|
||||
runAnonFuncs(d)
|
||||
bb.data.update_data(d)
|
||||
code = []
|
||||
for funcname in d.getVar("__BBANONFUNCS") or []:
|
||||
code.append("%s(d)" % funcname)
|
||||
bb.utils.better_exec("\n".join(code), {"d": d})
|
||||
bb.data.update_data(d)
|
||||
|
||||
tasklist = d.getVar('__BBTASKS', False) or []
|
||||
bb.event.fire(bb.event.RecipeTaskPreProcess(fn, list(tasklist)), d)
|
||||
bb.build.add_tasks(tasklist, d)
|
||||
tasklist = d.getVar('__BBTASKS') or []
|
||||
deltasklist = d.getVar('__BBDELTASKS') or []
|
||||
bb.build.add_tasks(tasklist, deltasklist, d)
|
||||
|
||||
bb.parse.siggen.finalise(fn, d, variant)
|
||||
|
||||
d.setVar('BBINCLUDED', bb.parse.get_file_depends(d))
|
||||
|
||||
bb.event.fire(bb.event.RecipeParsed(fn), d)
|
||||
bb.event.set_handlers(saved_handlers)
|
||||
|
||||
def _create_variants(datastores, names, function, onlyfinalise):
|
||||
def _create_variants(datastores, names, function):
|
||||
def create_variant(name, orig_d, arg = None):
|
||||
if onlyfinalise and name not in onlyfinalise:
|
||||
return
|
||||
new_d = bb.data.createCopy(orig_d)
|
||||
function(arg or name, new_d)
|
||||
datastores[name] = new_d
|
||||
|
||||
for variant in list(datastores.keys()):
|
||||
for variant, variant_d in datastores.items():
|
||||
for name in names:
|
||||
if not variant:
|
||||
# Based on main recipe
|
||||
create_variant(name, datastores[""])
|
||||
create_variant(name, variant_d)
|
||||
else:
|
||||
create_variant("%s-%s" % (variant, name), datastores[variant], name)
|
||||
create_variant("%s-%s" % (variant, name), variant_d, name)
|
||||
|
||||
def _expand_versions(versions):
|
||||
def expand_one(version, start, end):
|
||||
for i in xrange(start, end + 1):
|
||||
ver = _bbversions_re.sub(str(i), version, 1)
|
||||
yield ver
|
||||
|
||||
versions = iter(versions)
|
||||
while True:
|
||||
try:
|
||||
version = next(versions)
|
||||
except StopIteration:
|
||||
break
|
||||
|
||||
range_ver = _bbversions_re.search(version)
|
||||
if not range_ver:
|
||||
yield version
|
||||
else:
|
||||
newversions = expand_one(version, int(range_ver.group("from")),
|
||||
int(range_ver.group("to")))
|
||||
versions = itertools.chain(newversions, versions)
|
||||
|
||||
def multi_finalize(fn, d):
|
||||
appends = (d.getVar("__BBAPPEND") or "").split()
|
||||
appends = (d.getVar("__BBAPPEND", True) or "").split()
|
||||
for append in appends:
|
||||
logger.debug(1, "Appending .bbappend file %s to %s", append, fn)
|
||||
logger.debug(2, "Appending .bbappend file %s to %s", append, fn)
|
||||
bb.parse.BBHandler.handle(append, d, True)
|
||||
|
||||
onlyfinalise = d.getVar("__ONLYFINALISE", False)
|
||||
@@ -396,11 +384,55 @@ def multi_finalize(fn, d):
|
||||
d = bb.data.createCopy(safe_d)
|
||||
try:
|
||||
finalize(fn, d)
|
||||
except bb.parse.SkipRecipe as e:
|
||||
except bb.parse.SkipPackage as e:
|
||||
d.setVar("__SKIPPED", e.args[0])
|
||||
datastores = {"": safe_d}
|
||||
|
||||
extended = d.getVar("BBCLASSEXTEND") or ""
|
||||
versions = (d.getVar("BBVERSIONS", True) or "").split()
|
||||
if versions:
|
||||
pv = orig_pv = d.getVar("PV", True)
|
||||
baseversions = {}
|
||||
|
||||
def verfunc(ver, d, pv_d = None):
|
||||
if pv_d is None:
|
||||
pv_d = d
|
||||
|
||||
overrides = d.getVar("OVERRIDES", True).split(":")
|
||||
pv_d.setVar("PV", ver)
|
||||
overrides.append(ver)
|
||||
bpv = baseversions.get(ver) or orig_pv
|
||||
pv_d.setVar("BPV", bpv)
|
||||
overrides.append(bpv)
|
||||
d.setVar("OVERRIDES", ":".join(overrides))
|
||||
|
||||
versions = list(_expand_versions(versions))
|
||||
for pos, version in enumerate(list(versions)):
|
||||
try:
|
||||
pv, bpv = version.split(":", 2)
|
||||
except ValueError:
|
||||
pass
|
||||
else:
|
||||
versions[pos] = pv
|
||||
baseversions[pv] = bpv
|
||||
|
||||
if pv in versions and not baseversions.get(pv):
|
||||
versions.remove(pv)
|
||||
else:
|
||||
pv = versions.pop()
|
||||
|
||||
# This is necessary because our existing main datastore
|
||||
# has already been finalized with the old PV, we need one
|
||||
# that's been finalized with the new PV.
|
||||
d = bb.data.createCopy(safe_d)
|
||||
verfunc(pv, d, safe_d)
|
||||
try:
|
||||
finalize(fn, d)
|
||||
except bb.parse.SkipPackage as e:
|
||||
d.setVar("__SKIPPED", e.args[0])
|
||||
|
||||
_create_variants(datastores, versions, verfunc)
|
||||
|
||||
extended = d.getVar("BBCLASSEXTEND", True) or ""
|
||||
if extended:
|
||||
# the following is to support bbextends with arguments, for e.g. multilib
|
||||
# an example is as follows:
|
||||
@@ -418,7 +450,7 @@ def multi_finalize(fn, d):
|
||||
else:
|
||||
extendedmap[ext] = ext
|
||||
|
||||
pn = d.getVar("PN")
|
||||
pn = d.getVar("PN", True)
|
||||
def extendfunc(name, d):
|
||||
if name != extendedmap[name]:
|
||||
d.setVar("BBEXTENDCURR", extendedmap[name])
|
||||
@@ -428,15 +460,19 @@ def multi_finalize(fn, d):
|
||||
bb.parse.BBHandler.inherit(extendedmap[name], fn, 0, d)
|
||||
|
||||
safe_d.setVar("BBCLASSEXTEND", extended)
|
||||
_create_variants(datastores, extendedmap.keys(), extendfunc, onlyfinalise)
|
||||
_create_variants(datastores, extendedmap.keys(), extendfunc)
|
||||
|
||||
for variant in datastores.keys():
|
||||
for variant, variant_d in datastores.iteritems():
|
||||
if variant:
|
||||
try:
|
||||
if not onlyfinalise or variant in onlyfinalise:
|
||||
finalize(fn, datastores[variant], variant)
|
||||
except bb.parse.SkipRecipe as e:
|
||||
datastores[variant].setVar("__SKIPPED", e.args[0])
|
||||
finalize(fn, variant_d, variant)
|
||||
except bb.parse.SkipPackage as e:
|
||||
variant_d.setVar("__SKIPPED", e.args[0])
|
||||
|
||||
if len(datastores) > 1:
|
||||
variants = filter(None, datastores.iterkeys())
|
||||
safe_d.setVar("__VARIANTS", " ".join(variants))
|
||||
|
||||
datastores[""] = d
|
||||
return datastores
|
||||
|
||||
@@ -25,14 +25,14 @@
|
||||
# with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
|
||||
from __future__ import absolute_import
|
||||
import re, bb, os
|
||||
import logging
|
||||
import bb.build, bb.utils
|
||||
from bb import data
|
||||
|
||||
from . import ConfHandler
|
||||
from .. import resolve_file, ast, logger, ParseError
|
||||
from .. import resolve_file, ast, logger
|
||||
from .ConfHandler import include, init
|
||||
|
||||
# For compatibility
|
||||
@@ -47,26 +47,37 @@ __addhandler_regexp__ = re.compile( r"addhandler\s+(.+)" )
|
||||
__def_regexp__ = re.compile( r"def\s+(\w+).*:" )
|
||||
__python_func_regexp__ = re.compile( r"(\s+.*)|(^$)" )
|
||||
|
||||
__infunc__ = []
|
||||
|
||||
__infunc__ = ""
|
||||
__inpython__ = False
|
||||
__body__ = []
|
||||
__classname__ = ""
|
||||
|
||||
cached_statements = {}
|
||||
|
||||
# We need to indicate EOF to the feeder. This code is so messy that
|
||||
# factoring it out to a close_parse_file method is out of the question.
|
||||
# We will use the IN_PYTHON_EOF as an indicator to just close the method
|
||||
#
|
||||
# The two parts using it are tightly integrated anyway
|
||||
IN_PYTHON_EOF = -9999999999999
|
||||
|
||||
|
||||
|
||||
def supports(fn, d):
|
||||
"""Return True if fn has a supported extension"""
|
||||
return os.path.splitext(fn)[-1] in [".bb", ".bbclass", ".inc"]
|
||||
|
||||
def inherit(files, fn, lineno, d):
|
||||
__inherit_cache = d.getVar('__inherit_cache', False) or []
|
||||
__inherit_cache = d.getVar('__inherit_cache') or []
|
||||
files = d.expand(files).split()
|
||||
for file in files:
|
||||
if not os.path.isabs(file) and not file.endswith(".bbclass"):
|
||||
file = os.path.join('classes', '%s.bbclass' % file)
|
||||
|
||||
if not os.path.isabs(file):
|
||||
bbpath = d.getVar("BBPATH")
|
||||
dname = os.path.dirname(fn)
|
||||
bbpath = "%s:%s" % (dname, d.getVar("BBPATH", True))
|
||||
abs_fn, attempts = bb.utils.which(bbpath, file, history=True)
|
||||
for af in attempts:
|
||||
if af != abs_fn:
|
||||
@@ -75,11 +86,11 @@ def inherit(files, fn, lineno, d):
|
||||
file = abs_fn
|
||||
|
||||
if not file in __inherit_cache:
|
||||
logger.debug(1, "Inheriting %s (from %s:%d)" % (file, fn, lineno))
|
||||
logger.log(logging.DEBUG -1, "BB %s:%d: inheriting %s", fn, lineno, file)
|
||||
__inherit_cache.append( file )
|
||||
d.setVar('__inherit_cache', __inherit_cache)
|
||||
include(fn, file, lineno, d, "inherit")
|
||||
__inherit_cache = d.getVar('__inherit_cache', False) or []
|
||||
__inherit_cache = d.getVar('__inherit_cache') or []
|
||||
|
||||
def get_statements(filename, absolute_filename, base_name):
|
||||
global cached_statements
|
||||
@@ -87,20 +98,20 @@ def get_statements(filename, absolute_filename, base_name):
|
||||
try:
|
||||
return cached_statements[absolute_filename]
|
||||
except KeyError:
|
||||
with open(absolute_filename, 'r') as f:
|
||||
statements = ast.StatementGroup()
|
||||
|
||||
lineno = 0
|
||||
while True:
|
||||
lineno = lineno + 1
|
||||
s = f.readline()
|
||||
if not s: break
|
||||
s = s.rstrip()
|
||||
feeder(lineno, s, filename, base_name, statements)
|
||||
file = open(absolute_filename, 'r')
|
||||
statements = ast.StatementGroup()
|
||||
|
||||
lineno = 0
|
||||
while True:
|
||||
lineno = lineno + 1
|
||||
s = file.readline()
|
||||
if not s: break
|
||||
s = s.rstrip()
|
||||
feeder(lineno, s, filename, base_name, statements)
|
||||
file.close()
|
||||
if __inpython__:
|
||||
# add a blank line to close out any python definition
|
||||
feeder(lineno, "", filename, base_name, statements, eof=True)
|
||||
feeder(IN_PYTHON_EOF, "", filename, base_name, statements)
|
||||
|
||||
if filename.endswith(".bbclass") or filename.endswith(".inc"):
|
||||
cached_statements[absolute_filename] = statements
|
||||
@@ -109,62 +120,66 @@ def get_statements(filename, absolute_filename, base_name):
|
||||
def handle(fn, d, include):
|
||||
global __func_start_regexp__, __inherit_regexp__, __export_func_regexp__, __addtask_regexp__, __addhandler_regexp__, __infunc__, __body__, __residue__, __classname__
|
||||
__body__ = []
|
||||
__infunc__ = []
|
||||
__infunc__ = ""
|
||||
__classname__ = ""
|
||||
__residue__ = []
|
||||
|
||||
|
||||
if include == 0:
|
||||
logger.debug(2, "BB %s: handle(data)", fn)
|
||||
else:
|
||||
logger.debug(2, "BB %s: handle(data, include)", fn)
|
||||
|
||||
base_name = os.path.basename(fn)
|
||||
(root, ext) = os.path.splitext(base_name)
|
||||
init(d)
|
||||
|
||||
if ext == ".bbclass":
|
||||
__classname__ = root
|
||||
__inherit_cache = d.getVar('__inherit_cache', False) or []
|
||||
__inherit_cache = d.getVar('__inherit_cache') or []
|
||||
if not fn in __inherit_cache:
|
||||
__inherit_cache.append(fn)
|
||||
d.setVar('__inherit_cache', __inherit_cache)
|
||||
|
||||
if include != 0:
|
||||
oldfile = d.getVar('FILE', False)
|
||||
oldfile = d.getVar('FILE')
|
||||
else:
|
||||
oldfile = None
|
||||
|
||||
abs_fn = resolve_file(fn, d)
|
||||
|
||||
if include:
|
||||
bb.parse.mark_dependency(d, abs_fn)
|
||||
|
||||
# actual loading
|
||||
statements = get_statements(fn, abs_fn, base_name)
|
||||
|
||||
# DONE WITH PARSING... time to evaluate
|
||||
if ext != ".bbclass" and abs_fn != oldfile:
|
||||
if ext != ".bbclass":
|
||||
d.setVar('FILE', abs_fn)
|
||||
|
||||
try:
|
||||
statements.eval(d)
|
||||
except bb.parse.SkipRecipe:
|
||||
d.setVar("__SKIPPED", True)
|
||||
except bb.parse.SkipPackage:
|
||||
bb.data.setVar("__SKIPPED", True, d)
|
||||
if include == 0:
|
||||
return { "" : d }
|
||||
|
||||
if __infunc__:
|
||||
raise ParseError("Shell function %s is never closed" % __infunc__[0], __infunc__[1], __infunc__[2])
|
||||
if __residue__:
|
||||
raise ParseError("Leftover unparsed (incomplete?) data %s from %s" % __residue__, fn)
|
||||
|
||||
if ext != ".bbclass" and include == 0:
|
||||
return ast.multi_finalize(fn, d)
|
||||
|
||||
if ext != ".bbclass" and oldfile and abs_fn != oldfile:
|
||||
if oldfile:
|
||||
d.setVar("FILE", oldfile)
|
||||
|
||||
return d
|
||||
|
||||
def feeder(lineno, s, fn, root, statements, eof=False):
|
||||
def feeder(lineno, s, fn, root, statements):
|
||||
global __func_start_regexp__, __inherit_regexp__, __export_func_regexp__, __addtask_regexp__, __addhandler_regexp__, __def_regexp__, __python_func_regexp__, __inpython__, __infunc__, __body__, bb, __residue__, __classname__
|
||||
if __infunc__:
|
||||
if s == '}':
|
||||
__body__.append('')
|
||||
ast.handleMethod(statements, fn, lineno, __infunc__[0], __body__, __infunc__[3], __infunc__[4])
|
||||
__infunc__ = []
|
||||
ast.handleMethod(statements, fn, lineno, __infunc__, __body__)
|
||||
__infunc__ = ""
|
||||
__body__ = []
|
||||
else:
|
||||
__body__.append(s)
|
||||
@@ -172,7 +187,7 @@ def feeder(lineno, s, fn, root, statements, eof=False):
|
||||
|
||||
if __inpython__:
|
||||
m = __python_func_regexp__.match(s)
|
||||
if m and not eof:
|
||||
if m and lineno != IN_PYTHON_EOF:
|
||||
__body__.append(s)
|
||||
return
|
||||
else:
|
||||
@@ -181,7 +196,7 @@ def feeder(lineno, s, fn, root, statements, eof=False):
|
||||
__body__ = []
|
||||
__inpython__ = False
|
||||
|
||||
if eof:
|
||||
if lineno == IN_PYTHON_EOF:
|
||||
return
|
||||
|
||||
if s and s[0] == '#':
|
||||
@@ -208,7 +223,8 @@ def feeder(lineno, s, fn, root, statements, eof=False):
|
||||
|
||||
m = __func_start_regexp__.match(s)
|
||||
if m:
|
||||
__infunc__ = [m.group("func") or "__anonymous", fn, lineno, m.group("py") is not None, m.group("fr") is not None]
|
||||
__infunc__ = m.group("func") or "__anonymous"
|
||||
ast.handleMethodFlags(statements, fn, lineno, __infunc__, m)
|
||||
return
|
||||
|
||||
m = __def_regexp__.match(s)
|
||||
|
||||
@@ -24,16 +24,15 @@
|
||||
# with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
import errno
|
||||
import re
|
||||
import os
|
||||
import re, os
|
||||
import logging
|
||||
import bb.utils
|
||||
from bb.parse import ParseError, resolve_file, ast, logger, handle
|
||||
from bb.parse import ParseError, resolve_file, ast, logger
|
||||
|
||||
__config_regexp__ = re.compile( r"""
|
||||
^
|
||||
(?P<exp>export\s+)?
|
||||
(?P<var>[a-zA-Z0-9\-_+.${}/~]+?)
|
||||
(?P<exp>export\s*)?
|
||||
(?P<var>[a-zA-Z0-9\-~_+.${}/]+?)
|
||||
(\[(?P<flag>[a-zA-Z0-9\-_+.]+)\])?
|
||||
|
||||
\s* (
|
||||
@@ -56,12 +55,10 @@ __config_regexp__ = re.compile( r"""
|
||||
""", re.X)
|
||||
__include_regexp__ = re.compile( r"include\s+(.+)" )
|
||||
__require_regexp__ = re.compile( r"require\s+(.+)" )
|
||||
__export_regexp__ = re.compile( r"export\s+([a-zA-Z0-9\-_+.${}/~]+)$" )
|
||||
__unset_regexp__ = re.compile( r"unset\s+([a-zA-Z0-9\-_+.${}/~]+)$" )
|
||||
__unset_flag_regexp__ = re.compile( r"unset\s+([a-zA-Z0-9\-_+.${}/~]+)\[([a-zA-Z0-9\-_+.]+)\]$" )
|
||||
__export_regexp__ = re.compile( r"export\s+([a-zA-Z0-9\-_+.${}/]+)$" )
|
||||
|
||||
def init(data):
|
||||
topdir = data.getVar('TOPDIR', False)
|
||||
topdir = data.getVar('TOPDIR')
|
||||
if not topdir:
|
||||
data.setVar('TOPDIR', os.getcwd())
|
||||
|
||||
@@ -69,51 +66,40 @@ def init(data):
|
||||
def supports(fn, d):
|
||||
return fn[-5:] == ".conf"
|
||||
|
||||
def include(parentfn, fns, lineno, data, error_out):
|
||||
def include(oldfn, fn, lineno, data, error_out):
|
||||
"""
|
||||
error_out: A string indicating the verb (e.g. "include", "inherit") to be
|
||||
used in a ParseError that will be raised if the file to be included could
|
||||
not be included. Specify False to avoid raising an error in this case.
|
||||
"""
|
||||
fns = data.expand(fns)
|
||||
parentfn = data.expand(parentfn)
|
||||
|
||||
# "include" or "require" accept zero to n space-separated file names to include.
|
||||
for fn in fns.split():
|
||||
include_single_file(parentfn, fn, lineno, data, error_out)
|
||||
|
||||
def include_single_file(parentfn, fn, lineno, data, error_out):
|
||||
"""
|
||||
Helper function for include() which does not expand or split its parameters.
|
||||
"""
|
||||
if parentfn == fn: # prevent infinite recursion
|
||||
if oldfn == fn: # prevent infinite recursion
|
||||
return None
|
||||
|
||||
import bb
|
||||
fn = data.expand(fn)
|
||||
oldfn = data.expand(oldfn)
|
||||
|
||||
if not os.path.isabs(fn):
|
||||
dname = os.path.dirname(parentfn)
|
||||
bbpath = "%s:%s" % (dname, data.getVar("BBPATH"))
|
||||
dname = os.path.dirname(oldfn)
|
||||
bbpath = "%s:%s" % (dname, data.getVar("BBPATH", True))
|
||||
abs_fn, attempts = bb.utils.which(bbpath, fn, history=True)
|
||||
if abs_fn and bb.parse.check_dependency(data, abs_fn):
|
||||
logger.warning("Duplicate inclusion for %s in %s" % (abs_fn, data.getVar('FILE')))
|
||||
bb.warn("Duplicate inclusion for %s in %s" % (abs_fn, data.getVar('FILE', True)))
|
||||
for af in attempts:
|
||||
bb.parse.mark_dependency(data, af)
|
||||
if abs_fn:
|
||||
fn = abs_fn
|
||||
elif bb.parse.check_dependency(data, fn):
|
||||
logger.warning("Duplicate inclusion for %s in %s" % (fn, data.getVar('FILE')))
|
||||
bb.warn("Duplicate inclusion for %s in %s" % (fn, data.getVar('FILE', True)))
|
||||
|
||||
from bb.parse import handle
|
||||
try:
|
||||
bb.parse.handle(fn, data, True)
|
||||
except (IOError, OSError) as exc:
|
||||
if exc.errno == errno.ENOENT:
|
||||
if error_out:
|
||||
raise ParseError("Could not %s file %s" % (error_out, fn), parentfn, lineno)
|
||||
logger.debug(2, "CONF file '%s' not found", fn)
|
||||
else:
|
||||
if error_out:
|
||||
raise ParseError("Could not %s file %s: %s" % (error_out, fn, exc.strerror), parentfn, lineno)
|
||||
else:
|
||||
raise ParseError("Error parsing %s: %s" % (fn, exc.strerror), parentfn, lineno)
|
||||
ret = handle(fn, data, True)
|
||||
except (IOError, OSError):
|
||||
if error_out:
|
||||
raise ParseError("Could not %(error_out)s file %(fn)s" % vars(), oldfn, lineno)
|
||||
logger.debug(2, "CONF file '%s' not found", fn)
|
||||
bb.parse.mark_dependency(data, fn)
|
||||
|
||||
# We have an issue where a UI might want to enforce particular settings such as
|
||||
# an empty DISTRO variable. If configuration files do something like assigning
|
||||
@@ -129,11 +115,14 @@ def handle(fn, data, include):
|
||||
if include == 0:
|
||||
oldfile = None
|
||||
else:
|
||||
oldfile = data.getVar('FILE', False)
|
||||
oldfile = data.getVar('FILE')
|
||||
|
||||
abs_fn = resolve_file(fn, data)
|
||||
f = open(abs_fn, 'r')
|
||||
|
||||
if include:
|
||||
bb.parse.mark_dependency(data, abs_fn)
|
||||
|
||||
statements = ast.StatementGroup()
|
||||
lineno = 0
|
||||
while True:
|
||||
@@ -192,16 +181,6 @@ def feeder(lineno, s, fn, statements):
|
||||
ast.handleExport(statements, fn, lineno, m)
|
||||
return
|
||||
|
||||
m = __unset_regexp__.match(s)
|
||||
if m:
|
||||
ast.handleUnset(statements, fn, lineno, m)
|
||||
return
|
||||
|
||||
m = __unset_flag_regexp__.match(s)
|
||||
if m:
|
||||
ast.handleUnsetFlag(statements, fn, lineno, m)
|
||||
return
|
||||
|
||||
raise ParseError("unparsed line: '%s'" % s, fn, lineno);
|
||||
|
||||
# Add us to the handlers list
|
||||
|
||||
@@ -28,7 +28,11 @@ import sys
|
||||
import warnings
|
||||
from bb.compat import total_ordering
|
||||
from collections import Mapping
|
||||
import sqlite3
|
||||
|
||||
try:
|
||||
import sqlite3
|
||||
except ImportError:
|
||||
from pysqlite2 import dbapi2 as sqlite3
|
||||
|
||||
sqlversion = sqlite3.sqlite_version_info
|
||||
if sqlversion[0] < 3 or (sqlversion[0] == 3 and sqlversion[1] < 3):
|
||||
@@ -88,9 +92,9 @@ class SQLTable(collections.MutableMapping):
|
||||
self._execute("DELETE from %s where key=?;" % self.table, [key])
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
if not isinstance(key, str):
|
||||
if not isinstance(key, basestring):
|
||||
raise TypeError('Only string keys are supported')
|
||||
elif not isinstance(value, str):
|
||||
elif not isinstance(value, basestring):
|
||||
raise TypeError('Only string values are supported')
|
||||
|
||||
data = self._execute("SELECT * from %s where key=?;" %
|
||||
@@ -174,7 +178,7 @@ class PersistData(object):
|
||||
"""
|
||||
Return a list of key + value pairs for a domain
|
||||
"""
|
||||
return list(self.data[domain].items())
|
||||
return self.data[domain].items()
|
||||
|
||||
def getValue(self, domain, key):
|
||||
"""
|
||||
@@ -195,16 +199,13 @@ class PersistData(object):
|
||||
del self.data[domain][key]
|
||||
|
||||
def connect(database):
|
||||
connection = sqlite3.connect(database, timeout=5, isolation_level=None)
|
||||
connection.execute("pragma synchronous = off;")
|
||||
connection.text_factory = str
|
||||
return connection
|
||||
return sqlite3.connect(database, timeout=5, isolation_level=None)
|
||||
|
||||
def persist(domain, d):
|
||||
"""Convenience factory for SQLTable objects based upon metadata"""
|
||||
import bb.utils
|
||||
cachedir = (d.getVar("PERSISTENT_DIR") or
|
||||
d.getVar("CACHE"))
|
||||
cachedir = (d.getVar("PERSISTENT_DIR", True) or
|
||||
d.getVar("CACHE", True))
|
||||
if not cachedir:
|
||||
logger.critical("Please set the 'PERSISTENT_DIR' or 'CACHE' variable")
|
||||
sys.exit(1)
|
||||
|
||||
@@ -17,7 +17,7 @@ class CmdError(RuntimeError):
|
||||
self.msg = msg
|
||||
|
||||
def __str__(self):
|
||||
if not isinstance(self.command, str):
|
||||
if not isinstance(self.command, basestring):
|
||||
cmd = subprocess.list2cmdline(self.command)
|
||||
else:
|
||||
cmd = self.command
|
||||
@@ -64,7 +64,7 @@ class Popen(subprocess.Popen):
|
||||
options.update(kwargs)
|
||||
subprocess.Popen.__init__(self, *args, **options)
|
||||
|
||||
def _logged_communicate(pipe, log, input, extrafiles):
|
||||
def _logged_communicate(pipe, log, input):
|
||||
if pipe.stdin:
|
||||
if input is not None:
|
||||
pipe.stdin.write(input)
|
||||
@@ -79,82 +79,40 @@ def _logged_communicate(pipe, log, input, extrafiles):
|
||||
if pipe.stderr is not None:
|
||||
bb.utils.nonblockingfd(pipe.stderr.fileno())
|
||||
rin.append(pipe.stderr)
|
||||
for fobj, _ in extrafiles:
|
||||
bb.utils.nonblockingfd(fobj.fileno())
|
||||
rin.append(fobj)
|
||||
|
||||
def readextras(selected):
|
||||
for fobj, func in extrafiles:
|
||||
if fobj in selected:
|
||||
try:
|
||||
data = fobj.read()
|
||||
except IOError as err:
|
||||
if err.errno == errno.EAGAIN or err.errno == errno.EWOULDBLOCK:
|
||||
data = None
|
||||
if data is not None:
|
||||
func(data)
|
||||
|
||||
def read_all_pipes(log, rin, outdata, errdata):
|
||||
rlist = rin
|
||||
stdoutbuf = b""
|
||||
stderrbuf = b""
|
||||
|
||||
try:
|
||||
r,w,e = select.select (rlist, [], [], 1)
|
||||
except OSError as e:
|
||||
if e.errno != errno.EINTR:
|
||||
raise
|
||||
|
||||
readextras(r)
|
||||
|
||||
if pipe.stdout in r:
|
||||
data = stdoutbuf + pipe.stdout.read()
|
||||
if data is not None and len(data) > 0:
|
||||
try:
|
||||
data = data.decode("utf-8")
|
||||
outdata.append(data)
|
||||
log.write(data)
|
||||
log.flush()
|
||||
stdoutbuf = b""
|
||||
except UnicodeDecodeError:
|
||||
stdoutbuf = data
|
||||
|
||||
if pipe.stderr in r:
|
||||
data = stderrbuf + pipe.stderr.read()
|
||||
if data is not None and len(data) > 0:
|
||||
try:
|
||||
data = data.decode("utf-8")
|
||||
errdata.append(data)
|
||||
log.write(data)
|
||||
log.flush()
|
||||
stderrbuf = b""
|
||||
except UnicodeDecodeError:
|
||||
stderrbuf = data
|
||||
|
||||
try:
|
||||
# Read all pipes while the process is open
|
||||
while pipe.poll() is None:
|
||||
read_all_pipes(log, rin, outdata, errdata)
|
||||
rlist = rin
|
||||
try:
|
||||
r,w,e = select.select (rlist, [], [], 1)
|
||||
except OSError as e:
|
||||
if e.errno != errno.EINTR:
|
||||
raise
|
||||
|
||||
# Process closed, drain all pipes...
|
||||
read_all_pipes(log, rin, outdata, errdata)
|
||||
finally:
|
||||
if pipe.stdout in r:
|
||||
data = pipe.stdout.read()
|
||||
if data is not None:
|
||||
outdata.append(data)
|
||||
log.write(data)
|
||||
|
||||
if pipe.stderr in r:
|
||||
data = pipe.stderr.read()
|
||||
if data is not None:
|
||||
errdata.append(data)
|
||||
log.write(data)
|
||||
finally:
|
||||
log.flush()
|
||||
|
||||
if pipe.stdout is not None:
|
||||
pipe.stdout.close()
|
||||
if pipe.stderr is not None:
|
||||
pipe.stderr.close()
|
||||
return ''.join(outdata), ''.join(errdata)
|
||||
|
||||
def run(cmd, input=None, log=None, extrafiles=None, **options):
|
||||
def run(cmd, input=None, log=None, **options):
|
||||
"""Convenience function to run a command and return its output, raising an
|
||||
exception when the command fails"""
|
||||
|
||||
if not extrafiles:
|
||||
extrafiles = []
|
||||
|
||||
if isinstance(cmd, str) and not "shell" in options:
|
||||
if isinstance(cmd, basestring) and not "shell" in options:
|
||||
options["shell"] = True
|
||||
|
||||
try:
|
||||
@@ -166,13 +124,9 @@ def run(cmd, input=None, log=None, extrafiles=None, **options):
|
||||
raise CmdError(cmd, exc)
|
||||
|
||||
if log:
|
||||
stdout, stderr = _logged_communicate(pipe, log, input, extrafiles)
|
||||
stdout, stderr = _logged_communicate(pipe, log, input)
|
||||
else:
|
||||
stdout, stderr = pipe.communicate(input)
|
||||
if not stdout is None:
|
||||
stdout = stdout.decode("utf-8")
|
||||
if not stderr is None:
|
||||
stderr = stderr.decode("utf-8")
|
||||
|
||||
if pipe.returncode != 0:
|
||||
raise ExecutionError(cmd, pipe.returncode, stdout, stderr)
|
||||
|
||||
@@ -1,276 +0,0 @@
|
||||
"""
|
||||
BitBake progress handling code
|
||||
"""
|
||||
|
||||
# Copyright (C) 2016 Intel Corporation
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License version 2 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License along
|
||||
# with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
import sys
|
||||
import re
|
||||
import time
|
||||
import inspect
|
||||
import bb.event
|
||||
import bb.build
|
||||
|
||||
class ProgressHandler(object):
|
||||
"""
|
||||
Base class that can pretend to be a file object well enough to be
|
||||
used to build objects to intercept console output and determine the
|
||||
progress of some operation.
|
||||
"""
|
||||
def __init__(self, d, outfile=None):
|
||||
self._progress = 0
|
||||
self._data = d
|
||||
self._lastevent = 0
|
||||
if outfile:
|
||||
self._outfile = outfile
|
||||
else:
|
||||
self._outfile = sys.stdout
|
||||
|
||||
def _fire_progress(self, taskprogress, rate=None):
|
||||
"""Internal function to fire the progress event"""
|
||||
bb.event.fire(bb.build.TaskProgress(taskprogress, rate), self._data)
|
||||
|
||||
def write(self, string):
|
||||
self._outfile.write(string)
|
||||
|
||||
def flush(self):
|
||||
self._outfile.flush()
|
||||
|
||||
def update(self, progress, rate=None):
|
||||
ts = time.time()
|
||||
if progress > 100:
|
||||
progress = 100
|
||||
if progress != self._progress or self._lastevent + 1 < ts:
|
||||
self._fire_progress(progress, rate)
|
||||
self._lastevent = ts
|
||||
self._progress = progress
|
||||
|
||||
class LineFilterProgressHandler(ProgressHandler):
|
||||
"""
|
||||
A ProgressHandler variant that provides the ability to filter out
|
||||
the lines if they contain progress information. Additionally, it
|
||||
filters out anything before the last line feed on a line. This can
|
||||
be used to keep the logs clean of output that we've only enabled for
|
||||
getting progress, assuming that that can be done on a per-line
|
||||
basis.
|
||||
"""
|
||||
def __init__(self, d, outfile=None):
|
||||
self._linebuffer = ''
|
||||
super(LineFilterProgressHandler, self).__init__(d, outfile)
|
||||
|
||||
def write(self, string):
|
||||
self._linebuffer += string
|
||||
while True:
|
||||
breakpos = self._linebuffer.find('\n') + 1
|
||||
if breakpos == 0:
|
||||
break
|
||||
line = self._linebuffer[:breakpos]
|
||||
self._linebuffer = self._linebuffer[breakpos:]
|
||||
# Drop any carriage returns and anything that precedes them
|
||||
lbreakpos = line.rfind('\r') + 1
|
||||
if lbreakpos:
|
||||
line = line[lbreakpos:]
|
||||
if self.writeline(line):
|
||||
super(LineFilterProgressHandler, self).write(line)
|
||||
|
||||
def writeline(self, line):
|
||||
return True
|
||||
|
||||
class BasicProgressHandler(ProgressHandler):
|
||||
def __init__(self, d, regex=r'(\d+)%', outfile=None):
|
||||
super(BasicProgressHandler, self).__init__(d, outfile)
|
||||
self._regex = re.compile(regex)
|
||||
# Send an initial progress event so the bar gets shown
|
||||
self._fire_progress(0)
|
||||
|
||||
def write(self, string):
|
||||
percs = self._regex.findall(string)
|
||||
if percs:
|
||||
progress = int(percs[-1])
|
||||
self.update(progress)
|
||||
super(BasicProgressHandler, self).write(string)
|
||||
|
||||
class OutOfProgressHandler(ProgressHandler):
|
||||
def __init__(self, d, regex, outfile=None):
|
||||
super(OutOfProgressHandler, self).__init__(d, outfile)
|
||||
self._regex = re.compile(regex)
|
||||
# Send an initial progress event so the bar gets shown
|
||||
self._fire_progress(0)
|
||||
|
||||
def write(self, string):
|
||||
nums = self._regex.findall(string)
|
||||
if nums:
|
||||
progress = (float(nums[-1][0]) / float(nums[-1][1])) * 100
|
||||
self.update(progress)
|
||||
super(OutOfProgressHandler, self).write(string)
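For illustration, a minimal usage sketch of the handler above; the datastore d and the sample output lines are invented, and only the two-capture-group regex behaviour shown in OutOfProgressHandler.write() is relied on:

handler = OutOfProgressHandler(d, regex=r'(\d+) of (\d+) items')
for line in ["10 of 200 items\n", "100 of 200 items\n", "200 of 200 items\n"]:
    handler.write(line)  # fires TaskProgress events at 5%, 50% and 100%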
|
||||
|
||||
class MultiStageProgressReporter(object):
|
||||
"""
|
||||
Class which allows reporting progress without the caller
|
||||
having to know where they are in the overall sequence. Useful
|
||||
for tasks made up of python code spread across multiple
|
||||
classes / functions - the progress reporter object can
|
||||
be passed around or stored at the object level and calls
|
||||
to next_stage() and update() made wherever needed.
|
||||
"""
|
||||
def __init__(self, d, stage_weights, debug=False):
|
||||
"""
|
||||
Initialise the progress reporter.
|
||||
|
||||
Parameters:
|
||||
* d: the datastore (needed for firing the events)
|
||||
* stage_weights: a list of weight values, one for each stage.
|
||||
The value is scaled internally so you only need to specify
|
||||
values relative to other values in the list, so if there
|
||||
are two stages and the first takes 2s and the second takes
|
||||
10s you would specify [2, 10] (or [1, 5], it doesn't matter).
|
||||
* debug: specify True (and ensure you call finish() at the end)
|
||||
in order to show a printout of the calculated stage weights
|
||||
based on timing each stage. Use this to determine what the
|
||||
weights should be when you're not sure.
|
||||
"""
|
||||
self._data = d
|
||||
total = sum(stage_weights)
|
||||
self._stage_weights = [float(x)/total for x in stage_weights]
|
||||
self._stage = -1
|
||||
self._base_progress = 0
|
||||
# Send an initial progress event so the bar gets shown
|
||||
self._fire_progress(0)
|
||||
self._debug = debug
|
||||
self._finished = False
|
||||
if self._debug:
|
||||
self._last_time = time.time()
|
||||
self._stage_times = []
|
||||
self._stage_total = None
|
||||
self._callers = []
|
||||
|
||||
def _fire_progress(self, taskprogress):
|
||||
bb.event.fire(bb.build.TaskProgress(taskprogress), self._data)
|
||||
|
||||
def next_stage(self, stage_total=None):
|
||||
"""
|
||||
Move to the next stage.
|
||||
Parameters:
|
||||
* stage_total: optional total for progress within the stage,
|
||||
see update() for details
|
||||
NOTE: you need to call this before the first stage.
|
||||
"""
|
||||
self._stage += 1
|
||||
self._stage_total = stage_total
|
||||
if self._stage == 0:
|
||||
# First stage
|
||||
if self._debug:
|
||||
self._last_time = time.time()
|
||||
else:
|
||||
if self._stage < len(self._stage_weights):
|
||||
self._base_progress = sum(self._stage_weights[:self._stage]) * 100
|
||||
if self._debug:
|
||||
currtime = time.time()
|
||||
self._stage_times.append(currtime - self._last_time)
|
||||
self._last_time = currtime
|
||||
self._callers.append(inspect.getouterframes(inspect.currentframe())[1])
|
||||
elif not self._debug:
|
||||
bb.warn('ProgressReporter: current stage beyond declared number of stages')
|
||||
self._base_progress = 100
|
||||
self._fire_progress(self._base_progress)
|
||||
|
||||
def update(self, stage_progress):
|
||||
"""
|
||||
Update progress within the current stage.
|
||||
Parameters:
|
||||
* stage_progress: progress value within the stage. If stage_total
|
||||
was specified when next_stage() was last called, then this
|
||||
value is considered to be out of stage_total, otherwise it should
|
||||
be a percentage value from 0 to 100.
|
||||
"""
|
||||
if self._stage_total:
|
||||
stage_progress = (float(stage_progress) / self._stage_total) * 100
|
||||
if self._stage < 0:
|
||||
bb.warn('ProgressReporter: update called before first call to next_stage()')
|
||||
elif self._stage < len(self._stage_weights):
|
||||
progress = self._base_progress + (stage_progress * self._stage_weights[self._stage])
|
||||
else:
|
||||
progress = self._base_progress
|
||||
if progress > 100:
|
||||
progress = 100
|
||||
self._fire_progress(progress)
|
||||
|
||||
def finish(self):
|
||||
if self._finished:
|
||||
return
|
||||
self._finished = True
|
||||
if self._debug:
|
||||
import math
|
||||
self._stage_times.append(time.time() - self._last_time)
|
||||
mintime = max(min(self._stage_times), 0.01)
|
||||
self._callers.append(None)
|
||||
stage_weights = [int(math.ceil(x / mintime)) for x in self._stage_times]
|
||||
bb.warn('Stage weights: %s' % stage_weights)
|
||||
out = []
|
||||
for stage_weight, caller in zip(stage_weights, self._callers):
|
||||
if caller:
|
||||
out.append('Up to %s:%d: %d' % (caller[1], caller[2], stage_weight))
|
||||
else:
|
||||
out.append('Up to finish: %d' % stage_weight)
|
||||
bb.warn('Stage times:\n %s' % '\n '.join(out))
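As a rough usage sketch of the reporter described in the docstrings above; the stage weights, the datastore d and the two work functions are hypothetical:

def do_example_task(d):
    progress = MultiStageProgressReporter(d, [2, 10])  # two stages weighted roughly 2:10
    progress.next_stage()               # must be called before the first stage
    prepare_inputs()                    # hypothetical first stage of work
    progress.next_stage(stage_total=50)
    for i in range(50):
        process_item(i)                 # hypothetical second stage of work
        progress.update(i + 1)          # interpreted as progress out of stage_total
    progress.finish()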
|
||||
|
||||
class MultiStageProcessProgressReporter(MultiStageProgressReporter):
|
||||
"""
|
||||
Version of MultiStageProgressReporter intended for use with
|
||||
standalone processes (such as preparing the runqueue)
|
||||
"""
|
||||
def __init__(self, d, processname, stage_weights, debug=False):
|
||||
self._processname = processname
|
||||
self._started = False
|
||||
MultiStageProgressReporter.__init__(self, d, stage_weights, debug)
|
||||
|
||||
def start(self):
|
||||
if not self._started:
|
||||
bb.event.fire(bb.event.ProcessStarted(self._processname, 100), self._data)
|
||||
self._started = True
|
||||
|
||||
def _fire_progress(self, taskprogress):
|
||||
if taskprogress == 0:
|
||||
self.start()
|
||||
return
|
||||
bb.event.fire(bb.event.ProcessProgress(self._processname, taskprogress), self._data)
|
||||
|
||||
def finish(self):
|
||||
MultiStageProgressReporter.finish(self)
|
||||
bb.event.fire(bb.event.ProcessFinished(self._processname), self._data)
|
||||
|
||||
class DummyMultiStageProcessProgressReporter(MultiStageProgressReporter):
|
||||
"""
|
||||
MultiStageProcessProgressReporter that takes the calls and does nothing
|
||||
with them (to avoid a bunch of "if progress_reporter:" checks)
|
||||
"""
|
||||
def __init__(self):
|
||||
MultiStageProcessProgressReporter.__init__(self, "", None, [])
|
||||
|
||||
def _fire_progress(self, taskprogress, rate=None):
|
||||
pass
|
||||
|
||||
def start(self):
|
||||
pass
|
||||
|
||||
def next_stage(self, stage_total=None):
|
||||
pass
|
||||
|
||||
def update(self, stage_progress):
|
||||
pass
|
||||
|
||||
def finish(self):
|
||||
pass
|
||||
@@ -48,6 +48,7 @@ def findProviders(cfgData, dataCache, pkg_pn = None):
|
||||
|
||||
# Need to ensure data store is expanded
|
||||
localdata = data.createCopy(cfgData)
|
||||
bb.data.update_data(localdata)
|
||||
bb.data.expandKeys(localdata)
|
||||
|
||||
preferred_versions = {}
|
||||
@@ -120,14 +121,11 @@ def findPreferredProvider(pn, cfgData, dataCache, pkg_pn = None, item = None):
|
||||
preferred_file = None
|
||||
preferred_ver = None
|
||||
|
||||
# pn can contain '_', e.g. gcc-cross-x86_64 and an override cannot
|
||||
# hence we do this manually rather than use OVERRIDES
|
||||
preferred_v = cfgData.getVar("PREFERRED_VERSION_pn-%s" % pn)
|
||||
if not preferred_v:
|
||||
preferred_v = cfgData.getVar("PREFERRED_VERSION_%s" % pn)
|
||||
if not preferred_v:
|
||||
preferred_v = cfgData.getVar("PREFERRED_VERSION")
|
||||
localdata = data.createCopy(cfgData)
|
||||
localdata.setVar('OVERRIDES', "%s:pn-%s:%s" % (data.getVar('OVERRIDES', localdata), pn, pn))
|
||||
bb.data.update_data(localdata)
|
||||
|
||||
preferred_v = localdata.getVar('PREFERRED_VERSION', True)
|
||||
if preferred_v:
|
||||
m = re.match('(\d+:)*(.*)(_.*)*', preferred_v)
|
||||
if m:
|
||||
@@ -225,7 +223,7 @@ def findBestProvider(pn, cfgData, dataCache, pkg_pn = None, item = None):
|
||||
def _filterProviders(providers, item, cfgData, dataCache):
|
||||
"""
|
||||
Take a list of providers and filter/reorder according to the
|
||||
environment variables
|
||||
environment variables and previous build results
|
||||
"""
|
||||
eligible = []
|
||||
preferred_versions = {}
|
||||
@@ -244,17 +242,17 @@ def _filterProviders(providers, item, cfgData, dataCache):
|
||||
pkg_pn[pn] = []
|
||||
pkg_pn[pn].append(p)
|
||||
|
||||
logger.debug(1, "providers for %s are: %s", item, list(sorted(pkg_pn.keys())))
|
||||
logger.debug(1, "providers for %s are: %s", item, pkg_pn.keys())
|
||||
|
||||
# First add PREFERRED_VERSIONS
|
||||
for pn in sorted(pkg_pn):
|
||||
for pn in pkg_pn:
|
||||
sortpkg_pn[pn] = sortPriorities(pn, dataCache, pkg_pn)
|
||||
preferred_versions[pn] = findPreferredProvider(pn, cfgData, dataCache, sortpkg_pn[pn], item)
|
||||
if preferred_versions[pn][1]:
|
||||
eligible.append(preferred_versions[pn][1])
|
||||
|
||||
# Now add latest versions
|
||||
for pn in sorted(sortpkg_pn):
|
||||
for pn in sortpkg_pn:
|
||||
if pn in preferred_versions and preferred_versions[pn][1]:
|
||||
continue
|
||||
preferred_versions[pn] = findLatestProvider(pn, cfgData, dataCache, sortpkg_pn[pn][0])
|
||||
@@ -282,13 +280,13 @@ def _filterProviders(providers, item, cfgData, dataCache):
|
||||
def filterProviders(providers, item, cfgData, dataCache):
|
||||
"""
|
||||
Take a list of providers and filter/reorder according to the
|
||||
environment variables
|
||||
environment variables and previous build results
|
||||
Takes a "normal" target item
|
||||
"""
|
||||
|
||||
eligible = _filterProviders(providers, item, cfgData, dataCache)
|
||||
|
||||
prefervar = cfgData.getVar('PREFERRED_PROVIDER_%s' % item)
|
||||
prefervar = cfgData.getVar('PREFERRED_PROVIDER_%s' % item, True)
|
||||
if prefervar:
|
||||
dataCache.preferred[item] = prefervar
|
||||
|
||||
@@ -310,56 +308,38 @@ def filterProviders(providers, item, cfgData, dataCache):
|
||||
def filterProvidersRunTime(providers, item, cfgData, dataCache):
|
||||
"""
|
||||
Take a list of providers and filter/reorder according to the
|
||||
environment variables
|
||||
environment variables and previous build results
|
||||
Takes a "runtime" target item
|
||||
"""
|
||||
|
||||
eligible = _filterProviders(providers, item, cfgData, dataCache)
|
||||
|
||||
# First try and match any PREFERRED_RPROVIDER entry
|
||||
prefervar = cfgData.getVar('PREFERRED_RPROVIDER_%s' % item)
|
||||
foundUnique = False
|
||||
if prefervar:
|
||||
for p in eligible:
|
||||
pn = dataCache.pkg_fn[p]
|
||||
if prefervar == pn:
|
||||
logger.verbose("selecting %s to satisfy %s due to PREFERRED_RPROVIDER", pn, item)
|
||||
eligible.remove(p)
|
||||
eligible = [p] + eligible
|
||||
foundUnique = True
|
||||
numberPreferred = 1
|
||||
# Should use dataCache.preferred here?
|
||||
preferred = []
|
||||
preferred_vars = []
|
||||
pns = {}
|
||||
for p in eligible:
|
||||
pns[dataCache.pkg_fn[p]] = p
|
||||
for p in eligible:
|
||||
pn = dataCache.pkg_fn[p]
|
||||
provides = dataCache.pn_provides[pn]
|
||||
for provide in provides:
|
||||
prefervar = cfgData.getVar('PREFERRED_PROVIDER_%s' % provide, True)
|
||||
#logger.debug(1, "checking PREFERRED_PROVIDER_%s (value %s) against %s", provide, prefervar, pns.keys())
|
||||
if prefervar in pns and pns[prefervar] not in preferred:
|
||||
var = "PREFERRED_PROVIDER_%s = %s" % (provide, prefervar)
|
||||
logger.verbose("selecting %s to satisfy runtime %s due to %s", prefervar, item, var)
|
||||
preferred_vars.append(var)
|
||||
pref = pns[prefervar]
|
||||
eligible.remove(pref)
|
||||
eligible = [pref] + eligible
|
||||
preferred.append(pref)
|
||||
break
|
||||
|
||||
# If we didn't find an RPROVIDER entry, try and infer the provider from PREFERRED_PROVIDER entries
|
||||
# by looking through the provides of each eligible recipe and seeing if a PREFERRED_PROVIDER was set.
|
||||
# This is most useful for virtual/ entries rather than having a RPROVIDER per entry.
|
||||
if not foundUnique:
|
||||
# Should use dataCache.preferred here?
|
||||
preferred = []
|
||||
preferred_vars = []
|
||||
pns = {}
|
||||
for p in eligible:
|
||||
pns[dataCache.pkg_fn[p]] = p
|
||||
for p in eligible:
|
||||
pn = dataCache.pkg_fn[p]
|
||||
provides = dataCache.pn_provides[pn]
|
||||
for provide in provides:
|
||||
prefervar = cfgData.getVar('PREFERRED_PROVIDER_%s' % provide)
|
||||
#logger.debug(1, "checking PREFERRED_PROVIDER_%s (value %s) against %s", provide, prefervar, pns.keys())
|
||||
if prefervar in pns and pns[prefervar] not in preferred:
|
||||
var = "PREFERRED_PROVIDER_%s = %s" % (provide, prefervar)
|
||||
logger.verbose("selecting %s to satisfy runtime %s due to %s", prefervar, item, var)
|
||||
preferred_vars.append(var)
|
||||
pref = pns[prefervar]
|
||||
eligible.remove(pref)
|
||||
eligible = [pref] + eligible
|
||||
preferred.append(pref)
|
||||
break
|
||||
|
||||
numberPreferred = len(preferred)
|
||||
numberPreferred = len(preferred)
|
||||
|
||||
if numberPreferred > 1:
|
||||
logger.error("Trying to resolve runtime dependency %s resulted in conflicting PREFERRED_PROVIDER entries being found.\nThe providers found were: %s\nThe PREFERRED_PROVIDER entries resulting in this conflict were: %s. You could set PREFERRED_RPROVIDER_%s" % (item, preferred, preferred_vars, item))
|
||||
logger.error("Trying to resolve runtime dependency %s resulted in conflicting PREFERRED_PROVIDER entries being found.\nThe providers found were: %s\nThe PREFERRED_PROVIDER entries resulting in this conflict were: %s", item, preferred, preferred_vars)
|
||||
|
||||
logger.debug(1, "sorted runtime providers for %s are: %s", item, eligible)
|
||||
|
||||
@@ -399,32 +379,3 @@ def getRuntimeProviders(dataCache, rdepend):
|
||||
logger.debug(1, "Assuming %s is a dynamic package, but it may not exist" % rdepend)
|
||||
|
||||
return rproviders
|
||||
|
||||
|
||||
def buildWorldTargetList(dataCache, task=None):
|
||||
"""
|
||||
Build package list for "bitbake world"
|
||||
"""
|
||||
if dataCache.world_target:
|
||||
return
|
||||
|
||||
logger.debug(1, "collating packages for \"world\"")
|
||||
for f in dataCache.possible_world:
|
||||
terminal = True
|
||||
pn = dataCache.pkg_fn[f]
|
||||
if task and task not in dataCache.task_deps[f]['tasks']:
|
||||
logger.debug(2, "World build skipping %s as task %s doesn't exist", f, task)
|
||||
terminal = False
|
||||
|
||||
for p in dataCache.pn_provides[pn]:
|
||||
if p.startswith('virtual/'):
|
||||
logger.debug(2, "World build skipping %s due to %s provider starting with virtual/", f, p)
|
||||
terminal = False
|
||||
break
|
||||
for pf in dataCache.providers[p]:
|
||||
if dataCache.pkg_fn[pf] != pn:
|
||||
logger.debug(2, "World build skipping %s due to both us and %s providing %s", f, pf, p)
|
||||
terminal = False
|
||||
break
|
||||
if terminal:
|
||||
dataCache.world_target.add(pn)
|
||||
|
||||
@@ -527,7 +527,7 @@ def utility_sed(name, args, interp, env, stdin, stdout, stderr, debugflags):
|
||||
print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')
|
||||
|
||||
# Scan pattern arguments and append a space if necessary
|
||||
for i in range(len(args)):
|
||||
for i in xrange(len(args)):
|
||||
if not RE_SED.search(args[i]):
|
||||
continue
|
||||
args[i] = args[i] + ' '
|
||||
|
||||
@@ -474,7 +474,7 @@ class Environment:
|
||||
"""
|
||||
# Save and remove previous arguments
|
||||
prevargs = []
|
||||
for i in range(int(self._env['#'])):
|
||||
for i in xrange(int(self._env['#'])):
|
||||
i = str(i+1)
|
||||
prevargs.append(self._env[i])
|
||||
del self._env[i]
|
||||
@@ -488,7 +488,7 @@ class Environment:
|
||||
return prevargs
|
||||
|
||||
def get_positional_args(self):
|
||||
return [self._env[str(i+1)] for i in range(int(self._env['#']))]
|
||||
return [self._env[str(i+1)] for i in xrange(int(self._env['#']))]
|
||||
|
||||
def get_variables(self):
|
||||
return dict(self._env)
|
||||
|
||||
@@ -20,7 +20,7 @@ except NameError:
|
||||
from Set import Set as set
|
||||
|
||||
from ply import lex
|
||||
from bb.pysh.sherrors import *
|
||||
from sherrors import *
|
||||
|
||||
class NeedMore(Exception):
|
||||
pass
|
||||
|
||||
@@ -10,11 +10,11 @@
|
||||
import os.path
|
||||
import sys
|
||||
|
||||
import bb.pysh.pyshlex as pyshlex
|
||||
import pyshlex
|
||||
tokens = pyshlex.tokens
|
||||
|
||||
from ply import yacc
|
||||
import bb.pysh.sherrors as sherrors
|
||||
import sherrors
|
||||
|
||||
class IORedirect:
|
||||
def __init__(self, op, filename, io_number=None):
|
||||
|
||||
@@ -38,4 +38,4 @@ class ExitSignal(ShellError):
|
||||
|
||||
class ReturnSignal(ShellError):
|
||||
"""Exit signal."""
|
||||
pass
|
||||
pass
|
||||
@@ -1,116 +0,0 @@
|
||||
"""
|
||||
BitBake 'remotedata' module
|
||||
|
||||
Provides support for using a datastore from the bitbake client
|
||||
"""
|
||||
|
||||
# Copyright (C) 2016 Intel Corporation
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License version 2 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License along
|
||||
# with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
import bb.data
|
||||
|
||||
class RemoteDatastores:
|
||||
"""Used on the server side to manage references to server-side datastores"""
|
||||
def __init__(self, cooker):
|
||||
self.cooker = cooker
|
||||
self.datastores = {}
|
||||
self.locked = []
|
||||
self.nextindex = 1
|
||||
|
||||
def __len__(self):
|
||||
return len(self.datastores)
|
||||
|
||||
def __getitem__(self, key):
|
||||
if key is None:
|
||||
return self.cooker.data
|
||||
else:
|
||||
return self.datastores[key]
|
||||
|
||||
def items(self):
|
||||
return self.datastores.items()
|
||||
|
||||
def store(self, d, locked=False):
|
||||
"""
|
||||
Put a datastore into the collection. If locked=True then the datastore
|
||||
is understood to be managed externally and cannot be released by calling
|
||||
release().
|
||||
"""
|
||||
idx = self.nextindex
|
||||
self.datastores[idx] = d
|
||||
if locked:
|
||||
self.locked.append(idx)
|
||||
self.nextindex += 1
|
||||
return idx
|
||||
|
||||
def check_store(self, d, locked=False):
|
||||
"""
|
||||
Put a datastore into the collection if it's not already in there;
|
||||
in either case return the index
|
||||
"""
|
||||
for key, val in self.datastores.items():
|
||||
if val is d:
|
||||
idx = key
|
||||
break
|
||||
else:
|
||||
idx = self.store(d, locked)
|
||||
return idx
|
||||
|
||||
def release(self, idx):
|
||||
"""Discard a datastore in the collection"""
|
||||
if idx in self.locked:
|
||||
raise Exception('Tried to release locked datastore %d' % idx)
|
||||
del self.datastores[idx]
|
||||
|
||||
def receive_datastore(self, remote_data):
|
||||
"""Receive a datastore object sent from the client (as prepared by transmit_datastore())"""
|
||||
dct = dict(remote_data)
|
||||
d = bb.data_smart.DataSmart()
|
||||
d.dict = dct
|
||||
while True:
|
||||
if '_remote_data' in dct:
|
||||
dsindex = dct['_remote_data']['_content']
|
||||
del dct['_remote_data']
|
||||
if dsindex is None:
|
||||
dct['_data'] = self.cooker.data.dict
|
||||
else:
|
||||
dct['_data'] = self.datastores[dsindex].dict
|
||||
break
|
||||
elif '_data' in dct:
|
||||
idct = dict(dct['_data'])
|
||||
dct['_data'] = idct
|
||||
dct = idct
|
||||
else:
|
||||
break
|
||||
return d
|
||||
|
||||
@staticmethod
|
||||
def transmit_datastore(d):
|
||||
"""Prepare a datastore object for sending over IPC from the client end"""
|
||||
# FIXME content might be a dict, need to turn that into a list as well
|
||||
def copy_dicts(dct):
|
||||
if '_remote_data' in dct:
|
||||
dsindex = dct['_remote_data']['_content'].dsindex
|
||||
newdct = dct.copy()
|
||||
newdct['_remote_data'] = {'_content': dsindex}
|
||||
return list(newdct.items())
|
||||
elif '_data' in dct:
|
||||
newdct = dct.copy()
|
||||
newdata = copy_dicts(dct['_data'])
|
||||
if newdata:
|
||||
newdct['_data'] = newdata
|
||||
return list(newdct.items())
|
||||
return None
|
||||
main_dict = copy_dicts(d.dict)
|
||||
return main_dict
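A brief sketch of the store/release API documented above; cooker and d are hypothetical stand-ins, and only methods defined in this class are used:

rd = RemoteDatastores(cooker)
idx = rd.store(d)                  # returns an index for later lookup
assert rd[idx] is d
assert rd.check_store(d) == idx    # same datastore, so the same index comes back
rd.release(idx)                    # raises if the datastore was stored with locked=True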
|
||||
@@ -18,4 +18,79 @@
|
||||
# with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
""" Base code for Bitbake server process
|
||||
|
||||
Having a common base for all BitBake server classes ensures a consistent
|
||||
approach to the interface and minimizes the risks associated with code duplication.
|
||||
|
||||
"""
|
||||
|
||||
""" BaseImplServer() the base class for all XXServer() implementations.
|
||||
|
||||
These classes contain the actual code that runs the server side, i.e.
|
||||
listen for the commands and execute them. Although these implementations
|
||||
contain all the data of the original bitbake command, i.e. the cooker instance,
|
||||
they may well run in a different process or even on a different machine.
|
||||
|
||||
"""
|
||||
|
||||
class BaseImplServer():
|
||||
def __init__(self):
|
||||
self._idlefuns = {}
|
||||
|
||||
def addcooker(self, cooker):
|
||||
self.cooker = cooker
|
||||
|
||||
def register_idle_function(self, function, data):
|
||||
"""Register a function to be called while the server is idle"""
|
||||
assert hasattr(function, '__call__')
|
||||
self._idlefuns[function] = data
|
||||
|
||||
|
||||
|
||||
""" BitBakeBaseServerConnection class is the common ancestor to all
|
||||
BitBakeServerConnection classes.
|
||||
|
||||
These classes control the remote server. The only command currently
|
||||
implemented is the terminate() command.
|
||||
|
||||
"""
|
||||
|
||||
class BitBakeBaseServerConnection():
|
||||
def __init__(self, serverImpl):
|
||||
pass
|
||||
|
||||
def terminate(self):
|
||||
pass
|
||||
|
||||
|
||||
""" BitBakeBaseServer class is the common ancestor to all Bitbake servers
|
||||
|
||||
Derive this class in order to implement a BitBakeServer which is the
|
||||
controlling stub for the actual server implementation
|
||||
|
||||
"""
|
||||
class BitBakeBaseServer(object):
|
||||
def initServer(self):
|
||||
self.serverImpl = None # we ensure a runtime crash if not overloaded
|
||||
self.connection = None
|
||||
return
|
||||
|
||||
def addcooker(self, cooker):
|
||||
self.cooker = cooker
|
||||
self.serverImpl.addcooker(cooker)
|
||||
|
||||
def getServerIdleCB(self):
|
||||
return self.serverImpl.register_idle_function
|
||||
|
||||
def saveConnectionDetails(self):
|
||||
return
|
||||
|
||||
def detach(self):
|
||||
return
|
||||
|
||||
def establishConnection(self, featureset):
|
||||
raise "Must redefine the %s.establishConnection()" % self.__class__.__name__
|
||||
|
||||
def endSession(self):
|
||||
self.connection.terminate()
|
||||
|
||||
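To make the relationship between these base classes concrete, here is a minimal, hypothetical sketch (not part of the patch; the Toy* names are invented for illustration) of how a server backend would plug into them:

# Illustrative sketch only -- a hypothetical backend built on the base classes above.
class ToyImplServer(BaseImplServer):
    def serve_forever(self):
        # run the registered idle callbacks whenever no command is pending
        while True:
            for function, data in list(self._idlefuns.items()):
                function(self, data, False)

class ToyServer(BitBakeBaseServer):
    def initServer(self):
        self.serverImpl = ToyImplServer()   # overrides the base, which leaves this as None
        self.connection = None

    def establishConnection(self, featureset):
        self.connection = BitBakeBaseServerConnection(self.serverImpl)
        return self.connection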
@@ -22,254 +22,108 @@
|
||||
|
||||
import bb
|
||||
import bb.event
|
||||
import itertools
|
||||
import logging
|
||||
import multiprocessing
|
||||
import threading
|
||||
import array
|
||||
import os
|
||||
import signal
|
||||
import sys
|
||||
import time
|
||||
import select
|
||||
import socket
|
||||
import subprocess
|
||||
import errno
|
||||
import re
|
||||
import datetime
|
||||
import bb.server.xmlrpcserver
|
||||
from bb import daemonize
|
||||
from multiprocessing import queues
|
||||
from Queue import Empty
|
||||
from multiprocessing import Event, Process, util, Queue, Pipe, queues, Manager
|
||||
|
||||
from . import BitBakeBaseServer, BitBakeBaseServerConnection, BaseImplServer
|
||||
|
||||
logger = logging.getLogger('BitBake')
|
||||
|
||||
class ProcessTimeout(SystemExit):
|
||||
pass
|
||||
class ServerCommunicator():
|
||||
def __init__(self, connection, event_handle):
|
||||
self.connection = connection
|
||||
self.event_handle = event_handle
|
||||
|
||||
class ProcessServer(multiprocessing.Process):
|
||||
def runCommand(self, command):
|
||||
# @todo try/except
|
||||
self.connection.send(command)
|
||||
|
||||
while True:
|
||||
# don't let the user ctrl-c while we're waiting for a response
|
||||
try:
|
||||
if self.connection.poll(20):
|
||||
return self.connection.recv()
|
||||
else:
|
||||
bb.fatal("Timeout while attempting to communicate with bitbake server")
|
||||
except KeyboardInterrupt:
|
||||
pass
|
||||
|
||||
def getEventHandle(self):
|
||||
return self.event_handle.value
|
||||
|
||||
class EventAdapter():
|
||||
"""
|
||||
Adapter to wrap our event queue since the caller (bb.event) expects to
|
||||
call a send() method, but our actual queue only has put()
|
||||
"""
|
||||
def __init__(self, queue):
|
||||
self.queue = queue
|
||||
|
||||
def send(self, event):
|
||||
try:
|
||||
self.queue.put(event)
|
||||
except Exception as err:
|
||||
print("EventAdapter puked: %s" % str(err))
|
||||
|
||||
|
||||
class ProcessServer(Process, BaseImplServer):
|
||||
profile_filename = "profile.log"
|
||||
profile_processed_filename = "profile.log.processed"
|
||||
|
||||
def __init__(self, lock, sock, sockname):
|
||||
multiprocessing.Process.__init__(self)
|
||||
self.command_channel = False
|
||||
self.command_channel_reply = False
|
||||
def __init__(self, command_channel, event_queue, featurelist):
|
||||
BaseImplServer.__init__(self)
|
||||
Process.__init__(self)
|
||||
self.command_channel = command_channel
|
||||
self.event_queue = event_queue
|
||||
self.event = EventAdapter(event_queue)
|
||||
self.featurelist = featurelist
|
||||
self.quit = False
|
||||
self.heartbeat_seconds = 1 # default, BB_HEARTBEAT_EVENT will be checked once we have a datastore.
|
||||
self.next_heartbeat = time.time()
|
||||
|
||||
self.event_handle = None
|
||||
self.haveui = False
|
||||
self.lastui = False
|
||||
self.xmlrpc = False
|
||||
|
||||
self._idlefuns = {}
|
||||
|
||||
self.bitbake_lock = lock
|
||||
self.sock = sock
|
||||
self.sockname = sockname
|
||||
|
||||
def register_idle_function(self, function, data):
|
||||
"""Register a function to be called while the server is idle"""
|
||||
assert hasattr(function, '__call__')
|
||||
self._idlefuns[function] = data
|
||||
self.quitin, self.quitout = Pipe()
|
||||
self.event_handle = multiprocessing.Value("i")
|
||||
|
||||
def run(self):
|
||||
for event in bb.event.ui_queue:
|
||||
self.event_queue.put(event)
|
||||
self.event_handle.value = bb.event.register_UIHhandler(self)
|
||||
|
||||
if self.xmlrpcinterface[0]:
|
||||
self.xmlrpc = bb.server.xmlrpcserver.BitBakeXMLRPCServer(self.xmlrpcinterface, self.cooker, self)
|
||||
|
||||
print("Bitbake XMLRPC server address: %s, server port: %s" % (self.xmlrpc.host, self.xmlrpc.port))
|
||||
|
||||
heartbeat_event = self.cooker.data.getVar('BB_HEARTBEAT_EVENT')
|
||||
if heartbeat_event:
|
||||
try:
|
||||
self.heartbeat_seconds = float(heartbeat_event)
|
||||
except:
|
||||
bb.warn('Ignoring invalid BB_HEARTBEAT_EVENT=%s, must be a float specifying seconds.' % heartbeat_event)
|
||||
|
||||
self.timeout = self.server_timeout or self.cooker.data.getVar('BB_SERVER_TIMEOUT')
|
||||
try:
|
||||
if self.timeout:
|
||||
self.timeout = float(self.timeout)
|
||||
except:
|
||||
bb.warn('Ignoring invalid BB_SERVER_TIMEOUT=%s, must be a float specifying seconds.' % self.timeout)
|
||||
|
||||
|
||||
try:
|
||||
self.bitbake_lock.seek(0)
|
||||
self.bitbake_lock.truncate()
|
||||
if self.xmlrpc:
|
||||
self.bitbake_lock.write("%s %s:%s\n" % (os.getpid(), self.xmlrpc.host, self.xmlrpc.port))
|
||||
else:
|
||||
self.bitbake_lock.write("%s\n" % (os.getpid()))
|
||||
self.bitbake_lock.flush()
|
||||
except Exception as e:
|
||||
print("Error writing to lock file: %s" % str(e))
|
||||
pass
|
||||
|
||||
if self.cooker.configuration.profile:
|
||||
try:
|
||||
import cProfile as profile
|
||||
except:
|
||||
import profile
|
||||
prof = profile.Profile()
|
||||
|
||||
ret = profile.Profile.runcall(prof, self.main)
|
||||
|
||||
prof.dump_stats("profile.log")
|
||||
bb.utils.process_profilelog("profile.log")
|
||||
print("Raw profiling information saved to profile.log and processed statistics to profile.log.processed")
|
||||
|
||||
else:
|
||||
ret = self.main()
|
||||
|
||||
return ret
|
||||
bb.cooker.server_main(self.cooker, self.main)
|
||||
|
||||
def main(self):
|
||||
self.cooker.pre_serve()
|
||||
|
||||
bb.utils.set_process_name("Cooker")
|
||||
|
||||
ready = []
|
||||
|
||||
self.controllersock = False
|
||||
fds = [self.sock]
|
||||
if self.xmlrpc:
|
||||
fds.append(self.xmlrpc)
|
||||
print("Entering server connection loop")
|
||||
|
||||
def disconnect_client(self, fds):
|
||||
if not self.haveui:
|
||||
return
|
||||
print("Disconnecting Client")
|
||||
fds.remove(self.controllersock)
|
||||
fds.remove(self.command_channel)
|
||||
bb.event.unregister_UIHhandler(self.event_handle, True)
|
||||
self.command_channel_reply.writer.close()
|
||||
self.event_writer.writer.close()
|
||||
del self.event_writer
|
||||
self.controllersock.close()
|
||||
self.controllersock = False
|
||||
self.haveui = False
|
||||
self.lastui = time.time()
|
||||
self.cooker.clientComplete()
|
||||
if self.timeout is None:
|
||||
print("No timeout, exiting.")
|
||||
self.quit = True
|
||||
|
||||
# Ignore SIGINT within the server, as all SIGINT handling is done by
|
||||
# the UI and communicated to us
|
||||
self.quitin.close()
|
||||
signal.signal(signal.SIGINT, signal.SIG_IGN)
|
||||
while not self.quit:
|
||||
if self.sock in ready:
|
||||
self.controllersock, address = self.sock.accept()
|
||||
if self.haveui:
|
||||
print("Dropping connection attempt as we have a UI %s" % (str(ready)))
|
||||
self.controllersock.close()
|
||||
else:
|
||||
print("Accepting %s" % (str(ready)))
|
||||
fds.append(self.controllersock)
|
||||
if self.controllersock in ready:
|
||||
try:
|
||||
print("Connecting Client")
|
||||
ui_fds = recvfds(self.controllersock, 3)
|
||||
|
||||
# Where to write events to
|
||||
writer = ConnectionWriter(ui_fds[0])
|
||||
self.event_handle = bb.event.register_UIHhandler(writer, True)
|
||||
self.event_writer = writer
|
||||
|
||||
# Where to read commands from
|
||||
reader = ConnectionReader(ui_fds[1])
|
||||
fds.append(reader)
|
||||
self.command_channel = reader
|
||||
|
||||
# Where to send command return values to
|
||||
writer = ConnectionWriter(ui_fds[2])
|
||||
self.command_channel_reply = writer
|
||||
|
||||
self.haveui = True
|
||||
|
||||
except (EOFError, OSError):
|
||||
disconnect_client(self, fds)
|
||||
|
||||
if not self.timeout == -1.0 and not self.haveui and self.lastui and self.timeout and \
|
||||
(self.lastui + self.timeout) < time.time():
|
||||
print("Server timeout, exiting.")
|
||||
self.quit = True
|
||||
|
||||
if self.command_channel in ready:
|
||||
try:
|
||||
command = self.command_channel.get()
|
||||
except EOFError:
|
||||
# Client connection shutting down
|
||||
ready = []
|
||||
disconnect_client(self, fds)
|
||||
continue
|
||||
if command[0] == "terminateServer":
|
||||
try:
|
||||
if self.command_channel.poll():
|
||||
command = self.command_channel.recv()
|
||||
self.runCommand(command)
|
||||
if self.quitout.poll():
|
||||
self.quitout.recv()
|
||||
self.quit = True
|
||||
continue
|
||||
try:
|
||||
print("Running command %s" % command)
|
||||
self.command_channel_reply.send(self.cooker.command.runCommand(command))
|
||||
except Exception as e:
|
||||
logger.exception('Exception in server main event loop running command %s (%s)' % (command, str(e)))
|
||||
|
||||
if self.xmlrpc in ready:
|
||||
self.xmlrpc.handle_requests()
|
||||
self.idle_commands(.1, [self.event_queue._reader, self.command_channel, self.quitout])
|
||||
except Exception:
|
||||
logger.exception('Running command %s', command)
|
||||
|
||||
ready = self.idle_commands(.1, fds)
|
||||
self.event_queue.close()
|
||||
bb.event.unregister_UIHhandler(self.event_handle.value)
|
||||
self.command_channel.close()
|
||||
self.cooker.shutdown(True)
|
||||
|
||||
print("Exiting")
|
||||
# Remove the socket file so we don't get any more connections to avoid races
|
||||
os.unlink(self.sockname)
|
||||
self.sock.close()
|
||||
|
||||
try:
|
||||
self.cooker.shutdown(True)
|
||||
self.cooker.notifier.stop()
|
||||
self.cooker.confignotifier.stop()
|
||||
except:
|
||||
pass
|
||||
|
||||
self.cooker.post_serve()
|
||||
|
||||
# Finally release the lockfile but warn about other processes holding it open
|
||||
lock = self.bitbake_lock
|
||||
lockfile = lock.name
|
||||
lock.close()
|
||||
lock = None
|
||||
|
||||
while not lock:
|
||||
with bb.utils.timeout(3):
|
||||
lock = bb.utils.lockfile(lockfile, shared=False, retry=False, block=True)
|
||||
if not lock:
|
||||
# Some systems may not have lsof available
|
||||
procs = None
|
||||
try:
|
||||
procs = subprocess.check_output(["lsof", '-w', lockfile], stderr=subprocess.STDOUT)
|
||||
except OSError as e:
|
||||
if e.errno != errno.ENOENT:
|
||||
raise
|
||||
if procs is None:
|
||||
# Fall back to fuser if lsof is unavailable
|
||||
try:
|
||||
procs = subprocess.check_output(["fuser", '-v', lockfile], stderr=subprocess.STDOUT)
|
||||
except OSError as e:
|
||||
if e.errno != errno.ENOENT:
|
||||
raise
|
||||
|
||||
msg = "Delaying shutdown due to active processes which appear to be holding bitbake.lock"
|
||||
if procs:
|
||||
msg += ":\n%s" % str(procs)
|
||||
print(msg)
|
||||
return
|
||||
# We hold the lock so we can remove the file (hide stale pid data)
|
||||
bb.utils.remove(lockfile)
|
||||
bb.utils.unlockfile(lock)
|
||||
|
||||
def idle_commands(self, delay, fds=None):
|
||||
def idle_commands(self, delay, fds = []):
|
||||
nextsleep = delay
|
||||
if not fds:
|
||||
fds = []
|
||||
|
||||
for function, data in list(self._idlefuns.items()):
|
||||
for function, data in self._idlefuns.items():
|
||||
try:
|
||||
retval = function(self, data, False)
|
||||
if retval is False:
|
||||
@@ -277,348 +131,106 @@ class ProcessServer(multiprocessing.Process):
|
||||
nextsleep = None
|
||||
elif retval is True:
|
||||
nextsleep = None
|
||||
elif isinstance(retval, float) and nextsleep:
|
||||
if (retval < nextsleep):
|
||||
nextsleep = retval
|
||||
elif nextsleep is None:
|
||||
continue
|
||||
else:
|
||||
fds = fds + retval
|
||||
except SystemExit:
|
||||
raise
|
||||
except Exception as exc:
|
||||
if not isinstance(exc, bb.BBHandledException):
|
||||
logger.exception('Running idle function')
|
||||
del self._idlefuns[function]
|
||||
self.quit = True
|
||||
|
||||
# Create new heartbeat event?
|
||||
now = time.time()
|
||||
if now >= self.next_heartbeat:
|
||||
# We might have missed heartbeats. Just trigger once in
|
||||
# that case and continue after the usual delay.
|
||||
self.next_heartbeat += self.heartbeat_seconds
|
||||
if self.next_heartbeat <= now:
|
||||
self.next_heartbeat = now + self.heartbeat_seconds
|
||||
heartbeat = bb.event.HeartbeatEvent(now)
|
||||
bb.event.fire(heartbeat, self.cooker.data)
|
||||
if nextsleep and now + nextsleep > self.next_heartbeat:
|
||||
# Shorten timeout so that we wake up in time for
|
||||
# the heartbeat.
|
||||
nextsleep = self.next_heartbeat - now
|
||||
except Exception:
|
||||
logger.exception('Running idle function')
|
||||
|
||||
if nextsleep is not None:
|
||||
if self.xmlrpc:
|
||||
nextsleep = self.xmlrpc.get_timeout(nextsleep)
|
||||
try:
|
||||
return select.select(fds,[],[],nextsleep)[0]
|
||||
except InterruptedError:
|
||||
# Ignore EINTR
|
||||
return []
|
||||
else:
|
||||
return select.select(fds,[],[],0)[0]
|
||||
|
||||
|
||||
class ServerCommunicator():
|
||||
def __init__(self, connection, recv):
|
||||
self.connection = connection
|
||||
self.recv = recv
|
||||
select.select(fds,[],[],nextsleep)
|
||||
|
||||
def runCommand(self, command):
|
||||
self.connection.send(command)
|
||||
if not self.recv.poll(30):
|
||||
raise ProcessTimeout("Timeout while waiting for a reply from the bitbake server")
|
||||
return self.recv.get()
|
||||
"""
|
||||
Run a cooker command on the server
|
||||
"""
|
||||
self.command_channel.send(self.cooker.command.runCommand(command))
|
||||
|
||||
def updateFeatureSet(self, featureset):
|
||||
_, error = self.runCommand(["setFeatures", featureset])
|
||||
def stop(self):
|
||||
self.quitin.send("quit")
|
||||
self.quitin.close()
|
||||
|
||||
class BitBakeProcessServerConnection(BitBakeBaseServerConnection):
|
||||
def __init__(self, serverImpl, ui_channel, event_queue):
|
||||
self.procserver = serverImpl
|
||||
self.ui_channel = ui_channel
|
||||
self.event_queue = event_queue
|
||||
self.connection = ServerCommunicator(self.ui_channel, self.procserver.event_handle)
|
||||
self.events = self.event_queue
|
||||
|
||||
def sigterm_terminate(self):
|
||||
bb.error("UI received SIGTERM")
|
||||
self.terminate()
|
||||
|
||||
def terminate(self):
|
||||
def flushevents():
|
||||
while True:
|
||||
try:
|
||||
event = self.event_queue.get(block=False)
|
||||
except (Empty, IOError):
|
||||
break
|
||||
if isinstance(event, logging.LogRecord):
|
||||
logger.handle(event)
|
||||
|
||||
signal.signal(signal.SIGINT, signal.SIG_IGN)
|
||||
self.procserver.stop()
|
||||
|
||||
while self.procserver.is_alive():
|
||||
flushevents()
|
||||
self.procserver.join(0.1)
|
||||
|
||||
self.ui_channel.close()
|
||||
self.event_queue.close()
|
||||
self.event_queue.setexit()
|
||||
|
||||
# Wrap Queue to provide API which isn't server implementation specific
|
||||
class ProcessEventQueue(multiprocessing.queues.Queue):
|
||||
def __init__(self, maxsize):
|
||||
multiprocessing.queues.Queue.__init__(self, maxsize)
|
||||
self.exit = False
|
||||
|
||||
def setexit(self):
|
||||
self.exit = True
|
||||
|
||||
def waitEvent(self, timeout):
|
||||
if self.exit:
|
||||
raise KeyboardInterrupt()
|
||||
try:
|
||||
return self.get(True, timeout)
|
||||
except Empty:
|
||||
return None
|
||||
|
||||
def getEvent(self):
|
||||
try:
|
||||
return self.get(False)
|
||||
except Empty:
|
||||
return None
|
||||
|
||||
|
||||
class BitBakeServer(BitBakeBaseServer):
|
||||
def initServer(self):
|
||||
# establish communication channels. We use bidirectional pipes for
|
||||
# ui <--> server command/response pairs
|
||||
# and a queue for server -> ui event notifications
|
||||
#
|
||||
self.ui_channel, self.server_channel = Pipe()
|
||||
self.event_queue = ProcessEventQueue(0)
|
||||
self.serverImpl = ProcessServer(self.server_channel, self.event_queue, None)
|
||||
|
||||
def detach(self):
|
||||
self.serverImpl.start()
|
||||
return
|
||||
|
||||
def establishConnection(self, featureset):
|
||||
|
||||
self.connection = BitBakeProcessServerConnection(self.serverImpl, self.ui_channel, self.event_queue)
|
||||
|
||||
_, error = self.connection.connection.runCommand(["setFeatures", featureset])
|
||||
if error:
|
||||
logger.error("Unable to set the cooker to the correct featureset: %s" % error)
|
||||
raise BaseException(error)
|
||||
|
||||
def getEventHandle(self):
|
||||
handle, error = self.runCommand(["getUIHandlerNum"])
|
||||
if error:
|
||||
logger.error("Unable to get UI Handler Number: %s" % error)
|
||||
raise BaseException(error)
|
||||
|
||||
return handle
|
||||
|
||||
def terminateServer(self):
|
||||
self.connection.send(['terminateServer'])
|
||||
return
|
||||
|
||||
class BitBakeProcessServerConnection(object):
|
||||
def __init__(self, ui_channel, recv, eq, sock):
|
||||
self.connection = ServerCommunicator(ui_channel, recv)
|
||||
self.events = eq
|
||||
# Save sock so it doesn't get gc'd for the life of our connection
|
||||
self.socket_connection = sock
|
||||
|
||||
def terminate(self):
|
||||
self.socket_connection.close()
|
||||
self.connection.connection.close()
|
||||
self.connection.recv.close()
|
||||
return
|
||||
|
||||
class BitBakeServer(object):
|
||||
start_log_format = '--- Starting bitbake server pid %s at %s ---'
|
||||
start_log_datetime_format = '%Y-%m-%d %H:%M:%S.%f'
|
||||
|
||||
def __init__(self, lock, sockname, configuration, featureset):
|
||||
|
||||
self.configuration = configuration
|
||||
self.featureset = featureset
|
||||
self.sockname = sockname
|
||||
self.bitbake_lock = lock
|
||||
self.readypipe, self.readypipein = os.pipe()
|
||||
|
||||
# Create server control socket
|
||||
if os.path.exists(sockname):
|
||||
os.unlink(sockname)
|
||||
|
||||
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
|
||||
# AF_UNIX has path length issues so chdir here to workaround
|
||||
cwd = os.getcwd()
|
||||
logfile = os.path.join(cwd, "bitbake-cookerdaemon.log")
|
||||
|
||||
try:
|
||||
os.chdir(os.path.dirname(sockname))
|
||||
self.sock.bind(os.path.basename(sockname))
|
||||
finally:
|
||||
os.chdir(cwd)
|
||||
self.sock.listen(1)
|
||||
|
||||
os.set_inheritable(self.sock.fileno(), True)
|
||||
startdatetime = datetime.datetime.now()
|
||||
bb.daemonize.createDaemon(self._startServer, logfile)
|
||||
self.sock.close()
|
||||
self.bitbake_lock.close()
|
||||
|
||||
ready = ConnectionReader(self.readypipe)
|
||||
r = ready.poll(30)
|
||||
if r:
|
||||
r = ready.get()
|
||||
if not r or r != "ready":
|
||||
ready.close()
|
||||
bb.error("Unable to start bitbake server")
|
||||
if os.path.exists(logfile):
|
||||
logstart_re = re.compile(self.start_log_format % ('([0-9]+)', '([0-9-]+ [0-9:.]+)'))
|
||||
started = False
|
||||
lines = []
|
||||
with open(logfile, "r") as f:
|
||||
for line in f:
|
||||
if started:
|
||||
lines.append(line)
|
||||
else:
|
||||
res = logstart_re.match(line.rstrip())
|
||||
if res:
|
||||
ldatetime = datetime.datetime.strptime(res.group(2), self.start_log_datetime_format)
|
||||
if ldatetime >= startdatetime:
|
||||
started = True
|
||||
lines.append(line)
|
||||
if lines:
|
||||
if len(lines) > 10:
|
||||
bb.error("Last 10 lines of server log for this session (%s):\n%s" % (logfile, "".join(lines[-10:])))
|
||||
else:
|
||||
bb.error("Server log for this session (%s):\n%s" % (logfile, "".join(lines)))
|
||||
raise SystemExit(1)
|
||||
ready.close()
|
||||
os.close(self.readypipein)
|
||||
|
||||
def _startServer(self):
|
||||
print(self.start_log_format % (os.getpid(), datetime.datetime.now().strftime(self.start_log_datetime_format)))
|
||||
server = ProcessServer(self.bitbake_lock, self.sock, self.sockname)
|
||||
self.configuration.setServerRegIdleCallback(server.register_idle_function)
|
||||
writer = ConnectionWriter(self.readypipein)
|
||||
try:
|
||||
self.cooker = bb.cooker.BBCooker(self.configuration, self.featureset)
|
||||
writer.send("ready")
|
||||
except:
|
||||
writer.send("fail")
|
||||
raise
|
||||
finally:
|
||||
os.close(self.readypipein)
|
||||
server.cooker = self.cooker
|
||||
server.server_timeout = self.configuration.server_timeout
|
||||
server.xmlrpcinterface = self.configuration.xmlrpcinterface
|
||||
print("Started bitbake server pid %d" % os.getpid())
|
||||
server.start()
|
||||
|
||||
def connectProcessServer(sockname, featureset):
|
||||
# Connect to socket
|
||||
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
|
||||
# AF_UNIX has path length issues so chdir here to workaround
|
||||
cwd = os.getcwd()
|
||||
|
||||
try:
|
||||
os.chdir(os.path.dirname(sockname))
|
||||
sock.connect(os.path.basename(sockname))
|
||||
finally:
|
||||
os.chdir(cwd)
|
||||
|
||||
readfd = writefd = readfd1 = writefd1 = readfd2 = writefd2 = None
|
||||
eq = command_chan_recv = command_chan = None
|
||||
|
||||
try:
|
||||
|
||||
# Send an fd for the remote to write events to
|
||||
readfd, writefd = os.pipe()
|
||||
eq = BBUIEventQueue(readfd)
|
||||
# Send an fd for the remote to receive commands from
|
||||
readfd1, writefd1 = os.pipe()
|
||||
command_chan = ConnectionWriter(writefd1)
|
||||
# Send an fd for the remote to write commands results to
|
||||
readfd2, writefd2 = os.pipe()
|
||||
command_chan_recv = ConnectionReader(readfd2)
|
||||
|
||||
sendfds(sock, [writefd, readfd1, writefd2])
|
||||
|
||||
server_connection = BitBakeProcessServerConnection(command_chan, command_chan_recv, eq, sock)
|
||||
|
||||
# Close the ends of the pipes we won't use
|
||||
for i in [writefd, readfd1, writefd2]:
|
||||
os.close(i)
|
||||
|
||||
server_connection.connection.updateFeatureSet(featureset)
|
||||
|
||||
except (Exception, SystemExit) as e:
|
||||
if command_chan_recv:
|
||||
command_chan_recv.close()
|
||||
if command_chan:
|
||||
command_chan.close()
|
||||
for i in [writefd, readfd1, writefd2]:
|
||||
try:
|
||||
os.close(i)
|
||||
except OSError:
|
||||
pass
|
||||
sock.close()
|
||||
raise
|
||||
|
||||
return server_connection
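For orientation, a brief hedged sketch of how a UI front end would typically drive this helper (the socket name and the command are illustrative, not taken from the patch):

# Illustrative usage only: connect to an existing server socket, run one
# command, then tear the connection down again.
server_connection = connectProcessServer("bitbake.sock", featureset=[])
value, error = server_connection.connection.runCommand(["getVariable", "TOPDIR"])
if error:
    print("Command failed: %s" % error)
server_connection.terminate()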
def sendfds(sock, fds):
    '''Send an array of fds over an AF_UNIX socket.'''
    fds = array.array('i', fds)
    msg = bytes([len(fds) % 256])
    sock.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)])

def recvfds(sock, size):
    '''Receive an array of fds over an AF_UNIX socket.'''
    a = array.array('i')
    bytes_size = a.itemsize * size
    msg, ancdata, flags, addr = sock.recvmsg(1, socket.CMSG_LEN(bytes_size))
    if not msg and not ancdata:
        raise EOFError
    try:
        if len(ancdata) != 1:
            raise RuntimeError('received %d items of ancdata' %
                               len(ancdata))
        cmsg_level, cmsg_type, cmsg_data = ancdata[0]
        if (cmsg_level == socket.SOL_SOCKET and
            cmsg_type == socket.SCM_RIGHTS):
            if len(cmsg_data) % a.itemsize != 0:
                raise ValueError
            a.frombytes(cmsg_data)
            assert len(a) % 256 == msg[0]
            return list(a)
    except (ValueError, IndexError):
        pass
    raise RuntimeError('Invalid data received')
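As a sanity check on how these two helpers pair up, here is a small hedged example over a local socketpair (purely illustrative; the real code passes the UI pipe fds to the daemonised server over its control socket):

# Illustrative only: round-trip two pipe fds through sendfds()/recvfds().
import array, os, socket

parent, child = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
r, w = os.pipe()
sendfds(parent, [r, w])          # ship both pipe fds as SCM_RIGHTS ancillary data
received = recvfds(child, 2)     # the peer gets duplicated descriptors back
os.write(received[1], b'ping')
print(os.read(received[0], 4))   # b'ping'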
class BBUIEventQueue:
    def __init__(self, readfd):

        self.eventQueue = []
        self.eventQueueLock = threading.Lock()
        self.eventQueueNotify = threading.Event()

        self.reader = ConnectionReader(readfd)

        self.t = threading.Thread()
        self.t.setDaemon(True)
        self.t.run = self.startCallbackHandler
        self.t.start()

    def getEvent(self):
        self.eventQueueLock.acquire()

        if len(self.eventQueue) == 0:
            self.eventQueueLock.release()
            return None

        item = self.eventQueue.pop(0)

        if len(self.eventQueue) == 0:
            self.eventQueueNotify.clear()

        self.eventQueueLock.release()
        return item

    def waitEvent(self, delay):
        self.eventQueueNotify.wait(delay)
        return self.getEvent()

    def queue_event(self, event):
        self.eventQueueLock.acquire()
        self.eventQueue.append(event)
        self.eventQueueNotify.set()
        self.eventQueueLock.release()

    def send_event(self, event):
        self.queue_event(pickle.loads(event))

    def startCallbackHandler(self):
        bb.utils.set_process_name("UIEventQueue")
        while True:
            try:
                self.reader.wait()
                event = self.reader.get()
                self.queue_event(event)
            except EOFError:
                # Easiest way to exit is to close the file descriptor to cause an exit
                break
        self.reader.close()

class ConnectionReader(object):

    def __init__(self, fd):
        self.reader = multiprocessing.connection.Connection(fd, writable=False)
        self.rlock = multiprocessing.Lock()

    def wait(self, timeout=None):
        return multiprocessing.connection.wait([self.reader], timeout)

    def poll(self, timeout=None):
        return self.reader.poll(timeout)

    def get(self):
        with self.rlock:
            res = self.reader.recv_bytes()
        return multiprocessing.reduction.ForkingPickler.loads(res)

    def fileno(self):
        return self.reader.fileno()

    def close(self):
        return self.reader.close()


class ConnectionWriter(object):

    def __init__(self, fd):
        self.writer = multiprocessing.connection.Connection(fd, readable=False)
        self.wlock = multiprocessing.Lock()
        # Why bb.event needs this I have no idea
        self.event = self

    def send(self, obj):
        obj = multiprocessing.reduction.ForkingPickler.dumps(obj)
        with self.wlock:
            self.writer.send_bytes(obj)

    def fileno(self):
        return self.writer.fileno()

    def close(self):
        return self.writer.close()
signal.signal(signal.SIGTERM, lambda i, s: self.connection.sigterm_terminate())
|
||||
return self.connection
|
||||
|
||||
392
bitbake/lib/bb/server/xmlrpc.py
Normal file
@@ -0,0 +1,392 @@
|
||||
#
|
||||
# BitBake XMLRPC Server
|
||||
#
|
||||
# Copyright (C) 2006 - 2007 Michael 'Mickey' Lauer
|
||||
# Copyright (C) 2006 - 2008 Richard Purdie
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License version 2 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License along
|
||||
# with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
"""
|
||||
This module implements an xmlrpc server for BitBake.
|
||||
|
||||
Use this by deriving a class from BitBakeXMLRPCServer and then adding
|
||||
methods which you want to "export" via XMLRPC. If the methods have the
|
||||
prefix xmlrpc_, then registering those function will happen automatically,
|
||||
if not, you need to call register_function.
|
||||
|
||||
Use register_idle_function() to add a function which the xmlrpc server
|
||||
calls from within server_forever when no requests are pending. Make sure
|
||||
that those functions are non-blocking or else you will introduce latency
|
||||
in the server's main loop.
|
||||
"""
|
||||
|
||||
import bb
|
||||
import xmlrpclib, sys
|
||||
from bb import daemonize
|
||||
from bb.ui import uievent
|
||||
import hashlib, time
|
||||
import socket
|
||||
import os, signal
|
||||
import threading
|
||||
try:
|
||||
import cPickle as pickle
|
||||
except ImportError:
|
||||
import pickle
|
||||
|
||||
DEBUG = False
|
||||
|
||||
from SimpleXMLRPCServer import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
|
||||
import inspect, select, httplib
|
||||
|
||||
from . import BitBakeBaseServer, BitBakeBaseServerConnection, BaseImplServer
|
||||
|
||||
class BBTransport(xmlrpclib.Transport):
|
||||
def __init__(self, timeout):
|
||||
self.timeout = timeout
|
||||
self.connection_token = None
|
||||
xmlrpclib.Transport.__init__(self)
|
||||
|
||||
# Modified from default to pass timeout to HTTPConnection
|
||||
def make_connection(self, host):
|
||||
#return an existing connection if possible. This allows
|
||||
#HTTP/1.1 keep-alive.
|
||||
if self._connection and host == self._connection[0]:
|
||||
return self._connection[1]
|
||||
|
||||
# create a HTTP connection object from a host descriptor
|
||||
chost, self._extra_headers, x509 = self.get_host_info(host)
|
||||
#store the host argument along with the connection object
|
||||
self._connection = host, httplib.HTTPConnection(chost, timeout=self.timeout)
|
||||
return self._connection[1]
|
||||
|
||||
def set_connection_token(self, token):
|
||||
self.connection_token = token
|
||||
|
||||
def send_content(self, h, body):
|
||||
if self.connection_token:
|
||||
h.putheader("Bitbake-token", self.connection_token)
|
||||
xmlrpclib.Transport.send_content(self, h, body)
|
||||
|
||||
def _create_server(host, port, timeout = 60):
|
||||
t = BBTransport(timeout)
|
||||
s = xmlrpclib.Server("http://%s:%d/" % (host, port), transport=t, allow_none=True)
|
||||
return s, t
|
||||
|
||||
class BitBakeServerCommands():
|
||||
|
||||
def __init__(self, server):
|
||||
self.server = server
|
||||
self.has_client = False
|
||||
|
||||
def registerEventHandler(self, host, port):
|
||||
"""
|
||||
Register a remote UI Event Handler
|
||||
"""
|
||||
s, t = _create_server(host, port)
|
||||
|
||||
# we don't allow connections if the cooker is running
|
||||
if (self.cooker.state in [bb.cooker.state.parsing, bb.cooker.state.running]):
|
||||
return None
|
||||
|
||||
self.event_handle = bb.event.register_UIHhandler(s)
|
||||
return self.event_handle
|
||||
|
||||
def unregisterEventHandler(self, handlerNum):
|
||||
"""
|
||||
Unregister a remote UI Event Handler
|
||||
"""
|
||||
return bb.event.unregister_UIHhandler(handlerNum)
|
||||
|
||||
def runCommand(self, command):
|
||||
"""
|
||||
Run a cooker command on the server
|
||||
"""
|
||||
return self.cooker.command.runCommand(command, self.server.readonly)
|
||||
|
||||
def getEventHandle(self):
|
||||
return self.event_handle
|
||||
|
||||
def terminateServer(self):
|
||||
"""
|
||||
Trigger the server to quit
|
||||
"""
|
||||
self.server.quit = True
|
||||
print("Server (cooker) exiting")
|
||||
return
|
||||
|
||||
def addClient(self):
|
||||
if self.has_client:
|
||||
return None
|
||||
token = hashlib.md5(str(time.time())).hexdigest()
|
||||
self.server.set_connection_token(token)
|
||||
self.has_client = True
|
||||
return token
|
||||
|
||||
def removeClient(self):
|
||||
if self.has_client:
|
||||
self.server.set_connection_token(None)
|
||||
self.has_client = False
|
||||
if self.server.single_use:
|
||||
self.server.quit = True
|
||||
|
||||
# This request handler checks if the request has a "Bitbake-token" header
|
||||
# field (this comes from the client side) and compares it with its internal
|
||||
# "Bitbake-token" field (this comes from the server). If the two are not
|
||||
# equal, it is assumed that a client is trying to connect to the server
|
||||
# while another client is connected to the server. In this case, a 503 error
|
||||
# ("service unavailable") is returned to the client.
|
||||
class BitBakeXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
|
||||
def __init__(self, request, client_address, server):
|
||||
self.server = server
|
||||
SimpleXMLRPCRequestHandler.__init__(self, request, client_address, server)
|
||||
|
||||
def do_POST(self):
|
||||
try:
|
||||
remote_token = self.headers["Bitbake-token"]
|
||||
except:
|
||||
remote_token = None
|
||||
if remote_token != self.server.connection_token and remote_token != "observer":
|
||||
self.report_503()
|
||||
else:
|
||||
if remote_token == "observer":
|
||||
self.server.readonly = True
|
||||
else:
|
||||
self.server.readonly = False
|
||||
SimpleXMLRPCRequestHandler.do_POST(self)
|
||||
|
||||
def report_503(self):
|
||||
self.send_response(503)
|
||||
response = 'No more client allowed'
|
||||
self.send_header("Content-type", "text/plain")
|
||||
self.send_header("Content-length", str(len(response)))
|
||||
self.end_headers()
|
||||
self.wfile.write(response)
|
||||
|
||||
|
||||
class XMLRPCProxyServer(BaseImplServer):
|
||||
""" not a real working server, but a stub for a proxy server connection
|
||||
|
||||
"""
|
||||
def __init__(self, host, port):
|
||||
self.host = host
|
||||
self.port = port
|
||||
|
||||
class XMLRPCServer(SimpleXMLRPCServer, BaseImplServer):
|
||||
# remove this when you're done with debugging
|
||||
# allow_reuse_address = True
|
||||
|
||||
def __init__(self, interface):
|
||||
"""
|
||||
Constructor
|
||||
"""
|
||||
BaseImplServer.__init__(self)
|
||||
if (interface[1] == 0): # anonymous port, not getting reused
|
||||
self.single_use = True
|
||||
# Use auto port configuration
|
||||
if (interface[1] == -1):
|
||||
interface = (interface[0], 0)
|
||||
SimpleXMLRPCServer.__init__(self, interface,
|
||||
requestHandler=BitBakeXMLRPCRequestHandler,
|
||||
logRequests=False, allow_none=True)
|
||||
self.host, self.port = self.socket.getsockname()
|
||||
self.connection_token = None
|
||||
#self.register_introspection_functions()
|
||||
self.commands = BitBakeServerCommands(self)
|
||||
self.autoregister_all_functions(self.commands, "")
|
||||
self.interface = interface
|
||||
self.single_use = False
|
||||
|
||||
def addcooker(self, cooker):
|
||||
BaseImplServer.addcooker(self, cooker)
|
||||
self.commands.cooker = cooker
|
||||
|
||||
def autoregister_all_functions(self, context, prefix):
|
||||
"""
|
||||
Convenience method for registering all functions in the scope
|
||||
of this class that start with a common prefix
|
||||
"""
|
||||
methodlist = inspect.getmembers(context, inspect.ismethod)
|
||||
for name, method in methodlist:
|
||||
if name.startswith(prefix):
|
||||
self.register_function(method, name[len(prefix):])
|
||||
|
||||
|
||||
def serve_forever(self):
|
||||
# Start the actual XMLRPC server
|
||||
bb.cooker.server_main(self.cooker, self._serve_forever)
|
||||
|
||||
def _serve_forever(self):
|
||||
"""
|
||||
Serve Requests. Overloaded to honor a quit command
|
||||
"""
|
||||
self.quit = False
|
||||
while not self.quit:
|
||||
fds = [self]
|
||||
nextsleep = 0.1
|
||||
for function, data in self._idlefuns.items():
|
||||
try:
|
||||
retval = function(self, data, False)
|
||||
if retval is False:
|
||||
del self._idlefuns[function]
|
||||
elif retval is True:
|
||||
nextsleep = 0
|
||||
else:
|
||||
fds = fds + retval
|
||||
except SystemExit:
|
||||
raise
|
||||
except:
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
pass
|
||||
|
||||
socktimeout = self.socket.gettimeout() or nextsleep
|
||||
socktimeout = min(socktimeout, nextsleep)
|
||||
# Mirror what BaseServer handle_request would do
|
||||
fd_sets = select.select(fds, [], [], socktimeout)
|
||||
if fd_sets[0] and self in fd_sets[0]:
|
||||
self._handle_request_noblock()
|
||||
|
||||
# Tell idle functions we're exiting
|
||||
for function, data in self._idlefuns.items():
|
||||
try:
|
||||
retval = function(self, data, True)
|
||||
except:
|
||||
pass
|
||||
self.server_close()
|
||||
return
|
||||
|
||||
def set_connection_token(self, token):
|
||||
self.connection_token = token
|
||||
|
||||
class BitBakeXMLRPCServerConnection(BitBakeBaseServerConnection):
|
||||
def __init__(self, serverImpl, clientinfo=("localhost", 0), observer_only = False, featureset = []):
|
||||
self.connection, self.transport = _create_server(serverImpl.host, serverImpl.port)
|
||||
self.clientinfo = clientinfo
|
||||
self.serverImpl = serverImpl
|
||||
self.observer_only = observer_only
|
||||
self.featureset = featureset
|
||||
|
||||
def connect(self):
|
||||
if not self.observer_only:
|
||||
token = self.connection.addClient()
|
||||
else:
|
||||
token = "observer"
|
||||
if token is None:
|
||||
return None
|
||||
self.transport.set_connection_token(token)
|
||||
|
||||
self.events = uievent.BBUIEventQueue(self.connection, self.clientinfo)
|
||||
for event in bb.event.ui_queue:
|
||||
self.events.queue_event(event)
|
||||
|
||||
_, error = self.connection.runCommand(["setFeatures", self.featureset])
|
||||
if error:
|
||||
# no need to log it here, the error shall be sent to the client
|
||||
raise BaseException(error)
|
||||
|
||||
return self
|
||||
|
||||
def removeClient(self):
|
||||
if not self.observer_only:
|
||||
self.connection.removeClient()
|
||||
|
||||
def terminate(self):
|
||||
# Don't wait for server indefinitely
|
||||
import socket
|
||||
socket.setdefaulttimeout(2)
|
||||
try:
|
||||
self.events.system_quit()
|
||||
except:
|
||||
pass
|
||||
try:
|
||||
self.connection.removeClient()
|
||||
except:
|
||||
pass
|
||||
|
||||
class BitBakeServer(BitBakeBaseServer):
|
||||
def initServer(self, interface = ("localhost", 0)):
|
||||
self.interface = interface
|
||||
self.serverImpl = XMLRPCServer(interface)
|
||||
|
||||
def detach(self):
|
||||
daemonize.createDaemon(self.serverImpl.serve_forever, "bitbake-cookerdaemon.log")
|
||||
del self.cooker
|
||||
|
||||
def establishConnection(self, featureset):
|
||||
self.connection = BitBakeXMLRPCServerConnection(self.serverImpl, self.interface, False, featureset)
|
||||
return self.connection.connect()
|
||||
|
||||
def set_connection_token(self, token):
|
||||
self.connection.transport.set_connection_token(token)
|
||||
|
||||
class BitBakeXMLRPCClient(BitBakeBaseServer):
|
||||
|
||||
def __init__(self, observer_only = False):
|
||||
self.observer_only = observer_only
|
||||
# if we need extra caches, just tell the server to load them all
|
||||
pass
|
||||
|
||||
def saveConnectionDetails(self, remote):
|
||||
self.remote = remote
|
||||
|
||||
def saveConnectionConfigParams(self, configParams):
|
||||
self.configParams = configParams
|
||||
|
||||
def establishConnection(self, featureset):
|
||||
# The format of "remote" must be "server:port"
|
||||
try:
|
||||
[host, port] = self.remote.split(":")
|
||||
port = int(port)
|
||||
except Exception as e:
|
||||
bb.fatal("Failed to read remote definition (%s)" % str(e))
|
||||
|
||||
# use automatic port if port set to -1, meaning read it from
|
||||
# the bitbake.lock file
|
||||
if port == -1:
|
||||
lock_location = "%s/bitbake.lock" % self.configParams.environment.get('BUILDDIR')
|
||||
lock = bb.utils.lockfile(lock_location, False, False)
|
||||
if lock:
|
||||
# This means there is no server running which we can
|
||||
# connect to on the local system.
|
||||
bb.utils.unlockfile(lock)
|
||||
return None
|
||||
|
||||
try:
|
||||
lf = open(lock_location, 'r')
|
||||
remotedef = lf.readline()
|
||||
[host, port] = remotedef.split(":")
|
||||
port = int(port)
|
||||
lf.close()
|
||||
self.remote = remotedef
|
||||
except Exception as e:
|
||||
bb.fatal("Failed to read bitbake.lock (%s)" % str(e))
|
||||
|
||||
# We need our IP for the server connection. We get the IP
|
||||
# by trying to connect with the server
|
||||
try:
|
||||
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
|
||||
s.connect((host, port))
|
||||
ip = s.getsockname()[0]
|
||||
s.close()
|
||||
except Exception as e:
|
||||
bb.fatal("Could not create socket for %s:%s (%s)" % (host, port, str(e)))
|
||||
try:
|
||||
self.serverImpl = XMLRPCProxyServer(host, port)
|
||||
self.connection = BitBakeXMLRPCServerConnection(self.serverImpl, (ip, 0), self.observer_only, featureset)
|
||||
return self.connection.connect()
|
||||
except Exception as e:
|
||||
bb.fatal("Could not connect to server at %s:%s (%s)" % (host, port, str(e)))
|
||||
|
||||
def endSession(self):
|
||||
self.connection.removeClient()
|
||||
@@ -1,154 +0,0 @@
|
||||
#
|
||||
# BitBake XMLRPC Client Interface
|
||||
#
|
||||
# Copyright (C) 2006 - 2007 Michael 'Mickey' Lauer
|
||||
# Copyright (C) 2006 - 2008 Richard Purdie
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License version 2 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License along
|
||||
# with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
import socket
|
||||
import http.client
|
||||
import xmlrpc.client
|
||||
|
||||
import bb
|
||||
from bb.ui import uievent
|
||||
|
||||
class BBTransport(xmlrpc.client.Transport):
|
||||
def __init__(self, timeout):
|
||||
self.timeout = timeout
|
||||
self.connection_token = None
|
||||
xmlrpc.client.Transport.__init__(self)
|
||||
|
||||
# Modified from default to pass timeout to HTTPConnection
|
||||
def make_connection(self, host):
|
||||
#return an existing connection if possible. This allows
|
||||
#HTTP/1.1 keep-alive.
|
||||
if self._connection and host == self._connection[0]:
|
||||
return self._connection[1]
|
||||
|
||||
# create a HTTP connection object from a host descriptor
|
||||
chost, self._extra_headers, x509 = self.get_host_info(host)
|
||||
#store the host argument along with the connection object
|
||||
self._connection = host, http.client.HTTPConnection(chost, timeout=self.timeout)
|
||||
return self._connection[1]
|
||||
|
||||
def set_connection_token(self, token):
|
||||
self.connection_token = token
|
||||
|
||||
def send_content(self, h, body):
|
||||
if self.connection_token:
|
||||
h.putheader("Bitbake-token", self.connection_token)
|
||||
xmlrpc.client.Transport.send_content(self, h, body)
|
||||
|
||||
def _create_server(host, port, timeout = 60):
|
||||
t = BBTransport(timeout)
|
||||
s = xmlrpc.client.ServerProxy("http://%s:%d/" % (host, port), transport=t, allow_none=True, use_builtin_types=True)
|
||||
return s, t
|
||||
|
||||
def check_connection(remote, timeout):
|
||||
try:
|
||||
host, port = remote.split(":")
|
||||
port = int(port)
|
||||
except Exception as e:
|
||||
bb.warn("Failed to read remote definition (%s)" % str(e))
|
||||
raise e
|
||||
|
||||
server, _transport = _create_server(host, port, timeout)
|
||||
try:
|
||||
ret, err = server.runCommand(['getVariable', 'TOPDIR'])
|
||||
if err or not ret:
|
||||
return False
|
||||
except ConnectionError:
|
||||
return False
|
||||
return True
|
||||
|
||||
class BitBakeXMLRPCServerConnection(object):
|
||||
def __init__(self, host, port, clientinfo=("localhost", 0), observer_only = False, featureset = None):
|
||||
self.connection, self.transport = _create_server(host, port)
|
||||
self.clientinfo = clientinfo
|
||||
self.observer_only = observer_only
|
||||
if featureset:
|
||||
self.featureset = featureset
|
||||
else:
|
||||
self.featureset = []
|
||||
|
||||
self.events = uievent.BBUIEventQueue(self.connection, self.clientinfo)
|
||||
|
||||
_, error = self.connection.runCommand(["setFeatures", self.featureset])
|
||||
if error:
|
||||
# disconnect the client, we can't make the setFeature work
|
||||
self.connection.removeClient()
|
||||
# no need to log it here, the error shall be sent to the client
|
||||
raise BaseException(error)
|
||||
|
||||
def connect(self, token = None):
|
||||
if token is None:
|
||||
if self.observer_only:
|
||||
token = "observer"
|
||||
else:
|
||||
token = self.connection.addClient()
|
||||
|
||||
if token is None:
|
||||
return None
|
||||
|
||||
self.transport.set_connection_token(token)
|
||||
return self
|
||||
|
||||
def removeClient(self):
|
||||
if not self.observer_only:
|
||||
self.connection.removeClient()
|
||||
|
||||
def terminate(self):
|
||||
# Don't wait for server indefinitely
|
||||
socket.setdefaulttimeout(2)
|
||||
try:
|
||||
self.events.system_quit()
|
||||
except:
|
||||
pass
|
||||
try:
|
||||
self.connection.removeClient()
|
||||
except:
|
||||
pass
|
||||
|
||||
def connectXMLRPC(remote, featureset, observer_only = False, token = None):
|
||||
# The format of "remote" must be "server:port"
|
||||
try:
|
||||
[host, port] = remote.split(":")
|
||||
port = int(port)
|
||||
except Exception as e:
|
||||
bb.warn("Failed to parse remote definition %s (%s)" % (remote, str(e)))
|
||||
raise e
|
||||
|
||||
# We need our IP for the server connection. We get the IP
|
||||
# by trying to connect with the server
|
||||
try:
|
||||
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
|
||||
s.connect((host, port))
|
||||
ip = s.getsockname()[0]
|
||||
s.close()
|
||||
except Exception as e:
|
||||
bb.warn("Could not create socket for %s:%s (%s)" % (host, port, str(e)))
|
||||
raise e
|
||||
try:
|
||||
connection = BitBakeXMLRPCServerConnection(host, port, (ip, 0), observer_only, featureset)
|
||||
return connection.connect(token)
|
||||
except Exception as e:
|
||||
bb.warn("Could not connect to server at %s:%s (%s)" % (host, port, str(e)))
|
||||
raise e
|
||||
|
||||
|
||||
|
||||
@@ -1,158 +0,0 @@
|
||||
#
|
||||
# BitBake XMLRPC Server Interface
|
||||
#
|
||||
# Copyright (C) 2006 - 2007 Michael 'Mickey' Lauer
|
||||
# Copyright (C) 2006 - 2008 Richard Purdie
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License version 2 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License along
|
||||
# with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
import hashlib
|
||||
import time
|
||||
import inspect
|
||||
from xmlrpc.server import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
|
||||
|
||||
import bb
|
||||
|
||||
# This request handler checks if the request has a "Bitbake-token" header
|
||||
# field (this comes from the client side) and compares it with its internal
|
||||
# "Bitbake-token" field (this comes from the server). If the two are not
|
||||
# equal, it is assumed that a client is trying to connect to the server
|
||||
# while another client is connected to the server. In this case, a 503 error
|
||||
# ("service unavailable") is returned to the client.
|
||||
class BitBakeXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
|
||||
def __init__(self, request, client_address, server):
|
||||
self.server = server
|
||||
SimpleXMLRPCRequestHandler.__init__(self, request, client_address, server)
|
||||
|
||||
def do_POST(self):
|
||||
try:
|
||||
remote_token = self.headers["Bitbake-token"]
|
||||
except:
|
||||
remote_token = None
|
||||
if 0 and remote_token != self.server.connection_token and remote_token != "observer":
|
||||
self.report_503()
|
||||
else:
|
||||
if remote_token == "observer":
|
||||
self.server.readonly = True
|
||||
else:
|
||||
self.server.readonly = False
|
||||
SimpleXMLRPCRequestHandler.do_POST(self)
|
||||
|
||||
def report_503(self):
|
||||
self.send_response(503)
|
||||
response = 'No more client allowed'
|
||||
self.send_header("Content-type", "text/plain")
|
||||
self.send_header("Content-length", str(len(response)))
|
||||
self.end_headers()
|
||||
self.wfile.write(bytes(response, 'utf-8'))
|
||||
|
||||
class BitBakeXMLRPCServer(SimpleXMLRPCServer):
|
||||
# remove this when you're done with debugging
|
||||
# allow_reuse_address = True
|
||||
|
||||
def __init__(self, interface, cooker, parent):
|
||||
# Use auto port configuration
|
||||
if (interface[1] == -1):
|
||||
interface = (interface[0], 0)
|
||||
SimpleXMLRPCServer.__init__(self, interface,
|
||||
requestHandler=BitBakeXMLRPCRequestHandler,
|
||||
logRequests=False, allow_none=True)
|
||||
self.host, self.port = self.socket.getsockname()
|
||||
self.interface = interface
|
||||
|
||||
self.connection_token = None
|
||||
self.commands = BitBakeXMLRPCServerCommands(self)
|
||||
self.register_functions(self.commands, "")
|
||||
|
||||
self.cooker = cooker
|
||||
self.parent = parent
|
||||
|
||||
|
||||
def register_functions(self, context, prefix):
|
||||
"""
|
||||
Convenience method for registering all functions in the scope
|
||||
of this class that start with a common prefix
|
||||
"""
|
||||
methodlist = inspect.getmembers(context, inspect.ismethod)
|
||||
for name, method in methodlist:
|
||||
if name.startswith(prefix):
|
||||
self.register_function(method, name[len(prefix):])
|
||||
|
||||
def get_timeout(self, delay):
|
||||
socktimeout = self.socket.gettimeout() or delay
|
||||
return min(socktimeout, delay)
|
||||
|
||||
def handle_requests(self):
|
||||
self._handle_request_noblock()
|
||||
|
||||
class BitBakeXMLRPCServerCommands():
|
||||
|
||||
def __init__(self, server):
|
||||
self.server = server
|
||||
self.has_client = False
|
||||
|
||||
def registerEventHandler(self, host, port):
|
||||
"""
|
||||
Register a remote UI Event Handler
|
||||
"""
|
||||
s, t = bb.server.xmlrpcclient._create_server(host, port)
|
||||
|
||||
# we don't allow connections if the cooker is running
|
||||
if (self.server.cooker.state in [bb.cooker.state.parsing, bb.cooker.state.running]):
|
||||
return None, "Cooker is busy: %s" % bb.cooker.state.get_name(self.server.cooker.state)
|
||||
|
||||
self.event_handle = bb.event.register_UIHhandler(s, True)
|
||||
return self.event_handle, 'OK'
|
||||
|
||||
def unregisterEventHandler(self, handlerNum):
|
||||
"""
|
||||
Unregister a remote UI Event Handler
|
||||
"""
|
||||
ret = bb.event.unregister_UIHhandler(handlerNum, True)
|
||||
self.event_handle = None
|
||||
return ret
|
||||
|
||||
def runCommand(self, command):
|
||||
"""
|
||||
Run a cooker command on the server
|
||||
"""
|
||||
return self.server.cooker.command.runCommand(command, self.server.readonly)
|
||||
|
||||
def getEventHandle(self):
|
||||
return self.event_handle
|
||||
|
||||
def terminateServer(self):
|
||||
"""
|
||||
Trigger the server to quit
|
||||
"""
|
||||
self.server.parent.quit = True
|
||||
print("XMLRPC Server triggering exit")
|
||||
return
|
||||
|
||||
def addClient(self):
|
||||
if self.server.parent.haveui:
|
||||
return None
|
||||
token = hashlib.md5(str(time.time()).encode("utf-8")).hexdigest()
|
||||
self.server.connection_token = token
|
||||
self.server.parent.haveui = True
|
||||
return token
|
||||
|
||||
def removeClient(self):
|
||||
if self.server.parent.haveui:
|
||||
self.server.connection_token = None
|
||||
self.server.parent.haveui = False
|
||||
|
||||
@@ -3,19 +3,21 @@ import logging
|
||||
import os
|
||||
import re
|
||||
import tempfile
|
||||
import pickle
|
||||
import bb.data
|
||||
import difflib
|
||||
import simplediff
|
||||
from bb.checksum import FileChecksumCache
|
||||
|
||||
logger = logging.getLogger('BitBake.SigGen')
|
||||
|
||||
try:
|
||||
import cPickle as pickle
|
||||
except ImportError:
|
||||
import pickle
|
||||
logger.info('Importing cPickle failed. Falling back to a very slow implementation.')
|
||||
|
||||
def init(d):
|
||||
siggens = [obj for obj in globals().values()
|
||||
siggens = [obj for obj in globals().itervalues()
|
||||
if type(obj) is type and issubclass(obj, SignatureGenerator)]
|
||||
|
||||
desired = d.getVar("BB_SIGNATURE_HANDLER") or "noop"
|
||||
desired = d.getVar("BB_SIGNATURE_HANDLER", True) or "noop"
|
||||
for sg in siggens:
|
||||
if desired == sg.name:
|
||||
return sg(d)
|
||||
@@ -32,11 +34,9 @@ class SignatureGenerator(object):
|
||||
name = "noop"
|
||||
|
||||
def __init__(self, data):
|
||||
self.basehash = {}
|
||||
self.taskhash = {}
|
||||
self.runtaskdeps = {}
|
||||
self.file_checksum_values = {}
|
||||
self.taints = {}
|
||||
|
||||
def finalise(self, fn, d, varient):
|
||||
return
|
||||
@@ -44,8 +44,7 @@ class SignatureGenerator(object):
|
||||
def get_taskhash(self, fn, task, deps, dataCache):
|
||||
return "0"
|
||||
|
||||
def writeout_file_checksum_cache(self):
|
||||
"""Write/update the file checksum cache onto disk"""
|
||||
def set_taskdata(self, hashes, deps, checksum):
|
||||
return
|
||||
|
||||
def stampfile(self, stampbase, file_name, taskname, extrainfo):
|
||||
@@ -63,16 +62,6 @@ class SignatureGenerator(object):
    def dump_sigs(self, dataCache, options):
        return

    def get_taskdata(self):
        return (self.runtaskdeps, self.taskhash, self.file_checksum_values, self.taints, self.basehash)

    def set_taskdata(self, data):
        self.runtaskdeps, self.taskhash, self.file_checksum_values, self.taints, self.basehash = data

    def reset(self, data):
        self.__init__(data)


class SignatureGeneratorBasic(SignatureGenerator):
    """
    """
@@ -84,22 +73,15 @@ class SignatureGeneratorBasic(SignatureGenerator):
        self.taskdeps = {}
        self.runtaskdeps = {}
        self.file_checksum_values = {}
        self.taints = {}
        self.gendeps = {}
        self.lookupcache = {}
        self.pkgnameextract = re.compile("(?P<fn>.*)\..*")
        self.basewhitelist = set((data.getVar("BB_HASHBASE_WHITELIST") or "").split())
        self.basewhitelist = set((data.getVar("BB_HASHBASE_WHITELIST", True) or "").split())
        self.taskwhitelist = None
        self.init_rundepcheck(data)
        checksum_cache_file = data.getVar("BB_HASH_CHECKSUM_CACHE_FILE")
        if checksum_cache_file:
            self.checksum_cache = FileChecksumCache()
            self.checksum_cache.init_cache(data, checksum_cache_file)
        else:
            self.checksum_cache = None

    def init_rundepcheck(self, data):
        self.taskwhitelist = data.getVar("BB_HASHTASK_WHITELIST") or None
        self.taskwhitelist = data.getVar("BB_HASHTASK_WHITELIST", True) or None
        if self.taskwhitelist:
            self.twl = re.compile(self.taskwhitelist)
        else:
@@ -107,7 +89,6 @@ class SignatureGeneratorBasic(SignatureGenerator):

    def _build_data(self, fn, d):

        ignore_mismatch = ((d.getVar("BB_HASH_IGNORE_MISMATCH") or '') == '1')
        tasklist, gendeps, lookupcache = bb.data.generate_dependencies(d)

        taskdeps = {}
@@ -140,11 +121,7 @@ class SignatureGeneratorBasic(SignatureGenerator):
                var = lookupcache[dep]
                if var is not None:
                    data = data + str(var)
            datahash = hashlib.md5(data.encode("utf-8")).hexdigest()
            k = fn + "." + task
            if not ignore_mismatch and k in self.basehash and self.basehash[k] != datahash:
                bb.error("When reparsing %s, the basehash value changed from %s to %s. The metadata is not deterministic and this needs to be fixed." % (k, self.basehash[k], datahash))
            self.basehash[k] = datahash
            self.basehash[fn + "." + task] = hashlib.md5(data).hexdigest()
            taskdeps[task] = alldeps

        self.taskdeps[fn] = taskdeps
@@ -155,21 +132,18 @@ class SignatureGeneratorBasic(SignatureGenerator):

    def finalise(self, fn, d, variant):

        mc = d.getVar("__BBMULTICONFIG", False) or ""
        if variant or mc:
            fn = bb.cache.realfn2virtual(fn, variant, mc)
        if variant:
            fn = "virtual:" + variant + ":" + fn

        try:
            taskdeps = self._build_data(fn, d)
        except bb.parse.SkipRecipe:
            raise
        except:
            bb.warn("Error during finalise of %s" % fn)
            bb.note("Error during finalise of %s" % fn)
            raise

        #Slow but can be useful for debugging mismatched basehashes
        #for task in self.taskdeps[fn]:
        # self.dump_sigtask(fn, task, d.getVar("STAMP"), False)
        # self.dump_sigtask(fn, task, d.getVar("STAMP", True), False)

        for task in taskdeps:
            d.setVar("BB_BASEHASH_task-%s" % task, self.basehash[fn + "." + task])
@@ -195,11 +169,9 @@ class SignatureGeneratorBasic(SignatureGenerator):
    def get_taskhash(self, fn, task, deps, dataCache):
        k = fn + "." + task
        data = dataCache.basetaskhash[k]
        self.basehash[k] = data
        self.runtaskdeps[k] = []
        self.file_checksum_values[k] = []
        self.file_checksum_values[k] = {}
        recipename = dataCache.pkg_fn[fn]

        for dep in sorted(deps, key=clean_basepath):
            depname = dataCache.pkg_fn[self.pkgnameextract.search(dep).group('fn')]
            if not self.rundep_check(fn, recipename, task, dep, depname, dataCache):
@@ -210,50 +182,29 @@ class SignatureGeneratorBasic(SignatureGenerator):
            self.runtaskdeps[k].append(dep)

        if task in dataCache.file_checksums[fn]:
            if self.checksum_cache:
                checksums = self.checksum_cache.get_checksums(dataCache.file_checksums[fn][task], recipename)
            else:
                checksums = bb.fetch2.get_file_checksums(dataCache.file_checksums[fn][task], recipename)
            checksums = bb.fetch2.get_file_checksums(dataCache.file_checksums[fn][task], recipename)
            for (f,cs) in checksums:
                self.file_checksum_values[k].append((f,cs))
                if cs:
                    data = data + cs

        taskdep = dataCache.task_deps[fn]
        if 'nostamp' in taskdep and task in taskdep['nostamp']:
            # Nostamp tasks need an implicit taint so that they force any dependent tasks to run
            import uuid
            taint = str(uuid.uuid4())
            data = data + taint
            self.taints[k] = "nostamp:" + taint
                self.file_checksum_values[k][f] = cs
                data = data + cs

        taint = self.read_taint(fn, task, dataCache.stamp[fn])
        if taint:
            data = data + taint
            self.taints[k] = taint
            logger.warning("%s is tainted from a forced run" % k)

        h = hashlib.md5(data.encode("utf-8")).hexdigest()
        h = hashlib.md5(data).hexdigest()
        self.taskhash[k] = h
        #d.setVar("BB_TASKHASH_task-%s" % task, taskhash[task])
        return h

    def writeout_file_checksum_cache(self):
        """Write/update the file checksum cache onto disk"""
        if self.checksum_cache:
            self.checksum_cache.save_extras()
            self.checksum_cache.save_merge()
        else:
            bb.fetch2.fetcher_parse_save()
            bb.fetch2.fetcher_parse_done()
    def set_taskdata(self, hashes, deps, checksums):
        self.runtaskdeps = deps
        self.taskhash = hashes
        self.file_checksum_values = checksums

    def dump_sigtask(self, fn, task, stampbase, runtime):

        k = fn + "." + task
        referencestamp = stampbase
        if isinstance(runtime, str) and runtime.startswith("customfile"):
        if runtime == "customfile":
            sigfile = stampbase
            referencestamp = runtime[11:]
        elif runtime and k in self.taskhash:
            sigfile = stampbase + "." + task + ".sigdata" + "." + self.taskhash[k]
        else:
@@ -262,7 +213,6 @@ class SignatureGeneratorBasic(SignatureGenerator):
        bb.utils.mkdirhier(os.path.dirname(sigfile))

        data = {}
        data['task'] = task
        data['basewhitelist'] = self.basewhitelist
        data['taskwhitelist'] = self.taskwhitelist
        data['taskdeps'] = self.taskdeps[fn][task]
@@ -278,35 +228,21 @@ class SignatureGeneratorBasic(SignatureGenerator):

        if runtime and k in self.taskhash:
            data['runtaskdeps'] = self.runtaskdeps[k]
            data['file_checksum_values'] = [(os.path.basename(f), cs) for f,cs in self.file_checksum_values[k]]
            data['file_checksum_values'] = [(os.path.basename(f), cs) for f,cs in self.file_checksum_values[k].items()]
            data['runtaskhashes'] = {}
            for dep in data['runtaskdeps']:
                data['runtaskhashes'][dep] = self.taskhash[dep]
            data['taskhash'] = self.taskhash[k]

        taint = self.read_taint(fn, task, referencestamp)
        taint = self.read_taint(fn, task, stampbase)
        if taint:
            data['taint'] = taint

        if runtime and k in self.taints:
            if 'nostamp:' in self.taints[k]:
                data['taint'] = self.taints[k]

        computed_basehash = calc_basehash(data)
        if computed_basehash != self.basehash[k]:
            bb.error("Basehash mismatch %s versus %s for %s" % (computed_basehash, self.basehash[k], k))
        if runtime and k in self.taskhash:
            computed_taskhash = calc_taskhash(data)
            if computed_taskhash != self.taskhash[k]:
                bb.error("Taskhash mismatch %s versus %s for %s" % (computed_taskhash, self.taskhash[k], k))
                sigfile = sigfile.replace(self.taskhash[k], computed_taskhash)

        fd, tmpfile = tempfile.mkstemp(dir=os.path.dirname(sigfile), prefix="sigtask.")
        try:
            with os.fdopen(fd, "wb") as stream:
                p = pickle.dump(data, stream, -1)
                stream.flush()
            os.chmod(tmpfile, 0o664)
            os.chmod(tmpfile, 0664)
            os.rename(tmpfile, sigfile)
        except (OSError, IOError) as err:
            try:
@@ -315,18 +251,16 @@ class SignatureGeneratorBasic(SignatureGenerator):
                pass
            raise err

    def dump_sigfn(self, fn, dataCaches, options):
        if fn in self.taskdeps:
    def dump_sigs(self, dataCache, options):
        for fn in self.taskdeps:
            for task in self.taskdeps[fn]:
                tid = fn + ":" + task
                (mc, _, _) = bb.runqueue.split_tid(tid)
                k = fn + "." + task
                if k not in self.taskhash:
                    continue
                if dataCaches[mc].basetaskhash[k] != self.basehash[k]:
                if dataCache.basetaskhash[k] != self.basehash[k]:
                    bb.error("Bitbake's cached basehash does not match the one we just generated (%s)!" % k)
                    bb.error("The mismatched hashes were %s and %s" % (dataCaches[mc].basetaskhash[k], self.basehash[k]))
                self.dump_sigtask(fn, task, dataCaches[mc].stamp[fn], True)
                    bb.error("The mismatched hashes were %s and %s" % (dataCache.basetaskhash[k], self.basehash[k]))
                self.dump_sigtask(fn, task, dataCache.stamp[fn], True)

class SignatureGeneratorBasicHash(SignatureGeneratorBasic):
    name = "basichash"
@@ -354,71 +288,15 @@ class SignatureGeneratorBasicHash(SignatureGeneratorBasic):
|
||||
|
||||
def dump_this_task(outfile, d):
|
||||
import bb.parse
|
||||
fn = d.getVar("BB_FILENAME")
|
||||
task = "do_" + d.getVar("BB_CURRENTTASK")
|
||||
referencestamp = bb.build.stamp_internal(task, d, None, True)
|
||||
bb.parse.siggen.dump_sigtask(fn, task, outfile, "customfile:" + referencestamp)
|
||||
|
||||
def init_colors(enable_color):
|
||||
"""Initialise colour dict for passing to compare_sigfiles()"""
|
||||
# First set up the colours
|
||||
colors = {'color_title': '\033[1;37;40m',
|
||||
'color_default': '\033[0;37;40m',
|
||||
'color_add': '\033[1;32;40m',
|
||||
'color_remove': '\033[1;31;40m',
|
||||
}
|
||||
# Leave all keys present but clear the values
|
||||
if not enable_color:
|
||||
for k in colors.keys():
|
||||
colors[k] = ''
|
||||
return colors
|
||||
|
||||
def worddiff_str(oldstr, newstr, colors=None):
|
||||
if not colors:
|
||||
colors = init_colors(False)
|
||||
diff = simplediff.diff(oldstr.split(' '), newstr.split(' '))
|
||||
ret = []
|
||||
for change, value in diff:
|
||||
value = ' '.join(value)
|
||||
if change == '=':
|
||||
ret.append(value)
|
||||
elif change == '+':
|
||||
item = '{color_add}{{+{value}+}}{color_default}'.format(value=value, **colors)
|
||||
ret.append(item)
|
||||
elif change == '-':
|
||||
item = '{color_remove}[-{value}-]{color_default}'.format(value=value, **colors)
|
||||
ret.append(item)
|
||||
whitespace_note = ''
|
||||
if oldstr != newstr and ' '.join(oldstr.split()) == ' '.join(newstr.split()):
|
||||
whitespace_note = ' (whitespace changed)'
|
||||
return '"%s"%s' % (' '.join(ret), whitespace_note)
|
||||
|
||||
def list_inline_diff(oldlist, newlist, colors=None):
|
||||
if not colors:
|
||||
colors = init_colors(False)
|
||||
diff = simplediff.diff(oldlist, newlist)
|
||||
ret = []
|
||||
for change, value in diff:
|
||||
value = ' '.join(value)
|
||||
if change == '=':
|
||||
ret.append("'%s'" % value)
|
||||
elif change == '+':
|
||||
item = '{color_add}+{value}{color_default}'.format(value=value, **colors)
|
||||
ret.append(item)
|
||||
elif change == '-':
|
||||
item = '{color_remove}-{value}{color_default}'.format(value=value, **colors)
|
||||
ret.append(item)
|
||||
return '[%s]' % (', '.join(ret))
|
||||
fn = d.getVar("BB_FILENAME", True)
|
||||
task = "do_" + d.getVar("BB_CURRENTTASK", True)
|
||||
bb.parse.siggen.dump_sigtask(fn, task, outfile, "customfile")
|
||||
|
||||
def clean_basepath(a):
|
||||
mc = None
|
||||
if a.startswith("multiconfig:"):
|
||||
_, mc, a = a.split(":", 2)
|
||||
b = a.rsplit("/", 2)[1] + '/' + a.rsplit("/", 2)[2]
|
||||
if a.startswith("virtual:"):
|
||||
b = b + ":" + a.rsplit(":", 1)[0]
|
||||
if mc:
|
||||
b = b + ":multiconfig:" + mc
|
||||
b = a.rsplit(":", 1)[0] + ":" + a.rsplit("/", 1)[1]
|
||||
else:
|
||||
b = a.rsplit("/", 1)[1]
|
||||
return b
|
||||
|
||||
def clean_basepaths(a):
|
||||
@@ -427,38 +305,13 @@ def clean_basepaths(a):
|
||||
b[clean_basepath(x)] = a[x]
|
||||
return b
|
||||
|
||||
def clean_basepaths_list(a):
|
||||
b = []
|
||||
for x in a:
|
||||
b.append(clean_basepath(x))
|
||||
return b
|
||||
|
||||
def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
|
||||
def compare_sigfiles(a, b, recursecb = None):
|
||||
output = []
|
||||
|
||||
colors = init_colors(color)
|
||||
def color_format(formatstr, **values):
|
||||
"""
|
||||
Return colour formatted string.
|
||||
NOTE: call with the format string, not an already formatted string
|
||||
containing values (otherwise you could have trouble with { and }
|
||||
characters)
|
||||
"""
|
||||
if not formatstr.endswith('{color_default}'):
|
||||
formatstr += '{color_default}'
|
||||
# In newer python 3 versions you can pass both of these directly,
|
||||
# but we only require 3.4 at the moment
|
||||
formatparams = {}
|
||||
formatparams.update(colors)
|
||||
formatparams.update(values)
|
||||
return formatstr.format(**formatparams)
|
||||
|
||||
with open(a, 'rb') as f:
|
||||
p1 = pickle.Unpickler(f)
|
||||
a_data = p1.load()
|
||||
with open(b, 'rb') as f:
|
||||
p2 = pickle.Unpickler(f)
|
||||
b_data = p2.load()
|
||||
p1 = pickle.Unpickler(open(a, "rb"))
|
||||
a_data = p1.load()
|
||||
p2 = pickle.Unpickler(open(b, "rb"))
|
||||
b_data = p2.load()
|
||||
|
||||
def dict_diff(a, b, whitelist=set()):
|
||||
sa = set(a.keys())
|
||||
@@ -506,100 +359,50 @@ def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
|
||||
return changed, added, removed
|
||||
|
||||
if 'basewhitelist' in a_data and a_data['basewhitelist'] != b_data['basewhitelist']:
|
||||
output.append(color_format("{color_title}basewhitelist changed{color_default} from '%s' to '%s'") % (a_data['basewhitelist'], b_data['basewhitelist']))
|
||||
output.append("basewhitelist changed from '%s' to '%s'" % (a_data['basewhitelist'], b_data['basewhitelist']))
|
||||
if a_data['basewhitelist'] and b_data['basewhitelist']:
|
||||
output.append("changed items: %s" % a_data['basewhitelist'].symmetric_difference(b_data['basewhitelist']))
|
||||
|
||||
if 'taskwhitelist' in a_data and a_data['taskwhitelist'] != b_data['taskwhitelist']:
|
||||
output.append(color_format("{color_title}taskwhitelist changed{color_default} from '%s' to '%s'") % (a_data['taskwhitelist'], b_data['taskwhitelist']))
|
||||
output.append("taskwhitelist changed from '%s' to '%s'" % (a_data['taskwhitelist'], b_data['taskwhitelist']))
|
||||
if a_data['taskwhitelist'] and b_data['taskwhitelist']:
|
||||
output.append("changed items: %s" % a_data['taskwhitelist'].symmetric_difference(b_data['taskwhitelist']))
|
||||
|
||||
if a_data['taskdeps'] != b_data['taskdeps']:
|
||||
output.append(color_format("{color_title}Task dependencies changed{color_default} from:\n%s\nto:\n%s") % (sorted(a_data['taskdeps']), sorted(b_data['taskdeps'])))
|
||||
output.append("Task dependencies changed from:\n%s\nto:\n%s" % (sorted(a_data['taskdeps']), sorted(b_data['taskdeps'])))
|
||||
|
||||
if a_data['basehash'] != b_data['basehash'] and not collapsed:
|
||||
output.append(color_format("{color_title}basehash changed{color_default} from %s to %s") % (a_data['basehash'], b_data['basehash']))
|
||||
if a_data['basehash'] != b_data['basehash']:
|
||||
output.append("basehash changed from %s to %s" % (a_data['basehash'], b_data['basehash']))
|
||||
|
||||
changed, added, removed = dict_diff(a_data['gendeps'], b_data['gendeps'], a_data['basewhitelist'] & b_data['basewhitelist'])
|
||||
if changed:
|
||||
for dep in changed:
|
||||
output.append(color_format("{color_title}List of dependencies for variable %s changed from '{color_default}%s{color_title}' to '{color_default}%s{color_title}'") % (dep, a_data['gendeps'][dep], b_data['gendeps'][dep]))
|
||||
output.append("List of dependencies for variable %s changed from '%s' to '%s'" % (dep, a_data['gendeps'][dep], b_data['gendeps'][dep]))
|
||||
if a_data['gendeps'][dep] and b_data['gendeps'][dep]:
|
||||
output.append("changed items: %s" % a_data['gendeps'][dep].symmetric_difference(b_data['gendeps'][dep]))
|
||||
if added:
|
||||
for dep in added:
|
||||
output.append(color_format("{color_title}Dependency on variable %s was added") % (dep))
|
||||
output.append("Dependency on variable %s was added" % (dep))
|
||||
if removed:
|
||||
for dep in removed:
|
||||
output.append(color_format("{color_title}Dependency on Variable %s was removed") % (dep))
|
||||
output.append("Dependency on Variable %s was removed" % (dep))
|
||||
|
||||
|
||||
changed, added, removed = dict_diff(a_data['varvals'], b_data['varvals'])
|
||||
if changed:
|
||||
for dep in changed:
|
||||
oldval = a_data['varvals'][dep]
|
||||
newval = b_data['varvals'][dep]
|
||||
if newval and oldval and ('\n' in oldval or '\n' in newval):
|
||||
diff = difflib.unified_diff(oldval.splitlines(), newval.splitlines(), lineterm='')
|
||||
# Cut off the first two lines, since we aren't interested in
|
||||
# the old/new filename (they are blank anyway in this case)
|
||||
difflines = list(diff)[2:]
|
||||
if color:
|
||||
# Add colour to diff output
|
||||
for i, line in enumerate(difflines):
|
||||
if line.startswith('+'):
|
||||
line = color_format('{color_add}{line}', line=line)
|
||||
difflines[i] = line
|
||||
elif line.startswith('-'):
|
||||
line = color_format('{color_remove}{line}', line=line)
|
||||
difflines[i] = line
|
||||
output.append(color_format("{color_title}Variable {var} value changed:{color_default}\n{diff}", var=dep, diff='\n'.join(difflines)))
|
||||
elif newval and oldval and (' ' in oldval or ' ' in newval):
|
||||
output.append(color_format("{color_title}Variable {var} value changed:{color_default}\n{diff}", var=dep, diff=worddiff_str(oldval, newval, colors)))
|
||||
else:
|
||||
output.append(color_format("{color_title}Variable {var} value changed from '{color_default}{oldval}{color_title}' to '{color_default}{newval}{color_title}'{color_default}", var=dep, oldval=oldval, newval=newval))
|
||||
|
||||
if not 'file_checksum_values' in a_data:
|
||||
a_data['file_checksum_values'] = {}
|
||||
if not 'file_checksum_values' in b_data:
|
||||
b_data['file_checksum_values'] = {}
|
||||
output.append("Variable %s value changed from '%s' to '%s'" % (dep, a_data['varvals'][dep], b_data['varvals'][dep]))
|
||||
|
||||
changed, added, removed = file_checksums_diff(a_data['file_checksum_values'], b_data['file_checksum_values'])
|
||||
if changed:
|
||||
for f, old, new in changed:
|
||||
output.append(color_format("{color_title}Checksum for file %s changed{color_default} from %s to %s") % (f, old, new))
|
||||
output.append("Checksum for file %s changed from %s to %s" % (f, old, new))
|
||||
if added:
|
||||
for f in added:
|
||||
output.append(color_format("{color_title}Dependency on checksum of file %s was added") % (f))
|
||||
output.append("Dependency on checksum of file %s was added" % (f))
|
||||
if removed:
|
||||
for f in removed:
|
||||
output.append(color_format("{color_title}Dependency on checksum of file %s was removed") % (f))
|
||||
|
||||
if not 'runtaskdeps' in a_data:
|
||||
a_data['runtaskdeps'] = {}
|
||||
if not 'runtaskdeps' in b_data:
|
||||
b_data['runtaskdeps'] = {}
|
||||
|
||||
if not collapsed:
|
||||
if len(a_data['runtaskdeps']) != len(b_data['runtaskdeps']):
|
||||
changed = ["Number of task dependencies changed"]
|
||||
else:
|
||||
changed = []
|
||||
for idx, task in enumerate(a_data['runtaskdeps']):
|
||||
a = a_data['runtaskdeps'][idx]
|
||||
b = b_data['runtaskdeps'][idx]
|
||||
if a_data['runtaskhashes'][a] != b_data['runtaskhashes'][b] and not collapsed:
|
||||
changed.append("%s with hash %s\n changed to\n%s with hash %s" % (clean_basepath(a), a_data['runtaskhashes'][a], clean_basepath(b), b_data['runtaskhashes'][b]))
|
||||
|
||||
if changed:
|
||||
clean_a = clean_basepaths_list(a_data['runtaskdeps'])
|
||||
clean_b = clean_basepaths_list(b_data['runtaskdeps'])
|
||||
if clean_a != clean_b:
|
||||
output.append(color_format("{color_title}runtaskdeps changed:{color_default}\n%s") % list_inline_diff(clean_a, clean_b, colors))
|
||||
else:
|
||||
output.append(color_format("{color_title}runtaskdeps changed:"))
|
||||
output.append("\n".join(changed))
|
||||
output.append("Dependency on checksum of file %s was removed" % (f))
|
||||
|
||||
|
||||
if 'runtaskhashes' in a_data and 'runtaskhashes' in b_data:
|
||||
@@ -615,7 +418,7 @@ def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
|
||||
#output.append("Dependency on task %s was replaced by %s with same hash" % (dep, bdep))
|
||||
bdep_found = True
|
||||
if not bdep_found:
|
||||
output.append(color_format("{color_title}Dependency on task %s was added{color_default} with hash %s") % (clean_basepath(dep), b[dep]))
|
||||
output.append("Dependency on task %s was added with hash %s" % (clean_basepath(dep), b[dep]))
|
||||
if removed:
|
||||
for dep in removed:
|
||||
adep_found = False
|
||||
@@ -625,70 +428,30 @@ def compare_sigfiles(a, b, recursecb=None, color=False, collapsed=False):
|
||||
#output.append("Dependency on task %s was replaced by %s with same hash" % (adep, dep))
|
||||
adep_found = True
|
||||
if not adep_found:
|
||||
output.append(color_format("{color_title}Dependency on task %s was removed{color_default} with hash %s") % (clean_basepath(dep), a[dep]))
|
||||
output.append("Dependency on task %s was removed with hash %s" % (clean_basepath(dep), a[dep]))
|
||||
if changed:
|
||||
for dep in changed:
|
||||
if not collapsed:
|
||||
output.append(color_format("{color_title}Hash for dependent task %s changed{color_default} from %s to %s") % (clean_basepath(dep), a[dep], b[dep]))
|
||||
output.append("Hash for dependent task %s changed from %s to %s" % (clean_basepath(dep), a[dep], b[dep]))
|
||||
if callable(recursecb):
|
||||
# If a dependent hash changed, might as well print the line above and then defer to the changes in
|
||||
# that hash since in all likelyhood, they're the same changes this task also saw.
|
||||
recout = recursecb(dep, a[dep], b[dep])
|
||||
if recout:
|
||||
if collapsed:
|
||||
output.extend(recout)
|
||||
else:
|
||||
# If a dependent hash changed, might as well print the line above and then defer to the changes in
|
||||
# that hash since in all likelyhood, they're the same changes this task also saw.
|
||||
output = [output[-1]] + recout
|
||||
output = [output[-1]] + recout
|
||||
|
||||
a_taint = a_data.get('taint', None)
|
||||
b_taint = b_data.get('taint', None)
|
||||
if a_taint != b_taint:
|
||||
output.append(color_format("{color_title}Taint (by forced/invalidated task) changed{color_default} from %s to %s") % (a_taint, b_taint))
|
||||
output.append("Taint (by forced/invalidated task) changed from %s to %s" % (a_taint, b_taint))
|
||||
|
||||
return output
|
||||
|
||||
|
||||
def calc_basehash(sigdata):
    task = sigdata['task']
    basedata = sigdata['varvals'][task]

    if basedata is None:
        basedata = ''

    alldeps = sigdata['taskdeps']
    for dep in alldeps:
        basedata = basedata + dep
        val = sigdata['varvals'][dep]
        if val is not None:
            basedata = basedata + str(val)

    return hashlib.md5(basedata.encode("utf-8")).hexdigest()

def calc_taskhash(sigdata):
    data = sigdata['basehash']

    for dep in sigdata['runtaskdeps']:
        data = data + sigdata['runtaskhashes'][dep]

    for c in sigdata['file_checksum_values']:
        if c[1]:
            data = data + c[1]

    if 'taint' in sigdata:
        if 'nostamp:' in sigdata['taint']:
            data = data + sigdata['taint'][8:]
        else:
            data = data + sigdata['taint']

    return hashlib.md5(data.encode("utf-8")).hexdigest()


def dump_sigfile(a):
    output = []

    with open(a, 'rb') as f:
        p1 = pickle.Unpickler(f)
        a_data = p1.load()
    p1 = pickle.Unpickler(open(a, "rb"))
    a_data = p1.load()

    output.append("basewhitelist: %s" % (a_data['basewhitelist']))

@@ -717,13 +480,4 @@ def dump_sigfile(a):
    if 'taint' in a_data:
        output.append("Tainted (by forced/invalidated task): %s" % a_data['taint'])

    if 'task' in a_data:
        computed_basehash = calc_basehash(a_data)
        output.append("Computed base hash is %s and from file %s" % (computed_basehash, a_data['basehash']))
    else:
        output.append("Unable to compute base hash")

    computed_taskhash = calc_taskhash(a_data)
    output.append("Computed task hash is %s" % computed_taskhash)

    return output

@@ -37,24 +37,27 @@ def re_match_strings(target, strings):
    return any(name == target or re.match(name, target)
               for name in strings)

class TaskEntry:
    def __init__(self):
        self.tdepends = []
        self.idepends = []
        self.irdepends = []

class TaskData:
    """
    BitBake Task Data implementation
    """
    def __init__(self, abort = True, skiplist = None, allowincomplete = False):
    def __init__(self, abort = True, tryaltconfigs = False, skiplist = None):
        self.build_names_index = []
        self.run_names_index = []
        self.fn_index = []

        self.build_targets = {}
        self.run_targets = {}

        self.external_targets = []

        self.seenfns = []
        self.taskentries = {}
        self.tasks_fnid = []
        self.tasks_name = []
        self.tasks_tdepends = []
        self.tasks_idepends = []
        self.tasks_irdepends = []
        # Cache to speed up task ID lookups
        self.tasks_lookup = {}

        self.depids = {}
        self.rdepids = {}
@@ -63,13 +66,95 @@ class TaskData:

        self.failed_deps = []
        self.failed_rdeps = []
        self.failed_fns = []
        self.failed_fnids = []

        self.abort = abort
        self.allowincomplete = allowincomplete
        self.tryaltconfigs = tryaltconfigs

        self.skiplist = skiplist

def getbuild_id(self, name):
|
||||
"""
|
||||
Return an ID number for the build target name.
|
||||
If it doesn't exist, create one.
|
||||
"""
|
||||
if not name in self.build_names_index:
|
||||
self.build_names_index.append(name)
|
||||
return len(self.build_names_index) - 1
|
||||
|
||||
return self.build_names_index.index(name)
|
||||
|
||||
def getrun_id(self, name):
|
||||
"""
|
||||
Return an ID number for the run target name.
|
||||
If it doesn't exist, create one.
|
||||
"""
|
||||
if not name in self.run_names_index:
|
||||
self.run_names_index.append(name)
|
||||
return len(self.run_names_index) - 1
|
||||
|
||||
return self.run_names_index.index(name)
|
||||
|
||||
def getfn_id(self, name):
|
||||
"""
|
||||
Return an ID number for the filename.
|
||||
If it doesn't exist, create one.
|
||||
"""
|
||||
if not name in self.fn_index:
|
||||
self.fn_index.append(name)
|
||||
return len(self.fn_index) - 1
|
||||
|
||||
return self.fn_index.index(name)
|
||||
|
||||
def gettask_ids(self, fnid):
|
||||
"""
|
||||
Return an array of the ID numbers matching a given fnid.
|
||||
"""
|
||||
ids = []
|
||||
if fnid in self.tasks_lookup:
|
||||
for task in self.tasks_lookup[fnid]:
|
||||
ids.append(self.tasks_lookup[fnid][task])
|
||||
return ids
|
||||
|
||||
def gettask_id_fromfnid(self, fnid, task):
|
||||
"""
|
||||
Return an ID number for the task matching fnid and task.
|
||||
"""
|
||||
if fnid in self.tasks_lookup:
|
||||
if task in self.tasks_lookup[fnid]:
|
||||
return self.tasks_lookup[fnid][task]
|
||||
|
||||
return None
|
||||
|
||||
def gettask_id(self, fn, task, create = True):
|
||||
"""
|
||||
Return an ID number for the task matching fn and task.
|
||||
If it doesn't exist, create one by default.
|
||||
Optionally return None instead.
|
||||
"""
|
||||
fnid = self.getfn_id(fn)
|
||||
|
||||
if fnid in self.tasks_lookup:
|
||||
if task in self.tasks_lookup[fnid]:
|
||||
return self.tasks_lookup[fnid][task]
|
||||
|
||||
if not create:
|
||||
return None
|
||||
|
||||
self.tasks_name.append(task)
|
||||
self.tasks_fnid.append(fnid)
|
||||
self.tasks_tdepends.append([])
|
||||
self.tasks_idepends.append([])
|
||||
self.tasks_irdepends.append([])
|
||||
|
||||
listid = len(self.tasks_name) - 1
|
||||
|
||||
if fnid not in self.tasks_lookup:
|
||||
self.tasks_lookup[fnid] = {}
|
||||
self.tasks_lookup[fnid][task] = listid
|
||||
|
||||
return listid
|
||||
|
||||
def add_tasks(self, fn, dataCache):
|
||||
"""
|
||||
Add tasks for a given fn to the database
|
||||
@@ -77,60 +162,58 @@ class TaskData:
|
||||
|
||||
task_deps = dataCache.task_deps[fn]
|
||||
|
||||
if fn in self.failed_fns:
|
||||
fnid = self.getfn_id(fn)
|
||||
|
||||
if fnid in self.failed_fnids:
|
||||
bb.msg.fatal("TaskData", "Trying to re-add a failed file? Something is broken...")
|
||||
|
||||
# Check if we've already seen this fn
|
||||
if fn in self.seenfns:
|
||||
if fnid in self.tasks_fnid:
|
||||
return
|
||||
|
||||
self.seenfns.append(fn)
|
||||
|
||||
self.add_extra_deps(fn, dataCache)
|
||||
|
||||
# Common code for dep_name/depends = 'depends'/idepends and 'rdepends'/irdepends
|
||||
def handle_deps(task, dep_name, depends, seen):
|
||||
if dep_name in task_deps and task in task_deps[dep_name]:
|
||||
ids = []
|
||||
for dep in task_deps[dep_name][task].split():
|
||||
if dep:
|
||||
parts = dep.split(":")
|
||||
if len(parts) != 2:
|
||||
bb.msg.fatal("TaskData", "Error for %s:%s[%s], dependency %s in '%s' does not contain exactly one ':' character.\n Task '%s' should be specified in the form 'packagename:task'" % (fn, task, dep_name, dep, task_deps[dep_name][task], dep_name))
|
||||
ids.append((parts[0], parts[1]))
|
||||
seen(parts[0])
|
||||
depends.extend(ids)
|
||||
|
||||
for task in task_deps['tasks']:
|
||||
|
||||
tid = "%s:%s" % (fn, task)
|
||||
self.taskentries[tid] = TaskEntry()
|
||||
|
||||
# Work out task dependencies
|
||||
parentids = []
|
||||
for dep in task_deps['parents'][task]:
|
||||
if dep not in task_deps['tasks']:
|
||||
bb.debug(2, "Not adding dependeny of %s on %s since %s does not exist" % (task, dep, dep))
|
||||
continue
|
||||
parentid = "%s:%s" % (fn, dep)
|
||||
parentid = self.gettask_id(fn, dep)
|
||||
parentids.append(parentid)
|
||||
self.taskentries[tid].tdepends.extend(parentids)
|
||||
taskid = self.gettask_id(fn, task)
|
||||
self.tasks_tdepends[taskid].extend(parentids)
|
||||
|
||||
# Touch all intertask dependencies
|
||||
handle_deps(task, 'depends', self.taskentries[tid].idepends, self.seen_build_target)
|
||||
handle_deps(task, 'rdepends', self.taskentries[tid].irdepends, self.seen_run_target)
|
||||
if 'depends' in task_deps and task in task_deps['depends']:
|
||||
ids = []
|
||||
for dep in task_deps['depends'][task].split():
|
||||
if dep:
|
||||
if ":" not in dep:
|
||||
bb.msg.fatal("TaskData", "Error for %s, dependency %s does not contain ':' character\n. Task 'depends' should be specified in the form 'packagename:task'" % (fn, dep))
|
||||
ids.append(((self.getbuild_id(dep.split(":")[0])), dep.split(":")[1]))
|
||||
self.tasks_idepends[taskid].extend(ids)
|
||||
if 'rdepends' in task_deps and task in task_deps['rdepends']:
|
||||
ids = []
|
||||
for dep in task_deps['rdepends'][task].split():
|
||||
if dep:
|
||||
if ":" not in dep:
|
||||
bb.msg.fatal("TaskData", "Error for %s, dependency %s does not contain ':' character\n. Task 'rdepends' should be specified in the form 'packagename:task'" % (fn, dep))
|
||||
ids.append(((self.getrun_id(dep.split(":")[0])), dep.split(":")[1]))
|
||||
self.tasks_irdepends[taskid].extend(ids)
|
||||
|
||||
|
||||
# Work out build dependencies
|
||||
if not fn in self.depids:
|
||||
dependids = set()
|
||||
if not fnid in self.depids:
|
||||
dependids = {}
|
||||
for depend in dataCache.deps[fn]:
|
||||
dependids.add(depend)
|
||||
self.depids[fn] = list(dependids)
|
||||
dependids[self.getbuild_id(depend)] = None
|
||||
self.depids[fnid] = dependids.keys()
|
||||
logger.debug(2, "Added dependencies %s for %s", str(dataCache.deps[fn]), fn)
|
||||
|
||||
# Work out runtime dependencies
|
||||
if not fn in self.rdepids:
|
||||
rdependids = set()
|
||||
if not fnid in self.rdepids:
|
||||
rdependids = {}
|
||||
rdepends = dataCache.rundeps[fn]
|
||||
rrecs = dataCache.runrecs[fn]
|
||||
rdependlist = []
|
||||
@@ -138,48 +221,33 @@ class TaskData:
|
||||
for package in rdepends:
|
||||
for rdepend in rdepends[package]:
|
||||
rdependlist.append(rdepend)
|
||||
rdependids.add(rdepend)
|
||||
rdependids[self.getrun_id(rdepend)] = None
|
||||
for package in rrecs:
|
||||
for rdepend in rrecs[package]:
|
||||
rreclist.append(rdepend)
|
||||
rdependids.add(rdepend)
|
||||
rdependids[self.getrun_id(rdepend)] = None
|
||||
if rdependlist:
|
||||
logger.debug(2, "Added runtime dependencies %s for %s", str(rdependlist), fn)
|
||||
if rreclist:
|
||||
logger.debug(2, "Added runtime recommendations %s for %s", str(rreclist), fn)
|
||||
self.rdepids[fn] = list(rdependids)
|
||||
self.rdepids[fnid] = rdependids.keys()
|
||||
|
||||
for dep in self.depids[fn]:
|
||||
self.seen_build_target(dep)
|
||||
for dep in self.depids[fnid]:
|
||||
if dep in self.failed_deps:
|
||||
self.fail_fn(fn)
|
||||
self.fail_fnid(fnid)
|
||||
return
|
||||
for dep in self.rdepids[fn]:
|
||||
self.seen_run_target(dep)
|
||||
for dep in self.rdepids[fnid]:
|
||||
if dep in self.failed_rdeps:
|
||||
self.fail_fn(fn)
|
||||
self.fail_fnid(fnid)
|
||||
return
|
||||
|
||||
def add_extra_deps(self, fn, dataCache):
|
||||
func = dataCache.extradepsfunc.get(fn, None)
|
||||
if func:
|
||||
bb.providers.buildWorldTargetList(dataCache)
|
||||
pn = dataCache.pkg_fn[fn]
|
||||
params = {'deps': dataCache.deps[fn],
|
||||
'world_target': dataCache.world_target,
|
||||
'pkg_pn': dataCache.pkg_pn,
|
||||
'self_pn': pn}
|
||||
funcname = '_%s_calculate_extra_depends' % pn.replace('-', '_')
|
||||
paramlist = ','.join(params.keys())
|
||||
func = 'def %s(%s):\n%s\n\n%s(%s)' % (funcname, paramlist, func, funcname, paramlist)
|
||||
bb.utils.better_exec(func, params)
|
||||
|
||||
|
||||
def have_build_target(self, target):
|
||||
"""
|
||||
Have we a build target matching this name?
|
||||
"""
|
||||
if target in self.build_targets and self.build_targets[target]:
|
||||
targetid = self.getbuild_id(target)
|
||||
|
||||
if targetid in self.build_targets:
|
||||
return True
|
||||
return False
|
||||
|
||||
@@ -187,54 +255,50 @@ class TaskData:
|
||||
"""
|
||||
Have we a runtime target matching this name?
|
||||
"""
|
||||
if target in self.run_targets and self.run_targets[target]:
|
||||
targetid = self.getrun_id(target)
|
||||
|
||||
if targetid in self.run_targets:
|
||||
return True
|
||||
return False
|
||||
|
||||
def seen_build_target(self, name):
|
||||
"""
|
||||
Maintain a list of build targets
|
||||
"""
|
||||
if name not in self.build_targets:
|
||||
self.build_targets[name] = []
|
||||
|
||||
def add_build_target(self, fn, item):
|
||||
"""
|
||||
Add a build target.
|
||||
If already present, append the provider fn to the list
|
||||
"""
|
||||
if item in self.build_targets:
|
||||
if fn in self.build_targets[item]:
|
||||
return
|
||||
self.build_targets[item].append(fn)
|
||||
return
|
||||
self.build_targets[item] = [fn]
|
||||
targetid = self.getbuild_id(item)
|
||||
fnid = self.getfn_id(fn)
|
||||
|
||||
def seen_run_target(self, name):
|
||||
"""
|
||||
Maintain a list of runtime build targets
|
||||
"""
|
||||
if name not in self.run_targets:
|
||||
self.run_targets[name] = []
|
||||
if targetid in self.build_targets:
|
||||
if fnid in self.build_targets[targetid]:
|
||||
return
|
||||
self.build_targets[targetid].append(fnid)
|
||||
return
|
||||
self.build_targets[targetid] = [fnid]
|
||||
|
||||
def add_runtime_target(self, fn, item):
|
||||
"""
|
||||
Add a runtime target.
|
||||
If already present, append the provider fn to the list
|
||||
"""
|
||||
if item in self.run_targets:
|
||||
if fn in self.run_targets[item]:
|
||||
return
|
||||
self.run_targets[item].append(fn)
|
||||
return
|
||||
self.run_targets[item] = [fn]
|
||||
targetid = self.getrun_id(item)
|
||||
fnid = self.getfn_id(fn)
|
||||
|
||||
def mark_external_target(self, target):
|
||||
if targetid in self.run_targets:
|
||||
if fnid in self.run_targets[targetid]:
|
||||
return
|
||||
self.run_targets[targetid].append(fnid)
|
||||
return
|
||||
self.run_targets[targetid] = [fnid]
|
||||
|
||||
def mark_external_target(self, item):
|
||||
"""
|
||||
Mark a build target as being externally requested
|
||||
"""
|
||||
if target not in self.external_targets:
|
||||
self.external_targets.append(target)
|
||||
targetid = self.getbuild_id(item)
|
||||
|
||||
if targetid not in self.external_targets:
|
||||
self.external_targets.append(targetid)
|
||||
|
||||
def get_unresolved_build_targets(self, dataCache):
|
||||
"""
|
||||
@@ -242,12 +306,12 @@ class TaskData:
|
||||
are unknown.
|
||||
"""
|
||||
unresolved = []
|
||||
for target in self.build_targets:
|
||||
for target in self.build_names_index:
|
||||
if re_match_strings(target, dataCache.ignored_dependencies):
|
||||
continue
|
||||
if target in self.failed_deps:
|
||||
if self.build_names_index.index(target) in self.failed_deps:
|
||||
continue
|
||||
if not self.build_targets[target]:
|
||||
if not self.have_build_target(target):
|
||||
unresolved.append(target)
|
||||
return unresolved
|
||||
|
||||
@@ -257,12 +321,12 @@ class TaskData:
|
||||
are unknown.
|
||||
"""
|
||||
unresolved = []
|
||||
for target in self.run_targets:
|
||||
for target in self.run_names_index:
|
||||
if re_match_strings(target, dataCache.ignored_dependencies):
|
||||
continue
|
||||
if target in self.failed_rdeps:
|
||||
if self.run_names_index.index(target) in self.failed_rdeps:
|
||||
continue
|
||||
if not self.run_targets[target]:
|
||||
if not self.have_runtime_target(target):
|
||||
unresolved.append(target)
|
||||
return unresolved
|
||||
|
||||
@@ -270,26 +334,50 @@ class TaskData:
|
||||
"""
|
||||
Return a list of providers of item
|
||||
"""
|
||||
return self.build_targets[item]
|
||||
targetid = self.getbuild_id(item)
|
||||
|
||||
def get_dependees(self, item):
|
||||
return self.build_targets[targetid]
|
||||
|
||||
def get_dependees(self, itemid):
|
||||
"""
|
||||
Return a list of targets which depend on item
|
||||
"""
|
||||
dependees = []
|
||||
for fn in self.depids:
|
||||
if item in self.depids[fn]:
|
||||
dependees.append(fn)
|
||||
for fnid in self.depids:
|
||||
if itemid in self.depids[fnid]:
|
||||
dependees.append(fnid)
|
||||
return dependees
|
||||
|
||||
def get_rdependees(self, item):
|
||||
def get_dependees_str(self, item):
|
||||
"""
|
||||
Return a list of targets which depend on item as a user readable string
|
||||
"""
|
||||
itemid = self.getbuild_id(item)
|
||||
dependees = []
|
||||
for fnid in self.depids:
|
||||
if itemid in self.depids[fnid]:
|
||||
dependees.append(self.fn_index[fnid])
|
||||
return dependees
|
||||
|
||||
def get_rdependees(self, itemid):
|
||||
"""
|
||||
Return a list of targets which depend on runtime item
|
||||
"""
|
||||
dependees = []
|
||||
for fn in self.rdepids:
|
||||
if item in self.rdepids[fn]:
|
||||
dependees.append(fn)
|
||||
for fnid in self.rdepids:
|
||||
if itemid in self.rdepids[fnid]:
|
||||
dependees.append(fnid)
|
||||
return dependees
|
||||
|
||||
def get_rdependees_str(self, item):
|
||||
"""
|
||||
Return a list of targets which depend on runtime item as a user readable string
|
||||
"""
|
||||
itemid = self.getrun_id(item)
|
||||
dependees = []
|
||||
for fnid in self.rdepids:
|
||||
if itemid in self.rdepids[fnid]:
|
||||
dependees.append(self.fn_index[fnid])
|
||||
return dependees
|
||||
|
||||
def get_reasons(self, item, runtime=False):
|
||||
@@ -325,7 +413,7 @@ class TaskData:
|
||||
except bb.providers.NoProvider:
|
||||
if self.abort:
|
||||
raise
|
||||
self.remove_buildtarget(item)
|
||||
self.remove_buildtarget(self.getbuild_id(item))
|
||||
|
||||
self.mark_external_target(item)
|
||||
|
||||
@@ -340,14 +428,7 @@ class TaskData:
|
||||
return
|
||||
|
||||
if not item in dataCache.providers:
|
||||
close_matches = self.get_close_matches(item, list(dataCache.providers.keys()))
|
||||
# Is it in RuntimeProviders ?
|
||||
all_p = bb.providers.getRuntimeProviders(dataCache, item)
|
||||
for fn in all_p:
|
||||
new = dataCache.pkg_fn[fn] + " RPROVIDES " + item
|
||||
if new not in close_matches:
|
||||
close_matches.append(new)
|
||||
bb.event.fire(bb.event.NoProvider(item, dependees=self.get_dependees(item), reasons=self.get_reasons(item), close_matches=close_matches), cfgData)
|
||||
bb.event.fire(bb.event.NoProvider(item, dependees=self.get_dependees_str(item), reasons=self.get_reasons(item), close_matches=self.get_close_matches(item, dataCache.providers.keys())), cfgData)
|
||||
raise bb.providers.NoProvider(item)
|
||||
|
||||
if self.have_build_target(item):
|
||||
@@ -356,10 +437,10 @@ class TaskData:
|
||||
all_p = dataCache.providers[item]
|
||||
|
||||
eligible, foundUnique = bb.providers.filterProviders(all_p, item, cfgData, dataCache)
|
||||
eligible = [p for p in eligible if not p in self.failed_fns]
|
||||
eligible = [p for p in eligible if not self.getfn_id(p) in self.failed_fnids]
|
||||
|
||||
if not eligible:
|
||||
bb.event.fire(bb.event.NoProvider(item, dependees=self.get_dependees(item), reasons=["No eligible PROVIDERs exist for '%s'" % item]), cfgData)
|
||||
bb.event.fire(bb.event.NoProvider(item, dependees=self.get_dependees_str(item), reasons=["No eligible PROVIDERs exist for '%s'" % item]), cfgData)
|
||||
raise bb.providers.NoProvider(item)
|
||||
|
||||
if len(eligible) > 1 and foundUnique == False:
|
||||
@@ -371,7 +452,8 @@ class TaskData:
|
||||
self.consider_msgs_cache.append(item)
|
||||
|
||||
for fn in eligible:
|
||||
if fn in self.failed_fns:
|
||||
fnid = self.getfn_id(fn)
|
||||
if fnid in self.failed_fnids:
|
||||
continue
|
||||
logger.debug(2, "adding %s to satisfy %s", fn, item)
|
||||
self.add_build_target(fn, item)
|
||||
@@ -395,14 +477,14 @@ class TaskData:
|
||||
all_p = bb.providers.getRuntimeProviders(dataCache, item)
|
||||
|
||||
if not all_p:
|
||||
bb.event.fire(bb.event.NoProvider(item, runtime=True, dependees=self.get_rdependees(item), reasons=self.get_reasons(item, True)), cfgData)
|
||||
bb.event.fire(bb.event.NoProvider(item, runtime=True, dependees=self.get_rdependees_str(item), reasons=self.get_reasons(item, True)), cfgData)
|
||||
raise bb.providers.NoRProvider(item)
|
||||
|
||||
eligible, numberPreferred = bb.providers.filterProvidersRunTime(all_p, item, cfgData, dataCache)
|
||||
eligible = [p for p in eligible if not p in self.failed_fns]
|
||||
eligible = [p for p in eligible if not self.getfn_id(p) in self.failed_fnids]
|
||||
|
||||
if not eligible:
|
||||
bb.event.fire(bb.event.NoProvider(item, runtime=True, dependees=self.get_rdependees(item), reasons=["No eligible RPROVIDERs exist for '%s'" % item]), cfgData)
|
||||
bb.event.fire(bb.event.NoProvider(item, runtime=True, dependees=self.get_rdependees_str(item), reasons=["No eligible RPROVIDERs exist for '%s'" % item]), cfgData)
|
||||
raise bb.providers.NoRProvider(item)
|
||||
|
||||
if len(eligible) > 1 and numberPreferred == 0:
|
||||
@@ -424,80 +506,80 @@ class TaskData:
|
||||
|
||||
# run through the list until we find one that we can build
|
||||
for fn in eligible:
|
||||
if fn in self.failed_fns:
|
||||
fnid = self.getfn_id(fn)
|
||||
if fnid in self.failed_fnids:
|
||||
continue
|
||||
logger.debug(2, "adding '%s' to satisfy runtime '%s'", fn, item)
|
||||
self.add_runtime_target(fn, item)
|
||||
self.add_tasks(fn, dataCache)
|
||||
|
||||
def fail_fn(self, fn, missing_list=None):
|
||||
def fail_fnid(self, fnid, missing_list = []):
|
||||
"""
|
||||
Mark a file as failed (unbuildable)
|
||||
Remove any references from build and runtime provider lists
|
||||
|
||||
missing_list, A list of missing requirements for this target
|
||||
"""
|
||||
if fn in self.failed_fns:
|
||||
if fnid in self.failed_fnids:
|
||||
return
|
||||
if not missing_list:
|
||||
missing_list = []
|
||||
logger.debug(1, "File '%s' is unbuildable, removing...", fn)
|
||||
self.failed_fns.append(fn)
|
||||
logger.debug(1, "File '%s' is unbuildable, removing...", self.fn_index[fnid])
|
||||
self.failed_fnids.append(fnid)
|
||||
for target in self.build_targets:
|
||||
if fn in self.build_targets[target]:
|
||||
self.build_targets[target].remove(fn)
|
||||
if fnid in self.build_targets[target]:
|
||||
self.build_targets[target].remove(fnid)
|
||||
if len(self.build_targets[target]) == 0:
|
||||
self.remove_buildtarget(target, missing_list)
|
||||
for target in self.run_targets:
|
||||
if fn in self.run_targets[target]:
|
||||
self.run_targets[target].remove(fn)
|
||||
if fnid in self.run_targets[target]:
|
||||
self.run_targets[target].remove(fnid)
|
||||
if len(self.run_targets[target]) == 0:
|
||||
self.remove_runtarget(target, missing_list)
|
||||
|
||||
def remove_buildtarget(self, target, missing_list=None):
|
||||
def remove_buildtarget(self, targetid, missing_list = []):
|
||||
"""
|
||||
Mark a build target as failed (unbuildable)
|
||||
Trigger removal of any files that have this as a dependency
|
||||
"""
|
||||
if not missing_list:
|
||||
missing_list = [target]
|
||||
missing_list = [self.build_names_index[targetid]]
|
||||
else:
|
||||
missing_list = [target] + missing_list
|
||||
logger.verbose("Target '%s' is unbuildable, removing...\nMissing or unbuildable dependency chain was: %s", target, missing_list)
|
||||
self.failed_deps.append(target)
|
||||
dependees = self.get_dependees(target)
|
||||
for fn in dependees:
|
||||
self.fail_fn(fn, missing_list)
|
||||
for tid in self.taskentries:
|
||||
for (idepend, idependtask) in self.taskentries[tid].idepends:
|
||||
if idepend == target:
|
||||
fn = tid.rsplit(":",1)[0]
|
||||
self.fail_fn(fn, missing_list)
|
||||
missing_list = [self.build_names_index[targetid]] + missing_list
|
||||
logger.verbose("Target '%s' is unbuildable, removing...\nMissing or unbuildable dependency chain was: %s", self.build_names_index[targetid], missing_list)
|
||||
self.failed_deps.append(targetid)
|
||||
dependees = self.get_dependees(targetid)
|
||||
for fnid in dependees:
|
||||
self.fail_fnid(fnid, missing_list)
|
||||
for taskid in xrange(len(self.tasks_idepends)):
|
||||
idepends = self.tasks_idepends[taskid]
|
||||
for (idependid, idependtask) in idepends:
|
||||
if idependid == targetid:
|
||||
self.fail_fnid(self.tasks_fnid[taskid], missing_list)
|
||||
|
||||
if self.abort and target in self.external_targets:
|
||||
if self.abort and targetid in self.external_targets:
|
||||
target = self.build_names_index[targetid]
|
||||
logger.error("Required build target '%s' has no buildable providers.\nMissing or unbuildable dependency chain was: %s", target, missing_list)
|
||||
raise bb.providers.NoProvider(target)
|
||||
|
||||
def remove_runtarget(self, target, missing_list=None):
|
||||
def remove_runtarget(self, targetid, missing_list = []):
|
||||
"""
|
||||
Mark a run target as failed (unbuildable)
|
||||
Trigger removal of any files that have this as a dependency
|
||||
"""
|
||||
if not missing_list:
|
||||
missing_list = [target]
|
||||
missing_list = [self.run_names_index[targetid]]
|
||||
else:
|
||||
missing_list = [target] + missing_list
|
||||
missing_list = [self.run_names_index[targetid]] + missing_list
|
||||
|
||||
logger.info("Runtime target '%s' is unbuildable, removing...\nMissing or unbuildable dependency chain was: %s", target, missing_list)
|
||||
self.failed_rdeps.append(target)
|
||||
dependees = self.get_rdependees(target)
|
||||
for fn in dependees:
|
||||
self.fail_fn(fn, missing_list)
|
||||
for tid in self.taskentries:
|
||||
for (idepend, idependtask) in self.taskentries[tid].irdepends:
|
||||
if idepend == target:
|
||||
fn = tid.rsplit(":",1)[0]
|
||||
self.fail_fn(fn, missing_list)
|
||||
logger.info("Runtime target '%s' is unbuildable, removing...\nMissing or unbuildable dependency chain was: %s", self.run_names_index[targetid], missing_list)
|
||||
self.failed_rdeps.append(targetid)
|
||||
dependees = self.get_rdependees(targetid)
|
||||
for fnid in dependees:
|
||||
self.fail_fnid(fnid, missing_list)
|
||||
for taskid in xrange(len(self.tasks_irdepends)):
|
||||
irdepends = self.tasks_irdepends[taskid]
|
||||
for (idependid, idependtask) in irdepends:
|
||||
if idependid == targetid:
|
||||
self.fail_fnid(self.tasks_fnid[taskid], missing_list)
|
||||
|
||||
def add_unresolved(self, cfgData, dataCache):
|
||||
"""
|
||||
@@ -511,68 +593,59 @@ class TaskData:
|
||||
self.add_provider_internal(cfgData, dataCache, target)
|
||||
added = added + 1
|
||||
except bb.providers.NoProvider:
|
||||
if self.abort and target in self.external_targets and not self.allowincomplete:
|
||||
targetid = self.getbuild_id(target)
|
||||
if self.abort and targetid in self.external_targets:
|
||||
raise
|
||||
if not self.allowincomplete:
|
||||
self.remove_buildtarget(target)
|
||||
self.remove_buildtarget(targetid)
|
||||
for target in self.get_unresolved_run_targets(dataCache):
|
||||
try:
|
||||
self.add_rprovider(cfgData, dataCache, target)
|
||||
added = added + 1
|
||||
except (bb.providers.NoRProvider, bb.providers.MultipleRProvider):
|
||||
self.remove_runtarget(target)
|
||||
self.remove_runtarget(self.getrun_id(target))
|
||||
logger.debug(1, "Resolved " + str(added) + " extra dependencies")
|
||||
if added == 0:
|
||||
break
|
||||
# self.dump_data()
|
||||
|
||||
def get_providermap(self, prefix=None):
|
||||
provmap = {}
|
||||
for name in self.build_targets:
|
||||
if prefix and not name.startswith(prefix):
|
||||
continue
|
||||
if self.have_build_target(name):
|
||||
provider = self.get_provider(name)
|
||||
if provider:
|
||||
provmap[name] = provider[0]
|
||||
return provmap
|
||||
|
||||
def dump_data(self):
|
||||
"""
|
||||
Dump some debug information on the internal data structures
|
||||
"""
|
||||
logger.debug(3, "build_names:")
|
||||
logger.debug(3, ", ".join(self.build_targets))
|
||||
logger.debug(3, ", ".join(self.build_names_index))
|
||||
|
||||
logger.debug(3, "run_names:")
|
||||
logger.debug(3, ", ".join(self.run_targets))
|
||||
logger.debug(3, ", ".join(self.run_names_index))
|
||||
|
||||
logger.debug(3, "build_targets:")
|
||||
for target in self.build_targets:
|
||||
for buildid in xrange(len(self.build_names_index)):
|
||||
target = self.build_names_index[buildid]
|
||||
targets = "None"
|
||||
if target in self.build_targets:
|
||||
targets = self.build_targets[target]
|
||||
logger.debug(3, " %s: %s", target, targets)
|
||||
if buildid in self.build_targets:
|
||||
targets = self.build_targets[buildid]
|
||||
logger.debug(3, " (%s)%s: %s", buildid, target, targets)
|
||||
|
||||
logger.debug(3, "run_targets:")
|
||||
for target in self.run_targets:
|
||||
for runid in xrange(len(self.run_names_index)):
|
||||
target = self.run_names_index[runid]
|
||||
targets = "None"
|
||||
if target in self.run_targets:
|
||||
targets = self.run_targets[target]
|
||||
logger.debug(3, " %s: %s", target, targets)
|
||||
if runid in self.run_targets:
|
||||
targets = self.run_targets[runid]
|
||||
logger.debug(3, " (%s)%s: %s", runid, target, targets)
|
||||
|
||||
logger.debug(3, "tasks:")
|
||||
for tid in self.taskentries:
|
||||
logger.debug(3, " %s: %s %s %s",
|
||||
tid,
|
||||
self.taskentries[tid].idepends,
|
||||
self.taskentries[tid].irdepends,
|
||||
self.taskentries[tid].tdepends)
|
||||
for task in xrange(len(self.tasks_name)):
|
||||
logger.debug(3, " (%s)%s - %s: %s",
|
||||
task,
|
||||
self.fn_index[self.tasks_fnid[task]],
|
||||
self.tasks_name[task],
|
||||
self.tasks_tdepends[task])
|
||||
|
||||
logger.debug(3, "dependency ids (per fn):")
|
||||
for fn in self.depids:
|
||||
logger.debug(3, " %s: %s", fn, self.depids[fn])
|
||||
for fnid in self.depids:
|
||||
logger.debug(3, " %s %s: %s", fnid, self.fn_index[fnid], self.depids[fnid])
|
||||
|
||||
logger.debug(3, "runtime dependency ids (per fn):")
|
||||
for fn in self.rdepids:
|
||||
logger.debug(3, " %s: %s", fn, self.rdepids[fn])
|
||||
for fnid in self.rdepids:
|
||||
logger.debug(3, " %s %s: %s", fnid, self.fn_index[fnid], self.rdepids[fnid])
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.