mirror of
https://git.yoctoproject.org/poky
synced 2026-01-30 13:28:43 +01:00
Compare commits
403 Commits
1.1_M3.fin
...
bernard
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
c006044611 | ||
|
|
69cf476e36 | ||
|
|
0283822752 | ||
|
|
15c05fc10c | ||
|
|
cc6e20ea98 | ||
|
|
fc7f4b9711 | ||
|
|
4f1611cb8d | ||
|
|
5347bf352b | ||
|
|
d81e13a138 | ||
|
|
3d9db8b275 | ||
|
|
3faa84f835 | ||
|
|
10a8fb437e | ||
|
|
2c24e6b9b9 | ||
|
|
7f0a98f9ee | ||
|
|
47b2f03955 | ||
|
|
a02537187f | ||
|
|
78d092fe7a | ||
|
|
361eda901c | ||
|
|
373d73c7e7 | ||
|
|
b154d10232 | ||
|
|
00af854e96 | ||
|
|
4005aaf3f8 | ||
|
|
5c78a2b02d | ||
|
|
11355f3a7f | ||
|
|
a2283defe2 | ||
|
|
7ce789de38 | ||
|
|
4194c83a56 | ||
|
|
decb8953cd | ||
|
|
d6b531e6a1 | ||
|
|
d412b923ac | ||
|
|
cb69e75b7b | ||
|
|
9b33f20a73 | ||
|
|
00a8552b2b | ||
|
|
ec3aab7b04 | ||
|
|
5c51a88346 | ||
|
|
53bbe30ee7 | ||
|
|
1ca2d4316e | ||
|
|
87d0a3b594 | ||
|
|
c2c0b9f861 | ||
|
|
d1c356ad3d | ||
|
|
4f5622fb01 | ||
|
|
9ee10c93af | ||
|
|
490b71d15d | ||
|
|
60f42f2dc9 | ||
|
|
4dab699e96 | ||
|
|
1ca9ca2c7d | ||
|
|
5359255ce2 | ||
|
|
c9805a0c3c | ||
|
|
3545f453aa | ||
|
|
2319b2d2d7 | ||
|
|
da22a78bd4 | ||
|
|
0a69e60cfc | ||
|
|
2816cc0db8 | ||
|
|
c998000630 | ||
|
|
bf8d577f1d | ||
|
|
e3e50d2c69 | ||
|
|
e5cce8a57d | ||
|
|
bb855dab75 | ||
|
|
7779a1fedc | ||
|
|
1c5171b251 | ||
|
|
5eabb17202 | ||
|
|
b3cb28df9f | ||
|
|
14c9af0056 | ||
|
|
d106d15cad | ||
|
|
72f06800bc | ||
|
|
53b15f2732 | ||
|
|
1b159ff35d | ||
|
|
eabe47ed8c | ||
|
|
5299510bd3 | ||
|
|
679e3ae6de | ||
|
|
6619eff40b | ||
|
|
4e41793b5c | ||
|
|
5b1d38c0ed | ||
|
|
67ef061d39 | ||
|
|
5d3bfbbd18 | ||
|
|
36c9135215 | ||
|
|
437950723f | ||
|
|
5f92b6262f | ||
|
|
65d61e2d11 | ||
|
|
4825604977 | ||
|
|
00996de4eb | ||
|
|
5a9b3fecde | ||
|
|
2343f81fb4 | ||
|
|
d8f4a33500 | ||
|
|
a982aa5786 | ||
|
|
8404b657fa | ||
|
|
e4ab64389e | ||
|
|
9c43741ed6 | ||
|
|
586b7055b3 | ||
|
|
0401043d43 | ||
|
|
40a6a2612e | ||
|
|
2060a0d1f2 | ||
|
|
23a0019b1f | ||
|
|
aa37762223 | ||
|
|
5570e0ae78 | ||
|
|
310897df07 | ||
|
|
ca77772632 | ||
|
|
b8765d4efb | ||
|
|
c36361ed5a | ||
|
|
60ab27d71b | ||
|
|
d739fc53eb | ||
|
|
84d82c0685 | ||
|
|
9361df5ec2 | ||
|
|
5ec8233e2f | ||
|
|
c7301228c0 | ||
|
|
f77efdf544 | ||
|
|
f15a4a7677 | ||
|
|
0e55651fd0 | ||
|
|
8c888bf67a | ||
|
|
6f904b3550 | ||
|
|
5d01c9c296 | ||
|
|
55f72863b9 | ||
|
|
e086bc7c11 | ||
|
|
aa468ee163 | ||
|
|
94d2b2c563 | ||
|
|
f837ecebc6 | ||
|
|
dfb31f15b9 | ||
|
|
6bfb96bff3 | ||
|
|
88083714e3 | ||
|
|
2bd9b41760 | ||
|
|
e5f8d44d24 | ||
|
|
a3ed4e19e1 | ||
|
|
16c10b7a8d | ||
|
|
ce08910d62 | ||
|
|
d2492a6ee2 | ||
|
|
a05ffe7e61 | ||
|
|
05d95c7feb | ||
|
|
22a4bae306 | ||
|
|
dd99bbf1f3 | ||
|
|
6e902e0a31 | ||
|
|
1e10a0cf03 | ||
|
|
98820f5b74 | ||
|
|
e8486ec930 | ||
|
|
c0a58abed5 | ||
|
|
41d9bcdabe | ||
|
|
58e3304ea0 | ||
|
|
eb83549448 | ||
|
|
fdd4dc5db9 | ||
|
|
32d330889f | ||
|
|
a6620f2fcf | ||
|
|
8e4021a890 | ||
|
|
10a0dca45a | ||
|
|
347bbd1d4b | ||
|
|
da39a264ed | ||
|
|
292488656d | ||
|
|
6683544362 | ||
|
|
5ac1d6be71 | ||
|
|
97b223c6fc | ||
|
|
96bc30cf03 | ||
|
|
1d8535ccb7 | ||
|
|
6244cbc945 | ||
|
|
f76a807400 | ||
|
|
b1febbcb26 | ||
|
|
24b30e5285 | ||
|
|
47724b4320 | ||
|
|
058625b713 | ||
|
|
7d75d2cd94 | ||
|
|
4a17fc8a81 | ||
|
|
1327b6b06b | ||
|
|
8bc71db41f | ||
|
|
0137a98b28 | ||
|
|
326eb3f2cc | ||
|
|
5ed5ed5a0e | ||
|
|
e6668220f2 | ||
|
|
372e52ff6c | ||
|
|
3da8a8b9b9 | ||
|
|
841d084555 | ||
|
|
5a4d5b9c43 | ||
|
|
7959e40061 | ||
|
|
3d2c481ab0 | ||
|
|
232d7322b5 | ||
|
|
b116631418 | ||
|
|
9388aa62cf | ||
|
|
10ac9442f2 | ||
|
|
184a5c1c0a | ||
|
|
472a3b34d8 | ||
|
|
3c81ae17ea | ||
|
|
1528b88657 | ||
|
|
65a1eaf069 | ||
|
|
6d853bb196 | ||
|
|
74aeb0a2ec | ||
|
|
b02f8a482d | ||
|
|
0a11038665 | ||
|
|
01ab37c9ce | ||
|
|
db95181f8f | ||
|
|
8b6416db1e | ||
|
|
17600d23d8 | ||
|
|
e347cd769a | ||
|
|
25437936c4 | ||
|
|
063ede8698 | ||
|
|
82af8b9fb6 | ||
|
|
7b8b77444d | ||
|
|
2176606ff7 | ||
|
|
8c920456e4 | ||
|
|
a80791c568 | ||
|
|
ed8bcb28b2 | ||
|
|
a5d2854104 | ||
|
|
d3d7b1d679 | ||
|
|
4efe1437dd | ||
|
|
ee78d54023 | ||
|
|
bfdabe46df | ||
|
|
236357c05a | ||
|
|
06ba4f48dc | ||
|
|
d7635a9972 | ||
|
|
4a8dd99a9f | ||
|
|
bcc330c80e | ||
|
|
1743bba3ea | ||
|
|
74a635b919 | ||
|
|
1fc2d92bf6 | ||
|
|
df56b575cd | ||
|
|
8753278af8 | ||
|
|
6f139706ae | ||
|
|
b37e6a2234 | ||
|
|
f32ea8feff | ||
|
|
6f7f0810e0 | ||
|
|
8dee7adf47 | ||
|
|
bf4f7761b3 | ||
|
|
6f5703e473 | ||
|
|
c8bab9bca4 | ||
|
|
b209beb54b | ||
|
|
c1c7f61e80 | ||
|
|
48ea0ca37f | ||
|
|
60a7bca27e | ||
|
|
6e1e21942e | ||
|
|
8e70535583 | ||
|
|
5f5c9d133b | ||
|
|
6be2a5e54b | ||
|
|
334ff1fd4f | ||
|
|
b0df49cb10 | ||
|
|
a7b0c87a97 | ||
|
|
39734c77f7 | ||
|
|
3f689d6bfd | ||
|
|
dc08a1f933 | ||
|
|
8be338ed08 | ||
|
|
4c7131c26a | ||
|
|
3d732748b6 | ||
|
|
4d20c5ffd1 | ||
|
|
e6a8e53a8d | ||
|
|
fe6e54773e | ||
|
|
d659e6242b | ||
|
|
0cfdb4a029 | ||
|
|
37e29b5434 | ||
|
|
ed949c59cf | ||
|
|
185f2ac9ce | ||
|
|
387e05af6d | ||
|
|
806df0f8de | ||
|
|
47bbe6afe7 | ||
|
|
76f0cbaf1f | ||
|
|
51316230ba | ||
|
|
80c4ba0e03 | ||
|
|
97532bc759 | ||
|
|
a7d927af35 | ||
|
|
e9105d8b46 | ||
|
|
a4adf0d1ec | ||
|
|
0473eb2c22 | ||
|
|
d9d74a549d | ||
|
|
b5afabf41b | ||
|
|
b09e273fab | ||
|
|
6d990c8ca1 | ||
|
|
d065ae7311 | ||
|
|
f6185c6d85 | ||
|
|
0d4aa19918 | ||
|
|
4dfed39284 | ||
|
|
fc6863bea9 | ||
|
|
b4af02bcc4 | ||
|
|
811b28ae39 | ||
|
|
55b141c756 | ||
|
|
fbe5fdcd05 | ||
|
|
a2075d255b | ||
|
|
7ea36613da | ||
|
|
e4021e2d21 | ||
|
|
dfbc6b2d28 | ||
|
|
b14246e828 | ||
|
|
cc764902bc | ||
|
|
1156930bd7 | ||
|
|
a6f0062bd7 | ||
|
|
c86bd7f528 | ||
|
|
f971949135 | ||
|
|
93970e41e3 | ||
|
|
a250829cb6 | ||
|
|
aa3af99591 | ||
|
|
1800ff1c5f | ||
|
|
c7f5dcaf38 | ||
|
|
95fa18fd1b | ||
|
|
8797e389c6 | ||
|
|
8f0fc87a18 | ||
|
|
c455f4ccbd | ||
|
|
b8f4c95e21 | ||
|
|
498c628a1e | ||
|
|
5c0a84fd95 | ||
|
|
abcec8015c | ||
|
|
70febdf0ce | ||
|
|
a33a2cc024 | ||
|
|
d16085b67b | ||
|
|
5903a8fb4f | ||
|
|
e865b4f106 | ||
|
|
b507383230 | ||
|
|
ee0bd97330 | ||
|
|
cd7615343e | ||
|
|
3087be111c | ||
|
|
e415fd6d5b | ||
|
|
d6c639e64b | ||
|
|
6bb3da2236 | ||
|
|
f4b458f9e2 | ||
|
|
c966517392 | ||
|
|
1b774773ac | ||
|
|
095f420299 | ||
|
|
f6b945c739 | ||
|
|
4a7c467763 | ||
|
|
ff5680b8f1 | ||
|
|
111c268fbb | ||
|
|
20b41bd136 | ||
|
|
232dcb7241 | ||
|
|
09d166ebfd | ||
|
|
f432f1b010 | ||
|
|
d7fcae0778 | ||
|
|
223b4a9fb2 | ||
|
|
1af309aa19 | ||
|
|
13d14d0ddf | ||
|
|
66b30531ac | ||
|
|
66cf5423c6 | ||
|
|
3f0cec517b | ||
|
|
833b8160b5 | ||
|
|
a776cc376e | ||
|
|
83777bf1bc | ||
|
|
a15bc3ddd9 | ||
|
|
6dfddf5410 | ||
|
|
fb928dc8ea | ||
|
|
1bac3117fa | ||
|
|
07c55e9db4 | ||
|
|
5a8991913d | ||
|
|
fcce8449bc | ||
|
|
283d452ede | ||
|
|
52ba9b76e0 | ||
|
|
9d051f5808 | ||
|
|
b2ad1b9b42 | ||
|
|
a5d3c7c4f4 | ||
|
|
bef6f89563 | ||
|
|
4b77527f7a | ||
|
|
a080556e7e | ||
|
|
95fe31c60d | ||
|
|
8f1465aa9c | ||
|
|
1b11ff7752 | ||
|
|
101ce7109e | ||
|
|
7caf083ebe | ||
|
|
11f85405e0 | ||
|
|
be297836a1 | ||
|
|
91d72e822e | ||
|
|
8640414cca | ||
|
|
091ace83f8 | ||
|
|
e81957973d | ||
|
|
f3af7d55a8 | ||
|
|
e92f3a25ec | ||
|
|
ecbe894712 | ||
|
|
9a432a2328 | ||
|
|
976cb2d81d | ||
|
|
00d70680f9 | ||
|
|
3b8e7319f1 | ||
|
|
39c4f1f7c5 | ||
|
|
50a7f8483a | ||
|
|
52df73c3ff | ||
|
|
6adaf5a554 | ||
|
|
fc94ae7a77 | ||
|
|
ec8ab90763 | ||
|
|
5a0f713935 | ||
|
|
34921ffbba | ||
|
|
62ad9a8dc5 | ||
|
|
84752f34f9 | ||
|
|
3a39d96928 | ||
|
|
65d37c34b7 | ||
|
|
8e174d9437 | ||
|
|
4ec9b314c1 | ||
|
|
ba59c319b8 | ||
|
|
7708dde102 | ||
|
|
6c4c621475 | ||
|
|
9a8cc4eeb5 | ||
|
|
389ab65ab9 | ||
|
|
d3ae37234c | ||
|
|
f3c6ccd13c | ||
|
|
7305ee0962 | ||
|
|
38d6560c11 | ||
|
|
87ab152239 | ||
|
|
c387491661 | ||
|
|
7db4e07719 | ||
|
|
0073fae58e | ||
|
|
960c76bad2 | ||
|
|
93c36a6f68 | ||
|
|
781c12f2a9 | ||
|
|
55f3c2f438 | ||
|
|
60e922f180 | ||
|
|
a8a305a8ca | ||
|
|
87e8e1b31c | ||
|
|
f68e7a365f | ||
|
|
8abb5f60ca | ||
|
|
59aa9a23d8 | ||
|
|
6d79765420 | ||
|
|
49ca11e02d | ||
|
|
c004e18fb1 | ||
|
|
ee5918d9d7 | ||
|
|
9837e78bfc | ||
|
|
e08dc5aaae | ||
|
|
9ae2e2ef95 | ||
|
|
55b58a5d4c |
29
.gitignore
vendored
29
.gitignore
vendored
@@ -1,22 +1,37 @@
|
||||
*.pyc
|
||||
*.pyo
|
||||
build*/conf/local.conf
|
||||
build*/conf/bblayers.conf
|
||||
build*/downloads
|
||||
build*/tmp/
|
||||
build*/sstate-cache
|
||||
build*/pyshtables.py
|
||||
build/conf/local.conf
|
||||
build/conf/bblayers.conf
|
||||
build/downloads
|
||||
build/tmp/
|
||||
build/sstate-cache
|
||||
build/pyshtables.py
|
||||
pstage/
|
||||
scripts/oe-git-proxy-socks
|
||||
scripts/poky-git-proxy-socks
|
||||
sources/
|
||||
meta-darwin
|
||||
meta-maemo
|
||||
meta-extras
|
||||
meta-m2
|
||||
meta-prvt*
|
||||
poky-autobuilder*
|
||||
*.swp
|
||||
*.orig
|
||||
*.rej
|
||||
*~
|
||||
documentation/poky-ref-manual/poky-ref-manual.html
|
||||
documentation/poky-ref-manual/poky-ref-manual.pdf
|
||||
documentation/poky-ref-manual/poky-ref-manual.tgz
|
||||
documentation/poky-ref-manual/bsp-guide.html
|
||||
documentation/poky-ref-manual/bsp-guide.pdf
|
||||
documentation/bsp-guide/bsp-guide.html
|
||||
documentation/bsp-guide/bsp-guide.pdf
|
||||
documentation/bsp-guide/bsp-guide.tgz
|
||||
documentation/yocto-project-qs/yocto-project-qs.html
|
||||
documentation/yocto-project-qs/yocto-project-qs.tgz
|
||||
documentation/kernel-manual/kernel-manual.html
|
||||
documentation/kernel-manual/kernel-manual.tgz
|
||||
documentation/kernel-manual/kernel-manual.pdf
|
||||
|
||||
|
||||
|
||||
|
||||
30
README
30
README
@@ -1,25 +1,15 @@
|
||||
Poky
|
||||
====
|
||||
|
||||
Poky is an integration of various components to form a complete prepackaged
|
||||
build system and development environment. It features support for building
|
||||
customised embedded device style images. There are reference demo images
|
||||
featuring a X11/Matchbox/GTK themed UI called Sato. The system supports
|
||||
cross-architecture application development using QEMU emulation and a
|
||||
standalone toolchain and SDK with IDE integration.
|
||||
Poky platform builder is a combined cross build system and development
|
||||
environment. It features support for building X11/Matchbox/GTK based
|
||||
filesystem images for various embedded devices and boards. It also
|
||||
supports cross-architecture application development using QEMU emulation
|
||||
and a standalone toolchain and SDK with IDE integration.
|
||||
|
||||
Poky has an extensive handbook, the source of which is contained in
|
||||
the handbook directory. For compiled HTML or pdf versions of this,
|
||||
see the Poky website http://pokylinux.org.
|
||||
|
||||
Additional information on the specifics of hardware that Poky supports
|
||||
is available in README.hardware. Further hardware support can easily be added
|
||||
in the form of layers which extend the systems capabilities in a modular way.
|
||||
|
||||
As an integration layer Poky consists of several upstream projects such as
|
||||
BitBake, OpenEmbedded-Core, Yocto documentation and various sources of information
|
||||
e.g. for the hardware support. Poky is in turn a component of the Yocto Project.
|
||||
|
||||
The Yocto Project has extensive documentation about the system including a
|
||||
reference manual which can be found at:
|
||||
http://yoctoproject.org/community/documentation
|
||||
|
||||
For information about OpenEmbedded see their website:
|
||||
http://www.openembedded.org/
|
||||
|
||||
is available in README.hardware.
|
||||
|
||||
@@ -87,22 +87,22 @@ Hard Disk:
|
||||
1. Build a directdisk image format. This will generate proper partition tables
|
||||
that will in turn be written to the physical media. For example:
|
||||
|
||||
$ bitbake core-image-minimal-directdisk
|
||||
$ bitbake poky-image-minimal-directdisk
|
||||
|
||||
2. Use the "dd" utility to write the image to the raw block device. For example:
|
||||
|
||||
# dd if=core-image-minimal-directdisk-atom-pc.hdddirect of=/dev/sdb
|
||||
# dd if=poky-image-minimal-directdisk-atom-pc.hdddirect of=/dev/sdb
|
||||
|
||||
USB Device:
|
||||
1. Build an hddimg image format. This is a simple filesystem without partition
|
||||
tables and is suitable for USB keys. For example:
|
||||
|
||||
$ bitbake core-image-minimal-live
|
||||
$ bitbake poky-image-minimal-live
|
||||
|
||||
2. Use the "dd" utility to write the image to the raw block device. For
|
||||
example:
|
||||
|
||||
# dd if=core-image-minimal-live-atom-pc.hddimg of=/dev/sdb
|
||||
# dd if=poky-image-minimal-live-atom-pc.hddimg of=/dev/sdb
|
||||
|
||||
If the device fails to boot with "Boot error" displayed, it is likely the BIOS
|
||||
cannot understand the physical layout of the disk (or rather it expects a
|
||||
@@ -126,7 +126,7 @@ USB Device:
|
||||
|
||||
b. Copy the contents of the poky image to the USB-ZIP mode device:
|
||||
|
||||
# mount -o loop core-image-minimal-live-atom-pc.hddimg /tmp/image
|
||||
# mount -o loop poky-image-minimal-live-atom-pc.hddimg /tmp/image
|
||||
# mount /dev/sdb4 /tmp/usbkey
|
||||
# cp -rf /tmp/image/* /tmp/usbkey
|
||||
|
||||
@@ -196,7 +196,7 @@ if used via a usb card reader):
|
||||
# cp u-boot-beagleboard.bin /media/boot/u-boot.bin
|
||||
|
||||
3. Install the root filesystem
|
||||
# tar x -C /media/root -f core-image-$IMAGE_TYPE-beagleboard.tar.bz2
|
||||
# tar x -C /media/root -f poky-image-$IMAGE_TYPE-beagleboard.tar.bz2
|
||||
# tar x -C /media/root -f modules-$KERNEL_VERSION-beagleboard.tgz
|
||||
|
||||
4. Install the kernel uImage
|
||||
@@ -239,57 +239,30 @@ software development of network attached storage (NAS) and digital media server
|
||||
applications. The MPC8315E-RDB features the PowerQUICC II Pro processor, which
|
||||
includes a built-in security accelerator.
|
||||
|
||||
(Note: you may find it easier to order MPC8315E-RDBA; this appears to be the
|
||||
same board in an enclosure with accessories. In any case it is fully
|
||||
compatible with the instructions given here.)
|
||||
|
||||
Setup instructions
|
||||
------------------
|
||||
|
||||
You will need the following:
|
||||
* NFS root setup on your workstation
|
||||
* TFTP server installed on your workstation
|
||||
* Null modem cable connected from your workstation to the first serial port
|
||||
on the board
|
||||
* Ethernet connected to the first ethernet port on the board
|
||||
* nfs root setup on your workstation
|
||||
* tftp server installed on your workstation
|
||||
|
||||
--- Preparation ---
|
||||
Load the kernel and boot it as follows:
|
||||
|
||||
Note: if you have altered your board's ethernet MAC address(es) from the
|
||||
defaults, or you need to do so because you want multiple boards on the same
|
||||
network, then you will need to change the values in the dts file (patch
|
||||
linux/arch/powerpc/boot/dts/mpc8315erdb.dts within the kernel source). If
|
||||
you have left them at the factory default then you shouldn't need to do
|
||||
anything here.
|
||||
1. Get the kernel (uImage.mpc8315erdb) and dtb (mpc8315erdb.dtb) files from
|
||||
the Poky build tmp/deploy directory, and make them available on your tftp
|
||||
server.
|
||||
|
||||
--- Booting from NFS root ---
|
||||
2. Set up the environment in U-Boot:
|
||||
|
||||
Load the kernel and dtb (device tree blob), and boot the system as follows:
|
||||
=>setenv ipaddr <board ip>
|
||||
=>setenv serverip <tftp server ip>
|
||||
=>setenv bootargs root=/dev/nfs rw nfsroot=<nfsroot ip>:<rootfs path> ip=<board ip>:<server ip>:<gateway ip>:255.255.255.0:mpc8315e:eth0:off console=ttyS0,115200
|
||||
|
||||
1. Get the kernel (uImage-mpc8315e-rdb.bin) and dtb (uImage-mpc8315e-rdb.dtb)
|
||||
files from the Poky build tmp/deploy directory, and make them available on
|
||||
your TFTP server.
|
||||
3. Download kernel and dtb to boot kernel.
|
||||
|
||||
2. Connect the board's first serial port to your workstation and then start up
|
||||
your favourite serial terminal so that you will be able to interact with
|
||||
the serial console. If you don't have a favourite, picocom is suggested:
|
||||
|
||||
$ picocom /dev/ttyUSB0 -b 115200
|
||||
|
||||
3. Power up or reset the board and press a key on the terminal when prompted
|
||||
to get to the U-Boot command line
|
||||
|
||||
4. Set up the environment in U-Boot:
|
||||
|
||||
=> setenv ipaddr <board ip>
|
||||
=> setenv serverip <tftp server ip>
|
||||
=> setenv bootargs root=/dev/nfs rw nfsroot=<nfsroot ip>:<rootfs path> ip=<board ip>:<server ip>:<gateway ip>:255.255.255.0:mpc8315e:eth0:off console=ttyS0,115200
|
||||
|
||||
5. Download the kernel and dtb, and boot:
|
||||
|
||||
=> tftp 800000 uImage-mpc8315e-rdb.bin
|
||||
=> tftp 780000 uImage-mpc8315e-rdb.dtb
|
||||
=> bootm 800000 - 780000
|
||||
=>tftp 800000 uImage.mpc8315erdb
|
||||
=>tftp 780000 mpc8315erdb.dtb
|
||||
=>bootm 800000 - 780000
|
||||
|
||||
|
||||
Ubiquiti Networks RouterStation Pro (routerstationpro)
|
||||
@@ -318,11 +291,11 @@ name in all commands where appropriate.
|
||||
|
||||
--- Preparation ---
|
||||
|
||||
1) Build an image (e.g. core-image-minimal) using "routerstationpro" as the
|
||||
1) Build an image (e.g. poky-image-minimal) using "routerstationpro" as the
|
||||
MACHINE
|
||||
|
||||
2) Partition the USB drive so that primary partition 1 is type Linux (83).
|
||||
Minimum size depends on your root image size - core-image-minimal probably
|
||||
Minimum size depends on your root image size - poky-image-minimal probably
|
||||
only needs 8-16MB, other images will need more.
|
||||
|
||||
# fdisk /dev/sdb
|
||||
@@ -343,11 +316,11 @@ only needs 8-16MB, other images will need more.
|
||||
# mke2fs -j /dev/sdb1
|
||||
|
||||
4) Mount partition 1 and then extract the contents of
|
||||
tmp/deploy/images/core-image-XXXX.tar.bz2 into it (preserving permissions).
|
||||
tmp/deploy/images/poky-image-XXXX.tar.bz2 into it (preserving permissions).
|
||||
|
||||
# mount /dev/sdb1 /media/sdb1
|
||||
# cd /media/sdb1
|
||||
# tar -xvjpf tmp/deploy/images/core-image-XXXX.tar.bz2
|
||||
# tar -xvjpf tmp/deploy/images/poky-image-XXXX.tar.bz2
|
||||
|
||||
5) Unmount the USB drive and then plug it into the board's USB port
|
||||
|
||||
|
||||
@@ -32,15 +32,17 @@ import warnings
|
||||
from traceback import format_exception
|
||||
try:
|
||||
import bb
|
||||
except RuntimeError as exc:
|
||||
except RuntimeError, exc:
|
||||
sys.exit(str(exc))
|
||||
from bb import event
|
||||
import bb.msg
|
||||
from bb import cooker
|
||||
from bb import ui
|
||||
from bb import server
|
||||
from bb.server import none
|
||||
#from bb.server import xmlrpc
|
||||
|
||||
__version__ = "1.13.3"
|
||||
__version__ = "1.11.0"
|
||||
logger = logging.getLogger("BitBake")
|
||||
|
||||
|
||||
@@ -102,7 +104,7 @@ It expects that BBFILES is defined, which is a space separated list of files to
|
||||
be executed. BBFILES does support wildcards.
|
||||
Default BBFILES are the .bb files in the current directory.""")
|
||||
|
||||
parser.add_option("-b", "--buildfile", help = "execute the task against this .bb file, rather than a package from BBFILES. Does not handle any dependencies.",
|
||||
parser.add_option("-b", "--buildfile", help = "execute the task against this .bb file, rather than a package from BBFILES.",
|
||||
action = "store", dest = "buildfile", default = None)
|
||||
|
||||
parser.add_option("-k", "--continue", help = "continue as much as possible after an error. While the target that failed, and those that depend on it, cannot be remade, the other dependencies of these targets can be processed all the same.",
|
||||
@@ -118,10 +120,7 @@ Default BBFILES are the .bb files in the current directory.""")
|
||||
action = "store", dest = "cmd")
|
||||
|
||||
parser.add_option("-r", "--read", help = "read the specified file before bitbake.conf",
|
||||
action = "append", dest = "prefile", default = [])
|
||||
|
||||
parser.add_option("-R", "--postread", help = "read the specified file after bitbake.conf",
|
||||
action = "append", dest = "postfile", default = [])
|
||||
action = "append", dest = "file", default = [])
|
||||
|
||||
parser.add_option("-v", "--verbose", help = "output more chit-chat to the terminal",
|
||||
action = "store_true", dest = "verbose", default = False)
|
||||
@@ -138,6 +137,9 @@ Default BBFILES are the .bb files in the current directory.""")
|
||||
parser.add_option("-p", "--parse-only", help = "quit after parsing the BB files (developers only)",
|
||||
action = "store_true", dest = "parse_only", default = False)
|
||||
|
||||
parser.add_option("-d", "--disable-psyco", help = "disable using the psyco just-in-time compiler (not recommended)",
|
||||
action = "store_true", dest = "disable_psyco", default = False)
|
||||
|
||||
parser.add_option("-s", "--show-versions", help = "show current and preferred versions of all packages",
|
||||
action = "store_true", dest = "show_versions", default = False)
|
||||
|
||||
@@ -159,9 +161,6 @@ Default BBFILES are the .bb files in the current directory.""")
|
||||
parser.add_option("-u", "--ui", help = "userinterface to use",
|
||||
action = "store", dest = "ui")
|
||||
|
||||
parser.add_option("-t", "--servertype", help = "Choose which server to use, none, process or xmlrpc",
|
||||
action = "store", dest = "servertype")
|
||||
|
||||
parser.add_option("", "--revisions-changed", help = "Set the exit code depending on whether upstream floating revisions have changed or not",
|
||||
action = "store_true", dest = "revisions_changed", default = False)
|
||||
|
||||
@@ -169,22 +168,15 @@ Default BBFILES are the .bb files in the current directory.""")
|
||||
|
||||
configuration = BBConfiguration(options)
|
||||
configuration.pkgs_to_build.extend(args[1:])
|
||||
configuration.initial_path = os.environ['PATH']
|
||||
|
||||
ui_main = get_ui(configuration)
|
||||
|
||||
# Server type can be xmlrpc, process or none currently, if nothing is specified,
|
||||
# the default server is process
|
||||
if configuration.servertype:
|
||||
server_type = configuration.servertype
|
||||
else:
|
||||
server_type = 'process'
|
||||
loghandler = event.LogHandler()
|
||||
logger.addHandler(loghandler)
|
||||
|
||||
try:
|
||||
module = __import__("bb.server", fromlist = [server_type])
|
||||
server = getattr(module, server_type)
|
||||
except AttributeError:
|
||||
sys.exit("FATAL: Invalid server type '%s' specified.\n"
|
||||
"Valid interfaces: xmlrpc, process [default], none." % servertype)
|
||||
#server = bb.server.xmlrpc
|
||||
server = bb.server.none
|
||||
|
||||
# Save a logfile for cooker into the current working directory. When the
|
||||
# server is daemonized this logfile will be truncated.
|
||||
@@ -193,45 +185,35 @@ Default BBFILES are the .bb files in the current directory.""")
|
||||
bb.utils.init_logger(bb.msg, configuration.verbose, configuration.debug,
|
||||
configuration.debug_domains)
|
||||
|
||||
# Ensure logging messages get sent to the UI as events
|
||||
handler = bb.event.LogHandler()
|
||||
logger.addHandler(handler)
|
||||
|
||||
# Before we start modifying the environment we should take a pristine
|
||||
# copy for possible later use
|
||||
initialenv = os.environ.copy()
|
||||
# Clear away any spurious environment variables. But don't wipe the
|
||||
# environment totally. This is necessary to ensure the correct operation
|
||||
# of the UIs (e.g. for DISPLAY, etc.)
|
||||
bb.utils.clean_environment()
|
||||
|
||||
server = server.BitBakeServer()
|
||||
|
||||
server.initServer()
|
||||
idle = server.getServerIdleCB()
|
||||
|
||||
cooker = bb.cooker.BBCooker(configuration, idle, initialenv)
|
||||
cooker = bb.cooker.BBCooker(configuration, server)
|
||||
cooker.parseCommandLine()
|
||||
|
||||
server.addcooker(cooker)
|
||||
server.saveConnectionDetails()
|
||||
server.detach(cooker_logfile)
|
||||
serverinfo = server.BitbakeServerInfo(cooker.server)
|
||||
|
||||
# Should no longer need to ever reference cooker
|
||||
server.BitBakeServerFork(cooker, cooker.server, serverinfo, cooker_logfile)
|
||||
del cooker
|
||||
|
||||
logger.removeHandler(handler)
|
||||
logger.removeHandler(loghandler)
|
||||
|
||||
# Setup a connection to the server (cooker)
|
||||
server_connection = server.establishConnection()
|
||||
server_connection = server.BitBakeServerConnection(serverinfo)
|
||||
|
||||
# Launch the UI
|
||||
if configuration.ui:
|
||||
ui = configuration.ui
|
||||
else:
|
||||
ui = "knotty"
|
||||
|
||||
try:
|
||||
return server.launchUI(ui_main, server_connection.connection, server_connection.events)
|
||||
return server.BitbakeUILauch().launch(serverinfo, ui_main, server_connection.connection, server_connection.events)
|
||||
finally:
|
||||
server_connection.terminate()
|
||||
|
||||
return 1
|
||||
|
||||
if __name__ == "__main__":
|
||||
try:
|
||||
ret = main()
|
||||
@@ -240,4 +222,3 @@ if __name__ == "__main__":
|
||||
import traceback
|
||||
traceback.print_exc(5)
|
||||
sys.exit(ret)
|
||||
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
|
||||
import cmd
|
||||
import logging
|
||||
import os
|
||||
import os.path
|
||||
import sys
|
||||
|
||||
bindir = os.path.dirname(__file__)
|
||||
@@ -18,8 +18,8 @@ sys.path[0:0] = [os.path.join(topdir, 'lib')]
|
||||
import bb.cache
|
||||
import bb.cooker
|
||||
import bb.providers
|
||||
import bb.utils
|
||||
from bb.cooker import state
|
||||
from bb.server import none
|
||||
|
||||
|
||||
logger = logging.getLogger('BitBake')
|
||||
@@ -41,22 +41,18 @@ def main(args):
|
||||
class Commands(cmd.Cmd):
|
||||
def __init__(self):
|
||||
cmd.Cmd.__init__(self)
|
||||
initialenv = os.environ.copy()
|
||||
|
||||
self.returncode = 0
|
||||
self.config = Config(parse_only=True)
|
||||
self.cooker = bb.cooker.BBCooker(self.config,
|
||||
self.register_idle_function,
|
||||
initialenv)
|
||||
bb.server.none)
|
||||
self.config_data = self.cooker.configuration.data
|
||||
bb.providers.logger.setLevel(logging.ERROR)
|
||||
self.cooker_data = None
|
||||
|
||||
def register_idle_function(self, function, data):
|
||||
pass
|
||||
self.prepare_cooker()
|
||||
|
||||
def prepare_cooker(self):
|
||||
sys.stderr.write("Parsing recipes..")
|
||||
logger.setLevel(logging.WARNING)
|
||||
logger.setLevel(logging.ERROR)
|
||||
|
||||
try:
|
||||
while self.cooker.state in (state.initial, state.parsing):
|
||||
@@ -72,128 +68,20 @@ class Commands(cmd.Cmd):
|
||||
self.cooker_data = self.cooker.status
|
||||
self.cooker_data.appends = self.cooker.appendlist
|
||||
|
||||
def check_prepare_cooker(self):
|
||||
if not self.cooker_data:
|
||||
self.prepare_cooker()
|
||||
|
||||
def do_show_layers(self, args):
|
||||
"""show_layers: shows current configured layers"""
|
||||
self.check_prepare_cooker()
|
||||
logger.info(str(self.config_data.getVar('BBLAYERS', True)))
|
||||
|
||||
def do_show_overlayed(self, args):
|
||||
"""show_overlayed: list overlayed recipes (where there is a recipe in another
|
||||
layer that has a higher layer priority)
|
||||
|
||||
syntax: show_overlayed
|
||||
|
||||
Highest priority recipes are listed with the recipes they overlay as subitems.
|
||||
"""
|
||||
self.check_prepare_cooker()
|
||||
if self.cooker.overlayed:
|
||||
logger.info('Overlayed recipes:')
|
||||
for f in self.cooker.overlayed.iterkeys():
|
||||
logger.info('%s' % f)
|
||||
for of in self.cooker.overlayed[f]:
|
||||
logger.info(' %s' % of)
|
||||
else:
|
||||
logger.info('No overlayed recipes found')
|
||||
|
||||
def do_flatten(self, args):
|
||||
"""flatten: flattens layer configuration into a separate output directory.
|
||||
|
||||
syntax: flatten <outputdir>
|
||||
|
||||
Takes the current layer configuration and builds a "flattened" directory
|
||||
containing the contents of all layers, with any overlayed recipes removed
|
||||
and bbappends appended to the corresponding recipes. Note that some manual
|
||||
cleanup may still be necessary afterwards, in particular:
|
||||
|
||||
* where non-recipe files (such as patches) are overwritten (the flatten
|
||||
command will show a warning for these)
|
||||
* where anything beyond the normal layer setup has been added to
|
||||
layer.conf (only the lowest priority layer's layer.conf is used)
|
||||
* overridden/appended items from bbappends will need to be tidied up
|
||||
"""
|
||||
arglist = args.split()
|
||||
if len(arglist) != 1:
|
||||
logger.error('Please specify an output directory')
|
||||
self.do_help('flatten')
|
||||
return
|
||||
|
||||
if os.path.exists(arglist[0]) and os.listdir(arglist[0]):
|
||||
logger.error('Directory %s exists and is non-empty, please clear it out first' % arglist[0])
|
||||
return
|
||||
|
||||
self.check_prepare_cooker()
|
||||
layers = (self.config_data.getVar('BBLAYERS', True) or "").split()
|
||||
for layer in layers:
|
||||
overlayed = []
|
||||
for f in self.cooker.overlayed.iterkeys():
|
||||
for of in self.cooker.overlayed[f]:
|
||||
if of.startswith(layer):
|
||||
overlayed.append(of)
|
||||
|
||||
logger.info('Copying files from %s...' % layer )
|
||||
for root, dirs, files in os.walk(layer):
|
||||
for f1 in files:
|
||||
f1full = os.sep.join([root, f1])
|
||||
if f1full in overlayed:
|
||||
logger.info(' Skipping overlayed file %s' % f1full )
|
||||
else:
|
||||
ext = os.path.splitext(f1)[1]
|
||||
if ext != '.bbappend':
|
||||
fdest = f1full[len(layer):]
|
||||
fdest = os.path.normpath(os.sep.join([arglist[0],fdest]))
|
||||
bb.utils.mkdirhier(os.path.dirname(fdest))
|
||||
if os.path.exists(fdest):
|
||||
if f1 == 'layer.conf' and root.endswith('/conf'):
|
||||
logger.info(' Skipping layer config file %s' % f1full )
|
||||
continue
|
||||
else:
|
||||
logger.warn('Overwriting file %s', fdest)
|
||||
bb.utils.copyfile(f1full, fdest)
|
||||
if ext == '.bb':
|
||||
if f1 in self.cooker_data.appends:
|
||||
appends = self.cooker_data.appends[f1]
|
||||
if appends:
|
||||
logger.info(' Applying appends to %s' % fdest )
|
||||
for appendname in appends:
|
||||
self.apply_append(appendname, fdest)
|
||||
|
||||
def get_append_layer(self, appendname):
|
||||
for layer, _, regex, _ in self.cooker.status.bbfile_config_priorities:
|
||||
if regex.match(appendname):
|
||||
return layer
|
||||
return "?"
|
||||
|
||||
def apply_append(self, appendname, recipename):
|
||||
appendfile = open(appendname, 'r')
|
||||
recipefile = open(recipename, 'a')
|
||||
recipefile.write('\n')
|
||||
recipefile.write('##### bbappended from %s #####\n' % self.get_append_layer(appendname))
|
||||
recipefile.writelines(appendfile.readlines())
|
||||
|
||||
def do_show_appends(self, args):
|
||||
"""show_appends: List bbappend files and recipe files they apply to
|
||||
|
||||
syntax: show_appends
|
||||
|
||||
Recipes are listed with the bbappends that apply to them as subitems.
|
||||
"""
|
||||
self.check_prepare_cooker()
|
||||
if not self.cooker_data.appends:
|
||||
logger.info('No append files found')
|
||||
return
|
||||
|
||||
logger.info('State of append files:')
|
||||
|
||||
pnlist = list(self.cooker_data.pkg_pn.keys())
|
||||
pnlist.sort()
|
||||
for pn in pnlist:
|
||||
for pn in self.cooker_data.pkg_pn:
|
||||
self.show_appends_for_pn(pn)
|
||||
|
||||
self.show_appends_for_skipped()
|
||||
self.show_appends_with_no_recipes()
|
||||
|
||||
def show_appends_for_pn(self, pn):
|
||||
filenames = self.cooker_data.pkg_pn[pn]
|
||||
@@ -204,30 +92,20 @@ Recipes are listed with the bbappends that apply to them as subitems.
|
||||
self.cooker_data.pkg_pn)
|
||||
best_filename = os.path.basename(best[3])
|
||||
|
||||
self.show_appends_output(filenames, best_filename)
|
||||
|
||||
def show_appends_for_skipped(self):
|
||||
filenames = [os.path.basename(f)
|
||||
for f in self.cooker.skiplist.iterkeys()]
|
||||
self.show_appends_output(filenames, None, " (skipped)")
|
||||
|
||||
def show_appends_output(self, filenames, best_filename, name_suffix = ''):
|
||||
appended, missing = self.get_appends_for_files(filenames)
|
||||
if appended:
|
||||
for basename, appends in appended:
|
||||
logger.info('%s%s:', basename, name_suffix)
|
||||
logger.info('%s:', basename)
|
||||
for append in appends:
|
||||
logger.info(' %s', append)
|
||||
|
||||
if best_filename:
|
||||
if best_filename in missing:
|
||||
logger.warn('%s: missing append for preferred version',
|
||||
best_filename)
|
||||
self.returncode |= 1
|
||||
|
||||
if best_filename in missing:
|
||||
logger.warn('%s: missing append for preferred version',
|
||||
best_filename)
|
||||
self.returncode |= 1
|
||||
|
||||
def get_appends_for_files(self, filenames):
|
||||
appended, notappended = [], []
|
||||
appended, notappended = set(), set()
|
||||
for filename in filenames:
|
||||
_, cls = bb.cache.Cache.virtualfn2realfn(filename)
|
||||
if cls:
|
||||
@@ -236,19 +114,36 @@ Recipes are listed with the bbappends that apply to them as subitems.
|
||||
basename = os.path.basename(filename)
|
||||
appends = self.cooker_data.appends.get(basename)
|
||||
if appends:
|
||||
appended.append((basename, list(appends)))
|
||||
appended.add((basename, frozenset(appends)))
|
||||
else:
|
||||
notappended.append(basename)
|
||||
notappended.add(basename)
|
||||
return appended, notappended
|
||||
|
||||
def show_appends_with_no_recipes(self):
|
||||
recipes = set(os.path.basename(f)
|
||||
for f in self.cooker_data.pkg_fn.iterkeys())
|
||||
appended_recipes = self.cooker_data.appends.iterkeys()
|
||||
appends_without_recipes = [self.cooker_data.appends[recipe]
|
||||
for recipe in appended_recipes
|
||||
if recipe not in recipes]
|
||||
if appends_without_recipes:
|
||||
appendlines = (' %s' % append
|
||||
for appends in appends_without_recipes
|
||||
for append in appends)
|
||||
logger.warn('No recipes available for:\n%s',
|
||||
'\n'.join(appendlines))
|
||||
self.returncode |= 4
|
||||
|
||||
def do_EOF(self, line):
|
||||
return True
|
||||
|
||||
|
||||
class Config(object):
|
||||
def __init__(self, **options):
|
||||
self.pkgs_to_build = []
|
||||
self.debug_domains = []
|
||||
self.extra_assume_provided = []
|
||||
self.prefile = []
|
||||
self.postfile = []
|
||||
self.file = []
|
||||
self.debug = 0
|
||||
self.__dict__.update(options)
|
||||
|
||||
|
||||
@@ -1,53 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
import os
|
||||
import sys,logging
|
||||
import optparse
|
||||
|
||||
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)),'lib'))
|
||||
|
||||
import prserv
|
||||
import prserv.serv
|
||||
|
||||
__version__="1.0.0"
|
||||
|
||||
PRHOST_DEFAULT=''
|
||||
PRPORT_DEFAULT=8585
|
||||
|
||||
def main():
|
||||
parser = optparse.OptionParser(
|
||||
version="Bitbake PR Service Core version %s, %%prog version %s" % (prserv.__version__, __version__),
|
||||
usage = "%prog [options]")
|
||||
|
||||
parser.add_option("-f", "--file", help="database filename(default prserv.db)", action="store",
|
||||
dest="dbfile", type="string", default="prserv.db")
|
||||
parser.add_option("-l", "--log", help="log filename(default prserv.log)", action="store",
|
||||
dest="logfile", type="string", default="prserv.log")
|
||||
parser.add_option("--loglevel", help="logging level, i.e. CRITICAL, ERROR, WARNING, INFO, DEBUG",
|
||||
action = "store", type="string", dest="loglevel", default = "WARNING")
|
||||
parser.add_option("--start", help="start daemon",
|
||||
action="store_true", dest="start", default="True")
|
||||
parser.add_option("--stop", help="stop daemon",
|
||||
action="store_false", dest="start")
|
||||
parser.add_option("--host", help="ip address to bind", action="store",
|
||||
dest="host", type="string", default=PRHOST_DEFAULT)
|
||||
parser.add_option("--port", help="port number(default 8585)", action="store",
|
||||
dest="port", type="int", default=PRPORT_DEFAULT)
|
||||
|
||||
options, args = parser.parse_args(sys.argv)
|
||||
|
||||
prserv.init_logger(os.path.abspath(options.logfile),options.loglevel)
|
||||
|
||||
if options.start:
|
||||
prserv.serv.start_daemon(options)
|
||||
else:
|
||||
prserv.serv.stop_daemon()
|
||||
|
||||
if __name__ == "__main__":
|
||||
try:
|
||||
ret = main()
|
||||
except Exception:
|
||||
ret = 1
|
||||
import traceback
|
||||
traceback.print_exc(5)
|
||||
sys.exit(ret)
|
||||
|
||||
@@ -85,6 +85,9 @@ don't execute, just go through the motions
|
||||
.B \-p, \-\-parse-only
|
||||
quit after parsing the BB files (developers only)
|
||||
.TP
|
||||
.B \-d, \-\-disable-psyco
|
||||
disable using the psyco just-in-time compiler (not recommended)
|
||||
.TP
|
||||
.B \-s, \-\-show-versions
|
||||
show current and preferred versions of all packages
|
||||
.TP
|
||||
|
||||
@@ -29,7 +29,7 @@ tasks and managing metadata. As such, its similarities to GNU make and other
|
||||
build tools are readily apparent. It was inspired by Portage, the package management system used by the Gentoo Linux distribution. BitBake is the basis of the <ulink url="http://www.openembedded.org/">OpenEmbedded</ulink> project, which is being used to build and maintain a number of embedded Linux distributions, including OpenZaurus and Familiar.</para>
|
||||
</section>
|
||||
<section>
|
||||
<title>Background and goals</title>
|
||||
<title>Background and Goals</title>
|
||||
<para>Prior to BitBake, no other build tool adequately met
|
||||
the needs of an aspiring embedded Linux distribution. All of the
|
||||
buildsystems used by traditional desktop Linux distributions lacked
|
||||
@@ -42,9 +42,9 @@ embedded space, were scalable or maintainable.</para>
|
||||
<listitem><para>Handle crosscompilation.</para></listitem>
|
||||
<listitem><para>Handle interpackage dependencies (build time on target architecture, build time on native architecture, and runtime).</para></listitem>
|
||||
<listitem><para>Support running any number of tasks within a given package, including, but not limited to, fetching upstream sources, unpacking them, patching them, configuring them, et cetera.</para></listitem>
|
||||
<listitem><para>Must be Linux distribution agnostic (both build and target).</para></listitem>
|
||||
<listitem><para>Must be linux distribution agnostic (both build and target).</para></listitem>
|
||||
<listitem><para>Must be architecture agnostic</para></listitem>
|
||||
<listitem><para>Must support multiple build and target operating systems (including Cygwin, the BSDs, etc).</para></listitem>
|
||||
<listitem><para>Must support multiple build and target operating systems (including cygwin, the BSDs, etc).</para></listitem>
|
||||
<listitem><para>Must be able to be self contained, rather than tightly integrated into the build machine's root filesystem.</para></listitem>
|
||||
<listitem><para>There must be a way to handle conditional metadata (on target architecture, operating system, distribution, machine).</para></listitem>
|
||||
<listitem><para>It must be easy for the person using the tools to supply their own local metadata and packages to operate against.</para></listitem>
|
||||
@@ -91,13 +91,13 @@ share common metadata between many packages.</para></listitem>
|
||||
<section>
|
||||
<title>Setting a default value (?=)</title>
|
||||
<para><screen><varname>A</varname> ?= "aval"</screen></para>
|
||||
<para>If <varname>A</varname> is set before the above is called, it will retain its previous value. If <varname>A</varname> is unset prior to the above call, <varname>A</varname> will be set to <literal>aval</literal>. Note that this assignment is immediate, so if there are multiple ?= assignments to a single variable, the first of those will be used.</para>
|
||||
<para>If <varname>A</varname> is set before the above is called, it will retain it's previous value. If <varname>A</varname> is unset prior to the above call, <varname>A</varname> will be set to <literal>aval</literal>. Note that this assignment is immediate, so if there are multiple ?= assignments to a single variable, the first of those will be used.</para>
|
||||
</section>
|
||||
<section>
|
||||
<title>Setting a default value (??=)</title>
|
||||
<para><screen><varname>A</varname> ??= "somevalue"</screen></para>
|
||||
<para><screen><varname>A</varname> ??= "someothervalue"</screen></para>
|
||||
<para>If <varname>A</varname> is set before the above, it will retain that value. If <varname>A</varname> is unset prior to the above, <varname>A</varname> will be set to <literal>someothervalue</literal>. This is a lazy version of ??=, in that the assignment does not occur until the end of the parsing process, so that the last, rather than the first, ??= assignment to a given variable will be used.</para>
|
||||
<para>If <varname>A</varname> is set before the above, it will retain that value. If <varname>A</varname> is unset prior to the above, <varname>A</varname> will be set to <literal>someothervalue</literal>. This is a lazy version of ?=, in that the assignment does not occur until the end of the parsing process, so that the last, rather than the first, ??= assignment to a given variable will be used.</para>
|
||||
</section>
|
||||
<section>
|
||||
<title>Immediate variable expansion (:=)</title>
|
||||
@@ -125,7 +125,7 @@ share common metadata between many packages.</para></listitem>
|
||||
<varname>B</varname> .= "additionaldata"
|
||||
<varname>C</varname> = "cval"
|
||||
<varname>C</varname> =. "test"</screen></para>
|
||||
<para>In this example, <varname>B</varname> is now <literal>bvaladditionaldata</literal> and <varname>C</varname> is <literal>testcval</literal>. In contrast to the above appending and prepending operators, no additional space
|
||||
<para>In this example, <varname>B</varname> is now <literal>bvaladditionaldata</literal> and <varname>C</varname> is <literal>testcval</literal>. In contrast to the above Appending and Prepending operators no additional space
|
||||
will be introduced.</para>
|
||||
</section>
|
||||
<section>
|
||||
@@ -147,12 +147,12 @@ will be introduced.</para>
|
||||
</section>
|
||||
<section>
|
||||
<title>Inclusion</title>
|
||||
<para>Next, there is the <literal>include</literal> directive, which causes BitBake to parse whatever file you specify, and insert it at that location, which is not unlike <command>make</command>. However, if the path specified on the <literal>include</literal> line is a relative path, BitBake will locate the first one it can find within <envar>BBPATH</envar>.</para>
|
||||
<para>Next, there is the <literal>include</literal> directive, which causes BitBake to parse in whatever file you specify, and insert it at that location, which is not unlike <command>make</command>. However, if the path specified on the <literal>include</literal> line is a relative path, BitBake will locate the first one it can find within <envar>BBPATH</envar>.</para>
|
||||
</section>
|
||||
<section>
|
||||
<title>Requiring inclusion</title>
|
||||
<title>Requiring Inclusion</title>
|
||||
<para>In contrast to the <literal>include</literal> directive, <literal>require</literal> will
|
||||
raise an ParseError if the file to be included cannot be found. Otherwise it will behave just like the <literal>
|
||||
raise an ParseError if the to be included file can not be found. Otherwise it will behave just like the <literal>
|
||||
include</literal> directive.</para>
|
||||
</section>
|
||||
<section>
|
||||
@@ -171,10 +171,10 @@ include</literal> directive.</para>
|
||||
import time
|
||||
print time.strftime('%Y%m%d', time.gmtime())
|
||||
}</screen></para>
|
||||
<para>This is the similar to the previous, but flags it as Python so that BitBake knows it is Python code.</para>
|
||||
<para>This is the similar to the previous, but flags it as python so that BitBake knows it is python code.</para>
|
||||
</section>
|
||||
<section>
|
||||
<title>Defining Python functions into the global Python namespace</title>
|
||||
<title>Defining python functions into the global python namespace</title>
|
||||
<para><emphasis>NOTE:</emphasis> This is only supported in .bb and .bbclass files.</para>
|
||||
<para><screen>def get_depends(bb, d):
|
||||
if bb.data.getVar('SOMECONDITION', d, True):
|
||||
@@ -187,8 +187,8 @@ include</literal> directive.</para>
|
||||
<para>This would result in <varname>DEPENDS</varname> containing <literal>dependencywithcond</literal>.</para>
|
||||
</section>
|
||||
<section>
|
||||
<title>Variable flags</title>
|
||||
<para>Variables can have associated flags which provide a way of tagging extra information onto a variable. Several flags are used internally by BitBake but they can be used externally too if needed. The standard operations mentioned above also work on flags.</para>
|
||||
<title>Variable Flags</title>
|
||||
<para>Variables can have associated flags which provide a way of tagging extra information onto a variable. Several flags are used internally by bitbake but they can be used externally too if needed. The standard operations mentioned above also work on flags.</para>
|
||||
<para><screen><varname>VARIABLE</varname>[<varname>SOMEFLAG</varname>] = "value"</screen></para>
|
||||
<para>In this example, <varname>VARIABLE</varname> has a flag, <varname>SOMEFLAG</varname> which is set to <literal>value</literal>.</para>
|
||||
</section>
|
||||
@@ -200,19 +200,19 @@ include</literal> directive.</para>
|
||||
<section>
|
||||
<title>Tasks</title>
|
||||
<para><emphasis>NOTE:</emphasis> This is only supported in .bb and .bbclass files.</para>
|
||||
<para>In BitBake, each step that needs to be run for a given .bb is known as a task. There is a command <literal>addtask</literal> to add new tasks (must be a defined Python executable metadata and must start with <quote>do_</quote>) and describe intertask dependencies.</para>
|
||||
<para>In BitBake, each step that needs to be run for a given .bb is known as a task. There is a command <literal>addtask</literal> to add new tasks (must be a defined python executable metadata and must start with <quote>do_</quote>) and describe intertask dependencies.</para>
|
||||
<para><screen>python do_printdate () {
|
||||
import time
|
||||
print time.strftime('%Y%m%d', time.gmtime())
|
||||
}
|
||||
|
||||
addtask printdate before do_build</screen></para>
|
||||
<para>This defines the necessary Python function and adds it as a task which is now a dependency of do_build, the default task. If anyone executes the do_build task, that will result in do_printdate being run first.</para>
|
||||
<para>This defines the necessary python function and adds it as a task which is now a dependency of do_build (the default task). If anyone executes the do_build task, that will result in do_printdate being run first.</para>
|
||||
</section>
|
||||
<section>
|
||||
<title>Events</title>
|
||||
<para><emphasis>NOTE:</emphasis> This is only supported in .bb and .bbclass files.</para>
|
||||
<para>BitBake allows installation of event handlers. Events are triggered at certain points during operation, such as the beginning of operation against a given .bb, the start of a given task, task failure, task success, et cetera. The intent is to make it easy to do things like email notification on build failure.</para>
|
||||
<para>BitBake allows to install event handlers. Events are triggered at certain points during operation, such as, the beginning of operation against a given .bb, the start of a given task, task failure, task success, et cetera. The intent was to make it easy to do things like email notifications on build failure.</para>
|
||||
<para><screen>addhandler myclass_eventhandler
|
||||
python myclass_eventhandler() {
|
||||
from bb.event import getName
|
||||
@@ -228,20 +228,20 @@ of the event and the content of the <varname>FILE</varname> variable.</para>
|
||||
</section>
|
||||
<section>
|
||||
<title>Variants</title>
|
||||
<para>Two BitBake features exist to facilitate the creation of multiple buildable incarnations from a single recipe file.</para>
|
||||
<para>The first is <varname>BBCLASSEXTEND</varname>. This variable is a space separated list of classes used to "extend" the recipe for each variant. As an example, setting <screen>BBCLASSEXTEND = "native"</screen> results in a second incarnation of the current recipe being available. This second incarantion will have the "native" class inherited.</para>
|
||||
<para>The second feature is <varname>BBVERSIONS</varname>. This variable allows a single recipe to build multiple versions of a project from a single recipe file, and allows you to specify conditional metadata (using the <varname>OVERRIDES</varname> mechanism) for a single version, or an optionally named range of versions:</para>
|
||||
<para>Two Bitbake features exist to facilitate the creation of multiple buildable incarnations from a single recipe file.</para>
|
||||
<para>The first is <varname>BBCLASSEXTEND</varname>. This variable is a space separated list of classes to utilize to "extend" the recipe for each variant. As an example, setting <screen>BBCLASSEXTEND = "native"</screen> results in a second incarnation of the current recipe being available. This second incarantion will have the "native" class inherited.</para>
|
||||
<para>The second feature is <varname>BBVERSIONS</varname>. This variable allows a single recipe to be able to build multiple versions of a project from a single recipe file, and allows you to specify conditional metadata (using the <varname>OVERRIDES</varname> mechanism) for a single version, or an optionally named range of versions:</para>
|
||||
<para><screen>BBVERSIONS = "1.0 2.0 git"
|
||||
SRC_URI_git = "git://someurl/somepath.git"</screen></para>
|
||||
<para><screen>BBVERSIONS = "1.0.[0-6]:1.0.0+ \
|
||||
1.0.[7-9]:1.0.7+"
|
||||
SRC_URI_append_1.0.7+ = "file://some_patch_which_the_new_versions_need.patch;patch=1"</screen></para>
|
||||
<para>Note that the name of the range will default to the original version of the recipe, so given OE, a recipe file of foo_1.0.0+.bb will default the name of its versions to 1.0.0+. This is useful, as the range name is not only placed into overrides; it's also made available for the metadata to use in the form of the <varname>BPV</varname> variable, for use in file:// search paths (<varname>FILESPATH</varname>).</para>
|
||||
<para>Note that the name of the range will default to the original version of the recipe, so given OE, a recipe file of foo_1.0.0+.bb will default the name of its versions to 1.0.0+. This is useful, as the range name is not only placed into overrides, it's also made available for the metadata to use in the form of the <varname>BPV</varname> variable, for use in file:// search paths (<varname>FILESPATH</varname>).</para>
|
||||
</section>
|
||||
</section>
|
||||
<section>
|
||||
<title>Dependency handling</title>
|
||||
<para>BitBake 1.7.x onwards works with the metadata at the task level since this is optimal when dealing with multiple threads of execution. A robust method of specifing task dependencies is therefore needed. </para>
|
||||
<title>Dependency Handling</title>
|
||||
<para>Bitbake 1.7.x onwards works with the metadata at the task level since this is optimal when dealing with multiple threads of execution. A robust method of specifing task dependencies is therefore needed. </para>
|
||||
<section>
|
||||
<title>Dependencies internal to the .bb file</title>
|
||||
<para>Where the dependencies are internal to a given .bb file, the dependencies are handled by the previously detailed addtask directive.</para>
|
||||
@@ -249,26 +249,26 @@ SRC_URI_append_1.0.7+ = "file://some_patch_which_the_new_versions_need.patch;pat
|
||||
|
||||
<section>
|
||||
<title>DEPENDS</title>
|
||||
<para>DEPENDS lists build time dependencies. The 'deptask' flag for tasks is used to signify the task of each item listed in DEPENDS which must have completed before that task can be executed.</para>
|
||||
<para>DEPENDS is taken to specify build time dependencies. The 'deptask' flag for tasks is used to signify the task of each DEPENDS which must have completed before that task can be executed.</para>
|
||||
<para><screen>do_configure[deptask] = "do_populate_staging"</screen></para>
|
||||
<para>means the do_populate_staging task of each item in DEPENDS must have completed before do_configure can execute.</para>
|
||||
</section>
|
||||
<section>
|
||||
<title>RDEPENDS</title>
|
||||
<para>RDEPENDS lists runtime dependencies. The 'rdeptask' flag for tasks is used to signify the task of each item listed in RDEPENDS which must have completed before that task can be executed.</para>
|
||||
<para>RDEPENDS is taken to specify runtime dependencies. The 'rdeptask' flag for tasks is used to signify the task of each RDEPENDS which must have completed before that task can be executed.</para>
|
||||
<para><screen>do_package_write[rdeptask] = "do_package"</screen></para>
|
||||
<para>means the do_package task of each item in RDEPENDS must have completed before do_package_write can execute.</para>
|
||||
</section>
|
||||
<section>
|
||||
<title>Recursive DEPENDS</title>
|
||||
<para>These are specified with the 'recdeptask' flag and is used signify the task(s) of each DEPENDS which must have completed before that task can be executed. It applies recursively so the DEPENDS of each item in the original DEPENDS must be met and so on.</para>
|
||||
<para>These are specified with the 'recdeptask' flag and is used signify the task(s) of each DEPENDS which must have completed before that task can be executed. It applies recursively so also, the DEPENDS of each item in the original DEPENDS must be met and so on.</para>
|
||||
</section>
|
||||
<section>
|
||||
<title>Recursive RDEPENDS</title>
|
||||
<para>These are specified with the 'recrdeptask' flag and is used signify the task(s) of each RDEPENDS which must have completed before that task can be executed. It applies recursively so the RDEPENDS of each item in the original RDEPENDS must be met and so on. It also runs all DEPENDS first.</para>
|
||||
<para>These are specified with the 'recrdeptask' flag and is used signify the task(s) of each RDEPENDS which must have completed before that task can be executed. It applies recursively so also, the RDEPENDS of each item in the original RDEPENDS must be met and so on. It also runs all DEPENDS first too.</para>
|
||||
</section>
|
||||
<section>
|
||||
<title>Inter task</title>
|
||||
<title>Inter Task</title>
|
||||
<para>The 'depends' flag for tasks is a more generic form of which allows an interdependency on specific tasks rather than specifying the data in DEPENDS or RDEPENDS.</para>
|
||||
<para><screen>do_patch[depends] = "quilt-native:do_populate_staging"</screen></para>
|
||||
<para>means the do_populate_staging task of the target quilt-native must have completed before the do_patch can execute.</para>
|
||||
@@ -278,34 +278,35 @@ SRC_URI_append_1.0.7+ = "file://some_patch_which_the_new_versions_need.patch;pat
|
||||
<section>
|
||||
<title>Parsing</title>
|
||||
<section>
|
||||
<title>Configuration files</title>
|
||||
<para>The first kind of metadata in BitBake is configuration metadata. This metadata is global, and therefore affects <emphasis>all</emphasis> packages and tasks which are executed.</para>
|
||||
<para>BitBake will first search the current working directory for an optional "conf/bblayers.conf" configuration file. This file is expected to contain a BBLAYERS variable which is a space delimited list of 'layer' directories. For each directory in this list, a "conf/layer.conf" file will be searched for and parsed with the LAYERDIR variable being set to the directory where the layer was found. The idea is these files will setup BBPATH and other variables correctly for a given build directory automatically for the user.</para>
|
||||
<para>BitBake will then expect to find 'conf/bitbake.conf' somewhere in the user specified <envar>BBPATH</envar>. That configuration file generally has include directives to pull in any other metadata (generally files specific to architecture, machine, <emphasis>local</emphasis> and so on).</para>
|
||||
<title>Configuration Files</title>
|
||||
<para>The first of the classifications of metadata in BitBake is configuration metadata. This metadata is global, and therefore affects <emphasis>all</emphasis> packages and tasks which are executed.</para>
|
||||
<para>Bitbake will first search the current working directory for an optional "conf/bblayers.conf" configuration file. This file is expected to contain a BBLAYERS variable which is a space delimited list of 'layer' directories. For each directory in this list a "conf/layer.conf" file will be searched for and parsed with the LAYERDIR variable being set to the directory where the layer was found. The idea is these files will setup BBPATH and other variables correctly for a given build directory automatically for the user.</para>
|
||||
<para>Bitbake will then expect to find 'conf/bitbake.conf' somewhere in the user specified <envar>BBPATH</envar>. That configuration file generally has include directives to pull in any other metadata (generally files specific to architecture, machine, <emphasis>local</emphasis> and so on.</para>
|
||||
<para>Only variable definitions and include directives are allowed in .conf files.</para>
|
||||
</section>
|
||||
<section>
|
||||
<title>Classes</title>
|
||||
<para>BitBake classes are our rudimentary inheritance mechanism. As briefly mentioned in the metadata introduction, they're parsed when an <literal>inherit</literal> directive is encountered, and they are located in classes/ relative to the directories in <envar>BBPATH</envar>.</para>
|
||||
<para>BitBake classes are our rudimentary inheritance mechanism. As briefly mentioned in the metadata introduction, they're parsed when an <literal>inherit</literal> directive is encountered, and they are located in classes/ relative to the dirs in <envar>BBPATH</envar>.</para>
|
||||
</section>
|
||||
<section>
|
||||
<title>.bb files</title>
|
||||
<title>.bb Files</title>
|
||||
<para>A BitBake (.bb) file is a logical unit of tasks to be executed. Normally this is a package to be built. Inter-.bb dependencies are obeyed. The files themselves are located via the <varname>BBFILES</varname> variable, which is set to a space separated list of .bb files, and does handle wildcards.</para>
|
||||
</section>
|
||||
</section>
|
||||
</chapter>
|
||||
|
||||
<chapter>
|
||||
<title>File download support</title>
|
||||
<title>File Download support</title>
|
||||
<section>
|
||||
<title>Overview</title>
|
||||
<para>BitBake provides support to download files this procedure is called fetching. The SRC_URI is normally used to tell BitBake which files to fetch. The next sections will describe the available fetchers and their options. Each fetcher honors a set of variables and per URI parameters separated by a <quote>;</quote> consisting of a key and a value. The semantics of the variables and parameters are defined by the fetcher. BitBake tries to have consistent semantics between the different fetchers.
|
||||
<para>BitBake provides support to download files this procedure is called fetching. The SRC_URI is normally used to indicate BitBake which files to fetch. The next sections will describe th available fetchers and the options they have. Each Fetcher honors a set of Variables and
|
||||
a per URI parameters separated by a <quote>;</quote> consisting of a key and a value. The semantic of the Variables and Parameters are defined by the Fetcher. BitBakes tries to have a consistent semantic between the different Fetchers.
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section>
|
||||
<title>Local file fetcher</title>
|
||||
<para>The URN for the local file fetcher is <emphasis>file</emphasis>. The filename can be either absolute or relative. If the filename is relative, <varname>FILESPATH</varname> and <varname>FILESDIR</varname> will be used to find the appropriate relative file, depending on the <varname>OVERRIDES</varname>. Single files and complete directories can be specified.
|
||||
<title>Local File Fetcher</title>
|
||||
<para>The URN for the Local File Fetcher is <emphasis>file</emphasis>. The filename can be either absolute or relative. If the filename is relative <varname>FILESPATH</varname> and <varname>FILESDIR</varname> will be used to find the appropriate relative file depending on the <varname>OVERRIDES</varname>. Single files and complete directories can be specified.
|
||||
<screen><varname>SRC_URI</varname>= "file://relativefile.patch"
|
||||
<varname>SRC_URI</varname>= "file://relativefile.patch;this=ignored"
|
||||
<varname>SRC_URI</varname>= "file:///Users/ich/very_important_software"
|
||||
@@ -314,11 +315,10 @@ SRC_URI_append_1.0.7+ = "file://some_patch_which_the_new_versions_need.patch;pat
|
||||
</section>
|
||||
|
||||
<section>
|
||||
<title>CVS file fetcher</title>
|
||||
<para>The URN for the CVS fetcher is <emphasis>cvs</emphasis>. This fetcher honors the variables <varname>DL_DIR</varname>, <varname>SRCDATE</varname>, <varname>FETCHCOMMAND_cvs</varname>, <varname>UPDATECOMMAND_cvs</varname>. <varname>DL_DIR</varname> specifies where a temporary checkout is saved. <varname>SRCDATE</varname> specifies which date to use when doing the fetching (the special value of "now" will cause the checkout to be updated on every build). <varname>FETCHCOMMAND</varname> and <varname>UPDATECOMMAND</varname> specify which executables to use for the CVS checkout or update.
|
||||
<title>CVS File Fetcher</title>
|
||||
<para>The URN for the CVS Fetcher is <emphasis>cvs</emphasis>. This Fetcher honors the variables <varname>DL_DIR</varname>, <varname>SRCDATE</varname>, <varname>FETCHCOMMAND_cvs</varname>, <varname>UPDATECOMMAND_cvs</varname>. <varname>DL_DIR</varname> specifies where a temporary checkout is saved, <varname>SRCDATE</varname> specifies which date to use when doing the fetching (the special value of "now" will cause the checkout to be updated on every build), <varname>FETCHCOMMAND</varname> and <varname>UPDATECOMMAND</varname> specify which executables should be used when doing the CVS checkout or update.
|
||||
</para>
|
||||
<para>The supported parameters are <varname>module</varname>, <varname>tag</varname>, <varname>date</varname>, <varname>method</varname>, <varname>localdir</varname>, <varname>rsh</varname> and <varname>scmdata</varname>. The <varname>module</varname> specifies which module to check out, the <varname>tag</varname> describes which CVS TAG should be used for the checkout. By default the TAG is empty. A <varname>date</varname> can be specified to override the SRCDATE of the configuration to checkout a specific date. The special value of "now" will cause the checkout to be updated on every build.<varname>method</varname> is by default <emphasis>pserver</emphasis>. If <emphasis>ext</emphasis> is used the <varname>rsh</varname> parameter will be evaluated and <varname>CVS_RSH</varname> will be set. Finally, <varname>localdir</varname> is used to checkout into a special directory relative to <varname>CVSDIR</varname>.
|
||||
|
||||
<para>The supported Parameters are <varname>module</varname>, <varname>tag</varname>, <varname>date</varname>, <varname>method</varname>, <varname>localdir</varname>, <varname>rsh</varname> and <varname>scmdata</varname>. The <varname>module</varname> specifies which module to check out, the <varname>tag</varname> describes which CVS TAG should be used for the checkout. By default the TAG is empty. A <varname>date</varname> can be specified to override the SRCDATE of the configuration to checkout a specific date. The special value of "now" will cause the checkout to be updated on every build.<varname>method</varname> is by default <emphasis>pserver</emphasis>, if <emphasis>ext</emphasis> is used the <varname>rsh</varname> parameter will be evaluated and <varname>CVS_RSH</varname> will be set. Finally <varname>localdir</varname> is used to checkout into a special directory relative to <varname>CVSDIR</varname>. If <varname>scmdata</varname> is set to <quote>keep</quote>
|
||||
<screen><varname>SRC_URI</varname> = "cvs://CVSROOT;module=mymodule;tag=some-version;method=ext"
|
||||
<varname>SRC_URI</varname> = "cvs://CVSROOT;module=mymodule;date=20060126;localdir=usethat"
|
||||
</screen>
|
||||
@@ -326,10 +326,11 @@ SRC_URI_append_1.0.7+ = "file://some_patch_which_the_new_versions_need.patch;pat
|
||||
</section>
|
||||
|
||||
<section>
|
||||
<title>HTTP/FTP fetcher</title>
|
||||
<para>The URNs for the HTTP/FTP fetcher are <emphasis>http</emphasis>, <emphasis>https</emphasis> and <emphasis>ftp</emphasis>. This fetcher honors the variables <varname>DL_DIR</varname>, <varname>FETCHCOMMAND_wget</varname>, <varname>PREMIRRORS</varname>, <varname>MIRRORS</varname>. The <varname>DL_DIR</varname> defines where to store the fetched file. <varname>FETCHCOMMAND</varname> contains the command used for fetching. <quote>${URI}</quote> and <quote>${FILES}</quote> will be replaced by the URI and basename of the file to be fetched. <varname>PREMIRRORS</varname> will be tried first when fetching a file. If that fails, the actual file will be tried and finally all <varname>MIRRORS</varname> will be tried.
|
||||
<title>HTTP/FTP Fetcher</title>
|
||||
<para>The URNs for the HTTP/FTP are <emphasis>http</emphasis>, <emphasis>https</emphasis> and <emphasis>ftp</emphasis>. This Fetcher honors the variables <varname>DL_DIR</varname>, <varname>FETCHCOMMAND_wget</varname>, <varname>PREMIRRORS</varname>, <varname>MIRRORS</varname>. The <varname>DL_DIR</varname> defines where to store the fetched file, <varname>FETCHCOMMAND</varname> contains the command used for fetching. <quote>${URI}</quote> and <quote>${FILES}</quote> will be replaced by the uri and basename of the to be fetched file. <varname>PREMIRRORS</varname>
|
||||
will be tried first when fetching a file if that fails the actual file will be tried and finally all <varname>MIRRORS</varname> will be tried.
|
||||
</para>
|
||||
<para>The only supported parameter is <varname>md5sum</varname>. After a fetch the <varname>md5sum</varname> of the file will be calculated and the two sums will be compared.
|
||||
<para>The only supported Parameter is <varname>md5sum</varname>. After a fetch the <varname>md5sum</varname> of the file will be calculated and the two sums will be compared.
|
||||
</para>
|
||||
<para><screen><varname>SRC_URI</varname> = "http://oe.handhelds.org/not_there.aac;md5sum=12343"
|
||||
<varname>SRC_URI</varname> = "ftp://oe.handhelds.org/not_there_as_well.aac;md5sum=1234"
|
||||
@@ -338,19 +339,19 @@ SRC_URI_append_1.0.7+ = "file://some_patch_which_the_new_versions_need.patch;pat
|
||||
</section>
|
||||
|
||||
<section>
|
||||
<title>SVK fetcher</title>
|
||||
<title>SVK Fetcher</title>
|
||||
<para>
|
||||
<emphasis>Currently NOT supported</emphasis>
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section>
|
||||
<title>SVN fetcher</title>
|
||||
<para>The URN for the SVN fetcher is <emphasis>svn</emphasis>.
|
||||
<title>SVN Fetcher</title>
|
||||
<para>The URN for the SVN Fetcher is <emphasis>svn</emphasis>.
|
||||
</para>
|
||||
<para>This fetcher honors the variables <varname>FETCHCOMMAND_svn</varname>, <varname>DL_DIR</varname>, <varname>SRCDATE</varname>. <varname>FETCHCOMMAND</varname> contains the subversion command. <varname>DL_DIR</varname> is the directory where tarballs will be saved. <varname>SRCDATE</varname> specifies which date to use when doing the fetching (the special value of "now" will cause the checkout to be updated on every build).
|
||||
<para>This Fetcher honors the variables <varname>FETCHCOMMAND_svn</varname>, <varname>DL_DIR</varname>, <varname>SRCDATE</varname>. <varname>FETCHCOMMAND</varname> contains the subversion command, <varname>DL_DIR</varname> is the directory where tarballs will be saved, <varname>SRCDATE</varname> specifies which date to use when doing the fetching (the special value of "now" will cause the checkout to be updated on every build).
|
||||
</para>
|
||||
<para>The supported parameters are <varname>proto</varname>, <varname>rev</varname> and <varname>scmdata</varname>. <varname>proto</varname> is the Subversion protocol, <varname>rev</varname> is the Subversion revision. If <varname>scmdata</varname> is set to <quote>keep</quote>, the <quote>.svn</quote> directories will be available during compile-time.
|
||||
<para>The supported Parameters are <varname>proto</varname>, <varname>rev</varname> and <varname>scmdata</varname>. <varname>proto</varname> is the subversion protocol, <varname>rev</varname> is the subversion revision. If <varname>scmdata</varname> is set to <quote>keep</quote>, the <quote>.svn</quote> directories will be available during compile-time.
|
||||
</para>
|
||||
<para><screen><varname>SRC_URI</varname> = "svn://svn.oe.handhelds.org/svn;module=vip;proto=http;rev=667"
|
||||
<varname>SRC_URI</varname> = "svn://svn.oe.handhelds.org/svn/;module=opie;proto=svn+ssh;date=20060126"
|
||||
@@ -358,12 +359,12 @@ SRC_URI_append_1.0.7+ = "file://some_patch_which_the_new_versions_need.patch;pat
|
||||
</section>
|
||||
|
||||
<section>
|
||||
<title>GIT fetcher</title>
|
||||
<title>GIT Fetcher</title>
|
||||
<para>The URN for the GIT Fetcher is <emphasis>git</emphasis>.
|
||||
</para>
|
||||
<para>The Variables <varname>DL_DIR</varname>, <varname>GITDIR</varname> are used. <varname>DL_DIR</varname> will be used to store the checkedout version. <varname>GITDIR</varname> will be used as the base directory where the git tree is cloned to.
|
||||
</para>
|
||||
<para>The parameters are <emphasis>tag</emphasis>, <emphasis>protocol</emphasis> and <emphasis>scmdata</emphasis>. <emphasis>tag</emphasis> is a Git tag, the default is <quote>master</quote>. <emphasis>protocol</emphasis> is the Git protocol to use and defaults to <quote>rsync</quote>. If <emphasis>scmdata</emphasis> is set to <quote>keep</quote>, the <quote>.git</quote> directory will be available during compile-time.
|
||||
<para>The Parameters are <emphasis>tag</emphasis>, <emphasis>protocol</emphasis> and <emphasis>scmdata</emphasis>. <emphasis>tag</emphasis> is a git tag, the default is <quote>master</quote>. <emphasis>protocol</emphasis> is the git protocol to use and defaults to <quote>rsync</quote>. If <emphasis>scmdata</emphasis> is set to <quote>keep</quote>, the <quote>.git</quote> directory will be available during compile-time.
|
||||
</para>
|
||||
<para><screen><varname>SRC_URI</varname> = "git://git.oe.handhelds.org/git/vip.git;tag=version-1"
|
||||
<varname>SRC_URI</varname> = "git://git.oe.handhelds.org/git/vip.git;protocol=http"
|
||||
@@ -374,13 +375,13 @@ SRC_URI_append_1.0.7+ = "file://some_patch_which_the_new_versions_need.patch;pat
|
||||
|
||||
|
||||
<chapter>
|
||||
<title>The BitBake command</title>
|
||||
<title>The bitbake command</title>
|
||||
<section>
|
||||
<title>Introduction</title>
|
||||
<para>bitbake is the primary command in the system. It facilitates executing tasks in a single .bb file, or executing a given task on a set of multiple .bb files, accounting for interdependencies amongst them.</para>
|
||||
</section>
|
||||
<section>
|
||||
<title>Usage and syntax</title>
|
||||
<title>Usage and Syntax</title>
|
||||
<para>
|
||||
<screen><prompt>$ </prompt>bitbake --help
|
||||
usage: bitbake [options] [package ...]
|
||||
@@ -416,6 +417,8 @@ options:
|
||||
than once.
|
||||
-n, --dry-run don't execute, just go through the motions
|
||||
-p, --parse-only quit after parsing the BB files (developers only)
|
||||
-d, --disable-psyco disable using the psyco just-in-time compiler (not
|
||||
recommended)
|
||||
-s, --show-versions show current and preferred versions of all packages
|
||||
-e, --environment show the global or per-package environment (this is
|
||||
what used to be bbread)
|
||||
@@ -435,7 +438,7 @@ options:
|
||||
<para>
|
||||
<example>
|
||||
<title>Executing a task against a single .bb</title>
|
||||
<para>Executing tasks for a single file is relatively simple. You specify the file in question, and BitBake parses it and executes the specified task (or <quote>build</quote> by default). It obeys intertask dependencies when doing so.</para>
|
||||
<para>Executing tasks for a single file is relatively simple. You specify the file in question, and bitbake parses it and executes the specified task (or <quote>build</quote> by default). It obeys intertask dependencies when doing so.</para>
|
||||
<para><quote>clean</quote> task:</para>
|
||||
<para><screen><prompt>$ </prompt>bitbake -b blah_1.0.bb -c clean</screen></para>
|
||||
<para><quote>build</quote> task:</para>
|
||||
@@ -445,8 +448,8 @@ options:
|
||||
<para>
|
||||
<example>
|
||||
<title>Executing tasks against a set of .bb files</title>
|
||||
<para>There are a number of additional complexities introduced when one wants to manage multiple .bb files. Clearly there needs to be a way to tell BitBake what files are available, and of those, which we want to execute at this time. There also needs to be a way for each .bb to express its dependencies, both for build time and runtime. There must be a way for the user to express their preferences when multiple .bb's provide the same functionality, or when there are multiple versions of a .bb.</para>
|
||||
<para>The next section, Metadata, outlines how to specify such things.</para>
|
||||
<para>There are a number of additional complexities introduced when one wants to manage multiple .bb files. Clearly there needs to be a way to tell bitbake what files are available, and of those, which we want to execute at this time. There also needs to be a way for each .bb to express its dependencies, both for build time and runtime. There must be a way for the user to express their preferences when multiple .bb's provide the same functionality, or when there are multiple versions of a .bb.</para>
|
||||
<para>The next section, Metadata, outlines how one goes about specifying such things.</para>
|
||||
<para>Note that the bitbake command, when not using --buildfile, accepts a <varname>PROVIDER</varname>, not a filename or anything else. By default, a .bb generally PROVIDES its packagename, packagename-version, and packagename-version-revision.</para>
|
||||
<screen><prompt>$ </prompt>bitbake blah</screen>
|
||||
<screen><prompt>$ </prompt>bitbake blah-1.0</screen>
|
||||
@@ -458,8 +461,8 @@ options:
|
||||
<example>
|
||||
<title>Generating dependency graphs</title>
|
||||
<para>BitBake is able to generate dependency graphs using the dot syntax. These graphs can be converted
|
||||
to images using the <application>dot</application> application from <ulink url="http://www.graphviz.org">Graphviz</ulink>.
|
||||
Two files will be written into the current working directory, <emphasis>depends.dot</emphasis> containing dependency information at the package level and <emphasis>task-depends.dot</emphasis> containing a breakdown of the dependencies at the task level. To stop depending on common depends, one can use the <prompt>-I depend</prompt> to omit these from the graph. This can lead to more readable graphs. This way, <varname>DEPENDS</varname> from inherited classes such as base.bbclass can be removed from the graph.</para>
|
||||
to images using the <application>dot</application> application from <ulink url="http://www.graphviz.org">graphviz</ulink>.
|
||||
Two files will be written into the current working directory, <emphasis>depends.dot</emphasis> containing dependency information at the package level and <emphasis>task-depends.dot</emphasis> containing a breakdown of the dependencies at the task level. To stop depending on common depends one can use the <prompt>-I depend</prompt> to omit these from the graph. This can lead to more readable graphs. E.g. this way <varname>DEPENDS</varname> from inherited classes, e.g. base.bbclass, can be removed from the graph.</para>
|
||||
<screen><prompt>$ </prompt>bitbake -g blah</screen>
|
||||
<screen><prompt>$ </prompt>bitbake -g -I virtual/whatever -I bloom blah</screen>
|
||||
</example>
|
||||
@@ -467,20 +470,20 @@ Two files will be written into the current working directory, <emphasis>depends.
|
||||
</section>
|
||||
<section>
|
||||
<title>Special variables</title>
|
||||
<para>Certain variables affect BitBake operation:</para>
|
||||
<para>Certain variables affect bitbake operation:</para>
|
||||
<section>
|
||||
<title><varname>BB_NUMBER_THREADS</varname></title>
|
||||
<para> The number of threads BitBake should run at once (default: 1).</para>
|
||||
<para> The number of threads bitbake should run at once (default: 1).</para>
|
||||
</section>
|
||||
</section>
|
||||
<section>
|
||||
<title>Metadata</title>
|
||||
<para>As you may have seen in the usage information, or in the information about .bb files, the <varname>BBFILES</varname> variable is how the BitBake tool locates its files. This variable is a space separated list of files that are available, and supports wildcards.
|
||||
<para>As you may have seen in the usage information, or in the information about .bb files, the BBFILES variable is how the bitbake tool locates its files. This variable is a space separated list of files that are available, and supports wildcards.
|
||||
<example>
|
||||
<title>Setting BBFILES</title>
|
||||
<programlisting><varname>BBFILES</varname> = "/path/to/bbfiles/*.bb"</programlisting>
|
||||
</example></para>
|
||||
<para>With regard to dependencies, it expects the .bb to define a <varname>DEPENDS</varname> variable, which contains a space separated list of <quote>package names</quote>, which themselves are the <varname>PN</varname> variable. The <varname>PN</varname> variable is, in general, set to a component of the .bb filename by default.</para>
|
||||
<para>With regard to dependencies, it expects the .bb to define a <varname>DEPENDS</varname> variable, which contains a space separated list of <quote>package names</quote>, which themselves are the <varname>PN</varname> variable. The <varname>PN</varname> variable is, in general, by default, set to a component of the .bb filename.</para>
|
||||
<example>
|
||||
<title>Depending on another .bb</title>
|
||||
<para>a.bb:
|
||||
@@ -493,7 +496,7 @@ DEPENDS += "package-b"</screen>
|
||||
</example>
|
||||
<example>
|
||||
<title>Using PROVIDES</title>
|
||||
<para>This example shows the usage of the <varname>PROVIDES</varname> variable, which allows a given .bb to specify what functionality it provides.</para>
|
||||
<para>This example shows the usage of the PROVIDES variable, which allows a given .bb to specify what functionality it provides.</para>
|
||||
<para>package1.bb:
|
||||
<screen>PROVIDES += "virtual/package"</screen>
|
||||
</para>
|
||||
@@ -503,16 +506,16 @@ DEPENDS += "package-b"</screen>
|
||||
<para>package3.bb:
|
||||
<screen>PROVIDES += "virtual/package"</screen>
|
||||
</para>
|
||||
<para>As you can see, we have two different .bb's that provide the same functionality (virtual/package). Clearly, there needs to be a way for the person running BitBake to control which of those providers gets used. There is, indeed, such a way.</para>
|
||||
<para>As you can see, here there are two different .bb's that provide the same functionality (virtual/package). Clearly, there needs to be a way for the person running bitbake to control which of those providers gets used. There is, indeed, such a way.</para>
|
||||
<para>The following would go into a .conf file, to select package1:
|
||||
<screen>PREFERRED_PROVIDER_virtual/package = "package1"</screen>
|
||||
</para>
|
||||
</example>
|
||||
<example>
|
||||
<title>Specifying version preference</title>
|
||||
<para>When there are multiple <quote>versions</quote> of a given package, BitBake defaults to selecting the most recent version, unless otherwise specified. If the .bb in question has a <varname>DEFAULT_PREFERENCE</varname> set lower than the other .bb's (default is 0), then it will not be selected. This allows the person or persons maintaining the repository of .bb files to specify their preference for the default selected version. In addition, the user can specify their preferred version.</para>
|
||||
<para>When there are multiple <quote>versions</quote> of a given package, bitbake defaults to selecting the most recent version, unless otherwise specified. If the .bb in question has a <varname>DEFAULT_PREFERENCE</varname> set lower than the other .bb's (default is 0), then it will not be selected. This allows the person or persons maintaining the repository of .bb files to specify their preferences for the default selected version. In addition, the user can specify their preferences with regard to version.</para>
|
||||
<para>If the first .bb is named <filename>a_1.1.bb</filename>, then the <varname>PN</varname> variable will be set to <quote>a</quote>, and the <varname>PV</varname> variable will be set to 1.1.</para>
|
||||
<para>If we then have an <filename>a_1.2.bb</filename>, BitBake will choose 1.2 by default. However, if we define the following variable in a .conf that BitBake parses, we can change that.
|
||||
<para>If we then have an <filename>a_1.2.bb</filename>, bitbake will choose 1.2 by default. However, if we define the following variable in a .conf that bitbake parses, we can change that.
|
||||
<screen>PREFERRED_VERSION_a = "1.1"</screen>
|
||||
</para>
|
||||
</example>
|
||||
|
||||
@@ -21,7 +21,7 @@
|
||||
# with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
__version__ = "1.13.3"
|
||||
__version__ = "1.11.0"
|
||||
|
||||
import sys
|
||||
if sys.version_info < (2, 6, 0):
|
||||
@@ -29,7 +29,7 @@ if sys.version_info < (2, 6, 0):
|
||||
|
||||
import os
|
||||
import logging
|
||||
|
||||
import traceback
|
||||
|
||||
class NullHandler(logging.Handler):
|
||||
def emit(self, record):
|
||||
@@ -51,6 +51,9 @@ class BBLogger(Logger):
|
||||
def verbose(self, msg, *args, **kwargs):
|
||||
return self.log(logging.INFO - 1, msg, *args, **kwargs)
|
||||
|
||||
def exception(self, msg, *args, **kwargs):
|
||||
return self.critical("%s\n%s" % (msg, traceback.format_exc()), *args, **kwargs)
|
||||
|
||||
logging.raiseExceptions = False
|
||||
logging.setLoggerClass(BBLogger)
|
||||
|
||||
@@ -76,10 +79,6 @@ def plain(*args):
|
||||
logger.plain(''.join(args))
|
||||
|
||||
def debug(lvl, *args):
|
||||
if isinstance(lvl, basestring):
|
||||
logger.warn("Passed invalid debug level '%s' to bb.debug", lvl)
|
||||
args = (lvl,) + args
|
||||
lvl = 1
|
||||
logger.debug(lvl, ''.join(args))
|
||||
|
||||
def note(*args):
|
||||
@@ -96,7 +95,7 @@ def fatal(*args):
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def deprecated(func, name=None, advice=""):
|
||||
def deprecated(func, name = None, advice = ""):
|
||||
"""This is a decorator which can be used to mark functions
|
||||
as deprecated. It will result in a warning being emmitted
|
||||
when the function is used."""
|
||||
@@ -110,8 +109,8 @@ def deprecated(func, name=None, advice=""):
|
||||
def newFunc(*args, **kwargs):
|
||||
warnings.warn("Call to deprecated function %s%s." % (name,
|
||||
advice),
|
||||
category=DeprecationWarning,
|
||||
stacklevel=2)
|
||||
category = PendingDeprecationWarning,
|
||||
stacklevel = 2)
|
||||
return func(*args, **kwargs)
|
||||
newFunc.__name__ = func.__name__
|
||||
newFunc.__doc__ = func.__doc__
|
||||
|
||||
@@ -28,12 +28,11 @@
|
||||
import os
|
||||
import sys
|
||||
import logging
|
||||
import shlex
|
||||
import bb
|
||||
import bb.msg
|
||||
import bb.process
|
||||
from contextlib import nested
|
||||
from bb import data, event, utils
|
||||
from bb import data, event, mkdirhier, utils
|
||||
|
||||
bblogger = logging.getLogger('BitBake')
|
||||
logger = logging.getLogger('BitBake.Build')
|
||||
@@ -163,7 +162,6 @@ def exec_func(func, d, dirs = None):
|
||||
lockfiles = None
|
||||
|
||||
tempdir = data.getVar('T', d, 1)
|
||||
bb.utils.mkdirhier(tempdir)
|
||||
runfile = os.path.join(tempdir, 'run.{0}.{1}'.format(func, os.getpid()))
|
||||
|
||||
with bb.utils.fileslocked(lockfiles):
|
||||
@@ -183,16 +181,16 @@ def exec_func_python(func, d, runfile, cwd=None):
|
||||
"""Execute a python BB 'function'"""
|
||||
|
||||
bbfile = d.getVar('FILE', True)
|
||||
try:
|
||||
olddir = os.getcwd()
|
||||
except OSError:
|
||||
olddir = None
|
||||
code = _functionfmt.format(function=func, body=d.getVar(func, True))
|
||||
bb.utils.mkdirhier(os.path.dirname(runfile))
|
||||
with open(runfile, 'w') as script:
|
||||
script.write(code)
|
||||
|
||||
if cwd:
|
||||
try:
|
||||
olddir = os.getcwd()
|
||||
except OSError:
|
||||
olddir = None
|
||||
os.chdir(cwd)
|
||||
|
||||
try:
|
||||
@@ -204,11 +202,8 @@ def exec_func_python(func, d, runfile, cwd=None):
|
||||
|
||||
raise FuncFailed(func, None)
|
||||
finally:
|
||||
if cwd and olddir:
|
||||
try:
|
||||
os.chdir(olddir)
|
||||
except OSError:
|
||||
pass
|
||||
if olddir:
|
||||
os.chdir(olddir)
|
||||
|
||||
def exec_func_shell(function, d, runfile, cwd=None):
|
||||
"""Execute a shell function from the metadata
|
||||
@@ -229,8 +224,12 @@ def exec_func_shell(function, d, runfile, cwd=None):
|
||||
if cwd:
|
||||
script.write("cd %s\n" % cwd)
|
||||
script.write("%s\n" % function)
|
||||
os.fchmod(script.fileno(), 0775)
|
||||
|
||||
os.chmod(runfile, 0775)
|
||||
env = {
|
||||
'PATH': d.getVar('PATH', True),
|
||||
'LC_ALL': 'C',
|
||||
}
|
||||
|
||||
cmd = runfile
|
||||
|
||||
@@ -240,7 +239,7 @@ def exec_func_shell(function, d, runfile, cwd=None):
|
||||
logfile = sys.stdout
|
||||
|
||||
try:
|
||||
bb.process.run(cmd, shell=False, stdin=NULL, log=logfile)
|
||||
bb.process.run(cmd, env=env, shell=False, stdin=NULL, log=logfile)
|
||||
except bb.process.CmdError:
|
||||
logfn = d.getVar('BB_LOGFILE', True)
|
||||
raise FuncFailed(function, logfn)
|
||||
@@ -383,10 +382,10 @@ def stamp_internal(taskname, d, file_name):
|
||||
taskflagname = taskname.replace("_setscene", "")
|
||||
|
||||
if file_name:
|
||||
stamp = d.stamp_base[file_name].get(taskflagname) or d.stamp[file_name]
|
||||
stamp = d.stamp[file_name]
|
||||
extrainfo = d.stamp_extrainfo[file_name].get(taskflagname) or ""
|
||||
else:
|
||||
stamp = d.getVarFlag(taskflagname, 'stamp-base', True) or d.getVar('STAMP', True)
|
||||
stamp = d.getVar('STAMP', True)
|
||||
file_name = d.getVar('BB_FILENAME', True)
|
||||
extrainfo = d.getVarFlag(taskflagname, 'stamp-extra-info', True) or ""
|
||||
|
||||
@@ -412,12 +411,6 @@ def make_stamp(task, d, file_name = None):
|
||||
f = open(stamp, "w")
|
||||
f.close()
|
||||
|
||||
# If we're in task context, write out a signature file for each task
|
||||
# as it completes
|
||||
if not task.endswith("_setscene") and task != "do_setscene" and not file_name:
|
||||
file_name = d.getVar('BB_FILENAME', True)
|
||||
bb.parse.siggen.dump_sigtask(file_name, task, d.getVar('STAMP', True), True)
|
||||
|
||||
def del_stamp(task, d, file_name = None):
|
||||
"""
|
||||
Removes a stamp for a given task
|
||||
@@ -463,7 +456,6 @@ def add_tasks(tasklist, d):
|
||||
getTask('nostamp')
|
||||
getTask('fakeroot')
|
||||
getTask('noexec')
|
||||
getTask('umask')
|
||||
task_deps['parents'][task] = []
|
||||
for dep in flags['deps']:
|
||||
dep = data.expand(dep, d)
|
||||
|
||||
@@ -30,7 +30,7 @@
|
||||
|
||||
import os
|
||||
import logging
|
||||
from collections import defaultdict
|
||||
from collections import defaultdict, namedtuple
|
||||
import bb.data
|
||||
import bb.utils
|
||||
|
||||
@@ -43,15 +43,48 @@ except ImportError:
|
||||
logger.info("Importing cPickle failed. "
|
||||
"Falling back to a very slow implementation.")
|
||||
|
||||
__cache_version__ = "142"
|
||||
__cache_version__ = "138"
|
||||
|
||||
def getCacheFile(path, filename):
|
||||
return os.path.join(path, filename)
|
||||
recipe_fields = (
|
||||
'pn',
|
||||
'pv',
|
||||
'pr',
|
||||
'pe',
|
||||
'defaultpref',
|
||||
'depends',
|
||||
'provides',
|
||||
'task_deps',
|
||||
'stamp',
|
||||
'stamp_extrainfo',
|
||||
'broken',
|
||||
'not_world',
|
||||
'skipped',
|
||||
'timestamp',
|
||||
'packages',
|
||||
'packages_dynamic',
|
||||
'rdepends',
|
||||
'rdepends_pkg',
|
||||
'rprovides',
|
||||
'rprovides_pkg',
|
||||
'rrecommends',
|
||||
'rrecommends_pkg',
|
||||
'nocache',
|
||||
'variants',
|
||||
'file_depends',
|
||||
'tasks',
|
||||
'basetaskhashes',
|
||||
'hashfilename',
|
||||
'inherits',
|
||||
'summary',
|
||||
'license',
|
||||
'section',
|
||||
'fakerootenv',
|
||||
'fakerootdirs'
|
||||
)
|
||||
|
||||
# RecipeInfoCommon defines common data retrieving methods
|
||||
# from meta data for caches. CoreRecipeInfo as well as other
|
||||
# Extra RecipeInfo needs to inherit this class
|
||||
class RecipeInfoCommon(object):
|
||||
|
||||
class RecipeInfo(namedtuple('RecipeInfo', recipe_fields)):
|
||||
__slots__ = ()
|
||||
|
||||
@classmethod
|
||||
def listvar(cls, var, metadata):
|
||||
@@ -84,167 +117,66 @@ class RecipeInfoCommon(object):
|
||||
def getvar(cls, var, metadata):
|
||||
return metadata.getVar(var, True) or ''
|
||||
|
||||
|
||||
class CoreRecipeInfo(RecipeInfoCommon):
|
||||
__slots__ = ()
|
||||
|
||||
cachefile = "bb_cache.dat"
|
||||
|
||||
def __init__(self, filename, metadata):
|
||||
self.file_depends = metadata.getVar('__depends', False)
|
||||
self.timestamp = bb.parse.cached_mtime(filename)
|
||||
self.variants = self.listvar('__VARIANTS', metadata) + ['']
|
||||
self.appends = self.listvar('__BBAPPEND', metadata)
|
||||
self.nocache = self.getvar('__BB_DONT_CACHE', metadata)
|
||||
|
||||
self.skipreason = self.getvar('__SKIPPED', metadata)
|
||||
if self.skipreason:
|
||||
self.pn = self.getvar('PN', metadata) or bb.parse.BBHandler.vars_from_file(filename,metadata)[0]
|
||||
self.skipped = True
|
||||
self.provides = self.depvar('PROVIDES', metadata)
|
||||
self.rprovides = self.depvar('RPROVIDES', metadata)
|
||||
return
|
||||
|
||||
self.tasks = metadata.getVar('__BBTASKS', False)
|
||||
|
||||
self.pn = self.getvar('PN', metadata)
|
||||
self.packages = self.listvar('PACKAGES', metadata)
|
||||
if not self.pn in self.packages:
|
||||
self.packages.append(self.pn)
|
||||
|
||||
self.basetaskhashes = self.taskvar('BB_BASEHASH', self.tasks, metadata)
|
||||
self.hashfilename = self.getvar('BB_HASHFILENAME', metadata)
|
||||
|
||||
self.file_depends = metadata.getVar('__depends', False)
|
||||
self.task_deps = metadata.getVar('_task_deps', False) or {'tasks': [], 'parents': {}}
|
||||
|
||||
self.skipped = False
|
||||
self.pe = self.getvar('PE', metadata)
|
||||
self.pv = self.getvar('PV', metadata)
|
||||
self.pr = self.getvar('PR', metadata)
|
||||
self.defaultpref = self.intvar('DEFAULT_PREFERENCE', metadata)
|
||||
self.broken = self.getvar('BROKEN', metadata)
|
||||
self.not_world = self.getvar('EXCLUDE_FROM_WORLD', metadata)
|
||||
self.stamp = self.getvar('STAMP', metadata)
|
||||
self.stamp_base = self.flaglist('stamp-base', self.tasks, metadata)
|
||||
self.stamp_extrainfo = self.flaglist('stamp-extra-info', self.tasks, metadata)
|
||||
self.packages_dynamic = self.listvar('PACKAGES_DYNAMIC', metadata)
|
||||
self.depends = self.depvar('DEPENDS', metadata)
|
||||
self.provides = self.depvar('PROVIDES', metadata)
|
||||
self.rdepends = self.depvar('RDEPENDS', metadata)
|
||||
self.rprovides = self.depvar('RPROVIDES', metadata)
|
||||
self.rrecommends = self.depvar('RRECOMMENDS', metadata)
|
||||
self.rprovides_pkg = self.pkgvar('RPROVIDES', self.packages, metadata)
|
||||
self.rdepends_pkg = self.pkgvar('RDEPENDS', self.packages, metadata)
|
||||
self.rrecommends_pkg = self.pkgvar('RRECOMMENDS', self.packages, metadata)
|
||||
self.inherits = self.getvar('__inherit_cache', metadata)
|
||||
self.summary = self.getvar('SUMMARY', metadata)
|
||||
self.license = self.getvar('LICENSE', metadata)
|
||||
self.section = self.getvar('SECTION', metadata)
|
||||
self.fakerootenv = self.getvar('FAKEROOTENV', metadata)
|
||||
self.fakerootdirs = self.getvar('FAKEROOTDIRS', metadata)
|
||||
@classmethod
|
||||
def make_optional(cls, default=None, **kwargs):
|
||||
"""Construct the namedtuple from the specified keyword arguments,
|
||||
with every value considered optional, using the default value if
|
||||
it was not specified."""
|
||||
for field in cls._fields:
|
||||
kwargs[field] = kwargs.get(field, default)
|
||||
return cls(**kwargs)
|
||||
|
||||
@classmethod
|
||||
def init_cacheData(cls, cachedata):
|
||||
# CacheData in Core RecipeInfo Class
|
||||
cachedata.task_deps = {}
|
||||
cachedata.pkg_fn = {}
|
||||
cachedata.pkg_pn = defaultdict(list)
|
||||
cachedata.pkg_pepvpr = {}
|
||||
cachedata.pkg_dp = {}
|
||||
def from_metadata(cls, filename, metadata):
|
||||
if cls.getvar('__SKIPPED', metadata):
|
||||
return cls.make_optional(skipped=True)
|
||||
|
||||
cachedata.stamp = {}
|
||||
cachedata.stamp_base = {}
|
||||
cachedata.stamp_extrainfo = {}
|
||||
cachedata.fn_provides = {}
|
||||
cachedata.pn_provides = defaultdict(list)
|
||||
cachedata.all_depends = []
|
||||
tasks = metadata.getVar('__BBTASKS', False)
|
||||
|
||||
cachedata.deps = defaultdict(list)
|
||||
cachedata.packages = defaultdict(list)
|
||||
cachedata.providers = defaultdict(list)
|
||||
cachedata.rproviders = defaultdict(list)
|
||||
cachedata.packages_dynamic = defaultdict(list)
|
||||
pn = cls.getvar('PN', metadata)
|
||||
packages = cls.listvar('PACKAGES', metadata)
|
||||
if not pn in packages:
|
||||
packages.append(pn)
|
||||
|
||||
cachedata.rundeps = defaultdict(lambda: defaultdict(list))
|
||||
cachedata.runrecs = defaultdict(lambda: defaultdict(list))
|
||||
cachedata.possible_world = []
|
||||
cachedata.universe_target = []
|
||||
cachedata.hashfn = {}
|
||||
return RecipeInfo(
|
||||
tasks = tasks,
|
||||
basetaskhashes = cls.taskvar('BB_BASEHASH', tasks, metadata),
|
||||
hashfilename = cls.getvar('BB_HASHFILENAME', metadata),
|
||||
|
||||
cachedata.basetaskhash = {}
|
||||
cachedata.inherits = {}
|
||||
cachedata.summary = {}
|
||||
cachedata.license = {}
|
||||
cachedata.section = {}
|
||||
cachedata.fakerootenv = {}
|
||||
cachedata.fakerootdirs = {}
|
||||
|
||||
def add_cacheData(self, cachedata, fn):
|
||||
cachedata.task_deps[fn] = self.task_deps
|
||||
cachedata.pkg_fn[fn] = self.pn
|
||||
cachedata.pkg_pn[self.pn].append(fn)
|
||||
cachedata.pkg_pepvpr[fn] = (self.pe, self.pv, self.pr)
|
||||
cachedata.pkg_dp[fn] = self.defaultpref
|
||||
cachedata.stamp[fn] = self.stamp
|
||||
cachedata.stamp_base[fn] = self.stamp_base
|
||||
cachedata.stamp_extrainfo[fn] = self.stamp_extrainfo
|
||||
|
||||
provides = [self.pn]
|
||||
for provide in self.provides:
|
||||
if provide not in provides:
|
||||
provides.append(provide)
|
||||
cachedata.fn_provides[fn] = provides
|
||||
|
||||
for provide in provides:
|
||||
cachedata.providers[provide].append(fn)
|
||||
if provide not in cachedata.pn_provides[self.pn]:
|
||||
cachedata.pn_provides[self.pn].append(provide)
|
||||
|
||||
for dep in self.depends:
|
||||
if dep not in cachedata.deps[fn]:
|
||||
cachedata.deps[fn].append(dep)
|
||||
if dep not in cachedata.all_depends:
|
||||
cachedata.all_depends.append(dep)
|
||||
|
||||
rprovides = self.rprovides
|
||||
for package in self.packages:
|
||||
cachedata.packages[package].append(fn)
|
||||
rprovides += self.rprovides_pkg[package]
|
||||
|
||||
for rprovide in rprovides:
|
||||
cachedata.rproviders[rprovide].append(fn)
|
||||
|
||||
for package in self.packages_dynamic:
|
||||
cachedata.packages_dynamic[package].append(fn)
|
||||
|
||||
# Build hash of runtime depends and rececommends
|
||||
for package in self.packages + [self.pn]:
|
||||
cachedata.rundeps[fn][package] = list(self.rdepends) + self.rdepends_pkg[package]
|
||||
cachedata.runrecs[fn][package] = list(self.rrecommends) + self.rrecommends_pkg[package]
|
||||
|
||||
# Collect files we may need for possible world-dep
|
||||
# calculations
|
||||
if not self.broken and not self.not_world:
|
||||
cachedata.possible_world.append(fn)
|
||||
|
||||
# create a collection of all targets for sanity checking
|
||||
# tasks, such as upstream versions, license, and tools for
|
||||
# task and image creation.
|
||||
cachedata.universe_target.append(self.pn)
|
||||
|
||||
cachedata.hashfn[fn] = self.hashfilename
|
||||
for task, taskhash in self.basetaskhashes.iteritems():
|
||||
identifier = '%s.%s' % (fn, task)
|
||||
cachedata.basetaskhash[identifier] = taskhash
|
||||
|
||||
cachedata.inherits[fn] = self.inherits
|
||||
cachedata.summary[fn] = self.summary
|
||||
cachedata.license[fn] = self.license
|
||||
cachedata.section[fn] = self.section
|
||||
cachedata.fakerootenv[fn] = self.fakerootenv
|
||||
cachedata.fakerootdirs[fn] = self.fakerootdirs
|
||||
file_depends = metadata.getVar('__depends', False),
|
||||
task_deps = metadata.getVar('_task_deps', False) or
|
||||
{'tasks': [], 'parents': {}},
|
||||
variants = cls.listvar('__VARIANTS', metadata) + [''],
|
||||
|
||||
skipped = False,
|
||||
timestamp = bb.parse.cached_mtime(filename),
|
||||
packages = cls.listvar('PACKAGES', metadata),
|
||||
pn = pn,
|
||||
pe = cls.getvar('PE', metadata),
|
||||
pv = cls.getvar('PV', metadata),
|
||||
pr = cls.getvar('PR', metadata),
|
||||
nocache = cls.getvar('__BB_DONT_CACHE', metadata),
|
||||
defaultpref = cls.intvar('DEFAULT_PREFERENCE', metadata),
|
||||
broken = cls.getvar('BROKEN', metadata),
|
||||
not_world = cls.getvar('EXCLUDE_FROM_WORLD', metadata),
|
||||
stamp = cls.getvar('STAMP', metadata),
|
||||
stamp_extrainfo = cls.flaglist('stamp-extra-info', tasks, metadata),
|
||||
packages_dynamic = cls.listvar('PACKAGES_DYNAMIC', metadata),
|
||||
depends = cls.depvar('DEPENDS', metadata),
|
||||
provides = cls.depvar('PROVIDES', metadata),
|
||||
rdepends = cls.depvar('RDEPENDS', metadata),
|
||||
rprovides = cls.depvar('RPROVIDES', metadata),
|
||||
rrecommends = cls.depvar('RRECOMMENDS', metadata),
|
||||
rprovides_pkg = cls.pkgvar('RPROVIDES', packages, metadata),
|
||||
rdepends_pkg = cls.pkgvar('RDEPENDS', packages, metadata),
|
||||
rrecommends_pkg = cls.pkgvar('RRECOMMENDS', packages, metadata),
|
||||
inherits = cls.getvar('__inherit_cache', metadata),
|
||||
summary = cls.getvar('SUMMARY', metadata),
|
||||
license = cls.getvar('LICENSE', metadata),
|
||||
section = cls.getvar('SECTION', metadata),
|
||||
fakerootenv = cls.getvar('FAKEROOTENV', metadata),
|
||||
fakerootdirs = cls.getvar('FAKEROOTDIRS', metadata),
|
||||
)
|
||||
|
||||
|
||||
class Cache(object):
|
||||
@@ -252,11 +184,7 @@ class Cache(object):
|
||||
BitBake Cache implementation
|
||||
"""
|
||||
|
||||
def __init__(self, data, caches_array):
|
||||
# Pass caches_array information into Cache Constructor
|
||||
# It will be used in later for deciding whether we
|
||||
# need extra cache file dump/load support
|
||||
self.caches_array = caches_array
|
||||
def __init__(self, data):
|
||||
self.cachedir = bb.data.getVar("CACHE", data, True)
|
||||
self.clean = set()
|
||||
self.checked = set()
|
||||
@@ -272,7 +200,7 @@ class Cache(object):
|
||||
return
|
||||
|
||||
self.has_cache = True
|
||||
self.cachefile = getCacheFile(self.cachedir, "bb_cache.dat")
|
||||
self.cachefile = os.path.join(self.cachedir, "bb_cache.dat")
|
||||
|
||||
logger.debug(1, "Using cache in '%s'", self.cachedir)
|
||||
bb.utils.mkdirhier(self.cachedir)
|
||||
@@ -286,21 +214,12 @@ class Cache(object):
|
||||
old_mtimes.append(newest_mtime)
|
||||
newest_mtime = max(old_mtimes)
|
||||
|
||||
bNeedUpdate = True
|
||||
if self.caches_array:
|
||||
for cache_class in self.caches_array:
|
||||
if type(cache_class) is type and issubclass(cache_class, RecipeInfoCommon):
|
||||
cachefile = getCacheFile(self.cachedir, cache_class.cachefile)
|
||||
bNeedUpdate = bNeedUpdate and (bb.parse.cached_mtime_noerror(cachefile) >= newest_mtime)
|
||||
cache_class.init_cacheData(self)
|
||||
if bNeedUpdate:
|
||||
if bb.parse.cached_mtime_noerror(self.cachefile) >= newest_mtime:
|
||||
self.load_cachefile()
|
||||
elif os.path.isfile(self.cachefile):
|
||||
logger.info("Out of date cache found, rebuilding...")
|
||||
|
||||
def load_cachefile(self):
|
||||
# Firstly, using core cache file information for
|
||||
# valid checking
|
||||
with open(self.cachefile, "rb") as cachefile:
|
||||
pickled = pickle.Unpickler(cachefile)
|
||||
try:
|
||||
@@ -317,52 +236,31 @@ class Cache(object):
|
||||
logger.info('Bitbake version mismatch, rebuilding...')
|
||||
return
|
||||
|
||||
cachesize = os.fstat(cachefile.fileno()).st_size
|
||||
bb.event.fire(bb.event.CacheLoadStarted(cachesize), self.data)
|
||||
|
||||
cachesize = 0
|
||||
previous_progress = 0
|
||||
previous_percent = 0
|
||||
previous_percent = 0
|
||||
while cachefile:
|
||||
try:
|
||||
key = pickled.load()
|
||||
value = pickled.load()
|
||||
except Exception:
|
||||
break
|
||||
|
||||
# Calculate the correct cachesize of all those cache files
|
||||
for cache_class in self.caches_array:
|
||||
if type(cache_class) is type and issubclass(cache_class, RecipeInfoCommon):
|
||||
cachefile = getCacheFile(self.cachedir, cache_class.cachefile)
|
||||
with open(cachefile, "rb") as cachefile:
|
||||
cachesize += os.fstat(cachefile.fileno()).st_size
|
||||
self.depends_cache[key] = value
|
||||
|
||||
bb.event.fire(bb.event.CacheLoadStarted(cachesize), self.data)
|
||||
|
||||
for cache_class in self.caches_array:
|
||||
if type(cache_class) is type and issubclass(cache_class, RecipeInfoCommon):
|
||||
cachefile = getCacheFile(self.cachedir, cache_class.cachefile)
|
||||
with open(cachefile, "rb") as cachefile:
|
||||
pickled = pickle.Unpickler(cachefile)
|
||||
while cachefile:
|
||||
try:
|
||||
key = pickled.load()
|
||||
value = pickled.load()
|
||||
except Exception:
|
||||
break
|
||||
if self.depends_cache.has_key(key):
|
||||
self.depends_cache[key].append(value)
|
||||
else:
|
||||
self.depends_cache[key] = [value]
|
||||
# only fire events on even percentage boundaries
|
||||
current_progress = cachefile.tell() + previous_progress
|
||||
current_percent = 100 * current_progress / cachesize
|
||||
if current_percent > previous_percent:
|
||||
previous_percent = current_percent
|
||||
bb.event.fire(bb.event.CacheLoadProgress(current_progress),
|
||||
self.data)
|
||||
# only fire events on even percentage boundaries
|
||||
current_progress = cachefile.tell()
|
||||
current_percent = 100 * current_progress / cachesize
|
||||
if current_percent > previous_percent:
|
||||
previous_percent = current_percent
|
||||
bb.event.fire(bb.event.CacheLoadProgress(current_progress),
|
||||
self.data)
|
||||
|
||||
previous_progress += current_progress
|
||||
bb.event.fire(bb.event.CacheLoadCompleted(cachesize,
|
||||
len(self.depends_cache)),
|
||||
self.data)
|
||||
|
||||
# Note: depends cache number is corresponding to the parsing file numbers.
|
||||
# The same file has several caches, still regarded as one item in the cache
|
||||
bb.event.fire(bb.event.CacheLoadCompleted(cachesize,
|
||||
len(self.depends_cache)),
|
||||
self.data)
|
||||
|
||||
|
||||
@staticmethod
|
||||
def virtualfn2realfn(virtualfn):
|
||||
"""
|
||||
@@ -372,9 +270,8 @@ class Cache(object):
|
||||
fn = virtualfn
|
||||
cls = ""
|
||||
if virtualfn.startswith('virtual:'):
|
||||
elems = virtualfn.split(':')
|
||||
cls = ":".join(elems[1:-1])
|
||||
fn = elems[-1]
|
||||
cls = virtualfn.split(':', 2)[1]
|
||||
fn = virtualfn.replace('virtual:' + cls + ':', '')
|
||||
return (fn, cls)
|
||||
|
||||
@staticmethod
|
||||
@@ -397,12 +294,11 @@ class Cache(object):
|
||||
|
||||
logger.debug(1, "Parsing %s (full)", fn)
|
||||
|
||||
cfgData.setVar("__ONLYFINALISE", virtual or "default")
|
||||
bb_data = cls.load_bbfile(fn, appends, cfgData)
|
||||
return bb_data[virtual]
|
||||
|
||||
@classmethod
|
||||
def parse(cls, filename, appends, configdata, caches_array):
|
||||
def parse(cls, filename, appends, configdata):
|
||||
"""Parse the specified filename, returning the recipe information"""
|
||||
infos = []
|
||||
datastores = cls.load_bbfile(filename, appends, configdata)
|
||||
@@ -414,14 +310,8 @@ class Cache(object):
|
||||
depends |= (data.getVar("__depends", False) or set())
|
||||
if depends and not variant:
|
||||
data.setVar("__depends", depends)
|
||||
|
||||
info_array = []
|
||||
for cache_class in caches_array:
|
||||
if type(cache_class) is type and issubclass(cache_class, RecipeInfoCommon):
|
||||
info = cache_class(filename, data)
|
||||
info_array.append(info)
|
||||
infos.append((virtualfn, info_array))
|
||||
|
||||
info = RecipeInfo.from_metadata(filename, data)
|
||||
infos.append((virtualfn, info))
|
||||
return infos
|
||||
|
||||
def load(self, filename, appends, configdata):
|
||||
@@ -432,17 +322,16 @@ class Cache(object):
|
||||
automatically add the information to the cache or to your
|
||||
CacheData. Use the add or add_info method to do so after
|
||||
running this, or use loadData instead."""
|
||||
cached = self.cacheValid(filename, appends)
|
||||
cached = self.cacheValid(filename)
|
||||
if cached:
|
||||
infos = []
|
||||
# info_array item is a list of [CoreRecipeInfo, XXXRecipeInfo]
|
||||
info_array = self.depends_cache[filename]
|
||||
for variant in info_array[0].variants:
|
||||
info = self.depends_cache[filename]
|
||||
for variant in info.variants:
|
||||
virtualfn = self.realfn2virtual(filename, variant)
|
||||
infos.append((virtualfn, self.depends_cache[virtualfn]))
|
||||
else:
|
||||
logger.debug(1, "Parsing %s", filename)
|
||||
return self.parse(filename, appends, configdata, self.caches_array)
|
||||
return self.parse(filename, appends, configdata)
|
||||
|
||||
return cached, infos
|
||||
|
||||
@@ -453,23 +342,23 @@ class Cache(object):
|
||||
skipped, virtuals = 0, 0
|
||||
|
||||
cached, infos = self.load(fn, appends, cfgData)
|
||||
for virtualfn, info_array in infos:
|
||||
if info_array[0].skipped:
|
||||
logger.debug(1, "Skipping %s: %s", virtualfn, info_array[0].skipreason)
|
||||
for virtualfn, info in infos:
|
||||
if info.skipped:
|
||||
logger.debug(1, "Skipping %s", virtualfn)
|
||||
skipped += 1
|
||||
else:
|
||||
self.add_info(virtualfn, info_array, cacheData, not cached)
|
||||
self.add_info(virtualfn, info, cacheData, not cached)
|
||||
virtuals += 1
|
||||
|
||||
return cached, skipped, virtuals
|
||||
|
||||
def cacheValid(self, fn, appends):
|
||||
def cacheValid(self, fn):
|
||||
"""
|
||||
Is the cache valid for fn?
|
||||
Fast version, no timestamps checked.
|
||||
"""
|
||||
if fn not in self.checked:
|
||||
self.cacheValidUpdate(fn, appends)
|
||||
self.cacheValidUpdate(fn)
|
||||
|
||||
# Is cache enabled?
|
||||
if not self.has_cache:
|
||||
@@ -478,7 +367,7 @@ class Cache(object):
|
||||
return True
|
||||
return False
|
||||
|
||||
def cacheValidUpdate(self, fn, appends):
|
||||
def cacheValidUpdate(self, fn):
|
||||
"""
|
||||
Is the cache valid for fn?
|
||||
Make thorough (slower) checks including timestamps.
|
||||
@@ -502,15 +391,15 @@ class Cache(object):
|
||||
self.remove(fn)
|
||||
return False
|
||||
|
||||
info_array = self.depends_cache[fn]
|
||||
info = self.depends_cache[fn]
|
||||
# Check the file's timestamp
|
||||
if mtime != info_array[0].timestamp:
|
||||
if mtime != info.timestamp:
|
||||
logger.debug(2, "Cache: %s changed", fn)
|
||||
self.remove(fn)
|
||||
return False
|
||||
|
||||
# Check dependencies are still valid
|
||||
depends = info_array[0].file_depends
|
||||
depends = info.file_depends
|
||||
if depends:
|
||||
for f, old_mtime in depends:
|
||||
fmtime = bb.parse.cached_mtime_noerror(f)
|
||||
@@ -527,14 +416,8 @@ class Cache(object):
|
||||
self.remove(fn)
|
||||
return False
|
||||
|
||||
if appends != info_array[0].appends:
|
||||
logger.debug(2, "Cache: appends for %s changed", fn)
|
||||
bb.note("%s to %s" % (str(appends), str(info_array[0].appends)))
|
||||
self.remove(fn)
|
||||
return False
|
||||
|
||||
invalid = False
|
||||
for cls in info_array[0].variants:
|
||||
for cls in info.variants:
|
||||
virtualfn = self.realfn2virtual(fn, cls)
|
||||
self.clean.add(virtualfn)
|
||||
if virtualfn not in self.depends_cache:
|
||||
@@ -543,7 +426,7 @@ class Cache(object):
|
||||
|
||||
# If any one of the variants is not present, mark as invalid for all
|
||||
if invalid:
|
||||
for cls in info_array[0].variants:
|
||||
for cls in info.variants:
|
||||
virtualfn = self.realfn2virtual(fn, cls)
|
||||
if virtualfn in self.clean:
|
||||
logger.debug(2, "Cache: Removing %s from cache", virtualfn)
|
||||
@@ -581,30 +464,13 @@ class Cache(object):
|
||||
logger.debug(2, "Cache is clean, not saving.")
|
||||
return
|
||||
|
||||
file_dict = {}
|
||||
pickler_dict = {}
|
||||
for cache_class in self.caches_array:
|
||||
if type(cache_class) is type and issubclass(cache_class, RecipeInfoCommon):
|
||||
cache_class_name = cache_class.__name__
|
||||
cachefile = getCacheFile(self.cachedir, cache_class.cachefile)
|
||||
file_dict[cache_class_name] = open(cachefile, "wb")
|
||||
pickler_dict[cache_class_name] = pickle.Pickler(file_dict[cache_class_name], pickle.HIGHEST_PROTOCOL)
|
||||
|
||||
pickler_dict['CoreRecipeInfo'].dump(__cache_version__)
|
||||
pickler_dict['CoreRecipeInfo'].dump(bb.__version__)
|
||||
|
||||
try:
|
||||
for key, info_array in self.depends_cache.iteritems():
|
||||
for info in info_array:
|
||||
if isinstance(info, RecipeInfoCommon):
|
||||
cache_class_name = info.__class__.__name__
|
||||
pickler_dict[cache_class_name].dump(key)
|
||||
pickler_dict[cache_class_name].dump(info)
|
||||
finally:
|
||||
for cache_class in self.caches_array:
|
||||
if type(cache_class) is type and issubclass(cache_class, RecipeInfoCommon):
|
||||
cache_class_name = cache_class.__name__
|
||||
file_dict[cache_class_name].close()
|
||||
with open(self.cachefile, "wb") as cachefile:
|
||||
pickler = pickle.Pickler(cachefile, pickle.HIGHEST_PROTOCOL)
|
||||
pickler.dump(__cache_version__)
|
||||
pickler.dump(bb.__version__)
|
||||
for key, value in self.depends_cache.iteritems():
|
||||
pickler.dump(key)
|
||||
pickler.dump(value)
|
||||
|
||||
del self.depends_cache
|
||||
|
||||
@@ -612,17 +478,15 @@ class Cache(object):
|
||||
def mtime(cachefile):
|
||||
return bb.parse.cached_mtime_noerror(cachefile)
|
||||
|
||||
def add_info(self, filename, info_array, cacheData, parsed=None):
|
||||
if isinstance(info_array[0], CoreRecipeInfo) and (not info_array[0].skipped):
|
||||
cacheData.add_from_recipeinfo(filename, info_array)
|
||||
|
||||
def add_info(self, filename, info, cacheData, parsed=None):
|
||||
cacheData.add_from_recipeinfo(filename, info)
|
||||
if not self.has_cache:
|
||||
return
|
||||
|
||||
if (info_array[0].skipped or 'SRCREVINACTION' not in info_array[0].pv) and not info_array[0].nocache:
|
||||
if 'SRCREVINACTION' not in info.pv and not info.nocache:
|
||||
if parsed:
|
||||
self.cacheclean = False
|
||||
self.depends_cache[filename] = info_array
|
||||
self.depends_cache[filename] = info
|
||||
|
||||
def add(self, file_name, data, cacheData, parsed=None):
|
||||
"""
|
||||
@@ -630,12 +494,8 @@ class Cache(object):
|
||||
"""
|
||||
|
||||
realfn = self.virtualfn2realfn(file_name)[0]
|
||||
|
||||
info_array = []
|
||||
for cache_class in self.caches_array:
|
||||
if type(cache_class) is type and issubclass(cache_class, RecipeInfoCommon):
|
||||
info_array.append(cache_class(realfn, data))
|
||||
self.add_info(file_name, info_array, cacheData, parsed)
|
||||
info = RecipeInfo.from_metadata(realfn, data)
|
||||
self.add_info(file_name, info, cacheData, parsed)
|
||||
|
||||
@staticmethod
|
||||
def load_bbfile(bbfile, appends, config):
|
||||
@@ -699,23 +559,99 @@ class CacheData(object):
|
||||
The data structures we compile from the cached data
|
||||
"""
|
||||
|
||||
def __init__(self, caches_array):
|
||||
self.caches_array = caches_array
|
||||
for cache_class in self.caches_array:
|
||||
if type(cache_class) is type and issubclass(cache_class, RecipeInfoCommon):
|
||||
cache_class.init_cacheData(self)
|
||||
|
||||
def __init__(self):
|
||||
# Direct cache variables
|
||||
self.providers = defaultdict(list)
|
||||
self.rproviders = defaultdict(list)
|
||||
self.packages = defaultdict(list)
|
||||
self.packages_dynamic = defaultdict(list)
|
||||
self.possible_world = []
|
||||
self.pkg_pn = defaultdict(list)
|
||||
self.pkg_fn = {}
|
||||
self.pkg_pepvpr = {}
|
||||
self.pkg_dp = {}
|
||||
self.pn_provides = defaultdict(list)
|
||||
self.fn_provides = {}
|
||||
self.all_depends = []
|
||||
self.deps = defaultdict(list)
|
||||
self.rundeps = defaultdict(lambda: defaultdict(list))
|
||||
self.runrecs = defaultdict(lambda: defaultdict(list))
|
||||
self.task_queues = {}
|
||||
self.task_deps = {}
|
||||
self.stamp = {}
|
||||
self.stamp_extrainfo = {}
|
||||
self.preferred = {}
|
||||
self.tasks = {}
|
||||
self.basetaskhash = {}
|
||||
self.hashfn = {}
|
||||
self.inherits = {}
|
||||
self.summary = {}
|
||||
self.license = {}
|
||||
self.section = {}
|
||||
self.fakerootenv = {}
|
||||
self.fakerootdirs = {}
|
||||
|
||||
# Indirect Cache variables (set elsewhere)
|
||||
self.ignored_dependencies = []
|
||||
self.world_target = set()
|
||||
self.bbfile_priority = {}
|
||||
self.bbfile_config_priorities = []
|
||||
|
||||
def add_from_recipeinfo(self, fn, info_array):
|
||||
for info in info_array:
|
||||
info.add_cacheData(self, fn)
|
||||
def add_from_recipeinfo(self, fn, info):
|
||||
self.task_deps[fn] = info.task_deps
|
||||
self.pkg_fn[fn] = info.pn
|
||||
self.pkg_pn[info.pn].append(fn)
|
||||
self.pkg_pepvpr[fn] = (info.pe, info.pv, info.pr)
|
||||
self.pkg_dp[fn] = info.defaultpref
|
||||
self.stamp[fn] = info.stamp
|
||||
self.stamp_extrainfo[fn] = info.stamp_extrainfo
|
||||
|
||||
|
||||
provides = [info.pn]
|
||||
for provide in info.provides:
|
||||
if provide not in provides:
|
||||
provides.append(provide)
|
||||
self.fn_provides[fn] = provides
|
||||
|
||||
for provide in provides:
|
||||
self.providers[provide].append(fn)
|
||||
if provide not in self.pn_provides[info.pn]:
|
||||
self.pn_provides[info.pn].append(provide)
|
||||
|
||||
for dep in info.depends:
|
||||
if dep not in self.deps[fn]:
|
||||
self.deps[fn].append(dep)
|
||||
if dep not in self.all_depends:
|
||||
self.all_depends.append(dep)
|
||||
|
||||
rprovides = info.rprovides
|
||||
for package in info.packages:
|
||||
self.packages[package].append(fn)
|
||||
rprovides += info.rprovides_pkg[package]
|
||||
|
||||
for rprovide in rprovides:
|
||||
self.rproviders[rprovide].append(fn)
|
||||
|
||||
for package in info.packages_dynamic:
|
||||
self.packages_dynamic[package].append(fn)
|
||||
|
||||
# Build hash of runtime depends and rececommends
|
||||
for package in info.packages + [info.pn]:
|
||||
self.rundeps[fn][package] = list(info.rdepends) + info.rdepends_pkg[package]
|
||||
self.runrecs[fn][package] = list(info.rrecommends) + info.rrecommends_pkg[package]
|
||||
|
||||
# Collect files we may need for possible world-dep
|
||||
# calculations
|
||||
if not info.broken and not info.not_world:
|
||||
self.possible_world.append(fn)
|
||||
|
||||
self.hashfn[fn] = info.hashfilename
|
||||
for task, taskhash in info.basetaskhashes.iteritems():
|
||||
identifier = '%s.%s' % (fn, task)
|
||||
self.basetaskhash[identifier] = taskhash
|
||||
|
||||
self.inherits[fn] = info.inherits
|
||||
self.summary[fn] = info.summary
|
||||
self.license[fn] = info.license
|
||||
self.section[fn] = info.section
|
||||
self.fakerootenv[fn] = info.fakerootenv
|
||||
self.fakerootdirs[fn] = info.fakerootdirs
|
||||
|
||||
@@ -1,54 +0,0 @@
|
||||
# ex:ts=4:sw=4:sts=4:et
|
||||
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
|
||||
#
|
||||
# Extra RecipeInfo will be all defined in this file. Currently,
|
||||
# Only Hob (Image Creator) Requests some extra fields. So
|
||||
# HobRecipeInfo is defined. It's named HobRecipeInfo because it
|
||||
# is introduced by 'hob'. Users could also introduce other
|
||||
# RecipeInfo or simply use those already defined RecipeInfo.
|
||||
# In the following patch, this newly defined new extra RecipeInfo
|
||||
# will be dynamically loaded and used for loading/saving the extra
|
||||
# cache fields
|
||||
|
||||
# Copyright (C) 2011, Intel Corporation. All rights reserved.
|
||||
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License version 2 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License along
|
||||
# with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
from bb.cache import RecipeInfoCommon
|
||||
|
||||
class HobRecipeInfo(RecipeInfoCommon):
|
||||
__slots__ = ()
|
||||
|
||||
classname = "HobRecipeInfo"
|
||||
# please override this member with the correct data cache file
|
||||
# such as (bb_cache.dat, bb_extracache_hob.dat)
|
||||
cachefile = "bb_extracache_" + classname +".dat"
|
||||
|
||||
def __init__(self, filename, metadata):
|
||||
|
||||
self.summary = self.getvar('SUMMARY', metadata)
|
||||
self.license = self.getvar('LICENSE', metadata)
|
||||
self.section = self.getvar('SECTION', metadata)
|
||||
|
||||
@classmethod
|
||||
def init_cacheData(cls, cachedata):
|
||||
# CacheData in Hob RecipeInfo Class
|
||||
cachedata.summary = {}
|
||||
cachedata.license = {}
|
||||
cachedata.section = {}
|
||||
|
||||
def add_cacheData(self, cachedata, fn):
|
||||
cachedata.summary[fn] = self.summary
|
||||
cachedata.license[fn] = self.license
|
||||
cachedata.section[fn] = self.section
|
||||
@@ -21,13 +21,13 @@ def check_indent(codestr):
|
||||
"""If the code is indented, add a top level piece of code to 'remove' the indentation"""
|
||||
|
||||
i = 0
|
||||
while codestr[i] in ["\n", "\t", " "]:
|
||||
while codestr[i] in ["\n", " ", " "]:
|
||||
i = i + 1
|
||||
|
||||
if i == 0:
|
||||
return codestr
|
||||
|
||||
if codestr[i-1] == "\t" or codestr[i-1] == " ":
|
||||
if codestr[i-1] is " " or codestr[i-1] is " ":
|
||||
return "if 1:\n" + codestr
|
||||
|
||||
return codestr
|
||||
@@ -70,85 +70,8 @@ def parser_cache_save(d):
|
||||
if not cachefile:
|
||||
return
|
||||
|
||||
glf = bb.utils.lockfile(cachefile + ".lock", shared=True)
|
||||
|
||||
i = os.getpid()
|
||||
lf = None
|
||||
while not lf:
|
||||
shellcache = {}
|
||||
pythoncache = {}
|
||||
|
||||
lf = bb.utils.lockfile(cachefile + ".lock." + str(i), retry=False)
|
||||
if not lf or os.path.exists(cachefile + "-" + str(i)):
|
||||
if lf:
|
||||
bb.utils.unlockfile(lf)
|
||||
lf = None
|
||||
i = i + 1
|
||||
continue
|
||||
|
||||
try:
|
||||
p = pickle.Unpickler(file(cachefile, "rb"))
|
||||
data, version = p.load()
|
||||
except (IOError, EOFError, ValueError):
|
||||
data, version = None, None
|
||||
|
||||
if version != PARSERCACHE_VERSION:
|
||||
shellcache = shellparsecache
|
||||
pythoncache = pythonparsecache
|
||||
else:
|
||||
for h in pythonparsecache:
|
||||
if h not in data[0]:
|
||||
pythoncache[h] = pythonparsecache[h]
|
||||
for h in shellparsecache:
|
||||
if h not in data[1]:
|
||||
shellcache[h] = shellparsecache[h]
|
||||
|
||||
p = pickle.Pickler(file(cachefile + "-" + str(i), "wb"), -1)
|
||||
p.dump([[pythoncache, shellcache], PARSERCACHE_VERSION])
|
||||
|
||||
bb.utils.unlockfile(lf)
|
||||
bb.utils.unlockfile(glf)
|
||||
|
||||
def parser_cache_savemerge(d):
|
||||
cachefile = parser_cachefile(d)
|
||||
if not cachefile:
|
||||
return
|
||||
|
||||
glf = bb.utils.lockfile(cachefile + ".lock")
|
||||
|
||||
try:
|
||||
p = pickle.Unpickler(file(cachefile, "rb"))
|
||||
data, version = p.load()
|
||||
except (IOError, EOFError):
|
||||
data, version = None, None
|
||||
|
||||
if version != PARSERCACHE_VERSION:
|
||||
data = [{}, {}]
|
||||
|
||||
for f in [y for y in os.listdir(os.path.dirname(cachefile)) if y.startswith(os.path.basename(cachefile) + '-')]:
|
||||
f = os.path.join(os.path.dirname(cachefile), f)
|
||||
try:
|
||||
p = pickle.Unpickler(file(f, "rb"))
|
||||
extradata, version = p.load()
|
||||
except (IOError, EOFError):
|
||||
extradata, version = [{}, {}], None
|
||||
|
||||
if version != PARSERCACHE_VERSION:
|
||||
continue
|
||||
|
||||
for h in extradata[0]:
|
||||
if h not in data[0]:
|
||||
data[0][h] = extradata[0][h]
|
||||
for h in extradata[1]:
|
||||
if h not in data[1]:
|
||||
data[1][h] = extradata[1][h]
|
||||
os.unlink(f)
|
||||
|
||||
p = pickle.Pickler(file(cachefile, "wb"), -1)
|
||||
p.dump([data, PARSERCACHE_VERSION])
|
||||
|
||||
bb.utils.unlockfile(glf)
|
||||
|
||||
p.dump([[pythonparsecache, shellparsecache], PARSERCACHE_VERSION])
|
||||
|
||||
class PythonParser():
|
||||
class ValueVisitor():
|
||||
|
||||
@@ -82,7 +82,7 @@ class Command:
|
||||
if command not in CommandsAsync.__dict__:
|
||||
return "No such command"
|
||||
self.currentAsyncCommand = (command, commandline)
|
||||
self.cooker.server_registration_cb(self.cooker.runCommands, self.cooker)
|
||||
self.cooker.server.register_idle_function(self.cooker.runCommands, self.cooker)
|
||||
return True
|
||||
except:
|
||||
import traceback
|
||||
@@ -224,19 +224,11 @@ class CommandsAsync:
|
||||
|
||||
def generateTargetsTree(self, command, params):
|
||||
"""
|
||||
Generate a tree of buildable targets.
|
||||
If klass is provided ensure all recipes that inherit the class are
|
||||
included in the package list.
|
||||
If pkg_list provided use that list (plus any extras brought in by
|
||||
klass) rather than generating a tree for all packages.
|
||||
Generate a tree of all buildable targets.
|
||||
"""
|
||||
klass = params[0]
|
||||
if len(params) > 1:
|
||||
pkg_list = params[1]
|
||||
else:
|
||||
pkg_list = []
|
||||
|
||||
command.cooker.generateTargetsTree(klass, pkg_list)
|
||||
command.cooker.generateTargetsTree(klass)
|
||||
command.finishAsyncCommand()
|
||||
generateTargetsTree.needcache = True
|
||||
|
||||
@@ -251,28 +243,6 @@ class CommandsAsync:
|
||||
command.finishAsyncCommand()
|
||||
findConfigFiles.needcache = True
|
||||
|
||||
def findFilesMatchingInDir(self, command, params):
|
||||
"""
|
||||
Find implementation files matching the specified pattern
|
||||
in the requested subdirectory of a BBPATH
|
||||
"""
|
||||
pattern = params[0]
|
||||
directory = params[1]
|
||||
|
||||
command.cooker.findFilesMatchingInDir(pattern, directory)
|
||||
command.finishAsyncCommand()
|
||||
findFilesMatchingInDir.needcache = True
|
||||
|
||||
def findConfigFilePath(self, command, params):
|
||||
"""
|
||||
Find the path of the requested configuration file
|
||||
"""
|
||||
configfile = params[0]
|
||||
|
||||
command.cooker.findConfigFilePath(configfile)
|
||||
command.finishAsyncCommand()
|
||||
findConfigFilePath.needcache = False
|
||||
|
||||
def showVersions(self, command, params):
|
||||
"""
|
||||
Show the currently selected versions
|
||||
@@ -311,14 +281,6 @@ class CommandsAsync:
|
||||
command.finishAsyncCommand()
|
||||
parseFiles.needcache = True
|
||||
|
||||
def reparseFiles(self, command, params):
|
||||
"""
|
||||
Reparse .bb files
|
||||
"""
|
||||
command.cooker.reparseFiles()
|
||||
command.finishAsyncCommand()
|
||||
reparseFiles.needcache = True
|
||||
|
||||
def compareRevisions(self, command, params):
|
||||
"""
|
||||
Parse the .bb files
|
||||
|
||||
@@ -1,28 +0,0 @@
|
||||
"""Code pulled from future python versions, here for compatibility"""
|
||||
|
||||
def total_ordering(cls):
|
||||
"""Class decorator that fills in missing ordering methods"""
|
||||
convert = {
|
||||
'__lt__': [('__gt__', lambda self, other: other < self),
|
||||
('__le__', lambda self, other: not other < self),
|
||||
('__ge__', lambda self, other: not self < other)],
|
||||
'__le__': [('__ge__', lambda self, other: other <= self),
|
||||
('__lt__', lambda self, other: not other <= self),
|
||||
('__gt__', lambda self, other: not self <= other)],
|
||||
'__gt__': [('__lt__', lambda self, other: other > self),
|
||||
('__ge__', lambda self, other: not other > self),
|
||||
('__le__', lambda self, other: not self > other)],
|
||||
'__ge__': [('__le__', lambda self, other: other >= self),
|
||||
('__gt__', lambda self, other: not other >= self),
|
||||
('__lt__', lambda self, other: not self >= other)]
|
||||
}
|
||||
roots = set(dir(cls)) & set(convert)
|
||||
if not roots:
|
||||
raise ValueError('must define at least one ordering operation: < > <= >=')
|
||||
root = max(roots) # prefer __lt__ to __le__ to __gt__ to __ge__
|
||||
for opname, opfunc in convert[root]:
|
||||
if opname not in roots:
|
||||
opfunc.__name__ = opname
|
||||
opfunc.__doc__ = getattr(int, opname).__doc__
|
||||
setattr(cls, opname, opfunc)
|
||||
return cls
|
||||
File diff suppressed because it is too large
Load Diff
@@ -159,12 +159,12 @@ def expandKeys(alterdata, readdata = None):
|
||||
ekey = todolist[key]
|
||||
renameVar(key, ekey, alterdata)
|
||||
|
||||
def inheritFromOS(d, savedenv):
|
||||
"""Inherit variables from the initial environment."""
|
||||
def inheritFromOS(d):
|
||||
"""Inherit variables from the environment."""
|
||||
exportlist = bb.utils.preserved_envvars_exported()
|
||||
for s in savedenv.keys():
|
||||
for s in os.environ.keys():
|
||||
try:
|
||||
setVar(s, getVar(s, savedenv, True), d)
|
||||
setVar(s, os.environ[s], d)
|
||||
if s in exportlist:
|
||||
setVarFlag(s, "export", True, d)
|
||||
except TypeError:
|
||||
@@ -187,7 +187,7 @@ def emit_var(var, o=sys.__stdout__, d = init(), all=False):
|
||||
val = getVar(var, d, 1)
|
||||
except (KeyboardInterrupt, bb.build.FuncFailed):
|
||||
raise
|
||||
except Exception as exc:
|
||||
except Exception, exc:
|
||||
o.write('# expansion of %s threw %s: %s\n' % (var, exc.__class__.__name__, str(exc)))
|
||||
return 0
|
||||
|
||||
@@ -234,20 +234,25 @@ def emit_env(o=sys.__stdout__, d = init(), all=False):
|
||||
for key in keys:
|
||||
emit_var(key, o, d, all and not isfunc) and o.write('\n')
|
||||
|
||||
def exported_keys(d):
|
||||
return (key for key in d.keys() if not key.startswith('__') and
|
||||
d.getVarFlag(key, 'export') and
|
||||
not d.getVarFlag(key, 'unexport'))
|
||||
|
||||
def exported_vars(d):
|
||||
for key in exported_keys(d):
|
||||
def export_vars(d):
|
||||
keys = (key for key in d.keys() if d.getVarFlag(key, "export"))
|
||||
ret = {}
|
||||
for k in keys:
|
||||
try:
|
||||
value = d.getVar(key, True)
|
||||
except Exception:
|
||||
v = d.getVar(k, True)
|
||||
if v:
|
||||
ret[k] = v
|
||||
except (KeyboardInterrupt, bb.build.FuncFailed):
|
||||
raise
|
||||
except Exception, exc:
|
||||
pass
|
||||
return ret
|
||||
|
||||
if value is not None:
|
||||
yield key, str(value)
|
||||
def export_envvars(v, d):
|
||||
for s in os.environ.keys():
|
||||
if s not in v:
|
||||
v[s] = os.environ[s]
|
||||
return v
|
||||
|
||||
def emit_func(func, o=sys.__stdout__, d = init()):
|
||||
"""Emits all items in the data store in a format such that it can be sourced by a shell."""
|
||||
|
||||
@@ -172,12 +172,11 @@ class DataSmart(MutableMapping):
|
||||
if o not in self._seen_overrides:
|
||||
continue
|
||||
|
||||
vars = self._seen_overrides[o].copy()
|
||||
vars = self._seen_overrides[o]
|
||||
for var in vars:
|
||||
name = var[:-l]
|
||||
try:
|
||||
self.setVar(name, self.getVar(var, False))
|
||||
self.delVar(var)
|
||||
except Exception:
|
||||
logger.info("Untracked delVar")
|
||||
|
||||
@@ -192,11 +191,11 @@ class DataSmart(MutableMapping):
|
||||
keep.append((a ,o))
|
||||
continue
|
||||
|
||||
if op == "_append":
|
||||
if op is "_append":
|
||||
sval = self.getVar(append, False) or ""
|
||||
sval += a
|
||||
self.setVar(append, sval)
|
||||
elif op == "_prepend":
|
||||
elif op is "_prepend":
|
||||
sval = a + (self.getVar(append, False) or "")
|
||||
self.setVar(append, sval)
|
||||
|
||||
@@ -259,16 +258,19 @@ class DataSmart(MutableMapping):
|
||||
# more cookies for the cookie monster
|
||||
if '_' in var:
|
||||
override = var[var.rfind('_')+1:]
|
||||
if len(override) > 0:
|
||||
if override not in self._seen_overrides:
|
||||
self._seen_overrides[override] = set()
|
||||
self._seen_overrides[override].add( var )
|
||||
if override not in self._seen_overrides:
|
||||
self._seen_overrides[override] = set()
|
||||
self._seen_overrides[override].add( var )
|
||||
|
||||
# setting var
|
||||
self.dict[var]["content"] = value
|
||||
|
||||
def getVar(self, var, expand=False, noweakdefault=False):
|
||||
return self.getVarFlag(var, "content", expand, noweakdefault)
|
||||
def getVar(self, var, exp):
|
||||
value = self.getVarFlag(var, "content")
|
||||
|
||||
if exp and value:
|
||||
return self.expand(value, var)
|
||||
return value
|
||||
|
||||
def renameVar(self, key, newkey):
|
||||
"""
|
||||
@@ -296,23 +298,19 @@ class DataSmart(MutableMapping):
|
||||
def delVar(self, var):
|
||||
self.expand_cache = {}
|
||||
self.dict[var] = {}
|
||||
if '_' in var:
|
||||
override = var[var.rfind('_')+1:]
|
||||
if override and override in self._seen_overrides and var in self._seen_overrides[override]:
|
||||
self._seen_overrides[override].remove(var)
|
||||
|
||||
def setVarFlag(self, var, flag, flagvalue):
|
||||
if not var in self.dict:
|
||||
self._makeShadowCopy(var)
|
||||
self.dict[var][flag] = flagvalue
|
||||
|
||||
def getVarFlag(self, var, flag, expand=False, noweakdefault=False):
|
||||
def getVarFlag(self, var, flag, expand=False):
|
||||
local_var = self._findVar(var)
|
||||
value = None
|
||||
if local_var:
|
||||
if flag in local_var:
|
||||
value = copy.copy(local_var[flag])
|
||||
elif flag == "content" and "defaultval" in local_var and not noweakdefault:
|
||||
elif flag == "content" and "defaultval" in local_var:
|
||||
value = copy.copy(local_var["defaultval"])
|
||||
if expand and value:
|
||||
value = self.expand(value, None)
|
||||
@@ -400,22 +398,18 @@ class DataSmart(MutableMapping):
|
||||
yield key
|
||||
|
||||
def __iter__(self):
|
||||
def keylist(d):
|
||||
klist = set()
|
||||
for key in d:
|
||||
if key == "_data":
|
||||
continue
|
||||
if not d[key]:
|
||||
continue
|
||||
klist.add(key)
|
||||
|
||||
seen = set()
|
||||
def _keys(d):
|
||||
if "_data" in d:
|
||||
klist |= keylist(d["_data"])
|
||||
for key in _keys(d["_data"]):
|
||||
yield key
|
||||
|
||||
return klist
|
||||
|
||||
for k in keylist(self.dict):
|
||||
yield k
|
||||
for key in d:
|
||||
if key != "_data":
|
||||
if not key in seen:
|
||||
seen.add(key)
|
||||
yield key
|
||||
return _keys(self.dict)
|
||||
|
||||
def __len__(self):
|
||||
return len(frozenset(self))
|
||||
|
||||
@@ -30,7 +30,6 @@ except ImportError:
|
||||
import pickle
|
||||
import logging
|
||||
import atexit
|
||||
import traceback
|
||||
import bb.utils
|
||||
|
||||
# This is the pid for which we should generate the event. This is set when
|
||||
@@ -38,8 +37,6 @@ import bb.utils
|
||||
worker_pid = 0
|
||||
worker_pipe = None
|
||||
|
||||
logger = logging.getLogger('BitBake.Event')
|
||||
|
||||
class Event(object):
|
||||
"""Base class for events"""
|
||||
|
||||
@@ -61,35 +58,23 @@ _ui_handler_seq = 0
|
||||
bb.utils._context["NotHandled"] = NotHandled
|
||||
bb.utils._context["Handled"] = Handled
|
||||
|
||||
def execute_handler(name, handler, event, d):
|
||||
event.data = d
|
||||
try:
|
||||
ret = handler(event)
|
||||
except Exception:
|
||||
etype, value, tb = sys.exc_info()
|
||||
logger.error("Execution of event handler '%s' failed" % name,
|
||||
exc_info=(etype, value, tb.tb_next))
|
||||
raise
|
||||
except SystemExit as exc:
|
||||
if exc.code != 0:
|
||||
logger.error("Execution of event handler '%s' failed" % name)
|
||||
raise
|
||||
finally:
|
||||
del event.data
|
||||
|
||||
if ret is not None:
|
||||
warnings.warn("Using Handled/NotHandled in event handlers is deprecated",
|
||||
DeprecationWarning, stacklevel = 2)
|
||||
|
||||
def fire_class_handlers(event, d):
|
||||
if isinstance(event, logging.LogRecord):
|
||||
return
|
||||
|
||||
for name, handler in _handlers.iteritems():
|
||||
try:
|
||||
execute_handler(name, handler, event, d)
|
||||
except Exception:
|
||||
continue
|
||||
for handler in _handlers:
|
||||
h = _handlers[handler]
|
||||
event.data = d
|
||||
if type(h).__name__ == "code":
|
||||
locals = {"e": event}
|
||||
bb.utils.simple_exec(h, locals)
|
||||
ret = bb.utils.better_eval("tmpHandler(e)", locals)
|
||||
if ret is not None:
|
||||
warnings.warn("Using Handled/NotHandled in event handlers is deprecated",
|
||||
DeprecationWarning, stacklevel = 2)
|
||||
else:
|
||||
h(event)
|
||||
del event.data
|
||||
|
||||
ui_queue = []
|
||||
@atexit.register
|
||||
@@ -102,7 +87,8 @@ def print_ui_queue():
|
||||
console = logging.StreamHandler(sys.stdout)
|
||||
console.setFormatter(BBLogFormatter("%(levelname)s: %(message)s"))
|
||||
logger.handlers = [console]
|
||||
for event in ui_queue:
|
||||
while ui_queue:
|
||||
event = ui_queue.pop()
|
||||
if isinstance(event, logging.LogRecord):
|
||||
logger.handle(event)
|
||||
|
||||
@@ -119,10 +105,7 @@ def fire_ui_handlers(event, d):
|
||||
# We use pickle here since it better handles object instances
|
||||
# which xmlrpc's marshaller does not. Events *must* be serializable
|
||||
# by pickle.
|
||||
if hasattr(_ui_handlers[h].event, "sendpickle"):
|
||||
_ui_handlers[h].event.sendpickle((pickle.dumps(event)))
|
||||
else:
|
||||
_ui_handlers[h].event.send(event)
|
||||
_ui_handlers[h].event.send((pickle.dumps(event)))
|
||||
except:
|
||||
errors.append(h)
|
||||
for h in errors:
|
||||
@@ -153,7 +136,6 @@ def fire_from_worker(event, d):
|
||||
event = pickle.loads(event[7:-8])
|
||||
fire_ui_handlers(event, d)
|
||||
|
||||
noop = lambda _: None
|
||||
def register(name, handler):
|
||||
"""Register an Event handler"""
|
||||
|
||||
@@ -164,18 +146,9 @@ def register(name, handler):
|
||||
if handler is not None:
|
||||
# handle string containing python code
|
||||
if isinstance(handler, basestring):
|
||||
tmp = "def %s(e):\n%s" % (name, handler)
|
||||
try:
|
||||
code = compile(tmp, "%s(e)" % name, "exec")
|
||||
except SyntaxError:
|
||||
logger.error("Unable to register event handler '%s':\n%s", name,
|
||||
''.join(traceback.format_exc(limit=0)))
|
||||
_handlers[name] = noop
|
||||
return
|
||||
env = {}
|
||||
bb.utils.simple_exec(code, env)
|
||||
func = bb.utils.better_eval(name, env)
|
||||
_handlers[name] = func
|
||||
tmp = "def tmpHandler(e):\n%s" % handler
|
||||
comp = bb.utils.better_compile(tmp, "tmpHandler(e)", "bb.event._registerCode")
|
||||
_handlers[name] = comp
|
||||
else:
|
||||
_handlers[name] = handler
|
||||
|
||||
@@ -205,17 +178,13 @@ def getName(e):
|
||||
class ConfigParsed(Event):
|
||||
"""Configuration Parsing Complete"""
|
||||
|
||||
class RecipeEvent(Event):
|
||||
class RecipeParsed(Event):
|
||||
""" Recipe Parsing Complete """
|
||||
|
||||
def __init__(self, fn):
|
||||
self.fn = fn
|
||||
Event.__init__(self)
|
||||
|
||||
class RecipePreFinalise(RecipeEvent):
|
||||
""" Recipe Parsing Complete but not yet finialised"""
|
||||
|
||||
class RecipeParsed(RecipeEvent):
|
||||
""" Recipe Parsing Complete """
|
||||
|
||||
class StampUpdate(Event):
|
||||
"""Trigger for any adjustment of the stamp files to happen"""
|
||||
|
||||
@@ -287,12 +256,11 @@ class BuildCompleted(BuildBase):
|
||||
class NoProvider(Event):
|
||||
"""No Provider for an Event"""
|
||||
|
||||
def __init__(self, item, runtime=False, dependees=None, reasons=[]):
|
||||
def __init__(self, item, runtime=False, dependees=None):
|
||||
Event.__init__(self)
|
||||
self._item = item
|
||||
self._runtime = runtime
|
||||
self._dependees = dependees
|
||||
self._reasons = reasons
|
||||
|
||||
def getItem(self):
|
||||
return self._item
|
||||
@@ -390,16 +358,6 @@ class TargetsTreeGenerated(Event):
|
||||
Event.__init__(self)
|
||||
self._model = model
|
||||
|
||||
class FilesMatchingFound(Event):
|
||||
"""
|
||||
Event when a list of files matching the supplied pattern has
|
||||
been generated
|
||||
"""
|
||||
def __init__(self, pattern, matches):
|
||||
Event.__init__(self)
|
||||
self._pattern = pattern
|
||||
self._matches = matches
|
||||
|
||||
class ConfigFilesFound(Event):
|
||||
"""
|
||||
Event when a list of appropriate config files has been generated
|
||||
@@ -409,14 +367,6 @@ class ConfigFilesFound(Event):
|
||||
self._variable = variable
|
||||
self._values = values
|
||||
|
||||
class ConfigFilePathFound(Event):
|
||||
"""
|
||||
Event when a path for a config file has been found
|
||||
"""
|
||||
def __init__(self, path):
|
||||
Event.__init__(self)
|
||||
self._path = path
|
||||
|
||||
class MsgBase(Event):
|
||||
"""Base class for messages"""
|
||||
|
||||
@@ -446,12 +396,6 @@ class LogHandler(logging.Handler):
|
||||
"""Dispatch logging messages as bitbake events"""
|
||||
|
||||
def emit(self, record):
|
||||
if record.exc_info:
|
||||
etype, value, tb = record.exc_info
|
||||
if hasattr(tb, 'tb_next'):
|
||||
tb = list(bb.exceptions.extract_traceback(tb, context=3))
|
||||
record.bb_exc_info = (etype, value, tb)
|
||||
record.exc_info = None
|
||||
fire(record, None)
|
||||
|
||||
def filter(self, record):
|
||||
|
||||
@@ -1,84 +0,0 @@
|
||||
from __future__ import absolute_import
|
||||
import inspect
|
||||
import traceback
|
||||
import bb.namedtuple_with_abc
|
||||
from collections import namedtuple
|
||||
|
||||
|
||||
class TracebackEntry(namedtuple.abc):
|
||||
"""Pickleable representation of a traceback entry"""
|
||||
_fields = 'filename lineno function args code_context index'
|
||||
_header = ' File "{0.filename}", line {0.lineno}, in {0.function}{0.args}'
|
||||
|
||||
def format(self, formatter=None):
|
||||
if not self.code_context:
|
||||
return self._header.format(self) + '\n'
|
||||
|
||||
formatted = [self._header.format(self) + ':\n']
|
||||
|
||||
for lineindex, line in enumerate(self.code_context):
|
||||
if formatter:
|
||||
line = formatter(line)
|
||||
|
||||
if lineindex == self.index:
|
||||
formatted.append(' >%s' % line)
|
||||
else:
|
||||
formatted.append(' %s' % line)
|
||||
return formatted
|
||||
|
||||
def __str__(self):
|
||||
return ''.join(self.format())
|
||||
|
||||
def _get_frame_args(frame):
|
||||
"""Get the formatted arguments and class (if available) for a frame"""
|
||||
arginfo = inspect.getargvalues(frame)
|
||||
if not arginfo.args:
|
||||
return '', None
|
||||
|
||||
firstarg = arginfo.args[0]
|
||||
if firstarg == 'self':
|
||||
self = arginfo.locals['self']
|
||||
cls = self.__class__.__name__
|
||||
|
||||
arginfo.args.pop(0)
|
||||
del arginfo.locals['self']
|
||||
else:
|
||||
cls = None
|
||||
|
||||
formatted = inspect.formatargvalues(*arginfo)
|
||||
return formatted, cls
|
||||
|
||||
def extract_traceback(tb, context=1):
|
||||
frames = inspect.getinnerframes(tb, context)
|
||||
for frame, filename, lineno, function, code_context, index in frames:
|
||||
formatted_args, cls = _get_frame_args(frame)
|
||||
if cls:
|
||||
function = '%s.%s' % (cls, function)
|
||||
yield TracebackEntry(filename, lineno, function, formatted_args,
|
||||
code_context, index)
|
||||
|
||||
def format_extracted(extracted, formatter=None, limit=None):
|
||||
if limit:
|
||||
extracted = extracted[-limit:]
|
||||
|
||||
formatted = []
|
||||
for tracebackinfo in extracted:
|
||||
formatted.extend(tracebackinfo.format(formatter))
|
||||
return formatted
|
||||
|
||||
|
||||
def format_exception(etype, value, tb, context=1, limit=None, formatter=None):
|
||||
formatted = ['Traceback (most recent call last):\n']
|
||||
|
||||
if hasattr(tb, 'tb_next'):
|
||||
tb = extract_traceback(tb, context)
|
||||
|
||||
formatted.extend(format_extracted(tb, formatter, limit))
|
||||
formatted.extend(traceback.format_exception_only(etype, value))
|
||||
return formatted
|
||||
|
||||
def to_string(exc):
|
||||
if isinstance(exc, SystemExit):
|
||||
if not isinstance(exc.code, basestring):
|
||||
return 'Exited with "%d"' % exc.code
|
||||
return str(exc)
|
||||
@@ -153,18 +153,18 @@ def fetcher_init(d):
|
||||
Called to initialize the fetchers once the configuration data is known.
|
||||
Calls before this must not hit the cache.
|
||||
"""
|
||||
pd = persist_data.persist(d)
|
||||
# When to drop SCM head revisions controlled by user policy
|
||||
srcrev_policy = bb.data.getVar('BB_SRCREV_POLICY', d, 1) or "clear"
|
||||
if srcrev_policy == "cache":
|
||||
logger.debug(1, "Keeping SRCREV cache due to cache policy of: %s", srcrev_policy)
|
||||
elif srcrev_policy == "clear":
|
||||
logger.debug(1, "Clearing SRCREV cache due to cache policy of: %s", srcrev_policy)
|
||||
revs = persist_data.persist('BB_URI_HEADREVS', d)
|
||||
try:
|
||||
bb.fetch.saved_headrevs = revs.items()
|
||||
bb.fetch.saved_headrevs = pd['BB_URI_HEADREVS'].items()
|
||||
except:
|
||||
pass
|
||||
revs.clear()
|
||||
del pd['BB_URI_HEADREVS']
|
||||
else:
|
||||
raise FetchError("Invalid SRCREV cache policy of: %s" % srcrev_policy)
|
||||
|
||||
@@ -178,7 +178,8 @@ def fetcher_compare_revisions(d):
|
||||
return true/false on whether they've changed.
|
||||
"""
|
||||
|
||||
data = persist_data.persist('BB_URI_HEADREVS', d).items()
|
||||
pd = persist_data.persist(d)
|
||||
data = pd['BB_URI_HEADREVS'].items()
|
||||
data2 = bb.fetch.saved_headrevs
|
||||
|
||||
changed = False
|
||||
@@ -755,13 +756,15 @@ class Fetch(object):
|
||||
if not hasattr(self, "_latest_revision"):
|
||||
raise ParameterError
|
||||
|
||||
revs = persist_data.persist('BB_URI_HEADREVS', d)
|
||||
pd = persist_data.persist(d)
|
||||
revs = pd['BB_URI_HEADREVS']
|
||||
key = self.generate_revision_key(url, ud, d)
|
||||
try:
|
||||
return revs[key]
|
||||
except KeyError:
|
||||
revs[key] = rev = self._latest_revision(url, ud, d)
|
||||
return rev
|
||||
rev = revs[key]
|
||||
if rev != None:
|
||||
return str(rev)
|
||||
|
||||
revs[key] = rev = self._latest_revision(url, ud, d)
|
||||
return rev
|
||||
|
||||
def sortable_revision(self, url, ud, d):
|
||||
"""
|
||||
@@ -770,17 +773,18 @@ class Fetch(object):
|
||||
if hasattr(self, "_sortable_revision"):
|
||||
return self._sortable_revision(url, ud, d)
|
||||
|
||||
localcounts = persist_data.persist('BB_URI_LOCALCOUNT', d)
|
||||
pd = persist_data.persist(d)
|
||||
localcounts = pd['BB_URI_LOCALCOUNT']
|
||||
key = self.generate_revision_key(url, ud, d)
|
||||
|
||||
latest_rev = self._build_revision(url, ud, d)
|
||||
last_rev = localcounts.get(key + '_rev')
|
||||
last_rev = localcounts[key + '_rev']
|
||||
uselocalcount = bb.data.getVar("BB_LOCALCOUNT_OVERRIDE", d, True) or False
|
||||
count = None
|
||||
if uselocalcount:
|
||||
count = Fetch.localcount_internal_helper(ud, d)
|
||||
if count is None:
|
||||
count = localcounts.get(key + '_count')
|
||||
count = localcounts[key + '_count']
|
||||
|
||||
if last_rev == latest_rev:
|
||||
return str(count + "+" + latest_rev)
|
||||
|
||||
@@ -67,15 +67,15 @@ class Bzr(Fetch):
|
||||
|
||||
options = []
|
||||
|
||||
if command == "revno":
|
||||
if command is "revno":
|
||||
bzrcmd = "%s revno %s %s://%s" % (basecmd, " ".join(options), proto, bzrroot)
|
||||
else:
|
||||
if ud.revision:
|
||||
options.append("-r %s" % ud.revision)
|
||||
|
||||
if command == "fetch":
|
||||
if command is "fetch":
|
||||
bzrcmd = "%s co %s %s://%s" % (basecmd, " ".join(options), proto, bzrroot)
|
||||
elif command == "update":
|
||||
elif command is "update":
|
||||
bzrcmd = "%s pull %s --overwrite" % (basecmd, " ".join(options))
|
||||
else:
|
||||
raise FetchError("Invalid bzr command %s" % command)
|
||||
|
||||
@@ -242,36 +242,36 @@ class Git(Fetch):
|
||||
"""
|
||||
Look in the cache for the latest revision, if not present ask the SCM.
|
||||
"""
|
||||
revs = bb.persist_data.persist('BB_URI_HEADREVS', d)
|
||||
persisted = bb.persist_data.persist(d)
|
||||
revs = persisted['BB_URI_HEADREVS']
|
||||
|
||||
key = self.generate_revision_key(url, ud, d, branch=True)
|
||||
|
||||
try:
|
||||
return revs[key]
|
||||
except KeyError:
|
||||
rev = revs[key]
|
||||
if rev is None:
|
||||
# Compatibility with old key format, no branch included
|
||||
oldkey = self.generate_revision_key(url, ud, d, branch=False)
|
||||
try:
|
||||
rev = revs[oldkey]
|
||||
except KeyError:
|
||||
rev = self._latest_revision(url, ud, d)
|
||||
else:
|
||||
rev = revs[oldkey]
|
||||
if rev is not None:
|
||||
del revs[oldkey]
|
||||
else:
|
||||
rev = self._latest_revision(url, ud, d)
|
||||
revs[key] = rev
|
||||
return rev
|
||||
|
||||
return str(rev)
|
||||
|
||||
def sortable_revision(self, url, ud, d):
|
||||
"""
|
||||
|
||||
"""
|
||||
localcounts = bb.persist_data.persist('BB_URI_LOCALCOUNT', d)
|
||||
pd = bb.persist_data.persist(d)
|
||||
localcounts = pd['BB_URI_LOCALCOUNT']
|
||||
key = self.generate_revision_key(url, ud, d, branch=True)
|
||||
oldkey = self.generate_revision_key(url, ud, d, branch=False)
|
||||
|
||||
latest_rev = self._build_revision(url, ud, d)
|
||||
last_rev = localcounts.get(key + '_rev')
|
||||
last_rev = localcounts[key + '_rev']
|
||||
if last_rev is None:
|
||||
last_rev = localcounts.get(oldkey + '_rev')
|
||||
last_rev = localcounts[oldkey + '_rev']
|
||||
if last_rev is not None:
|
||||
del localcounts[oldkey + '_rev']
|
||||
localcounts[key + '_rev'] = last_rev
|
||||
@@ -281,9 +281,9 @@ class Git(Fetch):
|
||||
if uselocalcount:
|
||||
count = Fetch.localcount_internal_helper(ud, d)
|
||||
if count is None:
|
||||
count = localcounts.get(key + '_count')
|
||||
count = localcounts[key + '_count']
|
||||
if count is None:
|
||||
count = localcounts.get(oldkey + '_count')
|
||||
count = localcounts[oldkey + '_count']
|
||||
if count is not None:
|
||||
del localcounts[oldkey + '_count']
|
||||
localcounts[key + '_count'] = count
|
||||
|
||||
@@ -28,8 +28,10 @@ from __future__ import absolute_import
|
||||
from __future__ import print_function
|
||||
import os, re
|
||||
import logging
|
||||
import bb.data, bb.persist_data, bb.utils
|
||||
from bb import data
|
||||
import bb
|
||||
from bb import data
|
||||
from bb import persist_data
|
||||
from bb import utils
|
||||
|
||||
__version__ = "2"
|
||||
|
||||
@@ -203,10 +205,7 @@ def uri_replace(ud, uri_find, uri_replace, d):
|
||||
result_decoded[loc] = uri_decoded[loc]
|
||||
if isinstance(i, basestring):
|
||||
if (re.match(i, uri_decoded[loc])):
|
||||
if not uri_replace_decoded[loc]:
|
||||
result_decoded[loc] = ""
|
||||
else:
|
||||
result_decoded[loc] = re.sub(i, uri_replace_decoded[loc], uri_decoded[loc])
|
||||
result_decoded[loc] = re.sub(i, uri_replace_decoded[loc], uri_decoded[loc])
|
||||
if uri_find_decoded.index(i) == 2:
|
||||
if ud.mirrortarball:
|
||||
result_decoded[loc] = os.path.join(os.path.dirname(result_decoded[loc]), os.path.basename(ud.mirrortarball))
|
||||
@@ -225,18 +224,18 @@ def fetcher_init(d):
|
||||
Called to initialize the fetchers once the configuration data is known.
|
||||
Calls before this must not hit the cache.
|
||||
"""
|
||||
pd = persist_data.persist(d)
|
||||
# When to drop SCM head revisions controlled by user policy
|
||||
srcrev_policy = bb.data.getVar('BB_SRCREV_POLICY', d, True) or "clear"
|
||||
if srcrev_policy == "cache":
|
||||
logger.debug(1, "Keeping SRCREV cache due to cache policy of: %s", srcrev_policy)
|
||||
elif srcrev_policy == "clear":
|
||||
logger.debug(1, "Clearing SRCREV cache due to cache policy of: %s", srcrev_policy)
|
||||
revs = bb.persist_data.persist('BB_URI_HEADREVS', d)
|
||||
try:
|
||||
bb.fetch2.saved_headrevs = revs.items()
|
||||
bb.fetch2.saved_headrevs = pd['BB_URI_HEADREVS'].items()
|
||||
except:
|
||||
pass
|
||||
revs.clear()
|
||||
del pd['BB_URI_HEADREVS']
|
||||
else:
|
||||
raise FetchError("Invalid SRCREV cache policy of: %s" % srcrev_policy)
|
||||
|
||||
@@ -250,7 +249,8 @@ def fetcher_compare_revisions(d):
|
||||
return true/false on whether they've changed.
|
||||
"""
|
||||
|
||||
data = bb.persist_data.persist('BB_URI_HEADREVS', d).items()
|
||||
pd = persist_data.persist(d)
|
||||
data = pd['BB_URI_HEADREVS'].items()
|
||||
data2 = bb.fetch2.saved_headrevs
|
||||
|
||||
changed = False
|
||||
@@ -300,22 +300,6 @@ def verify_checksum(u, ud, d):
|
||||
if ud.sha256_expected != sha256data:
|
||||
raise SHA256SumError(ud.localpath, ud.sha256_expected, sha256data, u)
|
||||
|
||||
def update_stamp(u, ud, d):
|
||||
"""
|
||||
donestamp is file stamp indicating the whole fetching is done
|
||||
this function update the stamp after verifying the checksum
|
||||
"""
|
||||
if os.path.exists(ud.donestamp):
|
||||
# Touch the done stamp file to show active use of the download
|
||||
try:
|
||||
os.utime(ud.donestamp, None)
|
||||
except:
|
||||
# Errors aren't fatal here
|
||||
pass
|
||||
else:
|
||||
verify_checksum(u, ud, d)
|
||||
open(ud.donestamp, 'w').close()
|
||||
|
||||
def subprocess_setup():
|
||||
import signal
|
||||
# Python installs a SIGPIPE handler by default. This is usually not what
|
||||
@@ -368,7 +352,7 @@ def get_srcrev(d):
|
||||
|
||||
def localpath(url, d):
|
||||
fetcher = bb.fetch2.Fetch([url], d)
|
||||
return fetcher.localpath(url)
|
||||
return fetcher.localpath(url)
|
||||
|
||||
def runfetchcmd(cmd, d, quiet = False, cleanup = []):
|
||||
"""
|
||||
@@ -388,7 +372,7 @@ def runfetchcmd(cmd, d, quiet = False, cleanup = []):
|
||||
'SSH_AUTH_SOCK', 'SSH_AGENT_PID', 'HOME']
|
||||
|
||||
for var in exportvars:
|
||||
val = bb.data.getVar(var, d, True)
|
||||
val = data.getVar(var, d, True)
|
||||
if val:
|
||||
cmd = 'export ' + var + '=\"%s\"; %s' % (val, cmd)
|
||||
|
||||
@@ -514,15 +498,15 @@ def srcrev_internal_helper(ud, d, name):
|
||||
return ud.parm['tag']
|
||||
|
||||
rev = None
|
||||
pn = bb.data.getVar("PN", d, True)
|
||||
pn = data.getVar("PN", d, True)
|
||||
if name != '':
|
||||
rev = bb.data.getVar("SRCREV_%s_pn-%s" % (name, pn), d, True)
|
||||
rev = data.getVar("SRCREV_%s_pn-%s" % (name, pn), d, True)
|
||||
if not rev:
|
||||
rev = bb.data.getVar("SRCREV_%s" % name, d, True)
|
||||
rev = data.getVar("SRCREV_%s" % name, d, True)
|
||||
if not rev:
|
||||
rev = bb.data.getVar("SRCREV_pn-%s" % pn, d, True)
|
||||
rev = data.getVar("SRCREV_pn-%s" % pn, d, True)
|
||||
if not rev:
|
||||
rev = bb.data.getVar("SRCREV", d, True)
|
||||
rev = data.getVar("SRCREV", d, True)
|
||||
if rev == "INVALID":
|
||||
raise FetchError("Please set SRCREV to a valid value", ud.url)
|
||||
if rev == "AUTOINC":
|
||||
@@ -608,12 +592,12 @@ class FetchData(object):
|
||||
if "srcdate" in self.parm:
|
||||
return self.parm['srcdate']
|
||||
|
||||
pn = bb.data.getVar("PN", d, True)
|
||||
pn = data.getVar("PN", d, True)
|
||||
|
||||
if pn:
|
||||
return bb.data.getVar("SRCDATE_%s" % pn, d, True) or bb.data.getVar("SRCDATE", d, True) or bb.data.getVar("DATE", d, True)
|
||||
return data.getVar("SRCDATE_%s" % pn, d, True) or data.getVar("SRCDATE", d, True) or data.getVar("DATE", d, True)
|
||||
|
||||
return bb.data.getVar("SRCDATE", d, True) or bb.data.getVar("DATE", d, True)
|
||||
return data.getVar("SRCDATE", d, True) or data.getVar("DATE", d, True)
|
||||
|
||||
class FetchMethod(object):
|
||||
"""Base class for 'fetch'ing data"""
|
||||
@@ -679,7 +663,7 @@ class FetchMethod(object):
|
||||
|
||||
try:
|
||||
unpack = bb.utils.to_boolean(urldata.parm.get('unpack'), True)
|
||||
except ValueError as exc:
|
||||
except ValueError, exc:
|
||||
bb.fatal("Invalid value for 'unpack' parameter for %s: %s" %
|
||||
(file, urldata.parm.get('unpack')))
|
||||
|
||||
@@ -708,7 +692,7 @@ class FetchMethod(object):
|
||||
elif file.endswith('.zip') or file.endswith('.jar'):
|
||||
try:
|
||||
dos = bb.utils.to_boolean(urldata.parm.get('dos'), False)
|
||||
except ValueError as exc:
|
||||
except ValueError, exc:
|
||||
bb.fatal("Invalid value for 'dos' parameter for %s: %s" %
|
||||
(file, urldata.parm.get('dos')))
|
||||
cmd = 'unzip -q -o'
|
||||
@@ -806,10 +790,10 @@ class FetchMethod(object):
|
||||
|
||||
localcount = None
|
||||
if name != '':
|
||||
pn = bb.data.getVar("PN", d, True)
|
||||
localcount = bb.data.getVar("LOCALCOUNT_" + name, d, True)
|
||||
pn = data.getVar("PN", d, True)
|
||||
localcount = data.getVar("LOCALCOUNT_" + name, d, True)
|
||||
if not localcount:
|
||||
localcount = bb.data.getVar("LOCALCOUNT", d, True)
|
||||
localcount = data.getVar("LOCALCOUNT", d, True)
|
||||
return localcount
|
||||
|
||||
localcount_internal_helper = staticmethod(localcount_internal_helper)
|
||||
@@ -821,13 +805,15 @@ class FetchMethod(object):
|
||||
if not hasattr(self, "_latest_revision"):
|
||||
raise ParameterError("The fetcher for this URL does not support _latest_revision", url)
|
||||
|
||||
revs = bb.persist_data.persist('BB_URI_HEADREVS', d)
|
||||
pd = persist_data.persist(d)
|
||||
revs = pd['BB_URI_HEADREVS']
|
||||
key = self.generate_revision_key(url, ud, d, name)
|
||||
try:
|
||||
return revs[key]
|
||||
except KeyError:
|
||||
revs[key] = rev = self._latest_revision(url, ud, d, name)
|
||||
return rev
|
||||
rev = revs[key]
|
||||
if rev != None:
|
||||
return str(rev)
|
||||
|
||||
revs[key] = rev = self._latest_revision(url, ud, d, name)
|
||||
return rev
|
||||
|
||||
def sortable_revision(self, url, ud, d, name):
|
||||
"""
|
||||
@@ -836,17 +822,18 @@ class FetchMethod(object):
|
||||
if hasattr(self, "_sortable_revision"):
|
||||
return self._sortable_revision(url, ud, d)
|
||||
|
||||
localcounts = bb.persist_data.persist('BB_URI_LOCALCOUNT', d)
|
||||
pd = persist_data.persist(d)
|
||||
localcounts = pd['BB_URI_LOCALCOUNT']
|
||||
key = self.generate_revision_key(url, ud, d, name)
|
||||
|
||||
latest_rev = self._build_revision(url, ud, d, name)
|
||||
last_rev = localcounts.get(key + '_rev')
|
||||
last_rev = localcounts[key + '_rev']
|
||||
uselocalcount = bb.data.getVar("BB_LOCALCOUNT_OVERRIDE", d, True) or False
|
||||
count = None
|
||||
if uselocalcount:
|
||||
count = FetchMethod.localcount_internal_helper(ud, d, name)
|
||||
if count is None:
|
||||
count = localcounts.get(key + '_count') or "0"
|
||||
count = localcounts[key + '_count'] or "0"
|
||||
|
||||
if last_rev == latest_rev:
|
||||
return str(count + "+" + latest_rev)
|
||||
@@ -948,9 +935,6 @@ class Fetch(object):
|
||||
if hasattr(m, "build_mirror_data"):
|
||||
m.build_mirror_data(u, ud, self.d)
|
||||
localpath = ud.localpath
|
||||
# early checksum verify, so that if checksum mismatched,
|
||||
# fetcher still have chance to fetch from mirror
|
||||
update_stamp(u, ud, self.d)
|
||||
|
||||
except bb.fetch2.NetworkAccess:
|
||||
raise
|
||||
@@ -967,7 +951,17 @@ class Fetch(object):
|
||||
if not localpath or ((not os.path.exists(localpath)) and localpath.find("*") == -1):
|
||||
raise FetchError("Unable to fetch URL %s from any source." % u, u)
|
||||
|
||||
update_stamp(u, ud, self.d)
|
||||
if os.path.exists(ud.donestamp):
|
||||
# Touch the done stamp file to show active use of the download
|
||||
try:
|
||||
os.utime(ud.donestamp, None)
|
||||
except:
|
||||
# Errors aren't fatal here
|
||||
pass
|
||||
else:
|
||||
# Only check the checksums if we've not seen this item before, then create the stamp
|
||||
verify_checksum(u, ud, self.d)
|
||||
open(ud.donestamp, 'w').close()
|
||||
|
||||
finally:
|
||||
bb.utils.unlockfile(lf)
|
||||
|
||||
@@ -66,15 +66,15 @@ class Bzr(FetchMethod):
|
||||
|
||||
options = []
|
||||
|
||||
if command == "revno":
|
||||
if command is "revno":
|
||||
bzrcmd = "%s revno %s %s://%s" % (basecmd, " ".join(options), proto, bzrroot)
|
||||
else:
|
||||
if ud.revision:
|
||||
options.append("-r %s" % ud.revision)
|
||||
|
||||
if command == "fetch":
|
||||
if command is "fetch":
|
||||
bzrcmd = "%s co %s %s://%s" % (basecmd, " ".join(options), proto, bzrroot)
|
||||
elif command == "update":
|
||||
elif command is "update":
|
||||
bzrcmd = "%s pull %s --overwrite" % (basecmd, " ".join(options))
|
||||
else:
|
||||
raise FetchError("Invalid bzr command %s" % command, ud.url)
|
||||
|
||||
@@ -3,41 +3,6 @@
|
||||
"""
|
||||
BitBake 'Fetch' git implementation
|
||||
|
||||
git fetcher support the SRC_URI with format of:
|
||||
SRC_URI = "git://some.host/somepath;OptionA=xxx;OptionB=xxx;..."
|
||||
|
||||
Supported SRC_URI options are:
|
||||
|
||||
- branch
|
||||
The git branch to retrieve from. The default is "master"
|
||||
|
||||
this option also support multiple branches fetching, branches
|
||||
are seperated by comma. in multiple branches case, the name option
|
||||
must have the same number of names to match the branches, which is
|
||||
used to specify the SRC_REV for the branch
|
||||
e.g:
|
||||
SRC_URI="git://some.host/somepath;branch=branchX,branchY;name=nameX,nameY"
|
||||
SRCREV_nameX = "xxxxxxxxxxxxxxxxxxxx"
|
||||
SRCREV_nameY = "YYYYYYYYYYYYYYYYYYYY"
|
||||
|
||||
- tag
|
||||
The git tag to retrieve. The default is "master"
|
||||
|
||||
- protocol
|
||||
The method to use to access the repository. Common options are "git",
|
||||
"http", "file" and "rsync". The default is "git"
|
||||
|
||||
- rebaseable
|
||||
rebaseable indicates that the upstream git repo may rebase in the future,
|
||||
and current revision may disappear from upstream repo. This option will
|
||||
reminder fetcher to preserve local cache carefully for future use.
|
||||
The default value is "0", set rebaseable=1 for rebaseable git repo
|
||||
|
||||
- nocheckout
|
||||
Don't checkout source code when unpacking. set this option for the recipe
|
||||
who has its own routine to checkout code.
|
||||
The default is "0", set nocheckout=1 if needed.
|
||||
|
||||
"""
|
||||
|
||||
#Copyright (C) 2005 Richard Purdie
|
||||
@@ -86,14 +51,11 @@ class Git(FetchMethod):
|
||||
elif not ud.host:
|
||||
ud.proto = 'file'
|
||||
else:
|
||||
ud.proto = "git"
|
||||
ud.proto = "rsync"
|
||||
|
||||
if not ud.proto in ('git', 'file', 'ssh', 'http', 'https'):
|
||||
raise bb.fetch2.ParameterError("Invalid protocol type", ud.url)
|
||||
|
||||
ud.nocheckout = ud.parm.get("nocheckout","0") == "1"
|
||||
|
||||
ud.rebaseable = ud.parm.get("rebaseable","0") == "1"
|
||||
ud.nocheckout = False
|
||||
if 'nocheckout' in ud.parm:
|
||||
ud.nocheckout = True
|
||||
|
||||
branches = ud.parm.get("branch", "master").split(',')
|
||||
if len(branches) != len(ud.names):
|
||||
@@ -103,9 +65,16 @@ class Git(FetchMethod):
|
||||
branch = branches[ud.names.index(name)]
|
||||
ud.branches[name] = branch
|
||||
|
||||
gitsrcname = '%s%s' % (ud.host, ud.path.replace('/', '.'))
|
||||
ud.mirrortarball = 'git2_%s.tar.gz' % (gitsrcname)
|
||||
ud.fullmirror = os.path.join(data.getVar("DL_DIR", d, True), ud.mirrortarball)
|
||||
ud.clonedir = os.path.join(data.expand('${GITDIR}', d), gitsrcname)
|
||||
|
||||
ud.basecmd = data.getVar("FETCHCMD_git", d, True) or "git"
|
||||
|
||||
ud.write_tarballs = ((data.getVar("BB_GENERATE_MIRROR_TARBALLS", d, True) or "0") != "0") or ud.rebaseable
|
||||
ud.write_tarballs = (data.getVar("BB_GENERATE_MIRROR_TARBALLS", d, True) or "0") != "0"
|
||||
|
||||
ud.localfile = ud.clonedir
|
||||
|
||||
ud.setup_revisons(d)
|
||||
|
||||
@@ -115,20 +84,6 @@ class Git(FetchMethod):
|
||||
ud.branches[name] = ud.revisions[name]
|
||||
ud.revisions[name] = self.latest_revision(ud.url, ud, d, name)
|
||||
|
||||
gitsrcname = '%s%s' % (ud.host, ud.path.replace('/', '.'))
|
||||
# for rebaseable git repo, it is necessary to keep mirror tar ball
|
||||
# per revision, so that even the revision disappears from the
|
||||
# upstream repo in the future, the mirror will remain intact and still
|
||||
# contains the revision
|
||||
if ud.rebaseable:
|
||||
for name in ud.names:
|
||||
gitsrcname = gitsrcname + '_' + ud.revisions[name]
|
||||
ud.mirrortarball = 'git2_%s.tar.gz' % (gitsrcname)
|
||||
ud.fullmirror = os.path.join(data.getVar("DL_DIR", d, True), ud.mirrortarball)
|
||||
ud.clonedir = os.path.join(data.expand('${GITDIR}', d), gitsrcname)
|
||||
|
||||
ud.localfile = ud.clonedir
|
||||
|
||||
def localpath(self, url, ud, d):
|
||||
return ud.clonedir
|
||||
|
||||
@@ -170,10 +125,8 @@ class Git(FetchMethod):
|
||||
|
||||
# If the repo still doesn't exist, fallback to cloning it
|
||||
if not os.path.exists(ud.clonedir):
|
||||
clone_cmd = "%s clone --bare --mirror %s://%s%s%s %s" % \
|
||||
(ud.basecmd, ud.proto, username, ud.host, ud.path, ud.clonedir)
|
||||
bb.fetch2.check_network_access(d, clone_cmd)
|
||||
runfetchcmd(clone_cmd, d)
|
||||
bb.fetch2.check_network_access(d, "git clone --bare %s%s" % (ud.host, ud.path))
|
||||
runfetchcmd("%s clone --bare %s://%s%s%s %s" % (ud.basecmd, ud.proto, username, ud.host, ud.path, ud.clonedir), d)
|
||||
|
||||
os.chdir(ud.clonedir)
|
||||
# Update the checkout if needed
|
||||
@@ -182,16 +135,15 @@ class Git(FetchMethod):
|
||||
if not self._contains_ref(ud.revisions[name], d):
|
||||
needupdate = True
|
||||
if needupdate:
|
||||
bb.fetch2.check_network_access(d, "git fetch %s%s" % (ud.host, ud.path), ud.url)
|
||||
try:
|
||||
runfetchcmd("%s remote prune origin" % ud.basecmd, d)
|
||||
runfetchcmd("%s remote rm origin" % ud.basecmd, d)
|
||||
except bb.fetch2.FetchError:
|
||||
logger.debug(1, "No Origin")
|
||||
|
||||
runfetchcmd("%s remote add --mirror origin %s://%s%s%s" % (ud.basecmd, ud.proto, username, ud.host, ud.path), d)
|
||||
fetch_cmd = "%s fetch --all -t" % ud.basecmd
|
||||
bb.fetch2.check_network_access(d, fetch_cmd, ud.url)
|
||||
runfetchcmd(fetch_cmd, d)
|
||||
runfetchcmd("%s remote add origin %s://%s%s%s" % (ud.basecmd, ud.proto, username, ud.host, ud.path), d)
|
||||
runfetchcmd("%s fetch --all -t" % ud.basecmd, d)
|
||||
runfetchcmd("%s prune-packed" % ud.basecmd, d)
|
||||
runfetchcmd("%s pack-redundant --all | xargs -r rm" % ud.basecmd, d)
|
||||
ud.repochanged = True
|
||||
@@ -219,11 +171,8 @@ class Git(FetchMethod):
|
||||
runfetchcmd("git clone -s -n %s %s" % (ud.clonedir, destdir), d)
|
||||
if not ud.nocheckout:
|
||||
os.chdir(destdir)
|
||||
if subdir != "":
|
||||
runfetchcmd("%s read-tree %s%s" % (ud.basecmd, ud.revisions[ud.names[0]], readpathspec), d)
|
||||
runfetchcmd("%s checkout-index -q -f -a" % ud.basecmd, d)
|
||||
else:
|
||||
runfetchcmd("%s checkout %s" % (ud.basecmd, ud.revisions[ud.names[0]]), d)
|
||||
runfetchcmd("%s read-tree %s%s" % (ud.basecmd, ud.revisions[ud.names[0]], readpathspec), d)
|
||||
runfetchcmd("%s checkout-index -q -f -a" % ud.basecmd, d)
|
||||
return True
|
||||
|
||||
def clean(self, ud, d):
|
||||
@@ -255,10 +204,9 @@ class Git(FetchMethod):
|
||||
else:
|
||||
username = ""
|
||||
|
||||
bb.fetch2.check_network_access(d, "git ls-remote %s%s %s" % (ud.host, ud.path, ud.branches[name]))
|
||||
basecmd = data.getVar("FETCHCMD_git", d, True) or "git"
|
||||
cmd = "%s ls-remote %s://%s%s%s %s" % \
|
||||
(basecmd, ud.proto, username, ud.host, ud.path, ud.branches[name])
|
||||
bb.fetch2.check_network_access(d, cmd)
|
||||
cmd = "%s ls-remote %s://%s%s%s %s" % (basecmd, ud.proto, username, ud.host, ud.path, ud.branches[name])
|
||||
output = runfetchcmd(cmd, d, True)
|
||||
if not output:
|
||||
raise bb.fetch2.FetchError("The command %s gave empty output unexpectedly" % cmd, url)
|
||||
@@ -278,13 +226,10 @@ class Git(FetchMethod):
|
||||
# Check if we have the rev already
|
||||
|
||||
if not os.path.exists(ud.clonedir):
|
||||
logging.debug("GIT repository for %s does not exist in %s. \
|
||||
Downloading.", url, ud.clonedir)
|
||||
print("no repo")
|
||||
self.download(None, ud, d)
|
||||
if not os.path.exists(ud.clonedir):
|
||||
logger.error("GIT repository for %s does not exist in %s after \
|
||||
download. Cannot get sortable buildnumber, using \
|
||||
old value", url, ud.clonedir)
|
||||
logger.error("GIT repository for %s doesn't exist in %s, cannot get sortable buildnumber, using old value", url, ud.clonedir)
|
||||
return None
|
||||
|
||||
|
||||
|
||||
@@ -94,21 +94,21 @@ class Hg(FetchMethod):
|
||||
else:
|
||||
hgroot = ud.user + "@" + host + ud.path
|
||||
|
||||
if command == "info":
|
||||
if command is "info":
|
||||
return "%s identify -i %s://%s/%s" % (basecmd, proto, hgroot, ud.module)
|
||||
|
||||
options = [];
|
||||
if ud.revision:
|
||||
options.append("-r %s" % ud.revision)
|
||||
|
||||
if command == "fetch":
|
||||
if command is "fetch":
|
||||
cmd = "%s clone %s %s://%s/%s %s" % (basecmd, " ".join(options), proto, hgroot, ud.module, ud.module)
|
||||
elif command == "pull":
|
||||
elif command is "pull":
|
||||
# do not pass options list; limiting pull to rev causes the local
|
||||
# repo not to contain it and immediately following "update" command
|
||||
# will crash
|
||||
cmd = "%s pull" % (basecmd)
|
||||
elif command == "update":
|
||||
elif command is "update":
|
||||
cmd = "%s update -C %s" % (basecmd, " ".join(options))
|
||||
else:
|
||||
raise FetchError("Invalid hg command %s" % command, ud.url)
|
||||
|
||||
@@ -68,9 +68,9 @@ class Osc(FetchMethod):
|
||||
|
||||
coroot = self._strip_leading_slashes(ud.path)
|
||||
|
||||
if command == "fetch":
|
||||
if command is "fetch":
|
||||
osccmd = "%s %s co %s/%s %s" % (basecmd, config, coroot, ud.module, " ".join(options))
|
||||
elif command == "update":
|
||||
elif command is "update":
|
||||
osccmd = "%s %s up %s" % (basecmd, config, " ".join(options))
|
||||
else:
|
||||
raise FetchError("Invalid osc command %s" % command, ud.url)
|
||||
|
||||
@@ -87,7 +87,7 @@ class Svn(FetchMethod):
|
||||
if ud.pswd:
|
||||
options.append("--password %s" % ud.pswd)
|
||||
|
||||
if command == "info":
|
||||
if command is "info":
|
||||
svncmd = "%s info %s %s://%s/%s/" % (basecmd, " ".join(options), proto, svnroot, ud.module)
|
||||
else:
|
||||
suffix = ""
|
||||
@@ -95,9 +95,9 @@ class Svn(FetchMethod):
|
||||
options.append("-r %s" % ud.revision)
|
||||
suffix = "@%s" % (ud.revision)
|
||||
|
||||
if command == "fetch":
|
||||
if command is "fetch":
|
||||
svncmd = "%s co %s %s://%s/%s%s %s" % (basecmd, " ".join(options), proto, svnroot, ud.module, suffix, ud.module)
|
||||
elif command == "update":
|
||||
elif command is "update":
|
||||
svncmd = "%s update %s" % (basecmd, " ".join(options))
|
||||
else:
|
||||
raise FetchError("Invalid svn command %s" % command, ud.url)
|
||||
|
||||
@@ -65,15 +65,9 @@ class BBLogFormatter(logging.Formatter):
|
||||
def format(self, record):
|
||||
record.levelname = self.getLevelName(record.levelno)
|
||||
if record.levelno == self.PLAIN:
|
||||
msg = record.getMessage()
|
||||
return record.getMessage()
|
||||
else:
|
||||
msg = logging.Formatter.format(self, record)
|
||||
|
||||
if hasattr(record, 'bb_exc_info'):
|
||||
etype, value, tb = record.bb_exc_info
|
||||
formatted = bb.exceptions.format_exception(etype, value, tb, limit=5)
|
||||
msg += '\n' + ''.join(formatted)
|
||||
return msg
|
||||
return logging.Formatter.format(self, record)
|
||||
|
||||
class Loggers(dict):
|
||||
def __getitem__(self, key):
|
||||
@@ -153,8 +147,8 @@ def set_debug_domains(domainargs):
|
||||
#
|
||||
|
||||
def debug(level, msgdomain, msg):
|
||||
warnings.warn("bb.msg.debug is deprecated in favor of the python 'logging' module",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
warnings.warn("bb.msg.debug will soon be deprecated in favor of the python 'logging' module",
|
||||
PendingDeprecationWarning, stacklevel=2)
|
||||
level = logging.DEBUG - (level - 1)
|
||||
if not msgdomain:
|
||||
logger.debug(level, msg)
|
||||
@@ -162,13 +156,13 @@ def debug(level, msgdomain, msg):
|
||||
loggers[msgdomain].debug(level, msg)
|
||||
|
||||
def plain(msg):
|
||||
warnings.warn("bb.msg.plain is deprecated in favor of the python 'logging' module",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
warnings.warn("bb.msg.plain will soon be deprecated in favor of the python 'logging' module",
|
||||
PendingDeprecationWarning, stacklevel=2)
|
||||
logger.plain(msg)
|
||||
|
||||
def note(level, msgdomain, msg):
|
||||
warnings.warn("bb.msg.note is deprecated in favor of the python 'logging' module",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
warnings.warn("bb.msg.note will soon be deprecated in favor of the python 'logging' module",
|
||||
PendingDeprecationWarning, stacklevel=2)
|
||||
if level > 1:
|
||||
if msgdomain:
|
||||
logger.verbose(msg)
|
||||
@@ -181,22 +175,24 @@ def note(level, msgdomain, msg):
|
||||
loggers[msgdomain].info(msg)
|
||||
|
||||
def warn(msgdomain, msg):
|
||||
warnings.warn("bb.msg.warn is deprecated in favor of the python 'logging' module",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
warnings.warn("bb.msg.warn will soon be deprecated in favor of the python 'logging' module",
|
||||
PendingDeprecationWarning, stacklevel=2)
|
||||
if not msgdomain:
|
||||
logger.warn(msg)
|
||||
else:
|
||||
loggers[msgdomain].warn(msg)
|
||||
|
||||
def error(msgdomain, msg):
|
||||
warnings.warn("bb.msg.error is deprecated in favor of the python 'logging' module",
|
||||
DeprecationWarning, stacklevel=2)
|
||||
warnings.warn("bb.msg.error will soon be deprecated in favor of the python 'logging' module",
|
||||
PendingDeprecationWarning, stacklevel=2)
|
||||
if not msgdomain:
|
||||
logger.error(msg)
|
||||
else:
|
||||
loggers[msgdomain].error(msg)
|
||||
|
||||
def fatal(msgdomain, msg):
|
||||
warnings.warn("bb.msg.fatal will soon be deprecated in favor of raising appropriate exceptions",
|
||||
PendingDeprecationWarning, stacklevel=2)
|
||||
if not msgdomain:
|
||||
logger.critical(msg)
|
||||
else:
|
||||
|
||||
@@ -1,255 +0,0 @@
|
||||
# http://code.activestate.com/recipes/577629-namedtupleabc-abstract-base-class-mix-in-for-named/
|
||||
#!/usr/bin/env python
|
||||
# Copyright (c) 2011 Jan Kaliszewski (zuo). Available under the MIT License.
|
||||
|
||||
"""
|
||||
namedtuple_with_abc.py:
|
||||
* named tuple mix-in + ABC (abstract base class) recipe,
|
||||
* works under Python 2.6, 2.7 as well as 3.x.
|
||||
|
||||
Import this module to patch collections.namedtuple() factory function
|
||||
-- enriching it with the 'abc' attribute (an abstract base class + mix-in
|
||||
for named tuples) and decorating it with a wrapper that registers each
|
||||
newly created named tuple as a subclass of namedtuple.abc.
|
||||
|
||||
How to import:
|
||||
import collections, namedtuple_with_abc
|
||||
or:
|
||||
import namedtuple_with_abc
|
||||
from collections import namedtuple
|
||||
# ^ in this variant you must import namedtuple function
|
||||
# *after* importing namedtuple_with_abc module
|
||||
or simply:
|
||||
from namedtuple_with_abc import namedtuple
|
||||
|
||||
Simple usage example:
|
||||
class Credentials(namedtuple.abc):
|
||||
_fields = 'username password'
|
||||
def __str__(self):
|
||||
return ('{0.__class__.__name__}'
|
||||
'(username={0.username}, password=...)'.format(self))
|
||||
print(Credentials("alice", "Alice's password"))
|
||||
|
||||
For more advanced examples -- see below the "if __name__ == '__main__':".
|
||||
"""
|
||||
|
||||
import collections
|
||||
from abc import ABCMeta, abstractproperty
|
||||
from functools import wraps
|
||||
from sys import version_info
|
||||
|
||||
__all__ = ('namedtuple',)
|
||||
_namedtuple = collections.namedtuple
|
||||
|
||||
|
||||
class _NamedTupleABCMeta(ABCMeta):
|
||||
'''The metaclass for the abstract base class + mix-in for named tuples.'''
|
||||
def __new__(mcls, name, bases, namespace):
|
||||
fields = namespace.get('_fields')
|
||||
for base in bases:
|
||||
if fields is not None:
|
||||
break
|
||||
fields = getattr(base, '_fields', None)
|
||||
if not isinstance(fields, abstractproperty):
|
||||
basetuple = _namedtuple(name, fields)
|
||||
bases = (basetuple,) + bases
|
||||
namespace.pop('_fields', None)
|
||||
namespace.setdefault('__doc__', basetuple.__doc__)
|
||||
namespace.setdefault('__slots__', ())
|
||||
return ABCMeta.__new__(mcls, name, bases, namespace)
|
||||
|
||||
|
||||
exec(
|
||||
# Python 2.x metaclass declaration syntax
|
||||
"""class _NamedTupleABC(object):
|
||||
'''The abstract base class + mix-in for named tuples.'''
|
||||
__metaclass__ = _NamedTupleABCMeta
|
||||
_fields = abstractproperty()""" if version_info[0] < 3 else
|
||||
# Python 3.x metaclass declaration syntax
|
||||
"""class _NamedTupleABC(metaclass=_NamedTupleABCMeta):
|
||||
'''The abstract base class + mix-in for named tuples.'''
|
||||
_fields = abstractproperty()"""
|
||||
)
|
||||
|
||||
|
||||
_namedtuple.abc = _NamedTupleABC
|
||||
#_NamedTupleABC.register(type(version_info)) # (and similar, in the future...)
|
||||
|
||||
@wraps(_namedtuple)
|
||||
def namedtuple(*args, **kwargs):
|
||||
'''Named tuple factory with namedtuple.abc subclass registration.'''
|
||||
cls = _namedtuple(*args, **kwargs)
|
||||
_NamedTupleABC.register(cls)
|
||||
return cls
|
||||
|
||||
collections.namedtuple = namedtuple
|
||||
|
||||
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
||||
'''Examples and explanations'''
|
||||
|
||||
# Simple usage
|
||||
|
||||
class MyRecord(namedtuple.abc):
|
||||
_fields = 'x y z' # such form will be transformed into ('x', 'y', 'z')
|
||||
def _my_custom_method(self):
|
||||
return list(self._asdict().items())
|
||||
# (the '_fields' attribute belongs to the named tuple public API anyway)
|
||||
|
||||
rec = MyRecord(1, 2, 3)
|
||||
print(rec)
|
||||
print(rec._my_custom_method())
|
||||
print(rec._replace(y=222))
|
||||
print(rec._replace(y=222)._my_custom_method())
|
||||
|
||||
# Custom abstract classes...
|
||||
|
||||
class MyAbstractRecord(namedtuple.abc):
|
||||
def _my_custom_method(self):
|
||||
return list(self._asdict().items())
|
||||
|
||||
try:
|
||||
MyAbstractRecord() # (abstract classes cannot be instantiated)
|
||||
except TypeError as exc:
|
||||
print(exc)
|
||||
|
||||
class AnotherAbstractRecord(MyAbstractRecord):
|
||||
def __str__(self):
|
||||
return '<<<{0}>>>'.format(super(AnotherAbstractRecord,
|
||||
self).__str__())
|
||||
|
||||
# ...and their non-abstract subclasses
|
||||
|
||||
class MyRecord2(MyAbstractRecord):
|
||||
_fields = 'a, b'
|
||||
|
||||
class MyRecord3(AnotherAbstractRecord):
|
||||
_fields = 'p', 'q', 'r'
|
||||
|
||||
rec2 = MyRecord2('foo', 'bar')
|
||||
print(rec2)
|
||||
print(rec2._my_custom_method())
|
||||
print(rec2._replace(b=222))
|
||||
print(rec2._replace(b=222)._my_custom_method())
|
||||
|
||||
rec3 = MyRecord3('foo', 'bar', 'baz')
|
||||
print(rec3)
|
||||
print(rec3._my_custom_method())
|
||||
print(rec3._replace(q=222))
|
||||
print(rec3._replace(q=222)._my_custom_method())
|
||||
|
||||
# You can also subclass non-abstract ones...
|
||||
|
||||
class MyRecord33(MyRecord3):
|
||||
def __str__(self):
|
||||
return '< {0!r}, ..., {0!r} >'.format(self.p, self.r)
|
||||
|
||||
rec33 = MyRecord33('foo', 'bar', 'baz')
|
||||
print(rec33)
|
||||
print(rec33._my_custom_method())
|
||||
print(rec33._replace(q=222))
|
||||
print(rec33._replace(q=222)._my_custom_method())
|
||||
|
||||
# ...and even override the magic '_fields' attribute again
|
||||
|
||||
class MyRecord345(MyRecord3):
|
||||
_fields = 'e f g h i j k'
|
||||
|
||||
rec345 = MyRecord345(1, 2, 3, 4, 3, 2, 1)
|
||||
print(rec345)
|
||||
print(rec345._my_custom_method())
|
||||
print(rec345._replace(f=222))
|
||||
print(rec345._replace(f=222)._my_custom_method())
|
||||
|
||||
# Mixing-in some other classes is also possible:
|
||||
|
||||
class MyMixIn(object):
|
||||
def method(self):
|
||||
return "MyMixIn.method() called"
|
||||
def _my_custom_method(self):
|
||||
return "MyMixIn._my_custom_method() called"
|
||||
def count(self, item):
|
||||
return "MyMixIn.count({0}) called".format(item)
|
||||
def _asdict(self): # (cannot override a namedtuple method, see below)
|
||||
return "MyMixIn._asdict() called"
|
||||
|
||||
class MyRecord4(MyRecord33, MyMixIn): # mix-in on the right
|
||||
_fields = 'j k l x'
|
||||
|
||||
class MyRecord5(MyMixIn, MyRecord33): # mix-in on the left
|
||||
_fields = 'j k l x y'
|
||||
|
||||
rec4 = MyRecord4(1, 2, 3, 2)
|
||||
print(rec4)
|
||||
print(rec4.method())
|
||||
print(rec4._my_custom_method()) # MyRecord33's
|
||||
print(rec4.count(2)) # tuple's
|
||||
print(rec4._replace(k=222))
|
||||
print(rec4._replace(k=222).method())
|
||||
print(rec4._replace(k=222)._my_custom_method()) # MyRecord33's
|
||||
print(rec4._replace(k=222).count(8)) # tuple's
|
||||
|
||||
rec5 = MyRecord5(1, 2, 3, 2, 1)
|
||||
print(rec5)
|
||||
print(rec5.method())
|
||||
print(rec5._my_custom_method()) # MyMixIn's
|
||||
print(rec5.count(2)) # MyMixIn's
|
||||
print(rec5._replace(k=222))
|
||||
print(rec5._replace(k=222).method())
|
||||
print(rec5._replace(k=222)._my_custom_method()) # MyMixIn's
|
||||
print(rec5._replace(k=222).count(2)) # MyMixIn's
|
||||
|
||||
# None that behavior: the standard namedtuple methods cannot be
|
||||
# overriden by a foreign mix-in -- even if the mix-in is declared
|
||||
# as the leftmost base class (but, obviously, you can override them
|
||||
# in the defined class or its subclasses):
|
||||
|
||||
print(rec4._asdict()) # (returns a dict, not "MyMixIn._asdict() called")
|
||||
print(rec5._asdict()) # (returns a dict, not "MyMixIn._asdict() called")
|
||||
|
||||
class MyRecord6(MyRecord33):
|
||||
_fields = 'j k l x y z'
|
||||
def _asdict(self):
|
||||
return "MyRecord6._asdict() called"
|
||||
rec6 = MyRecord6(1, 2, 3, 1, 2, 3)
|
||||
print(rec6._asdict()) # (this returns "MyRecord6._asdict() called")
|
||||
|
||||
# All that record classes are real subclasses of namedtuple.abc:
|
||||
|
||||
assert issubclass(MyRecord, namedtuple.abc)
|
||||
assert issubclass(MyAbstractRecord, namedtuple.abc)
|
||||
assert issubclass(AnotherAbstractRecord, namedtuple.abc)
|
||||
assert issubclass(MyRecord2, namedtuple.abc)
|
||||
assert issubclass(MyRecord3, namedtuple.abc)
|
||||
assert issubclass(MyRecord33, namedtuple.abc)
|
||||
assert issubclass(MyRecord345, namedtuple.abc)
|
||||
assert issubclass(MyRecord4, namedtuple.abc)
|
||||
assert issubclass(MyRecord5, namedtuple.abc)
|
||||
assert issubclass(MyRecord6, namedtuple.abc)
|
||||
|
||||
# ...but abstract ones are not subclasses of tuple
|
||||
# (and this is what you probably want):
|
||||
|
||||
assert not issubclass(MyAbstractRecord, tuple)
|
||||
assert not issubclass(AnotherAbstractRecord, tuple)
|
||||
|
||||
assert issubclass(MyRecord, tuple)
|
||||
assert issubclass(MyRecord2, tuple)
|
||||
assert issubclass(MyRecord3, tuple)
|
||||
assert issubclass(MyRecord33, tuple)
|
||||
assert issubclass(MyRecord345, tuple)
|
||||
assert issubclass(MyRecord4, tuple)
|
||||
assert issubclass(MyRecord5, tuple)
|
||||
assert issubclass(MyRecord6, tuple)
|
||||
|
||||
# Named tuple classes created with namedtuple() factory function
|
||||
# (in the "traditional" way) are registered as "virtual" subclasses
|
||||
# of namedtuple.abc:
|
||||
|
||||
MyTuple = namedtuple('MyTuple', 'a b c')
|
||||
mt = MyTuple(1, 2, 3)
|
||||
assert issubclass(MyTuple, namedtuple.abc)
|
||||
assert isinstance(mt, namedtuple.abc)
|
||||
@@ -84,9 +84,9 @@ class DataNode(AstNode):
|
||||
|
||||
def getFunc(self, key, data):
|
||||
if 'flag' in self.groupd and self.groupd['flag'] != None:
|
||||
return data.getVarFlag(key, self.groupd['flag'], noweakdefault=True)
|
||||
return bb.data.getVarFlag(key, self.groupd['flag'], data)
|
||||
else:
|
||||
return data.getVar(key, noweakdefault=True)
|
||||
return bb.data.getVar(key, data)
|
||||
|
||||
def eval(self, data):
|
||||
groupd = self.groupd
|
||||
@@ -100,7 +100,7 @@ class DataNode(AstNode):
|
||||
elif "colon" in groupd and groupd["colon"] != None:
|
||||
e = data.createCopy()
|
||||
bb.data.update_data(e)
|
||||
val = bb.data.expand(groupd["value"], e, key + "[:=]")
|
||||
val = bb.data.expand(groupd["value"], e)
|
||||
elif "append" in groupd and groupd["append"] != None:
|
||||
val = "%s %s" % ((self.getFunc(key, data) or ""), groupd["value"])
|
||||
elif "prepend" in groupd and groupd["prepend"] != None:
|
||||
@@ -307,14 +307,6 @@ def handleInherit(statements, filename, lineno, m):
|
||||
statements.append(InheritNode(filename, lineno, classes.split()))
|
||||
|
||||
def finalize(fn, d, variant = None):
|
||||
all_handlers = {}
|
||||
for var in bb.data.getVar('__BBHANDLERS', d) or []:
|
||||
# try to add the handler
|
||||
handler = bb.data.getVar(var, d)
|
||||
bb.event.register(var, handler)
|
||||
|
||||
bb.event.fire(bb.event.RecipePreFinalise(fn), d)
|
||||
|
||||
bb.data.expandKeys(d)
|
||||
bb.data.update_data(d)
|
||||
code = []
|
||||
@@ -323,6 +315,12 @@ def finalize(fn, d, variant = None):
|
||||
bb.utils.simple_exec("\n".join(code), {"d": d})
|
||||
bb.data.update_data(d)
|
||||
|
||||
all_handlers = {}
|
||||
for var in bb.data.getVar('__BBHANDLERS', d) or []:
|
||||
# try to add the handler
|
||||
handler = bb.data.getVar(var, d)
|
||||
bb.event.register(var, handler)
|
||||
|
||||
tasklist = bb.data.getVar('__BBTASKS', d) or []
|
||||
bb.build.add_tasks(tasklist, d)
|
||||
|
||||
@@ -371,14 +369,12 @@ def multi_finalize(fn, d):
|
||||
logger.debug(2, "Appending .bbappend file %s to %s", append, fn)
|
||||
bb.parse.BBHandler.handle(append, d, True)
|
||||
|
||||
onlyfinalise = d.getVar("__ONLYFINALISE", False)
|
||||
|
||||
safe_d = d
|
||||
d = bb.data.createCopy(safe_d)
|
||||
try:
|
||||
finalize(fn, d)
|
||||
except bb.parse.SkipPackage as e:
|
||||
bb.data.setVar("__SKIPPED", e.args[0], d)
|
||||
except bb.parse.SkipPackage:
|
||||
bb.data.setVar("__SKIPPED", True, d)
|
||||
datastores = {"": safe_d}
|
||||
|
||||
versions = (d.getVar("BBVERSIONS", True) or "").split()
|
||||
@@ -420,48 +416,27 @@ def multi_finalize(fn, d):
|
||||
verfunc(pv, d, safe_d)
|
||||
try:
|
||||
finalize(fn, d)
|
||||
except bb.parse.SkipPackage as e:
|
||||
bb.data.setVar("__SKIPPED", e.args[0], d)
|
||||
except bb.parse.SkipPackage:
|
||||
bb.data.setVar("__SKIPPED", True, d)
|
||||
|
||||
_create_variants(datastores, versions, verfunc)
|
||||
|
||||
extended = d.getVar("BBCLASSEXTEND", True) or ""
|
||||
if extended:
|
||||
# the following is to support bbextends with arguments, for e.g. multilib
|
||||
# an example is as follows:
|
||||
# BBCLASSEXTEND = "multilib:lib32"
|
||||
# it will create foo-lib32, inheriting multilib.bbclass and set
|
||||
# BBEXTENDCURR to "multilib" and BBEXTENDVARIANT to "lib32"
|
||||
extendedmap = {}
|
||||
variantmap = {}
|
||||
|
||||
for ext in extended.split():
|
||||
eext = ext.split(':', 2)
|
||||
if len(eext) > 1:
|
||||
extendedmap[ext] = eext[0]
|
||||
variantmap[ext] = eext[1]
|
||||
else:
|
||||
extendedmap[ext] = ext
|
||||
|
||||
pn = d.getVar("PN", True)
|
||||
def extendfunc(name, d):
|
||||
if name != extendedmap[name]:
|
||||
d.setVar("BBEXTENDCURR", extendedmap[name])
|
||||
d.setVar("BBEXTENDVARIANT", variantmap[name])
|
||||
else:
|
||||
d.setVar("PN", "%s-%s" % (pn, name))
|
||||
bb.parse.BBHandler.inherit([extendedmap[name]], d)
|
||||
d.setVar("PN", "%s-%s" % (pn, name))
|
||||
bb.parse.BBHandler.inherit([name], d)
|
||||
|
||||
safe_d.setVar("BBCLASSEXTEND", extended)
|
||||
_create_variants(datastores, extendedmap.keys(), extendfunc)
|
||||
_create_variants(datastores, extended.split(), extendfunc)
|
||||
|
||||
for variant, variant_d in datastores.iteritems():
|
||||
if variant:
|
||||
try:
|
||||
if not onlyfinalise or variant in onlyfinalise:
|
||||
finalize(fn, variant_d, variant)
|
||||
except bb.parse.SkipPackage as e:
|
||||
bb.data.setVar("__SKIPPED", e.args[0], variant_d)
|
||||
finalize(fn, variant_d, variant)
|
||||
except bb.parse.SkipPackage:
|
||||
bb.data.setVar("__SKIPPED", True, variant_d)
|
||||
|
||||
if len(datastores) > 1:
|
||||
variants = filter(None, datastores.iterkeys())
|
||||
|
||||
@@ -96,7 +96,7 @@ def handle(fn, data, include):
|
||||
s = s.rstrip()
|
||||
if s[0] == '#': continue # skip comments
|
||||
while s[-1] == '\\':
|
||||
s2 = f.readline().strip()
|
||||
s2 = f.readline()[:-1].strip()
|
||||
lineno = lineno + 1
|
||||
s = s[:-1] + s2
|
||||
feeder(lineno, s, fn, statements)
|
||||
|
||||
@@ -26,8 +26,7 @@ import logging
|
||||
import os.path
|
||||
import sys
|
||||
import warnings
|
||||
from bb.compat import total_ordering
|
||||
from collections import Mapping
|
||||
import bb.msg, bb.data, bb.utils
|
||||
|
||||
try:
|
||||
import sqlite3
|
||||
@@ -40,11 +39,8 @@ if sqlversion[0] < 3 or (sqlversion[0] == 3 and sqlversion[1] < 3):
|
||||
|
||||
|
||||
logger = logging.getLogger("BitBake.PersistData")
|
||||
if hasattr(sqlite3, 'enable_shared_cache'):
|
||||
sqlite3.enable_shared_cache(True)
|
||||
|
||||
|
||||
@total_ordering
|
||||
class SQLTable(collections.MutableMapping):
|
||||
"""Object representing a table/domain in the database"""
|
||||
def __init__(self, cursor, table):
|
||||
@@ -66,31 +62,16 @@ class SQLTable(collections.MutableMapping):
|
||||
continue
|
||||
raise
|
||||
|
||||
def __enter__(self):
|
||||
self.cursor.__enter__()
|
||||
return self
|
||||
|
||||
def __exit__(self, *excinfo):
|
||||
self.cursor.__exit__(*excinfo)
|
||||
|
||||
def __getitem__(self, key):
|
||||
data = self._execute("SELECT * from %s where key=?;" %
|
||||
self.table, [key])
|
||||
for row in data:
|
||||
return row[1]
|
||||
raise KeyError(key)
|
||||
|
||||
def __delitem__(self, key):
|
||||
if key not in self:
|
||||
raise KeyError(key)
|
||||
self._execute("DELETE from %s where key=?;" % self.table, [key])
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
if not isinstance(key, basestring):
|
||||
raise TypeError('Only string keys are supported')
|
||||
elif not isinstance(value, basestring):
|
||||
raise TypeError('Only string values are supported')
|
||||
|
||||
data = self._execute("SELECT * from %s where key=?;" %
|
||||
self.table, [key])
|
||||
exists = len(list(data))
|
||||
@@ -111,40 +92,53 @@ class SQLTable(collections.MutableMapping):
|
||||
|
||||
def __iter__(self):
|
||||
data = self._execute("SELECT key FROM %s;" % self.table)
|
||||
return (row[0] for row in data)
|
||||
for row in data:
|
||||
yield row[0]
|
||||
|
||||
def __lt__(self, other):
|
||||
if not isinstance(other, Mapping):
|
||||
raise NotImplemented
|
||||
|
||||
return len(self) < len(other)
|
||||
|
||||
def values(self):
|
||||
return list(self.itervalues())
|
||||
def iteritems(self):
|
||||
data = self._execute("SELECT * FROM %s;" % self.table)
|
||||
for row in data:
|
||||
yield row[0], row[1]
|
||||
|
||||
def itervalues(self):
|
||||
data = self._execute("SELECT value FROM %s;" % self.table)
|
||||
return (row[0] for row in data)
|
||||
for row in data:
|
||||
yield row[0]
|
||||
|
||||
def items(self):
|
||||
return list(self.iteritems())
|
||||
|
||||
def iteritems(self):
|
||||
return self._execute("SELECT * FROM %s;" % self.table)
|
||||
class SQLData(object):
|
||||
"""Object representing the persistent data"""
|
||||
def __init__(self, filename):
|
||||
bb.utils.mkdirhier(os.path.dirname(filename))
|
||||
|
||||
def clear(self):
|
||||
self._execute("DELETE FROM %s;" % self.table)
|
||||
self.filename = filename
|
||||
self.connection = sqlite3.connect(filename, timeout=30,
|
||||
isolation_level=None)
|
||||
self.cursor = self.connection.cursor()
|
||||
self._tables = {}
|
||||
|
||||
def has_key(self, key):
|
||||
return key in self
|
||||
def __getitem__(self, table):
|
||||
if not isinstance(table, basestring):
|
||||
raise TypeError("table argument must be a string, not '%s'" %
|
||||
type(table))
|
||||
|
||||
if table in self._tables:
|
||||
return self._tables[table]
|
||||
else:
|
||||
tableobj = self._tables[table] = SQLTable(self.cursor, table)
|
||||
return tableobj
|
||||
|
||||
def __delitem__(self, table):
|
||||
if table in self._tables:
|
||||
del self._tables[table]
|
||||
self.cursor.execute("DROP TABLE IF EXISTS %s;" % table)
|
||||
|
||||
|
||||
class PersistData(object):
|
||||
"""Deprecated representation of the bitbake persistent data store"""
|
||||
def __init__(self, d):
|
||||
warnings.warn("Use of PersistData is deprecated. Please use "
|
||||
"persist(domain, d) instead.",
|
||||
category=DeprecationWarning,
|
||||
warnings.warn("Use of PersistData will be deprecated in the future",
|
||||
category=PendingDeprecationWarning,
|
||||
stacklevel=2)
|
||||
|
||||
self.data = persist(d)
|
||||
@@ -187,19 +181,14 @@ class PersistData(object):
|
||||
"""
|
||||
del self.data[domain][key]
|
||||
|
||||
def connect(database):
|
||||
return sqlite3.connect(database, timeout=30, isolation_level=None)
|
||||
|
||||
def persist(domain, d):
|
||||
"""Convenience factory for SQLTable objects based upon metadata"""
|
||||
import bb.data, bb.utils
|
||||
def persist(d):
|
||||
"""Convenience factory for construction of SQLData based upon metadata"""
|
||||
cachedir = (bb.data.getVar("PERSISTENT_DIR", d, True) or
|
||||
bb.data.getVar("CACHE", d, True))
|
||||
if not cachedir:
|
||||
logger.critical("Please set the 'PERSISTENT_DIR' or 'CACHE' variable")
|
||||
sys.exit(1)
|
||||
|
||||
bb.utils.mkdirhier(cachedir)
|
||||
cachefile = os.path.join(cachedir, "bb_persist_data.sqlite3")
|
||||
connection = connect(cachefile)
|
||||
return SQLTable(connection, domain)
|
||||
return SQLData(cachefile)
|
||||
|
||||
@@ -93,7 +93,7 @@ def run(cmd, input=None, log=None, **options):
|
||||
|
||||
try:
|
||||
pipe = Popen(cmd, **options)
|
||||
except OSError as exc:
|
||||
except OSError, exc:
|
||||
if exc.errno == 2:
|
||||
raise NotFoundError(cmd)
|
||||
else:
|
||||
|
||||
@@ -84,10 +84,10 @@ def findPreferredProvider(pn, cfgData, dataCache, pkg_pn = None, item = None):
|
||||
preferred_ver = None
|
||||
|
||||
localdata = data.createCopy(cfgData)
|
||||
bb.data.setVar('OVERRIDES', "%s:pn-%s:%s" % (data.getVar('OVERRIDES', localdata), pn, pn), localdata)
|
||||
bb.data.setVar('OVERRIDES', "pn-%s:%s:%s" % (pn, pn, data.getVar('OVERRIDES', localdata)), localdata)
|
||||
bb.data.update_data(localdata)
|
||||
|
||||
preferred_v = bb.data.getVar('PREFERRED_VERSION', localdata, True)
|
||||
preferred_v = bb.data.getVar('PREFERRED_VERSION_%s' % pn, localdata, True)
|
||||
if preferred_v:
|
||||
m = re.match('(\d+:)*(.*)(_.*)*', preferred_v)
|
||||
if m:
|
||||
@@ -124,18 +124,6 @@ def findPreferredProvider(pn, cfgData, dataCache, pkg_pn = None, item = None):
|
||||
itemstr = " (for item %s)" % item
|
||||
if preferred_file is None:
|
||||
logger.info("preferred version %s of %s not available%s", pv_str, pn, itemstr)
|
||||
available_vers = []
|
||||
for file_set in pkg_pn:
|
||||
for f in file_set:
|
||||
pe, pv, pr = dataCache.pkg_pepvpr[f]
|
||||
ver_str = pv
|
||||
if pe:
|
||||
ver_str = "%s:%s" % (pe, ver_str)
|
||||
if not ver_str in available_vers:
|
||||
available_vers.append(ver_str)
|
||||
if available_vers:
|
||||
available_vers.sort()
|
||||
logger.info("versions of %s available: %s", pn, ' '.join(available_vers))
|
||||
else:
|
||||
logger.debug(1, "selecting %s as PREFERRED_VERSION %s of package %s%s", preferred_file, pv_str, pn, itemstr)
|
||||
|
||||
|
||||
@@ -151,7 +151,7 @@ def builtin_trap(name, args, interp, env, stdin, stdout, stderr, debugflags):
|
||||
for sig in args[1:]:
|
||||
try:
|
||||
env.traps[sig] = action
|
||||
except Exception as e:
|
||||
except Exception, e:
|
||||
stderr.write('trap: %s\n' % str(e))
|
||||
return 0
|
||||
|
||||
@@ -214,7 +214,7 @@ def utility_cat(name, args, interp, env, stdin, stdout, stderr, debugflags):
|
||||
data = f.read()
|
||||
finally:
|
||||
f.close()
|
||||
except IOError as e:
|
||||
except IOError, e:
|
||||
if e.errno != errno.ENOENT:
|
||||
raise
|
||||
status = 1
|
||||
@@ -433,7 +433,7 @@ def utility_mkdir(name, args, interp, env, stdin, stdout, stderr, debugflags):
|
||||
if option.has_p:
|
||||
try:
|
||||
os.makedirs(path)
|
||||
except IOError as e:
|
||||
except IOError, e:
|
||||
if e.errno != errno.EEXIST:
|
||||
raise
|
||||
else:
|
||||
@@ -561,7 +561,7 @@ def utility_sort(name, args, interp, env, stdin, stdout, stderr, debugflags):
|
||||
lines = f.readlines()
|
||||
finally:
|
||||
f.close()
|
||||
except IOError as e:
|
||||
except IOError, e:
|
||||
stderr.write(str(e) + '\n')
|
||||
return 1
|
||||
|
||||
@@ -679,7 +679,7 @@ def run_command(name, args, interp, env, stdin, stdout,
|
||||
p = subprocess.Popen([name] + args, cwd=env['PWD'], env=exec_env,
|
||||
stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
|
||||
out, err = p.communicate()
|
||||
except WindowsError as e:
|
||||
except WindowsError, e:
|
||||
raise UtilityError(str(e))
|
||||
|
||||
if not unixoutput:
|
||||
|
||||
@@ -248,7 +248,7 @@ class Redirections:
|
||||
raise NotImplementedError('cannot open absolute path %s' % repr(filename))
|
||||
else:
|
||||
f = file(filename, mode+'b')
|
||||
except IOError as e:
|
||||
except IOError, e:
|
||||
raise RedirectionError(str(e))
|
||||
|
||||
wrapper = None
|
||||
@@ -368,7 +368,7 @@ def resolve_shebang(path, ignoreshell=False):
|
||||
if arg is None:
|
||||
return [cmd, win32_to_unix_path(path)]
|
||||
return [cmd, arg, win32_to_unix_path(path)]
|
||||
except IOError as e:
|
||||
except IOError, e:
|
||||
if e.errno!=errno.ENOENT and \
|
||||
(e.errno!=errno.EPERM and not os.path.isdir(path)): # Opening a directory raises EPERM
|
||||
raise
|
||||
@@ -747,7 +747,7 @@ class Interpreter:
|
||||
for cmd in cmds:
|
||||
try:
|
||||
status = self.execute(cmd)
|
||||
except ExitSignal as e:
|
||||
except ExitSignal, e:
|
||||
if sourced:
|
||||
raise
|
||||
status = int(e.args[0])
|
||||
@@ -758,13 +758,13 @@ class Interpreter:
|
||||
if 'debug-utility' in self._debugflags or 'debug-cmd' in self._debugflags:
|
||||
self.log('returncode ' + str(status)+ '\n')
|
||||
return status
|
||||
except CommandNotFound as e:
|
||||
except CommandNotFound, e:
|
||||
print >>self._redirs.stderr, str(e)
|
||||
self._redirs.stderr.flush()
|
||||
# Command not found by non-interactive shell
|
||||
# return 127
|
||||
raise
|
||||
except RedirectionError as e:
|
||||
except RedirectionError, e:
|
||||
# TODO: should be handled depending on the utility status
|
||||
print >>self._redirs.stderr, str(e)
|
||||
self._redirs.stderr.flush()
|
||||
@@ -948,7 +948,7 @@ class Interpreter:
|
||||
status = self.execute(func, redirs)
|
||||
finally:
|
||||
redirs.close()
|
||||
except ReturnSignal as e:
|
||||
except ReturnSignal, e:
|
||||
status = int(e.args[0])
|
||||
env['?'] = status
|
||||
return status
|
||||
@@ -1044,7 +1044,7 @@ class Interpreter:
|
||||
|
||||
except ReturnSignal:
|
||||
raise
|
||||
except ShellError as e:
|
||||
except ShellError, e:
|
||||
if is_special or isinstance(e, (ExitSignal,
|
||||
ShellSyntaxError, ExpansionError)):
|
||||
raise e
|
||||
|
||||
@@ -105,11 +105,6 @@ class RunQueueScheduler(object):
|
||||
if self.rq.runq_running[taskid] == 1:
|
||||
continue
|
||||
if self.rq.runq_buildable[taskid] == 1:
|
||||
fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[taskid]]
|
||||
taskname = self.rqdata.runq_task[taskid]
|
||||
stamp = bb.build.stampfile(taskname, self.rqdata.dataCache, fn)
|
||||
if stamp in self.rq.build_stamps.values():
|
||||
continue
|
||||
return taskid
|
||||
|
||||
def next(self):
|
||||
@@ -758,6 +753,7 @@ class RunQueueData:
|
||||
self.rqdata.runq_depends[task],
|
||||
self.rqdata.runq_revdeps[task])
|
||||
|
||||
|
||||
class RunQueue:
|
||||
def __init__(self, cooker, cfgData, dataCache, taskData, targets):
|
||||
|
||||
@@ -933,7 +929,7 @@ class RunQueue:
|
||||
|
||||
if self.state is runQueuePrepare:
|
||||
self.rqexe = RunQueueExecuteDummy(self)
|
||||
if self.rqdata.prepare() == 0:
|
||||
if self.rqdata.prepare() is 0:
|
||||
self.state = runQueueComplete
|
||||
else:
|
||||
self.state = runQueueSceneInit
|
||||
@@ -1014,7 +1010,6 @@ class RunQueueExecute:
|
||||
self.runq_complete = []
|
||||
self.build_pids = {}
|
||||
self.build_pipes = {}
|
||||
self.build_stamps = {}
|
||||
self.failed_fnids = []
|
||||
|
||||
def runqueue_process_waitpid(self):
|
||||
@@ -1023,15 +1018,12 @@ class RunQueueExecute:
|
||||
collect the process exit codes and close the information pipe.
|
||||
"""
|
||||
result = os.waitpid(-1, os.WNOHANG)
|
||||
if result[0] == 0 and result[1] == 0:
|
||||
if result[0] is 0 and result[1] is 0:
|
||||
return None
|
||||
task = self.build_pids[result[0]]
|
||||
del self.build_pids[result[0]]
|
||||
self.build_pipes[result[0]].close()
|
||||
del self.build_pipes[result[0]]
|
||||
# self.build_stamps[result[0]] may not exist when use shared work directory.
|
||||
if result[0] in self.build_stamps.keys():
|
||||
del self.build_stamps[result[0]]
|
||||
if result[1] != 0:
|
||||
self.task_fail(task, result[1]>>8)
|
||||
else:
|
||||
@@ -1068,32 +1060,23 @@ class RunQueueExecute:
|
||||
return
|
||||
|
||||
def fork_off_task(self, fn, task, taskname, quieterrors=False):
|
||||
# We need to setup the environment BEFORE the fork, since
|
||||
# a fork() or exec*() activates PSEUDO...
|
||||
|
||||
envbackup = {}
|
||||
umask = None
|
||||
envbackup = os.environ.copy()
|
||||
env = {}
|
||||
|
||||
taskdep = self.rqdata.dataCache.task_deps[fn]
|
||||
if 'umask' in taskdep and taskname in taskdep['umask']:
|
||||
# umask might come in as a number or text string..
|
||||
try:
|
||||
umask = int(taskdep['umask'][taskname],8)
|
||||
except TypeError:
|
||||
umask = taskdep['umask'][taskname]
|
||||
|
||||
if 'fakeroot' in taskdep and taskname in taskdep['fakeroot']:
|
||||
envvars = (self.rqdata.dataCache.fakerootenv[fn] or "").split()
|
||||
for key, value in (var.split('=') for var in envvars):
|
||||
envbackup[key] = os.environ.get(key)
|
||||
os.environ[key] = value
|
||||
for var in envvars:
|
||||
comps = var.split("=")
|
||||
env[comps[0]] = comps[1]
|
||||
|
||||
fakedirs = (self.rqdata.dataCache.fakerootdirs[fn] or "").split()
|
||||
for p in fakedirs:
|
||||
bb.utils.mkdirhier(p)
|
||||
|
||||
logger.debug(2, 'Running %s:%s under fakeroot, fakedirs: %s' %
|
||||
(fn, taskname, ', '.join(fakedirs)))
|
||||
bb.mkdirhier(p)
|
||||
logger.debug(2, "Running %s:%s under fakeroot, state dir is %s" % (fn, taskname, fakedirs))
|
||||
for e in env:
|
||||
os.putenv(e, env[e])
|
||||
|
||||
sys.stdout.flush()
|
||||
sys.stderr.flush()
|
||||
@@ -1104,7 +1087,6 @@ class RunQueueExecute:
|
||||
pid = os.fork()
|
||||
except OSError as e:
|
||||
bb.msg.fatal(bb.msg.domain.RunQueue, "fork failed: %d (%s)" % (e.errno, e.strerror))
|
||||
|
||||
if pid == 0:
|
||||
pipein.close()
|
||||
|
||||
@@ -1112,6 +1094,12 @@ class RunQueueExecute:
|
||||
# events
|
||||
bb.event.worker_pid = os.getpid()
|
||||
bb.event.worker_pipe = pipeout
|
||||
bb.event.useStdout = False
|
||||
|
||||
# Child processes should send their messages to the UI
|
||||
# process via the server process, not print them
|
||||
# themselves
|
||||
bblogger.handlers = [bb.event.LogHandler()]
|
||||
|
||||
self.rq.state = runQueueChildProcess
|
||||
# Make the child the process group leader
|
||||
@@ -1120,43 +1108,47 @@ class RunQueueExecute:
|
||||
newsi = os.open(os.devnull, os.O_RDWR)
|
||||
os.dup2(newsi, sys.stdin.fileno())
|
||||
|
||||
if umask:
|
||||
os.umask(umask)
|
||||
|
||||
bb.data.setVar("BB_WORKERCONTEXT", "1", self.cooker.configuration.data)
|
||||
the_data = bb.cache.Cache.loadDataFull(fn, self.cooker.get_file_appends(fn), self.cooker.configuration.data)
|
||||
|
||||
env2 = bb.data.export_vars(the_data)
|
||||
env2 = bb.data.export_envvars(env2, the_data)
|
||||
|
||||
for e in os.environ:
|
||||
os.unsetenv(e)
|
||||
for e in env2:
|
||||
os.putenv(e, env2[e])
|
||||
for e in env:
|
||||
os.putenv(e, env[e])
|
||||
|
||||
if quieterrors:
|
||||
the_data.setVarFlag(taskname, "quieterrors", "1")
|
||||
|
||||
bb.data.setVar("__RUNQUEUE_DO_NOT_USE_EXTERNALLY", self, self.cooker.configuration.data)
|
||||
bb.data.setVar("__RUNQUEUE_DO_NOT_USE_EXTERNALLY2", fn, self.cooker.configuration.data)
|
||||
bb.data.setVar("BB_WORKERCONTEXT", "1", the_data)
|
||||
bb.parse.siggen.set_taskdata(self.rqdata.hashes, self.rqdata.hash_deps)
|
||||
|
||||
for h in self.rqdata.hashes:
|
||||
bb.data.setVar("BBHASH_%s" % h, self.rqdata.hashes[h], the_data)
|
||||
for h in self.rqdata.hash_deps:
|
||||
bb.data.setVar("BBHASHDEPS_%s" % h, self.rqdata.hash_deps[h], the_data)
|
||||
|
||||
bb.data.setVar("BB_TASKHASH", self.rqdata.runq_hash[task], the_data)
|
||||
|
||||
ret = 0
|
||||
try:
|
||||
the_data = bb.cache.Cache.loadDataFull(fn, self.cooker.get_file_appends(fn), self.cooker.configuration.data)
|
||||
the_data.setVar('BB_TASKHASH', self.rqdata.runq_hash[task])
|
||||
for h in self.rqdata.hashes:
|
||||
the_data.setVar("BBHASH_%s" % h, self.rqdata.hashes[h])
|
||||
for h in self.rqdata.hash_deps:
|
||||
the_data.setVar("BBHASHDEPS_%s" % h, self.rqdata.hash_deps[h])
|
||||
|
||||
os.environ.update(bb.data.exported_vars(the_data))
|
||||
|
||||
if quieterrors:
|
||||
the_data.setVarFlag(taskname, "quieterrors", "1")
|
||||
|
||||
except Exception as exc:
|
||||
if not quieterrors:
|
||||
logger.critical(str(exc))
|
||||
os._exit(1)
|
||||
try:
|
||||
if not self.cooker.configuration.dry_run:
|
||||
ret = bb.build.exec_task(fn, taskname, the_data)
|
||||
os._exit(ret)
|
||||
except:
|
||||
os._exit(1)
|
||||
else:
|
||||
for key, value in envbackup.iteritems():
|
||||
if value is None:
|
||||
del os.environ[key]
|
||||
else:
|
||||
os.environ[key] = value
|
||||
|
||||
for e in env:
|
||||
os.unsetenv(e)
|
||||
for e in envbackup:
|
||||
if e in env:
|
||||
os.putenv(e, envbackup[e])
|
||||
|
||||
return pid, pipein, pipeout
|
||||
|
||||
@@ -1248,7 +1240,7 @@ class RunQueueExecuteTasks(RunQueueExecute):
|
||||
modname, name = sched.rsplit(".", 1)
|
||||
try:
|
||||
module = __import__(modname, fromlist=(name,))
|
||||
except ImportError as exc:
|
||||
except ImportError, exc:
|
||||
logger.critical("Unable to import scheduler '%s' from '%s': %s" % (name, modname, exc))
|
||||
raise SystemExit(1)
|
||||
else:
|
||||
@@ -1339,7 +1331,6 @@ class RunQueueExecuteTasks(RunQueueExecute):
|
||||
|
||||
self.build_pids[pid] = task
|
||||
self.build_pipes[pid] = runQueuePipe(pipein, pipeout, self.cfgData)
|
||||
self.build_stamps[pid] = bb.build.stampfile(taskname, self.rqdata.dataCache, fn)
|
||||
self.runq_running[task] = 1
|
||||
self.stats.taskActive()
|
||||
if self.stats.active < self.number_tasks:
|
||||
@@ -1462,25 +1453,16 @@ class RunQueueExecuteScenequeue(RunQueueExecute):
|
||||
sq_taskname = []
|
||||
sq_task = []
|
||||
noexec = []
|
||||
stamppresent = []
|
||||
for task in xrange(len(self.sq_revdeps)):
|
||||
realtask = self.rqdata.runq_setscene[task]
|
||||
fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[realtask]]
|
||||
taskname = self.rqdata.runq_task[realtask]
|
||||
taskdep = self.rqdata.dataCache.task_deps[fn]
|
||||
|
||||
if 'noexec' in taskdep and taskname in taskdep['noexec']:
|
||||
noexec.append(task)
|
||||
self.task_skip(task)
|
||||
bb.build.make_stamp(taskname + "_setscene", self.rqdata.dataCache, fn)
|
||||
continue
|
||||
|
||||
if self.rq.check_stamp_task(realtask, taskname + "_setscene"):
|
||||
logger.debug(2, 'Setscene stamp current for task %s(%s)', task, self.rqdata.get_user_idstring(realtask))
|
||||
stamppresent.append(task)
|
||||
self.task_skip(task)
|
||||
continue
|
||||
|
||||
sq_fn.append(fn)
|
||||
sq_hashfn.append(self.rqdata.dataCache.hashfn[fn])
|
||||
sq_hash.append(self.rqdata.runq_hash[realtask])
|
||||
@@ -1490,7 +1472,7 @@ class RunQueueExecuteScenequeue(RunQueueExecute):
|
||||
locs = { "sq_fn" : sq_fn, "sq_task" : sq_taskname, "sq_hash" : sq_hash, "sq_hashfn" : sq_hashfn, "d" : self.cooker.configuration.data }
|
||||
valid = bb.utils.better_eval(call, locs)
|
||||
|
||||
valid_new = stamppresent
|
||||
valid_new = []
|
||||
for v in valid:
|
||||
valid_new.append(sq_task[v])
|
||||
|
||||
|
||||
@@ -28,6 +28,7 @@
|
||||
|
||||
import time
|
||||
import bb
|
||||
import pickle
|
||||
import signal
|
||||
|
||||
DEBUG = False
|
||||
@@ -35,7 +36,8 @@ DEBUG = False
|
||||
import inspect, select
|
||||
|
||||
class BitBakeServerCommands():
|
||||
def __init__(self, server):
|
||||
def __init__(self, server, cooker):
|
||||
self.cooker = cooker
|
||||
self.server = server
|
||||
|
||||
def runCommand(self, command):
|
||||
@@ -67,7 +69,7 @@ class BBUIEventQueue:
|
||||
self.parent = parent
|
||||
@staticmethod
|
||||
def send(event):
|
||||
bb.server.none.eventQueue.append(event)
|
||||
bb.server.none.eventQueue.append(pickle.loads(event))
|
||||
@staticmethod
|
||||
def quit():
|
||||
return
|
||||
@@ -104,17 +106,13 @@ class BBUIEventQueue:
|
||||
def chldhandler(signum, stackframe):
|
||||
pass
|
||||
|
||||
class BitBakeNoneServer():
|
||||
class BitBakeServer():
|
||||
# remove this when you're done with debugging
|
||||
# allow_reuse_address = True
|
||||
|
||||
def __init__(self):
|
||||
def __init__(self, cooker):
|
||||
self._idlefuns = {}
|
||||
self.commands = BitBakeServerCommands(self)
|
||||
|
||||
def addcooker(self, cooker):
|
||||
self.cooker = cooker
|
||||
self.commands.cooker = cooker
|
||||
self.commands = BitBakeServerCommands(self, cooker)
|
||||
|
||||
def register_idle_function(self, function, data):
|
||||
"""Register a function to be called while the server is idle"""
|
||||
@@ -159,10 +157,25 @@ class BitBakeNoneServer():
|
||||
except:
|
||||
pass
|
||||
|
||||
class BitBakeServerConnection():
|
||||
class BitbakeServerInfo():
|
||||
def __init__(self, server):
|
||||
self.server = server.server
|
||||
self.connection = self.server.commands
|
||||
self.server = server
|
||||
self.commands = server.commands
|
||||
|
||||
class BitBakeServerFork():
|
||||
def __init__(self, cooker, server, serverinfo, logfile):
|
||||
serverinfo.logfile = logfile
|
||||
serverinfo.cooker = cooker
|
||||
serverinfo.server = server
|
||||
|
||||
class BitbakeUILauch():
|
||||
def launch(self, serverinfo, uifunc, *args):
|
||||
return bb.cooker.server_main(serverinfo.cooker, uifunc, *args)
|
||||
|
||||
class BitBakeServerConnection():
|
||||
def __init__(self, serverinfo):
|
||||
self.server = serverinfo.server
|
||||
self.connection = serverinfo.commands
|
||||
self.events = bb.server.none.BBUIEventQueue(self.server)
|
||||
for event in bb.event.ui_queue:
|
||||
self.events.queue_event(event)
|
||||
@@ -176,28 +189,3 @@ class BitBakeServerConnection():
|
||||
self.connection.terminateServer()
|
||||
except:
|
||||
pass
|
||||
|
||||
class BitBakeServer(object):
|
||||
def initServer(self):
|
||||
self.server = BitBakeNoneServer()
|
||||
|
||||
def addcooker(self, cooker):
|
||||
self.cooker = cooker
|
||||
self.server.addcooker(cooker)
|
||||
|
||||
def getServerIdleCB(self):
|
||||
return self.server.register_idle_function
|
||||
|
||||
def saveConnectionDetails(self):
|
||||
return
|
||||
|
||||
def detach(self, cooker_logfile):
|
||||
self.logfile = cooker_logfile
|
||||
|
||||
def establishConnection(self):
|
||||
self.connection = BitBakeServerConnection(self)
|
||||
return self.connection
|
||||
|
||||
def launchUI(self, uifunc, *args):
|
||||
return bb.cooker.server_main(self.cooker, uifunc, *args)
|
||||
|
||||
|
||||
@@ -1,270 +0,0 @@
|
||||
#
|
||||
# BitBake Process based server.
|
||||
#
|
||||
# Copyright (C) 2010 Bob Foerster <robert@erafx.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License version 2 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License along
|
||||
# with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
"""
|
||||
This module implements a multiprocessing.Process based server for bitbake.
|
||||
"""
|
||||
|
||||
import bb
|
||||
import bb.event
|
||||
import itertools
|
||||
import logging
|
||||
import multiprocessing
|
||||
import os
|
||||
import signal
|
||||
import sys
|
||||
import time
|
||||
from Queue import Empty
|
||||
from multiprocessing import Event, Process, util, Queue, Pipe, queues
|
||||
|
||||
logger = logging.getLogger('BitBake')
|
||||
|
||||
class ServerCommunicator():
|
||||
def __init__(self, connection):
|
||||
self.connection = connection
|
||||
|
||||
def runCommand(self, command):
|
||||
# @todo try/except
|
||||
self.connection.send(command)
|
||||
|
||||
while True:
|
||||
# don't let the user ctrl-c while we're waiting for a response
|
||||
try:
|
||||
if self.connection.poll(.5):
|
||||
return self.connection.recv()
|
||||
else:
|
||||
return None
|
||||
except KeyboardInterrupt:
|
||||
pass
|
||||
|
||||
|
||||
class EventAdapter():
|
||||
"""
|
||||
Adapter to wrap our event queue since the caller (bb.event) expects to
|
||||
call a send() method, but our actual queue only has put()
|
||||
"""
|
||||
def __init__(self, queue):
|
||||
self.queue = queue
|
||||
|
||||
def send(self, event):
|
||||
try:
|
||||
self.queue.put(event)
|
||||
except Exception as err:
|
||||
print("EventAdapter puked: %s" % str(err))
|
||||
|
||||
|
||||
class ProcessServer(Process):
|
||||
profile_filename = "profile.log"
|
||||
profile_processed_filename = "profile.log.processed"
|
||||
|
||||
def __init__(self, command_channel, event_queue):
|
||||
Process.__init__(self)
|
||||
self.command_channel = command_channel
|
||||
self.event_queue = event_queue
|
||||
self.event = EventAdapter(event_queue)
|
||||
self._idlefunctions = {}
|
||||
self.quit = False
|
||||
|
||||
self.keep_running = Event()
|
||||
self.keep_running.set()
|
||||
|
||||
def register_idle_function(self, function, data):
|
||||
"""Register a function to be called while the server is idle"""
|
||||
assert hasattr(function, '__call__')
|
||||
self._idlefunctions[function] = data
|
||||
|
||||
def run(self):
|
||||
for event in bb.event.ui_queue:
|
||||
self.event_queue.put(event)
|
||||
self.event_handle = bb.event.register_UIHhandler(self)
|
||||
bb.cooker.server_main(self.cooker, self.main)
|
||||
|
||||
def main(self):
|
||||
# Ignore SIGINT within the server, as all SIGINT handling is done by
|
||||
# the UI and communicated to us
|
||||
signal.signal(signal.SIGINT, signal.SIG_IGN)
|
||||
while self.keep_running.is_set():
|
||||
try:
|
||||
if self.command_channel.poll():
|
||||
command = self.command_channel.recv()
|
||||
self.runCommand(command)
|
||||
|
||||
self.idle_commands(.1)
|
||||
except Exception:
|
||||
logger.exception('Running command %s', command)
|
||||
|
||||
self.event_queue.cancel_join_thread()
|
||||
bb.event.unregister_UIHhandler(self.event_handle)
|
||||
self.command_channel.close()
|
||||
self.cooker.stop()
|
||||
self.idle_commands(.1)
|
||||
|
||||
def idle_commands(self, delay):
|
||||
nextsleep = delay
|
||||
|
||||
for function, data in self._idlefunctions.items():
|
||||
try:
|
||||
retval = function(self, data, False)
|
||||
if retval is False:
|
||||
del self._idlefunctions[function]
|
||||
elif retval is True:
|
||||
nextsleep = None
|
||||
elif nextsleep is None:
|
||||
continue
|
||||
elif retval < nextsleep:
|
||||
nextsleep = retval
|
||||
except SystemExit:
|
||||
raise
|
||||
except Exception:
|
||||
logger.exception('Running idle function')
|
||||
|
||||
if nextsleep is not None:
|
||||
time.sleep(nextsleep)
|
||||
|
||||
def runCommand(self, command):
|
||||
"""
|
||||
Run a cooker command on the server
|
||||
"""
|
||||
self.command_channel.send(self.cooker.command.runCommand(command))
|
||||
|
||||
def stop(self):
|
||||
self.keep_running.clear()
|
||||
|
||||
def bootstrap_2_6_6(self):
|
||||
"""Pulled from python 2.6.6. Needed to ensure we have the fix from
|
||||
http://bugs.python.org/issue5313 when running on python version 2.6.2
|
||||
or lower."""
|
||||
|
||||
try:
|
||||
self._children = set()
|
||||
self._counter = itertools.count(1)
|
||||
try:
|
||||
sys.stdin.close()
|
||||
sys.stdin = open(os.devnull)
|
||||
except (OSError, ValueError):
|
||||
pass
|
||||
multiprocessing._current_process = self
|
||||
util._finalizer_registry.clear()
|
||||
util._run_after_forkers()
|
||||
util.info('child process calling self.run()')
|
||||
try:
|
||||
self.run()
|
||||
exitcode = 0
|
||||
finally:
|
||||
util._exit_function()
|
||||
except SystemExit as e:
|
||||
if not e.args:
|
||||
exitcode = 1
|
||||
elif type(e.args[0]) is int:
|
||||
exitcode = e.args[0]
|
||||
else:
|
||||
sys.stderr.write(e.args[0] + '\n')
|
||||
sys.stderr.flush()
|
||||
exitcode = 1
|
||||
except:
|
||||
exitcode = 1
|
||||
import traceback
|
||||
sys.stderr.write('Process %s:\n' % self.name)
|
||||
sys.stderr.flush()
|
||||
traceback.print_exc()
|
||||
|
||||
util.info('process exiting with exitcode %d' % exitcode)
|
||||
return exitcode
|
||||
|
||||
# Python versions 2.6.0 through 2.6.2 suffer from a multiprocessing bug
|
||||
# which can result in a bitbake server hang during the parsing process
|
||||
if (2, 6, 0) <= sys.version_info < (2, 6, 3):
|
||||
_bootstrap = bootstrap_2_6_6
|
||||
|
||||
class BitBakeServerConnection():
|
||||
def __init__(self, server):
|
||||
self.server = server
|
||||
self.procserver = server.server
|
||||
self.connection = ServerCommunicator(server.ui_channel)
|
||||
self.events = server.event_queue
|
||||
|
||||
def terminate(self, force = False):
|
||||
signal.signal(signal.SIGINT, signal.SIG_IGN)
|
||||
self.procserver.stop()
|
||||
if force:
|
||||
self.procserver.join(0.5)
|
||||
if self.procserver.is_alive():
|
||||
self.procserver.terminate()
|
||||
self.procserver.join()
|
||||
else:
|
||||
self.procserver.join()
|
||||
while True:
|
||||
try:
|
||||
event = self.server.event_queue.get(block=False)
|
||||
except (Empty, IOError):
|
||||
break
|
||||
if isinstance(event, logging.LogRecord):
|
||||
logger.handle(event)
|
||||
self.server.ui_channel.close()
|
||||
self.server.event_queue.close()
|
||||
if force:
|
||||
sys.exit(1)
|
||||
|
||||
# Wrap Queue to provide API which isn't server implementation specific
|
||||
class ProcessEventQueue(multiprocessing.queues.Queue):
|
||||
def waitEvent(self, timeout):
|
||||
try:
|
||||
return self.get(True, timeout)
|
||||
except Empty:
|
||||
return None
|
||||
|
||||
def getEvent(self):
|
||||
try:
|
||||
return self.get(False)
|
||||
except Empty:
|
||||
return None
|
||||
|
||||
|
||||
class BitBakeServer(object):
|
||||
def initServer(self):
|
||||
# establish communication channels. We use bidirectional pipes for
|
||||
# ui <--> server command/response pairs
|
||||
# and a queue for server -> ui event notifications
|
||||
#
|
||||
self.ui_channel, self.server_channel = Pipe()
|
||||
self.event_queue = ProcessEventQueue(0)
|
||||
|
||||
self.server = ProcessServer(self.server_channel, self.event_queue)
|
||||
|
||||
def addcooker(self, cooker):
|
||||
self.cooker = cooker
|
||||
self.server.cooker = cooker
|
||||
|
||||
def getServerIdleCB(self):
|
||||
return self.server.register_idle_function
|
||||
|
||||
def saveConnectionDetails(self):
|
||||
return
|
||||
|
||||
def detach(self, cooker_logfile):
|
||||
self.server.start()
|
||||
return
|
||||
|
||||
def establishConnection(self):
|
||||
self.connection = BitBakeServerConnection(self)
|
||||
signal.signal(signal.SIGTERM, lambda i, s: self.connection.terminate(force=True))
|
||||
return self.connection
|
||||
|
||||
def launchUI(self, uifunc, *args):
|
||||
return bb.cooker.server_main(self.cooker, uifunc, *args)
|
||||
|
||||
@@ -122,7 +122,8 @@ def _create_server(host, port):
|
||||
return s
|
||||
|
||||
class BitBakeServerCommands():
|
||||
def __init__(self, server):
|
||||
def __init__(self, server, cooker):
|
||||
self.cooker = cooker
|
||||
self.server = server
|
||||
|
||||
def registerEventHandler(self, host, port):
|
||||
@@ -150,7 +151,7 @@ class BitBakeServerCommands():
|
||||
Trigger the server to quit
|
||||
"""
|
||||
self.server.quit = True
|
||||
print("Server (cooker) exiting")
|
||||
print("Server (cooker) exitting")
|
||||
return
|
||||
|
||||
def ping(self):
|
||||
@@ -159,11 +160,11 @@ class BitBakeServerCommands():
|
||||
"""
|
||||
return True
|
||||
|
||||
class BitBakeXMLRPCServer(SimpleXMLRPCServer):
|
||||
class BitBakeServer(SimpleXMLRPCServer):
|
||||
# remove this when you're done with debugging
|
||||
# allow_reuse_address = True
|
||||
|
||||
def __init__(self, interface = ("localhost", 0)):
|
||||
def __init__(self, cooker, interface = ("localhost", 0)):
|
||||
"""
|
||||
Constructor
|
||||
"""
|
||||
@@ -173,12 +174,9 @@ class BitBakeXMLRPCServer(SimpleXMLRPCServer):
|
||||
self._idlefuns = {}
|
||||
self.host, self.port = self.socket.getsockname()
|
||||
#self.register_introspection_functions()
|
||||
self.commands = BitBakeServerCommands(self)
|
||||
self.autoregister_all_functions(self.commands, "")
|
||||
|
||||
def addcooker(self, cooker):
|
||||
commands = BitBakeServerCommands(self, cooker)
|
||||
self.autoregister_all_functions(commands, "")
|
||||
self.cooker = cooker
|
||||
self.commands.cooker = cooker
|
||||
|
||||
def autoregister_all_functions(self, context, prefix):
|
||||
"""
|
||||
@@ -246,6 +244,14 @@ class BitbakeServerInfo():
|
||||
self.host = server.host
|
||||
self.port = server.port
|
||||
|
||||
class BitBakeServerFork():
|
||||
def __init__(self, cooker, server, serverinfo, logfile):
|
||||
daemonize.createDaemon(server.serve_forever, logfile)
|
||||
|
||||
class BitbakeUILauch():
|
||||
def launch(self, serverinfo, uifunc, *args):
|
||||
return uifunc(*args)
|
||||
|
||||
class BitBakeServerConnection():
|
||||
def __init__(self, serverinfo):
|
||||
self.connection = _create_server(serverinfo.host, serverinfo.port)
|
||||
@@ -265,31 +271,3 @@ class BitBakeServerConnection():
|
||||
self.connection.terminateServer()
|
||||
except:
|
||||
pass
|
||||
|
||||
class BitBakeServer(object):
|
||||
def initServer(self):
|
||||
self.server = BitBakeXMLRPCServer()
|
||||
|
||||
def addcooker(self, cooker):
|
||||
self.cooker = cooker
|
||||
self.server.addcooker(cooker)
|
||||
|
||||
def getServerIdleCB(self):
|
||||
return self.server.register_idle_function
|
||||
|
||||
def saveConnectionDetails(self):
|
||||
self.serverinfo = BitbakeServerInfo(self.server)
|
||||
|
||||
def detach(self, cooker_logfile):
|
||||
daemonize.createDaemon(self.server.serve_forever, cooker_logfile)
|
||||
del self.cooker
|
||||
del self.server
|
||||
|
||||
def establishConnection(self):
|
||||
self.connection = BitBakeServerConnection(self.serverinfo)
|
||||
return self.connection
|
||||
|
||||
def launchUI(self, uifunc, *args):
|
||||
return uifunc(*args)
|
||||
|
||||
|
||||
|
||||
@@ -407,7 +407,7 @@ SRC_URI = ""
|
||||
|
||||
def parse( self, params ):
|
||||
"""(Re-)parse .bb files and calculate the dependency graph"""
|
||||
cooker.status = cache.CacheData(cooker.caches_array)
|
||||
cooker.status = cache.CacheData()
|
||||
ignore = data.getVar("ASSUME_PROVIDED", cooker.configuration.data, 1) or ""
|
||||
cooker.status.ignored_dependencies = set( ignore.split() )
|
||||
cooker.handleCollections( data.getVar("BBFILE_COLLECTIONS", cooker.configuration.data, 1) )
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
import hashlib
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import bb.data
|
||||
|
||||
@@ -47,9 +46,6 @@ class SignatureGenerator(object):
|
||||
def stampfile(self, stampbase, file_name, taskname, extrainfo):
|
||||
return ("%s.%s.%s" % (stampbase, taskname, extrainfo)).rstrip('.')
|
||||
|
||||
def dump_sigtask(self, fn, task, stampbase, runtime):
|
||||
return
|
||||
|
||||
class SignatureGeneratorBasic(SignatureGenerator):
|
||||
"""
|
||||
"""
|
||||
@@ -82,10 +78,6 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
||||
data = d.getVar(task, False)
|
||||
lookupcache[task] = data
|
||||
|
||||
if data is None:
|
||||
bb.error("Task %s from %s seems to be empty?!" % (task, fn))
|
||||
data = ''
|
||||
|
||||
newdeps = gendeps[task]
|
||||
seen = set()
|
||||
while newdeps:
|
||||
@@ -107,7 +99,9 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
||||
var = d.getVar(dep, False)
|
||||
lookupcache[dep] = var
|
||||
if var:
|
||||
data = data + str(var)
|
||||
data = data + var
|
||||
if data is None:
|
||||
bb.error("Task %s from %s seems to be empty?!" % (task, fn))
|
||||
self.basehash[fn + "." + task] = hashlib.md5(data).hexdigest()
|
||||
taskdeps[task] = sorted(alldeps)
|
||||
|
||||
|
||||
@@ -41,7 +41,7 @@ class TaskData:
|
||||
"""
|
||||
BitBake Task Data implementation
|
||||
"""
|
||||
def __init__(self, abort = True, tryaltconfigs = False, skiplist = None):
|
||||
def __init__(self, abort = True, tryaltconfigs = False):
|
||||
self.build_names_index = []
|
||||
self.run_names_index = []
|
||||
self.fn_index = []
|
||||
@@ -70,8 +70,6 @@ class TaskData:
|
||||
self.abort = abort
|
||||
self.tryaltconfigs = tryaltconfigs
|
||||
|
||||
self.skiplist = skiplist
|
||||
|
||||
def getbuild_id(self, name):
|
||||
"""
|
||||
Return an ID number for the build target name.
|
||||
@@ -350,22 +348,6 @@ class TaskData:
|
||||
dependees.append(self.fn_index[fnid])
|
||||
return dependees
|
||||
|
||||
def get_reasons(self, item, runtime=False):
|
||||
"""
|
||||
Get the reason(s) for an item not being provided, if any
|
||||
"""
|
||||
reasons = []
|
||||
if self.skiplist:
|
||||
for fn in self.skiplist:
|
||||
skipitem = self.skiplist[fn]
|
||||
if skipitem.pn == item:
|
||||
reasons.append("%s was skipped: %s" % (skipitem.pn, skipitem.skipreason))
|
||||
elif runtime and item in skipitem.rprovides:
|
||||
reasons.append("%s RPROVIDES %s but was skipped: %s" % (skipitem.pn, item, skipitem.skipreason))
|
||||
elif not runtime and item in skipitem.provides:
|
||||
reasons.append("%s PROVIDES %s but was skipped: %s" % (skipitem.pn, item, skipitem.skipreason))
|
||||
return reasons
|
||||
|
||||
def add_provider(self, cfgData, dataCache, item):
|
||||
try:
|
||||
self.add_provider_internal(cfgData, dataCache, item)
|
||||
@@ -387,7 +369,7 @@ class TaskData:
|
||||
return
|
||||
|
||||
if not item in dataCache.providers:
|
||||
bb.event.fire(bb.event.NoProvider(item, dependees=self.get_dependees_str(item), reasons=self.get_reasons(item)), cfgData)
|
||||
bb.event.fire(bb.event.NoProvider(item, dependees=self.get_rdependees_str(item)), cfgData)
|
||||
raise bb.providers.NoProvider(item)
|
||||
|
||||
if self.have_build_target(item):
|
||||
@@ -399,7 +381,7 @@ class TaskData:
|
||||
eligible = [p for p in eligible if not self.getfn_id(p) in self.failed_fnids]
|
||||
|
||||
if not eligible:
|
||||
bb.event.fire(bb.event.NoProvider(item, dependees=self.get_dependees_str(item), reasons=["No eligible PROVIDERs exist for '%s'" % item]), cfgData)
|
||||
bb.event.fire(bb.event.NoProvider(item, dependees=self.get_dependees_str(item)), cfgData)
|
||||
raise bb.providers.NoProvider(item)
|
||||
|
||||
if len(eligible) > 1 and foundUnique == False:
|
||||
@@ -436,14 +418,14 @@ class TaskData:
|
||||
all_p = bb.providers.getRuntimeProviders(dataCache, item)
|
||||
|
||||
if not all_p:
|
||||
bb.event.fire(bb.event.NoProvider(item, runtime=True, dependees=self.get_rdependees_str(item), reasons=self.get_reasons(item, True)), cfgData)
|
||||
bb.event.fire(bb.event.NoProvider(item, runtime=True, dependees=self.get_rdependees_str(item)), cfgData)
|
||||
raise bb.providers.NoRProvider(item)
|
||||
|
||||
eligible, numberPreferred = bb.providers.filterProvidersRunTime(all_p, item, cfgData, dataCache)
|
||||
eligible = [p for p in eligible if not self.getfn_id(p) in self.failed_fnids]
|
||||
|
||||
if not eligible:
|
||||
bb.event.fire(bb.event.NoProvider(item, runtime=True, dependees=self.get_rdependees_str(item), reasons=["No eligible RPROVIDERs exist for '%s'" % item]), cfgData)
|
||||
bb.event.fire(bb.event.NoProvider(item, runtime=True, dependees=self.get_rdependees_str(item)), cfgData)
|
||||
raise bb.providers.NoRProvider(item)
|
||||
|
||||
if len(eligible) > 1 and numberPreferred == 0:
|
||||
|
||||
@@ -1,319 +0,0 @@
|
||||
#
|
||||
# BitBake Graphical GTK User Interface
|
||||
#
|
||||
# Copyright (C) 2011 Intel Corporation
|
||||
#
|
||||
# Authored by Joshua Lock <josh@linux.intel.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License version 2 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License along
|
||||
# with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
import gobject
|
||||
import copy
|
||||
import re, os
|
||||
from bb import data
|
||||
|
||||
class Configurator(gobject.GObject):
|
||||
|
||||
"""
|
||||
A GObject to handle writing modified configuration values back
|
||||
to conf files.
|
||||
"""
|
||||
__gsignals__ = {
|
||||
"layers-loaded" : (gobject.SIGNAL_RUN_LAST,
|
||||
gobject.TYPE_NONE,
|
||||
()),
|
||||
"layers-changed" : (gobject.SIGNAL_RUN_LAST,
|
||||
gobject.TYPE_NONE,
|
||||
())
|
||||
}
|
||||
|
||||
def __init__(self):
|
||||
gobject.GObject.__init__(self)
|
||||
self.local = None
|
||||
self.bblayers = None
|
||||
self.enabled_layers = {}
|
||||
self.loaded_layers = {}
|
||||
self.config = {}
|
||||
self.orig_config = {}
|
||||
|
||||
# NOTE: cribbed from the cooker...
|
||||
def _parse(self, f, data, include=False):
|
||||
try:
|
||||
return bb.parse.handle(f, data, include)
|
||||
except (IOError, bb.parse.ParseError) as exc:
|
||||
parselog.critical("Unable to parse %s: %s" % (f, exc))
|
||||
sys.exit(1)
|
||||
|
||||
def _loadLocalConf(self, path):
|
||||
def getString(var):
|
||||
return bb.data.getVar(var, data, True) or ""
|
||||
|
||||
self.local = path
|
||||
|
||||
if self.orig_config:
|
||||
del self.orig_config
|
||||
self.orig_config = {}
|
||||
|
||||
data = bb.data.init()
|
||||
data = self._parse(self.local, data)
|
||||
|
||||
# We only need to care about certain variables
|
||||
mach = getString('MACHINE')
|
||||
if mach and mach != self.config.get('MACHINE', ''):
|
||||
self.config['MACHINE'] = mach
|
||||
sdkmach = getString('SDKMACHINE')
|
||||
if sdkmach and sdkmach != self.config.get('SDKMACHINE', ''):
|
||||
self.config['SDKMACHINE'] = sdkmach
|
||||
distro = getString('DISTRO')
|
||||
if distro and distro != self.config.get('DISTRO', ''):
|
||||
self.config['DISTRO'] = distro
|
||||
bbnum = getString('BB_NUMBER_THREADS')
|
||||
if bbnum and bbnum != self.config.get('BB_NUMBER_THREADS', ''):
|
||||
self.config['BB_NUMBER_THREADS'] = bbnum
|
||||
pmake = getString('PARALLEL_MAKE')
|
||||
if pmake and pmake != self.config.get('PARALLEL_MAKE', ''):
|
||||
self.config['PARALLEL_MAKE'] = pmake
|
||||
pclass = getString('PACKAGE_CLASSES')
|
||||
if pclass and pclass != self.config.get('PACKAGE_CLASSES', ''):
|
||||
self.config['PACKAGE_CLASSES'] = pclass
|
||||
fstypes = getString('IMAGE_FSTYPES')
|
||||
if fstypes and fstypes != self.config.get('IMAGE_FSTYPES', ''):
|
||||
self.config['IMAGE_FSTYPES'] = fstypes
|
||||
|
||||
# Values which aren't always set in the conf must be explicitly
|
||||
# loaded as empty values for save to work
|
||||
incompat = getString('INCOMPATIBLE_LICENSE')
|
||||
if incompat and incompat != self.config.get('INCOMPATIBLE_LICENSE', ''):
|
||||
self.config['INCOMPATIBLE_LICENSE'] = incompat
|
||||
else:
|
||||
self.config['INCOMPATIBLE_LICENSE'] = ""
|
||||
|
||||
# Non-standard, namespaces, variables for GUI preferences
|
||||
toolchain = getString('HOB_BUILD_TOOLCHAIN')
|
||||
if toolchain and toolchain != self.config.get('HOB_BUILD_TOOLCHAIN', ''):
|
||||
self.config['HOB_BUILD_TOOLCHAIN'] = toolchain
|
||||
header = getString('HOB_BUILD_TOOLCHAIN_HEADERS')
|
||||
if header and header != self.config.get('HOB_BUILD_TOOLCHAIN_HEADERS', ''):
|
||||
self.config['HOB_BUILD_TOOLCHAIN_HEADERS'] = header
|
||||
|
||||
self.orig_config = copy.deepcopy(self.config)
|
||||
|
||||
def setLocalConfVar(self, var, val):
|
||||
self.config[var] = val
|
||||
|
||||
def getLocalConfVar(self, var):
|
||||
if var in self.config:
|
||||
return self.config[var]
|
||||
else:
|
||||
return ""
|
||||
|
||||
def _loadLayerConf(self, path):
|
||||
self.bblayers = path
|
||||
self.enabled_layers = {}
|
||||
self.loaded_layers = {}
|
||||
data = bb.data.init()
|
||||
data = self._parse(self.bblayers, data)
|
||||
layers = (bb.data.getVar('BBLAYERS', data, True) or "").split()
|
||||
for layer in layers:
|
||||
# TODO: we may be better off calling the layer by its
|
||||
# BBFILE_COLLECTIONS value?
|
||||
name = self._getLayerName(layer)
|
||||
self.loaded_layers[name] = layer
|
||||
|
||||
self.enabled_layers = copy.deepcopy(self.loaded_layers)
|
||||
self.emit("layers-loaded")
|
||||
|
||||
def _addConfigFile(self, path):
|
||||
pref, sep, filename = path.rpartition("/")
|
||||
if filename == "local.conf" or filename == "hob.local.conf":
|
||||
self._loadLocalConf(path)
|
||||
elif filename == "bblayers.conf":
|
||||
self._loadLayerConf(path)
|
||||
|
||||
def _splitLayer(self, path):
|
||||
# we only care about the path up to /conf/layer.conf
|
||||
layerpath, conf, end = path.rpartition("/conf/")
|
||||
return layerpath
|
||||
|
||||
def _getLayerName(self, path):
|
||||
# Should this be the collection name?
|
||||
layerpath, sep, name = path.rpartition("/")
|
||||
return name
|
||||
|
||||
def disableLayer(self, layer):
|
||||
if layer in self.enabled_layers:
|
||||
del self.enabled_layers[layer]
|
||||
|
||||
def addLayerConf(self, confpath):
|
||||
layerpath = self._splitLayer(confpath)
|
||||
name = self._getLayerName(layerpath)
|
||||
if name not in self.enabled_layers:
|
||||
self.addLayer(name, layerpath)
|
||||
return name, layerpath
|
||||
else:
|
||||
return None, None
|
||||
|
||||
def addLayer(self, name, path):
|
||||
self.enabled_layers[name] = path
|
||||
|
||||
def _isLayerConfDirty(self):
|
||||
# if a different number of layers enabled to what was
|
||||
# loaded, definitely different
|
||||
if len(self.enabled_layers) != len(self.loaded_layers):
|
||||
return True
|
||||
|
||||
for layer in self.loaded_layers:
|
||||
# if layer loaded but no longer present, definitely dirty
|
||||
if layer not in self.enabled_layers:
|
||||
return True
|
||||
|
||||
for layer in self.enabled_layers:
|
||||
# if this layer wasn't present at load, definitely dirty
|
||||
if layer not in self.loaded_layers:
|
||||
return True
|
||||
# if this layers path has changed, definitely dirty
|
||||
if self.enabled_layers[layer] != self.loaded_layers[layer]:
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def _constructLayerEntry(self):
|
||||
"""
|
||||
Returns a string representing the new layer selection
|
||||
"""
|
||||
layers = self.enabled_layers.copy()
|
||||
# Construct BBLAYERS entry
|
||||
layer_entry = "BBLAYERS = \" \\\n"
|
||||
if 'meta' in layers:
|
||||
layer_entry = layer_entry + " %s \\\n" % layers['meta']
|
||||
del layers['meta']
|
||||
for layer in layers:
|
||||
layer_entry = layer_entry + " %s \\\n" % layers[layer]
|
||||
layer_entry = layer_entry + " \""
|
||||
|
||||
return "".join(layer_entry)
|
||||
|
||||
def writeLocalConf(self):
|
||||
# Dictionary containing only new or modified variables
|
||||
changed_values = {}
|
||||
for var in self.config:
|
||||
val = self.config[var]
|
||||
if self.orig_config.get(var, None) != val:
|
||||
changed_values[var] = val
|
||||
|
||||
if not len(changed_values):
|
||||
return
|
||||
|
||||
# Create a backup of the local.conf
|
||||
bkup = "%s~" % self.local
|
||||
os.rename(self.local, bkup)
|
||||
|
||||
# read the original conf into a list
|
||||
with open(bkup, 'r') as config:
|
||||
config_lines = config.readlines()
|
||||
|
||||
new_config_lines = ["\n"]
|
||||
for var in changed_values:
|
||||
# Convenience function for re.subn(). If the pattern matches
|
||||
# return a string which contains an assignment using the same
|
||||
# assignment operator as the old assignment.
|
||||
def replace_val(matchobj):
|
||||
var = matchobj.group(1) # config variable
|
||||
op = matchobj.group(2) # assignment operator
|
||||
val = changed_values[var] # new config value
|
||||
return "%s %s \"%s\"" % (var, op, val)
|
||||
|
||||
pattern = '^\s*(%s)\s*([+=?.]+)(.*)' % re.escape(var)
|
||||
p = re.compile(pattern)
|
||||
cnt = 0
|
||||
replaced = False
|
||||
|
||||
# Iterate over the local.conf lines and if they are a match
|
||||
# for the pattern comment out the line and append a new line
|
||||
# with the new VAR op "value" entry
|
||||
for line in config_lines:
|
||||
new_line, replacements = p.subn(replace_val, line)
|
||||
if replacements:
|
||||
config_lines[cnt] = "#%s" % line
|
||||
new_config_lines.append(new_line)
|
||||
replaced = True
|
||||
cnt = cnt + 1
|
||||
|
||||
if not replaced:
|
||||
new_config_lines.append("%s = \"%s\"\n" % (var, changed_values[var]))
|
||||
|
||||
# Add the modified variables
|
||||
config_lines.extend(new_config_lines)
|
||||
|
||||
# Write the updated lines list object to the local.conf
|
||||
with open(self.local, "w") as n:
|
||||
n.write("".join(config_lines))
|
||||
|
||||
del self.orig_config
|
||||
self.orig_config = copy.deepcopy(self.config)
|
||||
|
||||
def insertTempBBPath(self, bbpath, bbfiles):
|
||||
# Create a backup of the local.conf
|
||||
bkup = "%s~" % self.local
|
||||
os.rename(self.local, bkup)
|
||||
|
||||
# read the original conf into a list
|
||||
with open(bkup, 'r') as config:
|
||||
config_lines = config.readlines()
|
||||
|
||||
if bbpath:
|
||||
config_lines.append("BBPATH := \"${BBPATH}:%s\"\n" % bbpath)
|
||||
if bbfiles:
|
||||
config_lines.append("BBFILES := \"${BBFILES} %s\"\n" % bbfiles)
|
||||
|
||||
# Write the updated lines list object to the local.conf
|
||||
with open(self.local, "w") as n:
|
||||
n.write("".join(config_lines))
|
||||
|
||||
def writeLayerConf(self):
|
||||
# If we've not added/removed new layers don't write
|
||||
if not self._isLayerConfDirty():
|
||||
return
|
||||
|
||||
# This pattern should find the existing BBLAYERS
|
||||
pattern = 'BBLAYERS\s=\s\".*\"'
|
||||
|
||||
# Backup the users bblayers.conf
|
||||
bkup = "%s~" % self.bblayers
|
||||
os.rename(self.bblayers, bkup)
|
||||
|
||||
replacement = self._constructLayerEntry()
|
||||
|
||||
with open(bkup, "r") as f:
|
||||
contents = f.read()
|
||||
p = re.compile(pattern, re.DOTALL)
|
||||
new = p.sub(replacement, contents)
|
||||
|
||||
with open(self.bblayers, "w") as n:
|
||||
n.write(new)
|
||||
|
||||
# At some stage we should remove the backup we've created
|
||||
# though we should probably verify it first
|
||||
#os.remove(bkup)
|
||||
|
||||
# set loaded_layers for dirtiness tracking
|
||||
self.loaded_layers = copy.deepcopy(self.enabled_layers)
|
||||
|
||||
self.emit("layers-changed")
|
||||
|
||||
def configFound(self, handler, path):
|
||||
self._addConfigFile(path)
|
||||
|
||||
def loadConfig(self, path):
|
||||
self._addConfigFile(path)
|
||||
@@ -1,61 +0,0 @@
|
||||
#
|
||||
# BitBake Graphical GTK User Interface
|
||||
#
|
||||
# Copyright (C) 2011 Intel Corporation
|
||||
#
|
||||
# Authored by Joshua Lock <josh@linux.intel.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License version 2 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License along
|
||||
# with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
import gobject
|
||||
import gtk
|
||||
"""
|
||||
The following are convenience classes for implementing GNOME HIG compliant
|
||||
BitBake GUI's
|
||||
In summary: spacing = 12px, border-width = 6px
|
||||
"""
|
||||
|
||||
class CrumbsDialog(gtk.Dialog):
|
||||
"""
|
||||
A GNOME HIG compliant dialog widget.
|
||||
Add buttons with gtk.Dialog.add_button or gtk.Dialog.add_buttons
|
||||
"""
|
||||
def __init__(self, parent=None, label="", icon=gtk.STOCK_INFO):
|
||||
gtk.Dialog.__init__(self, "", parent, gtk.DIALOG_DESTROY_WITH_PARENT)
|
||||
|
||||
#self.set_property("has-separator", False) # note: deprecated in 2.22
|
||||
|
||||
self.set_border_width(6)
|
||||
self.vbox.set_property("spacing", 12)
|
||||
self.action_area.set_property("spacing", 12)
|
||||
self.action_area.set_property("border-width", 6)
|
||||
|
||||
first_row = gtk.HBox(spacing=12)
|
||||
first_row.set_property("border-width", 6)
|
||||
first_row.show()
|
||||
self.vbox.add(first_row)
|
||||
|
||||
self.icon = gtk.Image()
|
||||
self.icon.set_from_stock(icon, gtk.ICON_SIZE_DIALOG)
|
||||
self.icon.set_property("yalign", 0.00)
|
||||
self.icon.show()
|
||||
first_row.add(self.icon)
|
||||
|
||||
self.label = gtk.Label()
|
||||
self.label.set_use_markup(True)
|
||||
self.label.set_line_wrap(True)
|
||||
self.label.set_markup(label)
|
||||
self.label.set_property("yalign", 0.00)
|
||||
self.label.show()
|
||||
first_row.add(self.label)
|
||||
@@ -19,6 +19,7 @@
|
||||
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
import gobject
|
||||
from bb.ui.crumbs.progress import ProgressBar
|
||||
|
||||
progress_total = 0
|
||||
|
||||
@@ -28,102 +29,46 @@ class HobHandler(gobject.GObject):
|
||||
This object does BitBake event handling for the hob gui.
|
||||
"""
|
||||
__gsignals__ = {
|
||||
"machines-updated" : (gobject.SIGNAL_RUN_LAST,
|
||||
gobject.TYPE_NONE,
|
||||
(gobject.TYPE_PYOBJECT,)),
|
||||
"sdk-machines-updated": (gobject.SIGNAL_RUN_LAST,
|
||||
gobject.TYPE_NONE,
|
||||
(gobject.TYPE_PYOBJECT,)),
|
||||
"distros-updated" : (gobject.SIGNAL_RUN_LAST,
|
||||
gobject.TYPE_NONE,
|
||||
(gobject.TYPE_PYOBJECT,)),
|
||||
"package-formats-found" : (gobject.SIGNAL_RUN_LAST,
|
||||
gobject.TYPE_NONE,
|
||||
(gobject.TYPE_PYOBJECT,)),
|
||||
"config-found" : (gobject.SIGNAL_RUN_LAST,
|
||||
gobject.TYPE_NONE,
|
||||
(gobject.TYPE_STRING,)),
|
||||
"generating-data" : (gobject.SIGNAL_RUN_LAST,
|
||||
gobject.TYPE_NONE,
|
||||
()),
|
||||
"data-generated" : (gobject.SIGNAL_RUN_LAST,
|
||||
gobject.TYPE_NONE,
|
||||
()),
|
||||
"error" : (gobject.SIGNAL_RUN_LAST,
|
||||
gobject.TYPE_NONE,
|
||||
(gobject.TYPE_STRING,)),
|
||||
"reload-triggered" : (gobject.SIGNAL_RUN_LAST,
|
||||
gobject.TYPE_NONE,
|
||||
(gobject.TYPE_STRING,
|
||||
gobject.TYPE_STRING)),
|
||||
"machines-updated" : (gobject.SIGNAL_RUN_LAST,
|
||||
gobject.TYPE_NONE,
|
||||
(gobject.TYPE_PYOBJECT,)),
|
||||
"distros-updated" : (gobject.SIGNAL_RUN_LAST,
|
||||
gobject.TYPE_NONE,
|
||||
(gobject.TYPE_PYOBJECT,)),
|
||||
"generating-data" : (gobject.SIGNAL_RUN_LAST,
|
||||
gobject.TYPE_NONE,
|
||||
()),
|
||||
"data-generated" : (gobject.SIGNAL_RUN_LAST,
|
||||
gobject.TYPE_NONE,
|
||||
())
|
||||
}
|
||||
|
||||
(CFG_PATH_LOCAL, CFG_PATH_HOB, CFG_PATH_LAYERS, CFG_FILES_DISTRO, CFG_FILES_MACH, CFG_FILES_SDK, FILES_MATCH_CLASS, GENERATE_TGTS, REPARSE_FILES, BUILD_IMAGE) = range(10)
|
||||
|
||||
def __init__(self, taskmodel, server):
|
||||
gobject.GObject.__init__(self)
|
||||
|
||||
self.current_command = None
|
||||
self.building = None
|
||||
self.build_toolchain = False
|
||||
self.build_toolchain_headers = False
|
||||
self.generating = False
|
||||
self.build_queue = []
|
||||
|
||||
self.model = taskmodel
|
||||
self.server = server
|
||||
self.current_command = None
|
||||
self.building = False
|
||||
|
||||
self.image_output_types = self.server.runCommand(["getVariable", "IMAGE_FSTYPES"]).split(" ")
|
||||
self.command_map = {
|
||||
"findConfigFilesDistro" : ("findConfigFiles", "MACHINE", "findConfigFilesMachine"),
|
||||
"findConfigFilesMachine" : ("generateTargetsTree", "classes/image.bbclass", None),
|
||||
"generateTargetsTree" : (None, None, None),
|
||||
}
|
||||
|
||||
def run_next_command(self):
|
||||
if self.current_command and not self.generating:
|
||||
self.emit("generating-data")
|
||||
self.generating = True
|
||||
# FIXME: this is ugly and I *will* replace it
|
||||
if self.current_command:
|
||||
next_cmd = self.command_map[self.current_command]
|
||||
command = next_cmd[0]
|
||||
argument = next_cmd[1]
|
||||
self.current_command = next_cmd[2]
|
||||
if command == "generateTargetsTree":
|
||||
self.emit("generating-data")
|
||||
self.server.runCommand([command, argument])
|
||||
|
||||
if self.current_command == self.CFG_PATH_LOCAL:
|
||||
self.current_command = self.CFG_PATH_HOB
|
||||
self.server.runCommand(["findConfigFilePath", "hob.local.conf"])
|
||||
elif self.current_command == self.CFG_PATH_HOB:
|
||||
self.current_command = self.CFG_PATH_LAYERS
|
||||
self.server.runCommand(["findConfigFilePath", "bblayers.conf"])
|
||||
elif self.current_command == self.CFG_PATH_LAYERS:
|
||||
self.current_command = self.CFG_FILES_DISTRO
|
||||
self.server.runCommand(["findConfigFiles", "DISTRO"])
|
||||
elif self.current_command == self.CFG_FILES_DISTRO:
|
||||
self.current_command = self.CFG_FILES_MACH
|
||||
self.server.runCommand(["findConfigFiles", "MACHINE"])
|
||||
elif self.current_command == self.CFG_FILES_MACH:
|
||||
self.current_command = self.CFG_FILES_SDK
|
||||
self.server.runCommand(["findConfigFiles", "MACHINE-SDK"])
|
||||
elif self.current_command == self.CFG_FILES_SDK:
|
||||
self.current_command = self.FILES_MATCH_CLASS
|
||||
self.server.runCommand(["findFilesMatchingInDir", "rootfs_", "classes"])
|
||||
elif self.current_command == self.FILES_MATCH_CLASS:
|
||||
self.current_command = self.GENERATE_TGTS
|
||||
self.server.runCommand(["generateTargetsTree", "classes/image.bbclass"])
|
||||
elif self.current_command == self.GENERATE_TGTS:
|
||||
if self.generating:
|
||||
self.emit("data-generated")
|
||||
self.generating = False
|
||||
self.current_command = None
|
||||
elif self.current_command == self.REPARSE_FILES:
|
||||
if self.build_queue:
|
||||
self.current_command = self.BUILD_IMAGE
|
||||
else:
|
||||
self.current_command = self.CFG_PATH_LAYERS
|
||||
self.server.runCommand(["reparseFiles"])
|
||||
elif self.current_command == self.BUILD_IMAGE:
|
||||
self.building = "image"
|
||||
if self.generating:
|
||||
self.emit("data-generated")
|
||||
self.generating = False
|
||||
bbpath = self.server.runCommand(["getVariable", "BBPATH"])
|
||||
bbfiles = self.server.runCommand(["getVariable", "BBFILES"])
|
||||
self.server.runCommand(["buildTargets", self.build_queue, "build"])
|
||||
self.build_queue = []
|
||||
self.current_command = None
|
||||
|
||||
def handle_event(self, event, running_build, pbar):
|
||||
def handle_event(self, event, running_build, pbar=None):
|
||||
if not event:
|
||||
return
|
||||
|
||||
@@ -131,8 +76,10 @@ class HobHandler(gobject.GObject):
|
||||
if self.building:
|
||||
running_build.handle_event(event)
|
||||
elif isinstance(event, bb.event.TargetsTreeGenerated):
|
||||
self.emit("data-generated")
|
||||
if event._model:
|
||||
self.model.populate(event._model)
|
||||
|
||||
elif isinstance(event, bb.event.ConfigFilesFound):
|
||||
var = event._variable
|
||||
if var == "distro":
|
||||
@@ -143,44 +90,26 @@ class HobHandler(gobject.GObject):
|
||||
machines = event._values
|
||||
machines.sort()
|
||||
self.emit("machines-updated", machines)
|
||||
elif var == "machine-sdk":
|
||||
sdk_machines = event._values
|
||||
sdk_machines.sort()
|
||||
self.emit("sdk-machines-updated", sdk_machines)
|
||||
elif isinstance(event, bb.event.ConfigFilePathFound):
|
||||
path = event._path
|
||||
self.emit("config-found", path)
|
||||
elif isinstance(event, bb.event.FilesMatchingFound):
|
||||
# FIXME: hard coding, should at least be a variable shared between
|
||||
# here and the caller
|
||||
if event._pattern == "rootfs_":
|
||||
formats = []
|
||||
for match in event._matches:
|
||||
classname, sep, cls = match.rpartition(".")
|
||||
fs, sep, format = classname.rpartition("_")
|
||||
formats.append(format)
|
||||
formats.sort()
|
||||
self.emit("package-formats-found", formats)
|
||||
|
||||
elif isinstance(event, bb.command.CommandCompleted):
|
||||
self.run_next_command()
|
||||
elif isinstance(event, bb.command.CommandFailed):
|
||||
self.emit("error", event.error)
|
||||
elif isinstance(event, bb.event.CacheLoadStarted):
|
||||
elif isinstance(event, bb.event.CacheLoadStarted) and pbar:
|
||||
pbar.set_title("Loading cache")
|
||||
bb.ui.crumbs.hobeventhandler.progress_total = event.total
|
||||
pbar.set_text("Loading cache: %s/%s" % (0, bb.ui.crumbs.hobeventhandler.progress_total))
|
||||
elif isinstance(event, bb.event.CacheLoadProgress):
|
||||
pbar.set_text("Loading cache: %s/%s" % (event.current, bb.ui.crumbs.hobeventhandler.progress_total))
|
||||
elif isinstance(event, bb.event.CacheLoadCompleted):
|
||||
pbar.set_text("Loading cache: %s/%s" % (bb.ui.crumbs.hobeventhandler.progress_total, bb.ui.crumbs.hobeventhandler.progress_total))
|
||||
elif isinstance(event, bb.event.ParseStarted):
|
||||
if event.total == 0:
|
||||
return
|
||||
pbar.update(0, bb.ui.crumbs.hobeventhandler.progress_total)
|
||||
elif isinstance(event, bb.event.CacheLoadProgress) and pbar:
|
||||
pbar.update(event.current, bb.ui.crumbs.hobeventhandler.progress_total)
|
||||
elif isinstance(event, bb.event.CacheLoadCompleted) and pbar:
|
||||
pbar.update(bb.ui.crumbs.hobeventhandler.progress_total, bb.ui.crumbs.hobeventhandler.progress_total)
|
||||
elif isinstance(event, bb.event.ParseStarted) and pbar:
|
||||
pbar.set_title("Processing recipes")
|
||||
bb.ui.crumbs.hobeventhandler.progress_total = event.total
|
||||
pbar.set_text("Processing recipes: %s/%s" % (0, bb.ui.crumbs.hobeventhandler.progress_total))
|
||||
elif isinstance(event, bb.event.ParseProgress):
|
||||
pbar.set_text("Processing recipes: %s/%s" % (event.current, bb.ui.crumbs.hobeventhandler.progress_total))
|
||||
elif isinstance(event, bb.event.ParseCompleted):
|
||||
pbar.set_fraction(1.0)
|
||||
pbar.update(0, bb.ui.crumbs.hobeventhandler.progress_total)
|
||||
elif isinstance(event, bb.event.ParseProgress) and pbar:
|
||||
pbar.update(event.current, bb.ui.crumbs.hobeventhandler.progress_total)
|
||||
elif isinstance(event, bb.event.ParseCompleted) and pbar:
|
||||
pbar.hide()
|
||||
|
||||
return
|
||||
|
||||
def event_handle_idle_func (self, eventHandler, running_build, pbar):
|
||||
@@ -193,125 +122,16 @@ class HobHandler(gobject.GObject):
|
||||
|
||||
def set_machine(self, machine):
|
||||
self.server.runCommand(["setVariable", "MACHINE", machine])
|
||||
|
||||
def set_sdk_machine(self, sdk_machine):
|
||||
self.server.runCommand(["setVariable", "SDKMACHINE", sdk_machine])
|
||||
self.current_command = "findConfigFilesMachine"
|
||||
self.run_next_command()
|
||||
|
||||
def set_distro(self, distro):
|
||||
self.server.runCommand(["setVariable", "DISTRO", distro])
|
||||
|
||||
def set_package_format(self, format):
|
||||
self.server.runCommand(["setVariable", "PACKAGE_CLASSES", "package_%s" % format])
|
||||
def run_build(self, targets):
|
||||
self.building = True
|
||||
self.server.runCommand(["buildTargets", targets, "build"])
|
||||
|
||||
def reload_data(self, config=None):
|
||||
img = self.model.selected_image
|
||||
selected_packages, _ = self.model.get_selected_packages()
|
||||
self.emit("reload-triggered", img, " ".join(selected_packages))
|
||||
self.current_command = self.REPARSE_FILES
|
||||
self.run_next_command()
|
||||
|
||||
def set_bbthreads(self, threads):
|
||||
self.server.runCommand(["setVariable", "BB_NUMBER_THREADS", threads])
|
||||
|
||||
def set_pmake(self, threads):
|
||||
pmake = "-j %s" % threads
|
||||
self.server.runCommand(["setVariable", "BB_NUMBER_THREADS", pmake])
|
||||
|
||||
def build_image(self, image, image_path, configurator):
|
||||
targets = []
|
||||
targets.append(image)
|
||||
if self.build_toolchain and self.build_toolchain_headers:
|
||||
targets.append("meta-toolchain-sdk")
|
||||
elif self.build_toolchain:
|
||||
targets.append("meta-toolchain")
|
||||
self.build_queue = targets
|
||||
|
||||
bbpath_ok = False
|
||||
bbpath = self.server.runCommand(["getVariable", "BBPATH"])
|
||||
if image_path in bbpath.split(":"):
|
||||
bbpath_ok = True
|
||||
|
||||
bbfiles_ok = False
|
||||
bbfiles = self.server.runCommand(["getVariable", "BBFILES"]).split(" ")
|
||||
for files in bbfiles:
|
||||
import re
|
||||
pattern = "%s/\*.bb" % image_path
|
||||
if re.match(pattern, files):
|
||||
bbfiles_ok = True
|
||||
|
||||
if not bbpath_ok:
|
||||
nbbp = image_path
|
||||
else:
|
||||
nbbp = None
|
||||
|
||||
if not bbfiles_ok:
|
||||
nbbf = "%s/*.bb" % image_path
|
||||
else:
|
||||
nbbf = None
|
||||
|
||||
if not bbfiles_ok or not bbpath_ok:
|
||||
configurator.insertTempBBPath(nbbp, nbbf)
|
||||
|
||||
self.current_command = self.REPARSE_FILES
|
||||
self.run_next_command()
|
||||
|
||||
def build_packages(self, pkgs):
|
||||
self.building = "packages"
|
||||
self.server.runCommand(["buildTargets", pkgs, "build"])
|
||||
|
||||
def cancel_build(self, force=False):
|
||||
if force:
|
||||
# Force the cooker to stop as quickly as possible
|
||||
self.server.runCommand(["stateStop"])
|
||||
else:
|
||||
# Wait for tasks to complete before shutting down, this helps
|
||||
# leave the workdir in a usable state
|
||||
self.server.runCommand(["stateShutdown"])
|
||||
|
||||
def set_incompatible_license(self, incompatible):
|
||||
self.server.runCommand(["setVariable", "INCOMPATIBLE_LICENSE", incompatible])
|
||||
|
||||
def toggle_toolchain(self, enabled):
|
||||
if self.build_toolchain != enabled:
|
||||
self.build_toolchain = enabled
|
||||
|
||||
def toggle_toolchain_headers(self, enabled):
|
||||
if self.build_toolchain_headers != enabled:
|
||||
self.build_toolchain_headers = enabled
|
||||
|
||||
def queue_image_recipe_path(self, path):
|
||||
self.build_queue.append(path)
|
||||
|
||||
def build_complete_cb(self, running_build):
|
||||
if len(self.build_queue) > 0:
|
||||
next = self.build_queue.pop(0)
|
||||
if next.endswith('.bb'):
|
||||
self.build_file(next)
|
||||
self.building = 'image'
|
||||
self.build_file(next)
|
||||
else:
|
||||
self.build_packages(next.split(" "))
|
||||
else:
|
||||
self.building = None
|
||||
self.emit("build-complete")
|
||||
|
||||
def set_fstypes(self, fstypes):
|
||||
self.server.runCommand(["setVariable", "IMAGE_FSTYPES", fstypes])
|
||||
|
||||
def add_image_output_type(self, output_type):
|
||||
if output_type not in self.image_output_types:
|
||||
self.image_output_types.append(output_type)
|
||||
fstypes = " ".join(self.image_output_types)
|
||||
self.set_fstypes(fstypes)
|
||||
return fstypes
|
||||
|
||||
def remove_image_output_type(self, output_type):
|
||||
if output_type in self.image_output_types:
|
||||
ind = self.image_output_types.index(output_type)
|
||||
self.image_output_types.pop(ind)
|
||||
fstypes = " ".join(self.image_output_types)
|
||||
self.set_fstypes(fstypes)
|
||||
return fstypes
|
||||
|
||||
def get_image_deploy_dir(self):
|
||||
return self.server.runCommand(["getVariable", "DEPLOY_DIR_IMAGE"])
|
||||
def cancel_build(self):
|
||||
# Note: this may not be the right way to stop an in-progress build
|
||||
self.server.runCommand(["stateStop"])
|
||||
|
||||
@@ -1,335 +0,0 @@
|
||||
#
|
||||
# BitBake Graphical GTK User Interface
|
||||
#
|
||||
# Copyright (C) 2011 Intel Corporation
|
||||
#
|
||||
# Authored by Joshua Lock <josh@linux.intel.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License version 2 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License along
|
||||
# with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
import gtk
|
||||
import glib
|
||||
from bb.ui.crumbs.configurator import Configurator
|
||||
|
||||
class HobPrefs(gtk.Dialog):
|
||||
"""
|
||||
"""
|
||||
def empty_combo_text(self, combo_text):
|
||||
model = combo_text.get_model()
|
||||
if model:
|
||||
model.clear()
|
||||
|
||||
def output_type_toggled_cb(self, check, handler):
|
||||
ot = check.get_label()
|
||||
enabled = check.get_active()
|
||||
if enabled:
|
||||
self.selected_image_types = handler.add_image_output_type(ot)
|
||||
else:
|
||||
self.selected_image_types = handler.remove_image_output_type(ot)
|
||||
|
||||
self.configurator.setLocalConfVar('IMAGE_FSTYPES', "%s" % self.selected_image_types)
|
||||
|
||||
def sdk_machine_combo_changed_cb(self, combo, handler):
|
||||
sdk_mach = combo.get_active_text()
|
||||
if sdk_mach != self.curr_sdk_mach:
|
||||
self.curr_sdk_mach = sdk_mach
|
||||
self.configurator.setLocalConfVar('SDKMACHINE', sdk_mach)
|
||||
handler.set_sdk_machine(sdk_mach)
|
||||
|
||||
def update_sdk_machines(self, handler, sdk_machines):
|
||||
active = 0
|
||||
# disconnect the signal handler before updating the combo model
|
||||
if self.sdk_machine_handler_id:
|
||||
self.sdk_machine_combo.disconnect(self.sdk_machine_handler_id)
|
||||
self.sdk_machine_handler_id = None
|
||||
|
||||
self.empty_combo_text(self.sdk_machine_combo)
|
||||
for sdk_machine in sdk_machines:
|
||||
self.sdk_machine_combo.append_text(sdk_machine)
|
||||
if sdk_machine == self.curr_sdk_mach:
|
||||
self.sdk_machine_combo.set_active(active)
|
||||
active = active + 1
|
||||
|
||||
self.sdk_machine_handler_id = self.sdk_machine_combo.connect("changed", self.sdk_machine_combo_changed_cb, handler)
|
||||
|
||||
def distro_combo_changed_cb(self, combo, handler):
|
||||
distro = combo.get_active_text()
|
||||
if distro != self.curr_distro:
|
||||
self.curr_distro = distro
|
||||
self.configurator.setLocalConfVar('DISTRO', distro)
|
||||
handler.set_distro(distro)
|
||||
self.reload_required = True
|
||||
|
||||
def update_distros(self, handler, distros):
|
||||
active = 0
|
||||
# disconnect the signal handler before updating combo model
|
||||
if self.distro_handler_id:
|
||||
self.distro_combo.disconnect(self.distro_handler_id)
|
||||
self.distro_handler_id = None
|
||||
|
||||
self.empty_combo_text(self.distro_combo)
|
||||
for distro in distros:
|
||||
self.distro_combo.append_text(distro)
|
||||
if distro == self.curr_distro:
|
||||
self.distro_combo.set_active(active)
|
||||
active = active + 1
|
||||
|
||||
self.distro_handler_id = self.distro_combo.connect("changed", self.distro_combo_changed_cb, handler)
|
||||
|
||||
def package_format_combo_changed_cb(self, combo, handler):
|
||||
package_format = combo.get_active_text()
|
||||
if package_format != self.curr_package_format:
|
||||
self.curr_package_format = package_format
|
||||
self.configurator.setLocalConfVar('PACKAGE_CLASSES', 'package_%s' % package_format)
|
||||
handler.set_package_format(package_format)
|
||||
self.reload_required = True
|
||||
|
||||
def update_package_formats(self, handler, formats):
|
||||
active = 0
|
||||
# disconnect the signal handler before updating the model
|
||||
if self.package_handler_id:
|
||||
self.package_combo.disconnect(self.package_handler_id)
|
||||
self.package_handler_id = None
|
||||
|
||||
self.empty_combo_text(self.package_combo)
|
||||
for format in formats:
|
||||
self.package_combo.append_text(format)
|
||||
if format == self.curr_package_format:
|
||||
self.package_combo.set_active(active)
|
||||
active = active + 1
|
||||
|
||||
self.package_handler_id = self.package_combo.connect("changed", self.package_format_combo_changed_cb, handler)
|
||||
|
||||
def include_gplv3_cb(self, toggle):
|
||||
excluded = toggle.get_active()
|
||||
orig_incompatible = self.configurator.getLocalConfVar('INCOMPATIBLE_LICENSE')
|
||||
new_incompatible = ""
|
||||
if excluded:
|
||||
if not orig_incompatible:
|
||||
new_incompatible = "GPLv3"
|
||||
elif not orig_incompatible.find('GPLv3'):
|
||||
new_incompatible = "%s GPLv3" % orig_incompatible
|
||||
else:
|
||||
new_incompatible = orig_incompatible.replace('GPLv3', '')
|
||||
|
||||
if new_incompatible != orig_incompatible:
|
||||
self.handler.set_incompatible_license(new_incompatible)
|
||||
self.configurator.setLocalConfVar('INCOMPATIBLE_LICENSE', new_incompatible)
|
||||
self.reload_required = True
|
||||
|
||||
def change_bb_threads_cb(self, spinner):
|
||||
val = spinner.get_value_as_int()
|
||||
self.handler.set_bbthreads(val)
|
||||
self.configurator.setLocalConfVar('BB_NUMBER_THREADS', val)
|
||||
|
||||
def change_make_threads_cb(self, spinner):
|
||||
val = spinner.get_value_as_int()
|
||||
self.handler.set_pmake(val)
|
||||
self.configurator.setLocalConfVar('PARALLEL_MAKE', "-j %s" % val)
|
||||
|
||||
def toggle_toolchain_cb(self, check):
|
||||
enabled = check.get_active()
|
||||
toolchain = '0'
|
||||
if enabled:
|
||||
toolchain = '1'
|
||||
self.handler.toggle_toolchain(enabled)
|
||||
self.configurator.setLocalConfVar('HOB_BUILD_TOOLCHAIN', toolchain)
|
||||
|
||||
def toggle_headers_cb(self, check):
|
||||
enabled = check.get_active()
|
||||
headers = '0'
|
||||
if enabled:
|
||||
headers = '1'
|
||||
self.handler.toggle_toolchain_headers(enabled)
|
||||
self.configurator.setLocalConfVar('HOB_BUILD_TOOLCHAIN_HEADERS', headers)
|
||||
|
||||
def set_parent_window(self, parent):
|
||||
self.set_transient_for(parent)
|
||||
|
||||
def write_changes(self):
|
||||
self.configurator.writeLocalConf()
|
||||
|
||||
def prefs_response_cb(self, dialog, response):
|
||||
if self.reload_required:
|
||||
glib.idle_add(self.handler.reload_data)
|
||||
|
||||
def __init__(self, configurator, handler, curr_sdk_mach, curr_distro, pclass,
|
||||
cpu_cnt, pmake, bbthread, selected_image_types, all_image_types,
|
||||
gplv3disabled, build_toolchain, build_toolchain_headers):
|
||||
"""
|
||||
"""
|
||||
gtk.Dialog.__init__(self, "Preferences", None,
|
||||
gtk.DIALOG_DESTROY_WITH_PARENT,
|
||||
(gtk.STOCK_CLOSE, gtk.RESPONSE_OK))
|
||||
|
||||
self.set_border_width(6)
|
||||
self.vbox.set_property("spacing", 12)
|
||||
self.action_area.set_property("spacing", 12)
|
||||
self.action_area.set_property("border-width", 6)
|
||||
|
||||
self.handler = handler
|
||||
self.configurator = configurator
|
||||
|
||||
self.curr_sdk_mach = curr_sdk_mach
|
||||
self.curr_distro = curr_distro
|
||||
self.curr_package_format = pclass
|
||||
self.cpu_cnt = cpu_cnt
|
||||
self.pmake = pmake
|
||||
self.bbthread = bbthread
|
||||
self.selected_image_types = selected_image_types.split(" ")
|
||||
self.gplv3disabled = gplv3disabled
|
||||
self.build_toolchain = build_toolchain
|
||||
self.build_toolchain_headers = build_toolchain_headers
|
||||
|
||||
self.reload_required = False
|
||||
self.distro_handler_id = None
|
||||
self.sdk_machine_handler_id = None
|
||||
self.package_handler_id = None
|
||||
|
||||
left = gtk.SizeGroup(gtk.SIZE_GROUP_HORIZONTAL)
|
||||
right = gtk.SizeGroup(gtk.SIZE_GROUP_HORIZONTAL)
|
||||
|
||||
label = gtk.Label()
|
||||
label.set_markup("<b>Policy</b>")
|
||||
label.show()
|
||||
frame = gtk.Frame()
|
||||
frame.set_label_widget(label)
|
||||
frame.set_shadow_type(gtk.SHADOW_NONE)
|
||||
frame.show()
|
||||
self.vbox.pack_start(frame)
|
||||
pbox = gtk.VBox(False, 12)
|
||||
pbox.show()
|
||||
frame.add(pbox)
|
||||
hbox = gtk.HBox(False, 12)
|
||||
hbox.show()
|
||||
pbox.pack_start(hbox, expand=False, fill=False, padding=6)
|
||||
# Distro selector
|
||||
label = gtk.Label("Distribution:")
|
||||
label.show()
|
||||
hbox.pack_start(label, expand=False, fill=False, padding=6)
|
||||
self.distro_combo = gtk.combo_box_new_text()
|
||||
self.distro_combo.set_tooltip_text("Select the Yocto distribution you would like to use")
|
||||
self.distro_combo.show()
|
||||
hbox.pack_start(self.distro_combo, expand=False, fill=False, padding=6)
|
||||
# Exclude GPLv3
|
||||
check = gtk.CheckButton("Exclude GPLv3 packages")
|
||||
check.set_tooltip_text("Check this box to prevent GPLv3 packages from being included in your image")
|
||||
check.show()
|
||||
check.set_active(self.gplv3disabled)
|
||||
check.connect("toggled", self.include_gplv3_cb)
|
||||
hbox.pack_start(check, expand=False, fill=False, padding=6)
|
||||
hbox = gtk.HBox(False, 12)
|
||||
hbox.show()
|
||||
pbox.pack_start(hbox, expand=False, fill=False, padding=6)
|
||||
# Package format selector
|
||||
label = gtk.Label("Package format:")
|
||||
label.show()
|
||||
hbox.pack_start(label, expand=False, fill=False, padding=6)
|
||||
self.package_combo = gtk.combo_box_new_text()
|
||||
self.package_combo.set_tooltip_text("""The package format is that used in creation
|
||||
of the root filesystem and also dictates the package manager used in your image""")
|
||||
self.package_combo.show()
|
||||
hbox.pack_start(self.package_combo, expand=False, fill=False, padding=6)
|
||||
if all_image_types:
|
||||
# Image output type selector
|
||||
label = gtk.Label("Image output types:")
|
||||
label.show()
|
||||
hbox.pack_start(label, expand=False, fill=False, padding=6)
|
||||
chk_cnt = 3
|
||||
for it in all_image_types.split(" "):
|
||||
chk_cnt = chk_cnt + 1
|
||||
if chk_cnt % 6 == 0:
|
||||
hbox = gtk.HBox(False, 12)
|
||||
hbox.show()
|
||||
pbox.pack_start(hbox, expand=False, fill=False, padding=6)
|
||||
chk = gtk.CheckButton(it)
|
||||
if it in self.selected_image_types:
|
||||
chk.set_active(True)
|
||||
chk.set_tooltip_text("Build an %s image" % it)
|
||||
chk.connect("toggled", self.output_type_toggled_cb, handler)
|
||||
chk.show()
|
||||
hbox.pack_start(chk, expand=False, fill=False, padding=3)
|
||||
# BitBake
|
||||
label = gtk.Label()
|
||||
label.set_markup("<b>BitBake</b>")
|
||||
label.show()
|
||||
frame = gtk.Frame()
|
||||
frame.set_label_widget(label)
|
||||
frame.set_shadow_type(gtk.SHADOW_NONE)
|
||||
frame.show()
|
||||
self.vbox.pack_start(frame)
|
||||
pbox = gtk.VBox(False, 12)
|
||||
pbox.show()
|
||||
frame.add(pbox)
|
||||
hbox = gtk.HBox(False, 12)
|
||||
hbox.show()
|
||||
pbox.pack_start(hbox, expand=False, fill=False, padding=6)
|
||||
label = gtk.Label("BitBake threads:")
|
||||
label.show()
|
||||
# NOTE: may be a good idea in future to intelligently cap the maximum
|
||||
# values but we need more data to make an educated decision, for now
|
||||
# set a high maximum as a value for upper bounds is required by the
|
||||
# gtk.Adjustment
|
||||
spin_max = 30 # seems like a high enough arbitrary number
|
||||
#spin_max = self.cpu_cnt * 3
|
||||
hbox.pack_start(label, expand=False, fill=False, padding=6)
|
||||
bbadj = gtk.Adjustment(value=self.bbthread, lower=1, upper=spin_max, step_incr=1)
|
||||
bbspinner = gtk.SpinButton(adjustment=bbadj, climb_rate=1, digits=0)
|
||||
bbspinner.show()
|
||||
bbspinner.connect("value-changed", self.change_bb_threads_cb)
|
||||
hbox.pack_start(bbspinner, expand=False, fill=False, padding=6)
|
||||
label = gtk.Label("Make threads:")
|
||||
label.show()
|
||||
hbox.pack_start(label, expand=False, fill=False, padding=6)
|
||||
madj = gtk.Adjustment(value=self.pmake, lower=1, upper=spin_max, step_incr=1)
|
||||
makespinner = gtk.SpinButton(adjustment=madj, climb_rate=1, digits=0)
|
||||
makespinner.connect("value-changed", self.change_make_threads_cb)
|
||||
makespinner.show()
|
||||
hbox.pack_start(makespinner, expand=False, fill=False, padding=6)
|
||||
# Toolchain
|
||||
label = gtk.Label()
|
||||
label.set_markup("<b>External Toolchain</b>")
|
||||
label.show()
|
||||
frame = gtk.Frame()
|
||||
frame.set_label_widget(label)
|
||||
frame.set_shadow_type(gtk.SHADOW_NONE)
|
||||
frame.show()
|
||||
self.vbox.pack_start(frame)
|
||||
pbox = gtk.VBox(False, 12)
|
||||
pbox.show()
|
||||
frame.add(pbox)
|
||||
hbox = gtk.HBox(False, 12)
|
||||
hbox.show()
|
||||
pbox.pack_start(hbox, expand=False, fill=False, padding=6)
|
||||
toolcheck = gtk.CheckButton("Build external development toolchain with image")
|
||||
toolcheck.show()
|
||||
toolcheck.set_active(self.build_toolchain)
|
||||
toolcheck.connect("toggled", self.toggle_toolchain_cb)
|
||||
hbox.pack_start(toolcheck, expand=False, fill=False, padding=6)
|
||||
hbox = gtk.HBox(False, 12)
|
||||
hbox.show()
|
||||
pbox.pack_start(hbox, expand=False, fill=False, padding=6)
|
||||
label = gtk.Label("Toolchain host:")
|
||||
label.show()
|
||||
hbox.pack_start(label, expand=False, fill=False, padding=6)
|
||||
self.sdk_machine_combo = gtk.combo_box_new_text()
|
||||
self.sdk_machine_combo.set_tooltip_text("Select the host architecture of the external machine")
|
||||
self.sdk_machine_combo.show()
|
||||
hbox.pack_start(self.sdk_machine_combo, expand=False, fill=False, padding=6)
|
||||
headerscheck = gtk.CheckButton("Include development headers with toolchain")
|
||||
headerscheck.show()
|
||||
headerscheck.set_active(self.build_toolchain_headers)
|
||||
headerscheck.connect("toggled", self.toggle_headers_cb)
|
||||
hbox.pack_start(headerscheck, expand=False, fill=False, padding=6)
|
||||
self.connect("response", self.prefs_response_cb)
|
||||
@@ -1,137 +0,0 @@
|
||||
#
|
||||
# BitBake Graphical GTK User Interface
|
||||
#
|
||||
# Copyright (C) 2011 Intel Corporation
|
||||
#
|
||||
# Authored by Joshua Lock <josh@linux.intel.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License version 2 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License along
|
||||
# with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
import gobject
|
||||
import gtk
|
||||
from bb.ui.crumbs.configurator import Configurator
|
||||
|
||||
class LayerEditor(gtk.Dialog):
|
||||
"""
|
||||
Gtk+ Widget for enabling and disabling layers.
|
||||
Layers are added through using an open dialog to find the layer.conf
|
||||
Disabled layers are deleted from conf/bblayers.conf
|
||||
"""
|
||||
def __init__(self, configurator, parent=None):
|
||||
gtk.Dialog.__init__(self, "Layers", None,
|
||||
gtk.DIALOG_DESTROY_WITH_PARENT,
|
||||
(gtk.STOCK_CLOSE, gtk.RESPONSE_OK))
|
||||
|
||||
# We want to show a little more of the treeview in the default,
|
||||
# emptier, case
|
||||
self.set_size_request(-1, 300)
|
||||
self.set_border_width(6)
|
||||
self.vbox.set_property("spacing", 0)
|
||||
self.action_area.set_property("border-width", 6)
|
||||
|
||||
self.configurator = configurator
|
||||
self.newly_added = {}
|
||||
|
||||
# Label to inform users that meta is enabled but that you can't
|
||||
# disable it as it'd be a *bad* idea
|
||||
msg = "As the core of the build system the <i>meta</i> layer must always be included and therefore can't be viewed or edited here."
|
||||
lbl = gtk.Label()
|
||||
lbl.show()
|
||||
lbl.set_use_markup(True)
|
||||
lbl.set_markup(msg)
|
||||
lbl.set_line_wrap(True)
|
||||
lbl.set_justify(gtk.JUSTIFY_FILL)
|
||||
self.vbox.pack_start(lbl, expand=False, fill=False, padding=6)
|
||||
|
||||
# Create a treeview in which to list layers
|
||||
# ListStore of Name, Path, Enabled
|
||||
self.layer_store = gtk.ListStore(gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_BOOLEAN)
|
||||
self.tv = gtk.TreeView(self.layer_store)
|
||||
self.tv.set_headers_visible(True)
|
||||
|
||||
col0 = gtk.TreeViewColumn('Name')
|
||||
self.tv.append_column(col0)
|
||||
col1 = gtk.TreeViewColumn('Path')
|
||||
self.tv.append_column(col1)
|
||||
col2 = gtk.TreeViewColumn('Enabled')
|
||||
self.tv.append_column(col2)
|
||||
|
||||
cell0 = gtk.CellRendererText()
|
||||
col0.pack_start(cell0, True)
|
||||
col0.set_attributes(cell0, text=0)
|
||||
cell1 = gtk.CellRendererText()
|
||||
col1.pack_start(cell1, True)
|
||||
col1.set_attributes(cell1, text=1)
|
||||
cell2 = gtk.CellRendererToggle()
|
||||
cell2.connect("toggled", self._toggle_layer_cb)
|
||||
col2.pack_start(cell2, True)
|
||||
col2.set_attributes(cell2, active=2)
|
||||
|
||||
self.tv.show()
|
||||
self.vbox.pack_start(self.tv, expand=True, fill=True, padding=0)
|
||||
|
||||
tb = gtk.Toolbar()
|
||||
tb.set_icon_size(gtk.ICON_SIZE_SMALL_TOOLBAR)
|
||||
tb.set_style(gtk.TOOLBAR_BOTH)
|
||||
tb.set_tooltips(True)
|
||||
tb.show()
|
||||
icon = gtk.Image()
|
||||
icon.set_from_stock(gtk.STOCK_ADD, gtk.ICON_SIZE_SMALL_TOOLBAR)
|
||||
icon.show()
|
||||
tb.insert_item("Add Layer", "Add new layer", None, icon,
|
||||
self._find_layer_cb, None, -1)
|
||||
self.vbox.pack_start(tb, expand=False, fill=False, padding=0)
|
||||
|
||||
def set_parent_window(self, parent):
|
||||
self.set_transient_for(parent)
|
||||
|
||||
def load_current_layers(self, data):
|
||||
for layer, path in self.configurator.enabled_layers.items():
|
||||
if layer != 'meta':
|
||||
self.layer_store.append([layer, path, True])
|
||||
|
||||
def save_current_layers(self):
|
||||
self.configurator.writeLayerConf()
|
||||
|
||||
def _toggle_layer_cb(self, cell, path):
|
||||
name = self.layer_store[path][0]
|
||||
toggle = not self.layer_store[path][2]
|
||||
if toggle:
|
||||
self.configurator.addLayer(name, path)
|
||||
else:
|
||||
self.configurator.disableLayer(name)
|
||||
self.layer_store[path][2] = toggle
|
||||
|
||||
def _find_layer_cb(self, button):
|
||||
self.find_layer(self)
|
||||
|
||||
def find_layer(self, parent):
|
||||
dialog = gtk.FileChooserDialog("Add new layer", parent,
|
||||
gtk.FILE_CHOOSER_ACTION_OPEN,
|
||||
(gtk.STOCK_CANCEL, gtk.RESPONSE_NO,
|
||||
gtk.STOCK_OPEN, gtk.RESPONSE_YES))
|
||||
label = gtk.Label("Select the layer.conf of the layer you wish to add")
|
||||
label.show()
|
||||
dialog.set_extra_widget(label)
|
||||
response = dialog.run()
|
||||
path = dialog.get_filename()
|
||||
dialog.destroy()
|
||||
|
||||
if response == gtk.RESPONSE_YES:
|
||||
# FIXME: verify we've actually got a layer conf?
|
||||
if path.endswith(".conf"):
|
||||
name, layerpath = self.configurator.addLayerConf(path)
|
||||
if name:
|
||||
self.newly_added[name] = layerpath
|
||||
self.layer_store.append([name, layerpath, True])
|
||||
@@ -47,18 +47,12 @@ class RunningBuildModel (gtk.TreeStore):
|
||||
|
||||
class RunningBuild (gobject.GObject):
|
||||
__gsignals__ = {
|
||||
'build-started' : (gobject.SIGNAL_RUN_LAST,
|
||||
gobject.TYPE_NONE,
|
||||
()),
|
||||
'build-succeeded' : (gobject.SIGNAL_RUN_LAST,
|
||||
gobject.TYPE_NONE,
|
||||
()),
|
||||
'build-failed' : (gobject.SIGNAL_RUN_LAST,
|
||||
gobject.TYPE_NONE,
|
||||
()),
|
||||
'build-complete' : (gobject.SIGNAL_RUN_LAST,
|
||||
gobject.TYPE_NONE,
|
||||
())
|
||||
())
|
||||
}
|
||||
pids_to_task = {}
|
||||
tasks_to_iter = {}
|
||||
@@ -207,7 +201,6 @@ class RunningBuild (gobject.GObject):
|
||||
|
||||
elif isinstance(event, bb.event.BuildStarted):
|
||||
|
||||
self.emit("build-started")
|
||||
self.model.prepend(None, (None,
|
||||
None,
|
||||
None,
|
||||
@@ -225,9 +218,6 @@ class RunningBuild (gobject.GObject):
|
||||
Colors.OK,
|
||||
0))
|
||||
|
||||
# Emit a generic "build-complete" signal for things wishing to
|
||||
# handle when the build is finished
|
||||
self.emit("build-complete")
|
||||
# Emit the appropriate signal depending on the number of failures
|
||||
if (failures >= 1):
|
||||
self.emit ("build-failed")
|
||||
@@ -244,8 +234,6 @@ class RunningBuild (gobject.GObject):
|
||||
pbar.update(self.progress_total, self.progress_total)
|
||||
|
||||
elif isinstance(event, bb.event.ParseStarted) and pbar:
|
||||
if event.total == 0:
|
||||
return
|
||||
pbar.set_title("Processing recipes")
|
||||
self.progress_total = event.total
|
||||
pbar.update(0, self.progress_total)
|
||||
@@ -320,4 +308,4 @@ class RunningBuildTreeView (gtk.TreeView):
|
||||
|
||||
clipboard = gtk.clipboard_get()
|
||||
clipboard.set_text(paste_url)
|
||||
clipboard.store()
|
||||
clipboard.store()
|
||||
@@ -20,67 +20,6 @@
|
||||
|
||||
import gtk
|
||||
import gobject
|
||||
import re
|
||||
|
||||
class BuildRep(gobject.GObject):
|
||||
|
||||
def __init__(self, userpkgs, allpkgs, base_image=None):
|
||||
gobject.GObject.__init__(self)
|
||||
self.base_image = base_image
|
||||
self.allpkgs = allpkgs
|
||||
self.userpkgs = userpkgs
|
||||
|
||||
def loadRecipe(self, pathname):
|
||||
contents = []
|
||||
packages = ""
|
||||
base_image = ""
|
||||
|
||||
with open(pathname, 'r') as f:
|
||||
contents = f.readlines()
|
||||
|
||||
pkg_pattern = "^\s*(IMAGE_INSTALL)\s*([+=.?]+)\s*(\"\S*\")"
|
||||
img_pattern = "^\s*(require)\s+(\S+.bb)"
|
||||
|
||||
for line in contents:
|
||||
matchpkg = re.search(pkg_pattern, line)
|
||||
matchimg = re.search(img_pattern, line)
|
||||
if matchpkg:
|
||||
packages = packages + matchpkg.group(3).strip('"')
|
||||
if matchimg:
|
||||
base_image = os.path.basename(matchimg.group(2)).split(".")[0]
|
||||
|
||||
self.base_image = base_image
|
||||
self.userpkgs = packages
|
||||
|
||||
def writeRecipe(self, writepath, model):
|
||||
template = """
|
||||
# Recipe generated by the HOB
|
||||
|
||||
require %s
|
||||
|
||||
IMAGE_INSTALL += "%s"
|
||||
"""
|
||||
|
||||
empty_template = """
|
||||
# Recipe generated by the HOB
|
||||
|
||||
inherit core-image
|
||||
|
||||
IMAGE_INSTALL = "%s"
|
||||
"""
|
||||
if self.base_image and not self.base_image == "empty":
|
||||
meta_path = model.find_image_path(self.base_image)
|
||||
recipe = template % (meta_path, self.userpkgs)
|
||||
else:
|
||||
recipe = empty_template % self.allpkgs
|
||||
|
||||
if os.path.exists(writepath):
|
||||
os.rename(writepath, "%s~" % writepath)
|
||||
|
||||
with open(writepath, 'w') as r:
|
||||
r.write(recipe)
|
||||
|
||||
return writepath
|
||||
|
||||
class TaskListModel(gtk.ListStore):
|
||||
"""
|
||||
@@ -89,18 +28,12 @@ class TaskListModel(gtk.ListStore):
|
||||
providing convenience functions to access gtk.TreeModel subclasses which
|
||||
provide filtered views of the data.
|
||||
"""
|
||||
(COL_NAME, COL_DESC, COL_LIC, COL_GROUP, COL_DEPS, COL_BINB, COL_TYPE, COL_INC, COL_IMG, COL_PATH) = range(10)
|
||||
(COL_NAME, COL_DESC, COL_LIC, COL_GROUP, COL_DEPS, COL_BINB, COL_TYPE, COL_INC) = range(8)
|
||||
|
||||
__gsignals__ = {
|
||||
"tasklist-populated" : (gobject.SIGNAL_RUN_LAST,
|
||||
gobject.TYPE_NONE,
|
||||
()),
|
||||
"contents-changed" : (gobject.SIGNAL_RUN_LAST,
|
||||
gobject.TYPE_NONE,
|
||||
(gobject.TYPE_INT,)),
|
||||
"image-changed" : (gobject.SIGNAL_RUN_LAST,
|
||||
gobject.TYPE_NONE,
|
||||
(gobject.TYPE_STRING,)),
|
||||
())
|
||||
}
|
||||
|
||||
"""
|
||||
@@ -110,7 +43,6 @@ class TaskListModel(gtk.ListStore):
|
||||
self.tasks = None
|
||||
self.packages = None
|
||||
self.images = None
|
||||
self.selected_image = None
|
||||
|
||||
gtk.ListStore.__init__ (self,
|
||||
gobject.TYPE_STRING,
|
||||
@@ -120,22 +52,7 @@ class TaskListModel(gtk.ListStore):
|
||||
gobject.TYPE_STRING,
|
||||
gobject.TYPE_STRING,
|
||||
gobject.TYPE_STRING,
|
||||
gobject.TYPE_BOOLEAN,
|
||||
gobject.TYPE_BOOLEAN,
|
||||
gobject.TYPE_STRING)
|
||||
|
||||
def contents_changed_cb(self, tree_model, path, it=None):
|
||||
pkg_cnt = self.contents.iter_n_children(None)
|
||||
self.emit("contents-changed", pkg_cnt)
|
||||
|
||||
def contents_model_filter(self, model, it):
|
||||
if not model.get_value(it, self.COL_INC) or model.get_value(it, self.COL_TYPE) == 'image':
|
||||
return False
|
||||
name = model.get_value(it, self.COL_NAME)
|
||||
if name.endswith('-native') or name.endswith('-cross'):
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
gobject.TYPE_BOOLEAN)
|
||||
|
||||
"""
|
||||
Create, if required, and return a filtered gtk.TreeModel
|
||||
@@ -145,9 +62,7 @@ class TaskListModel(gtk.ListStore):
|
||||
def contents_model(self):
|
||||
if not self.contents:
|
||||
self.contents = self.filter_new()
|
||||
self.contents.set_visible_func(self.contents_model_filter)
|
||||
self.contents.connect("row-inserted", self.contents_changed_cb)
|
||||
self.contents.connect("row-deleted", self.contents_changed_cb)
|
||||
self.contents.set_visible_column(self.COL_INC)
|
||||
return self.contents
|
||||
|
||||
"""
|
||||
@@ -192,13 +107,10 @@ class TaskListModel(gtk.ListStore):
|
||||
Helper function to determine whether an item is a package
|
||||
"""
|
||||
def package_model_filter(self, model, it):
|
||||
if model.get_value(it, self.COL_TYPE) != 'package':
|
||||
return False
|
||||
else:
|
||||
name = model.get_value(it, self.COL_NAME)
|
||||
if name.count('-native') or name.count('cross'):
|
||||
return False
|
||||
if model.get_value(it, self.COL_TYPE) == 'package':
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
"""
|
||||
Create, if required, and return a filtered gtk.TreeModel
|
||||
@@ -217,78 +129,33 @@ class TaskListModel(gtk.ListStore):
|
||||
to notify any listeners that the model is ready
|
||||
"""
|
||||
def populate(self, event_model):
|
||||
# First clear the model, in case repopulating
|
||||
self.clear()
|
||||
for item in event_model["pn"]:
|
||||
atype = 'package'
|
||||
name = item
|
||||
summary = event_model["pn"][item]["summary"]
|
||||
lic = event_model["pn"][item]["license"]
|
||||
license = event_model["pn"][item]["license"]
|
||||
group = event_model["pn"][item]["section"]
|
||||
filename = event_model["pn"][item]["filename"]
|
||||
depends = event_model["depends"].get(item, "")
|
||||
|
||||
depends = event_model["depends"].get(item, "")
|
||||
rdepends = event_model["rdepends-pn"].get(item, "")
|
||||
if rdepends:
|
||||
for rdep in rdepends:
|
||||
if event_model["packages"].get(rdep, ""):
|
||||
pn = event_model["packages"][rdep].get("pn", "")
|
||||
if pn:
|
||||
depends.append(pn)
|
||||
|
||||
depends = depends + rdepends
|
||||
self.squish(depends)
|
||||
deps = " ".join(depends)
|
||||
|
||||
|
||||
if name.count('task-') > 0:
|
||||
atype = 'task'
|
||||
elif name.count('-image-') > 0:
|
||||
atype = 'image'
|
||||
|
||||
self.set(self.append(), self.COL_NAME, name, self.COL_DESC, summary,
|
||||
self.COL_LIC, lic, self.COL_GROUP, group,
|
||||
self.COL_DEPS, deps, self.COL_BINB, "",
|
||||
self.COL_TYPE, atype, self.COL_INC, False,
|
||||
self.COL_IMG, False, self.COL_PATH, filename)
|
||||
|
||||
self.COL_LIC, license, self.COL_GROUP, group,
|
||||
self.COL_DEPS, deps, self.COL_BINB, "",
|
||||
self.COL_TYPE, atype, self.COL_INC, False)
|
||||
|
||||
self.emit("tasklist-populated")
|
||||
|
||||
"""
|
||||
Load a BuildRep into the model
|
||||
"""
|
||||
def load_image_rep(self, rep):
|
||||
# Unset everything
|
||||
it = self.get_iter_first()
|
||||
while it:
|
||||
path = self.get_path(it)
|
||||
self[path][self.COL_INC] = False
|
||||
self[path][self.COL_IMG] = False
|
||||
it = self.iter_next(it)
|
||||
|
||||
# Iterate the images and disable them all
|
||||
it = self.images.get_iter_first()
|
||||
while it:
|
||||
path = self.images.convert_path_to_child_path(self.images.get_path(it))
|
||||
name = self[path][self.COL_NAME]
|
||||
if name == rep.base_image:
|
||||
self.include_item(path, image_contents=True)
|
||||
else:
|
||||
self[path][self.COL_INC] = False
|
||||
it = self.images.iter_next(it)
|
||||
|
||||
# Mark all of the additional packages for inclusion
|
||||
packages = rep.userpkgs.split(" ")
|
||||
it = self.get_iter_first()
|
||||
while it:
|
||||
path = self.get_path(it)
|
||||
name = self[path][self.COL_NAME]
|
||||
if name in packages:
|
||||
self.include_item(path, binb="User Selected")
|
||||
packages.remove(name)
|
||||
it = self.iter_next(it)
|
||||
|
||||
self.emit("image-changed", rep.base_image)
|
||||
|
||||
"""
|
||||
squish lst so that it doesn't contain any duplicate entries
|
||||
squish lst so that it doesn't contain any duplicates
|
||||
"""
|
||||
def squish(self, lst):
|
||||
seen = {}
|
||||
@@ -306,105 +173,82 @@ class TaskListModel(gtk.ListStore):
|
||||
self[path][self.COL_INC] = False
|
||||
|
||||
"""
|
||||
Recursively called to mark the item at opath and any package which
|
||||
depends on it for removal.
|
||||
NOTE: This method dumbly removes user selected packages and since we don't
|
||||
do significant reverse dependency tracking it's easier and simpler to save
|
||||
the items marked as user selected and re-add them once the removal sweep is
|
||||
complete.
|
||||
"""
|
||||
def mark(self, opath):
|
||||
usersel = {}
|
||||
def mark(self, path):
|
||||
name = self[path][self.COL_NAME]
|
||||
it = self.get_iter_first()
|
||||
name = self[opath][self.COL_NAME]
|
||||
removals = []
|
||||
#print("Removing %s" % name)
|
||||
|
||||
self.remove_item_path(opath)
|
||||
self.remove_item_path(path)
|
||||
|
||||
# Remove all dependent packages, update binb
|
||||
while it:
|
||||
path = self.get_path(it)
|
||||
it = self.iter_next(it)
|
||||
|
||||
inc = self[path][self.COL_INC]
|
||||
deps = self[path][self.COL_DEPS]
|
||||
binb = self[path][self.COL_BINB]
|
||||
itype = self[path][self.COL_TYPE]
|
||||
iname = self[path][self.COL_NAME]
|
||||
|
||||
# We ignore anything that isn't a package
|
||||
if not itype == "package":
|
||||
continue
|
||||
|
||||
# If the user added this item and it's not the item we're removing
|
||||
# we should keep it and its dependencies, the easiest way to do so
|
||||
# is to save its name and re-mark it for inclusion once dependency
|
||||
# processing is complete
|
||||
if binb == "User Selected":
|
||||
usersel[iname] = self[path][self.COL_IMG]
|
||||
|
||||
# FIXME: need to ensure partial name matching doesn't happen
|
||||
if inc and deps.count(name):
|
||||
# FIXME: need to ensure partial name matching doesn't happen, regexp?
|
||||
if self[path][self.COL_INC] and self[path][self.COL_DEPS].count(name):
|
||||
#print("%s depended on %s, marking for removal" % (self[path][self.COL_NAME], name))
|
||||
# found a dependency, remove it
|
||||
self.mark(path)
|
||||
if self[path][self.COL_INC] and self[path][self.COL_BINB].count(name):
|
||||
binb = self.find_alt_dependency(self[path][self.COL_NAME])
|
||||
#print("%s was brought in by %s, binb set to %s" % (self[path][self.COL_NAME], name, binb))
|
||||
self[path][self.COL_BINB] = binb
|
||||
it = self.iter_next(it)
|
||||
|
||||
if inc and binb.count(name):
|
||||
bib = self.find_alt_dependency(name)
|
||||
self[path][self.COL_BINB] = bib
|
||||
|
||||
# Re-add any removed user selected items
|
||||
for u in usersel:
|
||||
npath = self.find_path_for_item(u)
|
||||
self.include_item(item_path=npath,
|
||||
binb="User Selected",
|
||||
image_contents=usersel[u])
|
||||
"""
|
||||
Remove items from contents if the have an empty COL_BINB (brought in by)
|
||||
caused by all packages they are a dependency of being removed.
|
||||
If the item isn't a package we leave it included.
|
||||
"""
|
||||
def sweep_up(self):
|
||||
it = self.contents.get_iter_first()
|
||||
while it:
|
||||
binb = self.contents.get_value(it, self.COL_BINB)
|
||||
itype = self.contents.get_value(it, self.COL_TYPE)
|
||||
remove = False
|
||||
removals = []
|
||||
it = self.get_iter_first()
|
||||
|
||||
if itype == 'package' and not binb:
|
||||
oit = self.contents.convert_iter_to_child_iter(it)
|
||||
opath = self.get_path(oit)
|
||||
self.mark(opath)
|
||||
remove = True
|
||||
while it:
|
||||
path = self.get_path(it)
|
||||
binb = self[path][self.COL_BINB]
|
||||
if binb == "" or binb is None:
|
||||
#print("Sweeping up %s" % self[path][self.COL_NAME])
|
||||
if not path in removals:
|
||||
removals.extend(path)
|
||||
it = self.iter_next(it)
|
||||
|
||||
# When we remove a package from the contents model we alter the
|
||||
# model, so continuing to iterate is bad. *Furthermore* it's
|
||||
# likely that the removal has affected an already iterated item
|
||||
# so we should start from the beginning anyway.
|
||||
# Only when we've managed to iterate the entire contents model
|
||||
# without removing any items do we allow the loop to exit.
|
||||
if remove:
|
||||
it = self.contents.get_iter_first()
|
||||
else:
|
||||
it = self.contents.iter_next(it)
|
||||
while removals:
|
||||
path = removals.pop()
|
||||
self.mark(path)
|
||||
|
||||
"""
|
||||
Remove an item from the contents
|
||||
"""
|
||||
def remove_item(self, path):
|
||||
self.mark(path)
|
||||
self.sweep_up()
|
||||
|
||||
"""
|
||||
Find the name of an item in the image contents which depends on the item
|
||||
name.
|
||||
Returns either an item name (str) or None
|
||||
at contents_path returns either an item name (str) or None
|
||||
NOTE:
|
||||
contents_path must be a path in the self.contents gtk.TreeModel
|
||||
"""
|
||||
def find_alt_dependency(self, name):
|
||||
it = self.contents.get_iter_first()
|
||||
it = self.get_iter_first()
|
||||
while it:
|
||||
# iterate all items in the contents model
|
||||
path = self.contents.get_path(it)
|
||||
deps = self.contents[path][self.COL_DEPS]
|
||||
itname = self.contents[path][self.COL_NAME]
|
||||
inc = self.contents[path][self.COL_INC]
|
||||
# iterate all items in the model
|
||||
path = self.get_path(it)
|
||||
deps = self[path][self.COL_DEPS]
|
||||
itname = self[path][self.COL_NAME]
|
||||
inc = self[path][self.COL_INC]
|
||||
if itname != name and inc and deps.count(name) > 0:
|
||||
# if this item depends on the item, return this items name
|
||||
#print("%s depends on %s" % (itname, name))
|
||||
return itname
|
||||
it = self.contents.iter_next(it)
|
||||
it = self.iter_next(it)
|
||||
return ""
|
||||
|
||||
"""
|
||||
Convert a path in self to a path in the filtered contents model
|
||||
"""
|
||||
def contents_path_for_path(self, path):
|
||||
return self.contents.convert_child_path_to_path(path)
|
||||
|
||||
"""
|
||||
Check the self.contents gtk.TreeModel for an item
|
||||
where COL_NAME matches item_name
|
||||
@@ -422,38 +266,27 @@ class TaskListModel(gtk.ListStore):
|
||||
"""
|
||||
Add this item, and any of its dependencies, to the image contents
|
||||
"""
|
||||
def include_item(self, item_path, binb="", image_contents=False):
|
||||
def include_item(self, item_path, binb=""):
|
||||
name = self[item_path][self.COL_NAME]
|
||||
deps = self[item_path][self.COL_DEPS]
|
||||
cur_inc = self[item_path][self.COL_INC]
|
||||
#print("Adding %s for %s dependency" % (name, binb))
|
||||
if not cur_inc:
|
||||
self[item_path][self.COL_INC] = True
|
||||
self[item_path][self.COL_BINB] = binb
|
||||
|
||||
# We want to do some magic with things which are brought in by the
|
||||
# base image so tag them as so
|
||||
if image_contents:
|
||||
self[item_path][self.COL_IMG] = True
|
||||
if self[item_path][self.COL_TYPE] == 'image':
|
||||
self.selected_image = name
|
||||
|
||||
if deps:
|
||||
#print("Dependencies of %s are %s" % (name, deps))
|
||||
# add all of the deps and set their binb to this item
|
||||
for dep in deps.split(" "):
|
||||
# FIXME: this skipping virtuals can't be right? Unless we choose only to show target
|
||||
# packages? In which case we should handle this server side...
|
||||
# If the contents model doesn't already contain dep, add it
|
||||
# We only care to show things which will end up in the
|
||||
# resultant image, so filter cross and native recipes
|
||||
dep_included = self.contents_includes_name(dep)
|
||||
path = self.find_path_for_item(dep)
|
||||
if not dep_included and not dep.endswith("-native") and not dep.endswith("-cross"):
|
||||
if not dep.startswith("virtual") and not self.contents_includes_name(dep):
|
||||
path = self.find_path_for_item(dep)
|
||||
if path:
|
||||
self.include_item(path, name, image_contents)
|
||||
self.include_item(path, name)
|
||||
else:
|
||||
pass
|
||||
# Set brought in by for any no longer orphan packages
|
||||
elif dep_included and path:
|
||||
if not self[path][self.COL_BINB]:
|
||||
self[path][self.COL_BINB] = name
|
||||
|
||||
"""
|
||||
Find the model path for the item_name
|
||||
@@ -474,122 +307,40 @@ class TaskListModel(gtk.ListStore):
|
||||
Empty self.contents by setting the include of each entry to None
|
||||
"""
|
||||
def reset(self):
|
||||
# Deselect images - slightly more complex logic so that we don't
|
||||
# have to iterate all of the contents of the main model, instead
|
||||
# just iterate the images model.
|
||||
if self.selected_image:
|
||||
iit = self.images.get_iter_first()
|
||||
while iit:
|
||||
pit = self.images.convert_iter_to_child_iter(iit)
|
||||
self.set(pit, self.COL_INC, False)
|
||||
iit = self.images.iter_next(iit)
|
||||
self.selected_image = None
|
||||
|
||||
it = self.contents.get_iter_first()
|
||||
while it:
|
||||
oit = self.contents.convert_iter_to_child_iter(it)
|
||||
self.set(oit,
|
||||
self.COL_INC, False,
|
||||
self.COL_BINB, "",
|
||||
self.COL_IMG, False)
|
||||
path = self.contents.get_path(it)
|
||||
opath = self.contents.convert_path_to_child_path(path)
|
||||
self[opath][self.COL_INC] = False
|
||||
self[opath][self.COL_BINB] = ""
|
||||
# As we've just removed the first item...
|
||||
it = self.contents.get_iter_first()
|
||||
|
||||
"""
|
||||
Returns two lists. One of user selected packages and the other containing
|
||||
all selected packages
|
||||
Returns True if one of the selected tasks is an image, False otherwise
|
||||
"""
|
||||
def get_selected_packages(self):
|
||||
allpkgs = []
|
||||
userpkgs = []
|
||||
|
||||
it = self.contents.get_iter_first()
|
||||
def targets_contains_image(self):
|
||||
it = self.images.get_iter_first()
|
||||
while it:
|
||||
sel = self.contents.get_value(it, self.COL_BINB) == "User Selected"
|
||||
name = self.contents.get_value(it, self.COL_NAME)
|
||||
allpkgs.append(name)
|
||||
if sel:
|
||||
userpkgs.append(name)
|
||||
it = self.contents.iter_next(it)
|
||||
return userpkgs, allpkgs
|
||||
|
||||
def image_contents_removed(self):
|
||||
it = self.get_iter_first()
|
||||
while it:
|
||||
sel = self.get_value(it, self.COL_INC)
|
||||
img = self.get_value(it, self.COL_IMG)
|
||||
if img and not sel:
|
||||
path = self.images.get_path(it)
|
||||
inc = self.images[path][self.COL_INC]
|
||||
if inc:
|
||||
return True
|
||||
it = self.iter_next(it)
|
||||
it = self.images.iter_next(it)
|
||||
return False
|
||||
|
||||
def get_build_rep(self):
|
||||
userpkgs, allpkgs = self.get_selected_packages()
|
||||
# If base image contents have been removed start from an empty rootfs
|
||||
if not self.selected_image or self.image_contents_removed():
|
||||
image = "empty"
|
||||
else:
|
||||
image = self.selected_image
|
||||
"""
|
||||
Return a list of all selected items which are not -native or -cross
|
||||
"""
|
||||
def get_targets(self):
|
||||
tasks = []
|
||||
|
||||
return BuildRep(" ".join(userpkgs), " ".join(allpkgs), image)
|
||||
|
||||
def find_reverse_depends(self, pn):
|
||||
revdeps = []
|
||||
it = self.contents.get_iter_first()
|
||||
|
||||
while it:
|
||||
name = self.contents.get_value(it, self.COL_NAME)
|
||||
itype = self.contents.get_value(it, self.COL_TYPE)
|
||||
deps = self.contents.get_value(it, self.COL_DEPS)
|
||||
|
||||
path = self.contents.get_path(it)
|
||||
name = self.contents[path][self.COL_NAME]
|
||||
stype = self.contents[path][self.COL_TYPE]
|
||||
if not name.count('-native') and not name.count('-cross'):
|
||||
tasks.append(name)
|
||||
it = self.contents.iter_next(it)
|
||||
|
||||
if not itype == 'package':
|
||||
continue
|
||||
|
||||
if deps.count(pn) != 0:
|
||||
revdeps.append(name)
|
||||
|
||||
if pn in revdeps:
|
||||
revdeps.remove(pn)
|
||||
return revdeps
|
||||
|
||||
def set_selected_image(self, img):
|
||||
self.selected_image = img
|
||||
path = self.find_path_for_item(img)
|
||||
self.include_item(item_path=path,
|
||||
binb="User Selected",
|
||||
image_contents=True)
|
||||
|
||||
self.emit("image-changed", self.selected_image)
|
||||
|
||||
def set_selected_packages(self, pkglist):
|
||||
selected = pkglist
|
||||
it = self.get_iter_first()
|
||||
|
||||
while it:
|
||||
name = self.get_value(it, self.COL_NAME)
|
||||
if name in pkglist:
|
||||
pkglist.remove(name)
|
||||
path = self.get_path(it)
|
||||
self.include_item(item_path=path,
|
||||
binb="User Selected")
|
||||
if len(pkglist) == 0:
|
||||
return
|
||||
it = self.iter_next(it)
|
||||
|
||||
def find_image_path(self, image):
|
||||
it = self.images.get_iter_first()
|
||||
|
||||
while it:
|
||||
image_name = self.images.get_value(it, self.COL_NAME)
|
||||
if image_name == image:
|
||||
path = self.images.get_value(it, self.COL_PATH)
|
||||
meta_pattern = "(\S*)/(meta*/)(\S*)"
|
||||
meta_match = re.search(meta_pattern, path)
|
||||
if meta_match:
|
||||
_, lyr, bbrel = path.partition(meta_match.group(2))
|
||||
if bbrel:
|
||||
path = bbrel
|
||||
return path
|
||||
it = self.images.iter_next(it)
|
||||
return tasks
|
||||
|
||||
@@ -199,13 +199,10 @@ class gtkthread(threading.Thread):
|
||||
def main(server, eventHandler):
|
||||
try:
|
||||
cmdline = server.runCommand(["getCmdLineAction"])
|
||||
if cmdline and not cmdline['action']:
|
||||
print(cmdline['msg'])
|
||||
return
|
||||
elif not cmdline or (cmdline['action'] and cmdline['action'][0] != "generateDotGraph"):
|
||||
if not cmdline or cmdline[0] != "generateDotGraph":
|
||||
print("This UI is only compatible with the -g option")
|
||||
return
|
||||
ret = server.runCommand(["generateDepTreeEvent", cmdline['action'][1], cmdline['action'][2]])
|
||||
ret = server.runCommand(["generateDepTreeEvent", cmdline[1], cmdline[2]])
|
||||
if ret != True:
|
||||
print("Couldn't run command! %s" % ret)
|
||||
return
|
||||
@@ -250,13 +247,13 @@ def main(server, eventHandler):
|
||||
continue
|
||||
|
||||
if isinstance(event, bb.event.CacheLoadCompleted):
|
||||
pbar.hide()
|
||||
gtk.gdk.threads_enter()
|
||||
pbar.update(progress_total, progress_total)
|
||||
gtk.gdk.threads_leave()
|
||||
continue
|
||||
|
||||
if isinstance(event, bb.event.ParseStarted):
|
||||
progress_total = event.total
|
||||
if progress_total == 0:
|
||||
continue
|
||||
gtk.gdk.threads_enter()
|
||||
pbar.set_title("Processing recipes")
|
||||
pbar.update(0, progress_total)
|
||||
|
||||
@@ -82,12 +82,8 @@ def main (server, eventHandler):
|
||||
try:
|
||||
cmdline = server.runCommand(["getCmdLineAction"])
|
||||
if not cmdline:
|
||||
print("Nothing to do. Use 'bitbake world' to build everything, or run 'bitbake --help' for usage information.")
|
||||
return 1
|
||||
elif not cmdline['action']:
|
||||
print(cmdline['msg'])
|
||||
return 1
|
||||
ret = server.runCommand(cmdline['action'])
|
||||
ret = server.runCommand(cmdline)
|
||||
if ret != True:
|
||||
print("Couldn't get default commandline! %s" % ret)
|
||||
return 1
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -80,12 +80,8 @@ def main(server, eventHandler):
|
||||
try:
|
||||
cmdline = server.runCommand(["getCmdLineAction"])
|
||||
if not cmdline:
|
||||
print("Nothing to do. Use 'bitbake world' to build everything, or run 'bitbake --help' for usage information.")
|
||||
return 1
|
||||
elif not cmdline['action']:
|
||||
print(cmdline['msg'])
|
||||
return 1
|
||||
ret = server.runCommand(cmdline['action'])
|
||||
ret = server.runCommand(cmdline)
|
||||
if ret != True:
|
||||
print("Couldn't get default commandline! %s" % ret)
|
||||
return 1
|
||||
@@ -154,17 +150,12 @@ def main(server, eventHandler):
|
||||
logger.info(event._message)
|
||||
continue
|
||||
if isinstance(event, bb.event.ParseStarted):
|
||||
if event.total == 0:
|
||||
continue
|
||||
parseprogress = new_progress("Parsing recipes", event.total).start()
|
||||
continue
|
||||
if isinstance(event, bb.event.ParseProgress):
|
||||
parseprogress.update(event.current)
|
||||
continue
|
||||
if isinstance(event, bb.event.ParseCompleted):
|
||||
if not parseprogress:
|
||||
continue
|
||||
|
||||
parseprogress.finish()
|
||||
print(("Parsing of %d .bb files complete (%d cached, %d parsed). %d targets, %d skipped, %d masked, %d errors."
|
||||
% ( event.total, event.cached, event.parsed, event.virtuals, event.skipped, event.masked, event.errors)))
|
||||
@@ -208,9 +199,6 @@ def main(server, eventHandler):
|
||||
logger.error("Nothing %sPROVIDES '%s' (but %s %sDEPENDS on or otherwise requires it)", r, event._item, ", ".join(event._dependees), r)
|
||||
else:
|
||||
logger.error("Nothing %sPROVIDES '%s'", r, event._item)
|
||||
if event._reasons:
|
||||
for reason in event._reasons:
|
||||
logger.error("%s", reason)
|
||||
continue
|
||||
|
||||
if isinstance(event, bb.runqueue.runQueueTaskStarted):
|
||||
@@ -235,7 +223,6 @@ def main(server, eventHandler):
|
||||
bb.event.StampUpdate,
|
||||
bb.event.ConfigParsed,
|
||||
bb.event.RecipeParsed,
|
||||
bb.event.RecipePreFinalise,
|
||||
bb.runqueue.runQueueEvent,
|
||||
bb.runqueue.runQueueExitWait)):
|
||||
continue
|
||||
|
||||
@@ -232,12 +232,8 @@ class NCursesUI:
|
||||
try:
|
||||
cmdline = server.runCommand(["getCmdLineAction"])
|
||||
if not cmdline:
|
||||
print("Nothing to do. Use 'bitbake world' to build everything, or run 'bitbake --help' for usage information.")
|
||||
return
|
||||
elif not cmdline['action']:
|
||||
print(cmdline['msg'])
|
||||
return
|
||||
ret = server.runCommand(cmdline['action'])
|
||||
ret = server.runCommand(cmdline)
|
||||
if ret != True:
|
||||
print("Couldn't get default commandlind! %s" % ret)
|
||||
return
|
||||
|
||||
@@ -76,7 +76,7 @@ class BBUIEventQueue:
|
||||
self.host, self.port = server.socket.getsockname()
|
||||
|
||||
server.register_function( self.system_quit, "event.quit" )
|
||||
server.register_function( self.send_event, "event.sendpickle" )
|
||||
server.register_function( self.send_event, "event.send" )
|
||||
server.socket.settimeout(1)
|
||||
|
||||
self.EventHandle = self.BBServer.registerEventHandler(self.host, self.port)
|
||||
|
||||
@@ -402,7 +402,7 @@ def fileslocked(files):
|
||||
for lock in locks:
|
||||
bb.utils.unlockfile(lock)
|
||||
|
||||
def lockfile(name, shared=False, retry=True):
|
||||
def lockfile(name, shared=False):
|
||||
"""
|
||||
Use the file fn as a lock file, return when the lock has been acquired.
|
||||
Returns a variable to pass to unlockfile().
|
||||
@@ -418,8 +418,6 @@ def lockfile(name, shared=False, retry=True):
|
||||
op = fcntl.LOCK_EX
|
||||
if shared:
|
||||
op = fcntl.LOCK_SH
|
||||
if not retry:
|
||||
op = op | fcntl.LOCK_NB
|
||||
|
||||
while True:
|
||||
# If we leave the lockfiles lying around there is no problem
|
||||
@@ -444,8 +442,6 @@ def lockfile(name, shared=False, retry=True):
|
||||
lf.close()
|
||||
except Exception:
|
||||
continue
|
||||
if not retry:
|
||||
return None
|
||||
|
||||
def unlockfile(lf):
|
||||
"""
|
||||
@@ -856,16 +852,3 @@ def to_boolean(string, default=None):
|
||||
return False
|
||||
else:
|
||||
raise ValueError("Invalid value for to_boolean: %s" % string)
|
||||
|
||||
def contains(variable, checkvalues, truevalue, falsevalue, d):
|
||||
val = d.getVar(variable, True)
|
||||
if not val:
|
||||
return falsevalue
|
||||
val = set(val.split())
|
||||
if isinstance(checkvalues, basestring):
|
||||
checkvalues = set(checkvalues.split())
|
||||
else:
|
||||
checkvalues = set(checkvalues)
|
||||
if checkvalues.issubset(val):
|
||||
return truevalue
|
||||
return falsevalue
|
||||
|
||||
@@ -1,11 +0,0 @@
|
||||
__version__ = "1.0.0"
|
||||
|
||||
import os, time
|
||||
import sys,logging
|
||||
|
||||
def init_logger(logfile, loglevel):
|
||||
numeric_level = getattr(logging, loglevel.upper(), None)
|
||||
if not isinstance(numeric_level, int):
|
||||
raise ValueError('Invalid log level: %s' % loglevel)
|
||||
logging.basicConfig(level=numeric_level, filename=logfile)
|
||||
|
||||
@@ -1,100 +0,0 @@
|
||||
import logging
|
||||
import os.path
|
||||
import errno
|
||||
import sys
|
||||
import warnings
|
||||
import sqlite3
|
||||
|
||||
try:
|
||||
import sqlite3
|
||||
except ImportError:
|
||||
from pysqlite2 import dbapi2 as sqlite3
|
||||
|
||||
sqlversion = sqlite3.sqlite_version_info
|
||||
if sqlversion[0] < 3 or (sqlversion[0] == 3 and sqlversion[1] < 3):
|
||||
raise Exception("sqlite3 version 3.3.0 or later is required.")
|
||||
|
||||
class NotFoundError(StandardError):
|
||||
pass
|
||||
|
||||
class PRTable():
|
||||
def __init__(self,cursor,table):
|
||||
self.cursor = cursor
|
||||
self.table = table
|
||||
|
||||
#create the table
|
||||
self._execute("CREATE TABLE IF NOT EXISTS %s \
|
||||
(version TEXT NOT NULL, \
|
||||
checksum TEXT NOT NULL, \
|
||||
value INTEGER, \
|
||||
PRIMARY KEY (version,checksum));"
|
||||
% table)
|
||||
|
||||
def _execute(self, *query):
|
||||
"""Execute a query, waiting to acquire a lock if necessary"""
|
||||
count = 0
|
||||
while True:
|
||||
try:
|
||||
return self.cursor.execute(*query)
|
||||
except sqlite3.OperationalError as exc:
|
||||
if 'database is locked' in str(exc) and count < 500:
|
||||
count = count + 1
|
||||
continue
|
||||
raise
|
||||
except sqlite3.IntegrityError as exc:
|
||||
print "Integrity error %s" % str(exc)
|
||||
break
|
||||
|
||||
def getValue(self, version, checksum):
|
||||
data=self._execute("SELECT value FROM %s WHERE version=? AND checksum=?;" % self.table,
|
||||
(version,checksum))
|
||||
row=data.fetchone()
|
||||
if row != None:
|
||||
return row[0]
|
||||
else:
|
||||
#no value found, try to insert
|
||||
self._execute("INSERT INTO %s VALUES (?, ?, (select ifnull(max(value)+1,0) from %s where version=?));"
|
||||
% (self.table,self.table),
|
||||
(version,checksum,version))
|
||||
data=self._execute("SELECT value FROM %s WHERE version=? AND checksum=?;" % self.table,
|
||||
(version,checksum))
|
||||
row=data.fetchone()
|
||||
if row != None:
|
||||
return row[0]
|
||||
else:
|
||||
raise NotFoundError
|
||||
|
||||
class PRData(object):
|
||||
"""Object representing the PR database"""
|
||||
def __init__(self, filename):
|
||||
self.filename=os.path.abspath(filename)
|
||||
#build directory hierarchy
|
||||
try:
|
||||
os.makedirs(os.path.dirname(self.filename))
|
||||
except OSError as e:
|
||||
if e.errno != errno.EEXIST:
|
||||
raise e
|
||||
self.connection=sqlite3.connect(self.filename, timeout=5,
|
||||
isolation_level=None)
|
||||
self.cursor=self.connection.cursor()
|
||||
self._tables={}
|
||||
|
||||
def __del__(self):
|
||||
print "PRData: closing DB %s" % self.filename
|
||||
self.connection.close()
|
||||
|
||||
def __getitem__(self,tblname):
|
||||
if not isinstance(tblname, basestring):
|
||||
raise TypeError("tblname argument must be a string, not '%s'" %
|
||||
type(tblname))
|
||||
if tblname in self._tables:
|
||||
return self._tables[tblname]
|
||||
else:
|
||||
tableobj = self._tables[tblname] = PRTable(self.cursor, tblname)
|
||||
return tableobj
|
||||
|
||||
def __delitem__(self, tblname):
|
||||
if tblname in self._tables:
|
||||
del self._tables[tblname]
|
||||
logging.info("drop table %s" % (tblname))
|
||||
self.cursor.execute("DROP TABLE IF EXISTS %s;" % tblname)
|
||||
@@ -1,198 +0,0 @@
|
||||
import os,sys,logging
|
||||
import signal,time, atexit
|
||||
from SimpleXMLRPCServer import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
|
||||
import xmlrpclib,sqlite3
|
||||
|
||||
import bb.server.xmlrpc
|
||||
import prserv
|
||||
import prserv.db
|
||||
|
||||
if sys.hexversion < 0x020600F0:
|
||||
print("Sorry, python 2.6 or later is required.")
|
||||
sys.exit(1)
|
||||
|
||||
class Handler(SimpleXMLRPCRequestHandler):
|
||||
def _dispatch(self,method,params):
|
||||
try:
|
||||
value=self.server.funcs[method](*params)
|
||||
except:
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
raise
|
||||
return value
|
||||
|
||||
class PRServer(SimpleXMLRPCServer):
|
||||
pidfile="/tmp/PRServer.pid"
|
||||
def __init__(self, dbfile, logfile, interface, daemon=True):
|
||||
''' constructor '''
|
||||
SimpleXMLRPCServer.__init__(self, interface,
|
||||
requestHandler=SimpleXMLRPCRequestHandler,
|
||||
logRequests=False, allow_none=True)
|
||||
self.dbfile=dbfile
|
||||
self.daemon=daemon
|
||||
self.logfile=logfile
|
||||
self.host, self.port = self.socket.getsockname()
|
||||
self.db=prserv.db.PRData(dbfile)
|
||||
self.table=self.db["PRMAIN"]
|
||||
|
||||
self.register_function(self.getPR, "getPR")
|
||||
self.register_function(self.quit, "quit")
|
||||
self.register_function(self.ping, "ping")
|
||||
self.register_introspection_functions()
|
||||
|
||||
def ping(self):
|
||||
return not self.quit
|
||||
|
||||
def getPR(self, version, checksum):
|
||||
try:
|
||||
return self.table.getValue(version,checksum)
|
||||
except prserv.NotFoundError:
|
||||
logging.error("can not find value for (%s, %s)",version,checksum)
|
||||
return None
|
||||
except sqlite3.Error as exc:
|
||||
logging.error(str(exc))
|
||||
return None
|
||||
|
||||
def quit(self):
|
||||
self.quit=True
|
||||
return
|
||||
|
||||
def _serve_forever(self):
|
||||
self.quit = False
|
||||
self.timeout = 0.5
|
||||
while not self.quit:
|
||||
self.handle_request()
|
||||
|
||||
logging.info("PRServer: stopping...")
|
||||
self.server_close()
|
||||
return
|
||||
|
||||
def start(self):
|
||||
if self.daemon is True:
|
||||
logging.info("PRServer: starting daemon...")
|
||||
self.daemonize()
|
||||
else:
|
||||
logging.info("PRServer: starting...")
|
||||
self._serve_forever()
|
||||
|
||||
def delpid(self):
|
||||
os.remove(PRServer.pidfile)
|
||||
|
||||
def daemonize(self):
|
||||
"""
|
||||
See Advanced Programming in the UNIX, Sec 13.3
|
||||
"""
|
||||
os.umask(0)
|
||||
|
||||
try:
|
||||
pid = os.fork()
|
||||
if pid > 0:
|
||||
sys.exit(0)
|
||||
except OSError as e:
|
||||
sys.stderr.write("1st fork failed: %d %s\n" % (e.errno, e.strerror))
|
||||
sys.exit(1)
|
||||
|
||||
os.setsid()
|
||||
"""
|
||||
fork again to make sure the daemon is not session leader,
|
||||
which prevents it from acquiring controlling terminal
|
||||
"""
|
||||
try:
|
||||
pid = os.fork()
|
||||
if pid > 0: #parent
|
||||
sys.exit(0)
|
||||
except OSError as e:
|
||||
sys.stderr.write("2nd fork failed: %d %s\n" % (e.errno, e.strerror))
|
||||
sys.exit(1)
|
||||
|
||||
os.chdir("/")
|
||||
|
||||
sys.stdout.flush()
|
||||
sys.stderr.flush()
|
||||
si = file('/dev/null', 'r')
|
||||
so = file(self.logfile, 'a+')
|
||||
se = so
|
||||
os.dup2(si.fileno(),sys.stdin.fileno())
|
||||
os.dup2(so.fileno(),sys.stdout.fileno())
|
||||
os.dup2(se.fileno(),sys.stderr.fileno())
|
||||
|
||||
# write pidfile
|
||||
atexit.register(self.delpid)
|
||||
pid = str(os.getpid())
|
||||
pf = file(PRServer.pidfile, 'w+')
|
||||
pf.write("%s\n" % pid)
|
||||
pf.write("%s\n" % self.host)
|
||||
pf.write("%s\n" % self.port)
|
||||
pf.close()
|
||||
|
||||
self._serve_forever()
|
||||
|
||||
class PRServerConnection():
|
||||
def __init__(self, host, port):
|
||||
self.connection = bb.server.xmlrpc._create_server(host, port)
|
||||
self.host = host
|
||||
self.port = port
|
||||
|
||||
def terminate(self):
|
||||
# Don't wait for server indefinitely
|
||||
import socket
|
||||
socket.setdefaulttimeout(2)
|
||||
try:
|
||||
self.connection.quit()
|
||||
except:
|
||||
pass
|
||||
|
||||
def getPR(self, version, checksum):
|
||||
return self.connection.getPR(version, checksum)
|
||||
|
||||
def ping(self):
|
||||
return self.connection.ping()
|
||||
|
||||
def start_daemon(options):
|
||||
try:
|
||||
pf = file(PRServer.pidfile,'r')
|
||||
pid = int(pf.readline().strip())
|
||||
pf.close()
|
||||
except IOError:
|
||||
pid = None
|
||||
|
||||
if pid:
|
||||
sys.stderr.write("pidfile %s already exist. Daemon already running?\n"
|
||||
% PRServer.pidfile)
|
||||
sys.exit(1)
|
||||
|
||||
server = PRServer(options.dbfile, interface=(options.host, options.port),
|
||||
logfile=os.path.abspath(options.logfile))
|
||||
server.start()
|
||||
|
||||
def stop_daemon():
|
||||
try:
|
||||
pf = file(PRServer.pidfile,'r')
|
||||
pid = int(pf.readline().strip())
|
||||
host = pf.readline().strip()
|
||||
port = int(pf.readline().strip())
|
||||
pf.close()
|
||||
except IOError:
|
||||
pid = None
|
||||
|
||||
if not pid:
|
||||
sys.stderr.write("pidfile %s does not exist. Daemon not running?\n"
|
||||
% PRServer.pidfile)
|
||||
sys.exit(1)
|
||||
|
||||
PRServerConnection(host,port).terminate()
|
||||
time.sleep(0.5)
|
||||
|
||||
try:
|
||||
while 1:
|
||||
os.kill(pid,signal.SIGTERM)
|
||||
time.sleep(0.1)
|
||||
except OSError as err:
|
||||
err = str(err)
|
||||
if err.find("No such process") > 0:
|
||||
if os.path.exists(PRServer.pidfile):
|
||||
os.remove(PRServer.pidfile)
|
||||
else:
|
||||
print err
|
||||
sys.exit(1)
|
||||
|
||||
@@ -52,20 +52,6 @@ STYLESHEET = $(DOC)/*.css
|
||||
|
||||
endif
|
||||
|
||||
ifeq ($(DOC),dev-manual)
|
||||
XSLTOPTS = --stringparam html.stylesheet style.css \
|
||||
--stringparam chapter.autolabel 1 \
|
||||
--stringparam section.autolabel 1 \
|
||||
--stringparam section.label.includes.component.label 1 \
|
||||
--xinclude
|
||||
ALLPREQ = html pdf tarball
|
||||
TARFILES = style.css dev-manual.html dev-manual.pdf figures/dev-title.png
|
||||
MANUALS = $(DOC)/$(DOC).html $(DOC)/$(DOC).pdf
|
||||
FIGURES = figures
|
||||
STYLESHEET = $(DOC)/*.css
|
||||
|
||||
endif
|
||||
|
||||
ifeq ($(DOC),yocto-project-qs)
|
||||
XSLTOPTS = --stringparam html.stylesheet style.css \
|
||||
--xinclude
|
||||
|
||||
@@ -6,20 +6,19 @@
|
||||
<para>
|
||||
Recall that earlier we talked about how to use an existing toolchain
|
||||
tarball that had been installed into <filename>/opt/poky</filename>,
|
||||
which is outside of the Yocto Project build tree
|
||||
which is outside of the Poky build environment
|
||||
(see <xref linkend='using-an-existing-toolchain-tarball'>
|
||||
“Using an Existing Toolchain Tarball”)</xref>.
|
||||
And, that sourcing your architecture-specific environment setup script
|
||||
initializes a suitable cross-toolchain development environment.
|
||||
initializes a suitable development environment.
|
||||
This setup occurs by adding the compiler, QEMU scripts, QEMU binary,
|
||||
a special version of <filename>pkgconfig</filename> and other useful
|
||||
utilities to the <filename>PATH</filename> variable.
|
||||
Variables to assist <filename>pkgconfig</filename> and <filename>autotools</filename>
|
||||
are also defined so that,
|
||||
Variables to assist pkgconfig and autotools are also defined so that,
|
||||
for example, <filename>configure.sh</filename> can find pre-generated
|
||||
test results for tests that need target hardware on which to run.
|
||||
These conditions allow you to easily use the toolchain outside of the
|
||||
Yocto Project build environment on both autotools-based projects and
|
||||
Poky build environment on both autotools-based projects and
|
||||
makefile-based projects.
|
||||
</para>
|
||||
|
||||
|
||||
@@ -29,7 +29,7 @@
|
||||
<orderedlist>
|
||||
<listitem><para>Be sure the optimal version of Eclipse IDE
|
||||
is installed.</para></listitem>
|
||||
<listitem><para>Install Eclipse plug-in requirements prior to installing
|
||||
<listitem><para>Install required Eclipse plug-ins prior to installing
|
||||
the Eclipse Yocto Plug-in.</para></listitem>
|
||||
<listitem><para>Configure the Eclipse Yocto Plug-in.</para></listitem>
|
||||
</orderedlist>
|
||||
@@ -38,7 +38,7 @@
|
||||
<section id='installing-eclipse-ide'>
|
||||
<title>Installing Eclipse IDE</title>
|
||||
<para>
|
||||
It is recommended that you have the Indigo 3.7 version of the
|
||||
It is recommended that you have the Helios 3.6.1 version of the
|
||||
Eclipse IDE installed on your development system.
|
||||
If you don’t have this version you can find it at
|
||||
<ulink url='http://www.eclipse.org/downloads'></ulink>.
|
||||
@@ -78,14 +78,14 @@
|
||||
<title>Installing Required Plug-ins and the Eclipse Yocto Plug-in</title>
|
||||
<para>
|
||||
Before installing the Yocto Plug-in you need to be sure that the
|
||||
CDT 8.0, RSE 3.2, and Autotools plug-ins are all installed in the
|
||||
CDT 7.0, RSE 3.2, and Autotools plug-ins are all installed in the
|
||||
following order.
|
||||
After installing these three plug-ins, you can install the
|
||||
Eclipse Yocto Plug-in.
|
||||
Use the following URLs for the plug-ins:
|
||||
<orderedlist>
|
||||
<listitem><para><emphasis>CDT 8.0</emphasis> –
|
||||
<ulink url='http://download.eclipse.org/tools/cdt/releases/indigo/'></ulink>:
|
||||
<listitem><para><emphasis>CDT 7.0</emphasis> –
|
||||
<ulink url='http://download.eclipse.org/tools/cdt/releases/helios/'></ulink>:
|
||||
For CDT main features select the checkbox so you get all items.
|
||||
For CDT optional features expand the selections and check
|
||||
“C/C++ Remote Launch”.</para></listitem>
|
||||
@@ -147,26 +147,26 @@
|
||||
<section id='configuring-the-cross-compiler-options'>
|
||||
<title>Configuring the Cross-Compiler Options</title>
|
||||
<para>
|
||||
Choose between ‘Stand-alone Prebuilt Toolchain’ and ‘Build System Derived Toolchain’ for Cross
|
||||
Choose between ‘SDK Root Mode’ and ‘Poky Tree Mode’ for Cross
|
||||
Compiler Options.
|
||||
<itemizedlist>
|
||||
<listitem><para><emphasis>Stand-alone Prebuilt Toolchain</emphasis> – Select this mode
|
||||
when you are not concerned with building a target image or you do not have
|
||||
a Yocto Project build tree on your development system.
|
||||
<listitem><para><emphasis>SDK Root Mode</emphasis> – Select this mode
|
||||
when you are not concerned with building an image or you do not have
|
||||
a Poky build tree on your system.
|
||||
For example, suppose you are an application developer and do not
|
||||
need to build a target image.
|
||||
Instead, you just want to use an architecture-specific toolchain on an
|
||||
existing kernel and target root filesystem.
|
||||
When you use Stand-alone Prebuilt Toolchain you are using the toolchain installed
|
||||
need to build an image.
|
||||
You just want to use an architecture-specific toolchain on an
|
||||
existing kernel and root filesystem.
|
||||
When you use SDK Root Mode you are using the toolchain installed
|
||||
in the <filename>/opt/poky</filename> directory.</para></listitem>
|
||||
<listitem><para><emphasis>Build System Derived Toolchain</emphasis> – Select this mode
|
||||
if you are building images for target hardware or your
|
||||
development environment already has a Yocto Project build tree.
|
||||
In this case you likely already have a Yocto Project build tree installed on
|
||||
<listitem><para><emphasis>Poky Tree Mode</emphasis> – Select this mode
|
||||
if you are concerned with building images for hardware or your
|
||||
development environment already has a build tree.
|
||||
In this case you likely already have a Poky build tree installed on
|
||||
your system or you (or someone else) will be building one.
|
||||
When you select Build System Derived Toolchain you are using the toolchain bundled
|
||||
inside the Yocto Project build tree.
|
||||
If you use this mode you must also supply the Yocto Project build directory
|
||||
When you use the Poky Tree Mode you are using the toolchain bundled
|
||||
inside the Poky build tree.
|
||||
If you use this mode you must also supply the Poky Root Location
|
||||
in the Preferences Dialog.</para></listitem>
|
||||
</itemizedlist>
|
||||
</para>
|
||||
@@ -175,11 +175,11 @@
|
||||
<section id='configuring-the-sysroot'>
|
||||
<title>Configuring the Sysroot</title>
|
||||
<para>
|
||||
Specify the sysroot location, which is where the root filesystem for the
|
||||
target hardware is created on the development system by the ADT Installer.
|
||||
The QEMU user-space tools, the
|
||||
NFS boot process and the cross-toolchain all use the sysroot location
|
||||
regardless of wheather you select (Stand-alone Prebuilt Toolchain or Build System Derived Toolchain).
|
||||
Specify the sysroot, which is used by both the QEMU user-space
|
||||
NFS boot process and by the cross-toolchain regardless of the
|
||||
mode you select (SDK Root Mode or Poky Tree Mode).
|
||||
For example, sysroot is the location to which you extract the
|
||||
downloaded image’s root filesystem to through the ADT Installer.
|
||||
</para>
|
||||
</section>
|
||||
|
||||
@@ -212,11 +212,10 @@
|
||||
<listitem><para><emphasis>QEMU</emphasis> – Select this option if
|
||||
you will be using the QEMU emulator.
|
||||
If you are using the emulator you also need to locate the Kernel
|
||||
and specify any custom options.</para>
|
||||
<para>If you select Build System Derived Toolchain the target kernel you built
|
||||
will be located in the
|
||||
Yocto Project build tree in <filename>tmp/deploy/images</filename> directory.
|
||||
If you select Stand-alone Prebuilt Toolchain the pre-built kernel you downloaded is located
|
||||
and you can specify custom options.</para>
|
||||
<para>In Poky Tree Mode the kernel you built will be located in the
|
||||
Poky Build tree in <filename>tmp/deploy/images</filename> directory.
|
||||
In SDK Root Mode the pre-built kernel you downloaded is located
|
||||
in the directory you specified when you downloaded the image.</para>
|
||||
<para>Most custom options are for advanced QEMU users to further
|
||||
customize their QEMU instance.
|
||||
@@ -288,10 +287,10 @@
|
||||
You can change these settings for a given project by following these steps:
|
||||
<orderedlist>
|
||||
<listitem><para>Select Project -> Invoke Yocto Tools -> Reconfigure Yocto.
|
||||
This brings up the project's Yocto Settings Dialog.
|
||||
This brings up the project Yocto Settings Dialog.
|
||||
Settings are inherited from the default project configuration.
|
||||
The information in this dialogue is identical to that chosen earlier
|
||||
for the Cross Compiler Option (Stand-alone Prebuilt Toolchain or Build System Derived Toolchain),
|
||||
for the Cross Compiler Option (SDK Root Mode or Poky Tree Mode),
|
||||
the Target Architecture, and the Target Options.
|
||||
The settings are inherited from the Yocto Plug-in configuration performed
|
||||
after installing the plug-in.</para></listitem>
|
||||
@@ -309,7 +308,7 @@
|
||||
<title>Building the Project</title>
|
||||
<para>
|
||||
To build the project, select Project -> Build Project.
|
||||
The console should update and you can note the cross-compiler you are using.
|
||||
You should see the console updated and you can note the cross-compiler you are using.
|
||||
</para>
|
||||
</section>
|
||||
|
||||
@@ -401,10 +400,10 @@
|
||||
on your local host machine.
|
||||
The oprofile-server is installed by default in the image.</para></listitem>
|
||||
<listitem><para><emphasis>Lttng-ust:</emphasis> Selecting this tool runs
|
||||
<filename>usttrace</filename> on the remote target, transfers the output data back to the
|
||||
local host machine and uses <filename>lttv-gui</filename> to graphically display the output.
|
||||
The <filename>lttv-gui</filename> must be installed on the local host machine to use this tool.
|
||||
For information on how to use <filename>lttng</filename> to trace an application, see
|
||||
"usttrace" on the remote target, transfers the output data back to the
|
||||
local host machine and uses "lttv-gui" to graphically display the output.
|
||||
The "lttv-gui" must be installed on the local host machine to use this tool.
|
||||
For information on how to use "lttng" to trace an application, see
|
||||
<ulink url='http://lttng.org/files/ust/manual/ust.html'></ulink>.</para>
|
||||
<para>For "Application" you must supply the absolute path name of the
|
||||
application to be traced by user mode lttng.
|
||||
@@ -418,10 +417,10 @@
|
||||
new view called "powertop".</para>
|
||||
<para>"Time to gather data(sec):" is the time passed in seconds before data
|
||||
is gathered from the remote target for analysis.</para>
|
||||
<para>"show pids in wakeups list:" corresponds to the <filename>-p</filename> argument
|
||||
passed to <filename>powertop</filename>.</para></listitem>
|
||||
<para>"show pids in wakeups list:" corresponds to the -p argument
|
||||
passed to "powertop".</para></listitem>
|
||||
<listitem><para><emphasis>LatencyTOP and Perf:</emphasis> "LatencyTOP"
|
||||
identifies system latency, while <filename>perf</filename> monitors the system's
|
||||
identifies system latency, while "perf" monitors the system's
|
||||
performance counter registers.
|
||||
Selecting either of these tools causes an RSE terminal view to appear
|
||||
from which you can run the tools.
|
||||
|
||||
@@ -21,7 +21,7 @@
|
||||
</para>
|
||||
<para>
|
||||
Additionally, to provide an effective development platform, the Yocto Project
|
||||
makes available and suggests other tools you can use with the ADT.
|
||||
makes available and suggests other tools as part of the ADT.
|
||||
These other tools include the Eclipse IDE Yocto Plug-in, an emulator (QEMU),
|
||||
and various user-space tools that greatly enhance your development experience.
|
||||
</para>
|
||||
@@ -35,9 +35,7 @@
|
||||
<title>The Cross-Toolchain</title>
|
||||
<para>
|
||||
The cross-toolchain consists of a cross-compiler, cross-linker, and cross-debugger
|
||||
that are used to develop for targeted hardware.
|
||||
This toolchain is created either by running the ADT Installer script or
|
||||
through a Yocto Project build tree that is based on your metadata
|
||||
that are all generated through a Poky build that is based on your metadata
|
||||
configuration or extension for your targeted device.
|
||||
The cross-toolchain works with a matching target sysroot.
|
||||
</para>
|
||||
@@ -57,19 +55,9 @@
|
||||
<title>The QEMU Emulator</title>
|
||||
<para>
|
||||
The QEMU emulator allows you to simulate your hardware while running your
|
||||
application or image.
|
||||
QEMU is made available a number of ways:
|
||||
<itemizedlist>
|
||||
<listitem><para>If you use the ADT Installer script to install ADT you can
|
||||
specify whether or not to install QEMU.</para></listitem>
|
||||
<listitem><para>If you have downloaded a Yocto Project release and unpacked
|
||||
it to create a Yocto Project source directory followed by sourcing
|
||||
the Yocto Project environment setup script, QEMU is installed and automatically
|
||||
available.</para></listitem>
|
||||
<listitem><para>If you have installed the cross-toolchain
|
||||
tarball followed by sourcing the toolchain's setup environment script, QEMU
|
||||
is installed and automatically available.</para></listitem>
|
||||
</itemizedlist>
|
||||
application or image.
|
||||
QEMU is installed several ways: as part of the Poky tree, ADT installation
|
||||
through a toolchain tarball, or through the ADT Installer.
|
||||
</para>
|
||||
</section>
|
||||
|
||||
|
||||
@@ -38,6 +38,11 @@
|
||||
<date>23 May 2011</date>
|
||||
<revremark>Released with Yocto Project 1.0.1 on 23 May 2011.</revremark>
|
||||
</revision>
|
||||
<revision>
|
||||
<revnumber>1.0.2</revnumber>
|
||||
<date>20 December 2011</date>
|
||||
<revremark>Released with Yocto Project 1.0.2 on 20 December 2011.</revremark>
|
||||
</revision>
|
||||
</revhistory>
|
||||
|
||||
<copyright>
|
||||
|
||||
@@ -8,13 +8,13 @@
|
||||
likely that you will need to customize your development packages installation.
|
||||
For example, if you are developing a minimal image then you might not need
|
||||
certain packages (e.g. graphics support packages).
|
||||
Thus, you would like to be able to remove those packages from your target sysroot.
|
||||
Thus, you would like to be able to remove those packages from your sysroot.
|
||||
</para>
|
||||
|
||||
<section id='package-management-systems'>
|
||||
<title>Package Management Systems</title>
|
||||
<para>
|
||||
The Yocto Project supports the generation of sysroot files using
|
||||
The Yocto Project supports the generation of root filesystem files using
|
||||
three different Package Management Systems (PMS):
|
||||
<itemizedlist>
|
||||
<listitem><para><emphasis>OPKG</emphasis> – A less well known PMS whose use
|
||||
@@ -30,7 +30,7 @@
|
||||
for more information about RPM.</para></listitem>
|
||||
<listitem><para><emphasis>Debian</emphasis> – The PMS for Debian-based systems
|
||||
is built on many PMS tools.
|
||||
The lower-level PMS tool <filename>dpkg</filename> forms the base of the Debian PMS.
|
||||
The lower-level PMS tool dpkg forms the base of the Debian PMS.
|
||||
For information on dpkg see
|
||||
<ulink url='http://en.wikipedia.org/wiki/Dpkg'></ulink>.</para></listitem>
|
||||
</itemizedlist>
|
||||
@@ -44,16 +44,16 @@
|
||||
<filename>PACKAGE_CLASSES</filename> variable in the <filename>conf/local.conf</filename>
|
||||
file is set to reflect that system.
|
||||
The first value you choose for the variable specifies the package file format for the root
|
||||
filesystem at sysroot.
|
||||
filesystem.
|
||||
Additional values specify additional formats for convenience or testing.
|
||||
See the configuration file for details.
|
||||
</para>
|
||||
<para>
|
||||
As an example, consider a scenario where you are using OPKG and you want to add
|
||||
the <filename>libglade</filename> package to the target sysroot.
|
||||
the libglade package to sysroot.
|
||||
</para>
|
||||
<para>
|
||||
First, you should generate the ipk file for the <filename>libglade</filename> package and add it
|
||||
First, you should generate the ipk file for the libglade package and add it
|
||||
into a working opkg repository.
|
||||
Use these commands:
|
||||
<literallayout class='monospaced'>
|
||||
@@ -62,17 +62,17 @@
|
||||
</literallayout>
|
||||
</para>
|
||||
<para>
|
||||
Next, source the environment setup script found in the Yocto Project source directory.
|
||||
Next, source the environment setup script.
|
||||
Follow that by setting up the installation destination to point to your
|
||||
sysroot as <filename><sysroot_dir></filename>.
|
||||
Finally, have an opkg configuration file <filename><conf_file></filename>
|
||||
sysroot as <filename><sysroot dir></filename>.
|
||||
Finally, have an opkg configuration file <filename><conf file></filename>
|
||||
that corresponds to the opkg repository you have just created.
|
||||
The following command forms should now work:
|
||||
<literallayout class='monospaced'>
|
||||
$ opkg-cl –f <conf_file> -o <sysroot-dir> update
|
||||
$ opkg-cl –f <cconf_file>> -o <sysroot-dir> --force-overwrite install libglade
|
||||
$ opkg-cl –f <cconf_file> -o <sysroot-dir> --force-overwrite install libglade-dbg
|
||||
$ opkg-cl –f <conf_file> -o <sysroot-dir> --force-overwrite install libglade-dev
|
||||
$ opkg-cl –f <conf file> -o <sysroot dir> update
|
||||
$ opkg-cl –f <conf file>> -o <sysroot dir> --force-overwrite install libglade
|
||||
$ opkg-cl –f <conf file> -o <sysroot dir> --force-overwrite install libglade-dbg
|
||||
$ opkg-cl –f <conf file> -o <sysroot dir> --force-overwrite install libglade-dev
|
||||
</literallayout>
|
||||
</para>
|
||||
</section>
|
||||
|
||||
@@ -6,159 +6,74 @@
|
||||
<title>Preparing to Use the Application Development Toolkit (ADT)</title>
|
||||
|
||||
<para>
|
||||
In order to use the ADT you must install it, source a script to set up the
|
||||
environment, and be sure the kernel and filesystem image specific to the target architecture
|
||||
exists.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
This section describes how to be sure you meet these requirements.
|
||||
Througout this section two important terms are used:
|
||||
<itemizedlist>
|
||||
<listitem><para><emphasis>Yocto Project Source Tree:</emphasis>
|
||||
This term refers to the directory structure created as a result of downloading
|
||||
and unpacking a Yocto Project release tarball.
|
||||
The Yocto Project source tree contains BitBake, Documentation, Meta-data and
|
||||
other files.
|
||||
The name of the top-level directory of the Yocto Project source tree
|
||||
is derived from the Yocto Project release tarball.
|
||||
For example, downloading and unpacking <filename>poky-bernard-5.0.1.tar.bz2</filename>
|
||||
results in a Yocto Project source tree whose Yocto Project source directory is named
|
||||
<filename>poky-bernard-5.0.1</filename>.</para></listitem>
|
||||
<listitem><para><emphasis>Yocto Project Build Tree:</emphasis>
|
||||
This term refers to the area where you run your builds.
|
||||
The area is created when you source the Yocto Project setup environment script
|
||||
that is found in the Yocto Project source directory
|
||||
(e.g. <filename>poky-init-build-env</filename>).
|
||||
You can create the Yocto Project build tree anywhere you want on your
|
||||
development system.
|
||||
Here is an example that creates the tree in <filename>mybuilds</filename>
|
||||
and names the Yocto Project build directory <filename>YP-5.0.1</filename>:
|
||||
<literallayout class='monospaced'>
|
||||
$ source poky-bernard-5.0.1/poky-init-build-env $HOME/mybuilds/YP-5.0.1
|
||||
</literallayout>
|
||||
If you don't specifically name the build directory then BitBake creates it
|
||||
in the current directory and uses the name <filename>build</filename>.
|
||||
Also, if you supply an existing directory then BitBake uses that
|
||||
directory as the Yocto Project build directory and populates the build tree
|
||||
beneath it.</para></listitem>
|
||||
</itemizedlist>
|
||||
In order to use the ADT it must be installed, the environment setup script must be
|
||||
sourced, and the kernel and filesystem image specific to the target architecture must exist.
|
||||
This section describes how to install the ADT, set up the environment, and provides
|
||||
some reference information on kernels and filesystem images.
|
||||
</para>
|
||||
|
||||
<section id='installing-the-adt'>
|
||||
<title>Installing the ADT</title>
|
||||
|
||||
<para>
|
||||
The following list describes how you can install the ADT, which includes the cross-toolchain.
|
||||
Regardless of the installation you choose, however, you must source the cross-toolchain
|
||||
environment setup script before you use the toolchain.
|
||||
See the <xref linkend='setting-up-the-environment'>“Setting Up the Environment”</xref>
|
||||
section for more information.
|
||||
<itemizedlist>
|
||||
<listitem><para><emphasis>Use the ADT Installer Script:</emphasis>
|
||||
This method is the recommended way to install the ADT because it
|
||||
automates much of the process for you.
|
||||
For example, you can configure the installation to install the QEMU emulator
|
||||
and the user-space NFS, specify which root filesystem profiles to download,
|
||||
and define the target sysroot location.
|
||||
</para></listitem>
|
||||
<listitem><para><emphasis>Use an Existing Toolchain Tarball:</emphasis>
|
||||
Using this method you select and download an architecture-specific
|
||||
toolchain tarball and then hand-install the toolchain.
|
||||
If you use this method you just get the cross-toolchain and QEMU - you do not
|
||||
get any of the other mentioned benefits had you run the ADT Installer script.</para></listitem>
|
||||
<listitem><para><emphasis>Use the Toolchain from Within a Yocto Project Build Tree:</emphasis>
|
||||
If you already have a Yocto Project build tree you can install the cross-toolchain
|
||||
using that tree.
|
||||
However, like the previous method mentioned, you only get the cross-toolchain and QEMU - you
|
||||
do not get any of the other benefits without taking separate steps.</para></listitem>
|
||||
</itemizedlist>
|
||||
You can install the ADT three ways.
|
||||
However, we recommend configuring and running the ADT Installer script.
|
||||
Running this script automates much of the process for you.
|
||||
For example, the script allows you to install the QEMU emulator and
|
||||
user-space NFS, define which root filesystem profiles to download,
|
||||
and allows you to define the target sysroot location.
|
||||
</para>
|
||||
<note>
|
||||
If you need to generate the ADT tarball you can do so using the following command:
|
||||
<literallayout class='monospaced'>
|
||||
$ bitbake adt-installer
|
||||
</literallayout>
|
||||
This command generates the file <filename>adt-installer.tar.bz2</filename>
|
||||
in the <filename>../build/tmp/deploy/sdk</filename> directory.
|
||||
</note>
|
||||
|
||||
<section id='using-the-adt-installer'>
|
||||
<title>Using the ADT Installer</title>
|
||||
|
||||
<section id='configuring-and-running-the-adt-installer'>
|
||||
<title>Configuring and Running the ADT Installer</title>
|
||||
<para>
|
||||
To run the ADT Installer you need to first get the ADT Installer tarball and then run the ADT
|
||||
Installer Script.
|
||||
The ADT Installer is contained in a tarball that can be built using
|
||||
<filename>bitbake adt-installer</filename>.
|
||||
Yocto Project has a pre-built ADT Installer tarball that you can download
|
||||
from <filename>tmp/deploy/sdk</filename> located in the build directory.
|
||||
</para>
|
||||
|
||||
<section id='getting-the-adt-installer-tarball'>
|
||||
<title>Getting the ADT Installer Tarball</title>
|
||||
<note>
|
||||
You can install and run the ADT Installer tarball in any directory you want.
|
||||
</note>
|
||||
|
||||
<para>
|
||||
The ADT Installer is contained in the ADT Installer tarball.
|
||||
You can download the tarball into any directory from
|
||||
<ulink url='http://autobuilder.yoctoproject.org/downloads/yocto-1.0/adt-installer/'></ulink>.
|
||||
Or, you can use BitBake to generate the tarball inside the existing Yocto Project build tree.
|
||||
</para>
|
||||
<para>
|
||||
Before running the ADT Installer you need to configure it by editing
|
||||
the <filename>adt-installer.conf</filename> file, which is located in the
|
||||
directory where the ADT Installer tarball was installed.
|
||||
Your configurations determine which kernel and filesystem image are downloaded.
|
||||
The following list describes the variables you can define for the ADT Installer.
|
||||
For configuration values and restrictions see the comments in
|
||||
the <filename>adt-installer.conf</filename> file:
|
||||
|
||||
<para>
|
||||
If you use BitBake to generate the ADT Installer tarball, you must
|
||||
source the Yocto Project environment setup script located in the Yocto Project
|
||||
source directory before running the BitBake command that creates the tarball.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
The following example commands download the Yocto Project release tarball, create the Yocto
|
||||
Project source tree, set up the environment while also creating the Yocto Project build tree,
|
||||
and finally run the BitBake command that results in the tarball
|
||||
<filename>~/yocto-project/build/tmp/deploy/sdk/adt_installer.tar.bz2</filename>:
|
||||
<literallayout class='monospaced'>
|
||||
$ cd ~
|
||||
$ mkdir yocto-project
|
||||
$ cd yocto-project
|
||||
$ wget http://www.yoctoproject.org/downloads/poky/poky-bernard-5.0.1.tar.bz2
|
||||
$ tar xjf poky-bernard-5.0.1.tar.bz2
|
||||
$ source poky-bernard-5.0.1/poky-init-build-env poky-5.0.1-build
|
||||
$ bitbake adt-installer
|
||||
</literallayout>
|
||||
</para>
|
||||
|
||||
</section>
|
||||
|
||||
<section id='configuring-and-running-the-adt-installer-script'>
|
||||
<title>Configuring and Running the ADT Installer Script</title>
|
||||
|
||||
<para>
|
||||
Before running the ADT Installer script you need to unpack the tarball.
|
||||
You can unpack the tarball in any directory you wish.
|
||||
Unpacking it creates the directory <filename>adt-installer</filename>,
|
||||
which contains the ADT Installer script and its configuration file.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Before you run the script, however, you should examine the ADT Installer configuration
|
||||
file (<filename>adt_installer</filename>) and be sure you are going to get what you want.
|
||||
Your configurations determine which kernel and filesystem image are downloaded.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
The following list describes the configurations you can define for the ADT Installer.
|
||||
For configuration values and restrictions see the comments in
|
||||
the <filename>adt-installer.conf</filename> file:
|
||||
|
||||
<itemizedlist>
|
||||
<listitem><para><filename>YOCTOADT_IPKG_REPO</filename> – This area
|
||||
includes the IPKG-based packages and the root filesystem upon which
|
||||
the installation is based.
|
||||
If you want to set up your own IPKG repository pointed to by
|
||||
<filename>YOCTOADT_IPKG_REPO</filename>, you need to be sure that the
|
||||
directory structure follows the same layout as the reference directory
|
||||
set up at <ulink url='http://adtrepo.yoctoproject.org'></ulink>.
|
||||
Also, your repository needs to be accessible through HTTP.
|
||||
</para></listitem>
|
||||
<listitem><para><filename>YOCTOADT-TARGETS</filename> – The machine
|
||||
target architectures for which you want to set up cross-development
|
||||
environments.
|
||||
</para></listitem>
|
||||
<listitem><para><filename>YOCTOADT_QEMU</filename> – Indicates whether
|
||||
or not to install the emulator QEMU.
|
||||
</para></listitem>
|
||||
<listitem><para><filename>YOCTOADT_NFS_UTIL</filename> – Indicates whether
|
||||
or not to install user-mode NFS.
|
||||
If you plan to use the Yocto Eclipse IDE plug-in against QEMU,
|
||||
you should install NFS.
|
||||
<itemizedlist>
|
||||
<listitem><para><filename>YOCTOADT_IPKG_REPO</filename> – This area
|
||||
includes the IPKG-based packages and the root filesystem upon which
|
||||
the installation is based.
|
||||
If you want to set up your own IPKG repository pointed to by
|
||||
<filename>YOCTOADT_IPKG_REPO</filename>, you need to be sure that the
|
||||
directory structure follows the same layout as the reference directory
|
||||
set up at <ulink url='http://adtrepo.yoctoproject.org'></ulink>.
|
||||
Also, your repository needs to be accessible through HTTP.
|
||||
</para></listitem>
|
||||
<listitem><para><filename>YOCTOADT-TARGETS</filename> – The machine
|
||||
target architectures for which you want to set up cross-development
|
||||
environments.
|
||||
</para></listitem>
|
||||
<listitem><para><filename>YOCTOADT_QEMU</filename> – Indicates whether
|
||||
or not to install the emulator QEMU.
|
||||
</para></listitem>
|
||||
<listitem><para><filename>YOCTOADT_NFS_UTIL</filename> – Indicates whether
|
||||
or not to install user-mode NFS.
|
||||
If you plan to use the Yocto Eclipse IDE plug-in against QEMU,
|
||||
you should install NFS.
|
||||
<note>
|
||||
To boot QEMU images using our userspace NFS server, you need
|
||||
to be running portmap or rpcbind.
|
||||
@@ -168,138 +83,112 @@
|
||||
Your firewall settings may also have to be modified to allow
|
||||
NFS booting to work.
|
||||
</note>
|
||||
</para></listitem>
|
||||
<listitem><para><filename>YOCTOADT_ROOTFS_<arch></filename> - The root
|
||||
filesystem images you want to download from the <filename>YOCTOADT_IPKG_REPO</filename>
|
||||
repository.
|
||||
</para></listitem>
|
||||
<listitem><para><filename>YOCTOADT_TARGET_SYSROOT_IMAGE_<arch></filename> - The
|
||||
particular root filesystem used to extract and create the target sysroot.
|
||||
The value of this variable must have been specified with
|
||||
<filename>YOCTOADT_ROOTFS_<arch></filename>.
|
||||
For example, if you downloaded both <filename>minimal</filename> and
|
||||
<filename>sato-sdk</filename> images by setting <filename>YOCTOADT_ROOTFS_<arch></filename>
|
||||
to "minimal sato-sdk", then <filename>YOCTOADT_ROOTFS_<arch></filename>
|
||||
must be set to either "minimal" or "sato-sdk".
|
||||
</para></listitem>
|
||||
<listitem><para><filename>YOCTOADT_TARGET_SYSROOT_LOC_<arch></filename> - The
|
||||
location on the development host where the target sysroot will be created.
|
||||
</para></listitem>
|
||||
</itemizedlist>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
After you have configured the <filename>adt_installer.conf</filename> file,
|
||||
run the installer using the following command:
|
||||
<literallayout class='monospaced'>
|
||||
$ adt_installer
|
||||
</literallayout>
|
||||
</para>
|
||||
|
||||
<note>
|
||||
The ADT Installer requires the <filename>libtool</filename> package to complete.
|
||||
If you install the recommended packages as described in the
|
||||
<ulink url='http://www.yoctoproject.org/docs/yocto-project-qs/yocto-project-qs.html'>
|
||||
Yocto Project Quick Start</ulink> then you will have libtool installed.
|
||||
</note>
|
||||
|
||||
<para>
|
||||
Once the installer begins to run you are asked whether you want to run in
|
||||
interactive or silent mode.
|
||||
If you want to closely monitor the installation then choose “I” for interactive
|
||||
mode rather than “S” for silent mode.
|
||||
Follow the prompts from the script to complete the installation.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Once the installation completes, the ADT, which includes the cross-toolchain, is installed.
|
||||
You will notice environment setup files for the cross-toolchain in
|
||||
<filename>/opt/poky/$SDKVERSION</filename>,
|
||||
and image tarballs in the <filename>adt-installer</filename>
|
||||
directory according to your installer configurations, and the target sysroot located
|
||||
according to the <filename>YOCTOADT_TARGET_SYSROOT_LOC_<arch></filename> variable
|
||||
also in your configuration file.
|
||||
</para>
|
||||
|
||||
</section>
|
||||
</section>
|
||||
|
||||
<section id='using-an-existing-toolchain-tarball'>
|
||||
<title>Using a Cross-Toolchain Tarball</title>
|
||||
<para>
|
||||
If you want to simply install the cross-toolchain by hand you can do so by using an existing
|
||||
cross-toolchain tarball.
|
||||
If you install the cross-toolchain by hand you will have to set up the target sysroot separately.
|
||||
</para></listitem>
|
||||
<listitem><para><filename>YOCTOADT_ROOTFS_<arch></filename> - The root
|
||||
filesystem images you want to download.
|
||||
</para></listitem>
|
||||
<listitem><para><filename>YOCTOADT_TARGET_SYSROOT_IMAGE_<arch></filename> - The
|
||||
root filesystem used to extract and create the target sysroot.
|
||||
</para></listitem>
|
||||
<listitem><para><filename>YOCTOADT_TARGET_SYSROOT_LOC_<arch></filename> - The
|
||||
location of the target sysroot that will be set up on the development machine.
|
||||
</para></listitem>
|
||||
</itemizedlist>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
After you have configured the <filename>adt-installer.conf</filename> file,
|
||||
run the installer using the following command:
|
||||
<literallayout class='monospaced'>
|
||||
$ adt_installer
|
||||
</literallayout>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Once the installer begins to run you are asked whether you want to run in
|
||||
interactive or silent mode.
|
||||
If you want to closely monitor the installation then choose “I” for interactive
|
||||
mode rather than “S” for silent mode.
|
||||
Follow the prompts from the script to complete the installation.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Once the installation completes, the cross-toolchain is installed in
|
||||
<filename>/opt/poky/$SDKVERSION</filename>.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Before using the ADT you need to run the environment setup script for
|
||||
your target architecture also located in <filename>/opt/poky/$SDKVERSION</filename>.
|
||||
See the <xref linkend='setting-up-the-environment'>“Setting Up the Environment”</xref>
|
||||
section for information.
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section id='using-an-existing-toolchain-tarball'>
|
||||
<title>Using an Existing Toolchain Tarball</title>
|
||||
<para>
|
||||
If you do not want to use the ADT Installer you can install the toolchain
|
||||
and the sysroot by hand.
|
||||
Follow these steps:
|
||||
<orderedlist>
|
||||
<listitem><para>Go to
|
||||
<ulink url='http://autobuilder.yoctoproject.org/downloads/yocto-1.0/toolchain'></ulink>
|
||||
and find the folder that matches your host development system
|
||||
(i.e. 'i686' for 32-bit machines or 'x86_64' for 64-bit machines).</para>
|
||||
</listitem>
|
||||
<listitem><para>Go into that folder and download the toolchain tarball whose name
|
||||
includes the appropriate target architecture.
|
||||
For example, if your host development system is an Intel-based 64-bit system and
|
||||
you are going to use your cross-toolchain for an arm target go into the
|
||||
<filename>x86_64</filename> folder and download the following tarball:
|
||||
<literallayout class='monospaced'>
|
||||
yocto-eglibc-x86_64-arm-toolchain-gmae-1.0.tar.bz2
|
||||
</literallayout>
|
||||
<listitem><para>Locate and download the architecture-specific toolchain
|
||||
tarball from <ulink url='http://autobuilder.yoctoproject.org/downloads/yocto-1.0'></ulink>.
|
||||
Look in the ‘toolchain’ folder and then open up the folder that matches your
|
||||
host development system (i.e. 'i686' for 32-bit machines or 'x86_64'
|
||||
for 64-bit machines).
|
||||
Then, select the toolchain tarball whose name includes the appropriate
|
||||
target architecture.
|
||||
<note>
|
||||
Alternatively you can build the toolchain tarball if you have a Yocto Project build tree.
|
||||
Use the <filename>bitbake meta-toolchain</filename> command after you have
|
||||
sourced the <filename>poky-build-init script</filename> located in the Yocto Project
|
||||
source directory.
|
||||
When the <filename>bitbake</filename> command completes the toolchain tarball will
|
||||
be in <filename>tmp/deploy/sdk</filename> in the Yocto Project build tree.
|
||||
</note></para></listitem>
|
||||
If you need to build the toolchain tarball use the
|
||||
<filename>bitbake meta-toolchain</filename> command after you have
|
||||
sourced the poky-build-init script.
|
||||
The tarball will be located in the build directory at
|
||||
<filename>tmp/deploy/sdk</filename> after the build.
|
||||
</note>
|
||||
</para></listitem>
|
||||
<listitem><para>Make sure you are in the root directory and then expand
|
||||
the tarball.
|
||||
The tarball expands into <filename>/opt/poky/$SDKVERSION</filename>.
|
||||
Once the tarball in unpacked the cross-toolchain is installed.
|
||||
You will notice environment setup files for the cross-toolchain in the directory.
|
||||
The tarball expands into the <filename>/opt/poky/$SDKVERSION</filename> directory.
|
||||
</para></listitem>
|
||||
<listitem><para>Set up the environment by sourcing the environment set up
|
||||
script.
|
||||
See the <xref linkend='setting-up-the-environment'>“Setting Up the Environment”</xref>
|
||||
for information.
|
||||
</para></listitem>
|
||||
</orderedlist>
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section id='using-the-toolchain-from-within-the-build-tree'>
|
||||
<title>Using BitBake and the Yocto Project Build Tree</title>
|
||||
<title>Using the Toolchain from Within the Build Tree</title>
|
||||
<para>
|
||||
A final way of installing just the cross-toolchain is to use BitBake within an existing
|
||||
Yocto Project build tree.
|
||||
Follow these steps:
|
||||
<orderedlist>
|
||||
<listitem><para>Source the environment setup script located in the Yocto Project
|
||||
source directory.
|
||||
The script has the string <filename>init-build-env</filename>
|
||||
as part of the name.</para></listitem>
|
||||
<listitem><para>At this point you should be sure that the
|
||||
<filename>MACHINE</filename> variable
|
||||
in the <filename>local.conf</filename> file is set for the target architecture.
|
||||
You can find the <filename>local.conf</filename> file in the Yocto Project source
|
||||
directory.
|
||||
Comments within the <filename>local.conf</filename> file list the values you
|
||||
can use for the <filename>MACHINE</filename> variable.
|
||||
<note>You can populate the build tree with the cross-toolchains for more
|
||||
than a single architecture.
|
||||
You just need to edit the <filename>MACHINE</filename> variable in the
|
||||
<filename>local.conf</filename> file and re-run the BitBake command.</note></para></listitem>
|
||||
<listitem><para>Run <filename>bitbake meta-ide-support</filename> to complete the
|
||||
cross-toolchain installation.
|
||||
<note>If you change your working directory after you source the environment
|
||||
setup script and before you run the BitBake command the command will not work.
|
||||
Be sure to run the BitBake command immediately after checking or editing the
|
||||
<filename>local.conf</filename> but without changing your working directory.</note>
|
||||
Once BitBake finishes, the cross-toolchain is installed.
|
||||
You will notice environment setup files for the cross-toolchain in the
|
||||
Yocto Project build tree in the <filename>tmp</filename> directory.
|
||||
Setup script filenames contain the strings <filename>environment-setup</filename>.
|
||||
</para></listitem>
|
||||
</orderedlist>
|
||||
A final way of accessing the toolchain is from the build tree.
|
||||
The build tree can be set up to contain the architecture-specific cross toolchain.
|
||||
To populate the build tree with the toolchain you need to run the following command:
|
||||
<literallayout class='monospaced'>
|
||||
$ bitbake meta-ide-support
|
||||
</literallayout>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Before running the command you need to be sure that the
|
||||
<filename>conf/local.conf</filename> file in the build directory has
|
||||
the desired architecture specified for the <filename>MACHINE</filename>
|
||||
variable.
|
||||
See the <filename>local.conf</filename> file for a list of values you
|
||||
can supply for this variable.
|
||||
You can populate the build tree with the cross-toolchains for more
|
||||
than a single architecture.
|
||||
You just need to edit the <filename>local.conf</filename> file and re-run
|
||||
the BitBake command.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Once the build tree has the toolchain you need to source the environment
|
||||
setup script so that you can run the cross-tools without having to locate them.
|
||||
See the <xref linkend='setting-up-the-environment'>“Setting Up the Environment”</xref>
|
||||
for information.
|
||||
</para>
|
||||
</section>
|
||||
</section>
|
||||
@@ -307,14 +196,13 @@
|
||||
<section id='setting-up-the-environment'>
|
||||
<title>Setting Up the Environment</title>
|
||||
<para>
|
||||
Before you can use the cross-toolchain you need to set up the toolchain environment by
|
||||
Before you can use the cross-toolchain you need to set up the environment by
|
||||
sourcing the environment setup script.
|
||||
If you used the ADT Installer or used an existing ADT tarball to install the ADT,
|
||||
If you used adt_installer or used an existing ADT tarball to install the ADT,
|
||||
then you can find this script in the <filename>/opt/poky/$SDKVERSION</filename>
|
||||
directory.
|
||||
If you used BitBake and the Yocto Project Build Tree to install the cross-toolchain
|
||||
then you can find the environment setup scripts in in the Yocto Project build tree
|
||||
in the <filename>tmp</filename> directory.
|
||||
If you are using the ADT from a Poky build tree, then look in the build
|
||||
directory in <filename>tmp</filename> for the setup script.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
@@ -325,7 +213,7 @@
|
||||
For example, the environment setup script for a 64-bit IA-based architecture would
|
||||
be the following:
|
||||
<literallayout class='monospaced'>
|
||||
/opt/poky/1.0/environment-setup-x86_64-poky-linux
|
||||
/opt/poky/environment-setup-x86_64-poky-linux
|
||||
</literallayout>
|
||||
</para>
|
||||
</section>
|
||||
@@ -341,10 +229,10 @@
|
||||
<ulink url='http://www.yoctoproject.org/docs/yocto-quick-start/yocto-project-qs.html'></ulink>.
|
||||
<note>
|
||||
Yocto Project provides basic kernels and filesystem images for several
|
||||
architectures (x86, x86-64, mips, powerpc, and arm) that you can use
|
||||
architectures (x86, x86-64, mips, powerpc, and arm) that can be used
|
||||
unaltered in the QEMU emulator.
|
||||
These kernels and filesystem images reside in the Yocto Project release
|
||||
area - <ulink url='http://autobuilder.yoctoproject.org/downloads/yocto-1.0/machines/'></ulink>
|
||||
area - <ulink url='http://autobuilder.yoctoproject.org/downloads/yocto-1.0/'></ulink>
|
||||
and are ideal for experimentation within Yocto Project.
|
||||
</note>
|
||||
</para>
|
||||
|
||||
@@ -44,6 +44,11 @@
|
||||
<date>23 May 2011</date>
|
||||
<revremark>Released with Yocto Project 1.0.1 on 23 May 2011.</revremark>
|
||||
</revision>
|
||||
<revision>
|
||||
<revnumber>1.0.2</revnumber>
|
||||
<date>20 December 2011</date>
|
||||
<revremark>Released with Yocto Project 1.0.2 on 20 December 2011.</revremark>
|
||||
</revision>
|
||||
</revhistory>
|
||||
|
||||
<copyright>
|
||||
|
||||
@@ -27,20 +27,13 @@
|
||||
of software support of hardware.
|
||||
</para>
|
||||
|
||||
<note><para>
|
||||
The information here does not provide an example of how to create a BSP.
|
||||
For information on how to create a BSP, see the Yocto Project Development Manual or the
|
||||
<ulink url='https://wiki.yoctoproject.org/wiki/Transcript:_creating_one_generic_Atom_BSP_from_another'></ulink>
|
||||
wiki page.
|
||||
</para></note>
|
||||
|
||||
<para>
|
||||
The proposed format does have elements that are specific to the Yocto Project and
|
||||
The proposed format does have elements that are specific to the Poky and
|
||||
OpenEmbedded build systems.
|
||||
It is intended that this information can be
|
||||
used by other systems besides Yocto Project and OpenEmbedded and that it will be simple
|
||||
used by other systems besides Poky and OpenEmbedded and that it will be simple
|
||||
to extract information and convert it to other formats if required.
|
||||
Yocto Project, through its standard layers mechanism, can directly accept the format
|
||||
Poky, through its standard layers mechanism, can directly accept the format
|
||||
described as a layer.
|
||||
The BSP captures all
|
||||
the hardware-specific details in one place in a standard format, which is
|
||||
@@ -93,7 +86,7 @@
|
||||
</literallayout>
|
||||
For more detailed information on layers, see the
|
||||
<ulink url='http://www.yoctoproject.org/docs/poky-ref-manual/poky-ref-manual.html#usingpoky-changes-layers'>
|
||||
BitBake Layers</ulink> section of the Yocto Project Reference Manual.
|
||||
BitBake Layers</ulink> section of the Poky Reference Manual.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
@@ -211,8 +204,8 @@ meta-<bsp_name>/conf/layer.conf
|
||||
</programlisting>
|
||||
|
||||
<para>
|
||||
This file identifies the structure as a Yocto Project layer, identifies the
|
||||
contents of the layer, and contains information about how Yocto Project should use it.
|
||||
This file identifies the structure as a Poky layer, identifies the
|
||||
contents of the layer, and contains information about how Poky should use it.
|
||||
Generally, a standard boilerplate file such as the following works.
|
||||
In the following example you would replace "bsp" and "_bsp" with the actual name
|
||||
of the BSP (i.e. <bsp_name> from the example template).
|
||||
@@ -235,7 +228,7 @@ BBFILE_PRIORITY_bsp = "5"
|
||||
|
||||
<para>
|
||||
This file simply makes BitBake aware of the recipes and configuration directories.
|
||||
This file must exist so that the Yocto Project build system can recognize the BSP.
|
||||
This file must exist so that Poky can recognize the BSP.
|
||||
</para>
|
||||
</section>
|
||||
|
||||
@@ -247,7 +240,7 @@ meta-<bsp_name>/conf/machine/*.conf
|
||||
|
||||
<para>
|
||||
The machine files bind together all the information contained elsewhere
|
||||
in the BSP into a format that the Yocto Project build system can understand.
|
||||
in the BSP into a format that Poky can understand.
|
||||
If the BSP supports multiple machines, multiple machine configuration files
|
||||
can be present.
|
||||
These filenames correspond to the values to which users have set the MACHINE variable.
|
||||
@@ -285,10 +278,10 @@ TARGET_CC_ARCH = "-m32 -march=core2 -msse3 -mtune=generic -mfpmath=sse"
|
||||
<para>
|
||||
The tune file would be included by the machine definition and can be
|
||||
contained in the BSP or referenced from one of the standard core set of
|
||||
files included with the Yocto Project.
|
||||
files included with Poky itself.
|
||||
</para>
|
||||
<para>
|
||||
Both the base package architecture file and the tune file are optional for a BSP layer.
|
||||
Both the base package architecture file and the tune file are optional for a Poky BSP layer.
|
||||
</para>
|
||||
</section>
|
||||
|
||||
@@ -350,8 +343,8 @@ meta-<bsp_name>/recipes-kernel/linux/linux-yocto_git.bbappend
|
||||
This file appends your specific changes to the kernel you are using.
|
||||
</para>
|
||||
<para>
|
||||
For your BSP you typically want to use an existing Yocto Project kernel found in the
|
||||
Yocto Project repository at <filename class='directory'>meta/recipes-kernel/linux</filename>.
|
||||
For your BSP you typically want to use an existing Poky kernel found in the
|
||||
Poky repository at <filename class='directory'>meta/recipes-kernel/kernel</filename>.
|
||||
You can append your specific changes to the kernel recipe by using an append file,
|
||||
which is located in the
|
||||
<filename class='directory'>meta-<bsp_name>/recipes-kernel/linux</filename>
|
||||
@@ -382,7 +375,7 @@ KMACHINE_crownbay = "yocto/standard/crownbay"
|
||||
</programlisting>
|
||||
This append file adds "crownbay" as a compatible machine,
|
||||
and additionally sets a Yocto Kernel-specific variable that identifies the name of the
|
||||
BSP branch to use in the Git repository to find configuration information.
|
||||
BSP branch to use in the GIT repository to find configuration information.
|
||||
</para>
|
||||
<para>
|
||||
One thing missing in this particular BSP, which you will typically need when
|
||||
@@ -549,7 +542,7 @@ FILESEXTRAPATHS := "${THISDIR}/${PN}"
|
||||
upon the user a requirement to accept the terms of a
|
||||
'click-through' license.
|
||||
Once the license is accepted the
|
||||
Yocto Project build system can then build and include the
|
||||
Poky build system can then build and include the
|
||||
corresponding component in the final BSP image.
|
||||
Some affected components might be essential to the normal
|
||||
functioning of the system and have no 'free' replacement
|
||||
@@ -581,7 +574,7 @@ FILESEXTRAPATHS := "${THISDIR}/${PN}"
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Several methods exist within the Yocto Project build system to satisfy the licensing
|
||||
Several methods exist within the Poky build system to satisfy the licensing
|
||||
requirements for an encumbered BSP.
|
||||
The following list describes them in preferential order:
|
||||
</para>
|
||||
@@ -616,7 +609,7 @@ FILESEXTRAPATHS := "${THISDIR}/${PN}"
|
||||
</para>
|
||||
|
||||
<programlisting>
|
||||
$ BSPKEY_<keydomain>=<key> bitbake core-image-sato
|
||||
$ BSPKEY_<keydomain>=<key> bitbake poky-image-sato
|
||||
</programlisting>
|
||||
|
||||
<para>
|
||||
@@ -651,7 +644,7 @@ FILESEXTRAPATHS := "${THISDIR}/${PN}"
|
||||
These prompts usually take the form of instructions
|
||||
needed to manually fetch the encumbered package(s)
|
||||
and md5 sums into the required directory
|
||||
(e.g. the <filename>yocto/build/downloads</filename>).
|
||||
(e.g. the <filename>poky/build/downloads</filename>).
|
||||
Once the manual package fetch has been
|
||||
completed, restart the build to continue where
|
||||
it left off.
|
||||
|
||||
@@ -1,779 +0,0 @@
|
||||
<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
|
||||
"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
|
||||
|
||||
<chapter id='dev-manual-cases'>
|
||||
|
||||
<title>Development Cases</title>
|
||||
|
||||
<para>
|
||||
For the purposes of this manual we are going to focus on two common development cases or groupings:
|
||||
System Development and User Application Development.
|
||||
System Development covers Board Support Package (BSP) development and kernel image modification.
|
||||
User Application Development covers development of applications that you intend to run on some
|
||||
target hardware.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
[WRITERS NOTE: What is undetermined at this point is how much of the entire development process
|
||||
we include in this particular chapter.
|
||||
In other words, do we cover debugging and emulation steps here on a case-specific basis?
|
||||
Or, do we capture that information in the appropriate subsequent chapter by case?]
|
||||
</para>
|
||||
|
||||
<section id='system-development'>
|
||||
<title>System Development</title>
|
||||
|
||||
<para>
|
||||
System development involves modification or creation of an image that you want to run on
|
||||
a specific hardware target.
|
||||
Usually when you want to create an image that runs on embedded hardware the image does
|
||||
not require the same amount of features that a full-fledged Linux distribution provides.
|
||||
Thus, you can create a much smaller image that is designed to just use the hardware
|
||||
features for your particular hardware.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
To help you understand how system development works in the Yocto Project, this section
|
||||
covers two types of image development: BSP creation and kernel modification.
|
||||
</para>
|
||||
|
||||
<section id='developing-a-board-support-package-bsp'>
|
||||
<title>Developing a Board Support Package (BSP)</title>
|
||||
|
||||
<para>
|
||||
A BSP is a package of recipes that when applied while building an image results in
|
||||
an image you can run on a particular board.
|
||||
Thus, the package, when compiled into the new image, supports the operation of the board.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Packages consist of recipes.
|
||||
Recipes are sets of instructions for building a package.
|
||||
The recipes describe where to get source code and what patches to apply.
|
||||
Recipes also describe dependencies for libraries or for other recipes.
|
||||
They also contain configuration and compilation options.
|
||||
Recipes are logical units of execution.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Here are the basic steps involved in creating a BSP:
|
||||
<orderedlist>
|
||||
<listitem><para>Be sure you are set up to use Yocto Project (see
|
||||
<xref linkend='dev-manual-start'>Getting Started with the Yocto Project</xref>).</para></listitem>
|
||||
<listitem><para>Choose a BSP available with Yocto Project that most closely represents
|
||||
your hardware.</para></listitem>
|
||||
<listitem><para>Get set up with a base BSP.</para></listitem>
|
||||
<listitem><para>Make a copy of the existing BSP and isolate your work by creating a layer
|
||||
for your recipes.</para></listitem>
|
||||
<listitem><para>Make configuration and recipe changes to your new BSP layer.</para></listitem>
|
||||
<listitem><para>Prepare for the build.</para></listitem>
|
||||
<listitem><para>Select and configure the kernel. (WRITER'S NOTE: Not sure on this step).</para></listitem>
|
||||
<listitem><para>Identify the machine branch in the Git repository.</para></listitem>
|
||||
<listitem><para>Build the image.</para></listitem>
|
||||
</orderedlist>
|
||||
You can view a video presentation of the BSP creation process
|
||||
<ulink url='http://free-electrons.com/blog/elc-2011-videos'>here</ulink>.
|
||||
You can also find supplemental information in the
|
||||
<ulink url='http://yoctoproject.org/docs/1.1/bsp-guide/bsp-guide.html'>
|
||||
Board Support Package (BSP) Development Guide</ulink>.
|
||||
Finally, there is wiki page write up of the example located
|
||||
<ulink url='https://wiki.yoctoproject.org/wiki/Transcript:_creating_one_generic_Atom_BSP_from_another'>
|
||||
here</ulink> you might find helpful.
|
||||
</para>
|
||||
|
||||
<section id='setting-up-yocto-project'>
|
||||
<title>Setting Up Yocto Project</title>
|
||||
|
||||
<para>
|
||||
For general host development system preparation such as package requirements and
|
||||
operating system requirements, see
|
||||
<xref linkend='dev-manual-start'>Getting Started with the Yocto Project</xref> in Chapter 2 of
|
||||
this manual or the
|
||||
<ulink url='http://www.yoctoproject.org/docs/1.1/yocto-project-qs/yocto-project-qs.html'>
|
||||
Yocto Project Quick Start</ulink>.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
You need to have the Yocto Project source tree available on your host system.
|
||||
You can get that through tarball extraction or by initializing and checking out the
|
||||
Yocto Project Git repository.
|
||||
Typically, checking out the Git repository is the method to use.
|
||||
This allows you to maintain a complete history of changes and facilitates you
|
||||
contributing back to the Yocto Project.
|
||||
However, if you just want the source you can download the Yocto Project Release
|
||||
tarball from the
|
||||
<ulink url='http://yoctoproject.org/download'>download page</ulink>.
|
||||
If you download the tarball you can extract it into any directory you want using the
|
||||
tar command.
|
||||
For example, the following commands extract the 1.0.1 release tarball into
|
||||
<filename>/usr/local/yocto</filename>, where the release tarball is named
|
||||
<filename>poky.bernard.5.0.1.tar.bz2</filename>:
|
||||
<literallayout class='monospaced'>
|
||||
/usr/local/yocto$ tar xfj poky.bernard.5.0.1.tar.bz2
|
||||
</literallayout>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
The following transcript shows how to initialize a Git repository and checkout the
|
||||
Yocto Project source tree:
|
||||
<literallayout class='monospaced'>
|
||||
/usr/local/yocto$ git init
|
||||
Initialized empty Git repository in /usr/local/yocto/.git
|
||||
/usr/local/yocto$ git remote add poky git://git.yoctoproject.org/poky.git
|
||||
/usr/local/yocto$ git remote update
|
||||
Fetching poky
|
||||
remote: Counting objects: 106111, done.
|
||||
remote: Compressing objects: 100% (36106/36106), done.
|
||||
remote: Total 106111 (delta 72275), reused 99193 (delta 66808)
|
||||
Receiving objects: 100% (106111/106111), 69.51 MiB | 518 KiB/s, done.
|
||||
Resolving deltas: 100% (72275/72275), done.
|
||||
From git://git.yoctoproject.org/poky
|
||||
* [new branch] 1.1_M1 -> poky/1.1_M1
|
||||
* [new branch] 1.1_M2 -> poky/1.1_M2
|
||||
* [new branch] bernard -> poky/bernard
|
||||
* [new branch] blinky -> poky/blinky
|
||||
* [new branch] clyde -> poky/clyde
|
||||
* [new branch] elroy -> poky/elroy
|
||||
* [new branch] green -> poky/green
|
||||
* [new branch] laverne -> poky/laverne
|
||||
* [new branch] master -> poky/master
|
||||
* [new branch] pinky -> poky/pinky
|
||||
* [new branch] purple -> poky/purple
|
||||
* [new tag] 1.1_M1.final -> 1.1_M1.final
|
||||
* [new tag] 1.1_M2.rc1 -> 1.1_M2.rc1
|
||||
* [new tag] bernard-5.0.1 -> bernard-5.0.1
|
||||
* [new tag] pinky-3.1.2 -> pinky-3.1.2
|
||||
From git://git.yoctoproject.org/poky
|
||||
* [new tag] 1.1_M1.rc1 -> 1.1_M1.rc1
|
||||
* [new tag] 1.1_M1.rc2 -> 1.1_M1.rc2
|
||||
* [new tag] bernard-1.0rc1 -> bernard-1.0rc1
|
||||
* [new tag] bernard-5.0 -> bernard-5.0
|
||||
* [new tag] bernard-5.0-alpha -> bernard-5.0-alpha
|
||||
* [new tag] bernard-5.0rc1 -> bernard-5.0rc1
|
||||
* [new tag] bernard-5.0rc2 -> bernard-5.0rc2
|
||||
* [new tag] laverne-4.0 -> laverne-4.0
|
||||
* [new tag] laverne-4.0.1 -> laverne-4.0.1
|
||||
* [new tag] m4 -> m4
|
||||
* [new tag] purple-3.2 -> purple-3.2
|
||||
* [new tag] purple-3.2.1 -> purple-3.2.1
|
||||
</literallayout>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Once you have the repository set up, you have many development branches from which
|
||||
you can work.
|
||||
For this example we are going to use the Yocto Project 1.0.1 Release,
|
||||
which maps to the <filename>Bernard 5.0.1</filename> tag in Git.
|
||||
<literallayout class='monospaced'>
|
||||
/usr/local/yocto$ git checkout -b Bernard-5.0.1 bernard-5.0.1
|
||||
Switched to a new branch 'Bernard-5.0.1'
|
||||
</literallayout>
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section id='choosing-a-base-bsp'>
|
||||
<title>Choosing a Base BSP</title>
|
||||
|
||||
<para>
|
||||
The Yocto Project ships with several BSPs that support various hardware.
|
||||
It is best to base your new BSP on an existing BSP rather than create all the
|
||||
recipes and configuration files from scratch.
|
||||
While it is possible to create everything from scratch, basing your new BSP
|
||||
on something that is close is much easier.
|
||||
Or, at a minimum, it gives you some structure with which to start.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
At this point you need to understand your target hardware well enough to determine which
|
||||
existing BSP most closely matches it.
|
||||
Things to consider are your hardware’s on-board features such as CPU type and graphics support.
|
||||
You should look at the README files for supported BSPs to get an idea of which one
|
||||
you could use.
|
||||
A generic Atom-based BSP to consider is the Crown Bay with no Intel® Embedded Media
|
||||
Graphics Driver (EMGD) support.
|
||||
That is the BSP that this example is going to use.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
To see the supported BSPs, go to the Yocto Project
|
||||
<ulink url='http://www.yoctoproject.org/download'>download page</ulink> and click on “BSP Downloads.”
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section id='getting-your-base-bsp'>
|
||||
<title>Getting Your Base BSP</title>
|
||||
|
||||
<para>
|
||||
You need to have the base BSP layer on your development system.
|
||||
Like the Yocto Project source tree you can get the BSP layer one of two ways:
|
||||
download the tarball and extract it, or initialize a Git repository and check out the BSP.
|
||||
You should use the same method that you used for the Yocto Project source tree.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
If you are using tarball extraction then simply download the tarball for the base
|
||||
BSP you chose in the previous step and then extract it into any directory
|
||||
you choose using the tar command.
|
||||
Upon extraction, the BSP source directory (layer) will be named
|
||||
<filename>meta-<BSP_name></filename>.
|
||||
The following command extracts the Crown Bay BSP into a directory named
|
||||
<filename>meta-crownbay</filename>:
|
||||
<literallayout class='monospaced'>
|
||||
/usr/local$ tar xjf crownbay-noemgd-bernard-5.0.1.tar.bz2
|
||||
</literallayout>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
If you initialized a Yocto Project Git repository then you need to do the same for the
|
||||
BSP, which is located in the meta-intel Git repository.
|
||||
The meta-intel repository contains all the metadata that supports BSP creation.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
The following transcript shows the steps to create and set up the meta-intel Git
|
||||
repository inside the Yocto Project Git repository:
|
||||
<literallayout class='monospaced'>
|
||||
/usr/local/yocto$ mkdir meta-intel
|
||||
/usr/local/yocto$ cd meta-intel
|
||||
/usr/local/yocto/meta-intel$ git init
|
||||
Initialized empty Git repository in /usr/local/yocto/meta-intel/.git/
|
||||
/usr/local/yocto/meta-intel$ git remote add meta-intel \ git://git.yoctoproject.org/meta-intel.git
|
||||
/usr/local/yocto/meta-intel$ git remote update
|
||||
Fetching meta-intel
|
||||
remote: Counting objects: 1240, done.
|
||||
remote: Compressing objects: 100% (1008/1008), done.
|
||||
remote: Total 1240 (delta 513), reused 85 (delta 27)
|
||||
Receiving objects: 100% (1240/1240), 1.55 MiB | 510 KiB/s, done.
|
||||
Resolving deltas: 100% (513/513), done.
|
||||
From git://git.yoctoproject.org/meta-intel
|
||||
* [new branch] 1.1_M1 -> meta-intel/1.1_M1
|
||||
* [new branch] 1.1_M2 -> meta-intel/1.1_M2
|
||||
* [new branch] bernard -> meta-intel/bernard
|
||||
* [new branch] dvhart/n450 -> meta-intel/dvhart/n450
|
||||
* [new branch] laverne -> meta-intel/laverne
|
||||
* [new branch] master -> meta-intel/master
|
||||
</literallayout>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Once you have the repository set up, you have many development branches from
|
||||
which you can work.
|
||||
For this example we are going to use Bernard 5.0.
|
||||
<literallayout class='monospaced'>
|
||||
/usr/local/yocto/meta-intel$ git checkout -b Bernard-5.0.1 meta-intel/bernard
|
||||
Branch Bernard-5.0.1 set up to track remote branch bernard from meta-intel.
|
||||
Switched to a new branch 'Bernard-5.0.1'
|
||||
</literallayout>
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section id='making-a-copy-of-the-base-bsp-to-create-your-new-bsp-layer'>
|
||||
<title>Making a Copy of the Base BSP to Create Your New BSP Layer</title>
|
||||
|
||||
<para>
|
||||
Now that you have the Yocto Project and base BSP source you need to create a
|
||||
new layer for your BSP.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Layers are ideal for isolating and storing work for a given piece of hardware.
|
||||
A layer is really just a location or area in which you place the recipes for your BSP.
|
||||
In fact, a BSP is, in itself, a special type of layer.
|
||||
Consider an application as another example that illustrates a layer.
|
||||
Suppose you are creating an application that has library or other dependencies in
|
||||
order for it to compile and run.
|
||||
The layer, in this case, would be where all the recipes that define those dependencies
|
||||
are kept. The key point for a layer is that it is an isolated area that contains
|
||||
all the relevant information for the project that the Yocto Project build system knows about.
|
||||
</para>
|
||||
|
||||
<note>
|
||||
The Yocto Project supports four BSPs that are part of the
|
||||
Yocto Project release: <filename>atom-pc</filename>, <filename>beagleboard</filename>,
|
||||
<filename>mpc8315e</filename>, and <filename>routerstationpro</filename>.
|
||||
The recipes and configurations for these four BSPs are located and dispersed
|
||||
within <filename>meta</filename>, which can be found in the Yocto Project source directory.
|
||||
Consequently, they are not totally isolated in the spirit of layers unless you think
|
||||
of <filename>meta</filename> as a layer itself.
|
||||
On the other hand, the Yocto Project has isolated BSP layers within
|
||||
<filename>meta-intel</filename> for the Crown Bay, Emenlow, Jasper Forest, N450, and
|
||||
Sugar Bay.
|
||||
[WRITER'S NOTE: <filename>meta-yocto</filename>, <filename>meta</filename>, and
|
||||
<filename>meta-intel</filename> need some explanation.
|
||||
Not sure about the relationship of meta-yocto as compared to meta-intel.]
|
||||
</note>
|
||||
|
||||
<para>
|
||||
When you set up a layer for a new BSP you should follow a standard layout.
|
||||
This layout is described in
|
||||
<ulink url='http://www.yoctoproject.org/docs/1.1/bsp-guide/bsp-guide.html#bsp-filelayout'>
|
||||
Example Filesystem Layout</ulink> section of the Board Support Package (BSP) Development
|
||||
Guide.
|
||||
In the standard layout you will notice a suggested hierarchy for BSP kernel recipes,
|
||||
graphics recipes, and configuration information.
|
||||
You can see the standard layout for the Crown Bay BSP in this example by examining the
|
||||
directory structure of <filename>meta-crownbay</filename>.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
To create your BSP layer you simply copy the <filename>meta-crownbay</filename>
|
||||
layer to a new layer.
|
||||
For this example the new layer is named <filename>meta-mymachine</filename>.
|
||||
The name must follow the BSP layer naming convention, which is
|
||||
<filename>meta-<name></filename>.
|
||||
The following example assumes a meta-intel Git repository.
|
||||
If you downloaded and expanded a Crown Bay tarball then you simply copy the resulting
|
||||
<filename>meta-crownbay</filename> directory structure to a location of your choice:
|
||||
<literallayout class='monospaced'>
|
||||
/usr/local/yocto/meta-intel$ cp -a meta-crownbay/ meta-mymachine
|
||||
</literallayout>
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section id='making-changes-to-your-bsp'>
|
||||
<title>Making Changes to Your BSP</title>
|
||||
|
||||
<para>
|
||||
Right now you have two identical BSP layers with different names:
|
||||
<filename>meta-crownbay</filename> and <filename>meta-mymachine</filename>.
|
||||
You need to change your configurations so that they work for your new BSP and
|
||||
your particular hardware.
|
||||
We will look first at the configurations, which are all done in the layer’s
|
||||
<filename>conf</filename> directory.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
First, since in this example the new BSP will not support EMGD we will get rid of the
|
||||
<filename>crownbay.conf</filename> file and then rename the
|
||||
<filename>crownbay-noemgd.conf</filename> file to <filename>mymachine.conf</filename>.
|
||||
Much of what we do in the configuration directory is designed to help the Yocto Project
|
||||
build system work with the new layer and to be able to find and use the right software.
|
||||
The following two commands result in a single machine configuration file named
|
||||
<filename>mymachine.conf</filename>.
|
||||
<literallayout class='monospaced'>
|
||||
/usr/local/yocto/meta-intel$ rm meta-mymachine/conf/machine/crownbay.conf
|
||||
/usr/local/yocto/meta-intel$ mv meta-mymachine/conf/machine/crownbay-noemgd.conf \
|
||||
meta-mymachine/conf/machine/mymachine.conf
|
||||
</literallayout>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
The next step makes changes to <filename>mymachine.conf</filename> itself.
|
||||
The only changes needed for this example are changes to the comment lines and to the
|
||||
Source Revision (<filename>SRCREV</filename>) lines at the bottom of the file.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
For the comments the string <filename>crownbay-noemgd</filename> needs to be changed to
|
||||
<filename>mymachine</filename>.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
To understand how to complete the changes to the <filename>SRCREV</filename>
|
||||
statements we need to know which kernel we are using.
|
||||
The <filename>PREFERRED_PROVIDER_virtual/kernel</filename> statement in the file specifies
|
||||
the kernel we are going to use.
|
||||
We are going to use <filename>linux-yocto-stable</filename>.
|
||||
The <filename>SRCREV</filename> statement pairs point to the exact machine branch
|
||||
(commit) and <filename>meta</filename> branch in the Git repository.
|
||||
Right now the <filename>SRCREV</filename> variables are as follows in
|
||||
<filename>mymachine.conf</filename>:
|
||||
<literallayout class='monospaced'>
|
||||
SRCREV_machine_pn-linux-yocto_crownbay-noemgd ?= \ "56fe215d3f1a2cc3a5a26482ac9809ba44495695"
|
||||
SRCREV_meta_pn-linux-yocto_crownbay-noemgd ?= \ "e1f85a470934a0cf6abde5d95533e74501822c6b"
|
||||
|
||||
SRCREV_machine_pn-linux-yocto-stable_crownbay-noemgd ?= \ "56fe215d3f1a2cc3a5a26482ac9809ba44495695"
|
||||
SRCREV_meta_pn-linux-yocto-stable_crownbay-noemgd ?= \ "e1f85a470934a0cf6abde5d95533e74501822c6b"
|
||||
</literallayout>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
You will notice that there are two pairs of <filename>SRCREV</filename> statements.
|
||||
The first pair points to a current development kernel, which we don’t care about
|
||||
in this example.
|
||||
The bottom pair points to the stable kernel that we will use:
|
||||
<filename>linux-yocto-stable</filename>.
|
||||
At this point though, the unique commit strings all are still associated with
|
||||
Crown Bay.
|
||||
So the next changes we make to the configuration file gets rid of the pair that points
|
||||
to the development kernel and provides new commit strings that points to the
|
||||
<filename>atom-pc-standard</filename>, which we are choosing for the initial build of this BSP.
|
||||
Here are the final <filename>SRCREV</filename> statements:
|
||||
<literallayout class='monospaced'>
|
||||
SRCREV_machine_pn-linux-yocto-stable_mymachine ?= \ "72ca49ab08b8eb475cec82a10049503602325791"
|
||||
SRCREV_meta_pn-linux-yocto-stable_mymachine ?= \ "ec26387cb168e9e0976999b528b5a9dd62e3157a"
|
||||
</literallayout>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
If you are familiar with Git repositories you probably won’t have trouble locating the
|
||||
exact commit strings you need to change the <filename>SRCREV</filename> statements.
|
||||
You can find all the <filename>machine</filename> and <filename>meta</filename>
|
||||
branch points (commits) for the <filename>linux-yocto-2.6.34</filename> kernel
|
||||
<ulink url='http://git.yoctoproject.org/cgit/cgit.cgi/linux-yocto-2.6.34'>here</ulink>.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
If you need a little more assistance after going to the link then do the following:
|
||||
<orderedlist>
|
||||
<listitem><para>Expand the list of branches by clicking <filename>[…]</filename></para></listitem>
|
||||
<listitem><para>Click on the <filename>atom-pc-standard</filename> branch</para></listitem>
|
||||
<listitem><para>Click on the commit column header to view the top commit</para></listitem>
|
||||
<listitem><para>Copy the commit string for use in the <filename>mymachine.conf</filename>
|
||||
file</para></listitem>
|
||||
</orderedlist>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
For the <filename>SRCREV</filename> statement that points to the <filename>meta</filename>
|
||||
branch use the same procedure except expand the <filename>wrs_meta</filename>
|
||||
branch in step 2 above.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
The next configuration file in the new BSP layer we need to edit is <filename>layer.conf</filename>.
|
||||
This file identifies build information needed for the new layer.
|
||||
You can see the
|
||||
<ulink url='http://www.yoctoproject.org/docs/1.1/bsp-guide/bsp-guide.html#bsp-filelayout-layer'>
|
||||
Layer Configuration File</ulink> section in the Board Support Packages (BSP) Development Guide
|
||||
for more information on this configuration file.
|
||||
Basically, we are removing statements that support EMGD and changing the ones that support no EMGD.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
First, remove these statements from the file:
|
||||
<literallayout class='monospaced'>
|
||||
BBFILE_COLLECTIONS_crownbay += "crownbay"
|
||||
BBFILE_PATTERN_crownbay := "^${LAYERDIR}/"
|
||||
BBFILE_PRIORITY_crownbay = "6"
|
||||
</literallayout>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
This leaves three similar statements that we care about:
|
||||
<literallayout class='monospaced'>
|
||||
BBFILE_COLLECTIONS_crownbay-noemgd += "crownbay-noemgd"
|
||||
BBFILE_PATTERN_crownbay-noemgd := "^${LAYERDIR}/"
|
||||
BBFILE_PRIORITY_crownbay-noemgd = "6"
|
||||
</literallayout>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Simply substitute the machine string name <filename>crownbay-noemgd</filename>
|
||||
with the new machine name <filename>mymachine</filename> to get the following:
|
||||
<literallayout class='monospaced'>
|
||||
BBFILE_COLLECTIONS_mymachine += "mymachine"
|
||||
BBFILE_PATTERN_mymachine := "^${LAYERDIR}/"
|
||||
BBFILE_PRIORITY_mymachine = "6"
|
||||
</literallayout>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Now we will take a look at the recipes in your new layer.
|
||||
The standard BSP structure has areas for BSP, graphics, and kernel recipes.
|
||||
When you create a BSP you use these areas for appropriate recipes and append files.
|
||||
Recipes take the form of <filename>.bb</filename> files.
|
||||
If you want to leverage off of existing recipes elsewhere in the Yocto Project
|
||||
source tree but change them you can use <filename>.bbappend</filename> files.
|
||||
All new recipes and append files for your layer go in the layer’s
|
||||
<filename>recipes-bsp</filename>, <filename>recipes-kernel</filename>, and
|
||||
<filename>recipes-graphics</filename> directories.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
For this example we are not adding any new BSP recipes.
|
||||
And, we only need to remove the formfactor we do not want and change the name of
|
||||
the remaining one that supports no EMGD.
|
||||
These commands take care of the new layer’s BSP recipes:
|
||||
<literallayout class='monospaced'>
|
||||
/usr/local/yocto/meta-intel$ rm -rf \
|
||||
meta-mymachine/recipes-bsp/formfactor/formfactor/crownbay
|
||||
/usr/local/yocto/meta-intel$ mv \
|
||||
meta-mymachine/recipes-bsp/formfactor/formfactor/crownbay-noemgd/ \
|
||||
meta-mymachine/recipes-bsp/formfactor/formfactor/mymachine
|
||||
</literallayout>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
For this example we want to remove anything that supports EMGD.
|
||||
The following command cleans up the <filename>recipes-graphics</filename> directory:
|
||||
<literallayout class='monospaced'>
|
||||
/usr/local/yocto/meta-intel$ rm -rf \
|
||||
meta-mymachine/recipes-graphics/xorg-xserver/xserver-xf86-emgd*
|
||||
</literallayout>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
At this point the <filename>recipes-graphics</filename> directory just has files that
|
||||
support Video Electronics Standards Association (VESA) graphics modes.
|
||||
However, we still need to rename a directory in the layer.
|
||||
This command applies the final change to the <filename>recipes-graphics</filename> directory:
|
||||
<literallayout class='monospaced'>
|
||||
/usr/local/yocto/meta-intel$ mv \
|
||||
meta-mymachine/recipes-graphics/xorg-xserver/xserver-xf86-config/crownbay-noemgd \
|
||||
meta-mymachine/recipes-graphics/xorg-xserver/xserver-xf86-config/mymachine
|
||||
</literallayout>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Finally, let us look at the <filename>recipes-kernel</filename> directory in the example.
|
||||
The only file we are concerned with for the example is
|
||||
<filename>linux-yocto-stable_git.bbappend</filename>.
|
||||
The other files all support the EMGD feature of Crown Bay.
|
||||
These commands clean up the directory:
|
||||
<literallayout class='monospaced'>
|
||||
/usr/local/yocto/meta-intel$ rm -rf meta-mymachine/recipes-kernel/linux/linux-yocto
|
||||
/usr/local/yocto/meta-intel$ rm -rf \
|
||||
meta-mymachine/recipes-kernel/linux/linux-yocto-stable
|
||||
/usr/local/yocto/meta-intel$ rm \
|
||||
meta-mymachine/recipes-kernel/linux/linux-yocto_git.bbappend
|
||||
</literallayout>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
The <filename>linux-yocto-stable_git.bbappend</filename> file appends a Yocto Project
|
||||
recipe having the same name.
|
||||
The changes we need to make are to remove the statements that support EMGD
|
||||
and change the remaining Crown Bay strings to be <filename>mymachine</filename>.
|
||||
We also do not need to include the pointer to the EMGD licenses patch at the end of
|
||||
the file.
|
||||
Here is the original file:
|
||||
<literallayout class='monospaced'>
|
||||
FILESEXTRAPATHS := "${THISDIR}/${PN}"
|
||||
COMPATIBLE_MACHINE_crownbay = "crownbay"
|
||||
KMACHINE_CROWNBAY = "CROWNBAY"
|
||||
COMPATIBLE_MACHINE_crownbay-noemgd = "crownbay-noemgd"
|
||||
KMACHINE_crownbay-noemgd = "crownbay"
|
||||
SRC_URI += "file://0001-crownbay-update-a-handful-of-EMGD-licenses.patch"
|
||||
</literallayout>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
After editing the file it looks like this:
|
||||
<literallayout class='monospaced'>
|
||||
FILESEXTRAPATHS := "${THISDIR}/${PN}"
|
||||
COMPATIBLE_MACHINE_mymachine = "mymachine"
|
||||
KMACHINE_mymachine = "mymachine"
|
||||
</literallayout>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
In summary, the edits to the layer’s recipe files result in removal of any files and
|
||||
statements that do not support your targeted hardware in addition to the inclusion
|
||||
of any new recipes you might need.
|
||||
In this example, it was simply a matter of ridding the new layer <filename>meta-mymachine</filename>
|
||||
of any code that supported the EMGD features.
|
||||
We did not introduce any new recipes to the layer.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Finally, it is also important to update the layer’s <filename>README</filename>
|
||||
file so that the information in it reflects your BSP.
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section id='preparing-for-the-build'>
|
||||
<title>Preparing for the Build</title>
|
||||
|
||||
<para>
|
||||
Once you have made all the changes to your BSP layer there remains a few things
|
||||
you need to do for the Yocto Project build system in order for it to create your image.
|
||||
You need to get the build environment ready by sourcing an environment setup script
|
||||
and you need to be sure two key configuration files are configured appropriately.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
The entire process for building an image is overviewed in the
|
||||
<ulink url='http://www.yoctoproject.org/docs/1.1/yocto-project-qs/yocto-project-qs.html#building-image'>
|
||||
Building an Image</ulink> section of the Yocto Project Quick Start.
|
||||
You might want to reference this information.
|
||||
The remainder of this section will apply to our example of the <filename>meta-mymachine</filename> layer.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
To get ready to build your new layer you need to do the following:
|
||||
<orderedlist>
|
||||
<listitem><para>Get the environment ready for the build by sourcing the environment
|
||||
script.
|
||||
The environment script is in the Yocto Project source directory and has the string
|
||||
<filename>init-build-env</filename> in the file’s name.
|
||||
For this example, the following command gets the build environment ready:
|
||||
<literallayout class='monospaced'>
|
||||
/usr/local/yocto$ source oe-init-build-env yocto-build
|
||||
</literallayout>
|
||||
When you source the script a build directory is created in the current
|
||||
working directory.
|
||||
In our example we were in the Yocto Project source directory.
|
||||
Thus, entering the previous command created the <filename>yocto-build</filename> directory.
|
||||
If you do not provide a name for the build directory it defaults to build.
|
||||
The build directory contains a <filename>conf</filename> directory that contains
|
||||
two configuration files you will need to check: <filename>bblayers.conf</filename>
|
||||
and <filename>local.conf</filename>.</para></listitem>
|
||||
<listitem><para>Check and edit the resulting <filename>local.conf</filename> file.
|
||||
This file minimally identifies the machine for which to build the image by
|
||||
configuring the <filename>MACHINE</filename> variable.
|
||||
For this example you must set the variable to mymachine as follows:
|
||||
<literallayout class='monospaced'>
|
||||
MACHINE ??= "mymachine"
|
||||
</literallayout>
|
||||
You should also be sure any other variables in which you are interested are set.
|
||||
Some variables to consider are <filename>BB_NUMBER_THREADS</filename>
|
||||
and <filename>PARALLEL_MAKE</filename>, both of which can greatly reduce your build time
|
||||
if you are using a multi-threaded development system (e.g. values of
|
||||
<filename>8</filename> and <filename>-j 6</filename>, respectively, are optimal
|
||||
for a development machine that has four available cores).</para></listitem>
|
||||
<listitem><para>Update the <filename>bblayers.conf</filename> file so that it includes
|
||||
the path to your new BSP layer.
|
||||
In this example you need to include the pathname to <filename>meta-mymachine</filename>.
|
||||
For example, if you created a Yocto Project Git repository named
|
||||
<filename>yocto</filename> in <filename>/usr/local</filename> then the
|
||||
<filename>BBLAYERS</filename> variable in the file would need to include the following path:
|
||||
<literallayout class='monospaced'>
|
||||
/usr/local/yocto/meta-intel/meta-mymachine
|
||||
</literallayout></para></listitem>
|
||||
</orderedlist>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
The appendix
|
||||
<ulink url='http://www.yoctoproject.org/docs/1.1/poky-ref-manual/poky-ref-manual.html#ref-variables-glos'>
|
||||
Reference: Variables Glossary</ulink> in the Yocto Project Reference Manual has more information
|
||||
on configuration variables.
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section id='building-the-image'>
|
||||
<title>Building the Image</title>
|
||||
|
||||
<para>
|
||||
The Yocto Project uses the BitBake tool to build images based on the type of image
|
||||
you want to create.
|
||||
You can find more information on BitBake
|
||||
<ulink url='http://bitbake.berlios.de/manual/'>here</ulink>.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
The build process supports several types of images to satisfy different needs.
|
||||
When you issue the BitBake command you provide a “top-level” recipe that essentially
|
||||
starts the process off of building the type of image you want.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
You can find these recipes in the <filename>meta/recipes-core/images</filename> and
|
||||
<filename>meta/recipes-sato/images</filename> directories of the Yocto Project source
|
||||
tree or Git repository.
|
||||
Although the recipe names are somewhat explanatory, here is a list that describes them:
|
||||
<itemizedlist>
|
||||
<listitem><para><emphasis>Base</emphasis> – A foundational basic image without support
|
||||
for X that can be reasonably used for customization.</para></listitem>
|
||||
<listitem><para><emphasis>Core</emphasis> – A foundational basic image with support for
|
||||
X that can be reasonably used for customization.</para></listitem>
|
||||
<listitem><para><emphasis>Direct Disk</emphasis> – An image that you can copy directly to
|
||||
the disk of the target device.</para></listitem>
|
||||
<listitem><para><emphasis>Live</emphasis> – An image you can run from a USB device or from
|
||||
a CD without having to first install something.</para></listitem>
|
||||
<listitem><para><emphasis>Minimal</emphasis> – A small image without a GUI.
|
||||
This image is not much more than a kernel with a shell.</para></listitem>
|
||||
<listitem><para><emphasis>Minimal Development</emphasis> – A Minimal image suitable for
|
||||
development work.</para></listitem>
|
||||
<listitem><para><emphasis>Minimal Direct Disk</emphasis> – A Minimal Direct Disk image.</para></listitem>
|
||||
<listitem><para><emphasis>Minimal RAM-based Initial Root Filesystem</emphasis> – A minimal image
|
||||
that has the <filename>initramfs</filename> as part of the kernel, which allows the
|
||||
system to find the first “init” program more efficiently.</para></listitem>
|
||||
<listitem><para><emphasis>Minimal Live</emphasis> – A Minimal Live image.</para></listitem>
|
||||
<listitem><para><emphasis>Minimal MTD Utilities</emphasis> – A minimal image that has support
|
||||
for the MTD utilities, which let the user interact with the MTD subsystem in
|
||||
the kernel to perform operations on flash devices.</para></listitem>
|
||||
<listitem><para><emphasis>Sato</emphasis> – An image with Sato support, a mobile environment
|
||||
and visual style that works well with mobile devices.</para></listitem>
|
||||
<listitem><para><emphasis>Sato Development</emphasis> – A Sato image suitable for
|
||||
development work.</para></listitem>
|
||||
<listitem><para><emphasis>Sato Direct Disk</emphasis> – A Sato Direct Disk image.</para></listitem>
|
||||
<listitem><para><emphasis>Sato Live</emphasis> – A Sato Live image.</para></listitem>
|
||||
<listitem><para><emphasis>Sato SDK</emphasis> – A Sato image that includes the Yocto Project
|
||||
toolchain and development libraries.</para></listitem>
|
||||
<listitem><para><emphasis>Sato SDK Direct Disk</emphasis> – A Sato SDK Direct
|
||||
Disk image.</para></listitem>
|
||||
<listitem><para><emphasis>Sato SDK Live</emphasis> – A Sato SDK Live image.</para></listitem>
|
||||
</itemizedlist>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
The remainder of this section applies to our example of the <filename>meta-mymachine</filename> layer.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
To build the image for our <filename>meta-mymachine</filename> BSP enter the following command
|
||||
from the same shell from which you ran the setup script.
|
||||
You should run the <filename>bitbake</filename> command without any intervening shell commands.
|
||||
For example, moving your working directory around could cause problems.
|
||||
Here is the command for this example:
|
||||
<literallayout class='monospaced'>
|
||||
/usr/local/yocto/yocto-build$ bitbake -k poky-image-sato-live
|
||||
</literallayout>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
This command requests an image that has Sato support and can be run from a USB device or
|
||||
from a CD without having to first install anything.
|
||||
The build process takes significant time and includes thousands of tasks, which are reported
|
||||
at the console.
|
||||
If the build results in any type of error you should check for misspellings in the
|
||||
files you changed or problems with your host development environment such as missing packages.
|
||||
</para>
|
||||
</section>
|
||||
</section>
|
||||
|
||||
<section id='modifying-a-kernel'>
|
||||
<title>Modifying a Kernel</title>
|
||||
|
||||
<para>
|
||||
[WRITER'S NOTE: This section is a second example that focuses on just modifying the kernel.
|
||||
I don't have any information on this yet.]
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Here are some points to consider though:
|
||||
<itemizedlist>
|
||||
<listitem><para>Reference Darren's presentation
|
||||
<ulink url='http://events.linuxfoundation.org/events/embedded-linux-conference/hart'>
|
||||
here</ulink></para></listitem>
|
||||
<listitem><para>Reference <xref linkend='dev-manual-start'>Getting Started with the Yocto Project</xref>
|
||||
section to get set up at minimum.</para></listitem>
|
||||
<listitem><para>Are there extra steps I need specific to kernel development to get started?</para></listitem>
|
||||
<listitem><para>What do I do to get set up?
|
||||
Is it a matter of just installing YP and having some pieces together?
|
||||
What are the pieces?</para></listitem>
|
||||
<listitem><para>Where do I get the base kernel to start with?</para></listitem>
|
||||
<listitem><para>Do I install the appropriate toolchain?</para></listitem>
|
||||
<listitem><para>What kernel git repository do I use?</para></listitem>
|
||||
<listitem><para>What is the conversion script?
|
||||
What does it do?</para></listitem>
|
||||
<listitem><para>What do I have to do to integrate the kernel layer?</para></listitem>
|
||||
<listitem><para>What do I use to integrate the kernel layer?
|
||||
HOB?
|
||||
Do I just Bitbake it?</para></listitem>
|
||||
<listitem><para>Using the System Image Creator.]</para></listitem>
|
||||
</itemizedlist>
|
||||
</para>
|
||||
</section>
|
||||
</section>
|
||||
|
||||
<section id='user-application-development'>
|
||||
<title>User Application Development</title>
|
||||
|
||||
<para>
|
||||
[WRITER'S NOTE: This section is the second major development case - developing an application.
|
||||
Here are points to consider:
|
||||
<itemizedlist>
|
||||
<listitem><para>User-space Application Development scenario overview.</para></listitem>
|
||||
<listitem><para>Using the Yocto Eclipse Plug-in.</para></listitem>
|
||||
<listitem><para>Back-door support.</para></listitem>
|
||||
<listitem><para>I feel there is more to this area than we have captured during our two review meetings.]</para></listitem>
|
||||
</itemizedlist>
|
||||
</para>
|
||||
</section>
|
||||
</chapter>
|
||||
<!--
|
||||
vim: expandtab tw=80 ts=4
|
||||
-->
|
||||
@@ -1,8 +0,0 @@
|
||||
<?xml version='1.0'?>
|
||||
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" xmlns="http://www.w3.org/1999/xhtml" xmlns:fo="http://www.w3.org/1999/XSL/Format" version="1.0">
|
||||
|
||||
<xsl:import href="http://docbook.sourceforge.net/release/xsl/current/xhtml/docbook.xsl" />
|
||||
|
||||
<!-- <xsl:param name="generate.toc" select="'article nop'"></xsl:param> -->
|
||||
|
||||
</xsl:stylesheet>
|
||||
@@ -1,128 +0,0 @@
|
||||
<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
|
||||
"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
|
||||
|
||||
<chapter id='dev-manual-intro'>
|
||||
|
||||
<title>The Yocto Project Development Manual</title>
|
||||
|
||||
<para>
|
||||
WRITER NOTE: The goal of this manual is to provide an over-arching development guide for using the Yocto Project.
|
||||
The intent is to give the reader the “big picture” around development.
|
||||
Much of the information in the manual will be detailed in other manuals.
|
||||
For example, detailed information on Git, repositories and open-source in general can be found in many places.
|
||||
Another example is getting set up to use the Yocto Project, which our Yocto Project Quick Start covers.
|
||||
However, this manual needs to at least address it.
|
||||
One might ask “What becomes of the Poky Reference Manual?”
|
||||
This manual, over time, needs to develop into a pure reference manual where all procedural information
|
||||
eventually ends up in an appropriate guide.
|
||||
A good example of information perfect for the Poky Reference Manual is the appendix on variable
|
||||
definitions (glossary).
|
||||
</para>
|
||||
|
||||
<section id='intro'>
|
||||
<title>Introduction</title>
|
||||
|
||||
<para>
|
||||
Welcome to the Yocto Project Development Guide!
|
||||
This guide provides an over-arching view of the development process within the Yocto Project.
|
||||
This guide is just that – a guide.
|
||||
It helps you understand the bigger picture involving development using the Yocto Project.
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section id='what-this-manual-provides'>
|
||||
<title>What this Manual Provides</title>
|
||||
|
||||
<para>
|
||||
The following list describes what you can get from this guide:
|
||||
<itemizedlist>
|
||||
<listitem><para>A general idea of and references to information that lets you get set
|
||||
up to develop using the Yocto Project.</para></listitem>
|
||||
<listitem><para>Information to help developers that are new to the open source environment
|
||||
and to the distributed revision control system Git, which the Yocto Project
|
||||
uses.</para></listitem>
|
||||
<listitem><para>An understanding of common end-to-end development models.</para></listitem>
|
||||
<listitem><para>Development case overviews for both system development and user-space
|
||||
applications.</para></listitem>
|
||||
<listitem><para>An overview and understanding of the emulation environment used with
|
||||
the Yocto Project (QEMU).</para></listitem>
|
||||
<listitem><para>A discussion of target-level analysis techniques, tools, tips,
|
||||
and tricks.</para></listitem>
|
||||
<listitem><para>Considerations for deploying your final product.</para></listitem>
|
||||
<listitem><para>An understanding of basic kernel architecture and
|
||||
concepts.</para></listitem>
|
||||
<listitem><para>Information that will help you migrate an existing project to the
|
||||
Yocto Project development environment.</para></listitem>
|
||||
</itemizedlist>
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section id='what-this-manual-does-not-provide'>
|
||||
<title>What this Manual Does Not Provide</title>
|
||||
|
||||
<para>
|
||||
This manual will not give you the following:
|
||||
<itemizedlist>
|
||||
<listitem><para>Step-by-step instructions when these instructions exist in other Yocto
|
||||
Project documentation.
|
||||
For example, The Application Development Toolkit (ADT) User’s Guide contains detailed
|
||||
instruction on how to obtain and configure the Eclipse Yocto Plug-in.</para></listitem>
|
||||
<listitem><para>Reference material.
|
||||
This type of material resides in an appropriate reference manual.
|
||||
For example, system variables are documented in the Poky Reference Manual.</para></listitem>
|
||||
<listitem><para>Detailed public information that is not specific to the Yocto Project.
|
||||
For example, exhaustive information on how to use Git is better covered in the public
|
||||
domain than in this manual.</para></listitem>
|
||||
</itemizedlist>
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section id='other-information'>
|
||||
<title>Other Information</title>
|
||||
|
||||
<para>
|
||||
Because this manual presents overview information for many different topics, you will
|
||||
need to supplement it with other information.
|
||||
The following list presents other sources of information you might find helpful:
|
||||
<itemizedlist>
|
||||
<listitem><para>The <ulink url='http://www.yoctoproject.org'>Yocto Project Website</ulink> - The
|
||||
home page for the Yocto Project
|
||||
provides lots of information on the project as well as links to software
|
||||
and documentation.</para></listitem>
|
||||
<listitem><para>The <ulink url='http://www.yoctoproject.org/docs/1.1/yocto-project-qs/yocto-project-qs.html'>
|
||||
Yocto Project Quick Start</ulink> - This short document lets you get started
|
||||
with the Yocto Project quickly and start building an image.</para></listitem>
|
||||
<listitem><para>The <ulink url='http://www.yoctoproject.org/docs/1.1/poky-ref-manual/poky-ref-manual.html'>
|
||||
Yocto Project Reference Manual</ulink> - This manual is the complete reference
|
||||
guide to the Yocto Project build component.
|
||||
The manual also contains a reference chapter on Board Support Package (BSP)
|
||||
layout.</para></listitem>
|
||||
<listitem><para><ulink url='http://www.yoctoproject.org/docs/1.1/adt-manual/adt-manual.html'>
|
||||
Application Development Toolkit (ADT) User's Guide</ulink> - This guide provides
|
||||
information that lets you get going with the ADT to develop projects using the
|
||||
Yocto Project.</para></listitem>
|
||||
<listitem><para><ulink url='http://www.yoctoproject.org/docs/1.1/bsp-guide/bsp-guide.html'>
|
||||
Board Support Package (BSP) Developer's Guide</ulink> - This guide defines the
|
||||
structure for BSP components.
|
||||
Having a commonly understood structure encourages standardization.</para></listitem>
|
||||
<listitem><para><ulink url='http://www.yoctoproject.org/docs/1.1/kernel-manual/kernel-manual.html'>
|
||||
Yocto Project Kernel Architecture and Use Manual</ulink> - This manual
|
||||
describes the architecture of the Yocto Project kernel and provides some work flow
|
||||
examples.</para></listitem>
|
||||
<listitem><para><ulink url='http://www.youtube.com/watch?v=3ZlOu-gLsh0'>
|
||||
Yocto Eclipse Plug-in</ulink> - A step-by-step instructional video that
|
||||
demonstrates how an application developer uses Yocto Plug-in features within
|
||||
the Eclipse IDE.</para></listitem>
|
||||
<listitem><para><ulink url='http://wiki.yoctoproject.org/wiki/FAQ'>FAQ</ulink> - A
|
||||
list of commonly asked questions and their answers.</para></listitem>
|
||||
<listitem><para><ulink url='http://www.yoctoproject.org/download/yocto/yocto-project-1.0-release-notes-poky-5.0'>
|
||||
Release Notes</ulink> - Features, updates and known issues for the current
|
||||
release of the Yocto Project.</para></listitem>
|
||||
</itemizedlist>
|
||||
</para>
|
||||
</section>
|
||||
|
||||
</chapter>
|
||||
<!--
|
||||
vim: expandtab tw=80 ts=4
|
||||
-->
|
||||
@@ -1,45 +0,0 @@
|
||||
<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
|
||||
"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
|
||||
|
||||
<chapter id='dev-manual-model'>
|
||||
|
||||
<title>Common Development Models</title>
|
||||
|
||||
<para>
|
||||
[WRITERS NOTE: This chapter presents common development models within the Yocto Project.
|
||||
Reading this chapter will give the user a feel for the overall development process.
|
||||
The chapter will follow the framework for the manual.
|
||||
The team decided to present a single development model and not to try and represent all the
|
||||
various possibilities that might exist.
|
||||
The chapter will include an over-arching diagram that shows a simple, most-common development model.
|
||||
The diagram will consist of boxes that represent high-level areas of the development process.
|
||||
For example, a box for “Setting Up” will be in the model.
|
||||
A box for “Debugging” will exist.
|
||||
The diagram needs to account for the two use-cases we are going to showcase
|
||||
(system development and application development)].
|
||||
</para>
|
||||
|
||||
<section id='place-holder-section-one'>
|
||||
<title>Place-Holder Section One</title>
|
||||
|
||||
<para>
|
||||
Text needed here.
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section id='place-holder-section-two'>
|
||||
<title>Place-Holder Section Two</title>
|
||||
|
||||
<para>
|
||||
Text needed here.
|
||||
</para>
|
||||
</section>
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
</chapter>
|
||||
<!--
|
||||
vim: expandtab tw=80 ts=4
|
||||
-->
|
||||
@@ -1,539 +0,0 @@
|
||||
<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
|
||||
"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
|
||||
|
||||
<chapter id='dev-manual-newbie'>
|
||||
|
||||
<title>Working with Open Source Code</title>
|
||||
|
||||
<para>
|
||||
This chapter presents information for users new or unfamiliar with working in an open source environment.
|
||||
Working in this type of environment is different than working in a closed, proprietary environment.
|
||||
The chapter also presents information specific to the Yocto Project environment.
|
||||
It specifically addresses licensing issues, code repositories, the open-source distributed version control
|
||||
system Git, and best practices within Yocto Project.
|
||||
</para>
|
||||
|
||||
<note><para>
|
||||
If you are a seasoned open source developer and are familiar with Git, you might just be interested
|
||||
in the Yocto Project specific information in this chapter.
|
||||
</para></note>
|
||||
|
||||
<section id='open-source-philosophy'>
|
||||
<title>Open Source Philosophy</title>
|
||||
|
||||
<para>
|
||||
Open source philosophy is characterized by software development directed by peer production,
|
||||
bartering, and collaboration through a concerned community of developers.
|
||||
Contrast this to the more standard centralized development models used by commercial software
|
||||
companies where a finite set of developers produce a product for sale using a defined set
|
||||
of procedures that ultimately result in an end-product whose architecture and source material
|
||||
are closed to the public.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Open source projects conceptually have differing concurrent agendas, approaches, and production.
|
||||
These facets of the development process can come from anyone in the public (community) that has a
|
||||
stake in the software project.
|
||||
The open source environment contains new copyright, licensing, domain, and consumer issues
|
||||
that differ from the more traditional development environment.
|
||||
In an open source environment the end-product, source material, and documentation are
|
||||
all available to the public at no cost.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
A benchmark example of an open source project is the Linux Kernel, which was initially conceived
|
||||
and created by Finnish computer science student Linus Torvalds in 1991.
|
||||
Conversely, a good example of a non-open source project is the Windows family of operating
|
||||
systems developed by Microsoft Corporation.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Wikipedia has a good historical description of the Open Source Philosophy
|
||||
<ulink url='http://en.wikipedia.org/wiki/Open_source'>here</ulink>.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
You can also find helpful information on how to participate in the Linux Community
|
||||
<ulink url='http://ldn.linuxfoundation.org/book/how-participate-linux-community'>here</ulink>.
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section id='yocto-project-repositories'>
|
||||
<title>Yocto Project Repositories</title>
|
||||
|
||||
<para>
|
||||
The Yocto Project team maintains complete source repositories that allow further development
|
||||
of Yocto Project, its tools, Board Support Packages, and so forth.
|
||||
As a developer who uses Yocto Project, however, you need only to be able to access your
|
||||
kernel or application source code and any layers (modifications) on which you might be working.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
For any supported release of Yocto Project you can go to the Yocto Project website’s
|
||||
<ulink url='http://www.yoctoproject.org/download'>download page</ulink> and get a
|
||||
<filename>.bz2</filename> tarball of the release.
|
||||
You can also go to this site to download any supported BSP tarballs.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
After obtaining the code, you can unpack the tarballs and have a working Git repository
|
||||
from which you can develop.
|
||||
Or, you can take steps to create local repositories of Yocto Project source code and metadata on
|
||||
your development system.
|
||||
See the information
|
||||
<ulink url='https://wiki.yoctoproject.org/wiki/Transcript:_from_git_checkout_to_meta-intel_BSP'>here</ulink>
|
||||
for information on how to set up these local Git repositories.
|
||||
</para>
|
||||
|
||||
<note><para>
|
||||
Should you be interested in locations of complete Yocto Project development code, there are
|
||||
two areas where this code is maintained:
|
||||
<itemizedlist>
|
||||
<listitem><para><emphasis><ulink url='http://git.yoctoproject.org/cgit/cgit.cgi'>Source Repositories:</ulink></emphasis>
|
||||
This area contains IDE Plugins, Matchbox, Poky, Poky Support, Tools, Yocto Linux Kernel, and Yocto
|
||||
Metadata Layers.</para></listitem>
|
||||
<listitem><para><emphasis><ulink url='http://autobuilder.yoctoproject.org/downloads/'>Index of /downloads:</ulink></emphasis>
|
||||
This area contains an index of the Eclipse-plugin, miscellaneous support, poky, pseudo, and
|
||||
all released versions of Yocto Project.
|
||||
[WRITER NOTE: link will be http://downloads.yoctoproject.org.]</para></listitem>
|
||||
</itemizedlist>
|
||||
</para></note>
|
||||
</section>
|
||||
|
||||
<section id='licensing'>
|
||||
<title>Licensing</title>
|
||||
|
||||
<para>
|
||||
Because open source projects are open to the public they have different licensing structures in place.
|
||||
License evolution for both Open Source and Free Software has an interesting history.
|
||||
If you are interested in the history you can find basic information here:
|
||||
<itemizedlist>
|
||||
<listitem><para><ulink url='http://en.wikipedia.org/wiki/Open-source_license'>Open source license history</ulink>
|
||||
</para></listitem>
|
||||
<listitem><para><ulink url='http://en.wikipedia.org/wiki/Free_software_license'>Free software license
|
||||
history</ulink></para></listitem>
|
||||
</itemizedlist>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
In general, Yocto Project is broadly licensed under the Massachusetts Institute of Technology
|
||||
(MIT) License.
|
||||
MIT licensing permits the reuse of software within proprietary software as long as the
|
||||
license is distributed with that software.
|
||||
MIT is also compatible with the GNU General Public License (GPL).
|
||||
Patches to the Yocto Project follow the up-stream licensing scheme.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
You can find information on the MIT License <ulink url='http://en.wikipedia.org/wiki/MIT_License'>here</ulink>.
|
||||
You can find information on the GNU GPL <ulink url='http://en.wikipedia.org/wiki/GPL'>here</ulink>.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
When you build an image using Yocto Project the build process uses a known list of licenses to
|
||||
ensure compliance.
|
||||
Once the build completes the list of all licenses found and used during the build are
|
||||
kept in the resulting build directory at
|
||||
<filename><build_directory>/tmp/deploy/images/licenses</filename>.
|
||||
If a module requires a license that is not in the base list then the build process
|
||||
generates a warning during the build.
|
||||
It is up to the developer to resolve potential licensing issues.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
The base list of licenses used by the build process is a combination of the Software Package
|
||||
Data Exchange (SPDX) list and the Open Source Initiative (OSI) projects.
|
||||
<ulink url='http://spdx.org'>SPDX Group</ulink> is a working group of the Linux Foundation
|
||||
that maintains a specification
|
||||
for a standard format for communicating the components, licenses, and copyrights
|
||||
associated with a software package.
|
||||
<ulink url='http://opensource.org'>OSI</ulink> is a corporation dedicated to the Open Source
|
||||
Definition and the effort for reviewing
|
||||
and approving licenses that are OSD-conformant.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
You can find a list of the combined SPDX and OSI licenses that the Yocto Project uses
|
||||
<ulink url='http://git.yoctoproject.org/cgit/cgit.cgi/poky/tree/meta/files/common-licenses'>here</ulink>.
|
||||
The wiki page discusses the license infrastructure used by the Yocto Project.
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section id='git'>
|
||||
<title>Git</title>
|
||||
|
||||
<para>
|
||||
The Yocto Project uses Git, which is a free, open source distributed version control.
|
||||
Git supports distributed development, non-linear development, can handle large projects,
|
||||
cryptographic authentication of history, and toolkit design.
|
||||
It is best that you know how to work with Git if you are going to use Yocto Project for development.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Git has an extensive set of commands that lets you manage and collaborate changes over the life
|
||||
of a project.
|
||||
Conveniently though, you can manage with a small set of basic operations and workflows
|
||||
once you understand the basic philosophy behind Git.
|
||||
You do not have to be an expert in Git to be functional.
|
||||
A good place to look for instruction on a minimal set of Git commands is
|
||||
<ulink url='http://git-scm.com/documentation'>here</ulink>.
|
||||
If you need to download Git you can do so
|
||||
<ulink url='http://git-scm.com/download'>here</ulink>.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Git works by using branching techniques that track content change (not files)
|
||||
within a project (e.g. a new feature or updated documentation).
|
||||
Creating a tree-like structure based on project divergence allows for excellent historical
|
||||
information over the life of a project.
|
||||
This methodology also allows for an environment in which you can do lots of
|
||||
experimentation on your project as you develop changes or new features.
|
||||
For example, you can create a “branch”, experiment with some feature, and then
|
||||
if you like the feature you incorporate the branch into the tree.
|
||||
If you don’t, you cut the branch off by deleting it.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
If you don’t know much about Git it is strongly suggested that you educate
|
||||
yourself by visiting the links previously mentioned.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
The following list briefly describes some basic Git operations as a way to get started.
|
||||
As with any set of commands, this list (in most cases) simply shows the base command and
|
||||
omits the many arguments they support.
|
||||
See the Git documentation for complete descriptions and strategies on how to use these commands:
|
||||
<itemizedlist>
|
||||
<listitem><para><emphasis><filename>git init</filename></emphasis> – Initializes an empty Git repository.
|
||||
You cannot use Git commands unless you have a <filename>.git</filename> repository.</para></listitem>
|
||||
<listitem><para><emphasis><filename>git clone</filename></emphasis> – Creates a clone of a repository.
|
||||
During collaboration this command allows you to create a local repository that is on
|
||||
equal footing with a fellow developer’s repository.</para></listitem>
|
||||
<listitem><para><emphasis><filename>git add</filename></emphasis> – Adds updated file contents to the index that
|
||||
Git uses to track changes.
|
||||
All files that have changed must be added before they can be committed.</para></listitem>
|
||||
<listitem><para><emphasis><filename>git commit</filename></emphasis> – Creates a “commit” that documents
|
||||
the changes you made.
|
||||
Commits are used for historical purposes, for determining if a maintainer of a project
|
||||
will allow the change, and for ultimately pushing the change from your local Git repository
|
||||
into the project’s upstream (or master) repository.</para></listitem>
|
||||
<listitem><para><emphasis><filename>git status</filename></emphasis> – Reports any modified files that
|
||||
possibly need to be added and committed.</para></listitem>
|
||||
<listitem><para><emphasis><filename>git checkout &lt;branch-name&gt;</filename></emphasis> - Changes
|
||||
your working branch. This command is analogous to “cd”.</para></listitem>
|
||||
<listitem><para><emphasis><filename>git checkout -b &lt;working-branch&gt;</filename></emphasis> - Creates
|
||||
a working branch on your local machine where you can isolate work.
|
||||
It is a good idea to use local branches when adding specific features or changes.
|
||||
This way if you don’t like what you have done you can easily get rid of the work.</para></listitem>
|
||||
<listitem><para><emphasis><filename>git branch</filename></emphasis> – Reports existing branches and
|
||||
tells you the branch in which you are currently working.</para></listitem>
|
||||
<listitem><para><emphasis><filename>git pull</filename></emphasis> – Retrieves information from an upstream Git
|
||||
repository and places it in your local Git repository.
|
||||
You use this command to make sure you are synchronized with the upstream repository
|
||||
from which the project’s maintainer pulls changes into the master repository.</para></listitem>
|
||||
<listitem><para><emphasis><filename>git push</filename></emphasis> – Sends all your local changes you
|
||||
have committed to an upstream Git repository.
|
||||
The maintainer of the project draws from these repositories when adding your changes to the
|
||||
project’s master repository.</para></listitem>
|
||||
<listitem><para><emphasis><filename>git merge</filename></emphasis> – Combines or adds changes from one
|
||||
local branch of your repository with another branch.
|
||||
When you create a local Git repository the default branch is named “master”.
|
||||
A typical workflow is to create a temporary branch for isolated work, make and commit your
|
||||
changes, switch to the master branch, merge the changes in the temporary branch with the
|
||||
master branch, and then delete the temporary branch.</para></listitem>
|
||||
<listitem><para><emphasis><filename>git cherry-pick</filename></emphasis> – Choose and apply specific
|
||||
commits from one branch into another branch.
|
||||
There are times when you might not be able to merge all the changes in one branch with
|
||||
another but need to pick out certain ones.</para></listitem>
|
||||
<listitem><para><emphasis><filename>gitk</filename></emphasis> – Provides a GUI view of the branches
|
||||
and changes in your local Git repository.
|
||||
This command is a good way to see where things have diverged in your local repository.</para></listitem>
|
||||
<listitem><para><emphasis><filename>git log</filename></emphasis> – Reports a history of your changes to the
|
||||
repository.</para></listitem>
|
||||
</itemizedlist>
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section id='workflows'>
|
||||
<title>Workflows</title>
|
||||
|
||||
<para>
|
||||
This section provides some overview on workflows using Git.
|
||||
In particular, the information covers basic practices that describe roles and actions in a
|
||||
collaborative development environment.
|
||||
Again, if you are familiar with this type of development environment you might want to just skip the section.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Following are the definitions for some terms used in the Yocto Project.
|
||||
[WRITER NOTE: I need to move this list of definitions somewhere useful.]
|
||||
<itemizedlist>
|
||||
<listitem><para><emphasis>Image</emphasis> - An image is a collection of recipes created with
|
||||
BitBake (baked). Images target specific hardware and use cases.</para></listitem>
|
||||
<listitem><para><emphasis>Recipe</emphasis> - A set of instructions for building packages.
|
||||
A recipe describes where you get the source and which patches to apply.
|
||||
Recipes describe dependencies for libraries or for other recipes and they
|
||||
also contain configuration and compilation options.
|
||||
Recipes also let you ‘install’ customizations.
|
||||
Recipes contain the logical unit of execution, the software/images to build and
|
||||
use the <filename>.bb</filename> file extension.</para></listitem>
|
||||
<listitem><para><emphasis>BitBake</emphasis> - The task executor and scheduler used by Yocto Project
|
||||
to build images.
|
||||
For more information on BitBake, see the <ulink url='http://bitbake.berlios.de/manual/'>
|
||||
BitBake documentation</ulink>.</para></listitem>
|
||||
<listitem><para><emphasis>Package</emphasis> - A collection of ‘baked’ recipes.
|
||||
You ‘bake’ something by running it through Bitbake.</para></listitem>
|
||||
<listitem><para><emphasis>Layer</emphasis> - A logical collection of recipes representing the core,
|
||||
a BSP, or an application stack.</para></listitem>
|
||||
<listitem><para><emphasis>Metadata</emphasis> - Information for a build that is generally
|
||||
architecture-independent.
|
||||
This information includes Task definitions in recipes, classes, and configuration
|
||||
information.</para></listitem>
|
||||
<listitem><para><emphasis>Configuration File</emphasis>: Configuration information in the
|
||||
<filename>.conf</filename> files provides global definition of variables.
|
||||
The <filename>build/conf/local.conf</filename> configuration file defines local user-defined variables.
|
||||
The <filename>distro/poky.conf</filename> configuration file defines Yocto ‘distro’ configuration
|
||||
variables.
|
||||
The <filename>machine/beagleboard.conf</filename> configuration file defines machine-specific variables.
|
||||
Configuration files end with a <filename>.conf</filename> filename extension.</para></listitem>
|
||||
<listitem><para><emphasis>Classes</emphasis> - Files that encapsulate and inherit logic.
|
||||
Class files end with the <filename>.bbclass</filename> filename extension.</para></listitem>
|
||||
<listitem><para><emphasis>Tasks</emphasis> - Arbitrary groups of software used to contain Recipes.
|
||||
You simply use Tasks to hold recipes that, when built, usually accomplish a single task.
|
||||
For example, a task could contain the recipes for a company’s proprietary or value-add software.
|
||||
Or the task could contain the recipes that enable graphics.
|
||||
A task is really just another recipe.
|
||||
Because task files are recipes, they end with the <filename>.bb</filename> filename
|
||||
extension.</para></listitem>
|
||||
<listitem><para><emphasis>Common OE-Core</emphasis> - A core set of metadata originating
|
||||
with OpenEmbedded (OE) that is shared between OE and the Yocto Project.</para></listitem>
|
||||
</itemizedlist>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
A master Git repository exists that contains the project.
|
||||
Usually a key individual is responsible for this repository.
|
||||
It is the “upstream” repository where the final builds of the project occur.
|
||||
The maintainer is responsible for allowing changes in from other developers and for
|
||||
organizing the branch structure of the repository to reflect release strategies and so forth.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
The maintainer of the project also owns a contribution repository usually known as a “contrib” area.
|
||||
The contrib area temporarily holds changes to the project that have been submitted or committed
|
||||
by the development team.
|
||||
The maintainer determines if the changes are qualified to be moved into the master repository.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Developers create and maintain cloned repositories of the upstream master repository.
|
||||
These repositories are local to their development platforms and are used to develop changes.
|
||||
When a developer is satisfied with a particular feature or change they “push” the changes
|
||||
up to the contrib repository.
|
||||
Developers are responsible for keeping their local repository up-to-date with the master
|
||||
repository.
|
||||
They are also responsible for straightening out any conflicts that might arise within files
|
||||
that are being worked on simultaneously by more than one person.
|
||||
All this work is done locally on the developer’s machine before anything is pushed upstream
|
||||
and examined at the maintainer’s level.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
A somewhat formal method exists by which developers commit changes and push them into the
|
||||
contrib area and subsequently request that the maintainer include them into the master repository.
|
||||
This process is called “submitting a patch” or “submitting a change.”
|
||||
</para>
|
||||
|
||||
<para>
|
||||
To summarize the environment: we have a single point of entry for changes into the project’s
|
||||
master repository, which is controlled by the project’s maintainer.
|
||||
And, we have a set of developers who independently develop, test, and submit changes
|
||||
upstream for the maintainer to examine.
|
||||
The maintainer then chooses which changes are going to become permanently a part of the project.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
[WRITER NOTE: Would like a figure here for Git workflow]
|
||||
</para>
|
||||
|
||||
<para>
|
||||
While each development environment is unique, there are some best practices or methods
|
||||
that help development run smoothly.
|
||||
The following list describes some of these practices.
|
||||
For more detailed information about these strategies see
|
||||
<ulink url='http://www.kernel.org/pub/software/scm/git/docs/gitworkflows.html'>Git Workflows</ulink>.
|
||||
<itemizedlist>
|
||||
<listitem><para><emphasis>Small Changes</emphasis> - It is best to keep your changes you commit
|
||||
small as compared to bundling many disparate changes into a single commit.
|
||||
This practice not only keeps things manageable but also allows the maintainer
|
||||
to more easily include or refuse changes.</para></listitem>
|
||||
<listitem><para><emphasis>Use Branches Liberally</emphasis> - It is very easy to create, use, and
|
||||
delete local branches in your working Git repository.
|
||||
You can name these branches anything you like.
|
||||
It is helpful to give them names associated with the particular feature or change
|
||||
on which you are working.
|
||||
Once you are done with a feature or change you simply discard the branch.</para></listitem>
|
||||
<listitem><para><emphasis>Merge Changes</emphasis> - The Git merge command allows you to take the
|
||||
changes from one branch and fold them into another branch.
|
||||
This process is especially helpful when more than a single developer might be working
|
||||
on different parts of the same feature.
|
||||
Merging changes also automatically identifies any collisions or “conflicts”
|
||||
that might happen resulting from the same lines of code being altered by two different
|
||||
developers.</para></listitem>
|
||||
<listitem><para><emphasis>Manage Branches</emphasis> - Because branches are easy to use, you should
|
||||
use a system where branches indicate varying levels of code readiness.
|
||||
For example, you can have a “work” branch to develop in, a “test” branch where the code or
|
||||
change is tested, a “stage” branch where changes are ready to be committed, and so forth.
|
||||
As your project develops, you can merge code across the branches to reflect ever-increasing
|
||||
stable states of the development.</para></listitem>
|
||||
<listitem><para><emphasis>Use Push and Pull</emphasis> - The push-pull workflow is based on the
|
||||
concept of developers “pushing” local commits upstream to the remote repository, which is
|
||||
usually a contribution repository.
|
||||
It is also based on the developers “pulling” known states of the project down into their
|
||||
local development repositories.
|
||||
This workflow easily allows you to pull changes submitted by other developers from the
|
||||
upstream repository into your work area ensuring that you have the most recent software
|
||||
on which to develop.</para></listitem>
|
||||
<listitem><para><emphasis>Patch Workflow</emphasis> - This workflow allows you to notify the
|
||||
maintainer through an email that you have a change (or patch) you would like considered
|
||||
for the master repository.
|
||||
To send this type of change you format the patch and then send the email using the Git commands
|
||||
<filename>git format-patch</filename> and <filename>git send-email</filename>.
|
||||
You can find information on how to submit later in this chapter.</para></listitem>
|
||||
</itemizedlist>
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section id='tracking-bugs'>
|
||||
<title>Tracking Bugs</title>
|
||||
|
||||
<para>
|
||||
The Yocto Project uses Bugzilla to track bugs.
|
||||
This bug-tracking application works well for group development because it tracks bugs and code
|
||||
changes, can be used to communicate changes and problems with developers, can be used to
|
||||
submit and review patches, and can be used to manage quality assurance.
|
||||
You can find a good overview of Bugzilla <ulink url='http://www.bugzilla.org/about/'>here</ulink>.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Sometimes it is helpful to submit, investigate, or track a bug against the Yocto Project itself.
|
||||
While normally this is a process relevant only to Yocto Project developers, you can find information
|
||||
for Bugzilla configuration and bug tracking procedures specific to the Yocto Project
|
||||
<ulink url='https://wiki.yoctoproject.org/wiki/Bugzilla_Configuration_and_Bug_Tracking'>here</ulink>.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
The Yocto Project uses its own version of the Bugzilla application.
|
||||
You can find the home page <ulink url='http://bugzilla.yoctoproject.org'>here</ulink>.
|
||||
You need to use this implementation of Bugzilla when logging a defect against anything released
|
||||
by the Yocto Project team.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Here are some things to remember when dealing with bugs against the Yocto Project:
|
||||
<itemizedlist>
|
||||
<listitem><para>The Yocto Project follows a bug-naming convention:
|
||||
<filename>[YOCTO &lt;number&gt;]</filename>, where <filename>&lt;number&gt;</filename> is the
|
||||
assigned defect ID used in Bugzilla.
|
||||
So, for example, a valid way to refer to a defect when creating a commit comment
|
||||
would be <filename>[YOCTO 1011]</filename>.
|
||||
This convention becomes important if you are submitting patches against the Yocto Project
|
||||
code itself (see the next section “How to Submit a Change”).</para></listitem>
|
||||
<listitem><para>Defects for Yocto Project fall into one of four classifications: Yocto Projects,
|
||||
Infrastructure, Poky, and Yocto Metadata Layers.</para></listitem>
|
||||
</itemizedlist>
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section id='how-to-submit-a-change'>
|
||||
<title>How to Submit a Change</title>
|
||||
|
||||
<para>
|
||||
During the development process it is necessary to submit your changes to the maintainer
|
||||
of the project.
|
||||
Furthermore, in a collaborative environment it is necessary to have some sort of standard
|
||||
or method through which you submit changes.
|
||||
Otherwise, things would get quite chaotic.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Sometimes you might find it necessary to submit a change or patch to the Yocto Project.
|
||||
If so, you must follow certain procedures.
|
||||
In particular, the headers in patches and the commit messages must follow a certain standard.
|
||||
The general process is the same as described earlier in this section.
|
||||
For complete details on how to create proper commit messages and patch headers see
|
||||
[WRITER NOTE: I need the link to Mark's wiki page here that describes the process.]
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Following are general instructions for both pushing changes upstream and for submitting changes as patches.
|
||||
</para>
|
||||
|
||||
<section id='pushing-a-change-upstream'>
|
||||
<title>Pushing a Change Upstream</title>
|
||||
|
||||
<para>
|
||||
The basic flow for pushing a change to an upstream contrib repository is as follows:
|
||||
<itemizedlist>
|
||||
<listitem><para>Make your changes in your local repository.</para></listitem>
|
||||
<listitem><para>Stage your commit (or change) by using the <filename>git add</filename>
|
||||
command.</para></listitem>
|
||||
<listitem><para>Commit the change by using the <filename>git commit</filename>
|
||||
command and push it to an upstream contrib repository.
|
||||
Be sure to provide a commit message that follows the project’s commit standards.</para></listitem>
|
||||
<listitem><para>Notify the maintainer that you have pushed a change.</para></listitem>
|
||||
</itemizedlist>
|
||||
You can find detailed information on how to push a change upstream
|
||||
<ulink url='http://www.kernel.org/pub/software/scm/git/docs/user-manual.html#Developing-With-git'>
|
||||
here</ulink>.
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section id='submitting-a-patch'>
|
||||
<title>Submitting a Patch</title>
|
||||
|
||||
<para>
|
||||
If you have just a few changes, you can commit them and then submit them as an email to the maintainer.
|
||||
Here is the general procedure:
|
||||
<itemizedlist>
|
||||
<listitem><para>Make your changes in your local repository.</para></listitem>
|
||||
<listitem><para>Stage your commit (or change) by using the <filename>git add</filename>
|
||||
command.</para></listitem>
|
||||
<listitem><para>Commit the change by using the <filename>git commit</filename> command.
|
||||
Be sure to provide a commit message that follows the project’s commit standards.</para></listitem>
|
||||
<listitem><para>Format the commit by using the <filename>git-format-patch</filename>
|
||||
command.
|
||||
This step produces a numbered series of files in the current directory – one for
|
||||
each commit.</para></listitem>
|
||||
<listitem><para>Import the files into your mail client by using the
|
||||
<filename>git-send-email</filename> command.</para></listitem>
|
||||
<listitem><para>Send the email by hand to the maintainer.</para></listitem>
|
||||
</itemizedlist>
|
||||
Be aware that there could be protocols and standards that you need to follow for your particular
|
||||
project.
|
||||
You can find detailed information on the general process
|
||||
<ulink url='http://www.kernel.org/pub/software/scm/git/docs/user-manual.html#sharing-development'>
|
||||
here</ulink>.
|
||||
</para>
|
||||
</section>
|
||||
</section>
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
</chapter>
|
||||
<!--
|
||||
vim: expandtab tw=80 ts=4
|
||||
-->
|
||||
@@ -1,151 +0,0 @@
|
||||
<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
|
||||
"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
|
||||
|
||||
<chapter id='dev-manual-start'>
|
||||
|
||||
<title>Getting Started with the Yocto Project</title>
|
||||
|
||||
<para>
|
||||
This chapter introduces the Yocto Project and gives you an idea of what you need to get started.
|
||||
You can find enough information to set your development host up and build or use images for
|
||||
hardware supported by the Yocto Project by reading the
|
||||
<ulink url='http://www.yoctoproject.org/docs/yocto-quick-start/yocto-project-qs.html'>
|
||||
Yocto Project Quick Start</ulink> located on the <ulink url='http://www.yoctoproject.org'>
|
||||
Yocto Project website</ulink>.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
The remainder of this chapter summarizes what is in the Yocto Project Quick Start and provides
|
||||
some higher level concepts you might want to consider.
|
||||
</para>
|
||||
|
||||
<section id='introducing-the-yocto-project'>
|
||||
<title>Introducing the Yocto Project</title>
|
||||
|
||||
<para>
|
||||
The Yocto Project is an open-source collaboration project focused on embedded Linux developers.
|
||||
The project provides a recent Linux kernel along with a set of system commands, libraries,
|
||||
and system components suitable for the embedded developer.
|
||||
The Yocto Project also features the Sato reference User Interface should you be dealing with
|
||||
devices with restricted screens.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
You can use the Yocto Project, which uses the BitBake build tool, to develop complete Linux
|
||||
images and user-space applications for architectures based on ARM, MIPS, PowerPC, x86 and x86-64.
|
||||
You can perform target-level testing and debugging as well as test in a hardware emulated environment.
|
||||
And, if you are an Eclipse user, you can install an Eclipse Yocto Plug-in to allow you to
|
||||
develop within that familiar environment.
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section id='getting-setup'>
|
||||
<title>Getting Setup</title>
|
||||
|
||||
<para>
|
||||
Here is what you need to get set up to use the Yocto Project:
|
||||
<itemizedlist>
|
||||
<listitem><para><emphasis>Host System:</emphasis> You need a recent release of Fedora,
|
||||
OpenSUSE, Debian, or Ubuntu.
|
||||
You should have a reasonably current Linux-based host system.
|
||||
You should also have about 100 gigabytes of free disk space if you plan on building
|
||||
images.</para></listitem>
|
||||
<listitem><para><emphasis>Packages:</emphasis> Depending on your host system (Debian-based or RPM-based),
|
||||
you need certain packages.
|
||||
See the <ulink url='http://www.yoctoproject.org/docs/yocto-quick-start/yocto-project-qs.html#packages'>
|
||||
The Packages</ulink> section in the Yocto Project Quick Start for the exact package
|
||||
requirements.</para></listitem>
|
||||
<listitem><para><emphasis>Yocto Project Release:</emphasis> You need a release of the Yocto Project.
|
||||
You can get set up for this one of two ways depending on whether you are going to be contributing
|
||||
back into the Yocto Project source repository or not.
|
||||
<itemizedlist>
|
||||
<listitem><para><emphasis>Tarball Extraction:</emphasis> If you are not going to contribute
|
||||
back into the Yocto Project you can simply download the Yocto Project release you want
|
||||
from the website’s <ulink url='http://yoctoproject.org/download'>download page</ulink>.
|
||||
Once you have the tarball, just extract it into a directory of your choice.
|
||||
If you are interested in supported Board Support Packages (BSPs) you can also download
|
||||
these release tarballs from the same site and locate them in a directory of your
|
||||
choice.</para></listitem>
|
||||
<listitem><para><emphasis>Git Method:</emphasis> If you are going to be contributing
|
||||
back into the Yocto Project you should probably use Git commands to set up a local
|
||||
Git repository of the Yocto Project.
|
||||
Doing so creates a history of changes you might make and allows you to easily submit
|
||||
changes upstream to the project.
|
||||
For an example of how to set up your own local Git repository of Yocto Project,
|
||||
see this
|
||||
<ulink url='https://wiki.yoctoproject.org/wiki/Transcript:_from_git_checkout_to_meta-intel_BSP'>
|
||||
wiki page</ulink>, which covers checking out the Yocto sources.</para></listitem>
|
||||
</itemizedlist></para></listitem>
|
||||
<listitem><para><emphasis>Supported Board Support Packages (BSPs):</emphasis> The same considerations
|
||||
exist for BSPs.
|
||||
You can get set up for BSP development one of two ways:
|
||||
<itemizedlist>
|
||||
<listitem><para><emphasis>Tarball Extraction:</emphasis> You can download any released
|
||||
BSP tarball from the same
|
||||
<ulink url='http://yoctoproject.org/download'>download site</ulink>.
|
||||
Once you have the tarball just extract it into a directory of your choice.</para></listitem>
|
||||
<listitem><para><emphasis>Git Method:</emphasis> For an example of how to integrate
|
||||
the metadata for BSPs into your local Yocto Project Git repository see this
|
||||
<ulink url='https://wiki.yoctoproject.org/wiki/Transcript:_from_git_checkout_to_meta-intel_BSP'>
|
||||
wiki page</ulink>, which covers how to check out the meta-intel repository.</para></listitem>
|
||||
</itemizedlist></para></listitem>
|
||||
<listitem><para><emphasis>Eclipse Yocto Plug-in:</emphasis> If you are developing using the
|
||||
Eclipse Integrated Development Environment (IDE) you will need this plug-in.
|
||||
See the
|
||||
<ulink url='http://www.yoctoproject.org/docs/adt-manual/adt-manual.html#setting-up-the-eclipse-ide'>
|
||||
Setting up the Eclipse IDE</ulink> section in the Yocto Application Development Toolkit (ADT)
|
||||
User’s Guide for more information.</para></listitem>
|
||||
</itemizedlist>
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section id='building-images'>
|
||||
<title>Building Images</title>
|
||||
|
||||
<para>
|
||||
The build process creates an entire Linux distribution, including the toolchain, from source.
|
||||
For more information on this topic, see the
|
||||
<ulink url='http://www.yoctoproject.org/docs/yocto-quick-start/yocto-project-qs.html#building-image'>
|
||||
Building an Image</ulink> section in the Yocto Project Quick Start.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
The build process is as follows:
|
||||
<orderedlist>
|
||||
<listitem><para>Make sure you have the Yocto Project files as described in the
|
||||
previous section.</para></listitem>
|
||||
<listitem><para>Initialize the build environment by sourcing a build environment
|
||||
script.</para></listitem>
|
||||
<listitem><para>Make sure the <filename>conf/local.conf</filename> configuration file is set
|
||||
up how you want it.
|
||||
This file defines the target machine architecture and other build configurations.</para></listitem>
|
||||
<listitem><para>Build the image using the BitBake command.
|
||||
If you want information on BitBake, see the user manual at
|
||||
<ulink url='http://docs.openembedded.org/bitbake/html'></ulink>.</para></listitem>
|
||||
<listitem><para>Optionally, you can run the image in the QEMU emulator.</para></listitem>
|
||||
</orderedlist>
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section id='using-pre-built-binaries-and-qemu'>
|
||||
<title>Using Pre-Built Binaries and QEMU</title>
|
||||
|
||||
<para>
|
||||
Another option you have to get started is to use a pre-built binary.
|
||||
This scenario is ideal for developing software applications to run on your target hardware.
|
||||
To do this you need to install the stand-alone Yocto toolchain tarball and then download the
|
||||
pre-built kernel that you will boot using the QEMU emulator.
|
||||
Next, you must download the filesystem for your target machine’s architecture.
|
||||
Finally, you set up the environment to emulate the hardware then start the emulator.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
You can find details on all these steps in the
|
||||
<ulink url='http://www.yoctoproject.org/docs/yocto-quick-start/yocto-project-qs.html#using-pre-built'>
|
||||
Using Pre-Built Binaries and QEMU</ulink> section in the Yocto Project Quick Start.
|
||||
</para>
|
||||
</section>
|
||||
</chapter>
|
||||
<!--
|
||||
vim: expandtab tw=80 ts=4
|
||||
-->
|
||||
@@ -1,71 +0,0 @@
|
||||
<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
|
||||
"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
|
||||
|
||||
<book id='dev-manual' lang='en'
|
||||
xmlns:xi="http://www.w3.org/2003/XInclude"
|
||||
xmlns="http://docbook.org/ns/docbook"
|
||||
>
|
||||
<bookinfo>
|
||||
|
||||
<mediaobject>
|
||||
<imageobject>
|
||||
<imagedata fileref='figures/dev-title.png'
|
||||
format='SVG'
|
||||
align='left' scalefit='1' width='100%'/>
|
||||
</imageobject>
|
||||
</mediaobject>
|
||||
|
||||
<title></title>
|
||||
|
||||
<authorgroup>
|
||||
<author>
|
||||
<firstname>Scott</firstname> <surname>Rifenbark</surname>
|
||||
<affiliation>
|
||||
<orgname>Intel Corporation</orgname>
|
||||
</affiliation>
|
||||
<email>scott.m.rifenbark@intel.com</email>
|
||||
</author>
|
||||
</authorgroup>
|
||||
|
||||
<revhistory>
|
||||
<revision>
|
||||
<revnumber>1.1</revnumber>
|
||||
<date>TBD 2011</date>
|
||||
<revremark>This revision is the initial document draft and corresponds with
|
||||
the Yocto Project 1.1 Release.</revremark>
|
||||
</revision>
|
||||
</revhistory>
|
||||
|
||||
<copyright>
|
||||
<year>2010-2011</year>
|
||||
<holder>Linux Foundation</holder>
|
||||
</copyright>
|
||||
|
||||
<legalnotice>
|
||||
<para>
|
||||
Permission is granted to copy, distribute and/or modify this document under
|
||||
the terms of the <ulink type="http" url="http://creativecommons.org/licenses/by-sa/2.0/uk/">Creative Commons Attribution-Share Alike 2.0 UK: England & Wales</ulink> as published by Creative Commons.
|
||||
</para>
|
||||
</legalnotice>
|
||||
|
||||
</bookinfo>
|
||||
|
||||
<xi:include href="dev-manual-intro.xml"/>
|
||||
|
||||
<xi:include href="dev-manual-start.xml"/>
|
||||
|
||||
<xi:include href="dev-manual-newbie.xml"/>
|
||||
|
||||
<xi:include href="dev-manual-model.xml"/>
|
||||
|
||||
<xi:include href="dev-manual-cases.xml"/>
|
||||
|
||||
<!-- <index id='index'>
|
||||
<title>Index</title>
|
||||
</index>
|
||||
-->
|
||||
|
||||
</book>
|
||||
<!--
|
||||
vim: expandtab tw=80 ts=4
|
||||
-->
|
||||
Binary file not shown.
|
Before Width: | Height: | Size: 12 KiB |
@@ -1,968 +0,0 @@
|
||||
/*
|
||||
Generic XHTML / DocBook XHTML CSS Stylesheet.
|
||||
|
||||
Browser wrangling and typographic design by
|
||||
Oyvind Kolas / pippin@gimp.org
|
||||
|
||||
Customised for Poky by
|
||||
Matthew Allum / mallum@o-hand.com
|
||||
|
||||
Thanks to:
|
||||
Liam R. E. Quin
|
||||
William Skaggs
|
||||
Jakub Steiner
|
||||
|
||||
Structure
|
||||
---------
|
||||
|
||||
The stylesheet is divided into the following sections:
|
||||
|
||||
Positioning
|
||||
Margins, paddings, width, font-size, clearing.
|
||||
Decorations
|
||||
Borders, style
|
||||
Colors
|
||||
Colors
|
||||
Graphics
|
||||
Graphical backgrounds
|
||||
Nasty IE tweaks
|
||||
Workarounds needed to make it work in internet explorer,
|
||||
currently makes the stylesheet non validating, but up until
|
||||
this point it is validating.
|
||||
Mozilla extensions
|
||||
Transparency for footer
|
||||
Rounded corners on boxes
|
||||
|
||||
*/
|
||||
|
||||
|
||||
/*************** /
|
||||
/ Positioning /
|
||||
/ ***************/
|
||||
|
||||
body {
|
||||
font-family: Verdana, Sans, sans-serif;
|
||||
|
||||
min-width: 640px;
|
||||
width: 80%;
|
||||
margin: 0em auto;
|
||||
padding: 2em 5em 5em 5em;
|
||||
color: #333;
|
||||
}
|
||||
|
||||
.reviewer {
|
||||
color: red;
|
||||
}
|
||||
|
||||
h1,h2,h3,h4,h5,h6,h7 {
|
||||
font-family: Arial, Sans;
|
||||
color: #00557D;
|
||||
clear: both;
|
||||
}
|
||||
|
||||
h1 {
|
||||
font-size: 2em;
|
||||
text-align: left;
|
||||
padding: 0em 0em 0em 0em;
|
||||
margin: 2em 0em 0em 0em;
|
||||
}
|
||||
|
||||
h2.subtitle {
|
||||
margin: 0.10em 0em 3.0em 0em;
|
||||
padding: 0em 0em 0em 0em;
|
||||
font-size: 1.8em;
|
||||
padding-left: 20%;
|
||||
font-weight: normal;
|
||||
font-style: italic;
|
||||
}
|
||||
|
||||
h2 {
|
||||
margin: 2em 0em 0.66em 0em;
|
||||
padding: 0.5em 0em 0em 0em;
|
||||
font-size: 1.5em;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
h3.subtitle {
|
||||
margin: 0em 0em 1em 0em;
|
||||
padding: 0em 0em 0em 0em;
|
||||
font-size: 142.14%;
|
||||
text-align: right;
|
||||
}
|
||||
|
||||
h3 {
|
||||
margin: 1em 0em 0.5em 0em;
|
||||
padding: 1em 0em 0em 0em;
|
||||
font-size: 140%;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
h4 {
|
||||
margin: 1em 0em 0.5em 0em;
|
||||
padding: 1em 0em 0em 0em;
|
||||
font-size: 120%;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
h5 {
|
||||
margin: 1em 0em 0.5em 0em;
|
||||
padding: 1em 0em 0em 0em;
|
||||
font-size: 110%;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
h6 {
|
||||
margin: 1em 0em 0em 0em;
|
||||
padding: 1em 0em 0em 0em;
|
||||
font-size: 80%;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
.authorgroup {
|
||||
background-color: transparent;
|
||||
background-repeat: no-repeat;
|
||||
padding-top: 256px;
|
||||
background-image: url("figures/dev-title.png");
|
||||
background-position: left top;
|
||||
margin-top: -256px;
|
||||
padding-right: 50px;
|
||||
margin-left: 0px;
|
||||
text-align: right;
|
||||
width: 740px;
|
||||
}
|
||||
|
||||
h3.author {
|
||||
margin: 0em 0me 0em 0em;
|
||||
padding: 0em 0em 0em 0em;
|
||||
font-weight: normal;
|
||||
font-size: 100%;
|
||||
color: #333;
|
||||
clear: both;
|
||||
}
|
||||
|
||||
.author tt.email {
|
||||
font-size: 66%;
|
||||
}
|
||||
|
||||
.titlepage hr {
|
||||
width: 0em;
|
||||
clear: both;
|
||||
}
|
||||
|
||||
.revhistory {
|
||||
padding-top: 2em;
|
||||
clear: both;
|
||||
}
|
||||
|
||||
.toc,
|
||||
.list-of-tables,
|
||||
.list-of-examples,
|
||||
.list-of-figures {
|
||||
padding: 1.33em 0em 2.5em 0em;
|
||||
color: #00557D;
|
||||
}
|
||||
|
||||
.toc p,
|
||||
.list-of-tables p,
|
||||
.list-of-figures p,
|
||||
.list-of-examples p {
|
||||
padding: 0em 0em 0em 0em;
|
||||
padding: 0em 0em 0.3em;
|
||||
margin: 1.5em 0em 0em 0em;
|
||||
}
|
||||
|
||||
.toc p b,
|
||||
.list-of-tables p b,
|
||||
.list-of-figures p b,
|
||||
.list-of-examples p b{
|
||||
font-size: 100.0%;
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
.toc dl,
|
||||
.list-of-tables dl,
|
||||
.list-of-figures dl,
|
||||
.list-of-examples dl {
|
||||
margin: 0em 0em 0.5em 0em;
|
||||
padding: 0em 0em 0em 0em;
|
||||
}
|
||||
|
||||
.toc dt {
|
||||
margin: 0em 0em 0em 0em;
|
||||
padding: 0em 0em 0em 0em;
|
||||
}
|
||||
|
||||
.toc dd {
|
||||
margin: 0em 0em 0em 2.6em;
|
||||
padding: 0em 0em 0em 0em;
|
||||
}
|
||||
|
||||
div.glossary dl,
|
||||
div.variablelist dl {
|
||||
}
|
||||
|
||||
.glossary dl dt,
|
||||
.variablelist dl dt,
|
||||
.variablelist dl dt span.term {
|
||||
font-weight: normal;
|
||||
width: 20em;
|
||||
text-align: right;
|
||||
}
|
||||
|
||||
.variablelist dl dt {
|
||||
margin-top: 0.5em;
|
||||
}
|
||||
|
||||
.glossary dl dd,
|
||||
.variablelist dl dd {
|
||||
margin-top: -1em;
|
||||
margin-left: 25.5em;
|
||||
}
|
||||
|
||||
.glossary dd p,
|
||||
.variablelist dd p {
|
||||
margin-top: 0em;
|
||||
margin-bottom: 1em;
|
||||
}
|
||||
|
||||
|
||||
div.calloutlist table td {
|
||||
padding: 0em 0em 0em 0em;
|
||||
margin: 0em 0em 0em 0em;
|
||||
}
|
||||
|
||||
div.calloutlist table td p {
|
||||
margin-top: 0em;
|
||||
margin-bottom: 1em;
|
||||
}
|
||||
|
||||
div p.copyright {
|
||||
text-align: left;
|
||||
}
|
||||
|
||||
div.legalnotice p.legalnotice-title {
|
||||
margin-bottom: 0em;
|
||||
}
|
||||
|
||||
p {
|
||||
line-height: 1.5em;
|
||||
margin-top: 0em;
|
||||
|
||||
}
|
||||
|
||||
dl {
|
||||
padding-top: 0em;
|
||||
}
|
||||
|
||||
hr {
|
||||
border: solid 1px;
|
||||
}
|
||||
|
||||
|
||||
.mediaobject,
|
||||
.mediaobjectco {
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
img {
|
||||
border: none;
|
||||
}
|
||||
|
||||
ul {
|
||||
padding: 0em 0em 0em 1.5em;
|
||||
}
|
||||
|
||||
ul li {
|
||||
padding: 0em 0em 0em 0em;
|
||||
}
|
||||
|
||||
ul li p {
|
||||
text-align: left;
|
||||
}
|
||||
|
||||
table {
|
||||
width :100%;
|
||||
}
|
||||
|
||||
th {
|
||||
padding: 0.25em;
|
||||
text-align: left;
|
||||
font-weight: normal;
|
||||
vertical-align: top;
|
||||
}
|
||||
|
||||
td {
|
||||
padding: 0.25em;
|
||||
vertical-align: top;
|
||||
}
|
||||
|
||||
p a[id] {
|
||||
margin: 0px;
|
||||
padding: 0px;
|
||||
display: inline;
|
||||
background-image: none;
|
||||
}
|
||||
|
||||
a {
|
||||
text-decoration: underline;
|
||||
color: #444;
|
||||
}
|
||||
|
||||
pre {
|
||||
overflow: auto;
|
||||
}
|
||||
|
||||
a:hover {
|
||||
text-decoration: underline;
|
||||
/*font-weight: bold;*/
|
||||
}
|
||||
|
||||
|
||||
div.informalfigure,
|
||||
div.informalexample,
|
||||
div.informaltable,
|
||||
div.figure,
|
||||
div.table,
|
||||
div.example {
|
||||
margin: 1em 0em;
|
||||
padding: 1em;
|
||||
page-break-inside: avoid;
|
||||
}
|
||||
|
||||
|
||||
div.informalfigure p.title b,
|
||||
div.informalexample p.title b,
|
||||
div.informaltable p.title b,
|
||||
div.figure p.title b,
|
||||
div.example p.title b,
|
||||
div.table p.title b{
|
||||
padding-top: 0em;
|
||||
margin-top: 0em;
|
||||
font-size: 100%;
|
||||
font-weight: normal;
|
||||
}
|
||||
|
||||
.mediaobject .caption,
|
||||
.mediaobject .caption p {
|
||||
text-align: center;
|
||||
font-size: 80%;
|
||||
padding-top: 0.5em;
|
||||
padding-bottom: 0.5em;
|
||||
}
|
||||
|
||||
.epigraph {
|
||||
padding-left: 55%;
|
||||
margin-bottom: 1em;
|
||||
}
|
||||
|
||||
.epigraph p {
|
||||
text-align: left;
|
||||
}
|
||||
|
||||
.epigraph .quote {
|
||||
font-style: italic;
|
||||
}
|
||||
.epigraph .attribution {
|
||||
font-style: normal;
|
||||
text-align: right;
|
||||
}
|
||||
|
||||
span.application {
|
||||
font-style: italic;
|
||||
}
|
||||
|
||||
.programlisting {
|
||||
font-family: monospace;
|
||||
font-size: 80%;
|
||||
white-space: pre;
|
||||
margin: 1.33em 0em;
|
||||
padding: 1.33em;
|
||||
}
|
||||
|
||||
.tip,
|
||||
.warning,
|
||||
.caution,
|
||||
.note {
|
||||
margin-top: 1em;
|
||||
margin-bottom: 1em;
|
||||
|
||||
}
|
||||
|
||||
/* force full width of table within div */
|
||||
.tip table,
|
||||
.warning table,
|
||||
.caution table,
|
||||
.note table {
|
||||
border: none;
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
|
||||
.tip table th,
|
||||
.warning table th,
|
||||
.caution table th,
|
||||
.note table th {
|
||||
padding: 0.8em 0.0em 0.0em 0.0em;
|
||||
margin : 0em 0em 0em 0em;
|
||||
}
|
||||
|
||||
.tip p,
|
||||
.warning p,
|
||||
.caution p,
|
||||
.note p {
|
||||
margin-top: 0.5em;
|
||||
margin-bottom: 0.5em;
|
||||
padding-right: 1em;
|
||||
text-align: left;
|
||||
}
|
||||
|
||||
.acronym {
|
||||
text-transform: uppercase;
|
||||
}
|
||||
|
||||
b.keycap,
|
||||
.keycap {
|
||||
padding: 0.09em 0.3em;
|
||||
margin: 0em;
|
||||
}
|
||||
|
||||
.itemizedlist li {
|
||||
clear: none;
|
||||
}
|
||||
|
||||
.filename {
|
||||
font-size: medium;
|
||||
font-family: Courier, monospace;
|
||||
}
|
||||
|
||||
|
||||
div.navheader, div.heading{
|
||||
position: absolute;
|
||||
left: 0em;
|
||||
top: 0em;
|
||||
width: 100%;
|
||||
background-color: #cdf;
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
div.navfooter, div.footing{
|
||||
position: fixed;
|
||||
left: 0em;
|
||||
bottom: 0em;
|
||||
background-color: #eee;
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
|
||||
div.navheader td,
|
||||
div.navfooter td {
|
||||
font-size: 66%;
|
||||
}
|
||||
|
||||
div.navheader table th {
|
||||
/*font-family: Georgia, Times, serif;*/
|
||||
/*font-size: x-large;*/
|
||||
font-size: 80%;
|
||||
}
|
||||
|
||||
div.navheader table {
|
||||
border-left: 0em;
|
||||
border-right: 0em;
|
||||
border-top: 0em;
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
div.navfooter table {
|
||||
border-left: 0em;
|
||||
border-right: 0em;
|
||||
border-bottom: 0em;
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
div.navheader table td a,
|
||||
div.navfooter table td a {
|
||||
color: #777;
|
||||
text-decoration: none;
|
||||
}
|
||||
|
||||
/* normal text in the footer */
|
||||
div.navfooter table td {
|
||||
color: black;
|
||||
}
|
||||
|
||||
div.navheader table td a:visited,
|
||||
div.navfooter table td a:visited {
|
||||
color: #444;
|
||||
}
|
||||
|
||||
|
||||
/* links in header and footer */
|
||||
div.navheader table td a:hover,
|
||||
div.navfooter table td a:hover {
|
||||
text-decoration: underline;
|
||||
background-color: transparent;
|
||||
color: #33a;
|
||||
}
|
||||
|
||||
div.navheader hr,
|
||||
div.navfooter hr {
|
||||
display: none;
|
||||
}
|
||||
|
||||
|
||||
.qandaset tr.question td p {
|
||||
margin: 0em 0em 1em 0em;
|
||||
padding: 0em 0em 0em 0em;
|
||||
}
|
||||
|
||||
.qandaset tr.answer td p {
|
||||
margin: 0em 0em 1em 0em;
|
||||
padding: 0em 0em 0em 0em;
|
||||
}
|
||||
.answer td {
|
||||
padding-bottom: 1.5em;
|
||||
}
|
||||
|
||||
.emphasis {
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
|
||||
/************* /
|
||||
/ decorations /
|
||||
/ *************/
|
||||
|
||||
.titlepage {
|
||||
}
|
||||
|
||||
.part .title {
|
||||
}
|
||||
|
||||
.subtitle {
|
||||
border: none;
|
||||
}
|
||||
|
||||
/*
|
||||
h1 {
|
||||
border: none;
|
||||
}
|
||||
|
||||
h2 {
|
||||
border-top: solid 0.2em;
|
||||
border-bottom: solid 0.06em;
|
||||
}
|
||||
|
||||
h3 {
|
||||
border-top: 0em;
|
||||
border-bottom: solid 0.06em;
|
||||
}
|
||||
|
||||
h4 {
|
||||
border: 0em;
|
||||
border-bottom: solid 0.06em;
|
||||
}
|
||||
|
||||
h5 {
|
||||
border: 0em;
|
||||
}
|
||||
*/
|
||||
|
||||
.programlisting {
|
||||
border: solid 1px;
|
||||
}
|
||||
|
||||
div.figure,
|
||||
div.table,
|
||||
div.informalfigure,
|
||||
div.informaltable,
|
||||
div.informalexample,
|
||||
div.example {
|
||||
border: 1px solid;
|
||||
}
|
||||
|
||||
|
||||
|
||||
.tip,
|
||||
.warning,
|
||||
.caution,
|
||||
.note {
|
||||
border: 1px solid;
|
||||
}
|
||||
|
||||
.tip table th,
|
||||
.warning table th,
|
||||
.caution table th,
|
||||
.note table th {
|
||||
border-bottom: 1px solid;
|
||||
}
|
||||
|
||||
.question td {
|
||||
border-top: 1px solid black;
|
||||
}
|
||||
|
||||
.answer {
|
||||
}
|
||||
|
||||
|
||||
b.keycap,
|
||||
.keycap {
|
||||
border: 1px solid;
|
||||
}
|
||||
|
||||
|
||||
div.navheader, div.heading{
|
||||
border-bottom: 1px solid;
|
||||
}
|
||||
|
||||
|
||||
div.navfooter, div.footing{
|
||||
border-top: 1px solid;
|
||||
}
|
||||
|
||||
/********* /
|
||||
/ colors /
|
||||
/ *********/
|
||||
|
||||
body {
|
||||
color: #333;
|
||||
background: white;
|
||||
}
|
||||
|
||||
a {
|
||||
background: transparent;
|
||||
}
|
||||
|
||||
a:hover {
|
||||
background-color: #dedede;
|
||||
}
|
||||
|
||||
|
||||
h1,
|
||||
h2,
|
||||
h3,
|
||||
h4,
|
||||
h5,
|
||||
h6,
|
||||
h7,
|
||||
h8 {
|
||||
background-color: transparent;
|
||||
}
|
||||
|
||||
hr {
|
||||
border-color: #aaa;
|
||||
}
|
||||
|
||||
|
||||
.tip, .warning, .caution, .note {
|
||||
border-color: #aaa;
|
||||
}
|
||||
|
||||
|
||||
.tip table th,
|
||||
.warning table th,
|
||||
.caution table th,
|
||||
.note table th {
|
||||
border-bottom-color: #aaa;
|
||||
}
|
||||
|
||||
|
||||
.warning {
|
||||
background-color: #fea;
|
||||
}
|
||||
|
||||
.caution {
|
||||
background-color: #fea;
|
||||
}
|
||||
|
||||
.tip {
|
||||
background-color: #eff;
|
||||
}
|
||||
|
||||
.note {
|
||||
background-color: #dfc;
|
||||
}
|
||||
|
||||
.glossary dl dt,
|
||||
.variablelist dl dt,
|
||||
.variablelist dl dt span.term {
|
||||
color: #044;
|
||||
}
|
||||
|
||||
div.figure,
|
||||
div.table,
|
||||
div.example,
|
||||
div.informalfigure,
|
||||
div.informaltable,
|
||||
div.informalexample {
|
||||
border-color: #aaa;
|
||||
}
|
||||
|
||||
pre.programlisting {
|
||||
color: black;
|
||||
background-color: #fff;
|
||||
border-color: #aaa;
|
||||
border-width: 2px;
|
||||
}
|
||||
|
||||
.guimenu,
|
||||
.guilabel,
|
||||
.guimenuitem {
|
||||
background-color: #eee;
|
||||
}
|
||||
|
||||
|
||||
b.keycap,
|
||||
.keycap {
|
||||
background-color: #eee;
|
||||
border-color: #999;
|
||||
}
|
||||
|
||||
|
||||
div.navheader {
|
||||
border-color: black;
|
||||
}
|
||||
|
||||
|
||||
div.navfooter {
|
||||
border-color: black;
|
||||
}
|
||||
|
||||
|
||||
/*********** /
|
||||
/ graphics /
|
||||
/ ***********/
|
||||
|
||||
/*
|
||||
body {
|
||||
background-image: url("images/body_bg.jpg");
|
||||
background-attachment: fixed;
|
||||
}
|
||||
|
||||
.navheader,
|
||||
.note,
|
||||
.tip {
|
||||
background-image: url("images/note_bg.jpg");
|
||||
background-attachment: fixed;
|
||||
}
|
||||
|
||||
.warning,
|
||||
.caution {
|
||||
background-image: url("images/warning_bg.jpg");
|
||||
background-attachment: fixed;
|
||||
}
|
||||
|
||||
.figure,
|
||||
.informalfigure,
|
||||
.example,
|
||||
.informalexample,
|
||||
.table,
|
||||
.informaltable {
|
||||
background-image: url("images/figure_bg.jpg");
|
||||
background-attachment: fixed;
|
||||
}
|
||||
|
||||
*/
|
||||
h1,
|
||||
h2,
|
||||
h3,
|
||||
h4,
|
||||
h5,
|
||||
h6,
|
||||
h7{
|
||||
}
|
||||
|
||||
/*
|
||||
Example of how to stick an image as part of the title.
|
||||
|
||||
div.article .titlepage .title
|
||||
{
|
||||
background-image: url("figures/white-on-black.png");
|
||||
background-position: center;
|
||||
background-repeat: repeat-x;
|
||||
}
|
||||
*/
|
||||
|
||||
div.preface .titlepage .title,
|
||||
div.colophon .title,
|
||||
div.chapter .titlepage .title,
|
||||
div.article .titlepage .title
|
||||
{
|
||||
}
|
||||
|
||||
div.section div.section .titlepage .title,
|
||||
div.sect2 .titlepage .title {
|
||||
background: none;
|
||||
}
|
||||
|
||||
|
||||
h1.title {
|
||||
background-color: transparent;
|
||||
background-image: url("figures/yocto-project-bw.png");
|
||||
background-repeat: no-repeat;
|
||||
height: 256px;
|
||||
text-indent: -9000px;
|
||||
overflow:hidden;
|
||||
}
|
||||
|
||||
h2.subtitle {
|
||||
background-color: transparent;
|
||||
text-indent: -9000px;
|
||||
overflow:hidden;
|
||||
width: 0px;
|
||||
display: none;
|
||||
}
|
||||
|
||||
/*************************************** /
|
||||
/ pippin.gimp.org specific alterations /
|
||||
/ ***************************************/
|
||||
|
||||
/*
|
||||
div.heading, div.navheader {
|
||||
color: #777;
|
||||
font-size: 80%;
|
||||
padding: 0;
|
||||
margin: 0;
|
||||
text-align: left;
|
||||
position: absolute;
|
||||
top: 0px;
|
||||
left: 0px;
|
||||
width: 100%;
|
||||
height: 50px;
|
||||
background: url('/gfx/heading_bg.png') transparent;
|
||||
background-repeat: repeat-x;
|
||||
background-attachment: fixed;
|
||||
border: none;
|
||||
}
|
||||
|
||||
div.heading a {
|
||||
color: #444;
|
||||
}
|
||||
|
||||
div.footing, div.navfooter {
|
||||
border: none;
|
||||
color: #ddd;
|
||||
font-size: 80%;
|
||||
text-align:right;
|
||||
|
||||
width: 100%;
|
||||
padding-top: 10px;
|
||||
position: absolute;
|
||||
bottom: 0px;
|
||||
left: 0px;
|
||||
|
||||
background: url('/gfx/footing_bg.png') transparent;
|
||||
}
|
||||
*/
|
||||
|
||||
|
||||
|
||||
/****************** /
|
||||
/ nasty ie tweaks /
|
||||
/ ******************/
|
||||
|
||||
/*
|
||||
div.heading, div.navheader {
|
||||
width:expression(document.body.clientWidth + "px");
|
||||
}
|
||||
|
||||
div.footing, div.navfooter {
|
||||
width:expression(document.body.clientWidth + "px");
|
||||
margin-left:expression("-5em");
|
||||
}
|
||||
body {
|
||||
padding:expression("4em 5em 0em 5em");
|
||||
}
|
||||
*/
|
||||
|
||||
/**************************************** /
|
||||
/ mozilla vendor specific css extensions /
|
||||
/ ****************************************/
|
||||
/*
|
||||
div.navfooter, div.footing{
|
||||
-moz-opacity: 0.8em;
|
||||
}
|
||||
|
||||
div.figure,
|
||||
div.table,
|
||||
div.informalfigure,
|
||||
div.informaltable,
|
||||
div.informalexample,
|
||||
div.example,
|
||||
.tip,
|
||||
.warning,
|
||||
.caution,
|
||||
.note {
|
||||
-moz-border-radius: 0.5em;
|
||||
}
|
||||
|
||||
b.keycap,
|
||||
.keycap {
|
||||
-moz-border-radius: 0.3em;
|
||||
}
|
||||
*/
|
||||
|
||||
table tr td table tr td {
|
||||
display: none;
|
||||
}
|
||||
|
||||
|
||||
hr {
|
||||
display: none;
|
||||
}
|
||||
|
||||
table {
|
||||
border: 0em;
|
||||
}
|
||||
|
||||
.photo {
|
||||
float: right;
|
||||
margin-left: 1.5em;
|
||||
margin-bottom: 1.5em;
|
||||
margin-top: 0em;
|
||||
max-width: 17em;
|
||||
border: 1px solid gray;
|
||||
padding: 3px;
|
||||
background: white;
|
||||
}
|
||||
.seperator {
|
||||
padding-top: 2em;
|
||||
clear: both;
|
||||
}
|
||||
|
||||
#validators {
|
||||
margin-top: 5em;
|
||||
text-align: right;
|
||||
color: #777;
|
||||
}
|
||||
@media print {
|
||||
body {
|
||||
font-size: 8pt;
|
||||
}
|
||||
.noprint {
|
||||
display: none;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
.tip,
|
||||
.note {
|
||||
background: #666666;
|
||||
color: #fff;
|
||||
padding: 20px;
|
||||
margin: 20px;
|
||||
}
|
||||
|
||||
.tip h3,
|
||||
.note h3 {
|
||||
padding: 0em;
|
||||
margin: 0em;
|
||||
font-size: 2em;
|
||||
font-weight: bold;
|
||||
color: #fff;
|
||||
}
|
||||
|
||||
.tip a,
|
||||
.note a {
|
||||
color: #fff;
|
||||
text-decoration: underline;
|
||||
}
|
||||
@@ -46,11 +46,11 @@
|
||||
the baseline kernel is the most stable official release.</para></listitem>
|
||||
<listitem><para>Include major technological features as part of Yocto Project's up-rev
|
||||
strategy.</para></listitem>
|
||||
<listitem><para>Present a Git tree, that just like the upstream kernel.org tree, has a
|
||||
<listitem><para>Present a git tree, that just like the upstream kernel.org tree, has a
|
||||
clear and continuous history.</para></listitem>
|
||||
<listitem><para>Deliver a key set of supported kernel types, where each type is tailored
|
||||
to a specific use case (i.e. networking, consumer, devices, and so forth).</para></listitem>
|
||||
<listitem><para>Employ a Git branching strategy that from a customer's point of view
|
||||
to a specific use case (i.g. networking, consumer, devices, and so forth).</para></listitem>
|
||||
<listitem><para>Employ a git branching strategy that from a customer's point of view
|
||||
results in a linear path from the baseline kernel.org, through a select group of features and
|
||||
ends with their BSP-specific commits.</para></listitem>
|
||||
</itemizedlist>
|
||||
@@ -170,7 +170,7 @@
|
||||
You can think of the Yocto Project kernel as consisting of a baseline kernel with
|
||||
added features logically structured on top of the baseline.
|
||||
The features are tagged and organized by way of a branching strategy implemented by the
|
||||
source code manager (SCM) Git.
|
||||
source code manager (SCM) git.
|
||||
The result is that the user has the ability to see the added features and
|
||||
the commits that make up those features.
|
||||
In addition to being able to see added features, the user can also view the history of what
|
||||
@@ -279,20 +279,15 @@
|
||||
</section>
|
||||
|
||||
<section id='source-code-manager-git'>
|
||||
<title>Source Code Manager - Git</title>
|
||||
<title>Source Code Manager - git</title>
|
||||
<para>
|
||||
The Source Code Manager (SCM) is Git and it is the obvious mechanism for meeting the
|
||||
The Source Code Manager (SCM) is git and it is the obvious mechanism for meeting the
|
||||
previously mentioned goals.
|
||||
Not only is it the SCM for kernel.org but Git continues to grow in popularity and
|
||||
Not only is it the SCM for kernel.org but git continues to grow in popularity and
|
||||
supports many different work flows, front-ends and management techniques.
|
||||
</para>
|
||||
<para>
|
||||
You can find documentation on Git at <ulink url='http://git-scm.com/documentation'></ulink>.
|
||||
Also, the Yocto Project Development manual has an introduction to Git and describes a
|
||||
minimal set of commands that allow you to be functional with Git.
|
||||
</para>
|
||||
<note><para>
|
||||
It should be noted that you can use as much, or as little, of what Git has to offer
|
||||
It should be noted that you can use as much, or as little, of what git has to offer
|
||||
as is appropriate to your project.
|
||||
</para></note>
|
||||
</section>
|
||||
@@ -301,22 +296,21 @@
|
||||
<section id='kernel-tools'>
|
||||
<title>Kernel Tools</title>
|
||||
<para>
|
||||
Since most standard workflows involve moving forward with an existing tree by
|
||||
continuing to add and alter the underlying baseline, the tools that manage
|
||||
the Yocto Project's kernel construction are largely hidden from the developer to
|
||||
present a simplified view of the kernel for ease of use.
|
||||
</para>
|
||||
<para>
|
||||
The fundamental properties of the tools that manage and construct the
|
||||
Yocto Project kernel are:
|
||||
<itemizedlist>
|
||||
<listitem><para>Group patches into named, reusable features.</para></listitem>
|
||||
<listitem><para>Allow top down control of included features.</para></listitem>
|
||||
<listitem><para>Bind kernel configuration to kernel patches and features.</para></listitem>
|
||||
<listitem><para>Present a seamless Git repository that blends Yocto Project value
|
||||
with the kernel.org history and development.</para></listitem>
|
||||
</itemizedlist>
|
||||
</para>
|
||||
Since most standard workflows involve moving forward with an existing tree by
|
||||
continuing to add and alter the underlying baseline, the tools that manage
|
||||
Yocto Project's kernel construction are largely hidden from the developer to
|
||||
present a simplified view of the kernel for ease of use.
|
||||
</para>
|
||||
<para>
|
||||
The fundamental properties of the tools that manage and construct the
|
||||
kernel are:
|
||||
<itemizedlist>
|
||||
<listitem><para>the ability to group patches into named, reusable features</para></listitem>
|
||||
<listitem><para>to allow top down control of included features</para></listitem>
|
||||
<listitem><para>the binding of kernel configuration to kernel patches/features</para></listitem>
|
||||
<listitem><para>the presentation of a seamless git repository that blends Yocto Project value with the kernel.org history and development</para></listitem>
|
||||
</itemizedlist>
|
||||
</para>
|
||||
<!--<para>
|
||||
The tools that construct a kernel tree will be discussed later in this
|
||||
document. The following tools form the foundation of the Yocto Project
|
||||
|
||||
@@ -8,9 +8,9 @@
|
||||
<section id='book-intro'>
|
||||
<title>Introduction</title>
|
||||
<para>
|
||||
The Yocto Project presents the kernel as a fully patched, history-clean Git
|
||||
The Yocto Project presents the kernel as a fully patched, history-clean git
|
||||
repository.
|
||||
The Git tree represents the selected features, board support,
|
||||
The git tree represents the selected features, board support,
|
||||
and configurations extensively tested by Yocto Project.
|
||||
The Yocto Project kernel allows the end user to leverage community
|
||||
best practices to seamlessly manage the development, build and debug cycles.
|
||||
|
||||
@@ -10,8 +10,6 @@
|
||||
<title>Introduction</title>
|
||||
<para>
|
||||
This chapter describes how to accomplish tasks involving the kernel's tree structure.
|
||||
This information is designed to help the developer that wants to modify the Yocto Project kernel
|
||||
and contribute changes upstream to the Yocto Project.
|
||||
The information covers the following:
|
||||
<itemizedlist>
|
||||
<listitem><para>Tree construction</para></listitem>
|
||||
@@ -40,31 +38,19 @@
|
||||
in the product.
|
||||
Those feature descriptions list all necessary patches,
|
||||
configuration, branching, tagging and feature divisions found in the kernel.
|
||||
Thus, the Yocto Project kernel repository (or tree) is built.
|
||||
The existence of this tree allows you to build images based on your configurations
|
||||
and features.
|
||||
</para>
|
||||
<para>
|
||||
You can find the files used to describe all the valid features and BSPs in the Yocto Project
|
||||
kernel in any clone of the kernel Git tree.
|
||||
For example, the following command clones the Yocto Project baseline kernel that
|
||||
branched off of linux.org version 2.6.37:
|
||||
<literallayout class='monospaced'>
|
||||
$ git clone http://git.yoctoproject.org/cgit/cgit.cgi/linux-yocto-2.6.37
|
||||
</literallayout>
|
||||
After you switch to the <filename>meta</filename> branch within the repository
|
||||
you can see a snapshot of all the kernel configuration and feature descriptions that are
|
||||
used to build the kernel repository.
|
||||
These descriptions are in the form of <filename>.scc</filename> files.
|
||||
</para>
|
||||
<para>
|
||||
kernel in any clone of the kernel git tree.
|
||||
The directory <filename>meta/cfg/kernel-cache/</filename> is a snapshot of all the kernel
|
||||
configuration and feature descriptions (.scc) used to build the kernel repository.
|
||||
You should realize, however, that browsing the snapshot of feature
|
||||
descriptions and patches is not an effective way to determine what is in a
|
||||
particular kernel branch.
|
||||
Instead, you should use Git directly to discover the changes
|
||||
Instead, you should use git directly to discover the changes
|
||||
in a branch.
|
||||
Using Git is an efficient and flexible way to inspect changes to the kernel.
|
||||
For examples showing how to use Git to inspect kernel commits, see the following sections
|
||||
Using git is a efficient and flexible way to inspect changes to the kernel.
|
||||
For examples showing how to use git to inspect kernel commits, see the following sections
|
||||
in this chapter.
|
||||
</para>
|
||||
<note><para>
|
||||
@@ -74,56 +60,46 @@
|
||||
and development.
|
||||
</para></note>
|
||||
<para>
|
||||
The following steps describe what happens during tree construction given the introduction
|
||||
of a new top-level kernel feature or BSP.
|
||||
These are the actions that effectively create the tree that includes the new feature, patch,
|
||||
or BSP:
|
||||
<orderedlist>
|
||||
<listitem><para>A top-level kernel feature is passed to the kernel build subsystem.
|
||||
Normally, this is a BSP for a particular kernel type.</para></listitem>
|
||||
The general flow for constructing a project-specific kernel tree is as follows:
|
||||
<orderedlist>
|
||||
<listitem><para>A top-level kernel feature is passed to the kernel build subsystem.
|
||||
Normally, this is a BSP for a particular kernel type.</para></listitem>
|
||||
|
||||
<listitem><para>The file that describes the top-level feature is located by searching
|
||||
these system directories:
|
||||
<listitem><para>The file that describes the top-level feature is located by searching
|
||||
these system directories:</para>
|
||||
|
||||
<itemizedlist>
|
||||
<listitem><para>The in-tree kernel-cache directories, which are located
|
||||
in <filename>meta/cfg/kernel-cache</filename></para></listitem>
|
||||
<itemizedlist>
|
||||
<listitem><para>The in-tree kernel-cache directories</para></listitem>
|
||||
<!-- <listitem><para>kernel-*-cache directories in layers</para></listitem> -->
|
||||
<listitem><para>Areas pointed to by <filename>SRC_URI</filename> statements
|
||||
found in recipes</para></listitem>
|
||||
<listitem><para>Recipe SRC_URIs</para></listitem>
|
||||
<!-- <listitem><para>configured and default templates</para></listitem> -->
|
||||
</itemizedlist>
|
||||
</itemizedlist>
|
||||
|
||||
For a typical build, the target of the search is a
|
||||
feature description in an <filename>.scc</filename> file
|
||||
whose name follows this format:
|
||||
<literallayout class='monospaced'>
|
||||
<bsp_name>-<kernel_type>.scc
|
||||
</literallayout>
|
||||
</para></listitem>
|
||||
<para>For a typical build a feature description of the format:
|
||||
<bsp name>-<kernel type>.scc is the target of the search.
|
||||
</para></listitem>
|
||||
|
||||
<listitem><para>Once located, the feature description is either compiled into a simple script
|
||||
of actions, or into an existing equivalent script that is already part of the
|
||||
shipped kernel.</para></listitem>
|
||||
<listitem><para>Once located, the feature description is either compiled into a simple script
|
||||
of actions, or an existing equivalent script that was part of the
|
||||
shipped kernel is located.</para></listitem>
|
||||
|
||||
<listitem><para>Extra features are appended to the top-level feature description.
|
||||
These features can come from the <filename>KERNEL_FEATURES</filename> variable in
|
||||
recipes.</para></listitem>
|
||||
<listitem><para>Extra features are appended to the top-level feature description.
|
||||
These features can come from the KERNEL_FEATURES variable in recipes.</para></listitem>
|
||||
|
||||
<listitem><para>Each extra feature is located, compiled and appended to the script
|
||||
as described in step three.</para></listitem>
|
||||
<listitem><para>Each extra feature is located, compiled and appended to the script from
|
||||
step #3</para></listitem>
|
||||
|
||||
<listitem><para>The script is executed to produce a meta-series.
|
||||
The meta-series is a description of all the branches, tags, patches and configurations that
|
||||
need to be applied to the base Git repository to completely create the
|
||||
source (build) branch for the new BSP or feature.</para></listitem>
|
||||
<listitem><para>The script is executed, and a meta-series is produced.
|
||||
The meta-series is a description of all the branches, tags, patches and configuration that
|
||||
needs to be applied to the base git repository to completely create the
|
||||
BSP source (build) branch.</para></listitem>
|
||||
|
||||
<listitem><para>The base repository is cloned, and the actions
|
||||
listed in the meta-series are applied to the tree.</para></listitem>
|
||||
<listitem><para>The base repository is cloned, and the actions
|
||||
listed in the meta-series are applied to the tree.</para></listitem>
|
||||
|
||||
<listitem><para>The Git repository is left with the desired branch checked out and any
|
||||
required branching, patching and tagging has been performed.</para></listitem>
|
||||
</orderedlist>
|
||||
<listitem><para>The git repository is left with the desired branch checked out and any
|
||||
required branching, patching and tagging has been performed.</para></listitem>
|
||||
</orderedlist>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
@@ -137,7 +113,7 @@
|
||||
official Yocto Project kernel repositories is the combination of all
|
||||
supported boards and configurations.</para>
|
||||
|
||||
<para>This technique is flexible and allows for seamless blending of an immutable
|
||||
<para>This technique is flexible and allows the seamless blending of an immutable
|
||||
history with additional deployment specific patches.
|
||||
Any additions to the kernel become an integrated part of the branches.
|
||||
</para></note>
|
||||
@@ -161,7 +137,7 @@ A summary of end user tree construction activities follow:
|
||||
<itemizedlist>
|
||||
<listitem><para>compile and link a full top-down kernel description from feature descriptions</para></listitem>
|
||||
<listitem><para>execute the complete description to generate a meta-series</para></listitem>
|
||||
<listitem><para>interpret the meta-series to create a customized Git repository for the
|
||||
<listitem><para>interpret the meta-series to create a customized git repository for the
|
||||
board</para></listitem>
|
||||
<listitem><para>migrate configuration fragments and configure the kernel</para></listitem>
|
||||
<listitem><para>checkout the BSP branch and build</para></listitem>
|
||||
@@ -177,7 +153,7 @@ A summary of end user tree construction activities follow:
|
||||
</para>
|
||||
|
||||
<itemizedlist>
|
||||
<listitem><para>There must be a kernel Git repository indicated in the SRC_URI.</para></listitem>
|
||||
<listitem><para>There must be a kernel git repository indicated in the SRC_URI.</para></listitem>
|
||||
<listitem><para>There must be a BSP build branch - <bsp name>-<kernel type> in 0.9 or
|
||||
<kernel type>/<bsp name> in 1.0.</para></listitem>
|
||||
</itemizedlist>
|
||||
@@ -192,14 +168,12 @@ A summary of end user tree construction activities follow:
|
||||
|
||||
<para>
|
||||
Before building a kernel it is configured by processing all of the
|
||||
configuration "fragments" specified by feature descriptions in the <filename>scc</filename>
|
||||
files.
|
||||
configuration "fragments" specified by the scc feature descriptions.
|
||||
As the features are compiled, associated kernel configuration fragments are noted
|
||||
and recorded in the meta-series in their compilation order.
|
||||
The fragments are migrated, pre-processed and passed to the Linux Kernel
|
||||
Configuration subsystem (<filename>lkc</filename>) as raw input in the form
|
||||
of a <filename>.config</filename> file.
|
||||
The <filename>lkc</filename> uses its own internal dependency constraints to do the final
|
||||
Configuration subsystem (lkc) as raw input in the form of a <filename>.config</filename> file.
|
||||
The lkc uses its own internal dependency constraints to do the final
|
||||
processing of that information and generates the final <filename>.config</filename> file
|
||||
that is used during compilation.
|
||||
</para>
|
||||
@@ -210,7 +184,7 @@ A summary of end user tree construction activities follow:
|
||||
</para>
|
||||
|
||||
<para>The other thing that you will first see once you configure a kernel is that
|
||||
it will generate a build tree that is separate from your Git source tree.
|
||||
it will generate a build tree that is separate from your git source tree.
|
||||
This build tree has the name using the following form:
|
||||
<literallayout class='monospaced'>
|
||||
linux-<BSPname>-<kerntype>-build
|
||||
@@ -227,7 +201,7 @@ A summary of end user tree construction activities follow:
|
||||
The files include the final <filename>.config</filename>, all the <filename>.o</filename>
|
||||
files, the <filename>.a</filename> files, and so forth.
|
||||
Since each BSP has its own separate build directory in its own separate branch
|
||||
of the Git tree you can easily switch between different BSP builds.
|
||||
of the git tree you can easily switch between different BSP builds.
|
||||
</para>
|
||||
</section>
|
||||
|
||||
@@ -246,7 +220,7 @@ to be used or not. The 2.0 release already made use of some stateful
|
||||
construction of series files, but since the delivery mechanism was unchanged
|
||||
(tar + patches + series files), most people were not aware of anything really
|
||||
different. The 3.0 release continues with this stateful construction of
|
||||
series files, but since the delivery mechanism is changed (Git + branches) it
|
||||
series files, but since the delivery mechanism is changed (git + branches) it
|
||||
now is more apparent to people.
|
||||
</para>
|
||||
<para>
|
||||
@@ -255,7 +229,7 @@ compiler". Its role is to combine feature descriptions into a format that can
|
||||
be used to generate a meta-series. A meta series contains all the required
|
||||
information to construct a complete set of branches that are required to
|
||||
build a desired board and feature set. The meta series is interpreted by the
|
||||
kgit tools to create a Git repository that could be built.
|
||||
kgit tools to create a git repository that could be built.
|
||||
</para>
|
||||
<para>
|
||||
To illustrate how scc works, a feature description must first be understood.
|
||||
@@ -272,7 +246,7 @@ Each feature description can use any of the following valid scc commands:
|
||||
<listitem><para>shell constructs: bash conditionals and other utilities can be used in a feature
|
||||
description. During compilation, the working directory is the feature
|
||||
description itself, so any command that is "raw shell" and not from the
|
||||
list of supported commands, can not directly modify a Git repository.</para></listitem>
|
||||
list of supported commands, can not directly modify a git repository.</para></listitem>
|
||||
|
||||
<listitem><para>patch <relative path>/<patch name>: outputs a patch to be included in a feature's patch set. Only the name of
|
||||
the patch is supplied, the path is calculated from the currently set
|
||||
@@ -323,9 +297,9 @@ Each feature description can use any of the following valid scc commands:
|
||||
include is processed, so is normally only used by a new top level feature
|
||||
to modify the order of features in something it is including.</para></listitem>
|
||||
|
||||
<listitem><para>git <command>: Issues any Git command during tree construction. Note: this command is
|
||||
<listitem><para>git <command>: Issues any git command during tree construction. Note: this command is
|
||||
not validated/sanitized so care must be taken to not damage the
|
||||
tree. This can be used to script branching, tagging, pulls or other Git
|
||||
tree. This can be used to script branching, tagging, pulls or other git
|
||||
operations.</para></listitem>
|
||||
|
||||
<listitem><para>dir <directory>: changes the working directory for "patch" directives. This can be used to
|
||||
@@ -375,17 +349,17 @@ kgit-meta is the actual application of feature description(s) to a kernel repo.
|
||||
In other words, it is responsible for interpreting the meta series generated
|
||||
from a scc compiled script. As a result, kgit-meta is coupled to the set of
|
||||
commands permitted in a .scc feature description (listed in the scc section).
|
||||
kgit-meta understands both the meta series format and how to use Git and
|
||||
guilt to modify a base Git repository. It processes a meta-series line by
|
||||
kgit-meta understands both the meta series format and how to use git and
|
||||
guilt to modify a base git repository. It processes a meta-series line by
|
||||
line, branching, tagging, patching and tracking changes that are made to the
|
||||
base Git repository.
|
||||
base git repository.
|
||||
</para>
|
||||
<para>
|
||||
Once kgit-meta has processed a meta-series, it leaves the repository with the
|
||||
last branch checked out, and creates the necessary guilt infrastructure to
|
||||
inspect the tree, or add to it via using guilt. As was previously mentioned,
|
||||
guilt is not required, but is provided as a convenience. Other utilities such
|
||||
as quilt, stgit, Git or others can also be used to manipulate the Git
|
||||
as quilt, stgit, git or others can also be used to manipulate the git
|
||||
repository.
|
||||
</para>
|
||||
</section> -->
|
||||
@@ -394,12 +368,12 @@ repository.
|
||||
<title>Workflow Examples</title>
|
||||
|
||||
<para>
|
||||
As previously noted, the Yocto Project kernel has built in Git integration.
|
||||
As previously noted, the Yocto Project kernel has built in git integration.
|
||||
However, these utilities are not the only way to work with the kernel repository.
|
||||
Yocto Project has not made changes to Git or to other tools that
|
||||
Yocto Project has not made changes to git or to other tools that
|
||||
would invalidate alternate workflows.
|
||||
Additionally, the way the kernel repository is constructed results in using
|
||||
only core Git functionality thus allowing any number of tools or front ends to use the
|
||||
only core git functionality thus allowing any number of tools or front ends to use the
|
||||
resulting tree.
|
||||
</para>
|
||||
|
||||
@@ -428,7 +402,7 @@ repository.
|
||||
|
||||
<para>
|
||||
A more efficient way to determine what has changed in the kernel is to use
|
||||
Git and inspect or search the kernel tree.
|
||||
git and inspect or search the kernel tree.
|
||||
This method gives you a full view of not only the source code modifications,
|
||||
but also provides the reasons for the changes.
|
||||
</para>
|
||||
@@ -437,8 +411,8 @@ repository.
|
||||
<title>What Changed in a BSP?</title>
|
||||
|
||||
<para>
|
||||
Following are a few examples that show how to use Git to examine changes.
|
||||
Note that because the Yocto Project Git repository does not break existing Git
|
||||
Following are a few examples that show how to use git to examine changes.
|
||||
Note that because the Yocto Project git repository does not break existing git
|
||||
functionality and because there exists many permutations of these types of
|
||||
commands there are many more methods to discover changes.
|
||||
</para>
|
||||
@@ -501,7 +475,7 @@ repository.
|
||||
<para>
|
||||
You can use many other comparisons to isolate BSP changes.
|
||||
For example, you can compare against kernel.org tags (e.g. v2.6.27.18, etc), or
|
||||
you can compare against subsystems (e.g. <filename>git whatchanged mm</filename>).
|
||||
you can compare against subsystems (e.g. git whatchanged mm).
|
||||
</para>
|
||||
</section>
|
||||
</section>
|
||||
@@ -516,9 +490,9 @@ repository.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Since the Yocto Project kernel source tree is backed by Git, this activity is
|
||||
Since the Yocto Project kernel source tree is backed by git, this activity is
|
||||
much easier as compared to with previous releases.
|
||||
Because Git tracks file modifications, additions and deletions, it is easy
|
||||
Because git tracks file modifications, additions and deletions, it is easy
|
||||
to modify the code and later realize that the changes should be saved.
|
||||
It is also easy to determine what has changed.
|
||||
This method also provides many tools to commit, undo and export those modifications.
|
||||
@@ -531,7 +505,7 @@ repository.
|
||||
|
||||
<itemizedlist>
|
||||
<listitem><para>Bulk storage</para></listitem>
|
||||
<listitem><para>Internal sharing either through patches or by using Git</para></listitem>
|
||||
<listitem><para>Internal sharing either through patches or by using git</para></listitem>
|
||||
<listitem><para>External submissions</para></listitem>
|
||||
<listitem><para>Exporting for integration into another SCM</para></listitem>
|
||||
</itemizedlist>
|
||||
@@ -579,7 +553,7 @@ repository.
|
||||
|
||||
<para>
|
||||
The previous operations capture all the local changes in the project source
|
||||
tree in a single Git commit.
|
||||
tree in a single git commit.
|
||||
And, that commit is also stored in the project's source tree.
|
||||
</para>
|
||||
|
||||
@@ -599,12 +573,12 @@ repository.
|
||||
The examples in this section assume that changes have been incrementally committed
|
||||
to the tree during development and now need to be exported. The sections that follow
|
||||
describe how you can export your changes internally through either patches or by
|
||||
using Git commands.
|
||||
using git commands.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
During development the following commands are of interest.
|
||||
For full Git documentation, refer to the Git man pages or to an online resource such
|
||||
For full git documentation, refer to the git man pages or to an online resource such
|
||||
as <ulink url='http://github.com'></ulink>.
|
||||
|
||||
<literallayout class='monospaced'>
|
||||
@@ -643,15 +617,15 @@ repository.
|
||||
associated with development by using the following commands:
|
||||
|
||||
<literallayout class='monospaced'>
|
||||
> Git add >path</file
|
||||
> Git commit --amend
|
||||
> Git rebase or Git rebase -i
|
||||
> git add >path</file
|
||||
> git commit --amend
|
||||
> git rebase or git rebase -i
|
||||
</literallayout>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Again, assuming that the changes have not been pushed upstream, and that
|
||||
no pending works-in-progress exist (use <filename>git status</filename> to check) then
|
||||
no pending works-in-progress exist (use "git status" to check) then
|
||||
you can revert (undo) commits by using the following commands:
|
||||
|
||||
<literallayout class='monospaced'>
|
||||
@@ -666,13 +640,13 @@ repository.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
You can create branches, "cherry-pick" changes or perform any number of Git
|
||||
You can create branches, "cherry-pick" changes or perform any number of git
|
||||
operations until the commits are in good order for pushing upstream
|
||||
or for pull requests.
|
||||
After a push or pull, commits are normally considered
|
||||
"permanent" and you should not modify them.
|
||||
If they need to be changed you can incrementally do so with new commits.
|
||||
These practices follow the standard Git workflow and the kernel.org best
|
||||
These practices follow the standard "git" workflow and the kernel.org best
|
||||
practices, which Yocto Project recommends.
|
||||
</para>
|
||||
|
||||
@@ -741,7 +715,7 @@ repository.
|
||||
</section>
|
||||
|
||||
<section id='export-internally-via-git'>
|
||||
<title>Exporting Changes Internally by Using Git</title>
|
||||
<title>Exporting Changes Internally by Using git</title>
|
||||
|
||||
<para>
|
||||
This section describes how you can export changes from a working directory
|
||||
@@ -753,8 +727,7 @@ repository.
|
||||
<para>
|
||||
Use this command form to push the changes:
|
||||
<literallayout class='monospaced'>
|
||||
> git push ssh://<master_server>/<path_to_repo>
|
||||
<local_branch>:<remote_branch>
|
||||
git push ssh://<master server>/<path to repo> <local branch>:<remote branch>
|
||||
</literallayout>
|
||||
</para>
|
||||
|
||||
@@ -763,26 +736,25 @@ repository.
|
||||
<filename>yocto/standard/common-pc/base</filename> to the remote branch with the same name
|
||||
in the master repository <filename>//git.mycompany.com/pub/git/kernel-2.6.37</filename>.
|
||||
<literallayout class='monospaced'>
|
||||
> git push ssh://git.mycompany.com/pub/git/kernel-2.6.37 \
|
||||
yocto/standard/common-pc/base:yocto/standard/common-pc/base
|
||||
> push ssh://git.mycompany.com/pub/git/kernel-2.6.37 yocto/standard/common-pc/base:yocto/standard/common-pc/base
|
||||
</literallayout>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
A pull request entails using <filename>git request-pull</filename> to compose an email to the
|
||||
A pull request entails using "git request-pull" to compose an email to the
|
||||
maintainer requesting that a branch be pulled into the master repository, see
|
||||
<ulink url='http://github.com/guides/pull-requests'></ulink> for an example.
|
||||
</para>
|
||||
|
||||
<note><para>
|
||||
Other commands such as <filename>git stash</filename> or branching can also be used to save
|
||||
Other commands such as 'git stash' or branching can also be used to save
|
||||
changes, but are not covered in this document.
|
||||
</para></note>
|
||||
|
||||
<!--<para>
|
||||
See the section "importing from another SCM" for how a Git push to the
|
||||
See the section "importing from another SCM" for how a git push to the
|
||||
default_kernel, can be used to automatically update the builds of all users
|
||||
of a central Git repository.
|
||||
of a central git repository.
|
||||
</para>-->
|
||||
</section>
|
||||
</section>
|
||||
@@ -813,7 +785,7 @@ repository.
|
||||
The messages used to commit changes are a large part of these standards.
|
||||
Consequently, be sure that the headers for each commit have the required information.
|
||||
If the initial commits were not properly documented or do not meet those standards,
|
||||
you can re-base by using the <filename>git rebase -i</filename> command to manipulate the commits and
|
||||
you can re-base by using the "git rebase -i" command to manipulate the commits and
|
||||
get them into the required format.
|
||||
Other techniques such as branching and cherry-picking commits are also viable options.
|
||||
</para>
|
||||
@@ -821,7 +793,7 @@ repository.
|
||||
<para>
|
||||
Once you complete the commits, you can generate the email that sends the patches
|
||||
to the maintainer(s) or lists that review and integrate changes.
|
||||
The command <filename>git send-email</filename> is commonly used to ensure that patches are properly
|
||||
The command "git send-email" is commonly used to ensure that patches are properly
|
||||
formatted for easy application and avoid mailer-induced patch damage.
|
||||
</para>
|
||||
|
||||
@@ -853,7 +825,7 @@ repository.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Many SCMs can directly import Git commits, or can translate Git patches so that
|
||||
Many SCMs can directly import git commits, or can translate git patches so that
|
||||
information is not lost.
|
||||
Those facilities are SCM-dependent and you should use them whenever possible.
|
||||
</para>
|
||||
@@ -882,7 +854,7 @@ repository.
|
||||
|
||||
<para>
|
||||
Depending on the SCM it might be possible to export the entire Yocto Project
|
||||
kernel Git repository, branches and all, into a new environment.
|
||||
kernel git repository, branches and all, into a new environment.
|
||||
This method is preferred because it has the most flexibility and potential to maintain
|
||||
the meta data associated with each commit.
|
||||
</para>
|
||||
@@ -928,14 +900,14 @@ repository.
|
||||
automatically apply them to the kernel during patching.
|
||||
</para>
|
||||
<!--<para>
|
||||
If changes are imported directly into Git, they must be propagated to the
|
||||
If changes are imported directly into git, they must be propagated to the
|
||||
wrll-linux-2.6.27/git/default_kernel bare clone of each individual build
|
||||
to be present when the kernel is checked out.
|
||||
</para>
|
||||
<para>
|
||||
The following example illustrates one variant of this workflow:
|
||||
<literallayout class='monospaced'>
|
||||
# on master Git repository
|
||||
# on master git repository
|
||||
> cd linux-2.6.27
|
||||
> git tag -d common_pc-standard-mark
|
||||
> git pull ssh://<foo>@<bar>/pub/git/kernel-2.6.27 common_pc-standard:common_pc-standard
|
||||
@@ -956,7 +928,7 @@ The following example illustrates one variant of this workflow:
|
||||
<!-- <section id='bsp-template-migration-from-2'>
|
||||
<title>BSP: Template Migration from 2.0</title>
|
||||
<para>
|
||||
The move to a Git-backed kernel build system in 3.0 introduced a small new
|
||||
The move to a git-backed kernel build system in 3.0 introduced a small new
|
||||
requirement for any BSP that is not integrated into the GA release of the
|
||||
product: branching information.
|
||||
</para>
|
||||
@@ -1034,60 +1006,204 @@ That's it. Configure and build.
|
||||
<title>Creating a BSP Based on an Existing Similar BSP</title>
|
||||
|
||||
<para>
|
||||
This section overviews the process of creating a BSP based on an
|
||||
existing similar BSP.
|
||||
The information is introductory in nature and does not provide step-by-step examples.
|
||||
For detailed information on how to create a BSP given an existing similar BSP
|
||||
see the Yocto Project Development Manual [NEED LINK] or the
|
||||
<ulink url='https://wiki.yoctoproject.org/wiki/Transcript:_creating_one_generic_Atom_BSP_from_another'></ulink>
|
||||
wiki page.
|
||||
</para>
|
||||
This section provides an example for creating a BSP
|
||||
that is based on an existing, and hopefully, similar
|
||||
one. It assumes you will be using a local kernel
|
||||
repository and will be pointing the kernel recipe at
|
||||
that. Follow these steps and keep in mind your
|
||||
particular situation and differences:
|
||||
|
||||
<para>
|
||||
The basic steps you need to follow are:
|
||||
<orderedlist>
|
||||
<listitem><para>Make sure you have the Yocto Project source tree available.
|
||||
You should either create a Yocto Project Git repository (recommended), or
|
||||
you should get the Yocto Project release tarball and extract it.</para></listitem>
|
||||
<listitem><para>Choose an existing BSP available with the Yocto Project.
|
||||
Try to map your board features as closely to the features of a BSP that is
|
||||
already supported and exists in the Yocto Project.
|
||||
Starting with something as close as possible to your board makes developing
|
||||
your BSP easier.
|
||||
You can find all the BSPs that are supported and ship with the Yocto Project
|
||||
on the Yocto Project's Download page at
|
||||
<ulink url='http://www.yoctoproject.org/download'></ulink>.</para></listitem>
|
||||
<listitem><para>Be sure you have the Base BSP.
|
||||
You need to either have the Yocto Project Git repository set up or download
|
||||
the tarball of the base BSP.
|
||||
Either method gives you access to the BSP source files.</para></listitem>
|
||||
<listitem><para>Make a copy of the existing BSP, thus isolating your new BSP work.
|
||||
Copying the existing BSP structure gives you a new area in which to work.</para></listitem>
|
||||
<listitem><para>Make configuration and recipe changes to your new BSP.
|
||||
Configuration changes involve the files in the BSP's <filename>conf</filename>
|
||||
directory.
|
||||
Changes include creating a machine-specific configuration file and editing the
|
||||
<filename>layer.conf</filename> file.
|
||||
The configuration changes identify the kernel you will be using.
|
||||
Recipe changes include removing, modifying, or adding new recipe files that
|
||||
instruct the build process on what features to include in the image.</para></listitem>
|
||||
<listitem><para>Prepare for the build.
|
||||
Before you actually initiate the build you need to set up the build environment
|
||||
by sourcing the environment initialization script.
|
||||
After setting up the environment you need to make some build configuration
|
||||
changes to the <filename>local.conf</filename> and <filename>bblayers.conf</filename>
|
||||
files.</para></listitem>
|
||||
<listitem><para>Build the image.
|
||||
The Yocto Project uses the BitBake tool to create the image.
|
||||
You need to decide on the type of image you are going to build (e.g. minimal, base,
|
||||
core, sato, and so forth) and then start the build using the <filename>bitbake</filename>
|
||||
command.</para></listitem>
|
||||
</orderedlist>
|
||||
<orderedlist>
|
||||
<listitem><para>
|
||||
Identify a machine configuration file that matches your machine.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
You can start with something in <filename>meta/conf/machine</filename> - <filename>
|
||||
meta/conf/machine/atom-pc.conf</filename> for example. Or, you can start with a machine
|
||||
configuration from any of the BSP layers in the meta-intel repository at
|
||||
<ulink url='http://git.yoctoproject.org/cgit/cgit.cgi/meta-intel/'></ulink>, such as
|
||||
<filename>meta-intel/meta-emenlow/conf/machine/emenlow.conf</filename>.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
The main difference between the two is that "emenlow" is in its own layer.
|
||||
It is in its own layer because it needs extra machine-specific packages such as its
|
||||
own video driver and other supporting packages.
|
||||
The "atom-pc" is simpler and does not need any special packages - everything it needs can
|
||||
be specified in the configuration file.
|
||||
The "atom-pc" machine also supports all of Asus eee901, Acer Aspire One, Toshiba NB305,
|
||||
and the Intel® Embedded Development Board 1-N450 with no changes.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
If you want to make minor changes to support a slightly different machine, you can
|
||||
create a new configuration file for it and add it alongside the others.
|
||||
You might consider keeping the common information separate and including it.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Similarly, you can also use multiple configuration files for different machines even
|
||||
if you do it as a separate layer like meta-emenlow.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
As an example consider this:
|
||||
<itemizedlist>
|
||||
<listitem><para>Copy meta-emenlow to meta-mymachine</para></listitem>
|
||||
<listitem><para>Fix or remove anything you do not need.
|
||||
For this example the only thing left was the kernel directory with a
|
||||
<filename>linux-yocto_git.bbappend</filename>
|
||||
file
|
||||
and <filename>meta-mymachine/conf/machine/mymachine.conf</filename>
|
||||
(linux-yocto is the kernel listed in
|
||||
<filename>meta-emenlow/conf/machine/emenlow.conf</filename>)</para></listitem>.
|
||||
<listitem><para>Add a new entry in the <filename>build/conf/bblayers.conf</filename>
|
||||
so the new layer can be found by BitBake.</para></listitem>
|
||||
</itemizedlist>
|
||||
</para></listitem>
|
||||
|
||||
<listitem><para>
|
||||
Create a machine branch for your machine.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
For the kernel to compile successfully, you need to create a branch in the git repository
|
||||
specifically named for your machine.
|
||||
To create this branch first create a bare clone of the Yocto Project git repository.
|
||||
Next, create a local clone of that:
|
||||
<literallayout class='monospaced'>
|
||||
$ git clone --bare git://git.yoctoproject.org/linux-yocto-2.6.37.git
|
||||
linux-yocto-2.6.37.git
|
||||
$ git clone linux-yocto-2.6.37.git linux-yocto-2.6.37
|
||||
</literallayout>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Now create a branch in the local clone and push it to the bare clone:
|
||||
<literallayout class='monospaced'>
|
||||
$ git checkout -b yocto/standard/mymachine origin/yocto/standard/base
|
||||
$ git push origin yocto/standard/mymachine:yocto/standard/mymachine
|
||||
</literallayout>
|
||||
</para></listitem>
|
||||
|
||||
<listitem><para>
|
||||
In a layer, create a <filename>linux-yocto_git.bbappend</filename>
|
||||
file with the following:
|
||||
</para>
|
||||
|
||||
<para>
|
||||
<literallayout class='monospaced'>
|
||||
FILESEXTRAPATHS := "${THISDIR}/${PN}"
|
||||
COMPATIBLE_MACHINE_mymachine = "mymachine"
|
||||
|
||||
# It is often nice to have a local clone of the kernel repository, to
|
||||
# allow patches to be staged, branches created, and so forth. Modify
|
||||
# KSRC to point to your local clone as appropriate.
|
||||
|
||||
KSRC ?= /path/to/your/bare/clone/for/example/linux-yocto-2.6.37.git
|
||||
|
||||
# KMACHINE is the branch to be built, or alternatively
|
||||
# KBRANCH can be directly set.
|
||||
# KBRANCH is set to KMACHINE in the main linux-yocto_git.bb
|
||||
# KBRANCH ?= "${LINUX_KERNEL_TYPE}/${KMACHINE}"
|
||||
|
||||
KMACHINE_mymachine = "yocto/standard/mymachine"
|
||||
|
||||
SRC_URI = "git://${KSRC};nocheckout=1;branch=${KBRANCH},meta;name=machine,meta"
|
||||
</literallayout>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
After doing that, select the machine in <filename>build/conf/local.conf</filename>:
|
||||
<literallayout class='monospaced'>
|
||||
#
|
||||
MACHINE ?= "mymachine"
|
||||
#
|
||||
</literallayout>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
You should now be able to build and boot an image with the new kernel:
|
||||
<literallayout class='monospaced'>
|
||||
$ bitbake poky-image-sato-live
|
||||
</literallayout>
|
||||
</para></listitem>
|
||||
|
||||
<listitem><para>
|
||||
Modify the kernel configuration for your machine.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Of course, that will give you a kernel with the default configuration file, which is probably
|
||||
not what you want.
|
||||
If you just want to set some kernel configuration options, you can do that by
|
||||
putting them in a file.
|
||||
For example, inserting the following into some <filename>.cfg</filename> file:
|
||||
<literallayout class='monospaced'>
|
||||
CONFIG_NETDEV_1000=y
|
||||
CONFIG_E1000E=y
|
||||
</literallayout>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
And, another <filename>.cfg</filename> file would contain:
|
||||
<literallayout class='monospaced'>
|
||||
CONFIG_LOG_BUF_SHIFT=18
|
||||
</literallayout>
|
||||
|
||||
<para>
|
||||
These config fragments could then be picked up and
|
||||
applied to the kernel .config by appending them to the kernel SRC_URI:
|
||||
</para>
|
||||
|
||||
<literallayout class='monospaced'>
|
||||
SRC_URI_append_mymachine = " file://some.cfg \
|
||||
file://other.cfg \
|
||||
"
|
||||
</literallayout>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
You could also add these directly to the git repository <filename>meta</filename>
|
||||
branch as well.
|
||||
However, the former method is a simple starting point.
|
||||
</para></listitem>
|
||||
|
||||
<listitem><para>
|
||||
If you're also adding patches to the kernel, you can do the same thing.
|
||||
Put your patches in the SRC_URI as well (plus <filename>.cfg</filename> for their kernel
|
||||
configuration options if needed).
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Practically speaking, to generate the patches, you'd go to the source in the build tree:
|
||||
<literallayout class='monospaced'>
|
||||
build/tmp/work/mymachine-poky-linux/linux-yocto-2.6.37+git0+d1cd5c80ee97e81e130be8c3de3965b770f320d6_0+
|
||||
0431115c9d720fee5bb105f6a7411efb4f851d26-r13/linux
|
||||
</literallayout>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Then, modify the code there, using quilt to save the changes, and recompile until
|
||||
it works:
|
||||
<literallayout class='monospaced'>
|
||||
$ bitbake -c compile -f linux-yocto
|
||||
</literallayout>
|
||||
</para></listitem>
|
||||
|
||||
<listitem><para>
|
||||
Once you have the final patch from quilt, copy it to the
|
||||
SRC_URI location.
|
||||
The patch is applied the next time you do a clean build.
|
||||
Of course, since you have a branch for the BSP in git, it would be better to put it there instead.
|
||||
For example, in this case, commit the patch to the "yocto/standard/mymachine" branch, and during the
|
||||
next build it is applied from there.
|
||||
</para></listitem>
|
||||
</orderedlist>
|
||||
</para>
|
||||
</section>
|
||||
|
||||
|
||||
<!--
|
||||
<section id='bsp-creating-bsp-without-a-local-kernel-repo'>
|
||||
<title>Creating a BSP Based on an Existing Similar BSP Without a Local Kernel Repository</title>
|
||||
|
||||
@@ -1120,8 +1236,7 @@ That's it. Configure and build.
|
||||
</section>
|
||||
|
||||
|
||||
|
||||
<section id='bsp-creating-a-new-bsp'>
|
||||
<!-- <section id='bsp-creating-a-new-bsp'>
|
||||
<title>BSP: Creating a New BSP</title>
|
||||
<para>
|
||||
Although it is obvious that the structure of a new BSP uses the migrated
|
||||
@@ -1314,7 +1429,7 @@ In this technique the .scc file in the board template is slightly different
|
||||
<para>
|
||||
The previous examples created the board templates and configured a build
|
||||
before beginning work on a new BSP. It is also possible for advanced users to
|
||||
simply treat the Yocto Project Git repository as an upstream source and begin
|
||||
simply treat the Yocto Project git repository as an upstream source and begin
|
||||
BSP development directly on the repository. This is the closest match to how
|
||||
the kernel community at large would operate.
|
||||
</para>
|
||||
@@ -1564,7 +1679,7 @@ Or you can do this:
|
||||
</para>
|
||||
<para>
|
||||
For details on conflict resolution and patch application, see the
|
||||
Git manual, or other suitable online references.
|
||||
git manual, or other suitable online references.
|
||||
<literallayout class='monospaced'>
|
||||
> git am <mbox>
|
||||
# conflict
|
||||
@@ -1692,8 +1807,8 @@ Other guilt operations of interest are:
|
||||
</literallayout>
|
||||
</para>
|
||||
<note><para>
|
||||
Guilt only uses Git commands and Git plumbing to perform its operations,
|
||||
anything that guilt does can also be done using Git directly. It is provided
|
||||
Guilt only uses git commands and git plumbing to perform its operations,
|
||||
anything that guilt does can also be done using git directly. It is provided
|
||||
as a convenience utility, but is not required and the developer can use whatever
|
||||
tools or workflow they wish.
|
||||
</para></note>
|
||||
@@ -1702,7 +1817,7 @@ The following builds from the above instructions to show how guilt can be
|
||||
used to assist in getting your BSP kernel patches ready. You should follow
|
||||
the above instructions up to and including 'make linux.config'. In this
|
||||
example I will create a new commit (patch) from scratch and import another
|
||||
fictitious patch from some external public Git tree (ie, a commit with full
|
||||
fictitious patch from some external public git tree (ie, a commit with full
|
||||
message, signoff etc.). Please ensure you have host-cross/bin in your path.
|
||||
<literallayout class='monospaced'>
|
||||
%> cd linux
|
||||
@@ -1720,7 +1835,7 @@ message, signoff etc.). Please ensure you have host-cross/bin in your path.
|
||||
Here are a few notes about the above:
|
||||
<itemizedlist>
|
||||
<listitem><para>guilt-header -e ‐‐ this will open editing of the patch header in
|
||||
EDITOR. As with a Git commit the first line is the short log and
|
||||
EDITOR. As with a git commit the first line is the short log and
|
||||
should be just that short and concise message about the commit. Follow
|
||||
the short log with lines of text that will be the long description but
|
||||
note Do not put a blank line after the short log. As usual you will
|
||||
@@ -1734,7 +1849,7 @@ Here are a few notes about the above:
|
||||
review comment in the first patch (first_one.patch in the case of this
|
||||
example) it is very easy to use guilt to pop the other patches off
|
||||
allowing you to make the necessary changes without having to use more
|
||||
inventive Git type strategies.</para></listitem>
|
||||
inventive git type strategies.</para></listitem>
|
||||
</itemizedlist>
|
||||
</para>
|
||||
</section>
|
||||
@@ -1839,7 +1954,7 @@ This section shows an example of transforms:
|
||||
</para>
|
||||
|
||||
<para>
|
||||
You can use the Git command above to report modified, removed, or added files.
|
||||
You can use the git command above to report modified, removed, or added files.
|
||||
You should commit those changes to the tree regardless of whether they will be saved,
|
||||
exported, or used.
|
||||
Once you commit the changes you need to rebuild the kernel.
|
||||
@@ -1866,7 +1981,7 @@ This section shows an example of transforms:
|
||||
|
||||
<orderedlist>
|
||||
<listitem><para>Create a custom kernel layer.</para></listitem>
|
||||
<listitem><para>Create a Git repository of the transition kernel.</para></listitem>
|
||||
<listitem><para>Create a git repository of the transition kernel.</para></listitem>
|
||||
</orderedlist>
|
||||
</para>
|
||||
|
||||
@@ -1908,12 +2023,12 @@ patches. If a custom BSP is being used, this is not required.
|
||||
</section> -->
|
||||
|
||||
<!-- <section id='git-repo-of-the-transition-kernel'>
|
||||
<title>Git Repo of the Transition Kernel</title>
|
||||
<title>git Repo of the Transition Kernel</title>
|
||||
<para>
|
||||
The kernel build system requires a base kernel repository to
|
||||
seed the build process. This repository must be found in the
|
||||
same layer as the build infrastructure (i.e wrll-linux-2.6.27)
|
||||
in the <filename>.git</filename> subdir, with the name 'default_kernel'
|
||||
in the 'git' subdir, with the name 'default_kernel'
|
||||
</para>
|
||||
<para>Since Yocto Project Linux ships with a default_kernel
|
||||
(the validated Yocto Project kernel) in the wrll-linux-2.6.27
|
||||
@@ -1922,15 +2037,15 @@ transition kernel.
|
||||
</para>
|
||||
<para>If the Yocto Project install cannot be directly modified
|
||||
with the new default kernel, then the path to the transition
|
||||
kernel layer's <filename>.git</filename> subdir must be passed to the build
|
||||
kernel layer's 'git' subdir must be passed to the build
|
||||
process via:
|
||||
<programlisting>
|
||||
linux_GIT_BASE=<absolute path to layer>/git
|
||||
</programlisting>
|
||||
</para>
|
||||
<para>
|
||||
If the transition kernel has not been delivered via Git,
|
||||
then a Git repo should be created, and bare cloned into
|
||||
If the transition kernel has not been delivered via git,
|
||||
then a git repo should be created, and bare cloned into
|
||||
place. Creating this repository is as simple as:
|
||||
<literallayout class='monospaced'>
|
||||
> tar zxvf temp_kernel.tgz
|
||||
@@ -2003,7 +2118,7 @@ To build the kernel:
|
||||
</para>
|
||||
<para>
|
||||
If this is to build without some user intervention (passing of the
|
||||
GIT_BASE), you must do the clone into the <filename>wrll-linux-2.6.27/.git</filename> directory.
|
||||
GIT_BASE), you must do the clone into the wrll-linux-2.6.27/git directory.
|
||||
</para>
|
||||
<note><para>Unless you define valid "hardware.kcf" and "non-hardware.kcf" some
|
||||
non fatal warnings will be seen. They can be fixed by populating these
|
||||
@@ -2053,7 +2168,7 @@ options.
|
||||
<listitem><para>Building a 'dirty' image.</para></listitem>
|
||||
<listitem><para>Temporarily using a different base kernel.</para></listitem>
|
||||
<listitem><para>Creating a custom kernel layer.</para></listitem>
|
||||
<listitem><para>Creating the Git repository of the transition kernel.</para></listitem>
|
||||
<listitem><para>Creating the git repository of the transition kernel.</para></listitem>
|
||||
</itemizedlist> -->
|
||||
|
||||
|
||||
|
||||
@@ -44,6 +44,11 @@
|
||||
<date>23 May 2011</date>
|
||||
<revremark>Released with Yocto Project 1.0.1 on 23 May 2011.</revremark>
|
||||
</revision>
|
||||
<revision>
|
||||
<revnumber>1.0.2</revnumber>
|
||||
<date>20 December 2011</date>
|
||||
<revremark>Released with Yocto Project 1.0.2 on 20 December 2011.</revremark>
|
||||
</revision>
|
||||
</revhistory>
|
||||
|
||||
<copyright>
|
||||
|
||||
@@ -383,8 +383,8 @@
|
||||
triplet is "i586-poky-linux".</para></listitem>
|
||||
<listitem><para>Kernel: Use the file chooser to select the kernel used with QEMU.</para></listitem>
|
||||
<listitem><para>Root filesystem: Use the file chooser to select the root
|
||||
filesystem directory. This directory is where you use "runqemu-extract-sdk" to extract the
|
||||
core-image-sdk tarball.</para></listitem>
|
||||
filesystem directory. This directory is where you use "poky-extract-sdk" to extract the
|
||||
poky-image-sdk tarball.</para></listitem>
|
||||
</itemizedlist>
|
||||
</para>
|
||||
</section>
|
||||
@@ -738,7 +738,7 @@ tmp/sysroots/<host-arch>/usr/bin/<target-abi>-gdb
|
||||
<para>
|
||||
Perhaps the easiest is to have an 'sdk' image that corresponds to the plain
|
||||
image installed on the device.
|
||||
In the case of 'core-image-sato', 'core-image-sdk' would contain suitable symbols.
|
||||
In the case of 'poky-image-sato', 'poky-image-sdk' would contain suitable symbols.
|
||||
Because the sdk images already have the debugging symbols installed it is just a
|
||||
question of expanding the archive to some location and then informing GDB.
|
||||
</para>
|
||||
@@ -764,17 +764,17 @@ tmp/sysroots/<host-arch>/usr/bin/<target-abi>-gdb
|
||||
<filename>tmp/rootfs</filename>:
|
||||
<programlisting>
|
||||
tmp/sysroots/i686-linux/usr/bin/opkg-cl -f \
|
||||
tmp/work/<target-abi>/core-image-sato-1.0-r0/temp/opkg.conf -o \
|
||||
tmp/work/<target-abi>/poky-image-sato-1.0-r0/temp/opkg.conf -o \
|
||||
tmp/rootfs/ update
|
||||
</programlisting></para></listitem>
|
||||
<listitem><para>Install the debugging information:
|
||||
<programlisting>
|
||||
tmp/sysroots/i686-linux/usr/bin/opkg-cl -f \
|
||||
tmp/work/<target-abi>/core-image-sato-1.0-r0/temp/opkg.conf \
|
||||
tmp/work/<target-abi>/poky-image-sato-1.0-r0/temp/opkg.conf \
|
||||
-o tmp/rootfs install foo
|
||||
|
||||
tmp/sysroots/i686-linux/usr/bin/opkg-cl -f \
|
||||
tmp/work/<target-abi>/core-image-sato-1.0-r0/temp/opkg.conf \
|
||||
tmp/work/<target-abi>/poky-image-sato-1.0-r0/temp/opkg.conf \
|
||||
-o tmp/rootfs install foo-dbg
|
||||
</programlisting></para></listitem>
|
||||
</orderedlist>
|
||||
|
||||
@@ -269,9 +269,9 @@ fi
|
||||
The following example shows the form for the two lines you need:
|
||||
</para>
|
||||
<programlisting>
|
||||
IMAGE_INSTALL = "task-core-x11-base package1 package2"
|
||||
IMAGE_INSTALL = "task-poky-x11-base package1 package2"
|
||||
|
||||
inherit core-image
|
||||
inherit poky-image
|
||||
</programlisting>
|
||||
<para>
|
||||
By creating a custom image, a developer has total control
|
||||
@@ -283,11 +283,11 @@ inherit core-image
|
||||
</para>
|
||||
<para>
|
||||
The other method for creating a custom image is to modify an existing image.
|
||||
For example, if a developer wants to add "strace" into "core-image-sato", they can use
|
||||
For example, if a developer wants to add "strace" into "poky-image-sato", they can use
|
||||
the following recipe:
|
||||
</para>
|
||||
<programlisting>
|
||||
require core-image-sato.bb
|
||||
require poky-image-sato.bb
|
||||
|
||||
IMAGE_INSTALL += "strace"
|
||||
</programlisting>
|
||||
@@ -355,7 +355,7 @@ RRECOMMENDS_task-custom-tools = "\
|
||||
<glossterm><link linkend='var-IMAGE_FEATURES'>IMAGE_FEATURES</link></glossterm>
|
||||
variable.
|
||||
To create these features, the best reference is
|
||||
<filename>meta/classes/core-image.bbclass</filename>, which shows how poky achieves this.
|
||||
<filename>meta/classes/poky-image.bbclass</filename>, which shows how poky achieves this.
|
||||
In summary, the file looks at the contents of the
|
||||
<glossterm><link linkend='var-IMAGE_FEATURES'>IMAGE_FEATURES</link></glossterm>
|
||||
variable and then maps that into a set of tasks or packages.
|
||||
@@ -371,8 +371,8 @@ RRECOMMENDS_task-custom-tools = "\
|
||||
Poky ships with two SSH servers you can use in your images: Dropbear and OpenSSH.
|
||||
Dropbear is a minimal SSH server appropriate for resource-constrained environments,
|
||||
while OpenSSH is a well-known standard SSH server implementation.
|
||||
By default, core-image-sato is configured to use Dropbear.
|
||||
The core-image-basic and core-image-lsb images both include OpenSSH.
|
||||
By default, poky-image-sato is configured to use Dropbear.
|
||||
The poky-image-basic and poky-image-lsb images both include OpenSSH.
|
||||
To change these defaults, edit the <filename>IMAGE_FEATURES</filename> variable
|
||||
so that it sets the image you are working with to include ssh-server-dropbear
|
||||
or ssh-server-openssh.
|
||||
@@ -415,7 +415,7 @@ DISTRO_EXTRA_RDEPENDS += "strace"
|
||||
</para>
|
||||
<programlisting>
|
||||
$ bitbake -c clean task-boot task-base task-poky
|
||||
$ bitbake core-image-sato
|
||||
$ bitbake poky-image-sato
|
||||
</programlisting>
|
||||
</section>
|
||||
|
||||
@@ -637,7 +637,7 @@ BBFILE_PRIORITY_emenlow = "6"
|
||||
tree.</para></listitem>
|
||||
</itemizedlist>
|
||||
Following these recommendations keeps your Poky tree and its configuration entirely
|
||||
inside COREBASE.
|
||||
inside POKYBASE.
|
||||
</para>
|
||||
</section>
|
||||
|
||||
@@ -675,7 +675,7 @@ BBFILE_PRIORITY_emenlow = "6"
|
||||
|
||||
These functions allow generation of dependency data between functions and
|
||||
variables allowing moves to be made towards generating checksums and allowing
|
||||
use of the dependency information in other parts of BitBake.
|
||||
use of the dependency information in other parts of bitbake.
|
||||
|
||||
Signed-off-by: Richard Purdie richard.purdie@linuxfoundation.org
|
||||
</literallayout>
|
||||
|
||||
@@ -28,7 +28,7 @@
|
||||
<qandaentry>
|
||||
<question>
|
||||
<para>
|
||||
I only have Python 2.4 or 2.5 but BitBake requires Python 2.6 or 2.7.
|
||||
I only have Python 2.4 or 2.5 but BitBake requires Python 2.6.
|
||||
Can I still use Poky?
|
||||
</para>
|
||||
</question>
|
||||
@@ -37,8 +37,8 @@
|
||||
You can use a stand-alone tarball to provide Python 2.6.
|
||||
You can find pre-built 32 and 64-bit versions of Python 2.6 at the following locations:
|
||||
<itemizedlist>
|
||||
<listitem><para><ulink url='http://autobuilder.yoctoproject.org/downloads/miscsupport/yocto-1.0-python-nativesdk/python-nativesdk-standalone-i686.tar.bz2'>32-bit tarball</ulink></para></listitem>
|
||||
<listitem><para><ulink url='http://autobuilder.yoctoproject.org/downloads/miscsupport/yocto-1.0-python-nativesdk/python-nativesdk-standalone-x86_64.tar.bz2'>64-bit tarball</ulink></para></listitem>
|
||||
<listitem><para><ulink url='http://autobuilder.yoctoproject.org/downloads/miscsupport/python-nativesdk-standalone-i586.tar.bz2'></ulink></para></listitem>
|
||||
<listitem><para><ulink url='http://autobuilder.yoctoproject.org/downloads/miscsupport/python-nativesdk-standalone-x86_64.tar.bz2'></ulink></para></listitem>
|
||||
</itemizedlist>
|
||||
</para>
|
||||
<para>
|
||||
|
||||
@@ -57,6 +57,11 @@
|
||||
<date>23 May 2011</date>
|
||||
<revremark>Released with Yocto Project 1.0.1 on 23 May 2011.</revremark>
|
||||
</revision>
|
||||
<revision>
|
||||
<revnumber>1.0.2</revnumber>
|
||||
<date>20 December 2011</date>
|
||||
<revremark>Released with Yocto Project 1.0.2 on 20 December 2011.</revremark>
|
||||
</revision>
|
||||
</revhistory>
|
||||
|
||||
<copyright>
|
||||
|
||||
@@ -9,7 +9,7 @@
|
||||
BitBake is a program written in Python that interprets the metadata that makes up Poky.
|
||||
At some point, people wonder what actually happens when you enter:
|
||||
<literallayout class='monospaced'>
|
||||
$ bitbake core-image-sato
|
||||
$ bitbake poky-image-sato
|
||||
</literallayout>
|
||||
</para>
|
||||
|
||||
@@ -111,11 +111,11 @@
|
||||
|
||||
<para>
|
||||
Once all the <filename>.bb</filename> files have been
|
||||
parsed, BitBake starts to build the target (core-image-sato in the previous section's
|
||||
parsed, BitBake starts to build the target (poky-image-sato in the previous section's
|
||||
example) and looks for providers of that target.
|
||||
Once a provider is selected, BitBake resolves all the dependencies for
|
||||
the target.
|
||||
In the case of "core-image-sato", it would lead to <filename>task-base.bb</filename>,
|
||||
In the case of "poky-image-sato", it would lead to <filename>task-base.bb</filename>,
|
||||
which in turn leads to packages like <application>Contacts</application>,
|
||||
<application>Dates</application> and <application>BusyBox</application>.
|
||||
These packages in turn depend on glibc and the toolchain.
|
||||
@@ -251,7 +251,7 @@ PREFERRED_PROVIDER_virtual/kernel = "linux-rp"
|
||||
<title>BitBake Command Line</title>
|
||||
|
||||
<para>
|
||||
Following is the BitBake manpage:
|
||||
Following is the bitbake manpage:
|
||||
</para>
|
||||
|
||||
<screen>
|
||||
|
||||
@@ -28,41 +28,41 @@
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>
|
||||
<emphasis>core-image-minimal</emphasis> - A small image just capable
|
||||
<emphasis>poky-image-minimal</emphasis> - A small image just capable
|
||||
of allowing a device to boot.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
<emphasis>core-image-base</emphasis> - A console-only image that fully
|
||||
<emphasis>poky-image-base</emphasis> - A console-only image that fully
|
||||
supports the target device hardware.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
<emphasis>core-image-core</emphasis> - An X11 image with simple
|
||||
<emphasis>poky-image-core</emphasis> - An X11 image with simple
|
||||
applications such as terminal, editor, and file manager.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
<emphasis>core-image-sato</emphasis> - An X11 image with Sato theme and
|
||||
<emphasis>poky-image-sato</emphasis> - An X11 image with Sato theme and
|
||||
Pimlico applications.
|
||||
The image also contains terminal, editor, and file manager.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
<emphasis>core-image-sato-dev</emphasis> - An X11 image similar to
|
||||
core-image-sato but
|
||||
<emphasis>poky-image-sato-dev</emphasis> - An X11 image similar to
|
||||
poky-image-sato but
|
||||
also includes a native toolchain and libraries needed to build applications
|
||||
on the device itself. The image also includes testing and profiling tools
|
||||
as well as debug symbols. This image was formerly core-image-sdk.
|
||||
as well as debug symbols. This image was formerly poky-image-sdk.
|
||||
</para>
|
||||
</listitem>
|
||||
<listitem>
|
||||
<para>
|
||||
<emphasis>core-image-lsb</emphasis> - An image suitable for implementations
|
||||
<emphasis>poky-image-lsb</emphasis> - An image suitable for implementations
|
||||
that conform to Linux Standard Base (LSB).
|
||||
</para>
|
||||
</listitem>
|
||||
|
||||
@@ -28,7 +28,7 @@
|
||||
Consequently, most users do not need to worry about BitBake.
|
||||
The <filename class="directory">bitbake/bin/</filename> directory is placed
|
||||
into the PATH environment variable by the
|
||||
<link linkend="structure-core-script">oe-init-build-env</link> script.
|
||||
<link linkend="structure-core-script">poky-init-build-env</link> script.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
@@ -47,7 +47,7 @@
|
||||
It is also possible to place output and configuration
|
||||
files in a directory separate from the Poky source.
|
||||
For information on separating output from the Poky source, see <link
|
||||
linkend='structure-core-script'>oe-init-build-env</link>.
|
||||
linkend='structure-core-script'>poky-init-build-env</link>.
|
||||
</para>
|
||||
</section>
|
||||
|
||||
@@ -104,7 +104,7 @@
|
||||
<para>
|
||||
This directory contains various integration scripts that implement
|
||||
extra functionality in the Poky environment (e.g. QEMU scripts).
|
||||
The <link linkend="structure-core-script">oe-init-build-env</link> script appends this
|
||||
The <link linkend="structure-core-script">poky-init-build-env</link> script appends this
|
||||
directory to the PATH environment variable.
|
||||
</para>
|
||||
</section>
|
||||
@@ -154,7 +154,7 @@
|
||||
</section>
|
||||
|
||||
<section id='structure-core-script'>
|
||||
<title><filename>oe-init-build-env</filename></title>
|
||||
<title><filename>poky-init-build-env</filename></title>
|
||||
|
||||
<para>
|
||||
This script sets up the Poky build environment.
|
||||
@@ -168,7 +168,7 @@
|
||||
</para>
|
||||
|
||||
<literallayout class='monospaced'>
|
||||
$ source POKY_SRC/oe-init-build-env [BUILDDIR]
|
||||
$ source POKY_SRC/poky-init-build-env [BUILDDIR]
|
||||
</literallayout>
|
||||
|
||||
<para>
|
||||
|
||||
@@ -139,14 +139,14 @@
|
||||
</para>
|
||||
<para>
|
||||
<literallayout class='monospaced'>
|
||||
$ source oe-init-build-env [build_dir]
|
||||
$ source poky-init-build-env [build_dir]
|
||||
</literallayout>
|
||||
</para>
|
||||
<para>
|
||||
The build_dir is the dir containing all the build's object files. The default
|
||||
build dir is poky-dir/build. A different build_dir can be used for each of the targets.
|
||||
For example, ~/build/x86 for a qemux86 target, and ~/build/arm for a qemuarm target.
|
||||
Please refer to <link linkend="structure-core-script">oe-init-build-env</link>
|
||||
Please refer to <link linkend="structure-core-script">poky-init-build-env</link>
|
||||
for more detailed information.
|
||||
</para>
|
||||
<para>
|
||||
|
||||
@@ -102,8 +102,8 @@
|
||||
<para>
|
||||
Another important Yocto Project feature is the Sato reference User Interface.
|
||||
This optional GNOME mobile-based UI, which is intended for devices with
|
||||
restricted screen sizes, sits neatly on top of a device using the
|
||||
GNOME Mobile Stack and provides a well-defined user experience.
|
||||
resolution but restricted size screens, sits neatly on top of a device using the
|
||||
GNOME Mobile Stack providing a well-defined user experience.
|
||||
Implemented in its own layer, it makes it clear to developers how they can implement
|
||||
their own UIs on top of Yocto Linux.
|
||||
</para>
|
||||
@@ -119,7 +119,7 @@
|
||||
<itemizedlist>
|
||||
<listitem>
|
||||
<para>A host system running a supported Linux distribution (i.e. recent releases of
|
||||
Fedora, openSUSE, Debian, and Ubuntu).
|
||||
Fedora, OpenSUSE, Debian, and Ubuntu).
|
||||
<note>
|
||||
For notes about using the Yocto Project on development systems that use
|
||||
older Linux distributions see
|
||||
@@ -138,51 +138,14 @@
|
||||
<title>The Linux Distribution</title>
|
||||
|
||||
<para>
|
||||
The Yocto Project has been tested and is known to work on the current releases minus one
|
||||
of the following distributions.
|
||||
Follow this <ulink url='https://wiki.pokylinux.org/wiki/Distro_Test'>link </ulink> for more
|
||||
information on distribution testing.
|
||||
<itemizedlist>
|
||||
<listitem><para>Ubuntu</para></listitem>
|
||||
<listitem><para>Fedora</para></listitem>
|
||||
<listitem><para>openSUSE</para></listitem>
|
||||
</itemizedlist>
|
||||
</para>
|
||||
<para>
|
||||
The build system should be able to run on any modern distribution with Python 2.6 or 2.7.
|
||||
Earlier releases of Python are known to not work and the system does not support Python 3 at this time.
|
||||
This document assumes you are running one of the previously noted distributions on your Linux-based
|
||||
host systems.
|
||||
This document assumes you are running a reasonably current Linux-based host system.
|
||||
The examples work for both Debian-based and RPM-based distributions.
|
||||
</para>
|
||||
<note><para>
|
||||
If you attempt to use a distribution not in the above list, you may or may not have success - you
|
||||
are venturing into untested territory.
|
||||
Refer to
|
||||
<ulink url='http://openembedded.net/index.php?title=OEandYourDistro&action=historysubmit&diff=4309&okdid=4225'>OE and Your Distro</ulink> and
|
||||
<ulink url='http://openembedded.net/index.php?title=Required_software&action=historysubmit&diff=4311&oldid=4251'>Required Software</ulink>
|
||||
for information for other distributions used with the Open Embedded project, which might be
|
||||
a starting point for exploration.
|
||||
If you go down this path, you should expect problems.
|
||||
When you do, please go to <ulink url='http://bugzilla.yoctoproject.org'>Yocto Project Bugzilla</ulink>
|
||||
and submit a bug.
|
||||
We are interested in hearing about your experience.
|
||||
</para></note>
|
||||
</section>
|
||||
|
||||
<section id='packages'>
|
||||
<title>The Packages</title>
|
||||
|
||||
<para>
|
||||
Packages and package installation vary depending on your development system.
|
||||
In general, you need to have root access and then install the required packages.
|
||||
</para>
|
||||
|
||||
<note><para>
|
||||
If you are using a Fedora version prior to version 15 you will need to take some
|
||||
extra steps to enable <filename>sudo</filename>.
|
||||
See <ulink url='https://fedoraproject.org/wiki/Configuring_Sudo'></ulink> for details.
|
||||
</para></note>
|
||||
|
||||
<para>
|
||||
The packages you need for a Debian-based host are shown in the following command:
|
||||
</para>
|
||||
@@ -192,12 +155,11 @@
|
||||
unzip texi2html texinfo libsdl1.2-dev docbook-utils gawk \
|
||||
python-pysqlite2 diffstat help2man make gcc build-essential \
|
||||
g++ desktop-file-utils chrpath libgl1-mesa-dev libglu1-mesa-dev \
|
||||
mercurial autoconf automake groff libtool
|
||||
mercurial autoconf automake groff
|
||||
</literallayout>
|
||||
|
||||
<para>
|
||||
The packages you need for an RPM-based host like Fedora and openSUSE,
|
||||
respectively, are as follows:
|
||||
The packages you need for an RPM-based host like Fedora are shown in these commands:
|
||||
</para>
|
||||
|
||||
<literallayout class='monospaced'>
|
||||
@@ -209,15 +171,17 @@
|
||||
groff linuxdoc-tools patch linuxdoc-tools cmake help2man \
|
||||
perl-ExtUtils-MakeMaker tcl-devel gettext chrpath ncurses apr \
|
||||
SDL-devel mesa-libGL-devel mesa-libGLU-devel gnome-doc-utils \
|
||||
autoconf automake libtool
|
||||
</literallayout>
|
||||
|
||||
<literallayout class='monospaced'>
|
||||
$ sudo zypper install python gcc gcc-c++ libtool \
|
||||
subversion git chrpath automake \
|
||||
help2man diffstat texinfo mercurial wget
|
||||
autoconf automake
|
||||
</literallayout>
|
||||
|
||||
<note><para>
|
||||
Packages vary in number and name for other Linux distributions.
|
||||
The commands here should work. We are interested, though, to learn what works for you.
|
||||
You can find more information for package requirements on common Linux distributions
|
||||
at <ulink url="http://wiki.openembedded.net/index.php/OEandYourDistro"></ulink>.
|
||||
However, you should be careful when using this information as the information applies
|
||||
to old Linux distributions that are known to not work with a current Poky install.
|
||||
</para></note>
|
||||
</section>
|
||||
|
||||
<section id='releases'>
|
||||
@@ -294,9 +258,9 @@
|
||||
|
||||
<para>
|
||||
<literallayout class='monospaced'>
|
||||
$ wget http://www.yoctoproject.org/downloads/poky/poky-bernard-5.0.1.tar.bz2
|
||||
$ tar xjf poky-bernard-5.0.1.tar.bz2
|
||||
$ source poky-bernard-5.0.1/poky-init-build-env poky-5.0.1-build
|
||||
$ wget http://www.yoctoproject.org/downloads/poky/poky-bernard-5.0.2.tar.bz2
|
||||
$ tar xjf poky-bernard-5.0.2.tar.bz2
|
||||
$ source poky-bernard-5.0.2/poky-init-build-env poky-5.0.2-build
|
||||
</literallayout>
|
||||
</para>
|
||||
|
||||
@@ -314,8 +278,8 @@
|
||||
<listitem><para>The first two commands extract the Yocto Project files from the
|
||||
release tarball and place them into a subdirectory of your current directory.</para></listitem>
|
||||
<listitem><para>The <command>source</command> command creates the
|
||||
<filename>poky-5.0.1-build</filename> directory and executes the <command>cd</command>
|
||||
command to make <filename>poky-5.0.1-build</filename> the working directory.
|
||||
<filename>poky-5.0.2-build</filename> directory and executes the <command>cd</command>
|
||||
command to make <filename>poky-5.0.2-build</filename> the working directory.
|
||||
The resulting build directory contains all the files created during the build.
|
||||
By default the target architecture is qemux86.
|
||||
To change this default, edit the value of the MACHINE variable in the
|
||||
@@ -324,24 +288,22 @@
|
||||
<para>
|
||||
Take some time to examine your <filename>conf/local.conf</filename> file.
|
||||
The defaults should work fine.
|
||||
However, if you have a multi-core CPU you might want to set the variable
|
||||
BB_NUMBER_THREADS equal to twice the number of processor cores your system has.
|
||||
And, set the variable PARALLEL_MAKE equal to the number of processor cores.
|
||||
Setting these variables can significantly shorten your build time.
|
||||
However, if you have a multi-core CPU you might want to set the variables
|
||||
BB_NUMBER_THREADS and PARALLEL_MAKE to the number of processor cores on your build machine.
|
||||
By default, these variables are commented out.
|
||||
</para>
|
||||
<para>
|
||||
Continue with the following command to build an OS image for the target, which is
|
||||
<filename>core-image-sato</filename> in this example.
|
||||
<filename>poky-image-sato</filename> in this example.
|
||||
For information on the <filename>‐k</filename> option use the
|
||||
<filename>bitbake ‐‐help</filename> command or see
|
||||
<ulink url='http://www.yoctoproject.org/docs/poky-ref-manual/poky-ref-manual.html#usingpoky-components-bitbake'>
|
||||
BitBake</ulink> section in the Poky Reference Manual.
|
||||
<literallayout class='monospaced'>
|
||||
$ bitbake -k core-image-sato
|
||||
$ bitbake -k poky-image-sato
|
||||
</literallayout>
|
||||
<note><para>
|
||||
BitBake requires Python 2.6 or 2.7. For more information on this requirement,
|
||||
BitBake requires Python 2.6. For more information on this requirement,
|
||||
see the FAQ appendix in the
|
||||
<ulink url='http://www.yoctoproject.org/docs/poky-ref-manual/poky-ref-manual.html'>
|
||||
Poky Reference Manual</ulink>.
|
||||
@@ -425,7 +387,7 @@
|
||||
</para>
|
||||
|
||||
<literallayout class='monospaced'>
|
||||
yocto-eglibc<<emphasis>host_system</emphasis>>-<<emphasis>arch</emphasis>>-toolchain-gmae-<<emphasis>release</emphasis>>.tar.bz2
|
||||
yocto-eglibc<<emphasis>host_system</emphasis>>-<<emphasis>arch</emphasis>>-toolchain-sdk-<<emphasis>release</emphasis>>.tar.bz2
|
||||
|
||||
Where:
|
||||
<<emphasis>host_system</emphasis>> is a string representing your development system:
|
||||
@@ -443,11 +405,11 @@
|
||||
</para>
|
||||
|
||||
<literallayout class='monospaced'>
|
||||
yocto-eglibc-x86_64-i686-toolchain-gmae-1.0.tar.bz2
|
||||
yocto-eglibc-x86_64-i686-toolchain-sdk-1.0.tar.bz2
|
||||
</literallayout>
|
||||
|
||||
<para>
|
||||
The toolchain tarballs are self-contained and must be installed into <filename>/opt/poky</filename>.
|
||||
The toolchain tarballs are self-contained and should be installed into <filename>/opt/poky</filename>.
|
||||
The following commands show how you install the toolchain tarball given a 64-bit development host system
|
||||
and a 32-bit target architecture.
|
||||
</para>
|
||||
@@ -455,7 +417,7 @@
|
||||
<para>
|
||||
<literallayout class='monospaced'>
|
||||
$ cd /
|
||||
$ sudo tar -xvjf yocto-eglibc-x86_64-i686-toolchain-gmae-1.0.tar.bz2
|
||||
$ sudo tar -xvjf yocto-eglibc-x86_64-i686-toolchain-sdk-1.0.tar.bz2
|
||||
</literallayout>
|
||||
</para>
|
||||
</section>
|
||||
@@ -471,19 +433,15 @@
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Most kernel files have one of the following forms:
|
||||
Most kernel files have the following form:
|
||||
</para>
|
||||
|
||||
<literallayout class='monospaced'>
|
||||
*zImage-<<emphasis>kernel-rev</emphasis>>-qemu<<emphasis>arch</emphasis>>*.bin
|
||||
vmlinux-<<emphasis>kernel-rev</emphasis>>-<<emphasis>arch</emphasis>>*.bin
|
||||
*zImage*qemu<<emphasis>arch</emphasis>>*.bin
|
||||
|
||||
Where:
|
||||
<<emphasis>arch</emphasis>> is a string representing the target architecture:
|
||||
x86, x86-64, ppc, mips, or arm.
|
||||
|
||||
<<emphasis>kernel-rev</emphasis>> is the base Linux kernel revision
|
||||
(e.g. 2.6.37).
|
||||
</literallayout>
|
||||
</section>
|
||||
|
||||
@@ -497,7 +455,7 @@
|
||||
</para>
|
||||
|
||||
<literallayout class='monospaced'>
|
||||
yocto-image-<<emphasis>profile</emphasis>>-qemu<<emphasis>arch</emphasis>>.rootfs.ext3.bz2
|
||||
yocto-image-<<emphasis>profile</emphasis>>-qemu<<emphasis>arch</emphasis>>.rootfs.ext3
|
||||
yocto-image-<<emphasis>profile</emphasis>>-qemu<<emphasis>arch</emphasis>>.rootfs.tar.bz2
|
||||
|
||||
Where:
|
||||
@@ -546,15 +504,13 @@
|
||||
|
||||
<para>
|
||||
Continuing with the example, the following two commands setup the emulation
|
||||
environment and launch QEMU.
|
||||
This example assumes the root filesystem tarball has been downloaded and expanded, and
|
||||
that the kernel and filesystem are for a 32-bit target architecture.
|
||||
environment and launch QEMU.
|
||||
The kernel and filesystem are for a 32-bit target architecture.
|
||||
</para>
|
||||
|
||||
<literallayout class='monospaced'>
|
||||
$ source /opt/poky/1.0/environment-setup-i686-poky-linux
|
||||
$ poky-qemu qemux86 bzImage-2.6.37-qemux86-1.0.bin \
|
||||
yocto-image-sato-qemux86-1.0.rootfs.ext3
|
||||
$ source /opt/poky/environment-setup-i686-poky-linux
|
||||
$ poky-qemu qemux86 zImage-2.6.34-qemux86-1.0.bin yocto-image-sdk-qemux86-1.0.rootfs.ext3
|
||||
</literallayout>
|
||||
|
||||
<para>
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
DESCRIPTION = "FarSight is an audio/video conferencing framework specifically designed for Instant Messengers."
|
||||
HOMEPAGE = "http://farsight.sf.net"
|
||||
SRC_URI = "http://farsight.freedesktop.org/releases/farsight2/${BPN}-${PV}.tar.gz"
|
||||
SRC_URI = "http://farsight.freedesktop.org/releases/farsight2/${P}.tar.gz"
|
||||
LICENSE = "GPLv2.1"
|
||||
DEPENDS = "libnice glib-2.0 libxml2 zlib dbus gstreamer gst-plugins-base"
|
||||
|
||||
|
||||
@@ -1,5 +1,3 @@
|
||||
Upstream-Status: Inappropriate [configuration]
|
||||
|
||||
---
|
||||
configure.ac | 1 +
|
||||
1 file changed, 1 insertion(+)
|
||||
|
||||
@@ -5,6 +5,6 @@ LICENSE = "LGPL"
|
||||
DEPENDS = "glib-2.0 gnutls libcheck"
|
||||
PR = "r2"
|
||||
|
||||
SRC_URI = "http://ftp.imendio.com/pub/imendio/${BPN}/src/${BPN}-${PV}.tar.bz2"
|
||||
SRC_URI = "http://ftp.imendio.com/pub/imendio/${PN}/src/${PN}-${PV}.tar.bz2"
|
||||
|
||||
inherit autotools pkgconfig
|
||||
|
||||
@@ -6,7 +6,8 @@ HOMEPAGE = "http://www.openswan.org"
|
||||
LICENSE = "GPLv2"
|
||||
DEPENDS = "gmp flex-native"
|
||||
RRECOMMENDS_${PN} = "kernel-module-ipsec"
|
||||
PR = "r2"
|
||||
RDEPENDS_${PN}_nylon = "perl"
|
||||
PR = "r1"
|
||||
|
||||
SRC_URI = "http://www.openswan.org/download/old/openswan-${PV}.tar.gz \
|
||||
file://openswan-2.4.7-gentoo.patch;patch=1 \
|
||||
|
||||
@@ -1,5 +1,3 @@
|
||||
Upstream-Status: Inappropriate [configuration]
|
||||
|
||||
---
|
||||
cmake/OpenSyncInternal.cmake.in | 1 -
|
||||
1 file changed, 1 deletion(-)
|
||||
|
||||
@@ -1,5 +1,3 @@
|
||||
Upstream-Status: Inappropriate [configuration]
|
||||
|
||||
---
|
||||
opensync/CMakeLists.txt | 1 -
|
||||
1 file changed, 1 deletion(-)
|
||||
|
||||
@@ -1,5 +1,3 @@
|
||||
Upstream-Status: Inappropriate [configuration]
|
||||
|
||||
---
|
||||
CMakeLists.txt | 1 -
|
||||
1 file changed, 1 deletion(-)
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user