Mirror of https://git.yoctoproject.org/poky
Synced 2026-02-15 21:23:04 +01:00

Compare commits: bernard-5. ... pinky-3.1. (8 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 9c08da6cd1 |  |
|  | 4168c08285 |  |
|  | f026861423 |  |
|  | 602d17d8cb |  |
|  | a1868835fe |  |
|  | 2f6c30fc9a |  |
|  | 0f9a67f1cf |  |
|  | 7d846ee9bb |  |
.gitignore (vendored)

@@ -1,37 +0,0 @@
*.pyc
*.pyo
build/conf/local.conf
build/conf/bblayers.conf
build/downloads
build/tmp/
build/sstate-cache
build/pyshtables.py
pstage/
scripts/poky-git-proxy-socks
sources/
meta-darwin
meta-maemo
meta-extras
meta-m2
meta-prvt*
poky-autobuilder*
*.swp
*.orig
*.rej
*~
documentation/poky-ref-manual/poky-ref-manual.html
documentation/poky-ref-manual/poky-ref-manual.pdf
documentation/poky-ref-manual/poky-ref-manual.tgz
documentation/poky-ref-manual/bsp-guide.html
documentation/poky-ref-manual/bsp-guide.pdf
documentation/bsp-guide/bsp-guide.html
documentation/bsp-guide/bsp-guide.pdf
documentation/bsp-guide/bsp-guide.tgz
documentation/yocto-project-qs/yocto-project-qs.html
documentation/yocto-project-qs/yocto-project-qs.tgz
documentation/kernel-manual/kernel-manual.html
documentation/kernel-manual/kernel-manual.tgz
documentation/kernel-manual/kernel-manual.pdf
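A pattern list like the one removed above is easier to sanity-check than to
read. Assuming a reasonably recent git on the workstation (check-ignore is
available from git 1.8.2 onwards), you can test which pattern matches a given
path with, for example:

    $ git check-ignore -v build/tmp/deploy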
LICENSE

@@ -5,10 +5,7 @@ bitbake/COPYING (GPLv2)
meta/COPYING.MIT (MIT)
meta-extras/COPYING.MIT (MIT)

which cover the components in those subdirectories. This means all
metadata is MIT licensed unless otherwise stated. Source code included
in tree for individual recipes is under the LICENSE stated in the .bb
file for those software projects unless otherwise stated.
which cover the components in those subdirectories.

License information for any other files is either explicitly stated
or defaults to GPL version 2.
README.hardware

@@ -1,428 +1,436 @@
Poky Hardware README
====================
Poky Hardware Reference Guide
=============================

This file gives details about using Poky with different hardware reference
boards and consumer devices. A full list of target machines can be found by
looking in the meta/conf/machine/ directory. If in doubt about using Poky with
your hardware, consult the documentation for your board/device.
boards and consumer devices. A full list of target machines can be found by
looking in the meta/conf/machine/ directory. If in doubt about using Poky with
your hardware, consult the documentation for your board/device. To discuss
support for further hardware reference boards/devices please contact OpenedHand.
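For example, one way to list the available machines and select one - a sketch
assuming an initialised build directory with the conf/local.conf layout used
throughout this file - is:

    $ ls meta/conf/machine/*.conf
    $ echo 'MACHINE = "beagleboard"' >> build/conf/local.conf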
Support for additional devices is normally added by creating BSP layers - for
more information please see the Yocto Board Support Package (BSP) Developer's
Guide - documentation source is in documentation/bspguide or download the PDF
from:

http://yoctoproject.org/community/documentation

Support for machines other than QEMU may be moved out to separate BSP layers in
future versions.


QEMU Emulation Targets
======================
QEMU Emulation Images (qemuarm and qemux86)
===========================================

To simplify development Poky supports building images to work with the QEMU
emulator in system emulation mode. Several architectures are currently
supported:

  * ARM (qemuarm)
  * x86 (qemux86)
  * x86-64 (qemux86-64)
  * PowerPC (qemuppc)
  * MIPS (qemumips)

Use of the QEMU images is covered in the Poky Reference Manual. The Poky
MACHINE setting corresponding to the target is given in brackets.
emulator in system emulation mode. Two architectures are currently supported,
ARM (via qemuarm) and x86 (via qemux86). Use of the QEMU images is covered
in the Poky Handbook.
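As a concrete illustration of the QEMU workflow - a sketch assuming a recent
Poky checkout where the runqemu helper lives in scripts/ (the helper's name
has varied across releases) - an emulated image can be built and booted with:

    $ bitbake poky-image-minimal
    $ runqemu qemux86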
Hardware Reference Boards
=========================

The following boards are supported by Poky's core layer:
The following boards are supported by Poky:

  * Texas Instruments Beagleboard (beagleboard)
  * Freescale MPC8315E-RDB (mpc8315e-rdb)
  * Ubiquiti Networks RouterStation Pro (routerstationpro)
  * Compulab CM-X270 (cm-x270)
  * Compulab EM-X270 (em-x270)
  * FreeScale iMX31ADS (mx31ads)
  * Marvell PXA3xx Zylonite (zylonite)
  * Logic iMX31 Lite Kit (mx31litekit)
  * Phytec phyCORE-iMX31 (mx31phy)

For more information see the board's section below. The Poky MACHINE setting
corresponding to the board is given in brackets.


Consumer Devices
================

The following consumer devices are supported by Poky's core layer:
The following consumer devices are supported by Poky:

  * Intel Atom based PCs and devices (atom-pc)
  * FIC Neo1973 GTA01 smartphone (fic-gta01)
  * HTC Universal (htcuniversal)
  * Nokia 770/N800/N810 Internet Tablets (nokia770 and nokia800)
  * Sharp Zaurus SL-C7x0 series (c7x0)
  * Sharp Zaurus SL-C1000 (akita)
  * Sharp Zaurus SL-C3x00 series (spitz)

For more information see the device's section below. The Poky MACHINE setting
corresponding to the device is given in brackets.
For more information see the board's section below. The Poky MACHINE setting
corresponding to the board is given in brackets.

Poky Boot CD (bootcdx86)
========================

The Poky boot CD iso images are designed as a demonstration of the Poky
environment and to show the versatile image formats Poky can generate. It will
run on Pentium2 or greater PC style computers. The iso image can be
burnt to CD and then booted from.


Specific Hardware Documentation
===============================


Hardware Reference Boards
=========================
Intel Atom based PCs and devices (atom-pc)
==========================================
Compulab CM-X270 (cm-x270)
==========================

The atom-pc MACHINE is tested on the following platforms:
The bootloader on this board doesn't support writing jffs2 images directly to
NAND and normally uses a proprietary kernel flash driver. To allow the use of
jffs2 images, a two stage updating procedure is needed. Firstly, an initramfs
is booted which contains mtd utilities and this is then used to write the main
filesystem.

  o Asus eee901
  o Acer Aspire One
  o Toshiba NB305
  o Intel Embedded Development Board 1-N450 (Black Sand)
It is assumed the board is connected to a network where a TFTP server is
available and that a serial terminal is available to communicate with the
bootloader (38400, 8N1). If a DHCP server is available the device will use it
to obtain an IP address. If not, run:

and is likely to work on many unlisted atom based devices. The MACHINE type
supports ethernet, wifi, sound, and i915 graphics by default in addition to
common PC input devices, busses, and so on.
    ARMmon > setip dhcp off
    ARMmon > setip ip 192.168.1.203
    ARMmon > setip mask 255.255.255.0

Depending on the device, it can boot from a traditional hard-disk, a USB device,
or over the network. Writing poky generated images to physical media is
straightforward with a caveat for USB devices. The following examples assume the
target boot device is /dev/sdb, be sure to verify this and use the correct
device as the following commands are run as root and are not reversible.
To reflash the kernel:

Hard Disk:
1. Build a directdisk image format. This will generate proper partition tables
   that will in turn be written to the physical media. For example:
    ARMmon > download kernel tftp zimage 192.168.1.202
    ARMmon > flash kernel

   $ bitbake poky-image-minimal-directdisk

2. Use the "dd" utility to write the image to the raw block device. For example:
where zimage is the name of the kernel on the TFTP server and its IP address is
192.168.1.202. The names of the files must be all lowercase.

   # dd if=poky-image-minimal-directdisk-atom-pc.hdddirect of=/dev/sdb
To reflash the initrd/initramfs:

USB Device:
1. Build an hddimg image format. This is a simple filesystem without partition
   tables and is suitable for USB keys. For example:
    ARMmon > download ramdisk tftp diskimage 192.168.1.202
    ARMmon > flash ramdisk

   $ bitbake poky-image-minimal-live
where diskimage is the name of the initramfs image (a cpio.gz file).

2. Use the "dd" utility to write the image to the raw block device. For
   example:
To boot the initramfs:

   # dd if=poky-image-minimal-live-atom-pc.hddimg of=/dev/sdb
    ARMmon > ramdisk on
    ARMmon > bootos "console=ttyS0,38400 rdinit=/sbin/init"

If the device fails to boot with "Boot error" displayed, it is likely the BIOS
cannot understand the physical layout of the disk (or rather it expects a
particular layout and cannot handle anything else). There are two possible
solutions to this problem:
To reflash the main image login to the system as user "root", then run:

1. Change the BIOS USB Device setting to HDD mode. The label will vary by
   device, but the idea is to force BIOS to read the Cylinder/Head/Sector
   geometry from the device.
    # ifconfig eth0 192.168.1.203
    # tftp -g -r mainimage 192.168.1.202
    # flash_eraseall /dev/mtd1
    # nandwrite /dev/mtd1 mainimage

2. Without such an option, the BIOS generally boots the device in USB-ZIP
   mode.
which configures the network interface with the IP address 192.168.1.203,
downloads the "mainimage" file from the TFTP server at 192.168.1.202, erases
the flash and then writes the new image to the flash.

   a. Configure the USB device for USB-ZIP mode:

      # mkdiskimage -4 /dev/sdb 0 63 62
The main image can then be booted with:

      Where 63 and 62 are the head and sector count as reported by fdisk.
      Remove and reinsert the device to allow the kernel to detect the new
      partition layout.
    ARMmon > bootos "console=ttyS0,38400 root=/dev/mtdblock1 rootfstype=jffs2"

   b. Copy the contents of the poky image to the USB-ZIP mode device:
Note that the initramfs image is built by poky in a slightly different mode to
normal since it uses uclibc. To generate this use a command like:

      # mount -o loop poky-image-minimal-live-atom-pc.hddimg /tmp/image
      # mount /dev/sdb4 /tmp/usbkey
      # cp -rf /tmp/image/* /tmp/usbkey
    IMAGE_FSTYPES=cpio.gz MACHINE=cm-x270 POKYLIBC=uclibc bitbake poky-image-minimal-mtdutils

   c. Install the syslinux boot loader:

      # syslinux /dev/sdb4
Compulab EM-X270 (em-x270)
==========================

Install the boot device in the target board and configure the BIOS to boot
from it.
Fetch the "Linux - kernel and run-time image (Angstrom)" ZIP file from the
Compulab website. Inside the images directory of this ZIP file is another ZIP
file called 'LiveDisk.zip'. Extract this over a cleanly formatted vfat USB flash
drive. Replace the 'em_x270.img' file with the 'updater-em-x270.ext2' file.

For more details on the USB-ZIP scenario, see the syslinux documentation:
http://git.kernel.org/?p=boot/syslinux/syslinux.git;a=blob_plain;f=doc/usbkey.txt;hb=HEAD
Insert this USB disk into the supplied adapter and connect this to the
board. Whilst holding down the suspend button press the reset button. The
board will now boot off the USB key and into a version of Angstrom. On the
desktop is an icon labelled "Updater". Run this program to launch the updater
that will flash the Poky kernel and rootfs to the board.
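Before running any of the dd commands in the atom-pc instructions above, it is
worth double-checking which block device really corresponds to the target
media - a sketch, assuming a typical Linux workstation with util-linux
installed:

    # lsblk -o NAME,SIZE,MODEL
    # dmesg | tail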
Texas Instruments Beagleboard (beagleboard)
===========================================
FreeScale iMX31ADS (mx31ads)
===========================

The Beagleboard is an ARM Cortex-A8 development board with USB, DVI-D, S-Video,
2D/3D accelerated graphics, audio, serial, JTAG, and SD/MMC. The xM adds a
faster CPU, more RAM, an ethernet port, more USB ports, microSD, and removes
the NAND flash. The beagleboard MACHINE is tested on the following platforms:
The correct serial port is the top-most female connector to the right of the
ethernet socket.

  o Beagleboard C4
  o Beagleboard xM Rev A
For uploading data to RedBoot we are going to use tftp. In this example we
assume that the TFTP server is on 192.168.9.1 and the board is on 192.168.9.2.

The Beagleboard C4 has NAND, while the xM does not. For the sake of simplicity,
these instructions assume you have erased the NAND on the C4 so its boot
behavior matches that of the xM. To do this, issue the following commands from
the u-boot prompt (note that the unlock may be unnecessary depending on the
version of u-boot installed on your board and only one of the erase commands
will succeed):
To set the IP address, run:

    # nand unlock
    # nand erase
    # nand erase.chip
    ip_address -l 192.168.9.2/24 -h 192.168.9.1

To further tailor these instructions for your board, please refer to the
documentation at http://www.beagleboard.org.
To download a kernel called "zimage" from the TFTP server, run:

From a Linux system with access to the image files perform the following steps
as root, replacing mmcblk0* with the SD card device on your machine (such as sdc
if used via a usb card reader):
    load -r -b 0x100000 zimage

1. Partition and format an SD card:
   # fdisk -lu /dev/mmcblk0

   Disk /dev/mmcblk0: 3951 MB, 3951034368 bytes
   255 heads, 63 sectors/track, 480 cylinders, total 7716864 sectors
   Units = sectors of 1 * 512 = 512 bytes

           Device Boot      Start         End      Blocks   Id  System
   /dev/mmcblk0p1   *           63      144584       72261    c  Win95 FAT32 (LBA)
   /dev/mmcblk0p2           144585      465884      160650   83  Linux
To write the kernel to flash run:

   # mkfs.vfat -F 16 -n "boot" /dev/mmcblk0p1
   # mke2fs -j -L "root" /dev/mmcblk0p2
    fis create kernel

The following assumes the SD card partition 1 and 2 are mounted at
/media/boot and /media/root respectively. Removing the card and reinserting
it will do just that on most modern Linux desktop environments.

The files referenced below are made available after the build in
build/tmp/deploy/images.
To download a rootfs jffs2 image "rootfs" from the TFTP server, run:

2. Install the boot loaders
   # cp MLO-beagleboard /media/boot/MLO
   # cp u-boot-beagleboard.bin /media/boot/u-boot.bin
    load -r -b 0x100000 rootfs

3. Install the root filesystem
   # tar x -C /media/root -f poky-image-$IMAGE_TYPE-beagleboard.tar.bz2
   # tar x -C /media/root -f modules-$KERNEL_VERSION-beagleboard.tgz
To write the root filesystem to flash run:

4. Install the kernel uImage
   # cp uImage-beagleboard.bin /media/boot/uImage
    fis create root

5. Prepare a u-boot script to simplify the boot process
   The Beagleboard can be made to boot at this point from the u-boot command
   shell. To automate this process, generate a user.scr script as follows.
To load and boot a kernel and rootfs from flash:

   Install uboot-mkimage (from uboot-mkimage on Ubuntu or uboot-tools on Fedora).
    fis load kernel
    exec -b 0x100000 -l 0x200000 -c "noinitrd console=ttymxc0,115200 root=/dev/mtdblock2 rootfstype=jffs2 init=linuxrc ip=none"

   Prepare a script config:
To load and boot a kernel from a TFTP server with the rootfs over NFS:

   # (cat << EOF
   setenv bootcmd 'mmc init; fatload mmc 0:1 0x80300000 uImage; bootm 0x80300000'
   setenv bootargs 'console=tty0 console=ttyO2,115200n8 root=/dev/mmcblk0p2 rootwait rootfstype=ext3 ro'
   boot
   EOF
   ) > serial-boot.cmd
   # mkimage -A arm -O linux -T script -C none -a 0 -e 0 -n "Core Minimal" -d ./serial-boot.cmd ./boot.scr
   # cp boot.scr /media/boot
    load -r -b 0x100000 zimage
    exec -b 0x100000 -l 0x200000 -c "noinitrd console=ttymxc0,115200 root=/dev/nfs nfsroot=192.168.9.1:/mnt/nfsmx31 rw ip=192.168.9.2::192.168.9.1:255.255.255.0"

6. Unmount the SD partitions, insert the SD card into the Beagleboard, and
   boot the Beagleboard
The instructions above are for using the (default) NOR flash on the board;
there is also 128M of NAND flash. It is possible to install Poky to the NAND
flash which gives more space for the rootfs and instructions for using this are
given below. To switch to the NAND flash:

Note: As of the 2.6.37 linux-yocto kernel recipe, the Beagleboard uses the
OMAP_SERIAL device (ttyO2). If you are using an older kernel, such as the
2.6.34 linux-yocto-stable, be sure to replace ttyO2 with ttyS2 above. You
should also override the machine SERIAL_CONSOLE in your local.conf in
order to set up the getty on the serial line:
    factive NAND

    SERIAL_CONSOLE_beagleboard = "115200 ttyS2"
This will then restart RedBoot using the NAND rather than the NOR. If you
have not used the NAND before then it is unlikely that there will be a
partition table yet. You can get the list of partitions with 'fis list'.

If this shows no partitions then you can create them with:
Freescale MPC8315E-RDB (mpc8315e-rdb)
=====================================
    fis init

The MPC8315 PowerPC reference platform (MPC8315E-RDB) is aimed at hardware and
software development of network attached storage (NAS) and digital media server
applications. The MPC8315E-RDB features the PowerQUICC II Pro processor, which
includes a built-in security accelerator.
The output of 'fis list' should now show:

Setup instructions
------------------
    Name              FLASH addr  Mem addr    Length      Entry point
    RedBoot           0xE0000000  0xE0000000  0x00040000  0x00000000
    FIS directory     0xE7FF4000  0xE7FF4000  0x00003000  0x00000000
    RedBoot config    0xE7FF7000  0xE7FF7000  0x00001000  0x00000000

You will need the following:
* nfs root setup on your workstation
* tftp server installed on your workstation
Partitions for the kernel and rootfs need to be created:

Load the kernel and boot it as follows:
    fis create -l 0x1A0000 -e 0x00100000 kernel
    fis create -l 0x5000000 -e 0x00100000 root

1. Get the kernel (uImage.mpc8315erdb) and dtb (mpc8315erdb.dtb) files from
   the Poky build tmp/deploy directory, and make them available on your tftp
   server.
You may now use the instructions above for flashing. However it is important
to note that the erase block size for the NAND is different to the NOR so the
JFFS erase size will need to be changed to 0x4000. Standard images are built
for NOR and you will need to build custom images for NAND.

2. Set up the environment in U-Boot:
You will also need to update the kernel command line to use the correct root
filesystem. This should be '/dev/mtdblock7' if you adhere to the partitioning
scheme shown above. If this fails then you can double-check against the output
from the kernel when it evaluates the available mtd partitions.

   =>setenv ipaddr <board ip>
   =>setenv serverip <tftp server ip>
   =>setenv bootargs root=/dev/nfs rw nfsroot=<nfsroot ip>:<rootfs path> ip=<board ip>:<server ip>:<gateway ip>:255.255.255.0:mpc8315e:eth0:off console=ttyS0,115200

3. Download kernel and dtb to boot kernel.
Marvell PXA3xx Zylonite (zylonite)
==================================

   =>tftp 800000 uImage.mpc8315erdb
   =>tftp 780000 mpc8315erdb.dtb
   =>bootm 800000 - 780000
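The "nfs root setup on your workstation" prerequisite in the MPC8315E-RDB
steps above can be satisfied in several ways; one sketch, assuming a Linux
workstation with an NFS server package installed, a hypothetical export path
/srv/nfs/mpc8315e, and an assumed rootfs tarball name following the
conventions in this file, is:

    # mkdir -p /srv/nfs/mpc8315e
    # tar x -C /srv/nfs/mpc8315e -f poky-image-minimal-mpc8315e-rdb.tar.bz2
    # echo '/srv/nfs/mpc8315e *(rw,no_root_squash,sync,no_subtree_check)' >> /etc/exports
    # exportfs -ra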
These instructions assume the Zylonite is connected to a machine running a TFTP
server at address 192.168.123.5 and that a serial link (38400 8N1) is available
to access the blob bootloader. The kernel is on the TFTP server as
"zylonite-kernel" and the root filesystem jffs2 file is "zylonite-rootfs" and
the images are to be saved in NAND flash.

The following commands set up blob:

Ubiquiti Networks RouterStation Pro (routerstationpro)
======================================================
    blob> setip client 192.168.123.4
    blob> setip server 192.168.123.5

The RouterStation Pro is an Atheros AR7161 MIPS-based board. Geared towards
networking applications, it has all of the usual features as well as three
type IIIA mini-PCI slots and an on-board 3-port 10/100/1000 Ethernet switch,
in addition to the 10/100/1000 Ethernet WAN port which supports
Power-over-Ethernet.
To flash the kernel:

Setup instructions
------------------
    blob> tftp zylonite-kernel
    blob> nandwrite -j 0x80800000 0x60000 0x200000

You will need the following:
* A serial cable - female to female (or female to male + gender changer)
  NOTE: cable must be straight through, *not* a null modem cable.
* USB flash drive or hard disk that is able to be powered from the
  board's USB port.
* tftp server installed on your workstation
To flash the rootfs:

NOTE: in the following instructions it is assumed that /dev/sdb corresponds
to the USB disk when it is plugged into your workstation. If this is not the
case in your setup then please be careful to substitute the correct device
name in all commands where appropriate.
    blob> tftp zylonite-rootfs
    blob> nanderase -j 0x260000 0x5000000
    blob> nandwrite -j 0x80800000 0x260000 <length>

--- Preparation ---
(where <length> is the rootfs size which will be printed by the tftp step)

1) Build an image (e.g. poky-image-minimal) using "routerstationpro" as the
   MACHINE
To boot the board:

2) Partition the USB drive so that primary partition 1 is type Linux (83).
   Minimum size depends on your root image size - poky-image-minimal probably
   only needs 8-16MB, other images will need more.
    blob> nkernel
    blob> boot

   # fdisk /dev/sdb
   Command (m for help): p

   Disk /dev/sdb: 4011 MB, 4011491328 bytes
   124 heads, 62 sectors/track, 1019 cylinders, total 7834944 sectors
   Units = sectors of 1 * 512 = 512 bytes
   Sector size (logical/physical): 512 bytes / 512 bytes
   I/O size (minimum/optimal): 512 bytes / 512 bytes
   Disk identifier: 0x0009e87d
Logic iMX31 Lite Kit (mx31litekit)
==================================

      Device Boot      Start         End      Blocks   Id  System
   /dev/sdb1                62     1952751      976345   83  Linux
The easiest method to boot this board is to take an MMC/SD card and format
the first partition as ext2, then extract the poky image onto this as root.
Assuming the board is network connected, a TFTP server is available at
192.168.1.33 and a serial terminal is available (115200 8N1), the following
commands will boot a kernel called "mx31kern" from the TFTP server:

3) Format partition 1 on the USB as ext3
    losh> ifconfig sm0 192.168.1.203 255.255.255.0 192.168.1.33
    losh> load raw 0x80100000 0x200000 /tftp/192.168.1.33:mx31kern
    losh> exec 0x80100000 -

   # mke2fs -j /dev/sdb1

4) Mount partition 1 and then extract the contents of
   tmp/deploy/images/poky-image-XXXX.tar.bz2 into it (preserving permissions).
Phytec phyCORE-iMX31 (mx31phy)
==============================

   # mount /dev/sdb1 /media/sdb1
   # cd /media/sdb1
   # tar -xvjpf tmp/deploy/images/poky-image-XXXX.tar.bz2
Support for this board is currently being developed. Experimental jffs2
images and a suitable kernel are available and are known to work with the
board.

5) Unmount the USB drive and then plug it into the board's USB port

6) Connect the board's serial port to your workstation and then start up
   your favourite serial terminal so that you will be able to interact with
   the serial console. If you don't have a favourite, picocom is suggested:
Consumer Devices
================

   $ picocom /dev/ttyUSB0 -b 115200
FIC Neo1973 GTA01 smartphone (fic-gta01)
========================================

7) Connect the network into eth0 (the one that is NOT the 3 port switch). If
   you are using power-over-ethernet then the board will power up at this point.
To install Poky on a GTA01 smartphone you will need the "dfu-util" tool,
which you can build with the "bitbake dfu-util-native" command.

8) Start up the board, watch the serial console. Hit Ctrl+C to abort the
   autostart if the board is configured that way (it is by default). The
   bootloader's fconfig command can be used to disable autostart and configure
   the IP settings if you need to change them (default IP is 192.168.1.20).
Flashing requires these steps:

9) Make the kernel (tmp/deploy/images/vmlinux-routerstationpro.bin) available
   on the tftp server.
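Several sections above assume a TFTP server on the workstation. One minimal
sketch - assuming dnsmasq is installed and using the hypothetical serving
directory /srv/tftp - is:

    # mkdir -p /srv/tftp
    # cp tmp/deploy/images/vmlinux-routerstationpro.bin /srv/tftp/
    # dnsmasq --port=0 --enable-tftp --tftp-root=/srv/tftp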
1. Power down the device.
2. Connect the device to the host machine via USB.
3. Hold the AUX key and press the Power key. There should be a boot menu
   on screen.
4. Run "dfu-util -l" to check if the phone is visible on the USB bus.
   The output should look like this:

10) If you are going to write the kernel to flash (optional - see "Booting a
    kernel directly" below for the alternative), remove the current kernel and
    rootfs flash partitions. You can list the partitions using the following
    bootloader command:
    dfu-util - (C) 2007 by OpenMoko Inc.
    This program is Free Software and has ABSOLUTELY NO WARRANTY

    RedBoot> fis list
    Found Runtime: [0x1457:0x5119] devnum=19, cfg=0, intf=2, alt=0, name="USB Device Firmware Upgrade"

You can delete the existing kernel and rootfs with these commands:
5. Flash the kernel with "dfu-util -a kernel -D uImage-2.6.21.6-moko11-r2-fic-gta01.bin"
6. Flash the rootfs with "dfu-util -a rootfs -D <image>", where <image> is the
   jffs2 image file to use as the root filesystem
   (e.g. ./tmp/deploy/images/poky-image-sato-fic-gta01.jffs2)

    RedBoot> fis delete kernel
    RedBoot> fis delete rootfs

--- Booting a kernel directly ---
HTC Universal (htcuniversal)
============================

1) Load the kernel using the following bootloader command:
Note: HTC Universal support is highly experimental.

    RedBoot> load -m tftp -h <ip of tftp server> vmlinux-routerstationpro.bin
On the HTC Universal, entirely replacing the Windows installation is not
supported; instead Poky is booted from an MMC/SD card from Windows. Once Poky
has booted, Windows is no longer in memory or active, but when power is removed
the user will be returned to Windows and will need to return to Linux from
there.

   You should see a message on it being successfully loaded.
Once an MMC/SD card is available it is suggested it is split into two partitions,
one for a program called HaRET which lets you boot Linux from within Windows
and the second for the rootfs. The HaRET partition should be the first partition
on the card and be vfat formatted. It doesn't need to be large, just enough for
HaRET and a kernel (say 5MB max). The rootfs should be ext2 and is usually the
second partition. The first partition should be vfat so Windows recognises it;
if it doesn't, it has been known to reformat cards.

2) Execute the kernel:
On the first partition you need three files:

    RedBoot> exec -c "console=ttyS0,115200 root=/dev/sda1 rw rootdelay=2 board=UBNT-RSPRO"
  * a HaRET binary (version 0.5.1 works well and a working version
    should be part of the last Poky release)
  * a kernel renamed to "zImage"
  * a default.txt which contains:

   Note that specifying the command line with -c is important as linux-yocto does
   not provide a default command line.
    set kernel "zImage"
    set mtype "855"
    set cmdline "root=/dev/mmcblk0p2 rw console=ttyS0,115200n8 console=tty0 rootdelay=5 fbcon=rotate:1"
    boot2

--- Writing a kernel to flash ---
On the second partition the root file system is extracted as root. A different
partition layout or other kernel options can be changed in the default.txt file.

1) Go to your tftp server and gzip the kernel you want in flash. It should
   halve the size.
When inserted into the device, Windows should see the card and let you browse
its contents using File Explorer. Running the HaRET binary will present a dialog
box (maybe after messages warning about running unsigned binaries) where you
select OK and you should then see Poky boot. Kernel messages can be seen by
adding psplash=false to the kernel commandline.

2) Load the kernel using the following bootloader command:

    RedBoot> load -r -b 0x80600000 -m tftp -h <ip of tftp server> vmlinux-routerstationpro.bin.gz
Nokia 770/N800/N810 Internet Tablets (nokia770 and nokia800)
============================================================

   This should output something similar to the following:
Note: Nokia tablet support is highly experimental.

    Raw file loaded 0x80600000-0x8087c537, assumed entry at 0x80600000
The Nokia internet tablet devices are OMAP based tablet formfactor devices
with large screens (800x480), wifi and touchscreen.

   Calculate the length by subtracting the first number from the second number
   and then rounding the result up to the nearest 0x1000.
To flash images to these devices you need the "flasher" utility, which can be
downloaded from http://tablets-dev.nokia.com/d3.php?f=flasher-3.0. This
utility needs to be run as root and the usb filesystem needs to be mounted,
although most distributions will have done this for you. Once you have this
follow these steps:

3) Using the length calculated above, create a flash partition for the kernel:
1. Power down the device.
2. Connect the device to the host machine via USB
   (connecting power to the device doesn't hurt either).
3. Run "flasher -i"
4. Power on the device.
5. The program should give an indication it's found
   a tablet device. If not, recheck the cables, make sure you're
   root and usbfs/usbdevfs is mounted.
6. Run "flasher -r <image> -k <kernel> -f", where <image> is the
   jffs2 image file to use as the root filesystem
   (e.g. ./tmp/deploy/images/poky-image-sato-nokia800.jffs2)
   and <kernel> is the kernel to use
   (e.g. ./tmp/deploy/images/zImage-nokia800.bin).
7. Run "flasher -R" to reboot the device.
8. The device should boot into Poky.

    RedBoot> fis create -b 0x80600000 -l 0x240000 kernel
The nokia800 images and kernel will run on both the N800 and N810.

    (change 0x240000 to your rounded length -- change "kernel" to whatever
    you want to name your kernel)

--- Booting a kernel from flash ---
Sharp Zaurus SL-C7x0 series (c7x0)
==================================

To boot the flashed kernel perform the following steps.
The Sharp Zaurus c7x0 series (SL-C700, SL-C750, SL-C760, SL-C860, SL-7500)
are PXA25x based handheld PDAs with VGA screens. To install Poky images on
these devices follow these steps:

1) At the bootloader prompt, load the kernel:
1. Obtain an SD/MMC or CF card with a vfat or ext2 filesystem.
2. Copy a jffs2 image file (e.g. poky-image-sato-c7x0.jffs2) onto the
   card as "initrd.bin":

    RedBoot> fis load -d -e kernel
   $ cp ./tmp/deploy/images/poky-image-sato-c7x0.jffs2 /path/to/my-cf-card/initrd.bin

   (Change the name "kernel" above if you chose something different earlier)
3. Copy a Linux kernel file (zImage-c7x0.bin) onto the card as
   "zImage.bin":

   (-e means 'elf', -d 'decompress')
   $ cp ./tmp/deploy/images/zImage-c7x0.bin /path/to/my-cf-card/zImage.bin

2) Execute the kernel using the exec command as above.
4. Copy an updater script (updater.sh.c7x0) onto the card
   as "updater.sh":

--- Automating the boot process ---
   $ cp ./tmp/deploy/images/updater.sh.c7x0 /path/to/my-cf-card/updater.sh

5. Power down the Zaurus.
6. Hold the "OK" key and power on the device. An update menu should appear
   (in Japanese).
7. Choose "Update" (item 4).
8. The next screen will ask for the source, choose the appropriate
   card (CF or SD).
9. Make sure AC power is connected.
10. The next screen asks for confirmation, choose "Yes" (the left button).
11. The update process will start, flash the files on the card onto
    the device and the device will then reboot into Poky.


Sharp Zaurus SL-C1000 (akita)
=============================

The Sharp Zaurus SL-C1000 is a PXA270 based device otherwise similar to the
c7x0. To install Poky images on this device follow the instructions for
the c7x0 but replace "c7x0" with "akita" where appropriate.


Sharp Zaurus SL-C3x00 series (spitz)
====================================

The Sharp Zaurus SL-C3x00 devices are PXA270 based devices similar
to akita but with an internal microdrive. The installation procedure
assumes a standard microdrive based device where the root (first)
partition has been enlarged to fit the image (at least 100MB,
400MB for the SDK).

The procedure is the same as for the c7x0 and akita models with the
following differences:

1. Instead of a jffs2 image you need to copy a compressed tarball of the
   root filesystem (e.g. poky-image-sato-spitz.tar.gz) onto the
   card as "hdimage1.tgz":

   $ cp ./tmp/deploy/images/poky-image-sato-spitz.tar.gz /path/to/my-cf-card/hdimage1.tgz

2. You additionally need to copy a special tar utility (gnu-tar) onto
   the card as "gnu-tar":

   $ cp ./tmp/deploy/images/gnu-tar /path/to/my-cf-card/gnu-tar

After writing the kernel to flash and testing the load and exec commands
manually, you can automate the boot process with a boot script.

1) RedBoot> fconfig
   (Answer the questions not specified here as they pertain to your environment)
2) Run script at boot: true
   Boot script:
   .. fis load -d -e kernel
   .. exec
   Enter script, terminate with empty line
   >> fis load -d -e kernel
   >> exec -c "console=ttyS0,115200 root=/dev/sda1 rw rootdelay=2 board=UBNT-RSPRO"
   >>
3) Answer the remaining questions and write the changes to flash:
   Update RedBoot non-volatile configuration - continue (y/n)? y
   ... Erase from 0xbfff0000-0xc0000000: .
   ... Program from 0x87ff0000-0x88000000 at 0xbfff0000: .
4) Power cycle the board.
bitbake/AUTHORS

@@ -1,8 +1,8 @@
Tim Ansell <mithro@mithis.net>
Phil Blundell <pb@handhelds.org>
Seb Frankengul <seb@frankengul.org>
Holger Freyther <holger@moiji-mobile.com>
Marcin Juszkiewicz <marcin@juszkiewicz.com.pl>
Holger Freyther <zecke@handhelds.org>
Marcin Juszkiewicz <hrw@hrw.one.pl>
Chris Larson <kergoth@handhelds.org>
Ulrich Luckas <luckas@musoft.de>
Mickey Lauer <mickey@Vanille.de>
bitbake/ChangeLog

@@ -1,61 +1,12 @@
Changes in Bitbake 1.9.x:
- Add PE (Package Epoch) support from Philipp Zabel (pH5)
- Treat python functions the same as shell functions for logging
- Use TMPDIR/anonfunc as a __anonfunc temp directory (T)
- Catch truncated cache file errors
- Allow operations other than assignment on flag variables
- Add code to handle inter-task dependencies
- Fix cache errors when generating dotGraphs
- Make sure __inherit_cache is updated before calling include() (from Michael Krelin)
- Fix bug when target was in ASSUME_PROVIDED (#2236)
- Raise ParseError for filenames with multiple underscores instead of infinitely looping (#2062)
- Fix invalid regexp in BBMASK error handling (missing import) (#1124)
- Promote certain warnings from debug to note 2 level
- Update manual
- Correctly redirect stdin when forking
- If parsing errors are found, exit, too many users miss the errors
- Remove spurious PREFERRED_PROVIDER warnings
- svn fetcher: Add _buildsvncommand function
- Improve certain error messages
- Rewrite svn fetcher to make adding extra operations easier
  as part of future SRCDATE="now" fixes
  (requires new FETCHCMD_svn definition in bitbake.conf)
- Change SVNDIR layout to be more unique (fixes #2644 and #2624)
- Add ConfigParsed Event after configuration parsing is complete
- Add SRCREV support for svn fetcher
- data.emit_var() - only call getVar if we need the variable
- Stop generating the A variable (seems to be legacy code)
- Make sure intertask depends get processed correctly in recursive depends
- Add pn-PN to overrides when evaluating PREFERRED_VERSION
- Improve the progress indicator by skipping tasks that have
  already run before starting the build rather than during it
- Add profiling option (-P)
- Add BB_SRCREV_POLICY variable (clear or cache) to control SRCREV cache
- Add SRCREV_FORMAT support
- Fix local fetcher's localpath return values
- Apply OVERRIDES before performing immediate expansions
- Allow the -b -e option combination to take regular expressions
- Fix handling of variables with expansion in the name using _append/_prepend
  e.g. RRECOMMENDS_${PN}_append_xyz = "abc"
- Add plain message function to bb.msg
- Sort the list of providers before processing so dependency problems are
  reproducible rather than effectively random
- Fix/improve bitbake -s output
- Add locking for fetchers so only one tries to fetch a given file at a given time
- Fix int(0)/None confusion in runqueue.py which causes random gaps in dependency chains
- Expand data in addtasks
- Print the list of missing DEPENDS,RDEPENDS for the "No buildable providers available for required...."
  error message.
- Rework add_task to be more efficient (6% speedup, 7% number of function calls reduction)
- Sort digraph output to make builds more reproducible
- Split expandKeys into two for loops to benefit from the expand_cache (12% speedup)
- runqueue.py: Fix idepends handling to avoid dependency errors
- Clear the terminal TOSTOP flag if set (and warn the user)
- Fix regression from r653 and make SRCDATE/CVSDATE work for packages again
Changes in BitBake 1.8.x:
- Fix exit code for build failures in --continue mode
- Fix git branch tags fetching

Changes in Bitbake 1.8.10:
- Psyco is available only for x86 - do not use it on other architectures.
- Fix a bug in bb.decodeurl where http://some.where.com/somefile.tgz decoded to host="" (#1530)
- Warn about malformed PREFERRED_PROVIDERS (#1072)
- Add support for BB_NICE_LEVEL option (#1627)
- Psyco is used only on x86 as there is no support for other architectures.
- Sort initial providers list by default preference (#1145, #2024)
- Improve provider sorting so preferred versions have preference over latest versions (#768)
- Detect builds of tasks with overlapping providers and warn (will become a fatal error) (#1359)
@@ -67,8 +18,7 @@ Changes in Bitbake 1.9.x:
  do_taskname[cleandirs] = "dir"
- bzr fetcher tweaks from Robert Schuster (#2913)
- Add mercurial (hg) fetcher from Robert Schuster (#2913)
- Don't add duplicates to BBPATH
- Fix preferred_version return values (providers.py)
- Fix bogus preferred_version return values
- Fix 'depends' flag splitting
- Fix unexport handling (#3135)
- Add bb.copyfile function similar to bb.movefile (and improve movefile error reporting)
@@ -91,93 +41,68 @@ Changes in Bitbake 1.9.x:
- Fix shell data updating problems (#1880)
- Properly raise errors for invalid source URI protocols
- Change the wget fetcher failure handling to avoid lockfile problems
- Add git branch support
- Add support for branches in git fetcher (Otavio Salvador, Michael Lauer)
- Make taskdata and runqueue errors more user friendly
- Add norecurse and fullpath options to cvs fetcher
- Fix exit code for build failures in --continue mode
- Fix git branch tags fetching
- Change parseConfigurationFile so it works on real data, not a copy
- Handle 'base' inherit and all other INHERITs from parseConfigurationFile
  instead of BBHandler
- Fix getVarFlags bug in data_smart
- Optimise cache handling by more quickly detecting an invalid cache, only
  saving the cache when it's changed, moving the cache validity check into
  the parsing loop and factoring some getVar calls outside a for loop
- Cooker: Remove a debug message from the parsing loop to lower overhead
- Convert build.py exec_task to use getVarFlags
- Update shell to use cooker.buildFile
- Add StampUpdate event
- Convert -b option to use taskdata/runqueue
- Remove digraph and switch to new stamp checking code. exec_task no longer
  honours dependencies
- Make fetcher timestamp updating non-fatal when permissions don't allow
  updates
- Add BB_SCHEDULER variable/option ("completion" or "speed") controlling
  the way bitbake schedules tasks
- Add BB_STAMP_POLICY variable/option ("perfile" or "full") controlling
  how extensively stamps are looked at for validity
- When handling build target failures make sure idepends are checked and
  failed where needed. Fixes --continue mode crashes.
- Fix -f (force) in conjunction with -b
- Fix problems with recrdeptask handling where some idepends weren't handled
  correctly.
- Handle exit codes correctly (from pH5)
- Work around refs/HEAD issues with git over http (#3410)
- Add proxy support to the CVS fetcher (from Cyril Chemparathy)
- Improve runfetchcmd so errors are seen and various GIT variables are exported
- Add ability to fetchers to check URL validity without downloading
- Improve runtime PREFERRED_PROVIDERS warning message
- Add BB_STAMP_WHITELIST option which contains a list of stamps to ignore when
  checking stamp dependencies and using a BB_STAMP_POLICY of "whitelist"
- No longer weight providers on the basis of a package being "already staged". This
  leads to builds being non-deterministic.
- Flush stdout/stderr before forking to fix duplicate console output
- Make sure recrdeps tasks include all inter-task dependencies of a given fn
- Add bb.runqueue.check_stamp_fn() for use by packaged-staging
- Add PERSISTENT_DIR to store the PersistData in a persistent
  directory != the cache dir.
- Add md5 and sha256 checksum generation functions to utils.py
- Correctly handle '-' characters in class names (#2958)
- Make sure expandKeys has been called on the data dictionary before running tasks
- Correctly add a task override in the form task-TASKNAME.
- Revert the '-' character fix in class names since it breaks things
- When a regexp fails to compile for PACKAGES_DYNAMIC, print a more useful error (#4444)
- Allow to checkout CVS by Date and Time. Just add HHmm to the SRCDATE.
- Move prunedir function to utils.py and add explode_dep_versions function
- Raise an exception if SRCREV == 'INVALID'
- Fix hg fetcher username/password handling and fix crash
- Fix PACKAGES_DYNAMIC handling of packages with '++' in the name
- Rename __depends to __base_depends after configuration parsing so we don't
  recheck the validity of the config files time after time
- Add better environment variable handling. By default it will now only pass certain
  whitelisted variables into the data store. If BB_PRESERVE_ENV is set bitbake will use
  all variables from the environment. If BB_ENV_WHITELIST is set, that whitelist will be
  used instead of the internal bitbake one. Alternatively, BB_ENV_EXTRAWHITE can be used
  to extend the internal whitelist.
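To illustrate the environment whitelisting described in the entry above - a
sketch assuming a bitbake of roughly this vintage and a hypothetical target
"myrecipe" that wants to see a custom variable:

    $ export MYVAR="some value"
    $ BB_ENV_EXTRAWHITE="MYVAR" bitbake myrecipe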
- Perforce fetcher fix to use commandline options instead of being overridden by the environment
- bb.utils.prunedir can cope with symlinks to directories without exceptions
- use @rev when doing a svn checkout
- Add osc fetcher (from Joshua Lock in Poky)
- When SRCREV autorevisioning for a recipe is in use, don't cache the recipe
- Add tryaltconfigs option to control whether bitbake tries using alternative providers
  to fulfil failed dependencies. It defaults to off, changing the default since this
  behaviour confuses many users and isn't often useful.
- Improve lock file function error handling
- Add username handling to the git fetcher (Robert Bragg)
- Add support for HTTP_PROXY and HTTP_PROXY_IGNORE variables to the wget fetcher
- Export more variables to the fetcher commands to allow ssh checkouts and checkouts through
  proxies to work better. (from Poky)
- Also allow user and pswd options in SRC_URIs globally (from Poky)
- Improve proxy handling when using mirrors (from Poky)
- Add bb.utils.prune_suffix function
- Fix hg checkouts of specific revisions (from Poky)
- Fix wget fetching of urls with parameters specified (from Poky)
- Add username handling to git fetcher (from Poky)
- Set HOME environmental variable when running fetcher commands (from Poky)
- Make sure allowed variables inherited from the environment are exported again (from Poky)
- When running a stage task in bbshell, run populate_staging, not the stage task (from Poky)
- Fix + character escaping from PACKAGES_DYNAMIC (thanks Otavio Salvador)
- Addition of BBCLASSEXTEND support for allowing one recipe to provide multiple targets (from Poky)

Changes in Bitbake 1.8.8:
- Rewrite svn fetcher to make adding extra operations easier
  as part of future SRCDATE="now" fixes
  (requires new FETCHCMD_svn definition in bitbake.conf)
- Change SVNDIR layout to be more unique (fixes #2644 and #2624)
- Import persistent data store from trunk
- Sync fetcher code with that in trunk, adding SRCREV support for svn
- Add ConfigParsed Event after configuration parsing is complete
- data.emit_var() - only call getVar if we need the variable
- Stop generating the A variable (seems to be legacy code)
- Make sure intertask depends get processed correctly in recursive depends
- Add pn-PN to overrides when evaluating PREFERRED_VERSION
- Improve the progress indicator by skipping tasks that have
  already run before starting the build rather than during it
- Add profiling option (-P)
- Add BB_SRCREV_POLICY variable (clear or cache) to control SRCREV cache
- Add SRCREV_FORMAT support
- Fix local fetcher's localpath return values
- Apply OVERRIDES before performing immediate expansions
- Allow the -b -e option combination to take regular expressions
- Add plain message function to bb.msg
- Sort the list of providers before processing so dependency problems are
  reproducible rather than effectively random
- Add locking for fetchers so only one tries to fetch a given file at a given time
- Fix int(0)/None confusion in runqueue.py which causes random gaps in dependency chains
- Fix handling of variables with expansion in the name using _append/_prepend
  e.g. RRECOMMENDS_${PN}_append_xyz = "abc"
- Expand data in addtasks
- Print the list of missing DEPENDS,RDEPENDS for the "No buildable providers available for required...."
  error message.
- Rework add_task to be more efficient (6% speedup, 7% number of function calls reduction)
- Sort digraph output to make builds more reproducible
- Split expandKeys into two for loops to benefit from the expand_cache (12% speedup)
- runqueue.py: Fix idepends handling to avoid dependency errors
- Clear the terminal TOSTOP flag if set (and warn the user)
- Fix regression from r653 and make SRCDATE/CVSDATE work for packages again

Changes in Bitbake 1.8.6:
- Correctly redirect stdin when forking
- If parsing errors are found, exit, too many users miss the errors
- Remove spurious PREFERRED_PROVIDER warnings

Changes in Bitbake 1.8.4:
- Make sure __inherit_cache is updated before calling include() (from Michael Krelin)
- Fix bug when target was in ASSUME_PROVIDED (#2236)
- Raise ParseError for filenames with multiple underscores instead of infinitely looping (#2062)
- Fix invalid regexp in BBMASK error handling (missing import) (#1124)
- Don't run build sanity checks on incomplete builds
- Promote certain warnings from debug to note 2 level
- Update manual

Changes in Bitbake 1.8.2:
- Catch truncated cache file errors
- Add PE (Package Epoch) support from Philipp Zabel (pH5)
- Add code to handle inter-task dependencies
- Allow operations other than assignment on flag variables
- Fix cache errors when generating dotGraphs

Changes in Bitbake 1.8.0:
- Release 1.7.x as a stable series
bitbake/MANIFEST (new file)

@@ -0,0 +1,52 @@
AUTHORS
COPYING
ChangeLog
MANIFEST
setup.py
bin/bitdoc
bin/bbimage
bin/bitbake
lib/bb/__init__.py
lib/bb/build.py
lib/bb/cache.py
lib/bb/cooker.py
lib/bb/COW.py
lib/bb/data.py
lib/bb/data_smart.py
lib/bb/event.py
lib/bb/fetch/__init__.py
lib/bb/fetch/bzr.py
lib/bb/fetch/cvs.py
lib/bb/fetch/git.py
lib/bb/fetch/hg.py
lib/bb/fetch/local.py
lib/bb/fetch/perforce.py
lib/bb/fetch/ssh.py
lib/bb/fetch/svk.py
lib/bb/fetch/svn.py
lib/bb/fetch/wget.py
lib/bb/manifest.py
lib/bb/methodpool.py
lib/bb/msg.py
lib/bb/parse/__init__.py
lib/bb/parse/parse_py/__init__.py
lib/bb/parse/parse_py/BBHandler.py
lib/bb/parse/parse_py/ConfHandler.py
lib/bb/persist_data.py
lib/bb/providers.py
lib/bb/runqueue.py
lib/bb/shell.py
lib/bb/taskdata.py
lib/bb/utils.py
setup.py
doc/COPYING.GPL
doc/COPYING.MIT
doc/bitbake.1
doc/manual/html.css
doc/manual/Makefile
doc/manual/usermanual.xml
contrib/bbdev.sh
contrib/vim/syntax/bitbake.vim
contrib/vim/ftdetect/bitbake.vim
conf/bitbake.conf
classes/base.bbclass
bitbake/bin/bbimage (new executable file)

@@ -0,0 +1,155 @@
#!/usr/bin/env python
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (C) 2003 Chris Larson
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import sys, os
sys.path.insert(0,os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), 'lib'))
import bb
from bb import *

__version__ = 1.1
type = "jffs2"
cfg_bb = data.init()
cfg_oespawn = data.init()

bb.msg.set_debug_level(0)

def usage():
    print "Usage: bbimage [options ...]"
    print "Creates an image for a target device from a root filesystem,"
    print "obeying configuration parameters from the BitBake"
    print "configuration files, thereby easing handling of deviceisms."
    print ""
    print " %s\t\t%s" % ("-r [arg], --root [arg]", "root directory (default=${IMAGE_ROOTFS})")
    print " %s\t\t%s" % ("-t [arg], --type [arg]", "image type (jffs2[default], cramfs)")
    print " %s\t\t%s" % ("-n [arg], --name [arg]", "image name (override IMAGE_NAME variable)")
    print " %s\t\t%s" % ("-v, --version", "output version information and exit")
    sys.exit(0)

def version():
    print "BitBake Build Tool Core version %s" % bb.__version__
    print "BBImage version %s" % __version__

def emit_bb(d, base_d = {}):
    for v in d.keys():
        if d[v] != base_d[v]:
            data.emit_var(v, d)

def getopthash(l):
    h = {}
    for (opt, val) in l:
        h[opt] = val
    return h

import getopt
try:
    (opts, args) = getopt.getopt(sys.argv[1:], 'vr:t:e:n:', [ 'version', 'root=', 'type=', 'bbfile=', 'name=' ])
except getopt.GetoptError:
    usage()

# handle opts
opthash = getopthash(opts)

if '--version' in opthash or '-v' in opthash:
    version()
    sys.exit(0)

try:
    cfg_bb = parse.handle(os.path.join('conf', 'bitbake.conf'), cfg_bb)
except IOError:
    fatal("Unable to open bitbake.conf")

# sanity check
if cfg_bb is None:
    fatal("Unable to open/parse %s" % os.path.join('conf', 'bitbake.conf'))
    usage(1)

rootfs = None
extra_files = []

if '--root' in opthash:
    rootfs = opthash['--root']
if '-r' in opthash:
    rootfs = opthash['-r']

if '--type' in opthash:
    type = opthash['--type']
if '-t' in opthash:
    type = opthash['-t']

if '--bbfile' in opthash:
    extra_files.append(opthash['--bbfile'])
if '-e' in opthash:
    extra_files.append(opthash['-e'])

for f in extra_files:
    try:
        cfg_bb = parse.handle(f, cfg_bb)
    except IOError:
        print "unable to open %s" % f

if not rootfs:
    rootfs = data.getVar('IMAGE_ROOTFS', cfg_bb, 1)

if not rootfs:
    bb.fatal("IMAGE_ROOTFS not defined")

data.setVar('IMAGE_ROOTFS', rootfs, cfg_bb)

from copy import copy, deepcopy
localdata = data.createCopy(cfg_bb)

overrides = data.getVar('OVERRIDES', localdata)
if not overrides:
    bb.fatal("OVERRIDES not defined.")
data.setVar('OVERRIDES', '%s:%s' % (overrides, type), localdata)
data.update_data(localdata)
data.setVar('OVERRIDES', overrides, localdata)

if '-n' in opthash:
    data.setVar('IMAGE_NAME', opthash['-n'], localdata)
if '--name' in opthash:
    data.setVar('IMAGE_NAME', opthash['--name'], localdata)

topdir = data.getVar('TOPDIR', localdata, 1) or os.getcwd()

cmd = data.getVar('IMAGE_CMD', localdata, 1)
if not cmd:
    bb.fatal("IMAGE_CMD not defined")

outdir = data.getVar('DEPLOY_DIR_IMAGE', localdata, 1)
if not outdir:
    bb.fatal('DEPLOY_DIR_IMAGE not defined')
mkdirhier(outdir)

#depends = data.getVar('IMAGE_DEPENDS', localdata, 1) or ""
#if depends:
#    bb.note("Spawning bbmake to satisfy dependencies: %s" % depends)
#    ret = os.system('bbmake %s' % depends)
#    if ret != 0:
#        bb.error("executing bbmake to satisfy dependencies")

bb.note("Executing %s" % cmd)
data.setVar('image_cmd', cmd, localdata)
data.setVarFlag('image_cmd', 'func', 1, localdata)
try:
    bb.build.exec_func('image_cmd', localdata)
except bb.build.FuncFailed:
    sys.exit(1)
#ret = os.system(cmd)
#sys.exit(ret)
@@ -22,203 +22,113 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import os
import sys, logging
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)),
                                'lib'))

import optparse
import warnings
from traceback import format_exception
try:
    import bb
except RuntimeError, exc:
    sys.exit(str(exc))
from bb import event
import bb.msg
import sys, os, getopt, re, time, optparse
sys.path.insert(0,os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), 'lib'))
import bb
from bb import cooker
from bb import ui
from bb import server
from bb.server import none
#from bb.server import xmlrpc

__version__ = "1.11.0"
logger = logging.getLogger("BitBake")
__version__ = "1.8.11"


class BBConfiguration(object):
#============================================================================#
# BBOptions
#============================================================================#
class BBConfiguration( object ):
    """
    Manages build options and configurations for one run
    """

    def __init__(self, options):
    def __init__( self, options ):
        for key, val in options.__dict__.items():
            setattr(self, key, val)
        self.pkgs_to_build = []
            setattr( self, key, val )


def get_ui(config):
    if config.ui:
        interface = config.ui
    else:
        interface = 'knotty'

    try:
        # Dynamically load the UI based on the ui name. Although we
        # suggest a fixed set this allows you to have flexibility in which
        # ones are available.
        module = __import__("bb.ui", fromlist = [interface])
        return getattr(module, interface).main
    except AttributeError:
        sys.exit("FATAL: Invalid user interface '%s' specified.\n"
                 "Valid interfaces: depexp, goggle, ncurses, knotty [default]." % interface)


# Display bitbake/OE warnings via the BitBake.Warnings logger, ignoring others
warnlog = logging.getLogger("BitBake.Warnings")
_warnings_showwarning = warnings.showwarning
def _showwarning(message, category, filename, lineno, file=None, line=None):
    if file is not None:
        if _warnings_showwarning is not None:
            _warnings_showwarning(message, category, filename, lineno, file, line)
    else:
        s = warnings.formatwarning(message, category, filename, lineno)
        warnlog.warn(s)

warnings.showwarning = _showwarning
warnings.filterwarnings("ignore")
warnings.filterwarnings("default", module="(<string>$|(oe|bb)\.)")
warnings.filterwarnings("ignore", category=PendingDeprecationWarning)
warnings.filterwarnings("ignore", category=ImportWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning, module="<string>$")
warnings.filterwarnings("ignore", message="With-statements now directly support multiple context managers")

#============================================================================#
# main
#============================================================================#

def main():
    parser = optparse.OptionParser(
        version = "BitBake Build Tool Core version %s, %%prog version %s" % (bb.__version__, __version__),
        usage = """%prog [options] [package ...]
    parser = optparse.OptionParser( version = "BitBake Build Tool Core version %s, %%prog version %s" % ( bb.__version__, __version__ ),
        usage = """%prog [options] [package ...]

Executes the specified task (default is 'build') for a given set of BitBake files.
It expects that BBFILES is defined, which is a space separated list of files to
be executed.  BBFILES does support wildcards.
Default BBFILES are the .bb files in the current directory.""")
Default BBFILES are the .bb files in the current directory.""" )

    parser.add_option("-b", "--buildfile", help = "execute the task against this .bb file, rather than a package from BBFILES.",
                      action = "store", dest = "buildfile", default = None)
    parser.add_option( "-b", "--buildfile", help = "execute the task against this .bb file, rather than a package from BBFILES.",
                       action = "store", dest = "buildfile", default = None )

    parser.add_option("-k", "--continue", help = "continue as much as possible after an error. While the target that failed, and those that depend on it, cannot be remade, the other dependencies of these targets can be processed all the same.",
                      action = "store_false", dest = "abort", default = True)
    parser.add_option( "-k", "--continue", help = "continue as much as possible after an error. While the target that failed, and those that depend on it, cannot be remade, the other dependencies of these targets can be processed all the same.",
                       action = "store_false", dest = "abort", default = True )

    parser.add_option("-a", "--tryaltconfigs", help = "continue with builds by trying to use alternative providers where possible.",
                      action = "store_true", dest = "tryaltconfigs", default = False)
    parser.add_option( "-f", "--force", help = "force run of specified cmd, regardless of stamp status",
                       action = "store_true", dest = "force", default = False )

    parser.add_option("-f", "--force", help = "force run of specified cmd, regardless of stamp status",
                      action = "store_true", dest = "force", default = False)
    parser.add_option( "-i", "--interactive", help = "drop into the interactive mode also called the BitBake shell.",
                       action = "store_true", dest = "interactive", default = False )

    parser.add_option("-c", "--cmd", help = "Specify task to execute. Note that this only executes the specified task for the providee and the packages it depends on, i.e. 'compile' does not implicitly call stage for the dependencies (IOW: use only if you know what you are doing). Depending on the base.bbclass a listtasks tasks is defined and will show available tasks",
                      action = "store", dest = "cmd")
    parser.add_option( "-c", "--cmd", help = "Specify task to execute. Note that this only executes the specified task for the providee and the packages it depends on, i.e. 'compile' does not implicitly call stage for the dependencies (IOW: use only if you know what you are doing). Depending on the base.bbclass a listtasks tasks is defined and will show available tasks",
                       action = "store", dest = "cmd" )

    parser.add_option("-r", "--read", help = "read the specified file before bitbake.conf",
                      action = "append", dest = "file", default = [])
    parser.add_option( "-r", "--read", help = "read the specified file before bitbake.conf",
                       action = "append", dest = "file", default = [] )

    parser.add_option("-v", "--verbose", help = "output more chit-chat to the terminal",
                      action = "store_true", dest = "verbose", default = False)
    parser.add_option( "-v", "--verbose", help = "output more chit-chat to the terminal",
                       action = "store_true", dest = "verbose", default = False )

    parser.add_option("-D", "--debug", help = "Increase the debug level. You can specify this more than once.",
    parser.add_option( "-D", "--debug", help = "Increase the debug level. You can specify this more than once.",
                       action = "count", dest="debug", default = 0)

    parser.add_option("-n", "--dry-run", help = "don't execute, just go through the motions",
                      action = "store_true", dest = "dry_run", default = False)
    parser.add_option( "-n", "--dry-run", help = "don't execute, just go through the motions",
                       action = "store_true", dest = "dry_run", default = False )

    parser.add_option("-S", "--dump-signatures", help = "don't execute, just dump out the signature construction information",
                      action = "store_true", dest = "dump_signatures", default = False)
    parser.add_option( "-p", "--parse-only", help = "quit after parsing the BB files (developers only)",
                       action = "store_true", dest = "parse_only", default = False )

    parser.add_option("-p", "--parse-only", help = "quit after parsing the BB files (developers only)",
                      action = "store_true", dest = "parse_only", default = False)
    parser.add_option( "-d", "--disable-psyco", help = "disable using the psyco just-in-time compiler (not recommended)",
                       action = "store_true", dest = "disable_psyco", default = False )

    parser.add_option("-d", "--disable-psyco", help = "disable using the psyco just-in-time compiler (not recommended)",
                      action = "store_true", dest = "disable_psyco", default = False)
    parser.add_option( "-s", "--show-versions", help = "show current and preferred versions of all packages",
                       action = "store_true", dest = "show_versions", default = False )

    parser.add_option("-s", "--show-versions", help = "show current and preferred versions of all packages",
                      action = "store_true", dest = "show_versions", default = False)
    parser.add_option( "-e", "--environment", help = "show the global or per-package environment (this is what used to be bbread)",
                       action = "store_true", dest = "show_environment", default = False )

    parser.add_option("-e", "--environment", help = "show the global or per-package environment (this is what used to be bbread)",
                      action = "store_true", dest = "show_environment", default = False)
    parser.add_option( "-g", "--graphviz", help = "emit the dependency trees of the specified packages in the dot syntax",
                       action = "store_true", dest = "dot_graph", default = False )

    parser.add_option("-g", "--graphviz", help = "emit the dependency trees of the specified packages in the dot syntax",
                      action = "store_true", dest = "dot_graph", default = False)
    parser.add_option( "-I", "--ignore-deps", help = """Stop processing at the given list of dependencies when generating dependency graphs. This can help to make the graph more appealing""",
                       action = "append", dest = "ignored_dot_deps", default = [] )

    parser.add_option("-I", "--ignore-deps", help = """Assume these dependencies don't exist and are already provided (equivalent to ASSUME_PROVIDED). Useful to make dependency graphs more appealing""",
                      action = "append", dest = "extra_assume_provided", default = [])
    parser.add_option( "-l", "--log-domains", help = """Show debug logging for the specified logging domains""",
                       action = "append", dest = "debug_domains", default = [] )

    parser.add_option("-l", "--log-domains", help = """Show debug logging for the specified logging domains""",
                      action = "append", dest = "debug_domains", default = [])

    parser.add_option("-P", "--profile", help = "profile the command and print a report",
                      action = "store_true", dest = "profile", default = False)

    parser.add_option("-u", "--ui", help = "userinterface to use",
                      action = "store", dest = "ui")

    parser.add_option("", "--revisions-changed", help = "Set the exit code depending on whether upstream floating revisions have changed or not",
                      action = "store_true", dest = "revisions_changed", default = False)
    parser.add_option( "-P", "--profile", help = "profile the command and print a report",
                       action = "store_true", dest = "profile", default = False )

    options, args = parser.parse_args(sys.argv)

    configuration = BBConfiguration(options)
    configuration.pkgs_to_build = []
    configuration.pkgs_to_build.extend(args[1:])
    configuration.initial_path = os.environ['PATH']

    ui_main = get_ui(configuration)
    cooker = bb.cooker.BBCooker(configuration)

    loghandler = event.LogHandler()
    logger.addHandler(loghandler)
    if configuration.profile:
        try:
            import cProfile as profile
        except:
            import profile

    #server = bb.server.xmlrpc
    server = bb.server.none

    # Save a logfile for cooker into the current working directory. When the
    # server is daemonized this logfile will be truncated.
    cooker_logfile = os.path.join(os.getcwd(), "cooker.log")

    bb.utils.init_logger(bb.msg, configuration.verbose, configuration.debug,
                         configuration.debug_domains)

    # Clear away any spurious environment variables. But don't wipe the
    # environment totally. This is necessary to ensure the correct operation
    # of the UIs (e.g. for DISPLAY, etc.)
    bb.utils.clean_environment()

    cooker = bb.cooker.BBCooker(configuration, server)
    cooker.parseCommandLine()

    serverinfo = server.BitbakeServerInfo(cooker.server)

    server.BitBakeServerFork(cooker, cooker.server, serverinfo, cooker_logfile)
    del cooker

    logger.removeHandler(loghandler)

    # Setup a connection to the server (cooker)
    server_connection = server.BitBakeServerConnection(serverinfo)

    # Launch the UI
    if configuration.ui:
        ui = configuration.ui
        profile.runctx("cooker.cook()", globals(), locals(), "profile.log")
        import pstats
        p = pstats.Stats('profile.log')
        p.sort_stats('time')
        p.print_stats()
        p.print_callers()
        p.sort_stats('cumulative')
        p.print_stats()
    else:
        ui = "knotty"

    try:
        return server.BitbakeUILauch().launch(serverinfo, ui_main, server_connection.connection, server_connection.events)
    finally:
        server_connection.terminate()
        cooker.cook()


if __name__ == "__main__":
    try:
        ret = main()
    except Exception:
        ret = 1
        import traceback
        traceback.print_exc(5)
    sys.exit(ret)
    main()
@@ -1,12 +0,0 @@
#!/usr/bin/env python
import os
import sys
import warnings
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), 'lib'))

import bb.siggen

if len(sys.argv) > 2:
    bb.siggen.compare_sigfiles(sys.argv[1], sys.argv[2])
else:
    bb.siggen.dump_sigfile(sys.argv[1])
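
# Illustrative invocations (assumed; inferred from the argument handling
# above — the script's own filename is not shown in this hunk):
#   <this-script> sigfile             -> dump one signature file
#   <this-script> sigfile1 sigfile2   -> show what differs between two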
@@ -1,158 +0,0 @@
#!/usr/bin/env python

# This script has subcommands which operate against your bitbake layers, either
# displaying useful information, or acting against them.
# Currently, it only provides a show_appends command, which shows you what
# bbappends are in effect, and warns you if you have appends which are not being
# utilized.
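#
# Illustrative invocations (script name assumed; the subcommand names come
# from the do_show_appends/do_show_layers methods defined below, and
# 'show_appends' is the default when no subcommand is given):
#   $ bitbake-layers show_appends
#   $ bitbake-layers show_layers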

import cmd
import logging
import os.path
import sys

bindir = os.path.dirname(__file__)
topdir = os.path.dirname(bindir)
sys.path[0:0] = [os.path.join(topdir, 'lib')]

import bb.cache
import bb.cooker
import bb.providers
from bb.cooker import state
from bb.server import none


logger = logging.getLogger('BitBake')
default_cmd = 'show_appends'


def main(args):
    logging.basicConfig(format='%(levelname)s: %(message)s')
    bb.utils.clean_environment()

    cmds = Commands()
    if args:
        cmds.onecmd(' '.join(args))
    else:
        cmds.onecmd(default_cmd)
    return cmds.returncode


class Commands(cmd.Cmd):
    def __init__(self):
        cmd.Cmd.__init__(self)

        self.returncode = 0
        self.config = Config(parse_only=True)
        self.cooker = bb.cooker.BBCooker(self.config,
                                         bb.server.none)
        self.config_data = self.cooker.configuration.data
        bb.providers.logger.setLevel(logging.ERROR)
        self.prepare_cooker()

    def prepare_cooker(self):
        sys.stderr.write("Parsing recipes..")
        logger.setLevel(logging.ERROR)

        try:
            while self.cooker.state in (state.initial, state.parsing):
                self.cooker.updateCache()
        except KeyboardInterrupt:
            self.cooker.shutdown()
            self.cooker.updateCache()
            sys.exit(2)

        logger.setLevel(logging.INFO)
        sys.stderr.write("done.\n")

        self.cooker_data = self.cooker.status
        self.cooker_data.appends = self.cooker.appendlist

    def do_show_layers(self, args):
        logger.info(str(self.config_data.getVar('BBLAYERS', True)))

    def do_show_appends(self, args):
        if not self.cooker_data.appends:
            logger.info('No append files found')
            return

        logger.info('State of append files:')

        for pn in self.cooker_data.pkg_pn:
            self.show_appends_for_pn(pn)

        self.show_appends_with_no_recipes()

    def show_appends_for_pn(self, pn):
        filenames = self.cooker_data.pkg_pn[pn]

        best = bb.providers.findBestProvider(pn,
                                             self.cooker.configuration.data,
                                             self.cooker_data,
                                             self.cooker_data.pkg_pn)
        best_filename = os.path.basename(best[3])

        appended, missing = self.get_appends_for_files(filenames)
        if appended:
            for basename, appends in appended:
                logger.info('%s:', basename)
                for append in appends:
                    logger.info('  %s', append)

            if best_filename in missing:
                logger.warn('%s: missing append for preferred version',
                            best_filename)
                self.returncode |= 1

    def get_appends_for_files(self, filenames):
        appended, notappended = set(), set()
        for filename in filenames:
            _, cls = bb.cache.Cache.virtualfn2realfn(filename)
            if cls:
                continue

            basename = os.path.basename(filename)
            appends = self.cooker_data.appends.get(basename)
            if appends:
                appended.add((basename, frozenset(appends)))
            else:
                notappended.add(basename)
        return appended, notappended

    def show_appends_with_no_recipes(self):
        recipes = set(os.path.basename(f)
                      for f in self.cooker_data.pkg_fn.iterkeys())
        appended_recipes = self.cooker_data.appends.iterkeys()
        appends_without_recipes = [self.cooker_data.appends[recipe]
                                   for recipe in appended_recipes
                                   if recipe not in recipes]
        if appends_without_recipes:
            appendlines = ('  %s' % append
                           for appends in appends_without_recipes
                           for append in appends)
            logger.warn('No recipes available for:\n%s',
                        '\n'.join(appendlines))
            self.returncode |= 4

    def do_EOF(self, line):
        return True


class Config(object):
    def __init__(self, **options):
        self.pkgs_to_build = []
        self.debug_domains = []
        self.extra_assume_provided = []
        self.file = []
        self.debug = 0
        self.__dict__.update(options)

    def __getattr__(self, attribute):
        try:
            return super(Config, self).__getattribute__(attribute)
        except AttributeError:
            return None


if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]) or 0)
@@ -1,120 +0,0 @@
#!/usr/bin/env python

import os
import sys
import warnings
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), 'lib'))

try:
    import cPickle as pickle
except ImportError:
    import pickle
    bb.msg.note(1, bb.msg.domain.Cache, "Importing cPickle failed. Falling back to a very slow implementation.")

class BBConfiguration(object):
    """
    Manages build options and configurations for one run
    """

    def __init__(self, debug, debug_domains):
        setattr(self, "data", {})
        setattr(self, "file", [])
        setattr(self, "cmd", None)
        setattr(self, "dump_signatures", True)
        setattr(self, "debug", debug)
        setattr(self, "debug_domains", debug_domains)

_warnings_showwarning = warnings.showwarning
def _showwarning(message, category, filename, lineno, file=None, line=None):
    """Display python warning messages using bb.msg"""
    if file is not None:
        if _warnings_showwarning is not None:
            _warnings_showwarning(message, category, filename, lineno, file, line)
    else:
        s = warnings.formatwarning(message, category, filename, lineno)
        s = s.split("\n")[0]
        bb.msg.warn(None, s)

warnings.showwarning = _showwarning
warnings.simplefilter("ignore", DeprecationWarning)

import bb.event

# Need to map our I/O correctly. stdout is a pipe to the server expecting
# events. We save this and then map stdout to stderr.

eventfd = os.dup(sys.stdout.fileno())
bb.event.worker_pipe = os.fdopen(eventfd, 'w', 0)

# map stdout to stderr
os.dup2(sys.stderr.fileno(), sys.stdout.fileno())

# Replace those fds with our own
#logout = data.expand("${TMPDIR}/log/stdout.%s" % os.getpid(), self.cfgData, True)
#mkdirhier(os.path.dirname(logout))
#newso = open("/tmp/stdout.%s" % os.getpid(), 'w')
#os.dup2(newso.fileno(), sys.stdout.fileno())
#os.dup2(newso.fileno(), sys.stderr.fileno())

# Don't read from stdin from the parent
si = file("/dev/null", 'r')
os.dup2(si.fileno(), sys.stdin.fileno())

# We don't want to see signals to our parent, e.g. Ctrl+C
os.setpgrp()

# Save out the PID so that the event can include it the
# events
bb.event.worker_pid = os.getpid()
bb.event.useStdout = False

hashfile = sys.argv[1]
buildfile = sys.argv[2]
taskname = sys.argv[3]

import bb.cooker

p = pickle.Unpickler(file(hashfile, "rb"))
hashdata = p.load()

debug = hashdata["msg-debug"]
debug_domains = hashdata["msg-debug-domains"]
verbose = hashdata["verbose"]

bb.utils.init_logger(bb.msg, verbose, debug, debug_domains)

cooker = bb.cooker.BBCooker(BBConfiguration(debug, debug_domains), None)
cooker.parseConfiguration()

cooker.bb_cache = bb.cache.init(cooker)
cooker.status = bb.cache.CacheData()

(fn, cls) = cooker.bb_cache.virtualfn2realfn(buildfile)
buildfile = cooker.matchFile(fn)
fn = cooker.bb_cache.realfn2virtual(buildfile, cls)

cooker.buildSetVars()

# Load data into the cache for fn and parse the loaded cache data
the_data = cooker.bb_cache.loadDataFull(fn, cooker.get_file_appends(fn), cooker.configuration.data)
cooker.bb_cache.setData(fn, buildfile, the_data)
cooker.bb_cache.handle_data(fn, cooker.status)

#exportlist = bb.utils.preserved_envvars_export_list()
#bb.utils.filter_environment(exportlist)

if taskname.endswith("_setscene"):
    the_data.setVarFlag(taskname, "quieterrors", "1")

bb.parse.siggen.set_taskdata(hashdata["hashes"], hashdata["deps"])

for h in hashdata["hashes"]:
    bb.data.setVar("BBHASH_%s" % h, hashdata["hashes"][h], the_data)
for h in hashdata["deps"]:
    bb.data.setVar("BBHASHDEPS_%s" % h, hashdata["deps"][h], the_data)

ret = 0
if sys.argv[4] != "True":
    ret = bb.build.exec_task(fn, taskname, the_data)
sys.exit(ret)
@@ -20,7 +20,7 @@
import optparse, os, sys

# bitbake
sys.path.append(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'lib'))
sys.path.append(os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), 'lib'))
import bb
import bb.parse
from string import split, join
@@ -48,7 +48,7 @@ class HTMLFormatter:
    From pydoc... almost identical at least
    """
    while pairs:
        (a, b) = pairs[0]
        (a,b) = pairs[0]
        text = join(split(text, a), b)
        pairs = pairs[1:]
    return text
@@ -87,7 +87,7 @@ class HTMLFormatter:

        return txt + ",".join(txts)

    def groups(self, item):
    def groups(self,item):
        """
        Create HTML to link to related groups
        """
@@ -99,12 +99,12 @@ class HTMLFormatter:
        txt = "<p><b>See also:</b><br>"
        txts = []
        for group in item.groups():
            txts.append( """<a href="group%s.html">%s</a> """ % (group, group) )
            txts.append( """<a href="group%s.html">%s</a> """ % (group,group) )

        return txt + ",".join(txts)


    def createKeySite(self, item):
    def createKeySite(self,item):
        """
        Create a site for a key. It contains the header/navigator, a heading,
        the description, links to related keys and to the groups.
@@ -149,7 +149,8 @@ class HTMLFormatter:
        """

        groups = ""
        sorted_groups = sorted(doc.groups())
        sorted_groups = doc.groups()
        sorted_groups.sort()
        for group in sorted_groups:
            groups += """<a href="group%s.html">%s</a><br>""" % (group, group)

@@ -184,7 +185,8 @@ class HTMLFormatter:
        Create Overview of all avilable keys
        """
        keys = ""
        sorted_keys = sorted(doc.doc_keys())
        sorted_keys = doc.doc_keys()
        sorted_keys.sort()
        for key in sorted_keys:
            keys += """<a href="key%s.html">%s</a><br>""" % (key, key)

@@ -212,7 +214,7 @@ class HTMLFormatter:
            description += "<h2 Description of Grozp %s</h2>" % gr
            description += _description

            items.sort(lambda x, y:cmp(x.name(), y.name()))
            items.sort(lambda x,y:cmp(x.name(),y.name()))
            for group in items:
                groups += """<a href="key%s.html">%s</a><br>""" % (group.name(), group.name())

@@ -341,7 +343,7 @@ class DocumentationItem:
    def addGroup(self, group):
        self._groups.append(group)

    def addRelation(self, relation):
    def addRelation(self,relation):
        self._related.append(relation)

    def sort(self):
@@ -394,7 +396,7 @@ class Documentation:
        """
        return self.__groups.keys()

    def group_content(self, group_name):
    def group_content(self,group_name):
        """
        Return a list of keys/names that are in a specefic
        group or the empty list
@@ -410,7 +412,7 @@ def parse_cmdline(args):
    Parse the CMD line and return the result as a n-tuple
    """

    parser = optparse.OptionParser( version = "Bitbake Documentation Tool Core version %s, %%prog version %s" % (bb.__version__, __version__))
    parser = optparse.OptionParser( version = "Bitbake Documentation Tool Core version %s, %%prog version %s" % (bb.__version__,__version__))
    usage = """%prog [options]

Create a set of html pages (documentation) for a bitbake.conf....
@@ -426,7 +428,7 @@ Create a set of html pages (documentation) for a bitbake.conf....
    parser.add_option( "-D", "--debug", help = "Increase the debug level",
                       action = "count", dest = "debug", default = 0 )

    parser.add_option( "-v", "--verbose", help = "output more chit-char to the terminal",
    parser.add_option( "-v","--verbose", help = "output more chit-char to the terminal",
                       action = "store_true", dest = "verbose", default = False )

    options, args = parser.parse_args( sys.argv )
@@ -441,7 +443,7 @@ def main():
    The main Method
    """

    (config_file, output_dir) = parse_cmdline( sys.argv )
    (config_file,output_dir) = parse_cmdline( sys.argv )

    # right to let us load the file now
    try:
@@ -451,8 +453,6 @@ def main():
    except bb.parse.ParseError:
        bb.fatal( "Unable to parse %s" % config_file )

    if isinstance(documentation, dict):
        documentation = documentation[""]

    # Assuming we've the file loaded now, we will initialize the 'tree'
    doc = Documentation()
@@ -497,7 +497,7 @@ def main():
        doc.insert_doc_item(doc_ins)

    # let us create the HTML now
    bb.utils.mkdirhier(output_dir)
    bb.mkdirhier(output_dir)
    os.chdir(output_dir)

    # Let us create the sites now. We do it in the following order
@@ -1,24 +1,4 @@
" Vim filetype detection file
" Language:     BitBake
" Author:       Ricardo Salveti <rsalveti@rsalveti.net>
" Copyright:    Copyright (C) 2008 Ricardo Salveti <rsalveti@rsalveti.net>
" Licence:      You may redistribute this under the same terms as Vim itself
"
" This sets up the syntax highlighting for BitBake files, like .bb, .bbclass and .inc

if &compatible || version < 600
    finish
endif

" .bb and .bbclass
au BufNewFile,BufRead *.b{b,bclass} set filetype=bitbake

" .inc
au BufNewFile,BufRead *.inc set filetype=bitbake

" .conf
au BufNewFile,BufRead *.conf
    \ if (match(expand("%:p:h"), "conf") > 0) |
    \     set filetype=bitbake |
    \ endif

au BufNewFile,BufRead *.bb setfiletype bitbake
au BufNewFile,BufRead *.bbclass setfiletype bitbake
au BufNewFile,BufRead *.inc setfiletype bitbake
" au BufNewFile,BufRead *.conf setfiletype bitbake

@@ -1 +0,0 @@
set sts=4 sw=4 et
@@ -1,85 +0,0 @@
" Vim plugin file
" Purpose:      Create a template for new bb files
" Author:       Ricardo Salveti <rsalveti@gmail.com>
" Copyright:    Copyright (C) 2008 Ricardo Salveti <rsalveti@gmail.com>
"
" This file is licensed under the MIT license, see COPYING.MIT in
" this source distribution for the terms.
"
" Based on the gentoo-syntax package
"
" Will try to use git to find the user name and email

if &compatible || v:version < 600
    finish
endif

fun! <SID>GetUserName()
    let l:user_name = system("git-config --get user.name")
    if v:shell_error
        return "Unknow User"
    else
        return substitute(l:user_name, "\n", "", "")
endfun

fun! <SID>GetUserEmail()
    let l:user_email = system("git-config --get user.email")
    if v:shell_error
        return "unknow@user.org"
    else
        return substitute(l:user_email, "\n", "", "")
endfun

fun! BBHeader()
    let l:current_year = strftime("%Y")
    let l:user_name = <SID>GetUserName()
    let l:user_email = <SID>GetUserEmail()
    0 put ='# Copyright (C) ' . l:current_year .
                \ ' ' . l:user_name . ' <' . l:user_email . '>'
    put ='# Released under the MIT license (see COPYING.MIT for the terms)'
    $
endfun

fun! NewBBTemplate()
    let l:paste = &paste
    set nopaste

    " Get the header
    call BBHeader()

    " New the bb template
    put ='DESCRIPTION = \"\"'
    put ='HOMEPAGE = \"\"'
    put ='LICENSE = \"\"'
    put ='SECTION = \"\"'
    put ='DEPENDS = \"\"'
    put ='PR = \"r0\"'
    put =''
    put ='SRC_URI = \"\"'

    " Go to the first place to edit
    0
    /^DESCRIPTION =/
    exec "normal 2f\""

    if paste == 1
        set paste
    endif
endfun

if !exists("g:bb_create_on_empty")
    let g:bb_create_on_empty = 1
endif

" disable in case of vimdiff
if v:progname =~ "vimdiff"
    let g:bb_create_on_empty = 0
endif

augroup NewBB
    au BufNewFile *.bb
                \ if g:bb_create_on_empty |
                \     call NewBBTemplate() |
                \ endif
augroup END
@@ -1,123 +1,120 @@
" Vim syntax file
" Language:     BitBake bb/bbclasses/inc
" Author:       Chris Larson <kergoth@handhelds.org>
"               Ricardo Salveti <rsalveti@rsalveti.net>
" Copyright:    Copyright (C) 2004 Chris Larson <kergoth@handhelds.org>
"               Copyright (C) 2008 Ricardo Salveti <rsalveti@rsalveti.net>
"
" Copyright (C) 2004 Chris Larson <kergoth@handhelds.org>
" This file is licensed under the MIT license, see COPYING.MIT in
" this source distribution for the terms.
"
" Syntax highlighting for bb, bbclasses and inc files.
" Language:     BitBake
" Maintainer:   Chris Larson <kergoth@handhelds.org>
" Filenames:    *.bb, *.bbclass

if version < 600
    syntax clear
elseif exists("b:current_syntax")
    finish
endif

syn case match


" Catch incorrect syntax (only matches if nothing else does)
"
" It's an entirely new type, just has specific syntax in shell and python code
syn match bbUnmatched "."

if &compatible || v:version < 600
    finish
endif
if exists("b:current_syntax")
    finish
endif

" Other

syn match bbComment "^#.*$" display contains=bbTodo
syn keyword bbTodo TODO FIXME XXX contained
syn match bbDelimiter "[(){}=]" contained
syn match bbQuote /['"]/ contained
syn match bbArrayBrackets "[\[\]]" contained


" BitBake strings

syn match bbContinue "\\$"
syn region bbString matchgroup=bbQuote start=/"/ skip=/\\$/ excludenl end=/"/ contained keepend contains=bbTodo,bbContinue,bbVarDeref
syn region bbString matchgroup=bbQuote start=/'/ skip=/\\$/ excludenl end=/'/ contained keepend contains=bbTodo,bbContinue,bbVarDeref


" BitBake variable metadata

syn keyword bbExportFlag export contained nextgroup=bbIdentifier skipwhite
syn match bbVarDeref "${[a-zA-Z0-9\-_\.]\+}" contained
syn match bbVarDef "^\(export\s*\)\?\([a-zA-Z0-9\-_\.]\+\(_[${}a-zA-Z0-9\-_\.]\+\)\?\)\s*\(:=\|+=\|=+\|\.=\|=\.\|?=\|=\)\@=" contains=bbExportFlag,bbIdentifier,bbVarDeref nextgroup=bbVarEq

syn match bbIdentifier "[a-zA-Z0-9\-_\.]\+" display contained
"syn keyword bbVarEq = display contained nextgroup=bbVarValue
syn match bbVarEq "\(:=\|+=\|=+\|\.=\|=\.\|?=\|=\)" contained nextgroup=bbVarValue
syn match bbVarValue ".*$" contained contains=bbString,bbVarDeref


" BitBake variable metadata flags
syn match bbVarFlagDef "^\([a-zA-Z0-9\-_\.]\+\)\(\[[a-zA-Z0-9\-_\.]\+\]\)\@=" contains=bbIdentifier nextgroup=bbVarFlagFlag
syn region bbVarFlagFlag matchgroup=bbArrayBrackets start="\[" end="\]\s*\(=\)\@=" keepend excludenl contained contains=bbIdentifier nextgroup=bbVarEq
"syn match bbVarFlagFlag "\[\([a-zA-Z0-9\-_\.]\+\)\]\s*\(=\)\@=" contains=bbIdentifier nextgroup=bbVarEq


" Functions!
syn match bbFunction "\h\w*" display contained


" BitBake python metadata
syn include @python syntax/python.vim
if exists("b:current_syntax")
    unlet b:current_syntax
endif

" BitBake syntax
syn keyword bbPythonFlag python contained nextgroup=bbFunction
syn match bbPythonFuncDef "^\(python\s\+\)\(\w\+\)\?\(\s*()\s*\)\({\)\@=" contains=bbPythonFlag,bbFunction,bbDelimiter nextgroup=bbPythonFuncRegion skipwhite
syn region bbPythonFuncRegion matchgroup=bbDelimiter start="{\s*$" end="^}\s*$" keepend contained contains=@python
"hi def link bbPythonFuncRegion Comment

" Matching case
syn case match

" Indicates the error when nothing is matched
syn match bbUnmatched "."

" Comments
syn cluster bbCommentGroup contains=bbTodo,@Spell
syn keyword bbTodo COMBAK FIXME TODO XXX contained
syn match bbComment "#.*$" contains=@bbCommentGroup

" String helpers
syn match bbQuote +['"]+ contained
syn match bbDelimiter "[(){}=]" contained
syn match bbArrayBrackets "[\[\]]" contained

" BitBake strings
syn match bbContinue "\\$"
syn region bbString matchgroup=bbQuote start=+"+ skip=+\\$+ excludenl end=+"+ contained keepend contains=bbTodo,bbContinue,bbVarDeref,bbVarPyValue,@Spell
syn region bbString matchgroup=bbQuote start=+'+ skip=+\\$+ excludenl end=+'+ contained keepend contains=bbTodo,bbContinue,bbVarDeref,bbVarPyValue,@Spell

" Vars definition
syn match bbExport "^export" nextgroup=bbIdentifier skipwhite
syn keyword bbExportFlag export contained nextgroup=bbIdentifier skipwhite
syn match bbIdentifier "[a-zA-Z0-9\-_\.\/\+]\+" display contained
syn match bbVarDeref "${[a-zA-Z0-9\-_\.\/\+]\+}" contained
syn match bbVarEq "\(:=\|+=\|=+\|\.=\|=\.\|?=\|=\)" contained nextgroup=bbVarValue
syn match bbVarDef "^\(export\s*\)\?\([a-zA-Z0-9\-_\.\/\+]\+\(_[${}a-zA-Z0-9\-_\.\/\+]\+\)\?\)\s*\(:=\|+=\|=+\|\.=\|=\.\|?=\|=\)\@=" contains=bbExportFlag,bbIdentifier,bbVarDeref nextgroup=bbVarEq
syn match bbVarValue ".*$" contained contains=bbString,bbVarDeref,bbVarPyValue
syn region bbVarPyValue start=+${@+ skip=+\\$+ excludenl end=+}+ contained contains=@python

" Vars metadata flags
syn match bbVarFlagDef "^\([a-zA-Z0-9\-_\.]\+\)\(\[[a-zA-Z0-9\-_\.]\+\]\)\@=" contains=bbIdentifier nextgroup=bbVarFlagFlag
syn region bbVarFlagFlag matchgroup=bbArrayBrackets start="\[" end="\]\s*\(=\)\@=" keepend excludenl contained contains=bbIdentifier nextgroup=bbVarEq

" Includes and requires
syn keyword bbInclude inherit include require contained
syn match bbIncludeRest ".*$" contained contains=bbString,bbVarDeref
syn match bbIncludeLine "^\(inherit\|include\|require\)\s\+" contains=bbInclude nextgroup=bbIncludeRest

" Add taks and similar
syn keyword bbStatement addtask addhandler after before EXPORT_FUNCTIONS contained
syn match bbStatementRest ".*$" skipwhite contained contains=bbStatement
syn match bbStatementLine "^\(addtask\|addhandler\|after\|before\|EXPORT_FUNCTIONS\)\s\+" contains=bbStatement nextgroup=bbStatementRest

" OE Important Functions
syn keyword bbOEFunctions do_fetch do_unpack do_patch do_configure do_compile do_stage do_install do_package contained

" Generic Functions
syn match bbFunction "\h[0-9A-Za-z_-]*" display contained contains=bbOEFunctions

" BitBake shell metadata
syn include @shell syntax/sh.vim
if exists("b:current_syntax")
    unlet b:current_syntax
endif
syn keyword bbShFakeRootFlag fakeroot contained
syn match bbShFuncDef "^\(fakeroot\s*\)\?\([0-9A-Za-z_-]\+\)\(python\)\@<!\(\s*()\s*\)\({\)\@=" contains=bbShFakeRootFlag,bbFunction,bbDelimiter nextgroup=bbShFuncRegion skipwhite
syn region bbShFuncRegion matchgroup=bbDelimiter start="{\s*$" end="^}\s*$" keepend contained contains=@shell

" BitBake python metadata
syn keyword bbPyFlag python contained
syn match bbPyFuncDef "^\(python\s\+\)\([0-9A-Za-z_-]\+\)\?\(\s*()\s*\)\({\)\@=" contains=bbPyFlag,bbFunction,bbDelimiter nextgroup=bbPyFuncRegion skipwhite
syn region bbPyFuncRegion matchgroup=bbDelimiter start="{\s*$" end="^}\s*$" keepend contained contains=@python
syn keyword bbFakerootFlag fakeroot contained nextgroup=bbFunction
syn match bbShellFuncDef "^\(fakeroot\s*\)\?\(\w\+\)\(python\)\@<!\(\s*()\s*\)\({\)\@=" contains=bbFakerootFlag,bbFunction,bbDelimiter nextgroup=bbShellFuncRegion skipwhite
syn region bbShellFuncRegion matchgroup=bbDelimiter start="{\s*$" end="^}\s*$" keepend contained contains=@shell
"hi def link bbShellFuncRegion Comment


" BitBake 'def'd python functions
syn keyword bbPyDef def contained
syn region bbPyDefRegion start='^\(def\s\+\)\([0-9A-Za-z_-]\+\)\(\s*(.*)\s*\):\s*$' end='^\(\s\|$\)\@!' contains=@python
syn keyword bbDef def contained
syn region bbDefRegion start='^def\s\+\w\+\s*([^)]*)\s*:\s*$' end='^\(\s\|$\)\@!' contains=@python

" Highlighting Definitions
hi def link bbUnmatched Error
hi def link bbInclude Include
hi def link bbTodo Todo
hi def link bbComment Comment
hi def link bbQuote String
hi def link bbString String
hi def link bbDelimiter Keyword
hi def link bbArrayBrackets Statement
hi def link bbContinue Special
hi def link bbExport Type
hi def link bbExportFlag Type
hi def link bbIdentifier Identifier
hi def link bbVarDeref PreProc
hi def link bbVarDef Identifier
hi def link bbVarValue String
hi def link bbShFakeRootFlag Type
hi def link bbFunction Function
hi def link bbPyFlag Type
hi def link bbPyDef Statement
hi def link bbStatement Statement
hi def link bbStatementRest Identifier
hi def link bbOEFunctions Special
hi def link bbVarPyValue PreProc

" BitBake statements
syn keyword bbStatement include inherit require addtask addhandler EXPORT_FUNCTIONS display contained
syn match bbStatementLine "^\(include\|inherit\|require\|addtask\|addhandler\|EXPORT_FUNCTIONS\)\s\+" contains=bbStatement nextgroup=bbStatementRest
syn match bbStatementRest ".*$" contained contains=bbString,bbVarDeref

" Highlight
"
hi def link bbArrayBrackets Statement
hi def link bbUnmatched Error
hi def link bbVarDeref String
hi def link bbContinue Special
hi def link bbDef Statement
hi def link bbPythonFlag Type
hi def link bbExportFlag Type
hi def link bbFakerootFlag Type
hi def link bbStatement Statement
hi def link bbString String
hi def link bbTodo Todo
hi def link bbComment Comment
hi def link bbOperator Operator
hi def link bbError Error
hi def link bbFunction Function
hi def link bbDelimiter Delimiter
hi def link bbIdentifier Identifier
hi def link bbVarEq Operator
hi def link bbQuote String
hi def link bbVarValue String

let b:current_syntax = "bb"
@@ -32,7 +32,7 @@ command.
\fBbitbake\fP is a program that executes the specified task (default is 'build')
for a given set of BitBake files.
.br
It expects that BBFILES is defined, which is a space separated list of files to
It expects that BBFILES is defined, which is a space seperated list of files to
be executed.  BBFILES does support wildcards.
.br
Default BBFILES are the .bb files in the current directory.
@@ -54,9 +54,6 @@ continue as much as possible after an error. While the target that failed, and
those that depend on it, cannot be remade, the other dependencies of these
targets can be processed all the same.
.TP
.B \-a, \-\-tryaltconfigs
continue with builds by trying to use alternative providers where possible.
.TP
.B \-f, \-\-force
force run of specified cmd, regardless of stamp status
.TP
@@ -67,7 +64,7 @@ drop into the interactive mode also called the BitBake shell.
Specify task to execute. Note that this only executes the specified task for
the providee and the packages it depends on, i.e. 'compile' does not implicitly
call stage for the dependencies (IOW: use only if you know what you are doing).
Depending on the base.bbclass a listtasks task is defined and will show
Depending on the base.bbclass a listtaks tasks is defined and will show
available tasks.
.TP
.B \-rFILE, \-\-read=FILE
@@ -100,13 +97,12 @@ emit the dependency trees of the specified packages in the dot syntax
.B \-IIGNORED\_DOT\_DEPS, \-\-ignore-deps=IGNORED_DOT_DEPS
Stop processing at the given list of dependencies when generating dependency
graphs. This can help to make the graph more appealing
.TP
.B \-lDEBUG_DOMAINS, \-\-log-domains=DEBUG_DOMAINS
Show debug logging for the specified logging domains
.TP
.B \-P, \-\-profile
profile the command and print a report
.TP
.\"
.\" Next option is only in BitBake 1.7.x (trunk)
.\"
.\".TP
.\".B \-lDEBUG_DOMAINS, \-\-log-domains=DEBUG_DOMAINS
.\"Show debug logging for the specified logging domains

.SH AUTHORS
BitBake was written by
@@ -45,7 +45,7 @@ endif
	$(call command,xsltproc --stringparam base.dir $@/ $(if $(htmlcssfile),--stringparam html.stylesheet $(htmlcssfile)) $(htmlxsl) $(manual),XSLTPROC $@ $(manual))

$(xmltotypes): $(manual)
	$(call command,xmlto --with-dblatex --extensions -o $(topdir)/$@ $@ $(manual),XMLTO $@ $(manual))
	$(call command,xmlto --extensions -o $(topdir)/$@ $@ $(manual),XMLTO $@ $(manual))

clean:
	rm -rf $(cleanfiles)
@@ -88,17 +88,6 @@ share common metadata between many packages.</para></listitem>
<varname>B</varname> = "pre${A}post"</screen></para>
<para>This results in <varname>A</varname> containing <literal>aval</literal> and <varname>B</varname> containing <literal>preavalpost</literal>.</para>
</section>
<section>
<title>Setting a default value (?=)</title>
<para><screen><varname>A</varname> ?= "aval"</screen></para>
<para>If <varname>A</varname> is set before the above is called, it will retain its previous value. If <varname>A</varname> is unset prior to the above call, <varname>A</varname> will be set to <literal>aval</literal>. Note that this assignment is immediate, so if there are multiple ?= assignments to a single variable, the first of those will be used.</para>
</section>
<section>
<title>Setting a default value (??=)</title>
<para><screen><varname>A</varname> ??= "somevalue"</screen></para>
<para><screen><varname>A</varname> ??= "someothervalue"</screen></para>
<para>If <varname>A</varname> is set before the above, it will retain that value. If <varname>A</varname> is unset prior to the above, <varname>A</varname> will be set to <literal>someothervalue</literal>. This is a lazy version of ?=, in that the assignment does not occur until the end of the parsing process, so that the last, rather than the first, ??= assignment to a given variable will be used.</para>
</section>
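
To make the ordering rules above concrete, here is a minimal Python sketch of ?= versus ??= (an illustrative model only, not BitBake's actual bb.data implementation; all names are hypothetical):

    # ?= is immediate: the first ?= assignment to a variable wins.
    # ??= is lazy: the last ??= wins, applied at the end of parsing.
    class Metadata:
        def __init__(self):
            self.values = {}   # settled values
            self.weak = {}     # pending ??= defaults

        def set_default(self, key, value):       # models A ?= "val"
            if key not in self.values:           # first ?= sticks
                self.values[key] = value

        def set_weak_default(self, key, value):  # models A ??= "val"
            self.weak[key] = value               # each ??= overwrites the last

        def finalize(self):                      # end of the parsing process
            for key, value in self.weak.items():
                self.values.setdefault(key, value)

    d = Metadata()
    d.set_default("A", "aval")
    d.set_weak_default("B", "somevalue")
    d.set_weak_default("B", "someothervalue")
    d.finalize()
    assert d.values == {"A": "aval", "B": "someothervalue"}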
<section>
<title>Immediate variable expansion (:=)</title>
<para>:= results in a variable's contents being expanded immediately, rather than when the variable is actually used.</para>
@@ -130,7 +119,7 @@ will be introduced.</para>
</section>
<section>
<title>Conditional metadata set</title>
<para>OVERRIDES is a <quote>:</quote> separated variable containing each item you want to satisfy conditions. So, if you have a variable which is conditional on <quote>arm</quote>, and <quote>arm</quote> is in OVERRIDES, then the <quote>arm</quote> specific version of the variable is used rather than the non-conditional version. Example:</para>
<para>OVERRIDES is a <quote>:</quote> seperated variable containing each item you want to satisfy conditions. So, if you have a variable which is conditional on <quote>arm</quote>, and <quote>arm</quote> is in OVERRIDES, then the <quote>arm</quote> specific version of the variable is used rather than the non-conditional version. Example:</para>
<para><screen><varname>OVERRIDES</varname> = "architecture:os:machine"
<varname>TEST</varname> = "defaultvalue"
<varname>TEST_os</varname> = "osspecificvalue"
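
A small Python sketch of how this OVERRIDES lookup behaves (illustrative only; BitBake's real resolution lives in bb.data):

    # A conditional version such as TEST_os wins over the plain TEST
    # when "os" appears in OVERRIDES; later OVERRIDES items take precedence.
    def resolve(variables, name):
        value = variables.get(name)
        for item in variables.get("OVERRIDES", "").split(":"):
            conditional = "%s_%s" % (name, item)
            if conditional in variables:
                value = variables[conditional]
        return value

    variables = {
        "OVERRIDES": "architecture:os:machine",
        "TEST": "defaultvalue",
        "TEST_os": "osspecificvalue",
    }
    assert resolve(variables, "TEST") == "osspecificvalue"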
|
||||
@@ -195,7 +184,7 @@ include</literal> directive.</para>
|
||||
<section>
|
||||
<title>Inheritance</title>
|
||||
<para><emphasis>NOTE:</emphasis> This is only supported in .bb and .bbclass files.</para>
|
||||
<para>The <literal>inherit</literal> directive is a means of specifying what classes of functionality your .bb requires. It is a rudimentary form of inheritance. For example, you can easily abstract out the tasks involved in building a package that uses autoconf and automake, and put that into a bbclass for your packages to make use of. A given bbclass is located by searching for classes/filename.oeclass in <envar>BBPATH</envar>, where filename is what you inherited.</para>
|
||||
<para>The <literal>inherit</literal> directive is a means of specifying what classes of functionality your .bb requires. It is a rudamentary form of inheritence. For example, you can easily abstract out the tasks involved in building a package that uses autoconf and automake, and put that into a bbclass for your packages to make use of. A given bbclass is located by searching for classes/filename.oeclass in <envar>BBPATH</envar>, where filename is what you inherited.</para>
|
||||
</section>
|
||||
<section>
|
||||
<title>Tasks</title>
|
||||
@@ -215,29 +204,19 @@ addtask printdate before do_build</screen></para>
|
||||
<para>BitBake allows to install event handlers. Events are triggered at certain points during operation, such as, the beginning of operation against a given .bb, the start of a given task, task failure, task success, et cetera. The intent was to make it easy to do things like email notifications on build failure.</para>
|
||||
<para><screen>addhandler myclass_eventhandler
|
||||
python myclass_eventhandler() {
|
||||
from bb.event import getName
|
||||
from bb.event import NotHandled, getName
|
||||
from bb import data
|
||||
|
||||
print("The name of the Event is %s" % getName(e))
|
||||
print("The file we run for is %s" % data.getVar('FILE', e.data, True))
|
||||
print "The name of the Event is %s" % getName(e)
|
||||
print "The file we run for is %s" % data.getVar('FILE', e.data, True)
|
||||
|
||||
return NotHandled
|
||||
}
|
||||
</screen></para><para>
|
||||
This event handler gets called every time an event is triggered. A global variable <varname>e</varname> is defined. <varname>e</varname>.data contains an instance of bb.data. With the getName(<varname>e</varname>)
|
||||
method one can get the name of the triggered event.</para><para>The above event handler prints the name
|
||||
of the event and the content of the <varname>FILE</varname> variable.</para>
|
||||
</section>
|
||||
<section>
|
||||
<title>Variants</title>
|
||||
<para>Two Bitbake features exist to facilitate the creation of multiple buildable incarnations from a single recipe file.</para>
|
||||
<para>The first is <varname>BBCLASSEXTEND</varname>. This variable is a space separated list of classes to utilize to "extend" the recipe for each variant. As an example, setting <screen>BBCLASSEXTEND = "native"</screen> results in a second incarnation of the current recipe being available. This second incarantion will have the "native" class inherited.</para>
|
||||
<para>The second feature is <varname>BBVERSIONS</varname>. This variable allows a single recipe to be able to build multiple versions of a project from a single recipe file, and allows you to specify conditional metadata (using the <varname>OVERRIDES</varname> mechanism) for a single version, or an optionally named range of versions:</para>
|
||||
<para><screen>BBVERSIONS = "1.0 2.0 git"
|
||||
SRC_URI_git = "git://someurl/somepath.git"</screen></para>
|
||||
<para><screen>BBVERSIONS = "1.0.[0-6]:1.0.0+ \
|
||||
1.0.[7-9]:1.0.7+"
|
||||
SRC_URI_append_1.0.7+ = "file://some_patch_which_the_new_versions_need.patch;patch=1"</screen></para>
|
||||
<para>Note that the name of the range will default to the original version of the recipe, so given OE, a recipe file of foo_1.0.0+.bb will default the name of its versions to 1.0.0+. This is useful, as the range name is not only placed into overrides, it's also made available for the metadata to use in the form of the <varname>BPV</varname> variable, for use in file:// search paths (<varname>FILESPATH</varname>).</para>
|
||||
</section>
|
||||
</section>
<section>
<title>Dependency Handling</title>

@@ -279,18 +258,16 @@ SRC_URI_append_1.0.7+ = "file://some_patch_which_the_new_versions_need.patch;pat

<title>Parsing</title>

<section>
<title>Configuration Files</title>

<para>The first of the classifications of metadata in BitBake is configuration metadata. This metadata is global, and therefore affects <emphasis>all</emphasis> packages and tasks which are executed.</para>

<para>BitBake will first search the current working directory for an optional "conf/bblayers.conf" configuration file. This file is expected to contain a BBLAYERS variable, which is a space-delimited list of 'layer' directories. For each directory in this list, a "conf/layer.conf" file is searched for and parsed, with the LAYERDIR variable set to the directory where the layer was found. The idea is that these files automatically set up BBPATH and other variables correctly for a given build directory.</para>

<para>BitBake will then expect to find 'conf/bitbake.conf' somewhere in the user-specified <envar>BBPATH</envar>. That configuration file generally has include directives to pull in any other metadata (generally files specific to architecture, machine, <emphasis>local</emphasis> and so on).</para>

<para>The first of the classifications of metadata in BitBake is configuration metadata. This metadata is global, and therefore affects <emphasis>all</emphasis> packages and tasks which are executed. Currently, BitBake has hardcoded knowledge of a single configuration file. It expects to find 'conf/bitbake.conf' somewhere in the user specified <envar>BBPATH</envar>. That configuration file generally has include directives to pull in any other metadata (generally files specific to architecture, machine, <emphasis>local</emphasis> and so on.</para>

<para>Only variable definitions and include directives are allowed in .conf files.</para>
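<para>As a sketch of the layer discovery just described (illustrative only; a trivial single-line reader stands in for BitBake's real parser, and real BBLAYERS assignments usually span several lines):</para>

<para><screen>import os

def find_layer_confs(builddir):
    """Return the conf/layer.conf path for each directory in BBLAYERS."""
    layers = []
    with open(os.path.join(builddir, "conf", "bblayers.conf")) as f:
        for line in f:
            if line.startswith("BBLAYERS"):
                # naive: take the quoted, space-delimited directory list
                layers = line.split('"')[1].split()
    confs = [os.path.join(d, "conf", "layer.conf") for d in layers]
    return [c for c in confs if os.path.exists(c)]</screen></para>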
</section>

<section>
<title>Classes</title>

<para>BitBake classes are our rudimentary inheritance mechanism. As briefly mentioned in the metadata introduction, they're parsed when an <literal>inherit</literal> directive is encountered, and they are located in classes/ relative to the dirs in <envar>BBPATH</envar>.</para>

<para>BitBake classes are our rudamentary inheritence mechanism. As briefly mentioned in the metadata introduction, they're parsed when an <literal>inherit</literal> directive is encountered, and they are located in classes/ relative to the dirs in <envar>BBPATH</envar>.</para>

</section>

<section>
<title>.bb Files</title>

<para>A BitBake (.bb) file is a logical unit of tasks to be executed. Normally this is a package to be built. Inter-.bb dependencies are obeyed. The files themselves are located via the <varname>BBFILES</varname> variable, which is set to a space separated list of .bb files, and does handle wildcards.</para>

<para>A BitBake (.bb) file is a logical unit of tasks to be executed. Normally this is a package to be built. Inter-.bb dependencies are obeyed. The files themselves are located via the <varname>BBFILES</varname> variable, which is set to a space seperated list of .bb files, and does handle wildcards.</para>
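<para>For illustration, the wildcard handling can be pictured as plain glob expansion over each space-separated entry (a sketch, not BitBake's actual file-collection code):</para>

<para><screen>import glob

bbfiles = "/path/to/bbfiles/*.bb /another/layer/recipes/*/*.bb"
recipes = []
for pattern in bbfiles.split():
    recipes.extend(glob.glob(pattern))</screen></para>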
</section>

</section>

</chapter>
@@ -316,9 +293,9 @@ a per URI parameters separated by a <quote>;</quote> consisting of a key and a v

<section>
<title>CVS File Fetcher</title>

<para>The URN for the CVS Fetcher is <emphasis>cvs</emphasis>. This Fetcher honors the variables <varname>DL_DIR</varname>, <varname>SRCDATE</varname>, <varname>FETCHCOMMAND_cvs</varname>, <varname>UPDATECOMMAND_cvs</varname>. <varname>DL_DIR</varname> specifies where a temporary checkout is saved, <varname>SRCDATE</varname> specifies which date to use when doing the fetching (the special value of "now" will cause the checkout to be updated on every build), <varname>FETCHCOMMAND</varname> and <varname>UPDATECOMMAND</varname> specify which executables should be used when doing the CVS checkout or update.

<para>The URN for the CVS Fetcher is <emphasis>cvs</emphasis>. This Fetcher honors the variables <varname>DL_DIR</varname>, <varname>SRCDATE</varname>, <varname>FETCHCOMMAND_cvs</varname>, <varname>UPDATECOMMAND_cvs</varname>. <varname>DL_DIRS</varname> specifies where a temporary checkout is saved, <varname>SRCDATE</varname> specifies which date to use when doing the fetching (the special value of "now" will cause the checkout to be updated on every build), <varname>FETCHCOMMAND</varname> and <varname>UPDATECOMMAND</varname> specify which executables should be used when doing the CVS checkout or update.

</para>

<para>The supported Parameters are <varname>module</varname>, <varname>tag</varname>, <varname>date</varname>, <varname>method</varname>, <varname>localdir</varname>, <varname>rsh</varname> and <varname>scmdata</varname>. The <varname>module</varname> specifies which module to check out, the <varname>tag</varname> describes which CVS TAG should be used for the checkout. By default the TAG is empty. A <varname>date</varname> can be specified to override the SRCDATE of the configuration to checkout a specific date. The special value of "now" will cause the checkout to be updated on every build. <varname>method</varname> is by default <emphasis>pserver</emphasis>; if <emphasis>ext</emphasis> is used, the <varname>rsh</varname> parameter will be evaluated and <varname>CVS_RSH</varname> will be set. Finally, <varname>localdir</varname> is used to checkout into a special directory relative to <varname>CVSDIR</varname>. If <varname>scmdata</varname> is set to <quote>keep</quote>, the <quote>CVS</quote> directories will be available during compile-time.

<para>The supported Parameters are <varname>module</varname>, <varname>tag</varname>, <varname>date</varname>, <varname>method</varname>, <varname>localdir</varname>, <varname>rsh</varname>. The <varname>module</varname> specifies which module to check out, the <varname>tag</varname> describes which CVS TAG should be used for the checkout by default the TAG is empty. A <varname>date</varname> can be specified to override the SRCDATE of the configuration to checkout a specific date. The special value of "now" will cause the checkout to be updated on every build.<varname>method</varname> is by default <emphasis>pserver</emphasis>, if <emphasis>ext</emphasis> is used the <varname>rsh</varname> parameter will be evaluated and <varname>CVS_RSH</varname> will be set. Finally <varname>localdir</varname> is used to checkout into a special directory relative to <varname>CVSDIR></varname>.

<screen><varname>SRC_URI</varname> = "cvs://CVSROOT;module=mymodule;tag=some-version;method=ext"
<varname>SRC_URI</varname> = "cvs://CVSROOT;module=mymodule;date=20060126;localdir=usethat"
</screen>
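<para>All fetchers share the ";key=value" parameter syntax shown above. As a minimal sketch of how such a SRC_URI entry can be split into its URI and its parameters (decode_src_uri is a hypothetical helper, not BitBake's bb.fetch API):</para>

<para><screen>def decode_src_uri(src_uri):
    # split off everything after the first ";" as parameters
    uri, _, rest = src_uri.partition(";")
    params = {}
    for chunk in rest.split(";"):
        if chunk:
            key, _, value = chunk.partition("=")
            params[key] = value
    return uri, params

print(decode_src_uri("cvs://CVSROOT;module=mymodule;tag=some-version;method=ext"))
# ('cvs://CVSROOT', {'module': 'mymodule', 'tag': 'some-version', 'method': 'ext'})</screen></para>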
@@ -351,7 +328,7 @@ will be tried first when fetching a file if that fails the actual file will be t

</para>

<para>This Fetcher honors the variables <varname>FETCHCOMMAND_svn</varname>, <varname>DL_DIR</varname>, <varname>SRCDATE</varname>. <varname>FETCHCOMMAND</varname> contains the subversion command, <varname>DL_DIR</varname> is the directory where tarballs will be saved, <varname>SRCDATE</varname> specifies which date to use when doing the fetching (the special value of "now" will cause the checkout to be updated on every build).
</para>

<para>The supported Parameters are <varname>proto</varname>, <varname>rev</varname> and <varname>scmdata</varname>. <varname>proto</varname> is the subversion protocol, <varname>rev</varname> is the subversion revision. If <varname>scmdata</varname> is set to <quote>keep</quote>, the <quote>.svn</quote> directories will be available during compile-time.

<para>The supported Parameters are <varname>proto</varname>, <varname>rev</varname>. <varname>proto</varname> is the subversion prototype, <varname>rev</varname> is the subversions revision.
</para>

<para><screen><varname>SRC_URI</varname> = "svn://svn.oe.handhelds.org/svn;module=vip;proto=http;rev=667"
<varname>SRC_URI</varname> = "svn://svn.oe.handhelds.org/svn/;module=opie;proto=svn+ssh;date=20060126"

@@ -364,7 +341,7 @@ will be tried first when fetching a file if that fails the actual file will be t

</para>

<para>The Variables <varname>DL_DIR</varname>, <varname>GITDIR</varname> are used. <varname>DL_DIR</varname> will be used to store the checked-out version. <varname>GITDIR</varname> will be used as the base directory where the git tree is cloned to.
</para>

<para>The Parameters are <emphasis>tag</emphasis>, <emphasis>protocol</emphasis> and <emphasis>scmdata</emphasis>. <emphasis>tag</emphasis> is a git tag, the default is <quote>master</quote>. <emphasis>protocol</emphasis> is the git protocol to use and defaults to <quote>rsync</quote>. If <emphasis>scmdata</emphasis> is set to <quote>keep</quote>, the <quote>.git</quote> directory will be available during compile-time.

<para>The Parameters are <emphasis>tag</emphasis>, <emphasis>protocol</emphasis>. <emphasis>tag</emphasis> is a git tag, the default is <quote>master</quote>. <emphasis>protocol</emphasis> is the git protocol to use and defaults to <quote>rsync</quote>.
</para>

<para><screen><varname>SRC_URI</varname> = "git://git.oe.handhelds.org/git/vip.git;tag=version-1"
<varname>SRC_URI</varname> = "git://git.oe.handhelds.org/git/vip.git;protocol=http"

@@ -375,7 +352,15 @@ will be tried first when fetching a file if that fails the actual file will be t
<chapter>
<title>The bitbake command</title>
<title>Commands</title>

<section>
<title>bbread</title>

<para>bbread is a command for displaying BitBake metadata. When run with no arguments, it has the core parse 'conf/bitbake.conf', as located in BBPATH, and displays that. If you supply a file on the command line, such as a .bb, then it parses that afterwards, using the aforementioned configuration metadata.</para>

<para><emphasis>NOTE: the standalone bbread command was removed. Instead of bbread, use bitbake -e.
</emphasis></para>

</section>

<section>
<title>bitbake</title>

<section>
<title>Introduction</title>

<para>bitbake is the primary command in the system. It facilitates executing tasks in a single .bb file, or executing a given task on a set of multiple .bb files, accounting for interdependencies amongst them.</para>
@@ -387,7 +372,7 @@ will be tried first when fetching a file if that fails the actual file will be t

usage: bitbake [options] [package ...]

Executes the specified task (default is 'build') for a given set of BitBake files.
It expects that BBFILES is defined, which is a space separated list of files to
It expects that BBFILES is defined, which is a space seperated list of files to
be executed. BBFILES does support wildcards.
Default BBFILES are the .bb files in the current directory.

@@ -409,7 +394,7 @@ options:
                        it depends on, i.e. 'compile' does not implicitly call
                        stage for the dependencies (IOW: use only if you know
                        what you are doing). Depending on the base.bbclass a
                        listtasks task is defined and will show available
                        listtasks tasks is defined and will show available
                        tasks
  -r FILE, --read=FILE  read the specified file before bitbake.conf
  -v, --verbose         output more chit-chat to the terminal

@@ -430,8 +415,6 @@ options:
                        the graph more appealing
  -l DEBUG_DOMAINS, --log-domains=DEBUG_DOMAINS
                        Show debug logging for the specified logging domains
  -P, --profile         profile the command and print a report

</screen>
</para>
@@ -478,12 +461,12 @@ Two files will be written into the current working directory, <emphasis>depends.

</section>

<section>
<title>Metadata</title>

<para>As you may have seen in the usage information, or in the information about .bb files, the BBFILES variable is how the bitbake tool locates its files. This variable is a space separated list of files that are available, and supports wildcards.
<para>As you may have seen in the usage information, or in the information about .bb files, the BBFILES variable is how the bitbake tool locates its files. This variable is a space seperated list of files that are available, and supports wildcards.

<example>
<title>Setting BBFILES</title>
<programlisting><varname>BBFILES</varname> = "/path/to/bbfiles/*.bb"</programlisting>
</example></para>

<para>With regard to dependencies, it expects the .bb to define a <varname>DEPENDS</varname> variable, which contains a space separated list of <quote>package names</quote>, which themselves are the <varname>PN</varname> variable. The <varname>PN</varname> variable is, in general, by default, set to a component of the .bb filename.</para>

<para>With regard to dependencies, it expects the .bb to define a <varname>DEPENDS</varname> variable, which contains a space seperated list of <quote>package names</quote>, which themselves are the <varname>PN</varname> variable. The <varname>PN</varname> variable is, in general, by default, set to a component of the .bb filename.</para>

<example>
<title>Depending on another .bb</title>
<para>a.bb:

@@ -530,5 +513,6 @@ BBFILE_PRIORITY_upstream = "5"
BBFILE_PRIORITY_local = "10"</screen>
</example>

</section>

</section>

</chapter>

</book>
@@ -3,7 +3,7 @@
#
# This is a copy on write dictionary and set which abuses classes to try and be nice and fast.
#
# Copyright (C) 2006 Tim Amsell
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
@@ -18,31 +18,31 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#Please Note:
# Be careful when using mutable types (ie Dict and Lists) - operations involving these are SLOW.
# Assign a file to __warn__ to get warnings about slow operations.
#
from __future__ import print_function
from inspect import getmro

import copy
import types
ImmutableTypes = (
    types.NoneType,
    bool,
    complex,
    float,
    int,
    long,
    tuple,
    frozenset,
    basestring
)

import types, sets
types.ImmutableTypes = tuple([ \
    types.BooleanType, \
    types.ComplexType, \
    types.FloatType, \
    types.IntType, \
    types.LongType, \
    types.NoneType, \
    types.TupleType, \
    sets.ImmutableSet] + \
    list(types.StringTypes))
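# Note: the two ImmutableTypes definitions above are the new and old sides
# of this diff; both feed the same isinstance() check in
# COWDictMeta.__setitem__ below. Illustratively:
#   isinstance("abc", ImmutableTypes)  -> True   (value stored directly)
#   isinstance({}, ImmutableTypes)     -> False  (value stored under a
#                                                 "__mutable__"-suffixed key)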
|
||||
|
||||
MUTABLE = "__mutable__"
|
||||
|
||||
class COWMeta(type):
|
||||
pass
|
||||
|
||||
|
||||
class COWDictMeta(COWMeta):
|
||||
__warn__ = False
|
||||
__hasmutable__ = False
|
||||
@@ -61,12 +61,12 @@ class COWDictMeta(COWMeta):
|
||||
__call__ = cow
|
||||
|
||||
def __setitem__(cls, key, value):
|
||||
if not isinstance(value, ImmutableTypes):
|
||||
if not isinstance(value, types.ImmutableTypes):
|
||||
if not isinstance(value, COWMeta):
|
||||
cls.__hasmutable__ = True
|
||||
key += MUTABLE
|
||||
setattr(cls, key, value)
|
||||
|
||||
|
||||
def __getmutable__(cls, key, readonly=False):
|
||||
nkey = key + MUTABLE
|
||||
try:
|
||||
@@ -79,10 +79,10 @@ class COWDictMeta(COWMeta):
|
||||
return value
|
||||
|
||||
if not cls.__warn__ is False and not isinstance(value, COWMeta):
|
||||
print("Warning: Doing a copy because %s is a mutable type." % key, file=cls.__warn__)
|
||||
print >> cls.__warn__, "Warning: Doing a copy because %s is a mutable type." % key
|
||||
try:
|
||||
value = value.copy()
|
||||
except AttributeError as e:
|
||||
except AttributeError, e:
|
||||
value = copy.copy(value)
|
||||
setattr(cls, nkey, value)
|
||||
return value
|
||||
@@ -100,13 +100,13 @@ class COWDictMeta(COWMeta):
|
||||
value = getattr(cls, key)
|
||||
except AttributeError:
|
||||
value = cls.__getmutable__(key, readonly)
|
||||
|
||||
# This is for values which have been deleted
|
||||
|
||||
# This is for values which have been deleted
|
||||
if value is cls.__marker__:
|
||||
raise AttributeError("key %s does not exist." % key)
|
||||
|
||||
return value
|
||||
except AttributeError as e:
|
||||
except AttributeError, e:
|
||||
if not default is cls.__getmarker__:
|
||||
return default
|
||||
|
||||
@@ -120,9 +120,6 @@ class COWDictMeta(COWMeta):
|
||||
key += MUTABLE
|
||||
delattr(cls, key)
|
||||
|
||||
def __contains__(cls, key):
|
||||
return cls.has_key(key)
|
||||
|
||||
def has_key(cls, key):
|
||||
value = cls.__getreadonly__(key, cls.__marker__)
|
||||
if value is cls.__marker__:
|
||||
@@ -132,7 +129,7 @@ class COWDictMeta(COWMeta):
|
||||
def iter(cls, type, readonly=False):
|
||||
for key in dir(cls):
|
||||
if key.startswith("__"):
|
||||
continue
|
||||
continue
|
||||
|
||||
if key.endswith(MUTABLE):
|
||||
key = key[:-len(MUTABLE)]
|
||||
@@ -158,11 +155,11 @@ class COWDictMeta(COWMeta):
|
||||
return cls.iter("keys")
|
||||
def itervalues(cls, readonly=False):
|
||||
if not cls.__warn__ is False and cls.__hasmutable__ and readonly is False:
|
||||
print("Warning: If you arn't going to change any of the values call with True.", file=cls.__warn__)
|
||||
print >> cls.__warn__, "Warning: If you arn't going to change any of the values call with True."
|
||||
return cls.iter("values", readonly)
|
||||
def iteritems(cls, readonly=False):
|
||||
if not cls.__warn__ is False and cls.__hasmutable__ and readonly is False:
|
||||
print("Warning: If you arn't going to change any of the values call with True.", file=cls.__warn__)
|
||||
print >> cls.__warn__, "Warning: If you arn't going to change any of the values call with True."
|
||||
return cls.iter("items", readonly)
|
||||
|
||||
class COWSetMeta(COWDictMeta):
|
||||
@@ -181,13 +178,13 @@ class COWSetMeta(COWDictMeta):
|
||||
|
||||
def remove(cls, value):
|
||||
COWDictMeta.__delitem__(cls, repr(hash(value)))
|
||||
|
||||
|
||||
def __in__(cls, value):
|
||||
return COWDictMeta.has_key(repr(hash(value)))
|
||||
|
||||
def iterkeys(cls):
|
||||
raise TypeError("sets don't have keys")
|
||||
|
||||
|
||||
def iteritems(cls):
|
||||
raise TypeError("sets don't have 'items'")
|
||||
|
||||
@@ -204,120 +201,120 @@ if __name__ == "__main__":
|
||||
import sys
|
||||
COWDictBase.__warn__ = sys.stderr
|
||||
a = COWDictBase()
|
||||
print("a", a)
|
||||
print "a", a
|
||||
|
||||
a['a'] = 'a'
|
||||
a['b'] = 'b'
|
||||
a['dict'] = {}
|
||||
|
||||
b = a.copy()
|
||||
print("b", b)
|
||||
print "b", b
|
||||
b['c'] = 'b'
|
||||
|
||||
print()
|
||||
print
|
||||
|
||||
print("a", a)
|
||||
print "a", a
|
||||
for x in a.iteritems():
|
||||
print(x)
|
||||
print("--")
|
||||
print("b", b)
|
||||
print x
|
||||
print "--"
|
||||
print "b", b
|
||||
for x in b.iteritems():
|
||||
print(x)
|
||||
print()
|
||||
print x
|
||||
print
|
||||
|
||||
b['dict']['a'] = 'b'
|
||||
b['a'] = 'c'
|
||||
|
||||
print("a", a)
|
||||
print "a", a
|
||||
for x in a.iteritems():
|
||||
print(x)
|
||||
print("--")
|
||||
print("b", b)
|
||||
print x
|
||||
print "--"
|
||||
print "b", b
|
||||
for x in b.iteritems():
|
||||
print(x)
|
||||
print()
|
||||
print x
|
||||
print
|
||||
|
||||
try:
|
||||
b['dict2']
|
||||
except KeyError as e:
|
||||
print("Okay!")
|
||||
except KeyError, e:
|
||||
print "Okay!"
|
||||
|
||||
a['set'] = COWSetBase()
|
||||
a['set'].add("o1")
|
||||
a['set'].add("o1")
|
||||
a['set'].add("o2")
|
||||
|
||||
print("a", a)
|
||||
print "a", a
|
||||
for x in a['set'].itervalues():
|
||||
print(x)
|
||||
print("--")
|
||||
print("b", b)
|
||||
print x
|
||||
print "--"
|
||||
print "b", b
|
||||
for x in b['set'].itervalues():
|
||||
print(x)
|
||||
print()
|
||||
print x
|
||||
print
|
||||
|
||||
b['set'].add('o3')
|
||||
|
||||
print("a", a)
|
||||
print "a", a
|
||||
for x in a['set'].itervalues():
|
||||
print(x)
|
||||
print("--")
|
||||
print("b", b)
|
||||
print x
|
||||
print "--"
|
||||
print "b", b
|
||||
for x in b['set'].itervalues():
|
||||
print(x)
|
||||
print()
|
||||
print x
|
||||
print
|
||||
|
||||
a['set2'] = set()
|
||||
a['set2'].add("o1")
|
||||
a['set2'].add("o1")
|
||||
a['set2'].add("o2")
|
||||
|
||||
print("a", a)
|
||||
print "a", a
|
||||
for x in a.iteritems():
|
||||
print(x)
|
||||
print("--")
|
||||
print("b", b)
|
||||
print x
|
||||
print "--"
|
||||
print "b", b
|
||||
for x in b.iteritems(readonly=True):
|
||||
print(x)
|
||||
print()
|
||||
print x
|
||||
print
|
||||
|
||||
del b['b']
|
||||
try:
|
||||
print(b['b'])
|
||||
print b['b']
|
||||
except KeyError:
|
||||
print("Yay! deleted key raises error")
|
||||
print "Yay! deleted key raises error"
|
||||
|
||||
if b.has_key('b'):
|
||||
print("Boo!")
|
||||
print "Boo!"
|
||||
else:
|
||||
print("Yay - has_key with delete works!")
|
||||
|
||||
print("a", a)
|
||||
print "Yay - has_key with delete works!"
|
||||
|
||||
print "a", a
|
||||
for x in a.iteritems():
|
||||
print(x)
|
||||
print("--")
|
||||
print("b", b)
|
||||
print x
|
||||
print "--"
|
||||
print "b", b
|
||||
for x in b.iteritems(readonly=True):
|
||||
print(x)
|
||||
print()
|
||||
print x
|
||||
print
|
||||
|
||||
b.__revertitem__('b')
|
||||
|
||||
print("a", a)
|
||||
print "a", a
|
||||
for x in a.iteritems():
|
||||
print(x)
|
||||
print("--")
|
||||
print("b", b)
|
||||
print x
|
||||
print "--"
|
||||
print "b", b
|
||||
for x in b.iteritems(readonly=True):
|
||||
print(x)
|
||||
print()
|
||||
print x
|
||||
print
|
||||
|
||||
b.__revertitem__('dict')
|
||||
print("a", a)
|
||||
print "a", a
|
||||
for x in a.iteritems():
|
||||
print(x)
|
||||
print("--")
|
||||
print("b", b)
|
||||
print x
|
||||
print "--"
|
||||
print "b", b
|
||||
for x in b.iteritems(readonly=True):
|
||||
print(x)
|
||||
print()
|
||||
print x
|
||||
print
|
||||
|
||||
File diff suppressed because it is too large
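# Stepping back from the COW.py diff above: a minimal copy-on-write
# walkthrough using its COWDictBase (assuming the module is importable as
# bb.COW, its home in the BitBake tree; semantics follow the __main__ demo):
from bb.COW import COWDictBase

parent = COWDictBase.copy()
parent['greeting'] = 'hello'

child = parent.copy()          # cheap: no data is duplicated here
child['greeting'] = 'goodbye'  # the write shadows the parent's value

print(parent['greeting'])      # hello   - the parent is unchanged
print(child['greeting'])       # goodbye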
@@ -25,53 +25,25 @@
#
#Based on functions from the base bb module, Copyright 2003 Holger Schurig

import os
import sys
import logging
import bb
import bb.msg
import bb.process
from contextlib import nested
from bb import data, event, mkdirhier, utils

bblogger = logging.getLogger('BitBake')
logger = logging.getLogger('BitBake.Build')

NULL = open(os.devnull, 'r+')

# When we execute a python function we'd like certain things
# in all namespaces, hence we add them to __builtins__
# If we do not do this and use the exec globals, they will
# not be available to subfunctions.
__builtins__['bb'] = bb
__builtins__['os'] = os

from bb import data, fetch, event, mkdirhier, utils
import bb, os
|
||||
|
||||
# events
|
||||
class FuncFailed(Exception):
|
||||
def __init__(self, name = None, logfile = None):
|
||||
self.logfile = logfile
|
||||
self.name = name
|
||||
if name:
|
||||
self.msg = "Function '%s' failed" % name
|
||||
else:
|
||||
self.msg = "Function failed"
|
||||
"""Executed function failed"""
|
||||
|
||||
def __str__(self):
|
||||
if self.logfile and os.path.exists(self.logfile):
|
||||
msg = ("%s (see %s for further information)" %
|
||||
(self.msg, self.logfile))
|
||||
else:
|
||||
msg = self.msg
|
||||
return msg
|
||||
class EventException(Exception):
|
||||
"""Exception which is associated with an Event."""
|
||||
|
||||
def __init__(self, msg, event):
|
||||
self.args = msg, event
|
||||
|
||||
class TaskBase(event.Event):
|
||||
"""Base class for task events"""
|
||||
|
||||
def __init__(self, t, d ):
|
||||
self._task = t
|
||||
self._package = bb.data.getVar("PF", d, 1)
|
||||
event.Event.__init__(self)
|
||||
self._message = "package %s: task %s: %s" % (bb.data.getVar("PF", d, 1), t, bb.event.getName(self)[4:])
|
||||
event.Event.__init__(self, d)
|
||||
|
||||
def getTask(self):
|
||||
return self._task
|
||||
@@ -90,312 +62,333 @@ class TaskSucceeded(TaskBase):
|
||||
class TaskFailed(TaskBase):
|
||||
"""Task execution failed"""
|
||||
|
||||
def __init__(self, task, logfile, metadata):
|
||||
self.logfile = logfile
|
||||
super(TaskFailed, self).__init__(task, metadata)
|
||||
|
||||
class TaskInvalid(TaskBase):
|
||||
|
||||
def __init__(self, task, metadata):
|
||||
super(TaskInvalid, self).__init__(task, metadata)
|
||||
self._message = "No such task '%s'" % task
|
||||
|
||||
|
||||
class LogTee(object):
|
||||
def __init__(self, logger, outfile):
|
||||
self.outfile = outfile
|
||||
self.logger = logger
|
||||
self.name = self.outfile.name
|
||||
|
||||
def write(self, string):
|
||||
self.logger.plain(string)
|
||||
self.outfile.write(string)
|
||||
|
||||
def __enter__(self):
|
||||
self.outfile.__enter__()
|
||||
return self
|
||||
|
||||
def __exit__(self, *excinfo):
|
||||
self.outfile.__exit__(*excinfo)
|
||||
|
||||
def __repr__(self):
|
||||
return '<LogTee {0}>'.format(self.name)
|
||||
class InvalidTask(TaskBase):
|
||||
"""Invalid Task"""
|
||||
|
||||
# functions
|
||||
|
||||
def exec_func(func, d, dirs = None):
|
||||
"""Execute an BB 'function'"""
|
||||
|
||||
body = data.getVar(func, d)
|
||||
if not body:
|
||||
if body is None:
|
||||
logger.warn("Function %s doesn't exist", func)
|
||||
return
|
||||
|
||||
flags = data.getVarFlags(func, d)
|
||||
cleandirs = flags.get('cleandirs')
|
||||
if cleandirs:
|
||||
for cdir in data.expand(cleandirs, d).split():
|
||||
bb.utils.remove(cdir, True)
|
||||
cleandirs = (data.expand(data.getVarFlag(func, 'cleandirs', d), d) or "").split()
|
||||
for cdir in cleandirs:
|
||||
os.system("rm -rf %s" % cdir)
|
||||
|
||||
if dirs is None:
|
||||
dirs = flags.get('dirs')
|
||||
if dirs:
|
||||
dirs = data.expand(dirs, d).split()
|
||||
if not dirs:
|
||||
dirs = (data.expand(data.getVarFlag(func, 'dirs', d), d) or "").split()
|
||||
for adir in dirs:
|
||||
mkdirhier(adir)
|
||||
|
||||
if dirs:
|
||||
for adir in dirs:
|
||||
bb.utils.mkdirhier(adir)
|
||||
if len(dirs) > 0:
|
||||
adir = dirs[-1]
|
||||
else:
|
||||
adir = data.getVar('B', d, 1)
|
||||
if not os.path.exists(adir):
|
||||
adir = None
|
||||
|
||||
ispython = flags.get('python')
|
||||
if flags.get('fakeroot') and not flags.get('task'):
|
||||
bb.fatal("Function %s specifies fakeroot but isn't a task?!" % func)
|
||||
adir = data.expand(adir, d)
|
||||
|
||||
lockflag = flags.get('lockfiles')
|
||||
if lockflag:
|
||||
lockfiles = [data.expand(f, d) for f in lockflag.split()]
|
||||
else:
|
||||
lockfiles = None
|
||||
|
||||
tempdir = data.getVar('T', d, 1)
|
||||
runfile = os.path.join(tempdir, 'run.{0}.{1}'.format(func, os.getpid()))
|
||||
|
||||
with bb.utils.fileslocked(lockfiles):
|
||||
if ispython:
|
||||
exec_func_python(func, d, runfile, cwd=adir)
|
||||
else:
|
||||
exec_func_shell(func, d, runfile, cwd=adir)
|
||||
|
||||
_functionfmt = """
|
||||
def {function}(d):
|
||||
{body}
|
||||
|
||||
{function}(d)
|
||||
"""
|
||||
logformatter = bb.msg.BBLogFormatter("%(levelname)s: %(message)s")
|
||||
def exec_func_python(func, d, runfile, cwd=None):
|
||||
"""Execute a python BB 'function'"""
|
||||
|
||||
bbfile = d.getVar('FILE', True)
|
||||
try:
|
||||
olddir = os.getcwd()
|
||||
prevdir = os.getcwd()
|
||||
except OSError:
|
||||
olddir = None
|
||||
code = _functionfmt.format(function=func, body=d.getVar(func, True))
|
||||
bb.utils.mkdirhier(os.path.dirname(runfile))
|
||||
with open(runfile, 'w') as script:
|
||||
script.write(code)
|
||||
prevdir = data.expand('${TOPDIR}', d)
|
||||
if adir and os.access(adir, os.F_OK):
|
||||
os.chdir(adir)
|
||||
|
||||
if cwd:
|
||||
os.chdir(cwd)
|
||||
locks = []
|
||||
lockfiles = (data.expand(data.getVarFlag(func, 'lockfiles', d), d) or "").split()
|
||||
for lock in lockfiles:
|
||||
locks.append(bb.utils.lockfile(lock))
|
||||
|
||||
try:
|
||||
comp = utils.better_compile(code, func, bbfile)
|
||||
utils.better_exec(comp, {"d": d}, code, bbfile)
|
||||
except:
|
||||
if sys.exc_info()[0] in (bb.parse.SkipPackage, bb.build.FuncFailed):
|
||||
raise
|
||||
if data.getVarFlag(func, "python", d):
|
||||
exec_func_python(func, d)
|
||||
else:
|
||||
exec_func_shell(func, d)
|
||||
|
||||
raise FuncFailed(func, None)
|
||||
finally:
|
||||
if olddir:
|
||||
os.chdir(olddir)
|
||||
for lock in locks:
|
||||
bb.utils.unlockfile(lock)
|
||||
|
||||
def exec_func_shell(function, d, runfile, cwd=None):
|
||||
"""Execute a shell function from the metadata
|
||||
if os.path.exists(prevdir):
|
||||
os.chdir(prevdir)
|
||||
|
||||
def exec_func_python(func, d):
|
||||
"""Execute a python BB 'function'"""
|
||||
import re, os
|
||||
|
||||
tmp = "def " + func + "():\n%s" % data.getVar(func, d)
|
||||
tmp += '\n' + func + '()'
|
||||
comp = utils.better_compile(tmp, func, bb.data.getVar('FILE', d, 1) )
|
||||
prevdir = os.getcwd()
|
||||
g = {} # globals
|
||||
g['bb'] = bb
|
||||
g['os'] = os
|
||||
g['d'] = d
|
||||
utils.better_exec(comp,g,tmp, bb.data.getVar('FILE',d,1))
|
||||
if os.path.exists(prevdir):
|
||||
os.chdir(prevdir)
|
||||
|
||||
def exec_func_shell(func, d):
|
||||
"""Execute a shell BB 'function' Returns true if execution was successful.
|
||||
|
||||
For this, it creates a bash shell script in the tmp directory, writes the local
|
||||
data into it and finally executes. The output of the shell will end in a log file and stdout.
|
||||
|
||||
Note on directory behavior. The 'dirs' varflag should contain a list
|
||||
of the directories you need created prior to execution. The last
|
||||
item in the list is where we will chdir/cd to.
|
||||
"""
|
||||
import sys
|
||||
|
||||
# Don't let the emitted shell script override PWD
|
||||
d.delVarFlag('PWD', 'export')
|
||||
deps = data.getVarFlag(func, 'deps', d)
|
||||
check = data.getVarFlag(func, 'check', d)
|
||||
interact = data.getVarFlag(func, 'interactive', d)
|
||||
if check in globals():
|
||||
if globals()[check](func, deps):
|
||||
return
|
||||
|
||||
with open(runfile, 'w') as script:
|
||||
script.write('#!/bin/sh -e\n')
|
||||
if logger.isEnabledFor(logging.DEBUG):
|
||||
script.write("set -x\n")
|
||||
data.emit_func(function, script, d)
|
||||
if cwd:
|
||||
script.write("cd %s\n" % cwd)
|
||||
script.write("%s\n" % function)
|
||||
os.fchmod(script.fileno(), 0775)
|
||||
global logfile
|
||||
t = data.getVar('T', d, 1)
|
||||
if not t:
|
||||
return 0
|
||||
mkdirhier(t)
|
||||
logfile = "%s/log.%s.%s" % (t, func, str(os.getpid()))
|
||||
runfile = "%s/run.%s.%s" % (t, func, str(os.getpid()))
|
||||
|
||||
env = {
|
||||
'PATH': d.getVar('PATH', True),
|
||||
'LC_ALL': 'C',
|
||||
}
|
||||
f = open(runfile, "w")
|
||||
f.write("#!/bin/sh -e\n")
|
||||
if bb.msg.debug_level['default'] > 0: f.write("set -x\n")
|
||||
data.emit_env(f, d)
|
||||
|
||||
cmd = runfile
|
||||
f.write("cd %s\n" % os.getcwd())
|
||||
if func: f.write("%s\n" % func)
|
||||
f.close()
|
||||
os.chmod(runfile, 0775)
|
||||
if not func:
|
||||
bb.msg.error(bb.msg.domain.Build, "Function not specified")
|
||||
raise FuncFailed()
|
||||
|
||||
if logger.isEnabledFor(logging.DEBUG):
|
||||
logfile = LogTee(logger, sys.stdout)
|
||||
else:
|
||||
logfile = sys.stdout
|
||||
|
||||
try:
|
||||
bb.process.run(cmd, env=env, shell=False, stdin=NULL, log=logfile)
|
||||
except bb.process.CmdError:
|
||||
logfn = d.getVar('BB_LOGFILE', True)
|
||||
raise FuncFailed(function, logfn)
|
||||
|
||||
def _task_data(fn, task, d):
|
||||
localdata = data.createCopy(d)
|
||||
localdata.setVar('BB_FILENAME', fn)
|
||||
localdata.setVar('BB_CURRENTTASK', task[3:])
|
||||
localdata.setVar('OVERRIDES', 'task-%s:%s' %
|
||||
(task[3:], d.getVar('OVERRIDES', False)))
|
||||
localdata.finalize()
|
||||
data.expandKeys(localdata)
|
||||
return localdata
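# Example of what _task_data() sets up (illustrative): for task "do_compile"
# the copy's OVERRIDES gains a task-specific prefix,
#   OVERRIDES = "task-compile:<previous OVERRIDES>"
# so conditional metadata such as FOO_task-compile applies to this task only.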
|
||||
|
||||
def _exec_task(fn, task, d, quieterr):
|
||||
"""Execute a BB 'task'
|
||||
|
||||
Execution of a task involves a bit more setup than executing a function,
|
||||
running it with its own local metadata, and with some useful variables set.
|
||||
"""
|
||||
if not data.getVarFlag(task, 'task', d):
|
||||
event.fire(TaskInvalid(task, d), d)
|
||||
logger.error("No such task: %s" % task)
|
||||
return 1
|
||||
|
||||
logger.debug(1, "Executing task %s", task)
|
||||
|
||||
localdata = _task_data(fn, task, d)
|
||||
tempdir = localdata.getVar('T', True)
|
||||
if not tempdir:
|
||||
bb.fatal("T variable not set, unable to build")
|
||||
|
||||
bb.utils.mkdirhier(tempdir)
|
||||
loglink = os.path.join(tempdir, 'log.{0}'.format(task))
|
||||
logfn = os.path.join(tempdir, 'log.{0}.{1}'.format(task, os.getpid()))
|
||||
if loglink:
|
||||
bb.utils.remove(loglink)
|
||||
|
||||
try:
|
||||
os.symlink(logfn, loglink)
|
||||
except OSError:
|
||||
pass
|
||||
|
||||
prefuncs = localdata.getVarFlag(task, 'prefuncs', expand=True)
|
||||
postfuncs = localdata.getVarFlag(task, 'postfuncs', expand=True)
|
||||
|
||||
# Handle logfiles
|
||||
# open logs
|
||||
si = file('/dev/null', 'r')
|
||||
try:
|
||||
logfile = file(logfn, 'w')
|
||||
except OSError:
|
||||
logger.exception("Opening log file '%s'", logfn)
|
||||
if bb.msg.debug_level['default'] > 0:
|
||||
so = os.popen("tee \"%s\"" % logfile, "w")
|
||||
else:
|
||||
so = file(logfile, 'w')
|
||||
except OSError, e:
|
||||
bb.msg.error(bb.msg.domain.Build, "opening log file: %s" % e)
|
||||
pass
|
||||
|
||||
# Dup the existing fds so we dont lose them
|
||||
osi = [os.dup(sys.stdin.fileno()), sys.stdin.fileno()]
|
||||
oso = [os.dup(sys.stdout.fileno()), sys.stdout.fileno()]
|
||||
ose = [os.dup(sys.stderr.fileno()), sys.stderr.fileno()]
|
||||
se = so
|
||||
|
||||
# Replace those fds with our own
|
||||
os.dup2(si.fileno(), osi[1])
|
||||
os.dup2(logfile.fileno(), oso[1])
|
||||
os.dup2(logfile.fileno(), ose[1])
|
||||
if not interact:
|
||||
# dup the existing fds so we dont lose them
|
||||
osi = [os.dup(sys.stdin.fileno()), sys.stdin.fileno()]
|
||||
oso = [os.dup(sys.stdout.fileno()), sys.stdout.fileno()]
|
||||
ose = [os.dup(sys.stderr.fileno()), sys.stderr.fileno()]
|
||||
|
||||
# Ensure python logging goes to the logfile
|
||||
handler = logging.StreamHandler(logfile)
|
||||
handler.setFormatter(logformatter)
|
||||
bblogger.addHandler(handler)
|
||||
# replace those fds with our own
|
||||
os.dup2(si.fileno(), osi[1])
|
||||
os.dup2(so.fileno(), oso[1])
|
||||
os.dup2(se.fileno(), ose[1])
|
||||
|
||||
localdata.setVar('BB_LOGFILE', logfn)
|
||||
|
||||
event.fire(TaskStarted(task, localdata), localdata)
|
||||
# execute function
|
||||
prevdir = os.getcwd()
|
||||
if data.getVarFlag(func, "fakeroot", d):
|
||||
maybe_fakeroot = "PATH=\"%s\" fakeroot " % bb.data.getVar("PATH", d, 1)
|
||||
else:
|
||||
maybe_fakeroot = ''
|
||||
lang_environment = "LC_ALL=C "
|
||||
ret = os.system('%s%ssh -e %s' % (lang_environment, maybe_fakeroot, runfile))
|
||||
try:
|
||||
for func in (prefuncs or '').split():
|
||||
exec_func(func, localdata)
|
||||
exec_func(task, localdata)
|
||||
for func in (postfuncs or '').split():
|
||||
exec_func(func, localdata)
|
||||
except FuncFailed as exc:
|
||||
if not quieterr:
|
||||
logger.error(str(exc))
|
||||
event.fire(TaskFailed(task, logfn, localdata), localdata)
|
||||
return 1
|
||||
finally:
|
||||
sys.stdout.flush()
|
||||
sys.stderr.flush()
|
||||
os.chdir(prevdir)
|
||||
except:
|
||||
pass
|
||||
|
||||
bblogger.removeHandler(handler)
|
||||
|
||||
# Restore the backup fds
|
||||
if not interact:
|
||||
# restore the backups
|
||||
os.dup2(osi[0], osi[1])
|
||||
os.dup2(oso[0], oso[1])
|
||||
os.dup2(ose[0], ose[1])
|
||||
|
||||
# Close the backup fds
|
||||
# close our logs
|
||||
si.close()
|
||||
so.close()
|
||||
se.close()
|
||||
|
||||
# close the backup fds
|
||||
os.close(osi[0])
|
||||
os.close(oso[0])
|
||||
os.close(ose[0])
|
||||
si.close()
|
||||
|
||||
logfile.close()
|
||||
if os.path.exists(logfn) and os.path.getsize(logfn) == 0:
|
||||
logger.debug(2, "Zero size logfn %s, removing", logfn)
|
||||
bb.utils.remove(logfn)
|
||||
bb.utils.remove(loglink)
|
||||
event.fire(TaskSucceeded(task, localdata), localdata)
|
||||
if ret==0:
|
||||
if bb.msg.debug_level['default'] > 0:
|
||||
os.remove(runfile)
|
||||
# os.remove(logfile)
|
||||
return
|
||||
else:
|
||||
bb.msg.error(bb.msg.domain.Build, "function %s failed" % func)
|
||||
if data.getVar("BBINCLUDELOGS", d):
|
||||
bb.msg.error(bb.msg.domain.Build, "log data follows (%s)" % logfile)
|
||||
number_of_lines = data.getVar("BBINCLUDELOGS_LINES", d)
|
||||
if number_of_lines:
|
||||
os.system('tail -n%s %s' % (number_of_lines, logfile))
|
||||
else:
|
||||
f = open(logfile, "r")
|
||||
while True:
|
||||
l = f.readline()
|
||||
if l == '':
|
||||
break
|
||||
l = l.rstrip()
|
||||
print '| %s' % l
|
||||
f.close()
|
||||
else:
|
||||
bb.msg.error(bb.msg.domain.Build, "see log in %s" % logfile)
|
||||
raise FuncFailed( logfile )
|
||||
|
||||
if not localdata.getVarFlag(task, 'nostamp') and not localdata.getVarFlag(task, 'selfstamp'):
|
||||
make_stamp(task, localdata)
|
||||
|
||||
return 0
|
||||
def exec_task(task, d):
|
||||
"""Execute an BB 'task'
|
||||
|
||||
def exec_task(fn, task, d):
|
||||
try:
|
||||
quieterr = False
|
||||
if d.getVarFlag(task, "quieterrors") is not None:
|
||||
quieterr = True
|
||||
The primary difference between executing a task versus executing
|
||||
a function is that a task exists in the task digraph, and therefore
|
||||
has dependencies amongst other tasks."""
|
||||
|
||||
return _exec_task(fn, task, d, quieterr)
|
||||
except Exception:
|
||||
from traceback import format_exc
|
||||
if not quieterr:
|
||||
logger.error("Build of %s failed" % (task))
|
||||
logger.error(format_exc())
|
||||
failedevent = TaskFailed(task, None, d)
|
||||
event.fire(failedevent, d)
|
||||
# check if the task is in the graph..
|
||||
task_graph = data.getVar('_task_graph', d)
|
||||
if not task_graph:
|
||||
task_graph = bb.digraph()
|
||||
data.setVar('_task_graph', task_graph, d)
|
||||
task_cache = data.getVar('_task_cache', d)
|
||||
if not task_cache:
|
||||
task_cache = []
|
||||
data.setVar('_task_cache', task_cache, d)
|
||||
if not task_graph.hasnode(task):
|
||||
raise EventException("Missing node in task graph", InvalidTask(task, d))
|
||||
|
||||
# check whether this task needs executing..
|
||||
if stamp_is_current(task, d):
|
||||
return 1
|
||||
|
||||
def stamp_internal(taskname, d, file_name):
|
||||
# follow digraph path up, then execute our way back down
|
||||
def execute(graph, item):
|
||||
if data.getVarFlag(item, 'task', d):
|
||||
if item in task_cache:
|
||||
return 1
|
||||
|
||||
if task != item:
|
||||
# deeper than toplevel, exec w/ deps
|
||||
exec_task(item, d)
|
||||
return 1
|
||||
|
||||
try:
|
||||
bb.msg.debug(1, bb.msg.domain.Build, "Executing task %s" % item)
|
||||
old_overrides = data.getVar('OVERRIDES', d, 0)
|
||||
localdata = data.createCopy(d)
|
||||
data.setVar('OVERRIDES', 'task_%s:%s' % (item, old_overrides), localdata)
|
||||
data.update_data(localdata)
|
||||
event.fire(TaskStarted(item, localdata))
|
||||
exec_func(item, localdata)
|
||||
event.fire(TaskSucceeded(item, localdata))
|
||||
task_cache.append(item)
|
||||
data.setVar('_task_cache', task_cache, d)
|
||||
except FuncFailed, reason:
|
||||
bb.msg.note(1, bb.msg.domain.Build, "Task failed: %s" % reason )
|
||||
failedevent = TaskFailed(item, d)
|
||||
event.fire(failedevent)
|
||||
raise EventException("Function failed in task: %s" % reason, failedevent)
|
||||
|
||||
if data.getVarFlag(task, 'dontrundeps', d):
|
||||
execute(None, task)
|
||||
else:
|
||||
task_graph.walkdown(task, execute)
|
||||
|
||||
# make stamp, or cause event and raise exception
|
||||
if not data.getVarFlag(task, 'nostamp', d) and not data.getVarFlag(task, 'selfstamp', d):
|
||||
make_stamp(task, d)
|
||||
|
||||
def extract_stamp_data(d, fn):
|
||||
"""
|
||||
Extracts stamp data from d which is either a data dictionary (fn unset)
|
||||
or a dataCache entry (fn set).
|
||||
"""
|
||||
if fn:
|
||||
return (d.task_queues[fn], d.stamp[fn], d.task_deps[fn])
|
||||
task_graph = data.getVar('_task_graph', d)
|
||||
if not task_graph:
|
||||
task_graph = bb.digraph()
|
||||
data.setVar('_task_graph', task_graph, d)
|
||||
return (task_graph, data.getVar('STAMP', d, 1), None)
|
||||
|
||||
def extract_stamp(d, fn):
|
||||
"""
|
||||
Extracts stamp format which is either a data dictionary (fn unset)
|
||||
or a dataCache entry (fn set).
|
||||
"""
|
||||
if fn:
|
||||
return d.stamp[fn]
|
||||
return data.getVar('STAMP', d, 1)
|
||||
|
||||
def stamp_is_current(task, d, file_name = None, checkdeps = 1):
|
||||
"""
|
||||
Check status of a given task's stamp.
|
||||
Returns 0 if it is not current and needs updating.
|
||||
(d can be a data dict or dataCache)
|
||||
"""
|
||||
|
||||
(task_graph, stampfn, taskdep) = extract_stamp_data(d, file_name)
|
||||
|
||||
if not stampfn:
|
||||
return 0
|
||||
|
||||
stampfile = "%s.%s" % (stampfn, task)
|
||||
if not os.access(stampfile, os.F_OK):
|
||||
return 0
|
||||
|
||||
if checkdeps == 0:
|
||||
return 1
|
||||
|
||||
import stat
|
||||
tasktime = os.stat(stampfile)[stat.ST_MTIME]
|
||||
|
||||
_deps = []
|
||||
def checkStamp(graph, task):
|
||||
# check for existence
|
||||
if file_name:
|
||||
if 'nostamp' in taskdep and task in taskdep['nostamp']:
|
||||
return 1
|
||||
else:
|
||||
if data.getVarFlag(task, 'nostamp', d):
|
||||
return 1
|
||||
|
||||
if not stamp_is_current(task, d, file_name, 0 ):
|
||||
return 0
|
||||
|
||||
depfile = "%s.%s" % (stampfn, task)
|
||||
deptime = os.stat(depfile)[stat.ST_MTIME]
|
||||
if deptime > tasktime:
|
||||
return 0
|
||||
return 1
|
||||
|
||||
return task_graph.walkdown(task, checkStamp)
|
||||
|
||||
def stamp_internal(task, d, file_name):
|
||||
"""
|
||||
Internal stamp helper function
|
||||
Removes any stamp for the given task
|
||||
Makes sure the stamp directory exists
|
||||
Returns the stamp path+filename
|
||||
|
||||
In the bitbake core, d can be a CacheData and file_name will be set.
|
||||
When called in task context, d will be a data store, file_name will not be set
|
||||
"""
|
||||
taskflagname = taskname
|
||||
if taskname.endswith("_setscene") and taskname != "do_setscene":
|
||||
taskflagname = taskname.replace("_setscene", "")
|
||||
|
||||
if file_name:
|
||||
stamp = d.stamp[file_name]
|
||||
extrainfo = d.stamp_extrainfo[file_name].get(taskflagname) or ""
|
||||
else:
|
||||
stamp = d.getVar('STAMP', True)
|
||||
file_name = d.getVar('BB_FILENAME', True)
|
||||
extrainfo = d.getVarFlag(taskflagname, 'stamp-extra-info', True) or ""
|
||||
|
||||
stamp = extract_stamp(d, file_name)
|
||||
if not stamp:
|
||||
return
|
||||
|
||||
stamp = bb.parse.siggen.stampfile(stamp, file_name, taskname, extrainfo)
|
||||
|
||||
bb.utils.mkdirhier(os.path.dirname(stamp))
|
||||
|
||||
stamp = "%s.%s" % (stamp, task)
|
||||
mkdirhier(os.path.dirname(stamp))
|
||||
# Remove the file and recreate to force timestamp
|
||||
# change on broken NFS filesystems
|
||||
if os.access(stamp, os.F_OK):
|
||||
os.remove(stamp)
|
||||
return stamp
|
||||
|
||||
def make_stamp(task, d, file_name = None):
|
||||
@@ -404,10 +397,7 @@ def make_stamp(task, d, file_name = None):
|
||||
(d can be a data dict or dataCache)
|
||||
"""
|
||||
stamp = stamp_internal(task, d, file_name)
|
||||
# Remove the file and recreate to force timestamp
|
||||
# change on broken NFS filesystems
|
||||
if stamp:
|
||||
bb.utils.remove(stamp)
|
||||
f = open(stamp, "w")
|
||||
f.close()
|
||||
|
||||
@@ -416,52 +406,43 @@ def del_stamp(task, d, file_name = None):
|
||||
Removes a stamp for a given task
|
||||
(d can be a data dict or dataCache)
|
||||
"""
|
||||
stamp = stamp_internal(task, d, file_name)
|
||||
bb.utils.remove(stamp)
|
||||
|
||||
def stampfile(taskname, d, file_name = None):
|
||||
"""
|
||||
Return the stamp for a given task
|
||||
(d can be a data dict or dataCache)
|
||||
"""
|
||||
return stamp_internal(taskname, d, file_name)
|
||||
stamp_internal(task, d, file_name)
|
||||
|
||||
def add_tasks(tasklist, d):
|
||||
task_graph = data.getVar('_task_graph', d)
|
||||
task_deps = data.getVar('_task_deps', d)
|
||||
if not task_graph:
|
||||
task_graph = bb.digraph()
|
||||
if not task_deps:
|
||||
task_deps = {}
|
||||
if not 'tasks' in task_deps:
|
||||
task_deps['tasks'] = []
|
||||
if not 'parents' in task_deps:
|
||||
task_deps['parents'] = {}
|
||||
|
||||
for task in tasklist:
|
||||
deps = tasklist[task]
|
||||
task = data.expand(task, d)
|
||||
|
||||
data.setVarFlag(task, 'task', 1, d)
|
||||
task_graph.addnode(task, None)
|
||||
for dep in deps:
|
||||
dep = data.expand(dep, d)
|
||||
if not task_graph.hasnode(dep):
|
||||
task_graph.addnode(dep, None)
|
||||
task_graph.addnode(task, dep)
|
||||
|
||||
if not task in task_deps['tasks']:
|
||||
task_deps['tasks'].append(task)
|
||||
|
||||
flags = data.getVarFlags(task, d)
|
||||
flags = data.getVarFlags(task, d)
|
||||
def getTask(name):
|
||||
if not name in task_deps:
|
||||
task_deps[name] = {}
|
||||
if name in flags:
|
||||
deptask = data.expand(flags[name], d)
|
||||
if not name in task_deps:
|
||||
task_deps[name] = {}
|
||||
task_deps[name][task] = deptask
|
||||
getTask('depends')
|
||||
getTask('deptask')
|
||||
getTask('rdeptask')
|
||||
getTask('recrdeptask')
|
||||
getTask('nostamp')
|
||||
getTask('fakeroot')
|
||||
getTask('noexec')
|
||||
task_deps['parents'][task] = []
|
||||
for dep in flags['deps']:
|
||||
dep = data.expand(dep, d)
|
||||
task_deps['parents'][task].append(dep)
|
||||
|
||||
# don't assume holding a reference
|
||||
data.setVar('_task_graph', task_graph, d)
|
||||
data.setVar('_task_deps', task_deps, d)
|
||||
|
||||
def remove_task(task, kill, d):
|
||||
@@ -469,4 +450,22 @@ def remove_task(task, kill, d):
|
||||
|
||||
If kill is 1, also remove tasks that depend on this task."""
|
||||
|
||||
task_graph = data.getVar('_task_graph', d)
|
||||
if not task_graph:
|
||||
task_graph = bb.digraph()
|
||||
if not task_graph.hasnode(task):
|
||||
return
|
||||
|
||||
data.delVarFlag(task, 'task', d)
|
||||
ref = 1
|
||||
if kill == 1:
|
||||
ref = 2
|
||||
task_graph.delnode(task, ref)
|
||||
data.setVar('_task_graph', task_graph, d)
|
||||
|
||||
def task_exists(task, d):
|
||||
task_graph = data.getVar('_task_graph', d)
|
||||
if not task_graph:
|
||||
task_graph = bb.digraph()
|
||||
data.setVar('_task_graph', task_graph, d)
|
||||
return task_graph.hasnode(task)
|
||||
|
||||
@@ -28,338 +28,134 @@
|
||||
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
|
||||
import os
|
||||
import logging
|
||||
from collections import defaultdict, namedtuple
|
||||
import os, re
|
||||
import bb.data
|
||||
import bb.utils
|
||||
|
||||
logger = logging.getLogger("BitBake.Cache")
|
||||
from sets import Set
|
||||
|
||||
try:
|
||||
import cPickle as pickle
|
||||
except ImportError:
|
||||
import pickle
|
||||
logger.info("Importing cPickle failed. "
|
||||
"Falling back to a very slow implementation.")
|
||||
bb.msg.note(1, bb.msg.domain.Cache, "Importing cPickle failed. Falling back to a very slow implementation.")
|
||||
|
||||
__cache_version__ = "138"
|
||||
__cache_version__ = "127"
|
||||
|
||||
recipe_fields = (
|
||||
'pn',
|
||||
'pv',
|
||||
'pr',
|
||||
'pe',
|
||||
'defaultpref',
|
||||
'depends',
|
||||
'provides',
|
||||
'task_deps',
|
||||
'stamp',
|
||||
'stamp_extrainfo',
|
||||
'broken',
|
||||
'not_world',
|
||||
'skipped',
|
||||
'timestamp',
|
||||
'packages',
|
||||
'packages_dynamic',
|
||||
'rdepends',
|
||||
'rdepends_pkg',
|
||||
'rprovides',
|
||||
'rprovides_pkg',
|
||||
'rrecommends',
|
||||
'rrecommends_pkg',
|
||||
'nocache',
|
||||
'variants',
|
||||
'file_depends',
|
||||
'tasks',
|
||||
'basetaskhashes',
|
||||
'hashfilename',
|
||||
'inherits',
|
||||
'summary',
|
||||
'license',
|
||||
'section',
|
||||
'fakerootenv',
|
||||
'fakerootdirs'
|
||||
)
|
||||
|
||||
|
||||
class RecipeInfo(namedtuple('RecipeInfo', recipe_fields)):
|
||||
__slots__ = ()
|
||||
|
||||
@classmethod
|
||||
def listvar(cls, var, metadata):
|
||||
return cls.getvar(var, metadata).split()
|
||||
|
||||
@classmethod
|
||||
def intvar(cls, var, metadata):
|
||||
return int(cls.getvar(var, metadata) or 0)
|
||||
|
||||
@classmethod
|
||||
def depvar(cls, var, metadata):
|
||||
return bb.utils.explode_deps(cls.getvar(var, metadata))
|
||||
|
||||
@classmethod
|
||||
def pkgvar(cls, var, packages, metadata):
|
||||
return dict((pkg, cls.depvar("%s_%s" % (var, pkg), metadata))
|
||||
for pkg in packages)
|
||||
|
||||
@classmethod
|
||||
def taskvar(cls, var, tasks, metadata):
|
||||
return dict((task, cls.getvar("%s_task-%s" % (var, task), metadata))
|
||||
for task in tasks)
|
||||
|
||||
@classmethod
|
||||
def flaglist(cls, flag, varlist, metadata):
|
||||
return dict((var, metadata.getVarFlag(var, flag, True))
|
||||
for var in varlist)
|
||||
|
||||
@classmethod
|
||||
def getvar(cls, var, metadata):
|
||||
return metadata.getVar(var, True) or ''
|
||||
|
||||
@classmethod
|
||||
def make_optional(cls, default=None, **kwargs):
|
||||
"""Construct the namedtuple from the specified keyword arguments,
|
||||
with every value considered optional, using the default value if
|
||||
it was not specified."""
|
||||
for field in cls._fields:
|
||||
kwargs[field] = kwargs.get(field, default)
|
||||
return cls(**kwargs)
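# Illustrative call: RecipeInfo.make_optional(skipped=True) yields a
# RecipeInfo whose other fields are all None, which is exactly how
# from_metadata() represents a skipped recipe.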
|
||||
|
||||
@classmethod
|
||||
def from_metadata(cls, filename, metadata):
|
||||
if cls.getvar('__SKIPPED', metadata):
|
||||
return cls.make_optional(skipped=True)
|
||||
|
||||
tasks = metadata.getVar('__BBTASKS', False)
|
||||
|
||||
pn = cls.getvar('PN', metadata)
|
||||
packages = cls.listvar('PACKAGES', metadata)
|
||||
if not pn in packages:
|
||||
packages.append(pn)
|
||||
|
||||
return RecipeInfo(
|
||||
tasks = tasks,
|
||||
basetaskhashes = cls.taskvar('BB_BASEHASH', tasks, metadata),
|
||||
hashfilename = cls.getvar('BB_HASHFILENAME', metadata),
|
||||
|
||||
file_depends = metadata.getVar('__depends', False),
|
||||
task_deps = metadata.getVar('_task_deps', False) or
|
||||
{'tasks': [], 'parents': {}},
|
||||
variants = cls.listvar('__VARIANTS', metadata) + [''],
|
||||
|
||||
skipped = False,
|
||||
timestamp = bb.parse.cached_mtime(filename),
|
||||
packages = cls.listvar('PACKAGES', metadata),
|
||||
pn = pn,
|
||||
pe = cls.getvar('PE', metadata),
|
||||
pv = cls.getvar('PV', metadata),
|
||||
pr = cls.getvar('PR', metadata),
|
||||
nocache = cls.getvar('__BB_DONT_CACHE', metadata),
|
||||
defaultpref = cls.intvar('DEFAULT_PREFERENCE', metadata),
|
||||
broken = cls.getvar('BROKEN', metadata),
|
||||
not_world = cls.getvar('EXCLUDE_FROM_WORLD', metadata),
|
||||
stamp = cls.getvar('STAMP', metadata),
|
||||
stamp_extrainfo = cls.flaglist('stamp-extra-info', tasks, metadata),
|
||||
packages_dynamic = cls.listvar('PACKAGES_DYNAMIC', metadata),
|
||||
depends = cls.depvar('DEPENDS', metadata),
|
||||
provides = cls.depvar('PROVIDES', metadata),
|
||||
rdepends = cls.depvar('RDEPENDS', metadata),
|
||||
rprovides = cls.depvar('RPROVIDES', metadata),
|
||||
rrecommends = cls.depvar('RRECOMMENDS', metadata),
|
||||
rprovides_pkg = cls.pkgvar('RPROVIDES', packages, metadata),
|
||||
rdepends_pkg = cls.pkgvar('RDEPENDS', packages, metadata),
|
||||
rrecommends_pkg = cls.pkgvar('RRECOMMENDS', packages, metadata),
|
||||
inherits = cls.getvar('__inherit_cache', metadata),
|
||||
summary = cls.getvar('SUMMARY', metadata),
|
||||
license = cls.getvar('LICENSE', metadata),
|
||||
section = cls.getvar('SECTION', metadata),
|
||||
fakerootenv = cls.getvar('FAKEROOTENV', metadata),
|
||||
fakerootdirs = cls.getvar('FAKEROOTDIRS', metadata),
|
||||
)
|
||||
|
||||
|
||||
class Cache(object):
|
||||
class Cache:
|
||||
"""
|
||||
BitBake Cache implementation
|
||||
"""
|
||||
def __init__(self, cooker):
|
||||
|
||||
def __init__(self, data):
|
||||
self.cachedir = bb.data.getVar("CACHE", data, True)
|
||||
self.clean = set()
|
||||
self.checked = set()
|
||||
|
||||
self.cachedir = bb.data.getVar("CACHE", cooker.configuration.data, True)
|
||||
self.clean = {}
|
||||
self.depends_cache = {}
|
||||
self.data = None
|
||||
self.data_fn = None
|
||||
self.cacheclean = True
|
||||
|
||||
if self.cachedir in [None, '']:
|
||||
self.has_cache = False
|
||||
logger.info("Not using a cache. "
|
||||
"Set CACHE = <directory> to enable.")
|
||||
return
|
||||
|
||||
self.has_cache = True
|
||||
self.cachefile = os.path.join(self.cachedir, "bb_cache.dat")
|
||||
|
||||
logger.debug(1, "Using cache in '%s'", self.cachedir)
|
||||
bb.utils.mkdirhier(self.cachedir)
|
||||
|
||||
# If any of configuration.data's dependencies are newer than the
|
||||
# cache there isn't even any point in loading it...
|
||||
newest_mtime = 0
|
||||
deps = bb.data.getVar("__base_depends", data)
|
||||
|
||||
old_mtimes = [old_mtime for _, old_mtime in deps]
|
||||
old_mtimes.append(newest_mtime)
|
||||
newest_mtime = max(old_mtimes)
|
||||
|
||||
if bb.parse.cached_mtime_noerror(self.cachefile) >= newest_mtime:
|
||||
self.load_cachefile()
|
||||
elif os.path.isfile(self.cachefile):
|
||||
logger.info("Out of date cache found, rebuilding...")
|
||||
|
||||
def load_cachefile(self):
|
||||
with open(self.cachefile, "rb") as cachefile:
|
||||
pickled = pickle.Unpickler(cachefile)
|
||||
bb.msg.note(1, bb.msg.domain.Cache, "Not using a cache. Set CACHE = <directory> to enable.")
|
||||
else:
|
||||
self.has_cache = True
|
||||
self.cachefile = os.path.join(self.cachedir,"bb_cache.dat")
|
||||
|
||||
bb.msg.debug(1, bb.msg.domain.Cache, "Using cache in '%s'" % self.cachedir)
|
||||
try:
|
||||
cache_ver = pickled.load()
|
||||
bitbake_ver = pickled.load()
|
||||
except Exception:
|
||||
logger.info('Invalid cache, rebuilding...')
|
||||
return
|
||||
os.stat( self.cachedir )
|
||||
except OSError:
|
||||
bb.mkdirhier( self.cachedir )
|
||||
|
||||
if cache_ver != __cache_version__:
|
||||
logger.info('Cache version mismatch, rebuilding...')
|
||||
return
|
||||
elif bitbake_ver != bb.__version__:
|
||||
logger.info('Bitbake version mismatch, rebuilding...')
|
||||
return
|
||||
if self.has_cache and (self.mtime(self.cachefile)):
|
||||
try:
|
||||
p = pickle.Unpickler( file(self.cachefile,"rb"))
|
||||
self.depends_cache, version_data = p.load()
|
||||
if version_data['CACHE_VER'] != __cache_version__:
|
||||
raise ValueError, 'Cache Version Mismatch'
|
||||
if version_data['BITBAKE_VER'] != bb.__version__:
|
||||
raise ValueError, 'Bitbake Version Mismatch'
|
||||
except EOFError:
|
||||
bb.msg.note(1, bb.msg.domain.Cache, "Truncated cache found, rebuilding...")
|
||||
self.depends_cache = {}
|
||||
except (ValueError, KeyError):
|
||||
bb.msg.note(1, bb.msg.domain.Cache, "Invalid cache found, rebuilding...")
|
||||
self.depends_cache = {}
|
||||
|
||||
cachesize = os.fstat(cachefile.fileno()).st_size
|
||||
bb.event.fire(bb.event.CacheLoadStarted(cachesize), self.data)
|
||||
if self.depends_cache:
|
||||
for fn in self.depends_cache.keys():
|
||||
self.clean[fn] = ""
|
||||
self.cacheValidUpdate(fn)
|
||||
|
||||
previous_percent = 0
|
||||
while cachefile:
|
||||
try:
|
||||
key = pickled.load()
|
||||
value = pickled.load()
|
||||
except Exception:
|
||||
break
|
||||
|
||||
self.depends_cache[key] = value
|
||||
|
||||
# only fire events on even percentage boundaries
|
||||
current_progress = cachefile.tell()
|
||||
current_percent = 100 * current_progress / cachesize
|
||||
if current_percent > previous_percent:
|
||||
previous_percent = current_percent
|
||||
bb.event.fire(bb.event.CacheLoadProgress(current_progress),
|
||||
self.data)
|
||||
|
||||
bb.event.fire(bb.event.CacheLoadCompleted(cachesize,
|
||||
len(self.depends_cache)),
|
||||
self.data)
|
||||
|
||||
@staticmethod
|
||||
def virtualfn2realfn(virtualfn):
|
||||
def getVar(self, var, fn, exp = 0):
|
||||
"""
|
||||
Convert a virtual file name to a real one + the associated subclass keyword
|
||||
Gets the value of a variable
|
||||
(similar to getVar in the data class)
|
||||
|
||||
There are two scenarios:
|
||||
1. We have cached data - serve from depends_cache[fn]
|
||||
2. We're learning what data to cache - serve from data
|
||||
backend but add a copy of the data to the cache.
|
||||
"""
|
||||
|
||||
fn = virtualfn
|
||||
cls = ""
|
||||
if virtualfn.startswith('virtual:'):
|
||||
cls = virtualfn.split(':', 2)[1]
|
||||
fn = virtualfn.replace('virtual:' + cls + ':', '')
|
||||
return (fn, cls)
|
||||
if fn in self.clean:
|
||||
return self.depends_cache[fn][var]
|
||||
|
||||
@staticmethod
|
||||
def realfn2virtual(realfn, cls):
|
||||
"""
|
||||
Convert a real filename + the associated subclass keyword to a virtual filename
|
||||
"""
|
||||
if cls == "":
|
||||
return realfn
|
||||
return "virtual:" + cls + ":" + realfn
|
||||
if not fn in self.depends_cache:
self.depends_cache[fn] = {}

@classmethod
def loadDataFull(cls, virtualfn, appends, cfgData):
if fn != self.data_fn:
# We're trying to access data in the cache which doesn't exist
# yet setData hasn't been called to setup the right access. Very bad.
bb.msg.error(bb.msg.domain.Cache, "Parsing error data_fn %s and fn %s don't match" % (self.data_fn, fn))

result = bb.data.getVar(var, self.data, exp)
self.depends_cache[fn][var] = result
return result

def setData(self, fn, data):
"""
Called to prime bb_cache ready to learn which variables to cache.
Will be followed by calls to self.getVar which aren't cached
but can be fulfilled from self.data.
"""
self.data_fn = fn
self.data = data

# Make sure __depends makes the depends_cache
self.getVar("__depends", fn, True)
self.depends_cache[fn]["CACHETIMESTAMP"] = bb.parse.cached_mtime(fn)

def loadDataFull(self, fn, cfgData):
"""
Return a complete set of data for fn.
To do this, we need to parse the file.
"""
bb_data, skipped = self.load_bbfile(fn, cfgData)
return bb_data

(fn, virtual) = cls.virtualfn2realfn(virtualfn)
def loadData(self, fn, cfgData):
"""
Load a subset of data for fn.
If the cached data is valid we do nothing,
To do this, we need to parse the file and set the system
to record the variables accessed.
Return the cache status and whether the file was skipped when parsed
"""
if self.cacheValid(fn):
if "SKIPPED" in self.depends_cache[fn]:
return True, True
return True, False

logger.debug(1, "Parsing %s (full)", fn)

bb_data = cls.load_bbfile(fn, appends, cfgData)
return bb_data[virtual]

@classmethod
def parse(cls, filename, appends, configdata):
"""Parse the specified filename, returning the recipe information"""
infos = []
datastores = cls.load_bbfile(filename, appends, configdata)
depends = set()
for variant, data in sorted(datastores.iteritems(),
key=lambda i: i[0],
reverse=True):
virtualfn = cls.realfn2virtual(filename, variant)
depends |= (data.getVar("__depends", False) or set())
if depends and not variant:
data.setVar("__depends", depends)
info = RecipeInfo.from_metadata(filename, data)
infos.append((virtualfn, info))
return infos

def load(self, filename, appends, configdata):
"""Obtain the recipe information for the specified filename,
using cached values if available, otherwise parsing.

Note that if it does parse to obtain the info, it will not
automatically add the information to the cache or to your
CacheData. Use the add or add_info method to do so after
running this, or use loadData instead."""
cached = self.cacheValid(filename)
if cached:
infos = []
info = self.depends_cache[filename]
for variant in info.variants:
virtualfn = self.realfn2virtual(filename, variant)
infos.append((virtualfn, self.depends_cache[virtualfn]))
else:
logger.debug(1, "Parsing %s", filename)
return self.parse(filename, appends, configdata)

return cached, infos

def loadData(self, fn, appends, cfgData, cacheData):
"""Load the recipe info for the specified filename,
parsing and adding to the cache if necessary, and adding
the recipe information to the supplied CacheData instance."""
skipped, virtuals = 0, 0

cached, infos = self.load(fn, appends, cfgData)
for virtualfn, info in infos:
if info.skipped:
logger.debug(1, "Skipping %s", virtualfn)
skipped += 1
else:
self.add_info(virtualfn, info, cacheData, not cached)
virtuals += 1

return cached, skipped, virtuals
bb_data, skipped = self.load_bbfile(fn, cfgData)
self.setData(fn, bb_data)
return False, skipped

def cacheValid(self, fn):
"""
Is the cache valid for fn?
Fast version, no timestamps checked.
"""
if fn not in self.checked:
self.cacheValidUpdate(fn)

# Is cache enabled?
if not self.has_cache:
return False
@@ -376,80 +172,62 @@ class Cache(object):
if not self.has_cache:
return False

self.checked.add(fn)

# File isn't in depends_cache
if not fn in self.depends_cache:
logger.debug(2, "Cache: %s is not cached", fn)
return False

mtime = bb.parse.cached_mtime_noerror(fn)

# Check file still exists
if mtime == 0:
logger.debug(2, "Cache: %s no longer exists", fn)
if self.mtime(fn) == 0:
bb.msg.debug(2, bb.msg.domain.Cache, "Cache: %s not longer exists" % fn)
self.remove(fn)
return False

# File isn't in depends_cache
if not fn in self.depends_cache:
bb.msg.debug(2, bb.msg.domain.Cache, "Cache: %s is not cached" % fn)
self.remove(fn)
return False

info = self.depends_cache[fn]
# Check the file's timestamp
if mtime != info.timestamp:
logger.debug(2, "Cache: %s changed", fn)
if bb.parse.cached_mtime(fn) > self.getVar("CACHETIMESTAMP", fn, True):
bb.msg.debug(2, bb.msg.domain.Cache, "Cache: %s changed" % fn)
self.remove(fn)
return False

# Check dependencies are still valid
depends = info.file_depends
if depends:
for f, old_mtime in depends:
fmtime = bb.parse.cached_mtime_noerror(f)
# Check if file still exists
if old_mtime != 0 and fmtime == 0:
logger.debug(2, "Cache: %s's dependency %s was removed",
fn, f)
self.remove(fn)
return False
depends = self.getVar("__depends", fn, True)
for f,old_mtime in depends:
# Check if file still exists
if self.mtime(f) == 0:
return False

if (fmtime != old_mtime):
logger.debug(2, "Cache: %s's dependency %s changed",
fn, f)
self.remove(fn)
return False
new_mtime = bb.parse.cached_mtime(f)
if (new_mtime > old_mtime):
bb.msg.debug(2, bb.msg.domain.Cache, "Cache: %s's dependency %s changed" % (fn, f))
self.remove(fn)
return False

invalid = False
for cls in info.variants:
virtualfn = self.realfn2virtual(fn, cls)
self.clean.add(virtualfn)
if virtualfn not in self.depends_cache:
logger.debug(2, "Cache: %s is not cached", virtualfn)
invalid = True
bb.msg.debug(2, bb.msg.domain.Cache, "Depends Cache: %s is clean" % fn)
if not fn in self.clean:
self.clean[fn] = ""

# If any one of the variants is not present, mark as invalid for all
if invalid:
for cls in info.variants:
virtualfn = self.realfn2virtual(fn, cls)
if virtualfn in self.clean:
logger.debug(2, "Cache: Removing %s from cache", virtualfn)
self.clean.remove(virtualfn)
if fn in self.clean:
logger.debug(2, "Cache: Marking %s as not clean", fn)
self.clean.remove(fn)
return False

self.clean.add(fn)
return True

def skip(self, fn):
"""
Mark a fn as skipped
Called from the parser
"""
if not fn in self.depends_cache:
self.depends_cache[fn] = {}
self.depends_cache[fn]["SKIPPED"] = "1"

def remove(self, fn):
"""
Remove a fn from the cache
Called from the parser in error cases
"""
bb.msg.debug(1, bb.msg.domain.Cache, "Removing %s from cache" % fn)
if fn in self.depends_cache:
logger.debug(1, "Removing %s from cache", fn)
del self.depends_cache[fn]
if fn in self.clean:
logger.debug(1, "Marking %s as unclean", fn)
self.clean.remove(fn)
del self.clean[fn]

def sync(self):
"""
@@ -460,88 +238,148 @@ class Cache(object):
if not self.has_cache:
return

if self.cacheclean:
logger.debug(2, "Cache is clean, not saving.")
return
version_data = {}
version_data['CACHE_VER'] = __cache_version__
version_data['BITBAKE_VER'] = bb.__version__

with open(self.cachefile, "wb") as cachefile:
pickler = pickle.Pickler(cachefile, pickle.HIGHEST_PROTOCOL)
pickler.dump(__cache_version__)
pickler.dump(bb.__version__)
for key, value in self.depends_cache.iteritems():
pickler.dump(key)
pickler.dump(value)
p = pickle.Pickler(file(self.cachefile, "wb" ), -1 )
p.dump([self.depends_cache, version_data])

del self.depends_cache
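A minimal sketch of reading back the on-disk layout the new sync() writes, mirroring the load loop at the top of this diff; the cache file path here is an assumed example, not taken from the change:

    # Assumed path, for illustration only; mirrors load_cachefile() above.
    import cPickle as pickle
    with open("tmp/cache/bb_cache.dat", "rb") as cachefile:
        unpickler = pickle.Unpickler(cachefile)
        assert unpickler.load() == __cache_version__  # written first by sync()
        assert unpickler.load() == bb.__version__     # then the bitbake version
        while True:
            try:
                key = unpickler.load()                # alternating key ...
                value = unpickler.load()              # ... and RecipeInfo value
            except EOFError:
                break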
@staticmethod
def mtime(cachefile):
def mtime(self, cachefile):
return bb.parse.cached_mtime_noerror(cachefile)

def add_info(self, filename, info, cacheData, parsed=None):
cacheData.add_from_recipeinfo(filename, info)
if not self.has_cache:
return

if 'SRCREVINACTION' not in info.pv and not info.nocache:
if parsed:
self.cacheclean = False
self.depends_cache[filename] = info

def add(self, file_name, data, cacheData, parsed=None):
def handle_data(self, file_name, cacheData):
"""
Save data we need into the cache
Save data we need into the cache
"""

realfn = self.virtualfn2realfn(file_name)[0]
info = RecipeInfo.from_metadata(realfn, data)
self.add_info(file_name, info, cacheData, parsed)
pn = self.getVar('PN', file_name, True)
pe = self.getVar('PE', file_name, True) or "0"
pv = self.getVar('PV', file_name, True)
pr = self.getVar('PR', file_name, True)
dp = int(self.getVar('DEFAULT_PREFERENCE', file_name, True) or "0")
provides = Set([pn] + (self.getVar("PROVIDES", file_name, True) or "").split())
depends = bb.utils.explode_deps(self.getVar("DEPENDS", file_name, True) or "")
packages = (self.getVar('PACKAGES', file_name, True) or "").split()
packages_dynamic = (self.getVar('PACKAGES_DYNAMIC', file_name, True) or "").split()
rprovides = (self.getVar("RPROVIDES", file_name, True) or "").split()

@staticmethod
def load_bbfile(bbfile, appends, config):
cacheData.task_queues[file_name] = self.getVar("_task_graph", file_name, True)
cacheData.task_deps[file_name] = self.getVar("_task_deps", file_name, True)

# build PackageName to FileName lookup table
if pn not in cacheData.pkg_pn:
cacheData.pkg_pn[pn] = []
cacheData.pkg_pn[pn].append(file_name)

cacheData.stamp[file_name] = self.getVar('STAMP', file_name, True)

# build FileName to PackageName lookup table
cacheData.pkg_fn[file_name] = pn
cacheData.pkg_pepvpr[file_name] = (pe,pv,pr)
cacheData.pkg_dp[file_name] = dp

# Build forward and reverse provider hashes
# Forward: virtual -> [filenames]
# Reverse: PN -> [virtuals]
if pn not in cacheData.pn_provides:
cacheData.pn_provides[pn] = Set()
cacheData.pn_provides[pn] |= provides

cacheData.fn_provides[file_name] = Set()
for provide in provides:
if provide not in cacheData.providers:
cacheData.providers[provide] = []
cacheData.providers[provide].append(file_name)
cacheData.fn_provides[file_name].add(provide)

cacheData.deps[file_name] = Set()
for dep in depends:
cacheData.all_depends.add(dep)
cacheData.deps[file_name].add(dep)

# Build reverse hash for PACKAGES, so runtime dependencies
# can be be resolved (RDEPENDS, RRECOMMENDS etc.)
for package in packages:
if not package in cacheData.packages:
cacheData.packages[package] = []
cacheData.packages[package].append(file_name)
rprovides += (self.getVar("RPROVIDES_%s" % package, file_name, 1) or "").split()

for package in packages_dynamic:
if not package in cacheData.packages_dynamic:
cacheData.packages_dynamic[package] = []
cacheData.packages_dynamic[package].append(file_name)

for rprovide in rprovides:
if not rprovide in cacheData.rproviders:
cacheData.rproviders[rprovide] = []
cacheData.rproviders[rprovide].append(file_name)

# Build hash of runtime depends and rececommends

def add_dep(deplist, deps):
for dep in deps:
if not dep in deplist:
deplist[dep] = ""

if not file_name in cacheData.rundeps:
cacheData.rundeps[file_name] = {}
if not file_name in cacheData.runrecs:
cacheData.runrecs[file_name] = {}

for package in packages + [pn]:
if not package in cacheData.rundeps[file_name]:
cacheData.rundeps[file_name][package] = {}
if not package in cacheData.runrecs[file_name]:
cacheData.runrecs[file_name][package] = {}

add_dep(cacheData.rundeps[file_name][package], bb.utils.explode_deps(self.getVar('RDEPENDS', file_name, True) or ""))
add_dep(cacheData.runrecs[file_name][package], bb.utils.explode_deps(self.getVar('RRECOMMENDS', file_name, True) or ""))
add_dep(cacheData.rundeps[file_name][package], bb.utils.explode_deps(self.getVar("RDEPENDS_%s" % package, file_name, True) or ""))
add_dep(cacheData.runrecs[file_name][package], bb.utils.explode_deps(self.getVar("RRECOMMENDS_%s" % package, file_name, True) or ""))

# Collect files we may need for possible world-dep
# calculations
if not self.getVar('BROKEN', file_name, True) and not self.getVar('EXCLUDE_FROM_WORLD', file_name, True):
cacheData.possible_world.append(file_name)

def load_bbfile( self, bbfile , config):
"""
Load and parse one .bb build file
Return the data and whether parsing resulted in the file being skipped
"""
chdir_back = False

from bb import data, parse
import bb
from bb import utils, data, parse, debug, event, fatal

# expand tmpdir to include this topdir
data.setVar('TMPDIR', data.getVar('TMPDIR', config, 1) or "", config)
bbfile_loc = os.path.abspath(os.path.dirname(bbfile))
oldpath = os.path.abspath(os.getcwd())
parse.cached_mtime_noerror(bbfile_loc)
if self.mtime(bbfile_loc):
os.chdir(bbfile_loc)
bb_data = data.init_db(config)
# The ConfHandler first looks if there is a TOPDIR and if not
# then it would call getcwd().
# Previously, we chdir()ed to bbfile_loc, called the handler
# and finally chdir()ed back, a couple of thousand times. We now
# just fill in TOPDIR to point to bbfile_loc if there is no TOPDIR yet.
if not data.getVar('TOPDIR', bb_data):
chdir_back = True
data.setVar('TOPDIR', bbfile_loc, bb_data)
try:
if appends:
data.setVar('__BBAPPEND', " ".join(appends), bb_data)
bb_data = parse.handle(bbfile, bb_data)
if chdir_back:
os.chdir(oldpath)
return bb_data
bb_data = parse.handle(bbfile, bb_data) # read .bb data
os.chdir(oldpath)
return bb_data, False
except bb.parse.SkipPackage:
os.chdir(oldpath)
return bb_data, True
except:
if chdir_back:
os.chdir(oldpath)
os.chdir(oldpath)
raise

def init(cooker):
"""
The Objective: Cache the minimum amount of data possible yet get to the
The Objective: Cache the minimum amount of data possible yet get to the
stage of building packages (i.e. tryBuild) without reparsing any .bb files.

To do this, we intercept getVar calls and only cache the variables we see
being accessed. We rely on the cache getVar calls being made for all
variables bitbake might need to use to reach this stage. For each cached
To do this, we intercept getVar calls and only cache the variables we see
being accessed. We rely on the cache getVar calls being made for all
variables bitbake might need to use to reach this stage. For each cached
file we need to track:

* Its mtime
@@ -551,107 +389,48 @@ def init(cooker):
Files causing parsing errors are evicted from the cache.

"""
return Cache(cooker.configuration.data)
return Cache(cooker)

class CacheData(object):

#============================================================================#
# CacheData
#============================================================================#
class CacheData:
"""
The data structures we compile from the cached data
"""

def __init__(self):
# Direct cache variables
self.providers = defaultdict(list)
self.rproviders = defaultdict(list)
self.packages = defaultdict(list)
self.packages_dynamic = defaultdict(list)
"""
Direct cache variables
(from Cache.handle_data)
"""
self.providers = {}
self.rproviders = {}
self.packages = {}
self.packages_dynamic = {}
self.possible_world = []
self.pkg_pn = defaultdict(list)
self.pkg_pn = {}
self.pkg_fn = {}
self.pkg_pepvpr = {}
self.pkg_dp = {}
self.pn_provides = defaultdict(list)
self.pn_provides = {}
self.fn_provides = {}
self.all_depends = []
self.deps = defaultdict(list)
self.rundeps = defaultdict(lambda: defaultdict(list))
self.runrecs = defaultdict(lambda: defaultdict(list))
self.all_depends = Set()
self.deps = {}
self.rundeps = {}
self.runrecs = {}
self.task_queues = {}
self.task_deps = {}
self.stamp = {}
self.stamp_extrainfo = {}
self.preferred = {}
self.tasks = {}
self.basetaskhash = {}
self.hashfn = {}
self.inherits = {}
self.summary = {}
self.license = {}
self.section = {}
self.fakerootenv = {}
self.fakerootdirs = {}

# Indirect Cache variables (set elsewhere)
"""
Indirect Cache variables
(set elsewhere)
"""
self.ignored_dependencies = []
self.world_target = set()
self.world_target = Set()
self.bbfile_priority = {}
self.bbfile_config_priorities = []

def add_from_recipeinfo(self, fn, info):
self.task_deps[fn] = info.task_deps
self.pkg_fn[fn] = info.pn
self.pkg_pn[info.pn].append(fn)
self.pkg_pepvpr[fn] = (info.pe, info.pv, info.pr)
self.pkg_dp[fn] = info.defaultpref
self.stamp[fn] = info.stamp
self.stamp_extrainfo[fn] = info.stamp_extrainfo

provides = [info.pn]
for provide in info.provides:
if provide not in provides:
provides.append(provide)
self.fn_provides[fn] = provides

for provide in provides:
self.providers[provide].append(fn)
if provide not in self.pn_provides[info.pn]:
self.pn_provides[info.pn].append(provide)

for dep in info.depends:
if dep not in self.deps[fn]:
self.deps[fn].append(dep)
if dep not in self.all_depends:
self.all_depends.append(dep)

rprovides = info.rprovides
for package in info.packages:
self.packages[package].append(fn)
rprovides += info.rprovides_pkg[package]

for rprovide in rprovides:
self.rproviders[rprovide].append(fn)

for package in info.packages_dynamic:
self.packages_dynamic[package].append(fn)

# Build hash of runtime depends and rececommends
for package in info.packages + [info.pn]:
self.rundeps[fn][package] = list(info.rdepends) + info.rdepends_pkg[package]
self.runrecs[fn][package] = list(info.rrecommends) + info.rrecommends_pkg[package]

# Collect files we may need for possible world-dep
# calculations
if not info.broken and not info.not_world:
self.possible_world.append(fn)

self.hashfn[fn] = info.hashfilename
for task, taskhash in info.basetaskhashes.iteritems():
identifier = '%s.%s' % (fn, task)
self.basetaskhash[identifier] = taskhash

self.inherits[fn] = info.inherits
self.summary[fn] = info.summary
self.license[fn] = info.license
self.section[fn] = info.section
self.fakerootenv[fn] = info.fakerootenv
self.fakerootdirs[fn] = info.fakerootdirs
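A hypothetical illustration of the lookup tables add_from_recipeinfo() fills in; the recipe filename and the "foo" names are invented for the example:

    # Invented names, purely to show how the tables relate.
    cachedata = CacheData()
    # after cache.add_info("/meta/recipes/foo_1.0.bb", info, cachedata):
    #   cachedata.pkg_fn["/meta/recipes/foo_1.0.bb"] == "foo"      (file -> PN)
    #   "/meta/recipes/foo_1.0.bb" in cachedata.pkg_pn["foo"]      (PN -> files)
    #   "/meta/recipes/foo_1.0.bb" in cachedata.providers["foo"]   (provider -> files)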
@@ -1,336 +0,0 @@
import ast
import codegen
import logging
import os.path
import bb.utils, bb.data
from itertools import chain
from pysh import pyshyacc, pyshlex, sherrors

logger = logging.getLogger('BitBake.CodeParser')
PARSERCACHE_VERSION = 2

try:
import cPickle as pickle
except ImportError:
import pickle
logger.info('Importing cPickle failed. Falling back to a very slow implementation.')

def check_indent(codestr):
"""If the code is indented, add a top level piece of code to 'remove' the indentation"""

i = 0
while codestr[i] in ["\n", " ", "	"]:
i = i + 1

if i == 0:
return codestr

if codestr[i-1] is "	" or codestr[i-1] is " ":
return "if 1:\n" + codestr

return codestr

pythonparsecache = {}
shellparsecache = {}

def parser_cachefile(d):
cachedir = (bb.data.getVar("PERSISTENT_DIR", d, True) or
bb.data.getVar("CACHE", d, True))
if cachedir in [None, '']:
return None
bb.utils.mkdirhier(cachedir)
cachefile = os.path.join(cachedir, "bb_codeparser.dat")
logger.debug(1, "Using cache in '%s' for codeparser cache", cachefile)
return cachefile

def parser_cache_init(d):
global pythonparsecache
global shellparsecache

cachefile = parser_cachefile(d)
if not cachefile:
return

try:
p = pickle.Unpickler(file(cachefile, "rb"))
data, version = p.load()
except:
return

if version != PARSERCACHE_VERSION:
return

pythonparsecache = data[0]
shellparsecache = data[1]

def parser_cache_save(d):
cachefile = parser_cachefile(d)
if not cachefile:
return

p = pickle.Pickler(file(cachefile, "wb"), -1)
p.dump([[pythonparsecache, shellparsecache], PARSERCACHE_VERSION])

class PythonParser():
class ValueVisitor():
"""Visitor to traverse a python abstract syntax tree and obtain
the variables referenced via bitbake metadata APIs, and the external
functions called.
"""

getvars = ("d.getVar", "bb.data.getVar", "data.getVar")
expands = ("d.expand", "bb.data.expand", "data.expand")
execs = ("bb.build.exec_func", "bb.build.exec_task")

@classmethod
def _compare_name(cls, strparts, node):
"""Given a sequence of strings representing a python name,
where the last component is the actual Name and the prior
elements are Attribute nodes, determine if the supplied node
matches.
"""

if not strparts:
return True

current, rest = strparts[0], strparts[1:]
if isinstance(node, ast.Attribute):
if current == node.attr:
return cls._compare_name(rest, node.value)
elif isinstance(node, ast.Name):
if current == node.id:
return True
return False

@classmethod
def compare_name(cls, value, node):
"""Convenience function for the _compare_node method, which
can accept a string (which is split by '.' for you), or an
iterable of strings, in which case it checks to see if any of
them match, similar to isinstance.
"""

if isinstance(value, basestring):
return cls._compare_name(tuple(reversed(value.split("."))),
node)
else:
return any(cls.compare_name(item, node) for item in value)

def __init__(self, value):
self.var_references = set()
self.var_execs = set()
self.direct_func_calls = set()
self.var_expands = set()
self.value = value

@classmethod
def warn(cls, func, arg):
"""Warn about calls of bitbake APIs which pass a non-literal
argument for the variable name, as we're not able to track such
a reference.
"""

try:
funcstr = codegen.to_source(func)
argstr = codegen.to_source(arg)
except TypeError:
logger.debug(2, 'Failed to convert function and argument to source form')
else:
logger.debug(1, "Warning: in call to '%s', argument '%s' is "
"not a literal", funcstr, argstr)

def visit_Call(self, node):
if self.compare_name(self.getvars, node.func):
if isinstance(node.args[0], ast.Str):
self.var_references.add(node.args[0].s)
else:
self.warn(node.func, node.args[0])
elif self.compare_name(self.expands, node.func):
if isinstance(node.args[0], ast.Str):
self.warn(node.func, node.args[0])
self.var_expands.update(node.args[0].s)
elif isinstance(node.args[0], ast.Call) and \
self.compare_name(self.getvars, node.args[0].func):
pass
else:
self.warn(node.func, node.args[0])
elif self.compare_name(self.execs, node.func):
if isinstance(node.args[0], ast.Str):
self.var_execs.add(node.args[0].s)
else:
self.warn(node.func, node.args[0])
elif isinstance(node.func, ast.Name):
self.direct_func_calls.add(node.func.id)
elif isinstance(node.func, ast.Attribute):
# We must have a qualified name. Therefore we need
# to walk the chain of 'Attribute' nodes to determine
# the qualification.
attr_node = node.func.value
identifier = node.func.attr
while isinstance(attr_node, ast.Attribute):
identifier = attr_node.attr + "." + identifier
attr_node = attr_node.value
if isinstance(attr_node, ast.Name):
identifier = attr_node.id + "." + identifier
self.direct_func_calls.add(identifier)

def __init__(self):
#self.funcdefs = set()
self.execs = set()
#self.external_cmds = set()
self.references = set()

def parse_python(self, node):

h = hash(str(node))

if h in pythonparsecache:
self.references = pythonparsecache[h]["refs"]
self.execs = pythonparsecache[h]["execs"]
return

code = compile(check_indent(str(node)), "<string>", "exec",
ast.PyCF_ONLY_AST)

visitor = self.ValueVisitor(code)
for n in ast.walk(code):
if n.__class__.__name__ == "Call":
visitor.visit_Call(n)

self.references.update(visitor.var_references)
self.references.update(visitor.var_execs)
self.execs = visitor.direct_func_calls

pythonparsecache[h] = {}
pythonparsecache[h]["refs"] = self.references
pythonparsecache[h]["execs"] = self.execs

class ShellParser():
def __init__(self):
self.funcdefs = set()
self.allexecs = set()
self.execs = set()

def parse_shell(self, value):
"""Parse the supplied shell code in a string, returning the external
commands it executes.
"""

h = hash(str(value))

if h in shellparsecache:
self.execs = shellparsecache[h]["execs"]
return self.execs

try:
tokens, _ = pyshyacc.parse(value, eof=True, debug=False)
except pyshlex.NeedMore:
raise sherrors.ShellSyntaxError("Unexpected EOF")

for token in tokens:
self.process_tokens(token)
self.execs = set(cmd for cmd in self.allexecs if cmd not in self.funcdefs)

shellparsecache[h] = {}
shellparsecache[h]["execs"] = self.execs

return self.execs

def process_tokens(self, tokens):
"""Process a supplied portion of the syntax tree as returned by
pyshyacc.parse.
"""

def function_definition(value):
self.funcdefs.add(value.name)
return [value.body], None

def case_clause(value):
# Element 0 of each item in the case is the list of patterns, and
# Element 1 of each item in the case is the list of commands to be
# executed when that pattern matches.
words = chain(*[item[0] for item in value.items])
cmds = chain(*[item[1] for item in value.items])
return cmds, words

def if_clause(value):
main = chain(value.cond, value.if_cmds)
rest = value.else_cmds
if isinstance(rest, tuple) and rest[0] == "elif":
return chain(main, if_clause(rest[1]))
else:
return chain(main, rest)

def simple_command(value):
return None, chain(value.words, (assign[1] for assign in value.assigns))

token_handlers = {
"and_or": lambda x: ((x.left, x.right), None),
"async": lambda x: ([x], None),
"brace_group": lambda x: (x.cmds, None),
"for_clause": lambda x: (x.cmds, x.items),
"function_definition": function_definition,
"if_clause": lambda x: (if_clause(x), None),
"pipeline": lambda x: (x.commands, None),
"redirect_list": lambda x: ([x.cmd], None),
"subshell": lambda x: (x.cmds, None),
"while_clause": lambda x: (chain(x.condition, x.cmds), None),
"until_clause": lambda x: (chain(x.condition, x.cmds), None),
"simple_command": simple_command,
"case_clause": case_clause,
}

for token in tokens:
name, value = token
try:
more_tokens, words = token_handlers[name](value)
except KeyError:
raise NotImplementedError("Unsupported token type " + name)

if more_tokens:
self.process_tokens(more_tokens)

if words:
self.process_words(words)

def process_words(self, words):
"""Process a set of 'words' in pyshyacc parlance, which includes
extraction of executed commands from $() blocks, as well as grabbing
the command name argument.
"""

words = list(words)
for word in list(words):
wtree = pyshlex.make_wordtree(word[1])
for part in wtree:
if not isinstance(part, list):
continue

if part[0] in ('`', '$('):
command = pyshlex.wordtree_as_string(part[1:-1])
self.parse_shell(command)

if word[0] in ("cmd_name", "cmd_word"):
if word in words:
words.remove(word)

usetoken = False
for word in words:
if word[0] in ("cmd_name", "cmd_word") or \
(usetoken and word[0] == "TOKEN"):
if "=" in word[1]:
usetoken = True
continue

cmd = word[1]
if cmd.startswith("$"):
logger.debug(1, "Warning: execution of non-literal "
"command '%s'", cmd)
elif cmd == "eval":
command = " ".join(word for _, word in words[1:])
self.parse_shell(command)
else:
self.allexecs.add(cmd)
break
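As a usage sketch for the two parsers removed in this hunk (the Python snippet and shell fragment are arbitrary example inputs, not taken from the change):

    # Arbitrary example inputs; only the public entry points are exercised.
    pyparser = PythonParser()
    pyparser.parse_python("d.getVar('DEPENDS', True)")
    # pyparser.references now contains 'DEPENDS'

    shellparser = ShellParser()
    execs = shellparser.parse_shell("myfunc() { ls; }\nmyfunc; install -d /tmp/out")
    # locally defined functions are filtered out of the result
    assert "install" in execs and "myfunc" not in execs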
@@ -1,292 +0,0 @@
"""
BitBake 'Command' module

Provide an interface to interact with the bitbake server through 'commands'
"""

# Copyright (C) 2006-2007 Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

"""
The bitbake server takes 'commands' from its UI/commandline.
Commands are either synchronous or asynchronous.
Async commands return data to the client in the form of events.
Sync commands must only return data through the function return value
and must not trigger events, directly or indirectly.
Commands are queued in a CommandQueue
"""

import bb.event
import bb.cooker
import bb.data

async_cmds = {}
sync_cmds = {}

class CommandCompleted(bb.event.Event):
pass

class CommandExit(bb.event.Event):
def __init__(self, exitcode):
bb.event.Event.__init__(self)
self.exitcode = int(exitcode)

class CommandFailed(CommandExit):
def __init__(self, message):
self.error = message
CommandExit.__init__(self, 1)

class Command:
"""
A queue of asynchronous commands for bitbake
"""
def __init__(self, cooker):
self.cooker = cooker
self.cmds_sync = CommandsSync()
self.cmds_async = CommandsAsync()

# FIXME Add lock for this
self.currentAsyncCommand = None

for attr in CommandsSync.__dict__:
command = attr[:].lower()
method = getattr(CommandsSync, attr)
sync_cmds[command] = (method)

for attr in CommandsAsync.__dict__:
command = attr[:].lower()
method = getattr(CommandsAsync, attr)
async_cmds[command] = (method)

def runCommand(self, commandline):
try:
command = commandline.pop(0)
if command in CommandsSync.__dict__:
# Can run synchronous commands straight away
return getattr(CommandsSync, command)(self.cmds_sync, self, commandline)
if self.currentAsyncCommand is not None:
return "Busy (%s in progress)" % self.currentAsyncCommand[0]
if command not in CommandsAsync.__dict__:
return "No such command"
self.currentAsyncCommand = (command, commandline)
self.cooker.server.register_idle_function(self.cooker.runCommands, self.cooker)
return True
except:
import traceback
return traceback.format_exc()

def runAsyncCommand(self):
try:
if self.currentAsyncCommand is not None:
(command, options) = self.currentAsyncCommand
commandmethod = getattr(CommandsAsync, command)
needcache = getattr( commandmethod, "needcache" )
if (needcache and self.cooker.state in
(bb.cooker.state.initial, bb.cooker.state.parsing)):
self.cooker.updateCache()
return True
else:
commandmethod(self.cmds_async, self, options)
return False
else:
return False
except KeyboardInterrupt as exc:
self.finishAsyncCommand("Interrupted")
return False
except SystemExit as exc:
arg = exc.args[0]
if isinstance(arg, basestring):
self.finishAsyncCommand(arg)
else:
self.finishAsyncCommand("Exited with %s" % arg)
return False
except Exception:
import traceback
self.finishAsyncCommand(traceback.format_exc())
return False

def finishAsyncCommand(self, msg=None, code=None):
if msg:
bb.event.fire(CommandFailed(msg), self.cooker.configuration.event_data)
elif code:
bb.event.fire(CommandExit(code), self.cooker.configuration.event_data)
else:
bb.event.fire(CommandCompleted(), self.cooker.configuration.event_data)
self.currentAsyncCommand = None

class CommandsSync:
"""
A class of synchronous commands
These should run quickly so as not to hurt interactive performance.
These must not influence any running synchronous command.
"""

def stateShutdown(self, command, params):
"""
Trigger cooker 'shutdown' mode
"""
command.cooker.shutdown()

def stateStop(self, command, params):
"""
Stop the cooker
"""
command.cooker.stop()

def getCmdLineAction(self, command, params):
"""
Get any command parsed from the commandline
"""
return command.cooker.commandlineAction

def getVariable(self, command, params):
"""
Read the value of a variable from configuration.data
"""
varname = params[0]
expand = True
if len(params) > 1:
expand = params[1]

return bb.data.getVar(varname, command.cooker.configuration.data, expand)

def setVariable(self, command, params):
"""
Set the value of variable in configuration.data
"""
varname = params[0]
value = params[1]
bb.data.setVar(varname, value, command.cooker.configuration.data)

class CommandsAsync:
"""
A class of asynchronous commands
These functions communicate via generated events.
Any function that requires metadata parsing should be here.
"""

def buildFile(self, command, params):
"""
Build a single specified .bb file
"""
bfile = params[0]
task = params[1]

command.cooker.buildFile(bfile, task)
buildFile.needcache = False

def buildTargets(self, command, params):
"""
Build a set of targets
"""
pkgs_to_build = params[0]
task = params[1]

command.cooker.buildTargets(pkgs_to_build, task)
buildTargets.needcache = True

def generateDepTreeEvent(self, command, params):
"""
Generate an event containing the dependency information
"""
pkgs_to_build = params[0]
task = params[1]

command.cooker.generateDepTreeEvent(pkgs_to_build, task)
command.finishAsyncCommand()
generateDepTreeEvent.needcache = True

def generateDotGraph(self, command, params):
"""
Dump dependency information to disk as .dot files
"""
pkgs_to_build = params[0]
task = params[1]

command.cooker.generateDotGraphFiles(pkgs_to_build, task)
command.finishAsyncCommand()
generateDotGraph.needcache = True

def generateTargetsTree(self, command, params):
"""
Generate a tree of all buildable targets.
"""
klass = params[0]

command.cooker.generateTargetsTree(klass)
command.finishAsyncCommand()
generateTargetsTree.needcache = True

def findConfigFiles(self, command, params):
"""
Find config files which provide appropriate values
for the passed configuration variable. i.e. MACHINE
"""
varname = params[0]

command.cooker.findConfigFiles(varname)
command.finishAsyncCommand()
findConfigFiles.needcache = True

def showVersions(self, command, params):
"""
Show the currently selected versions
"""
command.cooker.showVersions()
command.finishAsyncCommand()
showVersions.needcache = True

def showEnvironmentTarget(self, command, params):
"""
Print the environment of a target recipe
(needs the cache to work out which recipe to use)
"""
pkg = params[0]

command.cooker.showEnvironment(None, pkg)
command.finishAsyncCommand()
showEnvironmentTarget.needcache = True

def showEnvironment(self, command, params):
"""
Print the standard environment
or if specified the environment for a specified recipe
"""
bfile = params[0]

command.cooker.showEnvironment(bfile)
command.finishAsyncCommand()
showEnvironment.needcache = False

def parseFiles(self, command, params):
"""
Parse the .bb files
"""
command.cooker.updateCache()
command.finishAsyncCommand()
parseFiles.needcache = True

def compareRevisions(self, command, params):
"""
Parse the .bb files
"""
if bb.fetch.fetcher_compare_revisions(command.cooker.configuration.data):
command.finishAsyncCommand(code=1)
else:
command.finishAsyncCommand()
compareRevisions.needcache = True

File diff suppressed because it is too large
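A hedged sketch of the sync/async dispatch removed above; 'cooker' stands in for an already-initialised bb.cooker instance (an assumption, not shown in the change):

    # 'cooker' is assumed to be a fully initialised bb.cooker instance.
    cmd = Command(cooker)
    machine = cmd.runCommand(["getVariable", "MACHINE"])
    # synchronous: the value is returned directly
    started = cmd.runCommand(["buildTargets", ["core-image-minimal"], "build"])
    # asynchronous: returns True immediately; completion arrives later as a
    # CommandCompleted/CommandFailed event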
@@ -1,190 +0,0 @@
"""
Python Deamonizing helper

Configurable daemon behaviors:

1.) The current working directory set to the "/" directory.
2.) The current file creation mode mask set to 0.
3.) Close all open files (1024).
4.) Redirect standard I/O streams to "/dev/null".

A failed call to fork() now raises an exception.

References:
1) Advanced Programming in the Unix Environment: W. Richard Stevens
2) Unix Programming Frequently Asked Questions:
http://www.erlenstar.demon.co.uk/unix/faq_toc.html

Modified to allow a function to be daemonized and return for
bitbake use by Richard Purdie
"""

__author__ = "Chad J. Schroeder"
__copyright__ = "Copyright (C) 2005 Chad J. Schroeder"
__version__ = "0.2"

# Standard Python modules.
import os # Miscellaneous OS interfaces.
import sys # System-specific parameters and functions.

# Default daemon parameters.
# File mode creation mask of the daemon.
# For BitBake's children, we do want to inherit the parent umask.
UMASK = None

# Default maximum for the number of available file descriptors.
MAXFD = 1024

# The standard I/O file descriptors are redirected to /dev/null by default.
if (hasattr(os, "devnull")):
REDIRECT_TO = os.devnull
else:
REDIRECT_TO = "/dev/null"

def createDaemon(function, logfile):
"""
Detach a process from the controlling terminal and run it in the
background as a daemon, returning control to the caller.
"""

try:
# Fork a child process so the parent can exit. This returns control to
# the command-line or shell. It also guarantees that the child will not
# be a process group leader, since the child receives a new process ID
# and inherits the parent's process group ID. This step is required
# to insure that the next call to os.setsid is successful.
pid = os.fork()
except OSError as e:
raise Exception("%s [%d]" % (e.strerror, e.errno))

if (pid == 0): # The first child.
# To become the session leader of this new session and the process group
# leader of the new process group, we call os.setsid(). The process is
# also guaranteed not to have a controlling terminal.
os.setsid()

# Is ignoring SIGHUP necessary?
#
# It's often suggested that the SIGHUP signal should be ignored before
# the second fork to avoid premature termination of the process. The
# reason is that when the first child terminates, all processes, e.g.
# the second child, in the orphaned group will be sent a SIGHUP.
#
# "However, as part of the session management system, there are exactly
# two cases where SIGHUP is sent on the death of a process:
#
# 1) When the process that dies is the session leader of a session that
# is attached to a terminal device, SIGHUP is sent to all processes
# in the foreground process group of that terminal device.
# 2) When the death of a process causes a process group to become
# orphaned, and one or more processes in the orphaned group are
# stopped, then SIGHUP and SIGCONT are sent to all members of the
# orphaned group." [2]
#
# The first case can be ignored since the child is guaranteed not to have
# a controlling terminal. The second case isn't so easy to dismiss.
# The process group is orphaned when the first child terminates and
# POSIX.1 requires that every STOPPED process in an orphaned process
# group be sent a SIGHUP signal followed by a SIGCONT signal. Since the
# second child is not STOPPED though, we can safely forego ignoring the
# SIGHUP signal. In any case, there are no ill-effects if it is ignored.
#
# import signal # Set handlers for asynchronous events.
# signal.signal(signal.SIGHUP, signal.SIG_IGN)

try:
# Fork a second child and exit immediately to prevent zombies. This
# causes the second child process to be orphaned, making the init
# process responsible for its cleanup. And, since the first child is
# a session leader without a controlling terminal, it's possible for
# it to acquire one by opening a terminal in the future (System V-
# based systems). This second fork guarantees that the child is no
# longer a session leader, preventing the daemon from ever acquiring
# a controlling terminal.
pid = os.fork() # Fork a second child.
except OSError as e:
raise Exception("%s [%d]" % (e.strerror, e.errno))

if (pid == 0): # The second child.
# We probably don't want the file mode creation mask inherited from
# the parent, so we give the child complete control over permissions.
if UMASK is not None:
os.umask(UMASK)
else:
# Parent (the first child) of the second child.
os._exit(0)
else:
# exit() or _exit()?
# _exit is like exit(), but it doesn't call any functions registered
# with atexit (and on_exit) or any registered signal handlers. It also
# closes any open file descriptors. Using exit() may cause all stdio
# streams to be flushed twice and any temporary files may be unexpectedly
# removed. It's therefore recommended that child branches of a fork()
# and the parent branch(es) of a daemon use _exit().
return

# Close all open file descriptors. This prevents the child from keeping
# open any file descriptors inherited from the parent. There is a variety
# of methods to accomplish this task. Three are listed below.
#
# Try the system configuration variable, SC_OPEN_MAX, to obtain the maximum
# number of open file descriptors to close. If it doesn't exists, use
# the default value (configurable).
#
# try:
# maxfd = os.sysconf("SC_OPEN_MAX")
# except (AttributeError, ValueError):
# maxfd = MAXFD
#
# OR
#
# if (os.sysconf_names.has_key("SC_OPEN_MAX")):
# maxfd = os.sysconf("SC_OPEN_MAX")
# else:
# maxfd = MAXFD
#
# OR
#
# Use the getrlimit method to retrieve the maximum file descriptor number
# that can be opened by this process. If there is not limit on the
# resource, use the default value.
#
import resource # Resource usage information.
maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
if (maxfd == resource.RLIM_INFINITY):
maxfd = MAXFD

# Iterate through and close all file descriptors.
# for fd in range(0, maxfd):
# try:
# os.close(fd)
# except OSError: # ERROR, fd wasn't open to begin with (ignored)
# pass

# Redirect the standard I/O file descriptors to the specified file. Since
# the daemon has no controlling terminal, most daemons redirect stdin,
# stdout, and stderr to /dev/null. This is done to prevent side-effects
# from reads and writes to the standard I/O file descriptors.

# This call to open is guaranteed to return the lowest file descriptor,
# which will be 0 (stdin), since it was closed above.
# os.open(REDIRECT_TO, os.O_RDWR) # standard input (0)

# Duplicate standard input to standard output and standard error.
# os.dup2(0, 1) # standard output (1)
# os.dup2(0, 2) # standard error (2)

si = file('/dev/null', 'r')
so = file(logfile, 'w')
se = so

# Replace those fds with our own
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())

function()

os._exit(0)
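An illustrative call of the helper removed above; the worker function and log path are invented to show the calling convention only:

    # Invented worker and log path, purely for illustration.
    def worker():
        print "daemon is running"  # stdout/stderr now point at the logfile
    createDaemon(worker, "/tmp/bb-daemon.log")
    # createDaemon() returns in the parent; worker() runs in the re-parented,
    # double-forked child with its standard streams redirected.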
@@ -11,7 +11,7 @@ operations. At night the cookie monster came by and
suggested 'give me cookies on setting the variables and
things will work out'. Taking this suggestion into account
applying the skills from the not yet passed 'Entwurf und
Analyse von Algorithmen' lecture and the cookie
Analyse von Algorithmen' lecture and the cookie
monster seems to be right. We will track setVar more carefully
to have faster update_data and expandKeys operations.

@@ -37,41 +37,36 @@ the speed is more critical here.
#
#Based on functions from the base bb module, Copyright 2003 Holger Schurig

import sys, os, re
import sys, os, re, time, types
if sys.argv[0][-5:] == "pydoc":
path = os.path.dirname(os.path.dirname(sys.argv[1]))
else:
path = os.path.dirname(os.path.dirname(sys.argv[0]))
sys.path.insert(0, path)
from itertools import groupby
sys.path.insert(0,path)

from bb import data_smart
from bb import codeparser
import bb

_dict_type = data_smart.DataSmart

def init():
"""Return a new object representing the Bitbake data"""
return _dict_type()

def init_db(parent = None):
"""Return a new object representing the Bitbake data,
optionally based on an existing object"""
if parent:
return parent.createCopy()
else:
return _dict_type()

def createCopy(source):
"""Link the source set to the destination
If one does not find the value in the destination set,
search will go on to the source set to get the value.
Value from source are copy-on-write. i.e. any try to
modify one of them will end up putting the modified value
in the destination set.
"""
return source.createCopy()
"""Link the source set to the destination
If one does not find the value in the destination set,
search will go on to the source set to get the value.
Value from source are copy-on-write. i.e. any try to
modify one of them will end up putting the modified value
in the destination set.
"""
return source.createCopy()

def initVar(var, d):
"""Non-destructive var init for data structure"""
@@ -79,34 +74,91 @@ def initVar(var, d):

def setVar(var, value, d):
"""Set a variable to a given value"""
d.setVar(var, value)
"""Set a variable to a given value

Example:
>>> d = init()
>>> setVar('TEST', 'testcontents', d)
>>> print getVar('TEST', d)
testcontents
"""
d.setVar(var,value)

def getVar(var, d, exp = 0):
"""Gets the value of a variable"""
return d.getVar(var, exp)
"""Gets the value of a variable

Example:
>>> d = init()
>>> setVar('TEST', 'testcontents', d)
>>> print getVar('TEST', d)
testcontents
"""
return d.getVar(var,exp)

def renameVar(key, newkey, d):
"""Renames a variable from key to newkey"""
"""Renames a variable from key to newkey

Example:
>>> d = init()
>>> setVar('TEST', 'testcontents', d)
>>> renameVar('TEST', 'TEST2', d)
>>> print getVar('TEST2', d)
testcontents
"""
d.renameVar(key, newkey)

def delVar(var, d):
"""Removes a variable from the data set"""
"""Removes a variable from the data set

Example:
>>> d = init()
>>> setVar('TEST', 'testcontents', d)
>>> print getVar('TEST', d)
testcontents
>>> delVar('TEST', d)
>>> print getVar('TEST', d)
None
"""
d.delVar(var)

def setVarFlag(var, flag, flagvalue, d):
"""Set a flag for a given variable to a given value"""
d.setVarFlag(var, flag, flagvalue)
"""Set a flag for a given variable to a given value

Example:
>>> d = init()
>>> setVarFlag('TEST', 'python', 1, d)
>>> print getVarFlag('TEST', 'python', d)
1
"""
d.setVarFlag(var,flag,flagvalue)

def getVarFlag(var, flag, d):
"""Gets given flag from given var"""
return d.getVarFlag(var, flag)
"""Gets given flag from given var

Example:
>>> d = init()
>>> setVarFlag('TEST', 'python', 1, d)
>>> print getVarFlag('TEST', 'python', d)
1
"""
return d.getVarFlag(var,flag)

def delVarFlag(var, flag, d):
"""Removes a given flag from the variable's flags"""
d.delVarFlag(var, flag)
"""Removes a given flag from the variable's flags

Example:
>>> d = init()
>>> setVarFlag('TEST', 'testflag', 1, d)
>>> print getVarFlag('TEST', 'testflag', d)
1
>>> delVarFlag('TEST', 'testflag', d)
>>> print getVarFlag('TEST', 'testflag', d)
None

"""
d.delVarFlag(var,flag)

def setVarFlags(var, flags, d):
"""Set the flags for a given variable
@@ -115,27 +167,115 @@ def setVarFlags(var, flags, d):
setVarFlags will not clear previous
flags. Think of this method as
addVarFlags

Example:
>>> d = init()
>>> myflags = {}
>>> myflags['test'] = 'blah'
>>> setVarFlags('TEST', myflags, d)
>>> print getVarFlag('TEST', 'test', d)
blah
"""
d.setVarFlags(var, flags)
d.setVarFlags(var,flags)

def getVarFlags(var, d):
"""Gets a variable's flags"""
"""Gets a variable's flags

Example:
>>> d = init()
>>> setVarFlag('TEST', 'test', 'blah', d)
>>> print getVarFlags('TEST', d)['test']
blah
"""
return d.getVarFlags(var)

def delVarFlags(var, d):
"""Removes a variable's flags"""
"""Removes a variable's flags

Example:
>>> data = init()
>>> setVarFlag('TEST', 'testflag', 1, data)
>>> print getVarFlag('TEST', 'testflag', data)
1
>>> delVarFlags('TEST', data)
>>> print getVarFlags('TEST', data)
None

"""
d.delVarFlags(var)

def keys(d):
"""Return a list of keys in d"""
"""Return a list of keys in d

Example:
>>> d = init()
>>> setVar('TEST', 1, d)
>>> setVar('MOO' , 2, d)
>>> setVarFlag('TEST', 'test', 1, d)
>>> keys(d)
['TEST', 'MOO']
"""
return d.keys()

def getData(d):
"""Returns the data object used"""
return d

def setData(newData, d):
"""Sets the data object to the supplied value"""
d = newData

##
## Cookie Monsters' query functions
##
def _get_override_vars(d, override):
"""
Internal!!!

Get the Names of Variables that have a specific
override. This function returns a iterable
Set or an empty list
"""
return []

def _get_var_flags_triple(d):
"""
Internal!!!

"""
return []

__expand_var_regexp__ = re.compile(r"\${[^{}]+}")
__expand_python_regexp__ = re.compile(r"\${@.+?}")

def expand(s, d, varname = None):
"""Variable expansion using the data store"""
"""Variable expansion using the data store.

Example:
Standard expansion:
>>> d = init()
>>> setVar('A', 'sshd', d)
>>> print expand('/usr/bin/${A}', d)
/usr/bin/sshd

Python expansion:
>>> d = init()
>>> print expand('result: ${@37 * 72}', d)
result: 2664

Shell expansion:
>>> d = init()
>>> print expand('${TARGET_MOO}', d)
${TARGET_MOO}
>>> setVar('TARGET_MOO', 'yupp', d)
>>> print expand('${TARGET_MOO}',d)
yupp
>>> setVar('SRC_URI', 'http://somebug.${TARGET_MOO}', d)
>>> delVar('TARGET_MOO', d)
>>> print expand('${SRC_URI}', d)
http://somebug.${TARGET_MOO}
"""
return d.expand(s, varname)

def expandKeys(alterdata, readdata = None):
@@ -152,23 +292,53 @@ def expandKeys(alterdata, readdata = None):
continue
todolist[key] = ekey

# These two for loops are split for performance to maximise the
# These two for loops are split for performance to maximise the
# usefulness of the expand cache

for key in todolist:
ekey = todolist[key]
renameVar(key, ekey, alterdata)

def expandData(alterdata, readdata = None):
"""For each variable in alterdata, expand it, and update the var contents.
Replacements use data from readdata.

Example:
>>> a=init()
>>> b=init()
>>> setVar("dlmsg", "dl_dir is ${DL_DIR}", a)
>>> setVar("DL_DIR", "/path/to/whatever", b)
>>> expandData(a, b)
>>> print getVar("dlmsg", a)
dl_dir is /path/to/whatever
"""
if readdata == None:
readdata = alterdata

for key in keys(alterdata):
val = getVar(key, alterdata)
if type(val) is not types.StringType:
continue
expanded = expand(val, readdata)
# print "key is %s, val is %s, expanded is %s" % (key, val, expanded)
if val != expanded:
setVar(key, expanded, alterdata)

import os

def inheritFromOS(d):
"""Inherit variables from the environment."""
exportlist = bb.utils.preserved_envvars_exported()
# fakeroot needs to be able to set these
non_inherit_vars = [ "LD_LIBRARY_PATH", "LD_PRELOAD" ]
for s in os.environ.keys():
try:
setVar(s, os.environ[s], d)
if s in exportlist:
setVarFlag(s, "export", True, d)
except TypeError:
pass
if not s in non_inherit_vars:
try:
setVar(s, os.environ[s], d)
setVarFlag(s, 'matchesenv', '1', d)
except TypeError:
pass

import sys

def emit_var(var, o=sys.__stdout__, d = init(), all=False):
"""Emit a variable to be sourced by a shell."""
@@ -185,15 +355,20 @@ def emit_var(var, o=sys.__stdout__, d = init(), all=False):
if all:
oval = getVar(var, d, 0)
val = getVar(var, d, 1)
except (KeyboardInterrupt, bb.build.FuncFailed):
except KeyboardInterrupt:
raise
except Exception, exc:
o.write('# expansion of %s threw %s: %s\n' % (var, exc.__class__.__name__, str(exc)))
except:
excname = str(sys.exc_info()[0])
if excname == "bb.build.FuncFailed":
raise
o.write('# expansion of %s threw %s\n' % (var, excname))
return 0

if all:
commentVal = re.sub('\n', '\n#', str(oval))
o.write('# %s=%s\n' % (var, commentVal))
o.write('# %s=%s\n' % (var, oval))

if type(val) is not types.StringType:
return 0

if (var.find("-") != -1 or var.find(".") != -1 or var.find('{') != -1 or var.find('}') != -1 or var.find('+') != -1) and not all:
return 0
@@ -202,13 +377,15 @@ def emit_var(var, o=sys.__stdout__, d = init(), all=False):

if unexport:
o.write('unset %s\n' % varExpanded)
return 1

if getVarFlag(var, 'matchesenv', d):
return 0

val.rstrip()
if not val:
return 0

val = str(val)

if func:
# NOTE: should probably check for unbalanced {} within the var
o.write("%s() {\n%s\n}\n" % (varExpanded, val))
@@ -220,119 +397,174 @@ def emit_var(var, o=sys.__stdout__, d = init(), all=False):
|
||||
# if we're going to output this within doublequotes,
|
||||
# to a shell, we need to escape the quotes in the var
|
||||
alter = re.sub('"', '\\"', val.strip())
|
||||
alter = re.sub('\n', ' \\\n', alter)
|
||||
o.write('%s="%s"\n' % (varExpanded, alter))
|
||||
return 0
|
||||
return 1
|
||||
|
||||
|
||||
def emit_env(o=sys.__stdout__, d = init(), all=False):
|
||||
"""Emits all items in the data store in a format such that it can be sourced by a shell."""
|
||||
|
||||
isfunc = lambda key: bool(d.getVarFlag(key, "func"))
|
||||
keys = sorted((key for key in d.keys() if not key.startswith("__")), key=isfunc)
|
||||
grouped = groupby(keys, isfunc)
|
||||
for isfunc, keys in grouped:
|
||||
for key in keys:
|
||||
emit_var(key, o, d, all and not isfunc) and o.write('\n')
|
||||
env = keys(d)
|
||||
|
||||
def export_vars(d):
|
||||
keys = (key for key in d.keys() if d.getVarFlag(key, "export"))
|
||||
ret = {}
|
||||
for k in keys:
|
||||
try:
|
||||
v = d.getVar(k, True)
|
||||
if v:
|
||||
ret[k] = v
|
||||
except (KeyboardInterrupt, bb.build.FuncFailed):
|
||||
raise
|
||||
except Exception, exc:
|
||||
pass
|
||||
return ret
|
||||
for e in env:
|
||||
if getVarFlag(e, "func", d):
|
||||
continue
|
||||
emit_var(e, o, d, all) and o.write('\n')
|
||||
|
||||
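A minimal usage sketch for emit_env above, assuming a tree where bb.data is
importable; the variable name and the exact output shown are illustrative,
not taken from the diff:

    # Render the datastore as shell-sourceable assignments into a buffer.
    import StringIO
    import bb.data

    d = bb.data.init()
    bb.data.setVar('FOO', 'bar', d)

    buf = StringIO.StringIO()
    bb.data.emit_env(buf, d)
    print buf.getvalue()      # expect something like: FOO="bar"
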
def export_envvars(v, d):
for s in os.environ.keys():
if s not in v:
v[s] = os.environ[s]
return v

def emit_func(func, o=sys.__stdout__, d = init()):
"""Emits all items in the data store in a format such that it can be sourced by a shell."""

keys = (key for key in d.keys() if not key.startswith("__") and not d.getVarFlag(key, "func"))
for key in keys:
emit_var(key, o, d, False) and o.write('\n')

emit_var(func, o, d, False) and o.write('\n')
newdeps = bb.codeparser.ShellParser().parse_shell(d.getVar(func, True))
seen = set()
while newdeps:
deps = newdeps
seen |= deps
newdeps = set()
for dep in deps:
if bb.data.getVarFlag(dep, "func", d):
emit_var(dep, o, d, False) and o.write('\n')
newdeps |= bb.codeparser.ShellParser().parse_shell(d.getVar(dep, True))
newdeps -= seen
for e in env:
if not getVarFlag(e, "func", d):
continue
emit_var(e, o, d) and o.write('\n')

def update_data(d):
"""Performs final steps upon the datastore, including application of overrides"""
d.finalize()
"""Modifies the environment vars according to local overrides and commands.
Examples:
Appending to a variable:
>>> d = init()
>>> setVar('TEST', 'this is a', d)
>>> setVar('TEST_append', ' test', d)
>>> setVar('TEST_append', ' of the emergency broadcast system.', d)
>>> update_data(d)
>>> print getVar('TEST', d)
this is a test of the emergency broadcast system.

def build_dependencies(key, keys, shelldeps, d):
deps = set()
try:
if d.getVarFlag(key, "func"):
if d.getVarFlag(key, "python"):
parsedvar = d.expandWithRefs(d.getVar(key, False), key)
parser = bb.codeparser.PythonParser()
parser.parse_python(parsedvar.value)
deps = deps | parser.references
else:
parsedvar = d.expandWithRefs(d.getVar(key, False), key)
parser = bb.codeparser.ShellParser()
parser.parse_shell(parsedvar.value)
deps = deps | shelldeps
deps = deps | parsedvar.references
deps = deps | (keys & parser.execs) | (keys & parsedvar.execs)
else:
parser = d.expandWithRefs(d.getVar(key, False), key)
deps |= parser.references
deps = deps | (keys & parser.execs)
deps |= set((d.getVarFlag(key, "vardeps", True) or "").split())
deps -= set((d.getVarFlag(key, "vardepsexclude", True) or "").split())
except:
bb.note("Error expanding variable %s" % key)
raise
return deps
#bb.note("Variable %s references %s and calls %s" % (key, str(deps), str(execs)))
#d.setVarFlag(key, "vardeps", deps)
Prepending to a variable:
>>> setVar('TEST', 'virtual/libc', d)
>>> setVar('TEST_prepend', 'virtual/tmake ', d)
>>> setVar('TEST_prepend', 'virtual/patcher ', d)
>>> update_data(d)
>>> print getVar('TEST', d)
virtual/patcher virtual/tmake virtual/libc

def generate_dependencies(d):
Overrides:
>>> setVar('TEST_arm', 'target', d)
>>> setVar('TEST_ramses', 'machine', d)
>>> setVar('TEST_local', 'local', d)
>>> setVar('OVERRIDES', 'arm', d)

keys = set(key for key in d.keys() if not key.startswith("__"))
shelldeps = set(key for key in keys if d.getVarFlag(key, "export") and not d.getVarFlag(key, "unexport"))
>>> setVar('TEST', 'original', d)
>>> update_data(d)
>>> print getVar('TEST', d)
target

deps = {}
>>> setVar('OVERRIDES', 'arm:ramses:local', d)
>>> setVar('TEST', 'original', d)
>>> update_data(d)
>>> print getVar('TEST', d)
local

CopyMonster:
>>> e = d.createCopy()
>>> setVar('TEST_foo', 'foo', e)
>>> update_data(e)
>>> print getVar('TEST', e)
local

>>> setVar('OVERRIDES', 'arm:ramses:local:foo', e)
>>> update_data(e)
>>> print getVar('TEST', e)
foo

>>> f = d.createCopy()
>>> setVar('TEST_moo', 'something', f)
>>> setVar('OVERRIDES', 'moo:arm:ramses:local:foo', e)
>>> update_data(e)
>>> print getVar('TEST', e)
foo


>>> h = init()
>>> setVar('SRC_URI', 'file://append.foo;patch=1 ', h)
>>> g = h.createCopy()
>>> setVar('SRC_URI_append_arm', 'file://other.foo;patch=1', g)
>>> setVar('OVERRIDES', 'arm:moo', g)
>>> update_data(g)
>>> print getVar('SRC_URI', g)
file://append.foo;patch=1 file://other.foo;patch=1

"""
bb.msg.debug(2, bb.msg.domain.Data, "update_data()")

# now ask the cookie monster for help
#print "Cookie Monster"
#print "Append/Prepend %s" % d._special_values
#print "Overrides %s" % d._seen_overrides

overrides = (getVar('OVERRIDES', d, 1) or "").split(':') or []

#
# Well let us see what breaks here. We used to iterate
# over each variable and apply the override and then
# do the line expanding.
# If we have bad luck - which we will have - the keys
# where in some order that is so important for this
# method which we don't have anymore.
# Anyway we will fix that and write test cases this
# time.

#
# First we apply all overrides
# Then we will handle _append and _prepend
#

for o in overrides:
# calculate '_'+override
l = len(o)+1

# see if one should even try
if not d._seen_overrides.has_key(o):
continue

vars = d._seen_overrides[o]
for var in vars:
name = var[:-l]
try:
d[name] = d[var]
except:
bb.msg.note(1, bb.msg.domain.Data, "Untracked delVar")

# now on to the appends and prepends
if d._special_values.has_key('_append'):
appends = d._special_values['_append'] or []
for append in appends:
for (a, o) in getVarFlag(append, '_append', d) or []:
# maybe the OVERRIDE was not yet added so keep the append
if (o and o in overrides) or not o:
delVarFlag(append, '_append', d)
if o and not o in overrides:
continue

sval = getVar(append,d) or ""
sval+=a
setVar(append, sval, d)


if d._special_values.has_key('_prepend'):
prepends = d._special_values['_prepend'] or []

for prepend in prepends:
for (a, o) in getVarFlag(prepend, '_prepend', d) or []:
# maybe the OVERRIDE was not yet added so keep the prepend
if (o and o in overrides) or not o:
delVarFlag(prepend, '_prepend', d)
if o and not o in overrides:
continue

sval = a + (getVar(prepend,d) or "")
setVar(prepend, sval, d)

tasklist = bb.data.getVar('__BBTASKS', d) or []
for task in tasklist:
deps[task] = build_dependencies(task, keys, shelldeps, d)
newdeps = deps[task]
seen = set()
while newdeps:
nextdeps = newdeps
seen |= nextdeps
newdeps = set()
for dep in nextdeps:
if dep not in deps:
deps[dep] = build_dependencies(dep, keys, shelldeps, d)
newdeps |= deps[dep]
newdeps -= seen
#print "For %s: %s" % (task, str(taskdeps[task]))
return tasklist, deps

def inherits_class(klass, d):
val = getVar('__inherit_cache', d) or []
if os.path.join('classes', '%s.bbclass' % klass) in val:
return True
return False

def _test():
"""Start a doctest run on this module"""
import doctest
from bb import data
doctest.testmod(data)

if __name__ == "__main__":
_test()

@@ -28,80 +28,21 @@ BitBake build tools.
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# Based on functions from the base bb module, Copyright 2003 Holger Schurig

import copy, re
from collections import MutableMapping
import logging
import bb, bb.codeparser
from bb import utils
from bb.COW import COWDictBase
import copy, os, re, sys, time, types
import bb
from bb import utils, methodpool
from COW import COWDictBase
from sets import Set
from new import classobj

logger = logging.getLogger("BitBake.Data")

__setvar_keyword__ = ["_append", "_prepend"]
__setvar_keyword__ = ["_append","_prepend"]
__setvar_regexp__ = re.compile('(?P<base>.*?)(?P<keyword>_append|_prepend)(_(?P<add>.*))?')
__expand_var_regexp__ = re.compile(r"\${[^{}]+}")
__expand_python_regexp__ = re.compile(r"\${@.+?}")


class VariableParse:
def __init__(self, varname, d, val = None):
self.varname = varname
self.d = d
self.value = val

self.references = set()
self.execs = set()

def var_sub(self, match):
key = match.group()[2:-1]
if self.varname and key:
if self.varname == key:
raise Exception("variable %s references itself!" % self.varname)
var = self.d.getVar(key, 1)
if var is not None:
self.references.add(key)
return var
else:
return match.group()

def python_sub(self, match):
code = match.group()[3:-1]
codeobj = compile(code.strip(), self.varname or "<expansion>", "eval")

parser = bb.codeparser.PythonParser()
parser.parse_python(code)
self.references |= parser.references
self.execs |= parser.execs

value = utils.better_eval(codeobj, DataContext(self.d))
return str(value)


class DataContext(dict):
def __init__(self, metadata, **kwargs):
self.metadata = metadata
dict.__init__(self, **kwargs)
self['d'] = metadata

def __missing__(self, key):
value = self.metadata.getVar(key, True)
if value is None or self.metadata.getVarFlag(key, 'func'):
raise KeyError(key)
else:
return value

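A doctest-style sketch of the ${@...} python expansion path handled by
python_sub and DataContext above, mirroring the module-level doctests; the
variable name is invented:

    >>> d = init()
    >>> setVar('COUNT', '3', d)
    >>> print expand('${@int(d.getVar("COUNT", 1)) * 2}', d)
    6
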
class ExpansionError(Exception):
def __init__(self, varname, expression, exception):
self.expression = expression
self.variablename = varname
self.exception = exception
self.msg = "Failure expanding variable %s, expression was %s which triggered exception %s: %s" % (varname, expression, type(exception).__name__, exception)
Exception.__init__(self, self.msg)
self.args = (varname, expression, exception)
def __str__(self):
return self.msg

class DataSmart(MutableMapping):
class DataSmart:
def __init__(self, special = COWDictBase.copy(), seen = COWDictBase.copy() ):
self.dict = {}

@@ -111,114 +52,68 @@ class DataSmart(MutableMapping):

self.expand_cache = {}

def expandWithRefs(self, s, varname):
def expand(self,s, varname):
def var_sub(match):
key = match.group()[2:-1]
if varname and key:
if varname == key:
raise Exception("variable %s references itself!" % varname)
var = self.getVar(key, 1)
if var is not None:
return var
else:
return match.group()

if not isinstance(s, basestring): # sanity check
return VariableParse(varname, self, s)
def python_sub(match):
import bb
code = match.group()[3:-1]
locals()['d'] = self
s = eval(code)
if type(s) == types.IntType: s = str(s)
return s

if type(s) is not types.StringType: # sanity check
return s

if varname and varname in self.expand_cache:
return self.expand_cache[varname]

varparse = VariableParse(varname, self)

while s.find('${') != -1:
olds = s
try:
s = __expand_var_regexp__.sub(varparse.var_sub, s)
s = __expand_python_regexp__.sub(varparse.python_sub, s)
if s == olds:
break
except ExpansionError:
s = __expand_var_regexp__.sub(var_sub, s)
s = __expand_python_regexp__.sub(python_sub, s)
if s == olds: break
if type(s) is not types.StringType: # sanity check
bb.msg.error(bb.msg.domain.Data, 'expansion of %s returned non-string %s' % (olds, s))
except KeyboardInterrupt:
raise
except:
bb.msg.note(1, bb.msg.domain.Data, "%s:%s while evaluating:\n%s" % (sys.exc_info()[0], sys.exc_info()[1], s))
raise
except Exception as exc:
raise ExpansionError(varname, s, exc)

varparse.value = s

if varname:
self.expand_cache[varname] = varparse
self.expand_cache[varname] = s

return varparse

def expand(self, s, varname):
return self.expandWithRefs(s, varname).value


def finalize(self):
"""Performs final steps upon the datastore, including application of overrides"""

overrides = (self.getVar("OVERRIDES", True) or "").split(":") or []

#
# Well let us see what breaks here. We used to iterate
# over each variable and apply the override and then
# do the line expanding.
# If we have bad luck - which we will have - the keys
# where in some order that is so important for this
# method which we don't have anymore.
# Anyway we will fix that and write test cases this
# time.

#
# First we apply all overrides
# Then we will handle _append and _prepend
#

for o in overrides:
# calculate '_'+override
l = len(o) + 1

# see if one should even try
if o not in self._seen_overrides:
continue

vars = self._seen_overrides[o]
for var in vars:
name = var[:-l]
try:
self.setVar(name, self.getVar(var, False))
except Exception:
logger.info("Untracked delVar")

# now on to the appends and prepends
for op in __setvar_keyword__:
if op in self._special_values:
appends = self._special_values[op] or []
for append in appends:
keep = []
for (a, o) in self.getVarFlag(append, op) or []:
if o and not o in overrides:
keep.append((a ,o))
continue

if op is "_append":
sval = self.getVar(append, False) or ""
sval += a
self.setVar(append, sval)
elif op is "_prepend":
sval = a + (self.getVar(append, False) or "")
self.setVar(append, sval)

# We save overrides that may be applied at some later stage
if keep:
self.setVarFlag(append, op, keep)
else:
self.delVarFlag(append, op)
return s

def initVar(self, var):
self.expand_cache = {}
if not var in self.dict:
self.dict[var] = {}

def _findVar(self, var):
dest = self.dict
while dest:
if var in dest:
return dest[var]
def _findVar(self,var):
_dest = self.dict

if "_data" not in dest:
while (_dest and var not in _dest):
if not "_data" in _dest:
_dest = None
break
dest = dest["_data"]
_dest = _dest["_data"]

if _dest and var in _dest:
return _dest[var]
return None

def _makeShadowCopy(self, var):
if var in self.dict:
@@ -231,7 +126,7 @@ class DataSmart(MutableMapping):
else:
self.initVar(var)

def setVar(self, var, value):
def setVar(self,var,value):
self.expand_cache = {}
match = __setvar_regexp__.match(var)
if match and match.group("keyword") in __setvar_keyword__:
@@ -246,77 +141,74 @@ class DataSmart(MutableMapping):
# pay the cookie monster
try:
self._special_values[keyword].add( base )
except KeyError:
self._special_values[keyword] = set()
except:
self._special_values[keyword] = Set()
self._special_values[keyword].add( base )

return

if not var in self.dict:
self._makeShadowCopy(var)
if self.getVarFlag(var, 'matchesenv'):
self.delVarFlag(var, 'matchesenv')
self.setVarFlag(var, 'export', 1)

# more cookies for the cookie monster
if '_' in var:
override = var[var.rfind('_')+1:]
if override not in self._seen_overrides:
self._seen_overrides[override] = set()
if not self._seen_overrides.has_key(override):
self._seen_overrides[override] = Set()
self._seen_overrides[override].add( var )

# setting var
self.dict[var]["content"] = value

def getVar(self, var, exp):
value = self.getVarFlag(var, "content")
def getVar(self,var,exp):
value = self.getVarFlag(var,"content")

if exp and value:
return self.expand(value, var)
return self.expand(value,var)
return value

def renameVar(self, key, newkey):
"""
Rename the variable key to newkey
Rename the variable key to newkey
"""
val = self.getVar(key, 0)
if val is not None:
self.setVar(newkey, val)
if val is None:
return

self.setVar(newkey, val)

for i in ('_append', '_prepend'):
src = self.getVarFlag(key, i)
if src is None:
continue

dest = self.getVarFlag(newkey, i) or []
src = self.getVarFlag(key, i) or []
dest.extend(src)
self.setVarFlag(newkey, i, dest)

if i in self._special_values and key in self._special_values[i]:

if self._special_values.has_key(i) and key in self._special_values[i]:
self._special_values[i].remove(key)
self._special_values[i].add(newkey)

self.delVar(key)

def delVar(self, var):
def delVar(self,var):
self.expand_cache = {}
self.dict[var] = {}

def setVarFlag(self, var, flag, flagvalue):
def setVarFlag(self,var,flag,flagvalue):
if not var in self.dict:
self._makeShadowCopy(var)
self.dict[var][flag] = flagvalue

def getVarFlag(self, var, flag, expand=False):
def getVarFlag(self,var,flag):
local_var = self._findVar(var)
value = None
if local_var:
if flag in local_var:
value = copy.copy(local_var[flag])
elif flag == "content" and "defaultval" in local_var:
value = copy.copy(local_var["defaultval"])
if expand and value:
value = self.expand(value, None)
return value
return copy.copy(local_var[flag])
return None

def delVarFlag(self, var, flag):
def delVarFlag(self,var,flag):
local_var = self._findVar(var)
if not local_var:
return
@@ -326,31 +218,31 @@ class DataSmart(MutableMapping):
if var in self.dict and flag in self.dict[var]:
del self.dict[var][flag]

def setVarFlags(self, var, flags):
def setVarFlags(self,var,flags):
if not var in self.dict:
self._makeShadowCopy(var)

for i in flags:
for i in flags.keys():
if i == "content":
continue
self.dict[var][i] = flags[i]

def getVarFlags(self, var):
def getVarFlags(self,var):
local_var = self._findVar(var)
flags = {}

if local_var:
for i in local_var:
for i in self.dict[var].keys():
if i == "content":
continue
flags[i] = local_var[i]
flags[i] = self.dict[var][i]

if len(flags) == 0:
return None
return flags


def delVarFlags(self, var):
def delVarFlags(self,var):
if not var in self.dict:
self._makeShadowCopy(var)

@@ -376,53 +268,25 @@ class DataSmart(MutableMapping):

return data

def expandVarref(self, variable, parents=False):
"""Find all references to variable in the data and expand it
in place, optionally descending to parent datastores."""

if parents:
keys = iter(self)
else:
keys = self.localkeys()

ref = '${%s}' % variable
value = self.getVar(variable, False)
for key in keys:
referrervalue = self.getVar(key, False)
if referrervalue and ref in referrervalue:
self.setVar(key, referrervalue.replace(ref, value))

def localkeys(self):
for key in self.dict:
if key != '_data':
yield key

def __iter__(self):
seen = set()
def _keys(d):
# Dictionary Methods
def keys(self):
def _keys(d, mykey):
if "_data" in d:
for key in _keys(d["_data"]):
yield key
_keys(d["_data"],mykey)

for key in d:
for key in d.keys():
if key != "_data":
if not key in seen:
seen.add(key)
yield key
return _keys(self.dict)
mykey[key] = None
keytab = {}
_keys(self.dict,keytab)
return keytab.keys()

def __len__(self):
return len(frozenset(self))
def __getitem__(self,item):
#print "Warning deprecated"
return self.getVar(item, False)

def __getitem__(self, item):
value = self.getVar(item, False)
if value is None:
raise KeyError(item)
else:
return value
def __setitem__(self,var,data):
#print "Warning deprecated"
self.setVar(var,data)

def __setitem__(self, var, value):
self.setVar(var, value)

def __delitem__(self, var):
self.delVar(var)

@@ -22,151 +22,100 @@ BitBake build tools.
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import os, sys
import warnings
try:
import cPickle as pickle
except ImportError:
import pickle
import logging
import atexit
import os, re
import bb.utils

# This is the pid for which we should generate the event. This is set when
# the runqueue forks off.
worker_pid = 0
worker_pipe = None

class Event(object):
class Event:
"""Base class for events"""
type = "Event"

def __init__(self):
self.pid = worker_pid
def __init__(self, d):
self._data = d

def getData(self):
return self._data

def setData(self, data):
self._data = data

data = property(getData, setData, None, "data property")

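A hedged sketch of subclassing the newer Event base shown above; the class
name and field are invented (note the older variant alongside it still
expected a datastore argument in __init__):

    import bb.event

    class ExampleCompleted(bb.event.Event):
        """Invented event carrying one detail field"""
        def __init__(self, detail):
            bb.event.Event.__init__(self)
            self.detail = detail

    # fired with the active datastore d:
    # bb.event.fire(ExampleCompleted("done"), d)
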
NotHandled = 0
Handled = 1
Handled = 1

Registered = 10
AlreadyRegistered = 14

# Internal
_handlers = {}
_ui_handlers = {}
_ui_handler_seq = 0
_handlers = []
_handlers_dict = {}

# For compatibility
bb.utils._context["NotHandled"] = NotHandled
bb.utils._context["Handled"] = Handled
def tmpHandler(event):
"""Default handler for code events"""
return NotHandled

def fire_class_handlers(event, d):
if isinstance(event, logging.LogRecord):
return
def defaultTmpHandler():
tmp = "def tmpHandler(e):\n\t\"\"\"heh\"\"\"\n\treturn NotHandled"
comp = bb.utils.better_compile(tmp, "tmpHandler(e)", "bb.event.defaultTmpHandler")
return comp

for handler in _handlers:
h = _handlers[handler]
event.data = d
if type(h).__name__ == "code":
locals = {"e": event}
bb.utils.simple_exec(h, locals)
ret = bb.utils.better_eval("tmpHandler(e)", locals)
if ret is not None:
warnings.warn("Using Handled/NotHandled in event handlers is deprecated",
DeprecationWarning, stacklevel = 2)
else:
h(event)
del event.data

ui_queue = []
@atexit.register
def print_ui_queue():
"""If we're exiting before a UI has been spawned, display any queued
LogRecords to the console."""
logger = logging.getLogger("BitBake")
if not _ui_handlers:
from bb.msg import BBLogFormatter
console = logging.StreamHandler(sys.stdout)
console.setFormatter(BBLogFormatter("%(levelname)s: %(message)s"))
logger.handlers = [console]
while ui_queue:
event = ui_queue.pop()
if isinstance(event, logging.LogRecord):
logger.handle(event)

def fire_ui_handlers(event, d):
if not _ui_handlers:
# No UI handlers registered yet, queue up the messages
ui_queue.append(event)
return

errors = []
for h in _ui_handlers:
#print "Sending event %s" % event
try:
# We use pickle here since it better handles object instances
# which xmlrpc's marshaller does not. Events *must* be serializable
# by pickle.
_ui_handlers[h].event.send((pickle.dumps(event)))
except:
errors.append(h)
for h in errors:
del _ui_handlers[h]

def fire(event, d):
def fire(event):
"""Fire off an Event"""

# We can fire class handlers in the worker process context and this is
# desired so they get the task based datastore.
# UI handlers need to be fired in the server context so we defer this. They
# don't have a datastore so the datastore context isn't a problem.

fire_class_handlers(event, d)
if worker_pid != 0:
worker_fire(event, d)
else:
fire_ui_handlers(event, d)

def worker_fire(event, d):
data = "<event>" + pickle.dumps(event) + "</event>"
worker_pipe.write(data)

def fire_from_worker(event, d):
if not event.startswith("<event>") or not event.endswith("</event>"):
print("Error, not an event %s" % event)
return
event = pickle.loads(event[7:-8])
fire_ui_handlers(event, d)
for h in _handlers:
if type(h).__name__ == "code":
exec(h)
if tmpHandler(event) == Handled:
return Handled
else:
if h(event) == Handled:
return Handled
return NotHandled

def register(name, handler):
"""Register an Event handler"""

# already registered
if name in _handlers:
if name in _handlers_dict:
return AlreadyRegistered

if handler is not None:
# handle string containing python code
if isinstance(handler, basestring):
tmp = "def tmpHandler(e):\n%s" % handler
comp = bb.utils.better_compile(tmp, "tmpHandler(e)", "bb.event._registerCode")
_handlers[name] = comp
# handle string containing python code
if type(handler).__name__ == "str":
_registerCode(handler)
else:
_handlers[name] = handler
_handlers.append(handler)

_handlers_dict[name] = 1
return Registered

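A short usage sketch for register() and getName() as defined above; the
handler name and body are invented:

    def example_handler(e):
        print "saw event: %s" % bb.event.getName(e)

    bb.event.register("example_handler", example_handler)
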
def _registerCode(handlerStr):
"""Register a 'code' Event.
Deprecated interface; call register instead.

Expects to be passed python code as a string, which will
be passed in turn to compile() and then exec(). Note that
the code will be within a function, so should have had
appropriate tabbing put in place."""
tmp = "def tmpHandler(e):\n%s" % handlerStr
comp = bb.utils.better_compile(tmp, "tmpHandler(e)", "bb.event._registerCode")
# prevent duplicate registration
_handlers.append(comp)

def remove(name, handler):
"""Remove an Event handler"""
_handlers.pop(name)

def register_UIHhandler(handler):
bb.event._ui_handler_seq = bb.event._ui_handler_seq + 1
_ui_handlers[_ui_handler_seq] = handler
return _ui_handler_seq
_handlers_dict.pop(name)
if type(handler).__name__ == "str":
return _removeCode(handler)
else:
_handlers.remove(handler)

def unregister_UIHhandler(handlerNum):
if handlerNum in _ui_handlers:
del _ui_handlers[handlerNum]
return
def _removeCode(handlerStr):
"""Remove a 'code' Event handler
Deprecated interface; call remove instead."""
tmp = "def tmpHandler(e):\n%s" % handlerStr
comp = bb.utils.better_compile(tmp, "tmpHandler(e)", "bb.event._removeCode")
_handlers.remove(comp)

def getName(e):
"""Returns the name of a class or class instance"""
@@ -178,37 +127,29 @@ def getName(e):
class ConfigParsed(Event):
"""Configuration Parsing Complete"""

class RecipeParsed(Event):
""" Recipe Parsing Complete """
class PkgBase(Event):
"""Base class for package events"""

def __init__(self, fn):
self.fn = fn
Event.__init__(self)
def __init__(self, t, d):
self._pkg = t
Event.__init__(self, d)

class StampUpdate(Event):
"""Trigger for any adjustment of the stamp files to happen"""
def getPkg(self):
return self._pkg

def __init__(self, targets, stampfns):
self._targets = targets
self._stampfns = stampfns
Event.__init__(self)
def setPkg(self, pkg):
self._pkg = pkg

def getStampPrefix(self):
return self._stampfns
pkg = property(getPkg, setPkg, None, "pkg property")

def getTargets(self):
return self._targets

stampPrefix = property(getStampPrefix)
targets = property(getTargets)

class BuildBase(Event):
"""Base class for bbmake run events"""

def __init__(self, n, p, failures = 0):
def __init__(self, n, p, c, failures = 0):
self._name = n
self._pkgs = p
Event.__init__(self)
Event.__init__(self, c)
self._failures = failures

def getPkgs(self):
@@ -240,7 +181,32 @@ class BuildBase(Event):
cfg = property(getCfg, setCfg, None, "cfg property")


class DepBase(PkgBase):
"""Base class for dependency events"""

def __init__(self, t, data, d):
self._dep = d
PkgBase.__init__(self, t, data)

def getDep(self):
return self._dep

def setDep(self, dep):
self._dep = dep

dep = property(getDep, setDep, None, "dep property")


class PkgStarted(PkgBase):
"""Package build started"""


class PkgFailed(PkgBase):
"""Package build failed"""


class PkgSucceeded(PkgBase):
"""Package build completed"""


class BuildStarted(BuildBase):
@@ -251,16 +217,20 @@ class BuildCompleted(BuildBase):
"""bbmake build run completed"""


class UnsatisfiedDep(DepBase):
"""Unsatisfied Dependency"""


class RecursiveDep(DepBase):
"""Recursive Dependency"""

class NoProvider(Event):
"""No Provider for an Event"""

def __init__(self, item, runtime=False, dependees=None):
Event.__init__(self)
def __init__(self, item, data,runtime=False):
Event.__init__(self, data)
self._item = item
self._runtime = runtime
self._dependees = dependees

def getItem(self):
return self._item
@@ -271,8 +241,8 @@ class NoProvider(Event):
class MultipleProviders(Event):
"""Multiple Providers"""

def __init__(self, item, candidates, runtime = False):
Event.__init__(self)
def __init__(self, item, candidates, data, runtime = False):
Event.__init__(self, data)
self._item = item
self._candidates = candidates
self._is_runtime = runtime
@@ -294,110 +264,3 @@ class MultipleProviders(Event):
Get the possible Candidates for a PROVIDER.
"""
return self._candidates

class ParseStarted(Event):
"""Recipe parsing for the runqueue has begun"""
def __init__(self, total):
Event.__init__(self)
self.total = total

class ParseCompleted(Event):
"""Recipe parsing for the runqueue has completed"""

def __init__(self, cached, parsed, skipped, masked, virtuals, errors, total):
Event.__init__(self)
self.cached = cached
self.parsed = parsed
self.skipped = skipped
self.virtuals = virtuals
self.masked = masked
self.errors = errors
self.sofar = cached + parsed
self.total = total

class ParseProgress(Event):
"""Recipe parsing progress"""

def __init__(self, current):
self.current = current

class CacheLoadStarted(Event):
"""Loading of the dependency cache has begun"""
def __init__(self, total):
Event.__init__(self)
self.total = total

class CacheLoadProgress(Event):
"""Cache loading progress"""
def __init__(self, current):
Event.__init__(self)
self.current = current

class CacheLoadCompleted(Event):
"""Cache loading is complete"""
def __init__(self, total, num_entries):
Event.__init__(self)
self.total = total
self.num_entries = num_entries


class DepTreeGenerated(Event):
"""
Event when a dependency tree has been generated
"""

def __init__(self, depgraph):
Event.__init__(self)
self._depgraph = depgraph

class TargetsTreeGenerated(Event):
"""
Event when a set of buildable targets has been generated
"""
def __init__(self, model):
Event.__init__(self)
self._model = model

class ConfigFilesFound(Event):
"""
Event when a list of appropriate config files has been generated
"""
def __init__(self, variable, values):
Event.__init__(self)
self._variable = variable
self._values = values

class MsgBase(Event):
"""Base class for messages"""

def __init__(self, msg):
self._message = msg
Event.__init__(self)

class MsgDebug(MsgBase):
"""Debug Message"""

class MsgNote(MsgBase):
"""Note Message"""

class MsgWarn(MsgBase):
"""Warning Message"""

class MsgError(MsgBase):
"""Error Message"""

class MsgFatal(MsgBase):
"""Fatal Message"""

class MsgPlain(MsgBase):
"""General output"""

class LogHandler(logging.Handler):
"""Dispatch logging messages as bitbake events"""

def emit(self, record):
fire(record, None)

def filter(self, record):
record.taskpid = worker_pid
return True

@@ -24,21 +24,15 @@ BitBake build tools.
#
# Based on functions from the base bb module, Copyright 2003 Holger Schurig

from __future__ import absolute_import
from __future__ import print_function
import os, re
import logging
import os, re, fcntl
import bb
from bb import data
from bb import persist_data
from bb import utils

__version__ = "1"

logger = logging.getLogger("BitBake.Fetch")

class MalformedUrl(Exception):
"""Exception raised when encountering an invalid url"""
try:
import cPickle as pickle
except ImportError:
import pickle

class FetchError(Exception):
"""Exception raised when a download fails"""
@@ -55,142 +49,57 @@ class ParameterError(Exception):
class MD5SumError(Exception):
"""Exception raised when a MD5SUM of a file does not match the expected one"""

class InvalidSRCREV(Exception):
"""Exception raised when an invalid SRCREV is encountered"""

def decodeurl(url):
"""Decodes an URL into the tokens (scheme, network location, path,
user, password, parameters).
"""

m = re.compile('(?P<type>[^:]*)://((?P<user>.+)@)?(?P<location>[^;]+)(;(?P<parm>.*))?').match(url)
if not m:
raise MalformedUrl(url)

type = m.group('type')
location = m.group('location')
if not location:
raise MalformedUrl(url)
user = m.group('user')
parm = m.group('parm')

locidx = location.find('/')
if locidx != -1 and type.lower() != 'file':
host = location[:locidx]
path = location[locidx:]
else:
host = ""
path = location
if user:
m = re.compile('(?P<user>[^:]+)(:?(?P<pswd>.*))').match(user)
if m:
user = m.group('user')
pswd = m.group('pswd')
else:
user = ''
pswd = ''

p = {}
if parm:
for s in parm.split(';'):
s1, s2 = s.split('=')
p[s1] = s2

return (type, host, path, user, pswd, p)

def encodeurl(decoded):
"""Encodes a URL from tokens (scheme, network location, path,
user, password, parameters).
"""

(type, host, path, user, pswd, p) = decoded

if not type or not path:
raise MissingParameterError("Type or path url components missing when encoding %s" % decoded)
url = '%s://' % type
if user:
url += "%s" % user
if pswd:
url += ":%s" % pswd
url += "@"
if host:
url += "%s" % host
url += "%s" % path
if p:
for parm in p:
url += ";%s=%s" % (parm, p[parm])

return url

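A doctest-style sketch of the decodeurl/encodeurl round trip implied by the
code above; the URL itself is invented:

    >>> decodeurl("git://git.example.com/repo;protocol=http")
    ('git', 'git.example.com', '/repo', '', '', {'protocol': 'http'})
    >>> encodeurl(('git', 'git.example.com', '/repo', '', '', {'protocol': 'http'}))
    'git://git.example.com/repo;protocol=http'
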
def uri_replace(uri, uri_find, uri_replace, d):
# bb.msg.note(1, bb.msg.domain.Fetcher, "uri_replace: operating on %s" % uri)
if not uri or not uri_find or not uri_replace:
logger.debug(1, "uri_replace: passed an undefined value, not replacing")
uri_decoded = list(decodeurl(uri))
uri_find_decoded = list(decodeurl(uri_find))
uri_replace_decoded = list(decodeurl(uri_replace))
result_decoded = ['', '', '', '', '', {}]
bb.msg.debug(1, bb.msg.domain.Fetcher, "uri_replace: passed an undefined value, not replacing")
uri_decoded = list(bb.decodeurl(uri))
uri_find_decoded = list(bb.decodeurl(uri_find))
uri_replace_decoded = list(bb.decodeurl(uri_replace))
result_decoded = ['','','','','',{}]
for i in uri_find_decoded:
loc = uri_find_decoded.index(i)
result_decoded[loc] = uri_decoded[loc]
if isinstance(i, basestring):
import types
if type(i) == types.StringType:
import re
if (re.match(i, uri_decoded[loc])):
result_decoded[loc] = re.sub(i, uri_replace_decoded[loc], uri_decoded[loc])
if uri_find_decoded.index(i) == 2:
if d:
localfn = bb.fetch.localpath(uri, d)
if localfn:
result_decoded[loc] = os.path.join(os.path.dirname(result_decoded[loc]), os.path.basename(bb.fetch.localpath(uri, d)))
result_decoded[loc] = os.path.dirname(result_decoded[loc]) + "/" + os.path.basename(bb.fetch.localpath(uri, d))
# bb.msg.note(1, bb.msg.domain.Fetcher, "uri_replace: matching %s against %s and replacing with %s" % (i, uri_decoded[loc], uri_replace_decoded[loc]))
else:
# bb.msg.note(1, bb.msg.domain.Fetcher, "uri_replace: no match")
return uri
return encodeurl(result_decoded)
# else:
# for j in i.keys():
# FIXME: apply replacements against options
return bb.encodeurl(result_decoded)

methods = []
urldata_cache = {}
saved_headrevs = {}

def fetcher_init(d):
"""
Called to initialize the fetchers once the configuration data is known.
Called to initilize the fetchers once the configuration data is known
Calls before this must not hit the cache.
"""
pd = persist_data.persist(d)
# When to drop SCM head revisions controlled by user policy
pd = persist_data.PersistData(d)
# When to drop SCM head revisions controled by user policy
srcrev_policy = bb.data.getVar('BB_SRCREV_POLICY', d, 1) or "clear"
if srcrev_policy == "cache":
logger.debug(1, "Keeping SRCREV cache due to cache policy of: %s", srcrev_policy)
bb.msg.debug(1, bb.msg.domain.Fetcher, "Keeping SRCREV cache due to cache policy of: %s" % srcrev_policy)
elif srcrev_policy == "clear":
logger.debug(1, "Clearing SRCREV cache due to cache policy of: %s", srcrev_policy)
try:
bb.fetch.saved_headrevs = pd['BB_URI_HEADREVS'].items()
except:
pass
del pd['BB_URI_HEADREVS']
bb.msg.debug(1, bb.msg.domain.Fetcher, "Clearing SRCREV cache due to cache policy of: %s" % srcrev_policy)
pd.delDomain("BB_URI_HEADREVS")
else:
raise FetchError("Invalid SRCREV cache policy of: %s" % srcrev_policy)

for m in methods:
if hasattr(m, "init"):
m.init(d)

def fetcher_compare_revisions(d):
"""
Compare the revisions in the persistant cache with current values and
return true/false on whether they've changed.
"""

pd = persist_data.persist(d)
data = pd['BB_URI_HEADREVS'].items()
data2 = bb.fetch.saved_headrevs

changed = False
for key in data:
if key not in data2 or data2[key] != data[key]:
logger.debug(1, "%s changed", key)
changed = True
return True
else:
logger.debug(2, "%s did not change", key)
return False
bb.msg.fatal(bb.msg.domain.Fetcher, "Invalid SRCREV cache policy of: %s" % srcrev_policy)
# Make sure our domains exist
pd.addDomain("BB_URI_HEADREVS")
pd.addDomain("BB_URI_LOCALCOUNT")

# Function call order is usually:
# 1. init
@@ -200,7 +109,6 @@ def fetcher_compare_revisions(d):

def init(urls, d, setup = True):
urldata = {}

fn = bb.data.getVar('FILE', d, 1)
if fn in urldata_cache:
urldata = urldata_cache[fn]
@@ -212,135 +120,39 @@ def init(urls, d, setup = True):
if setup:
for url in urldata:
if not urldata[url].setup:
urldata[url].setup_localpath(d)
urldata[url].setup_localpath(d)

urldata_cache[fn] = urldata
return urldata

def mirror_from_string(data):
return [ i.split() for i in (data or "").replace('\\n','\n').split('\n') if i ]

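A sketch of the mirror string shape the helper above parses: each entry is a
"match replacement" pair, with entries separated by newlines or literal \n
escapes (the URLs here are invented):

    >>> mirror_from_string("git://.*/.* http://mirror.example.com/\\ncvs://.*/.* http://mirror.example.com/")
    [['git://.*/.*', 'http://mirror.example.com/'], ['cvs://.*/.*', 'http://mirror.example.com/']]
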
def verify_checksum(u, ud, d):
"""
verify the MD5 and SHA256 checksum for downloaded src

return value:
- True: checksum matched
- False: checksum unmatched

if checksum is missing in recipes file, "BB_STRICT_CHECKSUM" decide the return value.
if BB_STRICT_CHECKSUM = "1" then return false as unmatched, otherwise return true as
matched
"""

if not ud.type in ["http", "https", "ftp", "ftps"]:
return

md5data = bb.utils.md5_file(ud.localpath)
sha256data = bb.utils.sha256_file(ud.localpath)

if (ud.md5_expected == None or ud.sha256_expected == None):
logger.warn('Missing SRC_URI checksum for %s, consider adding to the recipe:\n'
'SRC_URI[%s] = "%s"\nSRC_URI[%s] = "%s"',
ud.localpath, ud.md5_name, md5data,
ud.sha256_name, sha256data)
if bb.data.getVar("BB_STRICT_CHECKSUM", d, True) == "1":
raise FetchError("No checksum specified for %s." % u)
return

if (ud.md5_expected != md5data or ud.sha256_expected != sha256data):
logger.error('The checksums for "%s" did not match.\n'
' MD5: expected "%s", got "%s"\n'
' SHA256: expected "%s", got "%s"\n',
ud.localpath, ud.md5_expected, md5data,
ud.sha256_expected, sha256data)
raise FetchError("%s checksum mismatch." % u)

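The recipe-side shape verify_checksum expects, per the warning text above;
the hash values are placeholders and the flag names assume the default
unnamed SRC_URI case:

    SRC_URI[md5sum] = "0123456789abcdef0123456789abcdef"
    SRC_URI[sha256sum] = "<64 hex digits>"
    # With BB_STRICT_CHECKSUM = "1" in the configuration, a missing
    # checksum raises FetchError instead of only warning.
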
def go(d, urls = None):
def go(d):
"""
Fetch all urls
init must have previously been called
"""
if not urls:
urls = d.getVar("SRC_URI", 1).split()
urldata = init(urls, d, True)

for u in urls:
ud = urldata[u]
m = ud.method
localpath = ""

if not ud.localfile:
continue

lf = bb.utils.lockfile(ud.lockfile)

if m.try_premirror(u, ud, d):
# First try fetching uri, u, from PREMIRRORS
mirrors = mirror_from_string(bb.data.getVar('PREMIRRORS', d, True))
localpath = try_mirrors(d, u, mirrors, False, m.forcefetch(u, ud, d))
elif os.path.exists(ud.localfile):
localpath = ud.localfile

# Need to re-test forcefetch() which will return true if our copy is too old
if m.forcefetch(u, ud, d) or not localpath:
# Next try fetching from the original uri, u
try:
m.go(u, ud, d)
localpath = ud.localpath
except FetchError:
# Remove any incomplete file
bb.utils.remove(ud.localpath)
# Finally, try fetching uri, u, from MIRRORS
mirrors = mirror_from_string(bb.data.getVar('MIRRORS', d, True))
localpath = try_mirrors (d, u, mirrors)
if not localpath or not os.path.exists(localpath):
raise FetchError("Unable to fetch URL %s from any source." % u)

ud.localpath = localpath

if os.path.exists(ud.md5):
# Touch the md5 file to show active use of the download
try:
os.utime(ud.md5, None)
except:
# Errors aren't fatal here
pass
else:
# Only check the checksums if we've not seen this item before
verify_checksum(u, ud, d)
Fetch.write_md5sum(u, ud, d)

bb.utils.unlockfile(lf)

def checkstatus(d, urls = None):
"""
Check all urls exist upstream
init must have previously been called
"""
urldata = init([], d, True)

if not urls:
urls = urldata

for u in urls:
for u in urldata:
ud = urldata[u]
m = ud.method
logger.debug(1, "Testing URL %s", u)
# First try checking uri, u, from PREMIRRORS
mirrors = mirror_from_string(bb.data.getVar('PREMIRRORS', d, True))
ret = try_mirrors(d, u, mirrors, True)
if not ret:
# Next try checking from the original uri, u
try:
ret = m.checkstatus(u, ud, d)
except:
# Finally, try checking uri, u, from MIRRORS
mirrors = mirror_from_string(bb.data.getVar('MIRRORS', d, True))
ret = try_mirrors (d, u, mirrors, True)

if not ret:
raise FetchError("URL %s doesn't work" % u)
if ud.localfile:
if not m.forcefetch(u, ud, d) and os.path.exists(ud.md5):
# File already present along with md5 stamp file
# Touch md5 file to show activity
os.utime(ud.md5, None)
continue
lf = bb.utils.lockfile(ud.lockfile)
if not m.forcefetch(u, ud, d) and os.path.exists(ud.md5):
# If someone else fetched this before we got the lock,
# notice and don't try again
os.utime(ud.md5, None)
bb.utils.unlockfile(lf)
continue
m.go(u, ud, d)
if ud.localfile:
if not m.forcefetch(u, ud, d):
Fetch.write_md5sum(u, ud, d)
bb.utils.unlockfile(lf)

def localpaths(d):
"""
@@ -350,30 +162,27 @@ def localpaths(d):
urldata = init([], d, True)

for u in urldata:
ud = urldata[u]
ud = urldata[u]
local.append(ud.localpath)

return local

srcrev_internal_call = False

def get_autorev(d):
return get_srcrev(d)

def get_srcrev(d):
"""
Return the version string for the current package
(usually to be used as PV)
Most packages usually only have one SCM so we just pass on the call.
In the multi SCM case, we build a value based on SRCREV_FORMAT which must
In the multi SCM case, we build a value based on SRCREV_FORMAT which must
have been set.
"""

#
# Ugly code alert. localpath in the fetchers will try to evaluate SRCREV which
# Ugly code alert. localpath in the fetchers will try to evaluate SRCREV which
# could translate into a call to here. If it does, we need to catch this
# and provide some way so it knows get_srcrev is active instead of being
# some number etc. hence the srcrev_internal_call tracking and the magic
# some number etc. hence the srcrev_internal_call tracking and the magic
# "SRCREVINACTION" return value.
#
# Neater solutions welcome!
@@ -383,22 +192,19 @@ def get_srcrev(d):

scms = []

# Only call setup_localpath on URIs which supports_srcrev()
# Only call setup_localpath on URIs which suppports_srcrev()
urldata = init(bb.data.getVar('SRC_URI', d, 1).split(), d, False)
for u in urldata:
ud = urldata[u]
if ud.method.supports_srcrev():
if ud.method.suppports_srcrev():
if not ud.setup:
ud.setup_localpath(d)
scms.append(u)

if len(scms) == 0:
logger.error("SRCREV was used yet no valid SCM was found in SRC_URI")
bb.msg.error(bb.msg.domain.Fetcher, "SRCREV was used yet no valid SCM was found in SRC_URI")
raise ParameterError

if bb.data.getVar('BB_SRCREV_POLICY', d, True) != "cache":
bb.data.setVar('__BB_DONT_CACHE', '1', d)

if len(scms) == 1:
return urldata[scms[0]].method.sortable_revision(scms[0], urldata[scms[0]], d)

@@ -407,7 +213,7 @@ def get_srcrev(d):
#
format = bb.data.getVar('SRCREV_FORMAT', d, 1)
if not format:
logger.error("The SRCREV_FORMAT variable must be set when multiple SCMs are used.")
bb.msg.error(bb.msg.domain.Fetcher, "The SRCREV_FORMAT variable must be set when multiple SCMs are used.")
raise ParameterError

for scm in scms:

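A hedged sketch of the multi-SCM case the loop above handles: SRCREV_FORMAT
names how the per-SCM revisions combine (the URIs and names here are
invented; the exact substitution details sit in the loop body truncated by
the hunk boundary):

    SRC_URI = "git://host.example.com/a;name=first git://host.example.com/b;name=second"
    SRCREV_FORMAT = "first_second"
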
@@ -420,7 +226,7 @@ def get_srcrev(d):
|
||||
|
||||
def localpath(url, d, cache = True):
|
||||
"""
|
||||
Called from the parser with cache=False since the cache isn't ready
|
||||
Called from the parser with cache=False since the cache isn't ready
|
||||
at this point. Also called from classed in OE e.g. patch.bbclass
|
||||
"""
|
||||
ud = init([url], d)
|
||||
@@ -434,115 +240,44 @@ def runfetchcmd(cmd, d, quiet = False):
|
||||
Raise an error if interrupted or cmd fails
|
||||
Optionally echo command output to stdout
|
||||
"""
|
||||
bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % cmd)
|
||||
|
||||
# Need to export PATH as binary could be in metadata paths
|
||||
# rather than host provided
|
||||
# Also include some other variables.
|
||||
# FIXME: Should really include all export varaiables?
|
||||
exportvars = ['PATH', 'GIT_PROXY_COMMAND', 'GIT_PROXY_HOST',
|
||||
'GIT_PROXY_PORT', 'GIT_CONFIG', 'http_proxy', 'ftp_proxy',
|
||||
'https_proxy', 'no_proxy', 'ALL_PROXY', 'all_proxy',
|
||||
'KRB5CCNAME', 'SSH_AUTH_SOCK', 'SSH_AGENT_PID', 'HOME']
|
||||
pathcmd = 'export PATH=%s; %s' % (data.expand('${PATH}', d), cmd)
|
||||
|
||||
for var in exportvars:
|
||||
val = data.getVar(var, d, True)
|
||||
if val:
|
||||
cmd = 'export ' + var + '=\"%s\"; %s' % (val, cmd)
|
||||
|
||||
logger.debug(1, "Running %s", cmd)
|
||||
|
||||
# redirect stderr to stdout
|
||||
stdout_handle = os.popen(cmd + " 2>&1", "r")
|
||||
stdout_handle = os.popen(pathcmd, "r")
|
||||
output = ""
|
||||
|
||||
while True:
|
||||
while 1:
|
||||
line = stdout_handle.readline()
|
||||
if not line:
|
||||
break
|
||||
if not quiet:
|
||||
print(line, end=' ')
|
||||
print line,
|
||||
output += line
|
||||
|
||||
status = stdout_handle.close() or 0
|
||||
status = stdout_handle.close() or 0
|
||||
signal = status >> 8
|
||||
exitstatus = status & 0xff
|
||||
|
||||
if signal:
|
||||
raise FetchError("Fetch command %s failed with signal %s, output:\n%s" % (cmd, signal, output))
|
||||
raise FetchError("Fetch command %s failed with signal %s, output:\n%s" % (pathcmd, signal, output))
|
||||
elif status != 0:
|
||||
raise FetchError("Fetch command %s failed with exit code %s, output:\n%s" % (cmd, status, output))
|
||||
raise FetchError("Fetch command %s failed with exit code %s, output:\n%s" % (pathcmd, status, output))
|
||||
|
||||
return output
|
||||
|
||||
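For context: the newer runfetchcmd above prefixes the command with an "export VAR=..." for each whitelisted variable before running it through os.popen, so tools found via metadata paths and proxy settings keep working in the child shell. A minimal standalone sketch of the same idea (the shortened variable list is taken from the diff above; the helper name wrap_cmd is hypothetical):

    import os

    # Variables the fetcher forwards into the child shell (subset of the diff's list).
    EXPORTVARS = ['PATH', 'GIT_PROXY_COMMAND', 'http_proxy', 'https_proxy']

    def wrap_cmd(cmd, env):
        # Prepend an "export" for every whitelisted variable that has a value.
        for var in EXPORTVARS:
            val = env.get(var)
            if val:
                cmd = 'export %s="%s"; %s' % (var, val, cmd)
        return cmd + " 2>&1"  # stderr is folded into stdout, as in the diff

    print(wrap_cmd("git ls-remote .", {"PATH": "/usr/bin"}))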
def try_mirrors(d, uri, mirrors, check = False, force = False):
"""
Try to use a mirrored version of the sources.
This method will be automatically called before the fetchers go.

d Is a bb.data instance
uri is the original uri we're trying to download
mirrors is the list of mirrors we're going to try
"""
fpath = os.path.join(data.getVar("DL_DIR", d, 1), os.path.basename(uri))
if not check and os.access(fpath, os.R_OK) and not force:
logger.debug(1, "%s already exists, skipping checkout.", fpath)
return fpath

ld = d.createCopy()
for (find, replace) in mirrors:
newuri = uri_replace(uri, find, replace, ld)
if newuri != uri:
try:
ud = FetchData(newuri, ld)
except bb.fetch.NoMethodError:
logger.debug(1, "No method for %s", uri)
continue

ud.setup_localpath(ld)

try:
if check:
found = ud.method.checkstatus(newuri, ud, ld)
if found:
return found
else:
ud.method.go(newuri, ud, ld)
return ud.localpath
except (bb.fetch.MissingParameterError,
bb.fetch.FetchError,
bb.fetch.MD5SumError):
import sys
(type, value, traceback) = sys.exc_info()
logger.debug(2, "Mirror fetch failure: %s", value)
bb.utils.remove(ud.localpath)
continue
return None
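try_mirrors walks the (find, replace) mirror list and rewrites the original URI before handing it to a fresh FetchData. A much-simplified sketch of the rewriting step (real BitBake uses the more general uri_replace, which supports pattern matching; simple_swap below is a hypothetical prefix-only stand-in):

    def simple_swap(uri, find, replace):
        # Prefix substitution only; enough to show the control flow above.
        if uri.startswith(find):
            return replace + uri[len(find):]
        return uri

    mirrors = [("http://example.com/", "file:///srv/mirror/")]
    uri = "http://example.com/pkg-1.0.tar.gz"
    for find, replace in mirrors:
        newuri = simple_swap(uri, find, replace)
        if newuri != uri:
            print("would try mirror:", newuri)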
class FetchData(object):
"""
A class which represents the fetcher state for a given URI.
"""
def __init__(self, url, d):
self.localfile = ""
(self.type, self.host, self.path, self.user, self.pswd, self.parm) = decodeurl(data.expand(url, d))
(self.type, self.host, self.path, self.user, self.pswd, self.parm) = bb.decodeurl(data.expand(url, d))
self.date = Fetch.getSRCDate(self, d)
self.url = url
if not self.user and "user" in self.parm:
self.user = self.parm["user"]
if not self.pswd and "pswd" in self.parm:
self.pswd = self.parm["pswd"]
self.setup = False

if "name" in self.parm:
self.md5_name = "%s.md5sum" % self.parm["name"]
self.sha256_name = "%s.sha256sum" % self.parm["name"]
else:
self.md5_name = "md5sum"
self.sha256_name = "sha256sum"
self.md5_expected = bb.data.getVarFlag("SRC_URI", self.md5_name, d)
self.sha256_expected = bb.data.getVarFlag("SRC_URI", self.sha256_name, d)

for m in methods:
if m.supports(url, self, d):
self.method = m
@@ -554,36 +289,15 @@ class FetchData(object):
if "localpath" in self.parm:
# if user sets localpath for file, use it instead.
self.localpath = self.parm["localpath"]
self.basename = os.path.basename(self.localpath)
else:
premirrors = bb.data.getVar('PREMIRRORS', d, True)
local = ""
if premirrors and self.url:
aurl = self.url.split(";")[0]
mirrors = mirror_from_string(premirrors)
for (find, replace) in mirrors:
if replace.startswith("file://"):
path = aurl.split("://")[1]
path = path.split(";")[0]
local = replace.split("://")[1] + os.path.basename(path)
if local == aurl or not os.path.exists(local) or os.path.isdir(local):
local = ""
self.localpath = local
if not local:
try:
bb.fetch.srcrev_internal_call = True
self.localpath = self.method.localpath(self.url, self, d)
finally:
bb.fetch.srcrev_internal_call = False
# We have to clear data's internal caches since the cached value of SRCREV is now wrong.
# Horrible...
bb.data.delVar("ISHOULDNEVEREXIST", d)

if self.localpath is not None:
# Note: These files should always be in DL_DIR whereas localpath may not be.
basepath = bb.data.expand("${DL_DIR}/%s" % os.path.basename(self.localpath), d)
self.md5 = basepath + '.md5'
self.lockfile = basepath + '.lock'
bb.fetch.srcrev_internal_call = True
self.localpath = self.method.localpath(self.url, self, d)
bb.fetch.srcrev_internal_call = False
# We have to clear data's internal caches since the cached value of SRCREV is now wrong.
# Horrible...
bb.data.delVar("ISHOULDNEVEREXIST", d)
self.md5 = self.localpath + '.md5'
self.lockfile = self.localpath + '.lock'


class Fetch(object):
@@ -601,17 +315,10 @@ class Fetch(object):
def localpath(self, url, urldata, d):
"""
Return the local filename of a given url assuming a successful fetch.
Can also setup variables in urldata for use in go (saving code duplication
and duplicate code execution)
"""
return url
def _strip_leading_slashes(self, relpath):
"""
Remove leading slash as os.path.join can't cope
"""
while os.path.isabs(relpath):
relpath = relpath[1:]
return relpath

def setUrls(self, urls):
self.__urls = urls
@@ -627,7 +334,7 @@ class Fetch(object):
"""
return False

def supports_srcrev(self):
def suppports_srcrev(self):
"""
The fetcher supports auto source revisions (SRCREV)
"""
@@ -640,25 +347,6 @@ class Fetch(object):
"""
raise NoMethodError("Missing implementation for url")

def try_premirror(self, url, urldata, d):
"""
Should premirrors be used?
"""
if urldata.method.forcefetch(url, urldata, d):
return True
elif os.path.exists(urldata.md5) and os.path.exists(urldata.localfile):
return False
else:
return True

def checkstatus(self, url, urldata, d):
"""
Check the status of a URL
Assumes localpath was called first
"""
logger.info("URL %s could not be checked for status since no method exists.", url)
return True

def getSRCDate(urldata, d):
"""
Return the SRC Date for the component
@@ -680,8 +368,8 @@ class Fetch(object):
"""
Return:
a) a source revision if specified
b) True if auto srcrev is in action
c) False otherwise
"""

if 'rev' in ud.parm:
@@ -693,45 +381,56 @@ class Fetch(object):
rev = None
if 'name' in ud.parm:
pn = data.getVar("PN", d, 1)
rev = data.getVar("SRCREV_%s_pn-%s" % (ud.parm['name'], pn), d, 1)
if not rev:
rev = data.getVar("SRCREV_pn-%s_%s" % (pn, ud.parm['name']), d, 1)
if not rev:
rev = data.getVar("SRCREV_%s" % (ud.parm['name']), d, 1)
rev = data.getVar("SRCREV_pn-" + pn + "_" + ud.parm['name'], d, 1)
if not rev:
rev = data.getVar("SRCREV", d, 1)
if rev == "INVALID":
raise InvalidSRCREV("Please set SRCREV to a valid value")
if not rev:
return False
if rev == "SRCREVINACTION":
if rev is "SRCREVINACTION":
return True
return rev

srcrev_internal_helper = staticmethod(srcrev_internal_helper)

def localcount_internal_helper(ud, d):
def try_mirror(d, tarfn):
"""
Return:
a) a locked localcount if specified
b) None otherwise
Try to use a mirrored version of the sources. We do this
to avoid massive loads on foreign cvs and svn servers.
This method will be used by the different fetcher
implementations.

d Is a bb.data instance
tarfn is the name of the tarball
"""
tarpath = os.path.join(data.getVar("DL_DIR", d, 1), tarfn)
if os.access(tarpath, os.R_OK):
bb.msg.debug(1, bb.msg.domain.Fetcher, "%s already exists, skipping checkout." % tarfn)
return True

localcount = None
if 'name' in ud.parm:
pn = data.getVar("PN", d, 1)
localcount = data.getVar("LOCALCOUNT_" + ud.parm['name'], d, 1)
if not localcount:
localcount = data.getVar("LOCALCOUNT", d, 1)
return localcount
pn = data.getVar('PN', d, True)
src_tarball_stash = None
if pn:
src_tarball_stash = (data.getVar('SRC_TARBALL_STASH_%s' % pn, d, True) or data.getVar('CVS_TARBALL_STASH_%s' % pn, d, True) or data.getVar('SRC_TARBALL_STASH', d, True) or data.getVar('CVS_TARBALL_STASH', d, True) or "").split()

localcount_internal_helper = staticmethod(localcount_internal_helper)
for stash in src_tarball_stash:
fetchcmd = data.getVar("FETCHCOMMAND_mirror", d, True) or data.getVar("FETCHCOMMAND_wget", d, True)
uri = stash + tarfn
bb.msg.note(1, bb.msg.domain.Fetcher, "fetch " + uri)
fetchcmd = fetchcmd.replace("${URI}", uri)
ret = os.system(fetchcmd)
if ret == 0:
bb.msg.note(1, bb.msg.domain.Fetcher, "Fetched %s from tarball stash, skipping checkout" % tarfn)
return True
return False
try_mirror = staticmethod(try_mirror)

def verify_md5sum(ud, got_sum):
"""
Verify the md5sum we wanted with the one we got
"""
wanted_sum = ud.parm.get('md5sum')
wanted_sum = None
if 'md5sum' in ud.parm:
wanted_sum = ud.parm['md5sum']
if not wanted_sum:
return True

@@ -739,7 +438,14 @@ class Fetch(object):
verify_md5sum = staticmethod(verify_md5sum)

def write_md5sum(url, ud, d):
md5data = bb.utils.md5_file(ud.localpath)
if bb.which(data.getVar('PATH', d), 'md5sum'):
try:
md5pipe = os.popen('md5sum ' + ud.localpath)
md5data = (md5pipe.readline().split() or [ "" ])[0]
md5pipe.close()
except OSError:
md5data = ""

# verify the md5sum
if not Fetch.verify_md5sum(ud, md5data):
raise MD5SumError(url)
@@ -756,71 +462,53 @@ class Fetch(object):
if not hasattr(self, "_latest_revision"):
raise ParameterError

pd = persist_data.persist(d)
revs = pd['BB_URI_HEADREVS']
key = self.generate_revision_key(url, ud, d)
rev = revs[key]
pd = persist_data.PersistData(d)
key = self._revision_key(url, ud, d)
rev = pd.getValue("BB_URI_HEADREVS", key)
if rev != None:
return str(rev)

revs[key] = rev = self._latest_revision(url, ud, d)
rev = self._latest_revision(url, ud, d)
pd.setValue("BB_URI_HEADREVS", key, rev)
return rev

def sortable_revision(self, url, ud, d):
"""

"""
if hasattr(self, "_sortable_revision"):
return self._sortable_revision(url, ud, d)

pd = persist_data.persist(d)
localcounts = pd['BB_URI_LOCALCOUNT']
key = self.generate_revision_key(url, ud, d)

pd = persist_data.PersistData(d)
key = self._revision_key(url, ud, d)
latest_rev = self._build_revision(url, ud, d)
last_rev = localcounts[key + '_rev']
uselocalcount = bb.data.getVar("BB_LOCALCOUNT_OVERRIDE", d, True) or False
count = None
if uselocalcount:
count = Fetch.localcount_internal_helper(ud, d)
if count is None:
count = localcounts[key + '_count']
last_rev = pd.getValue("BB_URI_LOCALCOUNT", key + "_rev")
count = pd.getValue("BB_URI_LOCALCOUNT", key + "_count")

if last_rev == latest_rev:
return str(count + "+" + latest_rev)

buildindex_provided = hasattr(self, "_sortable_buildindex")
if buildindex_provided:
count = self._sortable_buildindex(url, ud, d, latest_rev)

if count is None:
count = "0"
elif uselocalcount or buildindex_provided:
count = str(count)
else:
count = str(int(count) + 1)

localcounts[key + '_rev'] = latest_rev
localcounts[key + '_count'] = count
pd.setValue("BB_URI_LOCALCOUNT", key + "_rev", latest_rev)
pd.setValue("BB_URI_LOCALCOUNT", key + "_count", count)

return str(count + "+" + latest_rev)

def generate_revision_key(self, url, ud, d):
key = self._revision_key(url, ud, d)
return "%s-%s" % (key, bb.data.getVar("PN", d, True) or "")

from . import cvs
from . import git
from . import local
from . import svn
from . import wget
from . import svk
from . import ssh
from . import perforce
from . import bzr
from . import hg
from . import osc
from . import repo
import cvs
import git
import local
import svn
import wget
import svk
import ssh
import perforce
import bzr
import hg

methods.append(local.Local())
methods.append(wget.Wget())
@@ -832,5 +520,3 @@ methods.append(ssh.SSH())
methods.append(perforce.Perforce())
methods.append(bzr.Bzr())
methods.append(hg.Hg())
methods.append(osc.Osc())
methods.append(repo.Repo())
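A note on the sortable_revision logic above: it produces strings of the form "<count>+<rev>" so that package versions built from SCM revisions compare monotonically even when the underlying revision identifiers (for example git hashes) do not sort. A toy illustration of just the counter logic (the cache dict is a hypothetical stand-in for the persistent BB_URI_LOCALCOUNT store):

    cache = {}  # stand-in for persist_data

    def sortable(key, latest_rev):
        # Reuse the stored count while the revision is unchanged; bump it
        # whenever a new upstream revision is seen.
        if cache.get(key + "_rev") == latest_rev:
            count = cache[key + "_count"]
        else:
            count = str(int(cache.get(key + "_count", "0")) + 1)
            cache[key + "_rev"] = latest_rev
            cache[key + "_count"] = count
        return count + "+" + latest_rev

    print(sortable("git:example", "abc123"))  # 1+abc123
    print(sortable("git:example", "abc123"))  # still 1+abc123
    print(sortable("git:example", "def456"))  # 2+def456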
@@ -25,10 +25,12 @@ BitBake 'Fetch' implementation for bzr.

import os
import sys
import logging
import bb
from bb import data
from bb.fetch import Fetch, FetchError, runfetchcmd, logger
from bb.fetch import Fetch
from bb.fetch import FetchError
from bb.fetch import MissingParameterError
from bb.fetch import runfetchcmd

class Bzr(Fetch):
def supports(self, url, ud, d):
@@ -37,20 +39,23 @@ class Bzr(Fetch):
def localpath (self, url, ud, d):

# Create paths to bzr checkouts
relpath = self._strip_leading_slashes(ud.path)
relpath = ud.path
if relpath.startswith('/'):
# Remove leading slash as os.path.join can't cope
relpath = relpath[1:]
ud.pkgdir = os.path.join(data.expand('${BZRDIR}', d), ud.host, relpath)

revision = Fetch.srcrev_internal_helper(ud, d)
if revision is True:
ud.revision = self.latest_revision(url, ud, d)
elif revision:
ud.revision = revision

if not ud.revision:
ud.revision = self.latest_revision(url, ud, d)

ud.localfile = data.expand('bzr_%s_%s_%s.tar.gz' % (ud.host, ud.path.replace('/', '.'), ud.revision), d)

return os.path.join(data.getVar("DL_DIR", d, True), ud.localfile)

def _buildbzrcommand(self, ud, d, command):
@@ -61,7 +66,9 @@ class Bzr(Fetch):

basecmd = data.expand('${FETCHCMD_bzr}', d)

proto = ud.parm.get('proto', 'http')
proto = "http"
if "proto" in ud.parm:
proto = ud.parm["proto"]

bzrroot = ud.host + ud.path

@@ -85,31 +92,29 @@ class Bzr(Fetch):
def go(self, loc, ud, d):
"""Fetch url"""

# try to use the tarball stash
if Fetch.try_mirror(d, ud.localfile):
bb.msg.debug(1, bb.msg.domain.Fetcher, "%s already exists or was mirrored, skipping bzr checkout." % ud.localpath)
return

if os.access(os.path.join(ud.pkgdir, os.path.basename(ud.pkgdir), '.bzr'), os.R_OK):
bzrcmd = self._buildbzrcommand(ud, d, "update")
logger.debug(1, "BZR Update %s", loc)
bb.msg.debug(1, bb.msg.domain.Fetcher, "BZR Update %s" % loc)
os.chdir(os.path.join (ud.pkgdir, os.path.basename(ud.path)))
runfetchcmd(bzrcmd, d)
else:
bb.utils.remove(os.path.join(ud.pkgdir, os.path.basename(ud.pkgdir)), True)
os.system("rm -rf %s" % os.path.join(ud.pkgdir, os.path.basename(ud.pkgdir)))
bzrcmd = self._buildbzrcommand(ud, d, "fetch")
logger.debug(1, "BZR Checkout %s", loc)
bb.utils.mkdirhier(ud.pkgdir)
bb.msg.debug(1, bb.msg.domain.Fetcher, "BZR Checkout %s" % loc)
bb.mkdirhier(ud.pkgdir)
os.chdir(ud.pkgdir)
logger.debug(1, "Running %s", bzrcmd)
bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % bzrcmd)
runfetchcmd(bzrcmd, d)

os.chdir(ud.pkgdir)

scmdata = ud.parm.get("scmdata", "")
if scmdata == "keep":
tar_flags = ""
else:
tar_flags = "--exclude '.bzr' --exclude '.bzrtags'"

# tar them up to a defined filename
try:
runfetchcmd("tar %s -czf %s %s" % (tar_flags, ud.localpath, os.path.basename(ud.pkgdir)), d)
runfetchcmd("tar -czf %s %s" % (ud.localpath, os.path.basename(ud.pkgdir)), d)
except:
t, v, tb = sys.exc_info()
try:
@@ -118,7 +123,7 @@ class Bzr(Fetch):
pass
raise t, v, tb

def supports_srcrev(self):
def suppports_srcrev(self):
return True

def _revision_key(self, url, ud, d):
@@ -131,7 +136,7 @@ class Bzr(Fetch):
"""
Return the latest upstream revision number
"""
logger.debug(2, "BZR fetcher hitting network for %s", url)
bb.msg.debug(2, bb.msg.domain.Fetcher, "BZR fetcher hitting network for %s" % url)

output = runfetchcmd(self._buildbzrcommand(ud, d, "revno"), d, True)

@@ -146,3 +151,4 @@ class Bzr(Fetch):

def _build_revision(self, url, ud, d):
return ud.revision

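The scmdata handling near the end of the bzr go() method decides whether the .bzr control directory is included in the packed tarball. A minimal sketch of that parameter-to-tar-flags mapping (the parm dicts are illustrative):

    def bzr_tar_flags(parm):
        # scmdata=keep preserves SCM metadata in the tarball; by default
        # the .bzr bookkeeping directories are excluded.
        if parm.get("scmdata", "") == "keep":
            return ""
        return "--exclude '.bzr' --exclude '.bzrtags'"

    print(bzr_tar_flags({}))                   # excludes .bzr
    print(bzr_tar_flags({"scmdata": "keep"}))  # empty flags, keep metadata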
@@ -26,11 +26,12 @@ BitBake build tools.
#Based on functions from the base bb module, Copyright 2003 Holger Schurig
#

import os
import logging
import os, re
import bb
from bb import data
from bb.fetch import Fetch, FetchError, MissingParameterError, logger
from bb.fetch import Fetch
from bb.fetch import FetchError
from bb.fetch import MissingParameterError

class Cvs(Fetch):
"""
@@ -40,14 +41,16 @@ class Cvs(Fetch):
"""
Check to see if a given url can be fetched with cvs.
"""
return ud.type in ['cvs']
return ud.type in ['cvs', 'pserver']

def localpath(self, url, ud, d):
if not "module" in ud.parm:
raise MissingParameterError("cvs method needs a 'module' parameter")
ud.module = ud.parm["module"]

ud.tag = ud.parm.get('tag', "")
ud.tag = ""
if 'tag' in ud.parm:
ud.tag = ud.parm['tag']

# Override the default date in certain cases
if 'date' in ud.parm:
@@ -74,9 +77,22 @@ class Cvs(Fetch):

def go(self, loc, ud, d):

method = ud.parm.get('method', 'pserver')
localdir = ud.parm.get('localdir', ud.module)
cvs_port = ud.parm.get('port', '')
# try to use the tarball stash
if not self.forcefetch(loc, ud, d) and Fetch.try_mirror(d, ud.localfile):
bb.msg.debug(1, bb.msg.domain.Fetcher, "%s already exists or was mirrored, skipping cvs checkout." % ud.localpath)
return

method = "pserver"
if "method" in ud.parm:
method = ud.parm["method"]

localdir = ud.module
if "localdir" in ud.parm:
localdir = ud.parm["localdir"]

cvs_port = ""
if "port" in ud.parm:
cvs_port = ud.parm["port"]

cvs_rsh = None
if method == "ext":
@@ -86,14 +102,7 @@ class Cvs(Fetch):
if method == "dir":
cvsroot = ud.path
else:
cvsroot = ":" + method
cvsproxyhost = data.getVar('CVS_PROXY_HOST', d, True)
if cvsproxyhost:
cvsroot += ";proxy=" + cvsproxyhost
cvsproxyport = data.getVar('CVS_PROXY_PORT', d, True)
if cvsproxyport:
cvsroot += ";proxyport=" + cvsproxyport
cvsroot += ":" + ud.user
cvsroot = ":" + method + ":" + ud.user
if ud.pswd:
cvsroot += ":" + ud.pswd
cvsroot += "@" + ud.host + ":" + cvs_port + ud.path
@@ -102,11 +111,7 @@ class Cvs(Fetch):
if 'norecurse' in ud.parm:
options.append("-l")
if ud.date:
# treat YYYYMMDDHHMM specially for CVS
if len(ud.date) == 12:
options.append("-D \"%s %s:%s UTC\"" % (ud.date[0:8], ud.date[8:10], ud.date[10:12]))
else:
options.append("-D \"%s UTC\"" % ud.date)
options.append("-D %s" % ud.date)
if ud.tag:
options.append("-r %s" % ud.tag)

@@ -125,44 +130,38 @@ class Cvs(Fetch):
cvsupdatecmd = "CVS_RSH=\"%s\" %s" % (cvs_rsh, cvsupdatecmd)

# create module directory
logger.debug(2, "Fetch: checking for module directory")
bb.msg.debug(2, bb.msg.domain.Fetcher, "Fetch: checking for module directory")
pkg = data.expand('${PN}', d)
pkgdir = os.path.join(data.expand('${CVSDIR}', localdata), pkg)
moddir = os.path.join(pkgdir, localdir)
if os.access(os.path.join(moddir, 'CVS'), os.R_OK):
logger.info("Update " + loc)
moddir = os.path.join(pkgdir,localdir)
if os.access(os.path.join(moddir,'CVS'), os.R_OK):
bb.msg.note(1, bb.msg.domain.Fetcher, "Update " + loc)
# update sources there
os.chdir(moddir)
myret = os.system(cvsupdatecmd)
else:
logger.info("Fetch " + loc)
bb.msg.note(1, bb.msg.domain.Fetcher, "Fetch " + loc)
# check out sources there
bb.utils.mkdirhier(pkgdir)
bb.mkdirhier(pkgdir)
os.chdir(pkgdir)
logger.debug(1, "Running %s", cvscmd)
bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % cvscmd)
myret = os.system(cvscmd)

if myret != 0 or not os.access(moddir, os.R_OK):
try:
os.rmdir(moddir)
except OSError:
pass
raise FetchError(ud.module)

scmdata = ud.parm.get("scmdata", "")
if scmdata == "keep":
tar_flags = ""
else:
tar_flags = "--exclude 'CVS'"

# tar them up to a defined filename
if 'fullpath' in ud.parm:
os.chdir(pkgdir)
myret = os.system("tar %s -czf %s %s" % (tar_flags, ud.localpath, localdir))
myret = os.system("tar -czf %s %s" % (ud.localpath, localdir))
else:
os.chdir(moddir)
os.chdir('..')
myret = os.system("tar %s -czf %s %s" % (tar_flags, ud.localpath, os.path.basename(moddir)))
myret = os.system("tar -czf %s %s" % (ud.localpath, os.path.basename(moddir)))

if myret != 0:
try:

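The date handling above special-cases a 12-character YYYYMMDDHHMM stamp, splitting it into a date plus HH:MM so CVS receives a quoted "-D <date> <HH>:<MM> UTC" option. A small sketch of just that slicing:

    def cvs_date_option(date):
        # 12 characters means YYYYMMDDHHMM; anything else is passed through.
        if len(date) == 12:
            return '-D "%s %s:%s UTC"' % (date[0:8], date[8:10], date[10:12])
        return '-D "%s UTC"' % date

    print(cvs_date_option("200601021530"))  # -D "20060102 15:30 UTC"
    print(cvs_date_option("20060102"))      # -D "20060102 UTC"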
@@ -20,22 +20,24 @@ BitBake 'Fetch' git implementation
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import os
import os, re
import bb
import bb.persist_data
from bb import data
from bb.fetch import Fetch
from bb.fetch import FetchError
from bb.fetch import runfetchcmd
from bb.fetch import logger

def prunedir(topdir):
# Delete everything reachable from the directory named in 'topdir'.
# CAUTION: This is dangerous!
for root, dirs, files in os.walk(topdir, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))

class Git(Fetch):
"""Class to fetch a module or modules from git repositories"""
def init(self, d):
#
# Only enable _sortable revision if the key is set
#
if bb.data.getVar("BB_GIT_CLONE_FOR_SRCREV", d, True):
self._sortable_buildindex = self._sortable_buildindex_disabled
def supports(self, url, ud, d):
"""
Check to see if a given url can be fetched with git.
@@ -44,296 +46,97 @@ class Git(Fetch):

def localpath(self, url, ud, d):

ud.proto = "rsync"
if 'protocol' in ud.parm:
ud.proto = ud.parm['protocol']
elif not ud.host:
ud.proto = 'file'
else:
ud.proto = "rsync"

ud.branch = ud.parm.get("branch", "master")

gitsrcname = '%s%s' % (ud.host, ud.path.replace('/', '.'))
ud.mirrortarball = 'git_%s.tar.gz' % (gitsrcname)
ud.clonedir = os.path.join(data.expand('${GITDIR}', d), gitsrcname)
ud.branch = ud.parm.get("branch", "")

tag = Fetch.srcrev_internal_helper(ud, d)
if tag is True:
ud.tag = self.latest_revision(url, ud, d)
elif tag:
ud.tag = tag

if not ud.tag or ud.tag == "master":
if not ud.tag:
ud.tag = self.latest_revision(url, ud, d)

if ud.tag == "master":
ud.tag = self.latest_revision(url, ud, d)

subdir = ud.parm.get("subpath", "")
if subdir != "":
if subdir.endswith("/"):
subdir = subdir[:-1]
subdirpath = os.path.join(ud.path, subdir);
else:
subdirpath = ud.path;

if 'fullclone' in ud.parm:
ud.localfile = ud.mirrortarball
else:
ud.localfile = data.expand('git_%s%s_%s.tar.gz' % (ud.host, subdirpath.replace('/', '.'), ud.tag), d)

ud.basecmd = data.getVar("FETCHCMD_git", d, True) or "git"

if 'noclone' in ud.parm:
ud.localfile = None
return None
ud.localfile = data.expand('git_%s%s_%s.tar.gz' % (ud.host, ud.path.replace('/', '.'), ud.tag), d)

return os.path.join(data.getVar("DL_DIR", d, True), ud.localfile)

def forcefetch(self, url, ud, d):
if 'fullclone' in ud.parm:
return True
if 'noclone' in ud.parm:
return False
if os.path.exists(ud.localpath):
return False
if not self._contains_ref(ud.tag, d):
return True
return False

def try_premirror(self, u, ud, d):
if 'noclone' in ud.parm:
return False
if os.path.exists(ud.clonedir):
return False
if os.path.exists(ud.localpath):
return False

return True

def go(self, loc, ud, d):
"""Fetch url"""

if ud.user:
username = ud.user + '@'
else:
username = ""

repofile = os.path.join(data.getVar("DL_DIR", d, 1), ud.mirrortarball)


coname = '%s' % (ud.tag)
codir = os.path.join(ud.clonedir, coname)

# If we have no existing clone and no mirror tarball, try and obtain one
if not os.path.exists(ud.clonedir) and not os.path.exists(repofile):
try:
Fetch.try_mirrors(ud.mirrortarball)
except:
pass

# If the checkout doesn't exist and the mirror tarball does, extract it
if not os.path.exists(ud.clonedir) and os.path.exists(repofile):
bb.utils.mkdirhier(ud.clonedir)
os.chdir(ud.clonedir)
runfetchcmd("tar -xzf %s" % (repofile), d)

# If the repo still doesn't exist, fallback to cloning it
if not os.path.exists(ud.clonedir):
runfetchcmd("%s clone -n %s://%s%s%s %s" % (ud.basecmd, ud.proto, username, ud.host, ud.path, ud.clonedir), d)

os.chdir(ud.clonedir)
# Update the checkout if needed
if not self._contains_ref(ud.tag, d) or 'fullclone' in ud.parm:
# Remove all but the .git directory
runfetchcmd("rm * -Rf", d)
if 'fullclone' in ud.parm:
runfetchcmd("%s fetch --all" % (ud.basecmd), d)
else:
runfetchcmd("%s fetch %s://%s%s%s %s" % (ud.basecmd, ud.proto, username, ud.host, ud.path, ud.branch), d)
runfetchcmd("%s fetch --tags %s://%s%s%s" % (ud.basecmd, ud.proto, username, ud.host, ud.path), d)
runfetchcmd("%s prune-packed" % ud.basecmd, d)
runfetchcmd("%s pack-redundant --all | xargs -r rm" % ud.basecmd, d)

# Generate a mirror tarball if needed
os.chdir(ud.clonedir)
mirror_tarballs = data.getVar("BB_GENERATE_MIRROR_TARBALLS", d, True)
if mirror_tarballs != "0" or 'fullclone' in ud.parm:
logger.info("Creating tarball of git repository")
runfetchcmd("tar -czf %s %s" % (repofile, os.path.join(".", ".git", "*") ), d)

if 'fullclone' in ud.parm:
if Fetch.try_mirror(d, ud.localfile):
bb.msg.debug(1, bb.msg.domain.Fetcher, "%s already exists (or was stashed). Skipping git checkout." % ud.localpath)
return

if os.path.exists(codir):
bb.utils.prunedir(codir)
gitsrcname = '%s%s' % (ud.host, ud.path.replace('/', '.'))

subdir = ud.parm.get("subpath", "")
if subdir != "":
if subdir.endswith("/"):
subdirbase = os.path.basename(subdir[:-1])
repofilename = 'git_%s.tar.gz' % (gitsrcname)
repofile = os.path.join(data.getVar("DL_DIR", d, 1), repofilename)
repodir = os.path.join(data.expand('${GITDIR}', d), gitsrcname)

coname = '%s' % (ud.tag)
codir = os.path.join(repodir, coname)

if not os.path.exists(repodir):
if Fetch.try_mirror(d, repofilename):
bb.mkdirhier(repodir)
os.chdir(repodir)
runfetchcmd("tar -xzf %s" % (repofile), d)
else:
subdirbase = os.path.basename(subdir)
else:
subdirbase = ""
runfetchcmd("git clone -n %s://%s%s %s" % (ud.proto, ud.host, ud.path, repodir), d)

if subdir != "":
readpathspec = ":%s" % (subdir)
codir = os.path.join(codir, "git")
coprefix = os.path.join(codir, subdirbase, "")
else:
readpathspec = ""
coprefix = os.path.join(codir, "git", "")
os.chdir(repodir)
# Remove all but the .git directory
runfetchcmd("rm * -Rf", d)
runfetchcmd("git fetch %s://%s%s" % (ud.proto, ud.host, ud.path), d)
runfetchcmd("git fetch --tags %s://%s%s" % (ud.proto, ud.host, ud.path), d)
runfetchcmd("git prune-packed", d)
runfetchcmd("git pack-redundant --all | xargs -r rm", d)

scmdata = ud.parm.get("scmdata", "")
if scmdata == "keep":
runfetchcmd("%s clone -n %s %s" % (ud.basecmd, ud.clonedir, coprefix), d)
os.chdir(coprefix)
runfetchcmd("%s checkout -q -f %s%s" % (ud.basecmd, ud.tag, readpathspec), d)
else:
bb.utils.mkdirhier(codir)
os.chdir(ud.clonedir)
runfetchcmd("%s read-tree %s%s" % (ud.basecmd, ud.tag, readpathspec), d)
runfetchcmd("%s checkout-index -q -f --prefix=%s -a" % (ud.basecmd, coprefix), d)
os.chdir(repodir)
mirror_tarballs = data.getVar("BB_GENERATE_MIRROR_TARBALLS", d, True)
if mirror_tarballs != "0":
bb.msg.note(1, bb.msg.domain.Fetcher, "Creating tarball of git repository")
runfetchcmd("tar -czf %s %s" % (repofile, os.path.join(".", ".git", "*") ), d)

if os.path.exists(codir):
prunedir(codir)

bb.mkdirhier(codir)
os.chdir(repodir)
runfetchcmd("git read-tree %s" % (ud.tag), d)
runfetchcmd("git checkout-index -q -f --prefix=%s -a" % (os.path.join(codir, "git", "")), d)

os.chdir(codir)
logger.info("Creating tarball of git checkout")
bb.msg.note(1, bb.msg.domain.Fetcher, "Creating tarball of git checkout")
runfetchcmd("tar -czf %s %s" % (ud.localpath, os.path.join(".", "*") ), d)

os.chdir(ud.clonedir)
bb.utils.prunedir(codir)
os.chdir(repodir)
prunedir(codir)

def supports_srcrev(self):
def suppports_srcrev(self):
return True

def _contains_ref(self, tag, d):
basecmd = data.getVar("FETCHCMD_git", d, True) or "git"
output = runfetchcmd("%s log --pretty=oneline -n 1 %s -- 2> /dev/null | wc -l" % (basecmd, tag), d, quiet=True)
return output.split()[0] != "0"

def _revision_key(self, url, ud, d, branch=False):
def _revision_key(self, url, ud, d):
"""
Return a unique key for the url
"""
key = 'git:' + ud.host + ud.path.replace('/', '.')
if branch:
return key + ud.branch
else:
return key

def generate_revision_key(self, url, ud, d, branch=False):
key = self._revision_key(url, ud, d, branch)
return "%s-%s" % (key, bb.data.getVar("PN", d, True) or "")
return "git:" + ud.host + ud.path.replace('/', '.')

def _latest_revision(self, url, ud, d):
"""
Compute the HEAD revision for the url
"""
if ud.user:
username = ud.user + '@'
else:
username = ""

basecmd = data.getVar("FETCHCMD_git", d, True) or "git"
cmd = "%s ls-remote %s://%s%s%s %s" % (basecmd, ud.proto, username, ud.host, ud.path, ud.branch)
output = runfetchcmd(cmd, d, True)
if not output:
raise bb.fetch.FetchError("Fetch command %s gave empty output\n" % (cmd))
output = runfetchcmd("git ls-remote %s://%s%s %s" % (ud.proto, ud.host, ud.path, ud.branch), d, True)
return output.split()[0]

def latest_revision(self, url, ud, d):
"""
Look in the cache for the latest revision, if not present ask the SCM.
"""
persisted = bb.persist_data.persist(d)
revs = persisted['BB_URI_HEADREVS']

key = self.generate_revision_key(url, ud, d, branch=True)
rev = revs[key]
if rev is None:
# Compatibility with old key format, no branch included
oldkey = self.generate_revision_key(url, ud, d, branch=False)
rev = revs[oldkey]
if rev is not None:
del revs[oldkey]
else:
rev = self._latest_revision(url, ud, d)
revs[key] = rev

return str(rev)

def sortable_revision(self, url, ud, d):
"""

"""
pd = bb.persist_data.persist(d)
localcounts = pd['BB_URI_LOCALCOUNT']
key = self.generate_revision_key(url, ud, d, branch=True)
oldkey = self.generate_revision_key(url, ud, d, branch=False)

latest_rev = self._build_revision(url, ud, d)
last_rev = localcounts[key + '_rev']
if last_rev is None:
last_rev = localcounts[oldkey + '_rev']
if last_rev is not None:
del localcounts[oldkey + '_rev']
localcounts[key + '_rev'] = last_rev

uselocalcount = bb.data.getVar("BB_LOCALCOUNT_OVERRIDE", d, True) or False
count = None
if uselocalcount:
count = Fetch.localcount_internal_helper(ud, d)
if count is None:
count = localcounts[key + '_count']
if count is None:
count = localcounts[oldkey + '_count']
if count is not None:
del localcounts[oldkey + '_count']
localcounts[key + '_count'] = count

if last_rev == latest_rev:
return str(count + "+" + latest_rev)

buildindex_provided = hasattr(self, "_sortable_buildindex")
if buildindex_provided:
count = self._sortable_buildindex(url, ud, d, latest_rev)
if count is None:
count = "0"
elif uselocalcount or buildindex_provided:
count = str(count)
else:
count = str(int(count) + 1)

localcounts[key + '_rev'] = latest_rev
localcounts[key + '_count'] = count

return str(count + "+" + latest_rev)

def _build_revision(self, url, ud, d):
return ud.tag

def _sortable_buildindex_disabled(self, url, ud, d, rev):
"""
Return a suitable buildindex for the revision specified. This is done by counting revisions
using "git rev-list" which may or may not work in different circumstances.
"""

cwd = os.getcwd()

# Check if we have the rev already

if not os.path.exists(ud.clonedir):
print("no repo")
self.go(None, ud, d)
if not os.path.exists(ud.clonedir):
logger.error("GIT repository for %s doesn't exist in %s, cannot get sortable buildnumber, using old value", url, ud.clonedir)
return None


os.chdir(ud.clonedir)
if not self._contains_ref(rev, d):
self.go(None, ud, d)

output = runfetchcmd("%s rev-list %s -- 2> /dev/null | wc -l" % (ud.basecmd, rev), d, quiet=True)
os.chdir(cwd)

buildindex = "%s" % output.split()[0]
logger.debug(1, "GIT repository for %s in %s is returning %s revisions in rev-list before %s", url, ud.clonedir, buildindex, rev)
return buildindex

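The _contains_ref helper above asks git whether a revision is already present in the local clone by counting "git log" output lines, which lets forcefetch skip the network entirely. The same check can be expressed with subprocess in a standalone way (the clone path and revision are illustrative; run it inside an existing clone):

    import subprocess

    def contains_ref(clonedir, rev):
        # "git log -n 1 <rev>" prints one line when the rev exists locally;
        # errors are discarded so a missing rev simply yields zero lines.
        cmd = "git log --pretty=oneline -n 1 %s -- 2> /dev/null | wc -l" % rev
        out = subprocess.check_output(cmd, shell=True, cwd=clonedir)
        return out.split()[0] != b"0"

    print(contains_ref(".", "HEAD"))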
@@ -24,29 +24,23 @@ BitBake 'Fetch' implementation for mercurial DRCS (hg).
#
# Based on functions from the base bb module, Copyright 2003 Holger Schurig

import os
import os, re
import sys
import logging
import bb
from bb import data
from bb.fetch import Fetch
from bb.fetch import FetchError
from bb.fetch import MissingParameterError
from bb.fetch import runfetchcmd
from bb.fetch import logger

class Hg(Fetch):
"""Class to fetch from mercurial repositories"""
"""Class to fetch a from mercurial repositories"""
def supports(self, url, ud, d):
"""
Check to see if a given url can be fetched with mercurial.
"""
return ud.type in ['hg']

def forcefetch(self, url, ud, d):
revTag = ud.parm.get('rev', 'tip')
return revTag == "tip"

def localpath(self, url, ud, d):
if not "module" in ud.parm:
raise MissingParameterError("hg method needs a 'module' parameter")
@@ -54,20 +48,15 @@ class Hg(Fetch):
ud.module = ud.parm["module"]

# Create paths to mercurial checkouts
relpath = self._strip_leading_slashes(ud.path)
relpath = ud.path
if relpath.startswith('/'):
# Remove leading slash as os.path.join can't cope
relpath = relpath[1:]
ud.pkgdir = os.path.join(data.expand('${HGDIR}', d), ud.host, relpath)
ud.moddir = os.path.join(ud.pkgdir, ud.module)

if 'rev' in ud.parm:
ud.revision = ud.parm['rev']
else:
tag = Fetch.srcrev_internal_helper(ud, d)
if tag is True:
ud.revision = self.latest_revision(url, ud, d)
elif tag:
ud.revision = tag
else:
ud.revision = self.latest_revision(url, ud, d)

ud.localfile = data.expand('%s_%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.path.replace('/', '.'), ud.revision), d)

@@ -81,17 +70,16 @@ class Hg(Fetch):

basecmd = data.expand('${FETCHCMD_hg}', d)

proto = ud.parm.get('proto', 'http')
proto = "http"
if "proto" in ud.parm:
proto = ud.parm["proto"]

host = ud.host
if proto == "file":
host = "/"
ud.host = "localhost"

if not ud.user:
hgroot = host + ud.path
else:
hgroot = ud.user + "@" + host + ud.path
hgroot = host + ud.path

if command is "info":
return "%s identify -i %s://%s/%s" % (basecmd, proto, hgroot, ud.module)
@@ -103,10 +91,7 @@ class Hg(Fetch):
if command is "fetch":
cmd = "%s clone %s %s://%s/%s %s" % (basecmd, " ".join(options), proto, hgroot, ud.module, ud.module)
elif command is "pull":
# do not pass options list; limiting pull to rev causes the local
# repo not to contain it and immediately following "update" command
# will crash
cmd = "%s pull" % (basecmd)
cmd = "%s pull %s" % (basecmd, " ".join(options))
elif command is "update":
cmd = "%s update -C %s" % (basecmd, " ".join(options))
else:
@@ -117,41 +102,36 @@ class Hg(Fetch):
def go(self, loc, ud, d):
"""Fetch url"""

logger.debug(2, "Fetch: checking for module directory '" + ud.moddir + "'")
# try to use the tarball stash
if Fetch.try_mirror(d, ud.localfile):
bb.msg.debug(1, bb.msg.domain.Fetcher, "%s already exists or was mirrored, skipping hg checkout." % ud.localpath)
return

bb.msg.debug(2, bb.msg.domain.Fetcher, "Fetch: checking for module directory '" + ud.moddir + "'")

if os.access(os.path.join(ud.moddir, '.hg'), os.R_OK):
updatecmd = self._buildhgcommand(ud, d, "pull")
logger.info("Update " + loc)
bb.msg.note(1, bb.msg.domain.Fetcher, "Update " + loc)
# update sources there
os.chdir(ud.moddir)
logger.debug(1, "Running %s", updatecmd)
bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % updatecmd)
runfetchcmd(updatecmd, d)

updatecmd = self._buildhgcommand(ud, d, "update")
bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % updatecmd)
runfetchcmd(updatecmd, d)
else:
fetchcmd = self._buildhgcommand(ud, d, "fetch")
logger.info("Fetch " + loc)
bb.msg.note(1, bb.msg.domain.Fetcher, "Fetch " + loc)
# check out sources there
bb.utils.mkdirhier(ud.pkgdir)
bb.mkdirhier(ud.pkgdir)
os.chdir(ud.pkgdir)
logger.debug(1, "Running %s", fetchcmd)
bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % fetchcmd)
runfetchcmd(fetchcmd, d)

# Even when we clone (fetch), we still need to update as hg's clone
# won't checkout the specified revision if its on a branch
updatecmd = self._buildhgcommand(ud, d, "update")
os.chdir(ud.moddir)
logger.debug(1, "Running %s", updatecmd)
runfetchcmd(updatecmd, d)

scmdata = ud.parm.get("scmdata", "")
if scmdata == "keep":
tar_flags = ""
else:
tar_flags = "--exclude '.hg' --exclude '.hgrags'"

os.chdir(ud.pkgdir)
try:
runfetchcmd("tar %s -czf %s %s" % (tar_flags, ud.localpath, ud.module), d)
runfetchcmd("tar -czf %s %s" % (ud.localpath, ud.module), d)
except:
t, v, tb = sys.exc_info()
try:
@@ -159,22 +139,3 @@ class Hg(Fetch):
except OSError:
pass
raise t, v, tb

def supports_srcrev(self):
return True

def _latest_revision(self, url, ud, d):
"""
Compute tip revision for the url
"""
output = runfetchcmd(self._buildhgcommand(ud, d, "info"), d)
return output.strip()

def _build_revision(self, url, ud, d):
return ud.revision

def _revision_key(self, url, ud, d):
"""
Return a unique key for the url
"""
return "hg:" + ud.moddir

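The comment in the hg go() method notes that a fresh "hg clone" must still be followed by a forced "hg update", because clone alone will not check out a revision that lives on a branch. A trimmed sketch of the command pair the fetcher ends up building (option handling is simplified; the exact placement of -r in the options list is an assumption):

    def build_hg_cmds(basecmd, proto, hgroot, module, rev):
        # clone first, then a forced update to the requested revision
        fetch = "%s clone %s://%s/%s %s" % (basecmd, proto, hgroot, module, module)
        update = "%s update -C -r %s" % (basecmd, rev)
        return fetch, update

    for c in build_hg_cmds("hg", "http", "example.com/repos", "proj", "tip"):
        print(c)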
@@ -25,18 +25,17 @@ BitBake build tools.
#
# Based on functions from the base bb module, Copyright 2003 Holger Schurig

import os
import os, re
import bb
import bb.utils
from bb import data
from bb.fetch import Fetch

class Local(Fetch):
def supports(self, url, urldata, d):
"""
Check to see if a given url represents a local fetch.
Check to see if a given url can be fetched with cvs.
"""
return urldata.type in ['file']
return urldata.type in ['file','patch']

def localpath(self, url, urldata, d):
"""
@@ -48,7 +47,7 @@ class Local(Fetch):
if path[0] != "/":
filespath = data.getVar('FILESPATH', d, 1)
if filespath:
newpath = bb.utils.which(filespath, path)
newpath = bb.which(filespath, path)
if not newpath:
filesdir = data.getVar('FILESDIR', d, 1)
if filesdir:
@@ -60,14 +59,3 @@ class Local(Fetch):
"""Fetch urls (no-op for Local method)"""
# no need to fetch local files, we'll deal with them in place.
return 1

def checkstatus(self, url, urldata, d):
"""
Check the status of the url
"""
if urldata.localpath.find("*") != -1:
logger.info("URL %s looks like a glob and was therefore not checked.", url)
return True
if os.path.exists(urldata.localpath):
return True
return False

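The Local fetcher's localpath resolves a relative file:// path by searching each directory in FILESPATH, which is what the bb.utils.which call does for it. A plain-Python sketch of that search (the colon-separated search path is illustrative):

    import os

    def which(search_path, candidate):
        # Return the first existing match along the search path, else "".
        for directory in search_path.split(":"):
            full = os.path.join(directory, candidate)
            if os.path.exists(full):
                return full
        return ""

    print(which("/tmp:/etc", "hosts"))  # typically /etc/hosts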
@@ -1,143 +0,0 @@
|
||||
# ex:ts=4:sw=4:sts=4:et
|
||||
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
|
||||
"""
|
||||
Bitbake "Fetch" implementation for osc (Opensuse build service client).
|
||||
Based on the svn "Fetch" implementation.
|
||||
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import logging
|
||||
import bb
|
||||
from bb import data
|
||||
from bb import utils
|
||||
from bb.fetch import Fetch
|
||||
from bb.fetch import FetchError
|
||||
from bb.fetch import MissingParameterError
|
||||
from bb.fetch import runfetchcmd
|
||||
|
||||
class Osc(Fetch):
|
||||
"""Class to fetch a module or modules from Opensuse build server
|
||||
repositories."""
|
||||
|
||||
def supports(self, url, ud, d):
|
||||
"""
|
||||
Check to see if a given url can be fetched with osc.
|
||||
"""
|
||||
return ud.type in ['osc']
|
||||
|
||||
def localpath(self, url, ud, d):
|
||||
if not "module" in ud.parm:
|
||||
raise MissingParameterError("osc method needs a 'module' parameter.")
|
||||
|
||||
ud.module = ud.parm["module"]
|
||||
|
||||
# Create paths to osc checkouts
|
||||
relpath = self._strip_leading_slashes(ud.path)
|
||||
ud.pkgdir = os.path.join(data.expand('${OSCDIR}', d), ud.host)
|
||||
ud.moddir = os.path.join(ud.pkgdir, relpath, ud.module)
|
||||
|
||||
if 'rev' in ud.parm:
|
||||
ud.revision = ud.parm['rev']
|
||||
else:
|
||||
pv = data.getVar("PV", d, 0)
|
||||
rev = Fetch.srcrev_internal_helper(ud, d)
|
||||
if rev and rev != True:
|
||||
ud.revision = rev
|
||||
else:
|
||||
ud.revision = ""
|
||||
|
||||
ud.localfile = data.expand('%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), ud.path.replace('/', '.'), ud.revision), d)
|
||||
|
||||
return os.path.join(data.getVar("DL_DIR", d, True), ud.localfile)
|
||||
|
||||
def _buildosccommand(self, ud, d, command):
|
||||
"""
|
||||
Build up an ocs commandline based on ud
|
||||
command is "fetch", "update", "info"
|
||||
"""
|
||||
|
||||
basecmd = data.expand('${FETCHCMD_osc}', d)
|
||||
|
||||
proto = ud.parm.get('proto', 'ocs')
|
||||
|
||||
options = []
|
||||
|
||||
config = "-c %s" % self.generate_config(ud, d)
|
||||
|
||||
if ud.revision:
|
||||
options.append("-r %s" % ud.revision)
|
||||
|
||||
coroot = self._strip_leading_slashes(ud.path)
|
||||
|
||||
if command is "fetch":
|
||||
osccmd = "%s %s co %s/%s %s" % (basecmd, config, coroot, ud.module, " ".join(options))
|
||||
elif command is "update":
|
||||
osccmd = "%s %s up %s" % (basecmd, config, " ".join(options))
|
||||
else:
|
||||
raise FetchError("Invalid osc command %s" % command)
|
||||
|
||||
return osccmd
|
||||
|
||||
def go(self, loc, ud, d):
|
||||
"""
|
||||
Fetch url
|
||||
"""
|
||||
|
||||
logger.debug(2, "Fetch: checking for module directory '" + ud.moddir + "'")
|
||||
|
||||
if os.access(os.path.join(data.expand('${OSCDIR}', d), ud.path, ud.module), os.R_OK):
|
||||
oscupdatecmd = self._buildosccommand(ud, d, "update")
|
||||
logger.info("Update "+ loc)
|
||||
# update sources there
|
||||
os.chdir(ud.moddir)
|
||||
logger.debug(1, "Running %s", oscupdatecmd)
|
||||
runfetchcmd(oscupdatecmd, d)
|
||||
else:
|
||||
oscfetchcmd = self._buildosccommand(ud, d, "fetch")
|
||||
logger.info("Fetch " + loc)
|
||||
# check out sources there
|
||||
bb.utils.mkdirhier(ud.pkgdir)
|
||||
os.chdir(ud.pkgdir)
|
||||
logger.debug(1, "Running %s", oscfetchcmd)
|
||||
runfetchcmd(oscfetchcmd, d)
|
||||
|
||||
os.chdir(os.path.join(ud.pkgdir + ud.path))
|
||||
# tar them up to a defined filename
|
||||
try:
|
||||
runfetchcmd("tar -czf %s %s" % (ud.localpath, ud.module), d)
|
||||
except:
|
||||
t, v, tb = sys.exc_info()
|
||||
try:
|
||||
os.unlink(ud.localpath)
|
||||
except OSError:
|
||||
pass
|
||||
raise t, v, tb
|
||||
|
||||
def supports_srcrev(self):
|
||||
return False
|
||||
|
||||
def generate_config(self, ud, d):
|
||||
"""
|
||||
Generate a .oscrc to be used for this run.
|
||||
"""
|
||||
|
||||
config_path = os.path.join(data.expand('${OSCDIR}', d), "oscrc")
|
||||
bb.utils.remove(config_path)
|
||||
|
||||
f = open(config_path, 'w')
|
||||
f.write("[general]\n")
|
||||
f.write("apisrv = %s\n" % ud.host)
|
||||
f.write("scheme = http\n")
|
||||
f.write("su-wrapper = su -c\n")
|
||||
f.write("build-root = %s\n" % data.expand('${WORKDIR}', d))
|
||||
f.write("urllist = http://moblin-obs.jf.intel.com:8888/build/%(project)s/%(repository)s/%(buildarch)s/:full/%(name)s.rpm\n")
|
||||
f.write("extra-pkgs = gzip\n")
|
||||
f.write("\n")
|
||||
f.write("[%s]\n" % ud.host)
|
||||
f.write("user = %s\n" % ud.parm["user"])
|
||||
f.write("pass = %s\n" % ud.parm["pswd"])
|
||||
f.close()
|
||||
|
||||
return config_path
|
||||
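For reference, the .oscrc that generate_config writes out ends up looking like the following (the host, user, password, and build-root values here are illustrative; only the urllist line is fixed in the code above):

    [general]
    apisrv = api.example.org
    scheme = http
    su-wrapper = su -c
    build-root = /path/to/workdir
    urllist = http://moblin-obs.jf.intel.com:8888/build/%(project)s/%(repository)s/%(buildarch)s/:full/%(name)s.rpm
    extra-pkgs = gzip

    [api.example.org]
    user = builder
    pass = secret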
@@ -25,28 +25,26 @@ BitBake build tools.
#
# Based on functions from the base bb module, Copyright 2003 Holger Schurig

from future_builtins import zip
import os
import logging
import os, re
import bb
from bb import data
from bb.fetch import Fetch
from bb.fetch import FetchError
from bb.fetch import logger
from bb.fetch import MissingParameterError

class Perforce(Fetch):
def supports(self, url, ud, d):
return ud.type in ['p4']

def doparse(url, d):
def doparse(url,d):
parm = {}
path = url.split("://")[1]
delim = path.find("@");
if delim != -1:
(user, pswd, host, port) = path.split('@')[0].split(":")
(user,pswd,host,port) = path.split('@')[0].split(":")
path = path.split('@')[1]
else:
(host, port) = data.getVar('P4PORT', d).split(':')
(host,port) = data.getVar('P4PORT', d).split(':')
user = ""
pswd = ""

@@ -56,28 +54,27 @@ class Perforce(Fetch):
plist = path.split(';')
for item in plist:
if item.count('='):
(key, value) = item.split('=')
(key,value) = item.split('=')
keys.append(key)
values.append(value)

parm = dict(zip(keys, values))
parm = dict(zip(keys,values))
path = "//" + path.split(';')[0]
host += ":%s" % (port)
parm["cset"] = Perforce.getcset(d, path, host, user, pswd, parm)

return host, path, user, pswd, parm
return host,path,user,pswd,parm
doparse = staticmethod(doparse)

def getcset(d, depot, host, user, pswd, parm):
p4opt = ""
def getcset(d, depot,host,user,pswd,parm):
if "cset" in parm:
return parm["cset"];
if user:
p4opt += " -u %s" % (user)
data.setVar('P4USER', user, d)
if pswd:
p4opt += " -P %s" % (pswd)
data.setVar('P4PASSWD', pswd, d)
if host:
p4opt += " -p %s" % (host)
data.setVar('P4PORT', host, d)

p4date = data.getVar("P4DATE", d, 1)
if "revision" in parm:
@@ -88,19 +85,19 @@ class Perforce(Fetch):
depot += "@%s" % (p4date)

p4cmd = data.getVar('FETCHCOMMAND_p4', d, 1)
logger.debug(1, "Running %s%s changes -m 1 %s", p4cmd, p4opt, depot)
p4file = os.popen("%s%s changes -m 1 %s" % (p4cmd, p4opt, depot))
bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s changes -m 1 %s" % (p4cmd, depot))
p4file = os.popen("%s changes -m 1 %s" % (p4cmd,depot))
cset = p4file.readline().strip()
logger.debug(1, "READ %s", cset)
bb.msg.debug(1, bb.msg.domain.Fetcher, "READ %s" % (cset))
if not cset:
return -1

return cset.split(' ')[1]
getcset = staticmethod(getcset)

def localpath(self, url, ud, d):

(host, path, user, pswd, parm) = Perforce.doparse(url, d)
(host,path,user,pswd,parm) = Perforce.doparse(url,d)

# If a label is specified, we use that as our filename

@@ -113,11 +110,12 @@ class Perforce(Fetch):
if which != -1:
base = path[:which]

base = self._strip_leading_slashes(base)
if base[0] == "/":
base = base[1:]

cset = Perforce.getcset(d, path, host, user, pswd, parm)

ud.localfile = data.expand('%s+%s+%s.tar.gz' % (host, base.replace('/', '.'), cset), d)
ud.localfile = data.expand('%s+%s+%s.tar.gz' % (host,base.replace('/', '.'), cset), d)

return os.path.join(data.getVar("DL_DIR", d, 1), ud.localfile)

@@ -126,60 +124,67 @@ class Perforce(Fetch):
Fetch urls
"""

(host, depot, user, pswd, parm) = Perforce.doparse(loc, d)
# try to use the tarball stash
if Fetch.try_mirror(d, ud.localfile):
bb.msg.debug(1, bb.msg.domain.Fetcher, "%s already exists or was mirrored, skipping perforce checkout." % ud.localpath)
return

(host,depot,user,pswd,parm) = Perforce.doparse(loc, d)

if depot.find('/...') != -1:
path = depot[:depot.find('/...')]
else:
path = depot

module = parm.get('module', os.path.basename(path))
if "module" in parm:
module = parm["module"]
else:
module = os.path.basename(path)

localdata = data.createCopy(d)
data.setVar('OVERRIDES', "p4:%s" % data.getVar('OVERRIDES', localdata), localdata)
data.update_data(localdata)

# Get the p4 command
p4opt = ""
if user:
p4opt += " -u %s" % (user)
data.setVar('P4USER', user, localdata)

if pswd:
p4opt += " -P %s" % (pswd)
data.setVar('P4PASSWD', pswd, localdata)

if host:
p4opt += " -p %s" % (host)
data.setVar('P4PORT', host, localdata)

p4cmd = data.getVar('FETCHCOMMAND', localdata, 1)

# create temp directory
logger.debug(2, "Fetch: creating temporary directory")
bb.utils.mkdirhier(data.expand('${WORKDIR}', localdata))
bb.msg.debug(2, bb.msg.domain.Fetcher, "Fetch: creating temporary directory")
bb.mkdirhier(data.expand('${WORKDIR}', localdata))
data.setVar('TMPBASE', data.expand('${WORKDIR}/oep4.XXXXXX', localdata), localdata)
tmppipe = os.popen(data.getVar('MKTEMPDIRCMD', localdata, 1) or "false")
tmpfile = tmppipe.readline().strip()
if not tmpfile:
logger.error("Fetch: unable to create temporary directory.. make sure 'mktemp' is in the PATH.")
bb.error("Fetch: unable to create temporary directory.. make sure 'mktemp' is in the PATH.")
raise FetchError(module)

if "label" in parm:
depot = "%s@%s" % (depot, parm["label"])
depot = "%s@%s" % (depot,parm["label"])
else:
cset = Perforce.getcset(d, depot, host, user, pswd, parm)
depot = "%s@%s" % (depot, cset)
depot = "%s@%s" % (depot,cset)

os.chdir(tmpfile)
logger.info("Fetch " + loc)
logger.info("%s%s files %s", p4cmd, p4opt, depot)
p4file = os.popen("%s%s files %s" % (p4cmd, p4opt, depot))
bb.msg.note(1, bb.msg.domain.Fetcher, "Fetch " + loc)
bb.msg.note(1, bb.msg.domain.Fetcher, "%s files %s" % (p4cmd, depot))
p4file = os.popen("%s files %s" % (p4cmd, depot))

if not p4file:
logger.error("Fetch: unable to get the P4 files from %s", depot)
bb.error("Fetch: unable to get the P4 files from %s" % (depot))
raise FetchError(module)

count = 0

for file in p4file:
list = file.split()

if list[2] == "delete":
@@ -188,11 +193,11 @@ class Perforce(Fetch):
dest = list[0][len(path)+1:]
where = dest.find("#")

os.system("%s%s print -o %s/%s %s" % (p4cmd, p4opt, module, dest[:where], list[0]))
os.system("%s print -o %s/%s %s" % (p4cmd, module,dest[:where],list[0]))
count = count + 1


if count == 0:
logger.error("Fetch: No files gathered from the P4 fetch")
bb.error("Fetch: No files gathered from the P4 fetch")
raise FetchError(module)

myret = os.system("tar -czf %s %s" % (ud.localpath, module))
@@ -203,4 +208,6 @@ class Perforce(Fetch):
pass
raise FetchError(module)
# cleanup
bb.utils.prunedir(tmpfile)
os.system('rm -rf %s' % tmpfile)


@@ -1,98 +0,0 @@
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
BitBake "Fetch" repo (git) implementation

"""

# Copyright (C) 2009 Tom Rini <trini@embeddedalley.com>
#
# Based on git.py which is:
#Copyright (C) 2005 Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import os
import bb
from bb import data
from bb.fetch import Fetch
from bb.fetch import runfetchcmd

class Repo(Fetch):
    """Class to fetch a module or modules from repo (git) repositories"""
    def supports(self, url, ud, d):
        """
        Check to see if a given url can be fetched with repo.
        """
        return ud.type in ["repo"]

    def localpath(self, url, ud, d):
        """
        We don"t care about the git rev of the manifests repository, but
        we do care about the manifest to use. The default is "default".
        We also care about the branch or tag to be used. The default is
        "master".
        """

        ud.proto = ud.parm.get('protocol', 'git')
        ud.branch = ud.parm.get('branch', 'master')
        ud.manifest = ud.parm.get('manifest', 'default.xml')
        if not ud.manifest.endswith('.xml'):
            ud.manifest += '.xml'

        ud.localfile = data.expand("repo_%s%s_%s_%s.tar.gz" % (ud.host, ud.path.replace("/", "."), ud.manifest, ud.branch), d)

        return os.path.join(data.getVar("DL_DIR", d, True), ud.localfile)

    def go(self, loc, ud, d):
        """Fetch url"""

        if os.access(os.path.join(data.getVar("DL_DIR", d, True), ud.localfile), os.R_OK):
            logger.debug(1, "%s already exists (or was stashed). Skipping repo init / sync.", ud.localpath)
            return

        gitsrcname = "%s%s" % (ud.host, ud.path.replace("/", "."))
        repodir = data.getVar("REPODIR", d, True) or os.path.join(data.getVar("DL_DIR", d, True), "repo")
        codir = os.path.join(repodir, gitsrcname, ud.manifest)

        if ud.user:
            username = ud.user + "@"
        else:
            username = ""

        bb.utils.mkdirhier(os.path.join(codir, "repo"))
        os.chdir(os.path.join(codir, "repo"))
        if not os.path.exists(os.path.join(codir, "repo", ".repo")):
            runfetchcmd("repo init -m %s -b %s -u %s://%s%s%s" % (ud.manifest, ud.branch, ud.proto, username, ud.host, ud.path), d)

        runfetchcmd("repo sync", d)
        os.chdir(codir)

        scmdata = ud.parm.get("scmdata", "")
        if scmdata == "keep":
            tar_flags = ""
        else:
            tar_flags = "--exclude '.repo' --exclude '.git'"

        # Create a cache
        runfetchcmd("tar %s -czf %s %s" % (tar_flags, ud.localpath, os.path.join(".", "*") ), d)

    def supports_srcrev(self):
        return False

    def _build_revision(self, url, ud, d):
        return ud.manifest

    def _want_sortable_revision(self, url, ud, d):
        return False
@@ -37,9 +37,11 @@ IETF secsh internet draft:
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import re, os
import bb
from bb import data
from bb.fetch import Fetch
from bb.fetch import FetchError
from bb.fetch import MissingParameterError


__pattern__ = re.compile(r'''
@@ -114,5 +116,5 @@ class SSH(Fetch):

        (exitstatus, output) = commands.getstatusoutput(cmd)
        if exitstatus != 0:
            print(output)
            print output
            raise FetchError('Unable to fetch %s' % url)

@@ -25,20 +25,18 @@ This implementation is for svk. It is based on the svn implementation
#
# Based on functions from the base bb module, Copyright 2003 Holger Schurig

import os
import logging
import os, re
import bb
from bb import data
from bb.fetch import Fetch
from bb.fetch import FetchError
from bb.fetch import MissingParameterError
from bb.fetch import logger

class Svk(Fetch):
    """Class to fetch a module or modules from svk repositories"""
    def supports(self, url, ud, d):
        """
        Check to see if a given url can be fetched with svk.
        Check to see if a given url can be fetched with cvs.
        """
        return ud.type in ['svk']

@@ -48,41 +46,48 @@ class Svk(Fetch):
        else:
            ud.module = ud.parm["module"]

        ud.revision = ud.parm.get('rev', "")
        ud.revision = ""
        if 'rev' in ud.parm:
            ud.revision = ud.parm['rev']

        ud.localfile = data.expand('%s_%s_%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.path.replace('/', '.'), ud.revision, ud.date), d)

        return os.path.join(data.getVar("DL_DIR", d, True), ud.localfile)

    def forcefetch(self, url, ud, d):
        return ud.date == "now"
        if (ud.date == "now"):
            return True
        return False

    def go(self, loc, ud, d):
        """Fetch urls"""

        if not self.forcefetch(loc, ud, d) and Fetch.try_mirror(d, ud.localfile):
            return

        svkroot = ud.host + ud.path

        svkcmd = "svk co -r {%s} %s/%s" % (ud.date, svkroot, ud.module)
        svkcmd = "svk co -r {%s} %s/%s" % (date, svkroot, ud.module)

        if ud.revision:
            svkcmd = "svk co -r %s %s/%s" % (ud.revision, svkroot, ud.module)
            svkcmd = "svk co -r %s/%s" % (ud.revision, svkroot, ud.module)

        # create temp directory
        localdata = data.createCopy(d)
        data.update_data(localdata)
        logger.debug(2, "Fetch: creating temporary directory")
        bb.utils.mkdirhier(data.expand('${WORKDIR}', localdata))
        bb.msg.debug(2, bb.msg.domain.Fetcher, "Fetch: creating temporary directory")
        bb.mkdirhier(data.expand('${WORKDIR}', localdata))
        data.setVar('TMPBASE', data.expand('${WORKDIR}/oesvk.XXXXXX', localdata), localdata)
        tmppipe = os.popen(data.getVar('MKTEMPDIRCMD', localdata, 1) or "false")
        tmpfile = tmppipe.readline().strip()
        if not tmpfile:
            logger.error("Fetch: unable to create temporary directory.. make sure 'mktemp' is in the PATH.")
            bb.msg.error(bb.msg.domain.Fetcher, "Fetch: unable to create temporary directory.. make sure 'mktemp' is in the PATH.")
            raise FetchError(ud.module)

        # check out sources there
        os.chdir(tmpfile)
        logger.info("Fetch " + loc)
        logger.debug(1, "Running %s", svkcmd)
        bb.msg.note(1, bb.msg.domain.Fetcher, "Fetch " + loc)
        bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % svkcmd)
        myret = os.system(svkcmd)
        if myret != 0:
            try:
@@ -101,4 +106,4 @@ class Svk(Fetch):
                pass
            raise FetchError(ud.module)
        # cleanup
        bb.utils.prunedir(tmpfile)
        os.system('rm -rf %s' % tmpfile)

@@ -23,16 +23,14 @@ BitBake 'Fetch' implementation for svn.
#
# Based on functions from the base bb module, Copyright 2003 Holger Schurig

import os
import os, re
import sys
import logging
import bb
from bb import data
from bb.fetch import Fetch
from bb.fetch import FetchError
from bb.fetch import MissingParameterError
from bb.fetch import runfetchcmd
from bb.fetch import logger

class Svn(Fetch):
    """Class to fetch a module or modules from svn repositories"""
@@ -49,7 +47,10 @@ class Svn(Fetch):
        ud.module = ud.parm["module"]

        # Create paths to svn checkouts
        relpath = self._strip_leading_slashes(ud.path)
        relpath = ud.path
        if relpath.startswith('/'):
            # Remove leading slash as os.path.join can't cope
            relpath = relpath[1:]
        ud.pkgdir = os.path.join(data.expand('${SVNDIR}', d), ud.host, relpath)
        ud.moddir = os.path.join(ud.pkgdir, ud.module)

@@ -77,7 +78,7 @@ class Svn(Fetch):
            ud.revision = rev
            ud.date = ""
        else:
            ud.revision = ""
            ud.revision = ""

        ud.localfile = data.expand('%s_%s_%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.path.replace('/', '.'), ud.revision, ud.date), d)

@@ -91,7 +92,9 @@ class Svn(Fetch):

        basecmd = data.expand('${FETCHCMD_svn}', d)

        proto = ud.parm.get('proto', 'svn')
        proto = "svn"
        if "proto" in ud.parm:
            proto = ud.parm["proto"]

        svn_rsh = None
        if proto == "svn+ssh" and "rsh" in ud.parm:
@@ -111,15 +114,13 @@ class Svn(Fetch):
        if command is "info":
            svncmd = "%s info %s %s://%s/%s/" % (basecmd, " ".join(options), proto, svnroot, ud.module)
        else:
            suffix = ""
            if ud.revision:
                options.append("-r %s" % ud.revision)
                suffix = "@%s" % (ud.revision)
            elif ud.date:
                options.append("-r {%s}" % ud.date)

            if command is "fetch":
                svncmd = "%s co %s %s://%s/%s%s %s" % (basecmd, " ".join(options), proto, svnroot, ud.module, suffix, ud.module)
                svncmd = "%s co %s %s://%s/%s %s" % (basecmd, " ".join(options), proto, svnroot, ud.module, ud.module)
            elif command is "update":
                svncmd = "%s update %s" % (basecmd, " ".join(options))
            else:
@@ -133,34 +134,33 @@ class Svn(Fetch):
    def go(self, loc, ud, d):
        """Fetch url"""

        logger.debug(2, "Fetch: checking for module directory '" + ud.moddir + "'")
        # try to use the tarball stash
        if Fetch.try_mirror(d, ud.localfile):
            bb.msg.debug(1, bb.msg.domain.Fetcher, "%s already exists or was mirrored, skipping svn checkout." % ud.localpath)
            return

        bb.msg.debug(2, bb.msg.domain.Fetcher, "Fetch: checking for module directory '" + ud.moddir + "'")

        if os.access(os.path.join(ud.moddir, '.svn'), os.R_OK):
            svnupdatecmd = self._buildsvncommand(ud, d, "update")
            logger.info("Update " + loc)
            bb.msg.note(1, bb.msg.domain.Fetcher, "Update " + loc)
            # update sources there
            os.chdir(ud.moddir)
            logger.debug(1, "Running %s", svnupdatecmd)
            bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % svnupdatecmd)
            runfetchcmd(svnupdatecmd, d)
        else:
            svnfetchcmd = self._buildsvncommand(ud, d, "fetch")
            logger.info("Fetch " + loc)
            bb.msg.note(1, bb.msg.domain.Fetcher, "Fetch " + loc)
            # check out sources there
            bb.utils.mkdirhier(ud.pkgdir)
            bb.mkdirhier(ud.pkgdir)
            os.chdir(ud.pkgdir)
            logger.debug(1, "Running %s", svnfetchcmd)
            bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % svnfetchcmd)
            runfetchcmd(svnfetchcmd, d)

        scmdata = ud.parm.get("scmdata", "")
        if scmdata == "keep":
            tar_flags = ""
        else:
            tar_flags = "--exclude '.svn'"

        os.chdir(ud.pkgdir)
        # tar them up to a defined filename
        try:
            runfetchcmd("tar %s -czf %s %s" % (tar_flags, ud.localpath, ud.module), d)
            runfetchcmd("tar -czf %s %s" % (ud.localpath, ud.module), d)
        except:
            t, v, tb = sys.exc_info()
            try:
@@ -169,7 +169,7 @@ class Svn(Fetch):
                pass
            raise t, v, tb

    def supports_srcrev(self):
    def suppports_srcrev(self):
        return True

    def _revision_key(self, url, ud, d):
@@ -182,7 +182,7 @@ class Svn(Fetch):
        """
        Return the latest upstream revision number
        """
        logger.debug(2, "SVN fetcher hitting network for %s", url)
        bb.msg.debug(2, bb.msg.domain.Fetcher, "SVN fetcher hitting network for %s" % url)

        output = runfetchcmd("LANG=C LC_ALL=C " + self._buildsvncommand(ud, d, "info"), d, True)

@@ -25,56 +25,51 @@ BitBake build tools.
#
# Based on functions from the base bb module, Copyright 2003 Holger Schurig

import os
import logging
import os, re
import bb
import urllib
from bb import data
from bb.fetch import Fetch, FetchError, encodeurl, decodeurl, logger, runfetchcmd
from bb.fetch import Fetch
from bb.fetch import FetchError
from bb.fetch import uri_replace

class Wget(Fetch):
    """Class to fetch urls via 'wget'"""
    def supports(self, url, ud, d):
        """
        Check to see if a given url can be fetched with wget.
        Check to see if a given url can be fetched with cvs.
        """
        return ud.type in ['http', 'https', 'ftp']
        return ud.type in ['http','https','ftp']

    def localpath(self, url, ud, d):

        url = encodeurl([ud.type, ud.host, ud.path, ud.user, ud.pswd, {}])
        url = bb.encodeurl([ud.type, ud.host, ud.path, ud.user, ud.pswd, {}])
        ud.basename = os.path.basename(ud.path)
        ud.localfile = data.expand(urllib.unquote(ud.basename), d)
        ud.localfile = data.expand(os.path.basename(url), d)

        return os.path.join(data.getVar("DL_DIR", d, True), ud.localfile)

    def go(self, uri, ud, d, checkonly = False):
    def go(self, uri, ud, d):
        """Fetch urls"""

        def fetch_uri(uri, ud, d):
            if checkonly:
                fetchcmd = data.getVar("CHECKCOMMAND", d, 1)
            elif os.path.exists(ud.localpath):
            if os.path.exists(ud.localpath):
                # file exists, but we didnt complete it.. trying again..
                fetchcmd = data.getVar("RESUMECOMMAND", d, 1)
            else:
                fetchcmd = data.getVar("FETCHCOMMAND", d, 1)

            uri = uri.split(";")[0]
            uri_decoded = list(decodeurl(uri))
            uri_type = uri_decoded[0]
            uri_host = uri_decoded[1]

            fetchcmd = fetchcmd.replace("${URI}", uri.split(";")[0])
            bb.msg.note(1, bb.msg.domain.Fetcher, "fetch " + uri)
            fetchcmd = fetchcmd.replace("${URI}", uri)
            fetchcmd = fetchcmd.replace("${FILE}", ud.basename)
            logger.info("fetch " + uri)
            logger.debug(2, "executing " + fetchcmd)
            runfetchcmd(fetchcmd, d)
            bb.msg.debug(2, bb.msg.domain.Fetcher, "executing " + fetchcmd)
            ret = os.system(fetchcmd)
            if ret != 0:
                return False

            # Sanity check since wget can pretend it succeed when it didn't
            # Also, this used to happen if sourceforge sent us to the mirror page
            if not os.path.exists(ud.localpath) and not checkonly:
                logger.debug(2, "The fetch command for %s returned success but %s doesn't exist?...", uri, ud.localpath)
            if not os.path.exists(ud.localpath):
                bb.msg.debug(2, bb.msg.domain.Fetcher, "The fetch command for %s returned success but %s doesn't exist?..." % (uri, ud.localpath))
                return False

            return True
@@ -83,11 +78,22 @@ class Wget(Fetch):
        data.setVar('OVERRIDES', "wget:" + data.getVar('OVERRIDES', localdata), localdata)
        data.update_data(localdata)

        premirrors = [ i.split() for i in (data.getVar('PREMIRRORS', localdata, 1) or "").split('\n') if i ]
        for (find, replace) in premirrors:
            newuri = uri_replace(uri, find, replace, d)
            if newuri != uri:
                if fetch_uri(newuri, ud, localdata):
                    return

        if fetch_uri(uri, ud, localdata):
            return True
            return

        # try mirrors
        mirrors = [ i.split() for i in (data.getVar('MIRRORS', localdata, 1) or "").split('\n') if i ]
        for (find, replace) in mirrors:
            newuri = uri_replace(uri, find, replace, d)
            if newuri != uri:
                if fetch_uri(newuri, ud, localdata):
                    return

        raise FetchError(uri)


    def checkstatus(self, uri, ud, d):
        return self.go(uri, ud, d, True)

File diff suppressed because it is too large
@@ -1,143 +0,0 @@
"""
BitBake 'Fetch' implementation for bzr.

"""

# Copyright (C) 2007 Ross Burton
# Copyright (C) 2007 Richard Purdie
#
# Classes for obtaining upstream sources for the
# BitBake build tools.
# Copyright (C) 2003, 2004 Chris Larson
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import os
import sys
import logging
import bb
from bb import data
from bb.fetch2 import FetchMethod
from bb.fetch2 import FetchError
from bb.fetch2 import runfetchcmd
from bb.fetch2 import logger

class Bzr(FetchMethod):
    def supports(self, url, ud, d):
        return ud.type in ['bzr']

    def urldata_init(self, ud, d):
        """
        init bzr specific variable within url data
        """
        # Create paths to bzr checkouts
        relpath = self._strip_leading_slashes(ud.path)
        ud.pkgdir = os.path.join(data.expand('${BZRDIR}', d), ud.host, relpath)

        ud.setup_revisons(d)

        if not ud.revision:
            ud.revision = self.latest_revision(ud.url, ud, d)

        ud.localfile = data.expand('bzr_%s_%s_%s.tar.gz' % (ud.host, ud.path.replace('/', '.'), ud.revision), d)

    def _buildbzrcommand(self, ud, d, command):
        """
        Build up an bzr commandline based on ud
        command is "fetch", "update", "revno"
        """

        basecmd = data.expand('${FETCHCMD_bzr}', d)

        proto = ud.parm.get('proto', 'http')

        bzrroot = ud.host + ud.path

        options = []

        if command is "revno":
            bzrcmd = "%s revno %s %s://%s" % (basecmd, " ".join(options), proto, bzrroot)
        else:
            if ud.revision:
                options.append("-r %s" % ud.revision)

            if command is "fetch":
                bzrcmd = "%s co %s %s://%s" % (basecmd, " ".join(options), proto, bzrroot)
            elif command is "update":
                bzrcmd = "%s pull %s --overwrite" % (basecmd, " ".join(options))
            else:
                raise FetchError("Invalid bzr command %s" % command, ud.url)

        return bzrcmd

    def download(self, loc, ud, d):
        """Fetch url"""

        if os.access(os.path.join(ud.pkgdir, os.path.basename(ud.pkgdir), '.bzr'), os.R_OK):
            bzrcmd = self._buildbzrcommand(ud, d, "update")
            logger.debug(1, "BZR Update %s", loc)
            bb.fetch2.check_network_access(d, bzrcmd, ud.url)
            os.chdir(os.path.join (ud.pkgdir, os.path.basename(ud.path)))
            runfetchcmd(bzrcmd, d)
        else:
            bb.utils.remove(os.path.join(ud.pkgdir, os.path.basename(ud.pkgdir)), True)
            bzrcmd = self._buildbzrcommand(ud, d, "fetch")
            bb.fetch2.check_network_access(d, bzrcmd, ud.url)
            logger.debug(1, "BZR Checkout %s", loc)
            bb.utils.mkdirhier(ud.pkgdir)
            os.chdir(ud.pkgdir)
            logger.debug(1, "Running %s", bzrcmd)
            runfetchcmd(bzrcmd, d)

        os.chdir(ud.pkgdir)

        scmdata = ud.parm.get("scmdata", "")
        if scmdata == "keep":
            tar_flags = ""
        else:
            tar_flags = "--exclude '.bzr' --exclude '.bzrtags'"

        # tar them up to a defined filename
        runfetchcmd("tar %s -czf %s %s" % (tar_flags, ud.localpath, os.path.basename(ud.pkgdir)), d, cleanup = [ud.localpath])

    def supports_srcrev(self):
        return True

    def _revision_key(self, url, ud, d, name):
        """
        Return a unique key for the url
        """
        return "bzr:" + ud.pkgdir

    def _latest_revision(self, url, ud, d, name):
        """
        Return the latest upstream revision number
        """
        logger.debug(2, "BZR fetcher hitting network for %s", url)

        bb.fetch2.check_network_access(d, self._buildbzrcommand(ud, d, "revno"), ud.url)

        output = runfetchcmd(self._buildbzrcommand(ud, d, "revno"), d, True)

        return output.strip()

    def _sortable_revision(self, url, ud, d):
        """
        Return a sortable revision number which in our case is the revision number
        """

        return self._build_revision(url, ud, d)

    def _build_revision(self, url, ud, d):
        return ud.revision
@@ -1,181 +0,0 @@
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
BitBake 'Fetch' implementations

Classes for obtaining upstream sources for the
BitBake build tools.

"""

# Copyright (C) 2003, 2004 Chris Larson
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#Based on functions from the base bb module, Copyright 2003 Holger Schurig
#

import os
import logging
import bb
from bb import data
from bb.fetch2 import FetchMethod, FetchError, MissingParameterError, logger
from bb.fetch2 import runfetchcmd

class Cvs(FetchMethod):
    """
    Class to fetch a module or modules from cvs repositories
    """
    def supports(self, url, ud, d):
        """
        Check to see if a given url can be fetched with cvs.
        """
        return ud.type in ['cvs']

    def urldata_init(self, ud, d):
        if not "module" in ud.parm:
            raise MissingParameterError("module", ud.url)
        ud.module = ud.parm["module"]

        ud.tag = ud.parm.get('tag', "")

        # Override the default date in certain cases
        if 'date' in ud.parm:
            ud.date = ud.parm['date']
        elif ud.tag:
            ud.date = ""

        norecurse = ''
        if 'norecurse' in ud.parm:
            norecurse = '_norecurse'

        fullpath = ''
        if 'fullpath' in ud.parm:
            fullpath = '_fullpath'

        ud.localfile = data.expand('%s_%s_%s_%s%s%s.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.tag, ud.date, norecurse, fullpath), d)

    def need_update(self, url, ud, d):
        if (ud.date == "now"):
            return True
        if not os.path.exists(ud.localpath):
            return True
        return False

    def download(self, loc, ud, d):

        method = ud.parm.get('method', 'pserver')
        localdir = ud.parm.get('localdir', ud.module)
        cvs_port = ud.parm.get('port', '')

        cvs_rsh = None
        if method == "ext":
            if "rsh" in ud.parm:
                cvs_rsh = ud.parm["rsh"]

        if method == "dir":
            cvsroot = ud.path
        else:
            cvsroot = ":" + method
            cvsproxyhost = data.getVar('CVS_PROXY_HOST', d, True)
            if cvsproxyhost:
                cvsroot += ";proxy=" + cvsproxyhost
            cvsproxyport = data.getVar('CVS_PROXY_PORT', d, True)
            if cvsproxyport:
                cvsroot += ";proxyport=" + cvsproxyport
            cvsroot += ":" + ud.user
            if ud.pswd:
                cvsroot += ":" + ud.pswd
            cvsroot += "@" + ud.host + ":" + cvs_port + ud.path

        options = []
        if 'norecurse' in ud.parm:
            options.append("-l")
        if ud.date:
            # treat YYYYMMDDHHMM specially for CVS
            if len(ud.date) == 12:
                options.append("-D \"%s %s:%s UTC\"" % (ud.date[0:8], ud.date[8:10], ud.date[10:12]))
            else:
                options.append("-D \"%s UTC\"" % ud.date)
        if ud.tag:
            options.append("-r %s" % ud.tag)

        localdata = data.createCopy(d)
        data.setVar('OVERRIDES', "cvs:%s" % data.getVar('OVERRIDES', localdata), localdata)
        data.update_data(localdata)

        data.setVar('CVSROOT', cvsroot, localdata)
        data.setVar('CVSCOOPTS', " ".join(options), localdata)
        data.setVar('CVSMODULE', ud.module, localdata)
        cvscmd = data.getVar('FETCHCOMMAND', localdata, True)
        cvsupdatecmd = data.getVar('UPDATECOMMAND', localdata, True)

        if cvs_rsh:
            cvscmd = "CVS_RSH=\"%s\" %s" % (cvs_rsh, cvscmd)
            cvsupdatecmd = "CVS_RSH=\"%s\" %s" % (cvs_rsh, cvsupdatecmd)

        # create module directory
        logger.debug(2, "Fetch: checking for module directory")
        pkg = data.expand('${PN}', d)
        pkgdir = os.path.join(data.expand('${CVSDIR}', localdata), pkg)
        moddir = os.path.join(pkgdir, localdir)
        if os.access(os.path.join(moddir, 'CVS'), os.R_OK):
            logger.info("Update " + loc)
            bb.fetch2.check_network_access(d, cvsupdatecmd, ud.url)
            # update sources there
            os.chdir(moddir)
            cmd = cvsupdatecmd
        else:
            logger.info("Fetch " + loc)
            # check out sources there
            bb.utils.mkdirhier(pkgdir)
            os.chdir(pkgdir)
            logger.debug(1, "Running %s", cvscmd)
            bb.fetch2.check_network_access(d, cvscmd, ud.url)
            cmd = cvscmd

        runfetchcmd(cmd, d, cleanup = [moddir])

        if not os.access(moddir, os.R_OK):
            raise FetchError("Directory %s was not readable despite sucessful fetch?!" % moddir, ud.url)

        scmdata = ud.parm.get("scmdata", "")
        if scmdata == "keep":
            tar_flags = ""
        else:
            tar_flags = "--exclude 'CVS'"

        # tar them up to a defined filename
        if 'fullpath' in ud.parm:
            os.chdir(pkgdir)
            cmd = "tar %s -czf %s %s" % (tar_flags, ud.localpath, localdir)
        else:
            os.chdir(moddir)
            os.chdir('..')
            cmd = "tar %s -czf %s %s" % (tar_flags, ud.localpath, os.path.basename(moddir))

        runfetchcmd(cmd, d, cleanup = [ud.localpath])

    def clean(self, ud, d):
        """ Clean CVS Files and tarballs """

        pkg = data.expand('${PN}', d)
        localdata = data.createCopy(d)
        data.setVar('OVERRIDES', "cvs:%s" % data.getVar('OVERRIDES', localdata), localdata)
        data.update_data(localdata)
        pkgdir = os.path.join(data.expand('${CVSDIR}', localdata), pkg)

        bb.utils.remove(pkgdir, True)
        bb.utils.remove(ud.localpath)

@@ -1,245 +0,0 @@
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
BitBake 'Fetch' git implementation

"""

#Copyright (C) 2005 Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import os
import bb
from bb import data
from bb.fetch2 import FetchMethod
from bb.fetch2 import runfetchcmd
from bb.fetch2 import logger

class Git(FetchMethod):
    """Class to fetch a module or modules from git repositories"""
    def init(self, d):
        #
        # Only enable _sortable revision if the key is set
        #
        if bb.data.getVar("BB_GIT_CLONE_FOR_SRCREV", d, True):
            self._sortable_buildindex = self._sortable_buildindex_disabled
    def supports(self, url, ud, d):
        """
        Check to see if a given url can be fetched with git.
        """
        return ud.type in ['git']

    def urldata_init(self, ud, d):
        """
        init git specific variable within url data
        so that the git method like latest_revision() can work
        """
        if 'protocol' in ud.parm:
            ud.proto = ud.parm['protocol']
        elif not ud.host:
            ud.proto = 'file'
        else:
            ud.proto = "rsync"

        ud.nocheckout = False
        if 'nocheckout' in ud.parm:
            ud.nocheckout = True

        branches = ud.parm.get("branch", "master").split(',')
        if len(branches) != len(ud.names):
            raise bb.fetch2.ParameterError("The number of name and branch parameters is not balanced", ud.url)
        ud.branches = {}
        for name in ud.names:
            branch = branches[ud.names.index(name)]
            ud.branches[name] = branch

        gitsrcname = '%s%s' % (ud.host, ud.path.replace('/', '.'))
        ud.mirrortarball = 'git2_%s.tar.gz' % (gitsrcname)
        ud.fullmirror = os.path.join(data.getVar("DL_DIR", d, True), ud.mirrortarball)
        ud.clonedir = os.path.join(data.expand('${GITDIR}', d), gitsrcname)

        ud.basecmd = data.getVar("FETCHCMD_git", d, True) or "git"

        ud.write_tarballs = (data.getVar("BB_GENERATE_MIRROR_TARBALLS", d, True) or "0") != "0"

        ud.localfile = ud.clonedir

        ud.setup_revisons(d)

        for name in ud.names:
            # Ensure anything that doesn't look like a sha256 checksum/revision is translated into one
            if not ud.revisions[name] or len(ud.revisions[name]) != 40 or (False in [c in "abcdef0123456789" for c in ud.revisions[name]]):
                ud.branches[name] = ud.revisions[name]
                ud.revisions[name] = self.latest_revision(ud.url, ud, d, name)

    def localpath(self, url, ud, d):
        return ud.clonedir

    def need_update(self, u, ud, d):
        if not os.path.exists(ud.clonedir):
            return True
        os.chdir(ud.clonedir)
        for name in ud.names:
            if not self._contains_ref(ud.revisions[name], d):
                return True
        if ud.write_tarballs and not os.path.exists(ud.fullmirror):
            return True
        return False

    def try_premirror(self, u, ud, d):
        # If we don't do this, updating an existing checkout with only premirrors
        # is not possible
        if bb.data.getVar("BB_FETCH_PREMIRRORONLY", d, True) is not None:
            return True
        if os.path.exists(ud.clonedir):
            return False
        return True

    def download(self, loc, ud, d):
        """Fetch url"""

        if ud.user:
            username = ud.user + '@'
        else:
            username = ""

        ud.repochanged = not os.path.exists(ud.fullmirror)

        # If the checkout doesn't exist and the mirror tarball does, extract it
        if not os.path.exists(ud.clonedir) and os.path.exists(ud.fullmirror):
            bb.utils.mkdirhier(ud.clonedir)
            os.chdir(ud.clonedir)
            runfetchcmd("tar -xzf %s" % (ud.fullmirror), d)

        # If the repo still doesn't exist, fallback to cloning it
        if not os.path.exists(ud.clonedir):
            bb.fetch2.check_network_access(d, "git clone --bare %s%s" % (ud.host, ud.path))
            runfetchcmd("%s clone --bare %s://%s%s%s %s" % (ud.basecmd, ud.proto, username, ud.host, ud.path, ud.clonedir), d)

        os.chdir(ud.clonedir)
        # Update the checkout if needed
        needupdate = False
        for name in ud.names:
            if not self._contains_ref(ud.revisions[name], d):
                needupdate = True
        if needupdate:
            bb.fetch2.check_network_access(d, "git fetch %s%s" % (ud.host, ud.path), ud.url)
            try:
                runfetchcmd("%s remote prune origin" % ud.basecmd, d)
                runfetchcmd("%s remote rm origin" % ud.basecmd, d)
            except bb.fetch2.FetchError:
                logger.debug(1, "No Origin")

            runfetchcmd("%s remote add origin %s://%s%s%s" % (ud.basecmd, ud.proto, username, ud.host, ud.path), d)
            runfetchcmd("%s fetch --all -t" % ud.basecmd, d)
            runfetchcmd("%s prune-packed" % ud.basecmd, d)
            runfetchcmd("%s pack-redundant --all | xargs -r rm" % ud.basecmd, d)
            ud.repochanged = True

    def build_mirror_data(self, url, ud, d):
        # Generate a mirror tarball if needed
        if ud.write_tarballs and (ud.repochanged or not os.path.exists(ud.fullmirror)):
            os.chdir(ud.clonedir)
            logger.info("Creating tarball of git repository")
            runfetchcmd("tar -czf %s %s" % (ud.fullmirror, os.path.join(".") ), d)

    def unpack(self, ud, destdir, d):
        """ unpack the downloaded src to destdir"""

        subdir = ud.parm.get("subpath", "")
        if subdir != "":
            readpathspec = ":%s" % (subdir)
        else:
            readpathspec = ""

        destdir = os.path.join(destdir, "git/")
        if os.path.exists(destdir):
            bb.utils.prunedir(destdir)

        runfetchcmd("git clone -s -n %s %s" % (ud.clonedir, destdir), d)
        if not ud.nocheckout:
            os.chdir(destdir)
            runfetchcmd("%s read-tree %s%s" % (ud.basecmd, ud.revisions[ud.names[0]], readpathspec), d)
            runfetchcmd("%s checkout-index -q -f -a" % ud.basecmd, d)
        return True

    def clean(self, ud, d):
        """ clean the git directory """

        bb.utils.remove(ud.localpath, True)
        bb.utils.remove(ud.fullmirror)

    def supports_srcrev(self):
        return True

    def _contains_ref(self, tag, d):
        basecmd = data.getVar("FETCHCMD_git", d, True) or "git"
        output = runfetchcmd("%s log --pretty=oneline -n 1 %s -- 2> /dev/null | wc -l" % (basecmd, tag), d, quiet=True)
        return output.split()[0] != "0"

    def _revision_key(self, url, ud, d, name):
        """
        Return a unique key for the url
        """
        return "git:" + ud.host + ud.path.replace('/', '.') + ud.branches[name]

    def _latest_revision(self, url, ud, d, name):
        """
        Compute the HEAD revision for the url
        """
        if ud.user:
            username = ud.user + '@'
        else:
            username = ""

        bb.fetch2.check_network_access(d, "git ls-remote %s%s %s" % (ud.host, ud.path, ud.branches[name]))
        basecmd = data.getVar("FETCHCMD_git", d, True) or "git"
        cmd = "%s ls-remote %s://%s%s%s %s" % (basecmd, ud.proto, username, ud.host, ud.path, ud.branches[name])
        output = runfetchcmd(cmd, d, True)
        if not output:
            raise bb.fetch2.FetchError("The command %s gave empty output unexpectedly" % cmd, url)
        return output.split()[0]

    def _build_revision(self, url, ud, d, name):
        return ud.revisions[name]

    def _sortable_buildindex_disabled(self, url, ud, d, rev):
        """
        Return a suitable buildindex for the revision specified. This is done by counting revisions
        using "git rev-list" which may or may not work in different circumstances.
        """

        cwd = os.getcwd()

        # Check if we have the rev already

        if not os.path.exists(ud.clonedir):
            print("no repo")
            self.download(None, ud, d)
            if not os.path.exists(ud.clonedir):
                logger.error("GIT repository for %s doesn't exist in %s, cannot get sortable buildnumber, using old value", url, ud.clonedir)
                return None

        os.chdir(ud.clonedir)
        if not self._contains_ref(rev, d):
            self.download(None, ud, d)

        output = runfetchcmd("%s rev-list %s -- 2> /dev/null | wc -l" % (ud.basecmd, rev), d, quiet=True)
        os.chdir(cwd)

        buildindex = "%s" % output.split()[0]
        logger.debug(1, "GIT repository for %s in %s is returning %s revisions in rev-list before %s", url, ud.clonedir, buildindex, rev)
        return buildindex
@@ -1,176 +0,0 @@
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
BitBake 'Fetch' implementation for mercurial DRCS (hg).

"""

# Copyright (C) 2003, 2004 Chris Larson
# Copyright (C) 2004 Marcin Juszkiewicz
# Copyright (C) 2007 Robert Schuster
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Based on functions from the base bb module, Copyright 2003 Holger Schurig

import os
import sys
import logging
import bb
from bb import data
from bb.fetch2 import FetchMethod
from bb.fetch2 import FetchError
from bb.fetch2 import MissingParameterError
from bb.fetch2 import runfetchcmd
from bb.fetch2 import logger

class Hg(FetchMethod):
    """Class to fetch from mercurial repositories"""
    def supports(self, url, ud, d):
        """
        Check to see if a given url can be fetched with mercurial.
        """
        return ud.type in ['hg']

    def urldata_init(self, ud, d):
        """
        init hg specific variable within url data
        """
        if not "module" in ud.parm:
            raise MissingParameterError('module', ud.url)

        ud.module = ud.parm["module"]

        # Create paths to mercurial checkouts
        relpath = self._strip_leading_slashes(ud.path)
        ud.pkgdir = os.path.join(data.expand('${HGDIR}', d), ud.host, relpath)
        ud.moddir = os.path.join(ud.pkgdir, ud.module)

        ud.setup_revisons(d)

        if 'rev' in ud.parm:
            ud.revision = ud.parm['rev']
        elif not ud.revision:
            ud.revision = self.latest_revision(ud.url, ud, d)

        ud.localfile = data.expand('%s_%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.path.replace('/', '.'), ud.revision), d)

    def need_update(self, url, ud, d):
        revTag = ud.parm.get('rev', 'tip')
        if revTag == "tip":
            return True
        if not os.path.exists(ud.localpath):
            return True
        return False

    def _buildhgcommand(self, ud, d, command):
        """
        Build up an hg commandline based on ud
        command is "fetch", "update", "info"
        """

        basecmd = data.expand('${FETCHCMD_hg}', d)

        proto = ud.parm.get('proto', 'http')

        host = ud.host
        if proto == "file":
            host = "/"
            ud.host = "localhost"

        if not ud.user:
            hgroot = host + ud.path
        else:
            hgroot = ud.user + "@" + host + ud.path

        if command is "info":
            return "%s identify -i %s://%s/%s" % (basecmd, proto, hgroot, ud.module)

        options = [];
        if ud.revision:
            options.append("-r %s" % ud.revision)

        if command is "fetch":
            cmd = "%s clone %s %s://%s/%s %s" % (basecmd, " ".join(options), proto, hgroot, ud.module, ud.module)
        elif command is "pull":
            # do not pass options list; limiting pull to rev causes the local
            # repo not to contain it and immediately following "update" command
            # will crash
            cmd = "%s pull" % (basecmd)
        elif command is "update":
            cmd = "%s update -C %s" % (basecmd, " ".join(options))
        else:
            raise FetchError("Invalid hg command %s" % command, ud.url)

        return cmd

    def download(self, loc, ud, d):
        """Fetch url"""

        logger.debug(2, "Fetch: checking for module directory '" + ud.moddir + "'")

        if os.access(os.path.join(ud.moddir, '.hg'), os.R_OK):
            updatecmd = self._buildhgcommand(ud, d, "pull")
            logger.info("Update " + loc)
            # update sources there
            os.chdir(ud.moddir)
            logger.debug(1, "Running %s", updatecmd)
            bb.fetch2.check_network_access(d, updatecmd, ud.url)
            runfetchcmd(updatecmd, d)

        else:
            fetchcmd = self._buildhgcommand(ud, d, "fetch")
            logger.info("Fetch " + loc)
            # check out sources there
            bb.utils.mkdirhier(ud.pkgdir)
            os.chdir(ud.pkgdir)
            logger.debug(1, "Running %s", fetchcmd)
            bb.fetch2.check_network_access(d, fetchcmd, ud.url)
            runfetchcmd(fetchcmd, d)

        # Even when we clone (fetch), we still need to update as hg's clone
        # won't checkout the specified revision if its on a branch
        updatecmd = self._buildhgcommand(ud, d, "update")
        os.chdir(ud.moddir)
        logger.debug(1, "Running %s", updatecmd)
        runfetchcmd(updatecmd, d)

        scmdata = ud.parm.get("scmdata", "")
        if scmdata == "keep":
            tar_flags = ""
        else:
            tar_flags = "--exclude '.hg' --exclude '.hgrags'"

        os.chdir(ud.pkgdir)
        runfetchcmd("tar %s -czf %s %s" % (tar_flags, ud.localpath, ud.module), d, cleanup = [ud.localpath])

    def supports_srcrev(self):
        return True

    def _latest_revision(self, url, ud, d, name):
        """
        Compute tip revision for the url
        """
        bb.fetch2.check_network_access(d, self._buildhgcommand(ud, d, "info"))
        output = runfetchcmd(self._buildhgcommand(ud, d, "info"), d)
        return output.strip()

    def _build_revision(self, url, ud, d):
        return ud.revision

    def _revision_key(self, url, ud, d, name):
        """
        Return a unique key for the url
        """
        return "hg:" + ud.moddir
@@ -1,93 +0,0 @@
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
BitBake 'Fetch' implementations

Classes for obtaining upstream sources for the
BitBake build tools.

"""

# Copyright (C) 2003, 2004 Chris Larson
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Based on functions from the base bb module, Copyright 2003 Holger Schurig

import os
import bb
import bb.utils
from bb import data
from bb.fetch2 import FetchMethod

class Local(FetchMethod):
    def supports(self, url, urldata, d):
        """
        Check to see if a given url represents a local fetch.
        """
        return urldata.type in ['file']

    def urldata_init(self, ud, d):
        # We don't set localfile as for this fetcher the file is already local!
        ud.basename = os.path.basename(ud.url.split("://")[1].split(";")[0])
        return

    def localpath(self, url, urldata, d):
        """
        Return the local filename of a given url assuming a successful fetch.
        """
        path = url.split("://")[1]
        path = path.split(";")[0]
        newpath = path
        dldirfile = os.path.join(data.getVar("DL_DIR", d, True), os.path.basename(path))
        if os.path.exists(dldirfile):
            return dldirfile
        if path[0] != "/":
            filespath = data.getVar('FILESPATH', d, True)
            if filespath:
                newpath = bb.utils.which(filespath, path)
            if not newpath:
                filesdir = data.getVar('FILESDIR', d, True)
                if filesdir:
                    newpath = os.path.join(filesdir, path)
        if not os.path.exists(newpath) and path.find("*") == -1:
            return dldirfile
        return newpath

    def need_update(self, url, ud, d):
        if url.find("*") != -1:
            return False
        if os.path.exists(ud.localpath):
            return False
        return True

    def download(self, url, urldata, d):
        """Fetch urls (no-op for Local method)"""
        # no need to fetch local files, we'll deal with them in place.
        return 1

    def checkstatus(self, url, urldata, d):
        """
        Check the status of the url
        """
        if urldata.localpath.find("*") != -1:
            logger.info("URL %s looks like a glob and was therefore not checked.", url)
            return True
        if os.path.exists(urldata.localpath):
            return True
        return False

    def clean(self, urldata, d):
        return

@@ -1,135 +0,0 @@
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
Bitbake "Fetch" implementation for osc (Opensuse build service client).
Based on the svn "Fetch" implementation.

"""

import os
import sys
import logging
import bb
from bb import data
from bb.fetch2 import FetchMethod
from bb.fetch2 import FetchError
from bb.fetch2 import MissingParameterError
from bb.fetch2 import runfetchcmd

class Osc(FetchMethod):
    """Class to fetch a module or modules from Opensuse build server
    repositories."""

    def supports(self, url, ud, d):
        """
        Check to see if a given url can be fetched with osc.
        """
        return ud.type in ['osc']

    def urldata_init(self, ud, d):
        if not "module" in ud.parm:
            raise MissingParameterError('module', ud.url)

        ud.module = ud.parm["module"]

        # Create paths to osc checkouts
        relpath = self._strip_leading_slashes(ud.path)
        ud.pkgdir = os.path.join(data.expand('${OSCDIR}', d), ud.host)
        ud.moddir = os.path.join(ud.pkgdir, relpath, ud.module)

        if 'rev' in ud.parm:
            ud.revision = ud.parm['rev']
        else:
            pv = data.getVar("PV", d, 0)
            rev = bb.fetch2.srcrev_internal_helper(ud, d)
            if rev and rev != True:
                ud.revision = rev
            else:
                ud.revision = ""

        ud.localfile = data.expand('%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), ud.path.replace('/', '.'), ud.revision), d)

    def _buildosccommand(self, ud, d, command):
        """
        Build up an ocs commandline based on ud
        command is "fetch", "update", "info"
        """

        basecmd = data.expand('${FETCHCMD_osc}', d)

        proto = ud.parm.get('proto', 'ocs')

        options = []

        config = "-c %s" % self.generate_config(ud, d)

        if ud.revision:
            options.append("-r %s" % ud.revision)

        coroot = self._strip_leading_slashes(ud.path)

        if command is "fetch":
            osccmd = "%s %s co %s/%s %s" % (basecmd, config, coroot, ud.module, " ".join(options))
        elif command is "update":
            osccmd = "%s %s up %s" % (basecmd, config, " ".join(options))
        else:
            raise FetchError("Invalid osc command %s" % command, ud.url)

        return osccmd

    def download(self, loc, ud, d):
        """
        Fetch url
        """

        logger.debug(2, "Fetch: checking for module directory '" + ud.moddir + "'")

        if os.access(os.path.join(data.expand('${OSCDIR}', d), ud.path, ud.module), os.R_OK):
            oscupdatecmd = self._buildosccommand(ud, d, "update")
            logger.info("Update "+ loc)
            # update sources there
            os.chdir(ud.moddir)
            logger.debug(1, "Running %s", oscupdatecmd)
            bb.fetch2.check_network_access(d, oscupdatecmd, ud.url)
            runfetchcmd(oscupdatecmd, d)
        else:
            oscfetchcmd = self._buildosccommand(ud, d, "fetch")
            logger.info("Fetch " + loc)
            # check out sources there
            bb.utils.mkdirhier(ud.pkgdir)
            os.chdir(ud.pkgdir)
            logger.debug(1, "Running %s", oscfetchcmd)
            bb.fetch2.check_network_access(d, oscfetchcmd, ud.url)
            runfetchcmd(oscfetchcmd, d)

        os.chdir(os.path.join(ud.pkgdir + ud.path))
        # tar them up to a defined filename
        runfetchcmd("tar -czf %s %s" % (ud.localpath, ud.module), d, cleanup = [ud.localpath])

    def supports_srcrev(self):
        return False

    def generate_config(self, ud, d):
        """
        Generate a .oscrc to be used for this run.
        """

        config_path = os.path.join(data.expand('${OSCDIR}', d), "oscrc")
        if (os.path.exists(config_path)):
            os.remove(config_path)

        f = open(config_path, 'w')
        f.write("[general]\n")
        f.write("apisrv = %s\n" % ud.host)
        f.write("scheme = http\n")
        f.write("su-wrapper = su -c\n")
        f.write("build-root = %s\n" % data.expand('${WORKDIR}', d))
        f.write("urllist = http://moblin-obs.jf.intel.com:8888/build/%(project)s/%(repository)s/%(buildarch)s/:full/%(name)s.rpm\n")
        f.write("extra-pkgs = gzip\n")
        f.write("\n")
        f.write("[%s]\n" % ud.host)
        f.write("user = %s\n" % ud.parm["user"])
        f.write("pass = %s\n" % ud.parm["pswd"])
        f.close()

        return config_path
@@ -1,196 +0,0 @@
|
||||
# ex:ts=4:sw=4:sts=4:et
|
||||
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
|
||||
"""
|
||||
BitBake 'Fetch' implementations
|
||||
|
||||
Classes for obtaining upstream sources for the
|
||||
BitBake build tools.
|
||||
|
||||
"""
|
||||
|
||||
# Copyright (C) 2003, 2004 Chris Larson
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License version 2 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Based on functions from the base bb module, Copyright 2003 Holger Schurig

from future_builtins import zip
import os
import logging
import bb
from bb import data
from bb.fetch2 import FetchMethod
from bb.fetch2 import FetchError
from bb.fetch2 import logger
from bb.fetch2 import runfetchcmd

class Perforce(FetchMethod):
    def supports(self, url, ud, d):
        return ud.type in ['p4']

    def doparse(url, d):
        parm = {}
        path = url.split("://")[1]
        delim = path.find("@")
        if delim != -1:
            (user, pswd, host, port) = path.split('@')[0].split(":")
            path = path.split('@')[1]
        else:
            (host, port) = data.getVar('P4PORT', d).split(':')
            user = ""
            pswd = ""

        if path.find(";") != -1:
            keys = []
            values = []
            plist = path.split(';')
            for item in plist:
                if item.count('='):
                    (key, value) = item.split('=')
                    keys.append(key)
                    values.append(value)

            parm = dict(zip(keys, values))
        path = "//" + path.split(';')[0]
        host += ":%s" % (port)
        parm["cset"] = Perforce.getcset(d, path, host, user, pswd, parm)

        return host, path, user, pswd, parm
    doparse = staticmethod(doparse)

    def getcset(d, depot, host, user, pswd, parm):
        p4opt = ""
        if "cset" in parm:
            return parm["cset"]
        if user:
            p4opt += " -u %s" % (user)
        if pswd:
            p4opt += " -P %s" % (pswd)
        if host:
            p4opt += " -p %s" % (host)

        p4date = data.getVar("P4DATE", d, True)
        if "revision" in parm:
            depot += "#%s" % (parm["revision"])
        elif "label" in parm:
            depot += "@%s" % (parm["label"])
        elif p4date:
            depot += "@%s" % (p4date)

        p4cmd = data.getVar('FETCHCOMMAND_p4', d, True)
        logger.debug(1, "Running %s%s changes -m 1 %s", p4cmd, p4opt, depot)
        p4file = os.popen("%s%s changes -m 1 %s" % (p4cmd, p4opt, depot))
        cset = p4file.readline().strip()
        logger.debug(1, "READ %s", cset)
        if not cset:
            return -1

        return cset.split(' ')[1]
    getcset = staticmethod(getcset)

    def urldata_init(self, ud, d):
        (host, path, user, pswd, parm) = Perforce.doparse(ud.url, d)

        # If a label is specified, we use that as our filename

        if "label" in parm:
            ud.localfile = "%s.tar.gz" % (parm["label"])
            return

        base = path
        which = path.find('/...')
        if which != -1:
            base = path[:which]

        base = self._strip_leading_slashes(base)

        cset = Perforce.getcset(d, path, host, user, pswd, parm)

        ud.localfile = data.expand('%s+%s+%s.tar.gz' % (host, base.replace('/', '.'), cset), d)

    def download(self, loc, ud, d):
        """
        Fetch urls
        """

        (host, depot, user, pswd, parm) = Perforce.doparse(loc, d)

        if depot.find('/...') != -1:
            path = depot[:depot.find('/...')]
        else:
            path = depot

        module = parm.get('module', os.path.basename(path))

        localdata = data.createCopy(d)
        data.setVar('OVERRIDES', "p4:%s" % data.getVar('OVERRIDES', localdata), localdata)
        data.update_data(localdata)

        # Get the p4 command
        p4opt = ""
        if user:
            p4opt += " -u %s" % (user)

        if pswd:
            p4opt += " -P %s" % (pswd)

        if host:
            p4opt += " -p %s" % (host)

        p4cmd = data.getVar('FETCHCOMMAND', localdata, True)

        # create temp directory
        logger.debug(2, "Fetch: creating temporary directory")
        bb.utils.mkdirhier(data.expand('${WORKDIR}', localdata))
        data.setVar('TMPBASE', data.expand('${WORKDIR}/oep4.XXXXXX', localdata), localdata)
        tmppipe = os.popen(data.getVar('MKTEMPDIRCMD', localdata, True) or "false")
        tmpfile = tmppipe.readline().strip()
        if not tmpfile:
            raise FetchError("Fetch: unable to create temporary directory.. make sure 'mktemp' is in the PATH.", loc)

        if "label" in parm:
            depot = "%s@%s" % (depot, parm["label"])
        else:
            cset = Perforce.getcset(d, depot, host, user, pswd, parm)
            depot = "%s@%s" % (depot, cset)

        os.chdir(tmpfile)
        logger.info("Fetch " + loc)
        logger.info("%s%s files %s", p4cmd, p4opt, depot)
        p4file = os.popen("%s%s files %s" % (p4cmd, p4opt, depot))

        if not p4file:
            raise FetchError("Fetch: unable to get the P4 files from %s" % depot, loc)

        count = 0

        for file in p4file:
            list = file.split()

            if list[2] == "delete":
                continue

            dest = list[0][len(path)+1:]
            where = dest.find("#")

            os.system("%s%s print -o %s/%s %s" % (p4cmd, p4opt, module, dest[:where], list[0]))
            count = count + 1

        if count == 0:
            logger.error("Fetch: no files gathered from the P4 fetch")
            raise FetchError("Fetch: No files gathered from the P4 fetch", loc)

        runfetchcmd("tar -czf %s %s" % (ud.localpath, module), d, cleanup = [ud.localpath])
        # cleanup
        bb.utils.prunedir(tmpfile)
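Illustrative aside (not part of the diff): doparse() above expects p4 URLs of the assumed form p4://user:pswd:host:port@path;key=value. A minimal standalone sketch of that split, using a hypothetical URL; the real method also falls back to P4PORT and P4DATE from the datastore.

# Sketch only; URL and values below are hypothetical.
def split_p4_url(url):
    path = url.split("://")[1]
    user = pswd = host = port = ""
    if "@" in path:
        (user, pswd, host, port) = path.split('@')[0].split(":")
        path = path.split('@')[1]
    parm = dict(i.split('=') for i in path.split(';')[1:] if '=' in i)
    return user, pswd, host, port, "//" + path.split(';')[0], parm

print(split_p4_url("p4://joe:secret:p4.example.com:1666@depot/proj;label=rel-1.0"))
# ('joe', 'secret', 'p4.example.com', '1666', '//depot/proj', {'label': 'rel-1.0'})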
@@ -1,98 +0,0 @@
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
BitBake "Fetch" repo (git) implementation

"""

# Copyright (C) 2009 Tom Rini <trini@embeddedalley.com>
#
# Based on git.py which is:
# Copyright (C) 2005 Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import os
import bb
from bb import data
from bb.fetch2 import FetchMethod
from bb.fetch2 import logger
from bb.fetch2 import runfetchcmd

class Repo(FetchMethod):
    """Class to fetch a module or modules from repo (git) repositories"""
    def supports(self, url, ud, d):
        """
        Check to see if a given url can be fetched with repo.
        """
        return ud.type in ["repo"]

    def urldata_init(self, ud, d):
        """
        We don't care about the git rev of the manifests repository, but
        we do care about the manifest to use. The default is "default".
        We also care about the branch or tag to be used. The default is
        "master".
        """

        ud.proto = ud.parm.get('protocol', 'git')
        ud.branch = ud.parm.get('branch', 'master')
        ud.manifest = ud.parm.get('manifest', 'default.xml')
        if not ud.manifest.endswith('.xml'):
            ud.manifest += '.xml'

        ud.localfile = data.expand("repo_%s%s_%s_%s.tar.gz" % (ud.host, ud.path.replace("/", "."), ud.manifest, ud.branch), d)

    def download(self, loc, ud, d):
        """Fetch url"""

        if os.access(os.path.join(data.getVar("DL_DIR", d, True), ud.localfile), os.R_OK):
            logger.debug(1, "%s already exists (or was stashed). Skipping repo init / sync.", ud.localpath)
            return

        gitsrcname = "%s%s" % (ud.host, ud.path.replace("/", "."))
        repodir = data.getVar("REPODIR", d, True) or os.path.join(data.getVar("DL_DIR", d, True), "repo")
        codir = os.path.join(repodir, gitsrcname, ud.manifest)

        if ud.user:
            username = ud.user + "@"
        else:
            username = ""

        bb.utils.mkdirhier(os.path.join(codir, "repo"))
        os.chdir(os.path.join(codir, "repo"))
        if not os.path.exists(os.path.join(codir, "repo", ".repo")):
            bb.fetch2.check_network_access(d, "repo init -m %s -b %s -u %s://%s%s%s" % (ud.manifest, ud.branch, ud.proto, username, ud.host, ud.path), ud.url)
            runfetchcmd("repo init -m %s -b %s -u %s://%s%s%s" % (ud.manifest, ud.branch, ud.proto, username, ud.host, ud.path), d)

        bb.fetch2.check_network_access(d, "repo sync %s" % ud.url, ud.url)
        runfetchcmd("repo sync", d)
        os.chdir(codir)

        scmdata = ud.parm.get("scmdata", "")
        if scmdata == "keep":
            tar_flags = ""
        else:
            tar_flags = "--exclude '.repo' --exclude '.git'"

        # Create a cache
        runfetchcmd("tar %s -czf %s %s" % (tar_flags, ud.localpath, os.path.join(".", "*")), d)

    def supports_srcrev(self):
        return False

    def _build_revision(self, url, ud, d):
        return ud.manifest

    def _want_sortable_revision(self, url, ud, d):
        return False
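Illustrative aside (not part of the diff): with a hypothetical SRC_URI such as repo://android.googlesource.com/platform/manifest;branch=master, urldata_init() above would derive the download tarball name like this:

# Sketch only; host/path values are hypothetical.
host, path = "android.googlesource.com", "/platform/manifest"
manifest, branch = "default.xml", "master"
print("repo_%s%s_%s_%s.tar.gz" % (host, path.replace("/", "."), manifest, branch))
# repo_android.googlesource.com.platform.manifest_default.xml_master.tar.gz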
@@ -1,120 +0,0 @@
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
'''
BitBake 'Fetch' implementations

This implementation is for Secure Shell (SSH), and attempts to comply with the
IETF secsh internet draft:
    http://tools.ietf.org/wg/secsh/draft-ietf-secsh-scp-sftp-ssh-uri/

Currently does not support the sftp parameters, as this uses scp
Also does not support the 'fingerprint' connection parameter.

'''

# Copyright (C) 2006 OpenedHand Ltd.
#
#
# Based in part on svk.py:
#    Copyright (C) 2006 Holger Hans Peter Freyther
#    Based on svn.py:
#        Copyright (C) 2003, 2004 Chris Larson
#    Based on functions from the base bb module:
#        Copyright 2003 Holger Schurig
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import re, os
import bb
from bb import data
from bb.fetch2 import FetchMethod
from bb.fetch2 import FetchError
from bb.fetch2 import logger
from bb.fetch2 import runfetchcmd


__pattern__ = re.compile(r'''
 \s*                    # Skip leading whitespace
 ssh://                 # scheme
 (                      # Optional username/password block
  (?P<user>\S+)         # username
  (:(?P<pass>\S+))?     # colon followed by the password (optional)
 )?
 (?P<cparam>(;[^;]+)*)? # connection parameters block (optional)
 @
 (?P<host>\S+?)         # non-greedy match of the host
 (:(?P<port>[0-9]+))?   # colon followed by the port (optional)
 /
 (?P<path>[^;]+)        # path on the remote system, may be absolute or relative,
                        # and may include the use of '~' to reference the remote home
                        # directory
 (?P<sparam>(;[^;]+)*)? # parameters block (optional)
$
''', re.VERBOSE)

class SSH(FetchMethod):
    '''Class to fetch a module or modules via Secure Shell'''

    def supports(self, url, urldata, d):
        return __pattern__.match(url) != None

    def localpath(self, url, urldata, d):
        m = __pattern__.match(urldata.url)
        path = m.group('path')
        host = m.group('host')
        lpath = os.path.join(data.getVar('DL_DIR', d, True), host, os.path.basename(path))
        return lpath

    def download(self, url, urldata, d):
        dldir = data.getVar('DL_DIR', d, True)

        m = __pattern__.match(url)
        path = m.group('path')
        host = m.group('host')
        port = m.group('port')
        user = m.group('user')
        password = m.group('pass')

        ldir = os.path.join(dldir, host)
        lpath = os.path.join(ldir, os.path.basename(path))

        if not os.path.exists(ldir):
            os.makedirs(ldir)

        if port:
            port = '-P %s' % port
        else:
            port = ''

        if user:
            fr = user
            if password:
                fr += ':%s' % password
            fr += '@%s' % host
        else:
            fr = host
        fr += ':%s' % path


        import commands
        cmd = 'scp -B -r %s %s %s/' % (
            port,
            commands.mkarg(fr),
            commands.mkarg(ldir)
        )

        bb.fetch2.check_network_access(d, cmd, urldata.url)

        runfetchcmd(cmd, d)

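Illustrative aside (not part of the diff): a simplified, standalone variant of the __pattern__ regex above, exercised against a hypothetical URL, shows the named groups the fetcher relies on:

# Sketch only; this condensed pattern is not the one used above.
import re
pat = re.compile(r'ssh://((?P<user>[^:@]+)(:(?P<pass>[^@]+))?@)?'
                 r'(?P<host>[^:/]+)(:(?P<port>[0-9]+))?/(?P<path>.+)')
m = pat.match("ssh://builder:pw@build.example.com:2222/srv/tarballs/foo.tgz")
print(m.group('user'), m.group('host'), m.group('port'), m.group('path'))
# builder build.example.com 2222 srv/tarballs/foo.tgz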
@@ -1,97 +0,0 @@
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
BitBake 'Fetch' implementations

This implementation is for svk. It is based on the svn implementation

"""

# Copyright (C) 2006 Holger Hans Peter Freyther
# Copyright (C) 2003, 2004 Chris Larson
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Based on functions from the base bb module, Copyright 2003 Holger Schurig

import os
import logging
import bb
from bb import data
from bb.fetch2 import FetchMethod
from bb.fetch2 import FetchError
from bb.fetch2 import MissingParameterError
from bb.fetch2 import logger
from bb.fetch2 import runfetchcmd

class Svk(FetchMethod):
    """Class to fetch a module or modules from svk repositories"""
    def supports(self, url, ud, d):
        """
        Check to see if a given url can be fetched with svk.
        """
        return ud.type in ['svk']

    def urldata_init(self, ud, d):

        if not "module" in ud.parm:
            raise MissingParameterError('module', ud.url)
        else:
            ud.module = ud.parm["module"]

        ud.revision = ud.parm.get('rev', "")

        ud.localfile = data.expand('%s_%s_%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.path.replace('/', '.'), ud.revision, ud.date), d)

    def need_update(self, url, ud, d):
        if ud.date == "now":
            return True
        if not os.path.exists(ud.localpath):
            return True
        return False

    def download(self, loc, ud, d):
        """Fetch urls"""

        svkroot = ud.host + ud.path

        svkcmd = "svk co -r {%s} %s/%s" % (ud.date, svkroot, ud.module)

        if ud.revision:
            svkcmd = "svk co -r %s %s/%s" % (ud.revision, svkroot, ud.module)

        # create temp directory
        localdata = data.createCopy(d)
        data.update_data(localdata)
        logger.debug(2, "Fetch: creating temporary directory")
        bb.utils.mkdirhier(data.expand('${WORKDIR}', localdata))
        data.setVar('TMPBASE', data.expand('${WORKDIR}/oesvk.XXXXXX', localdata), localdata)
        tmppipe = os.popen(data.getVar('MKTEMPDIRCMD', localdata, True) or "false")
        tmpfile = tmppipe.readline().strip()
        if not tmpfile:
            logger.error("Fetch: unable to create temporary directory")
            raise FetchError("Fetch: unable to create temporary directory.. make sure 'mktemp' is in the PATH.", loc)

        # check out sources there
        os.chdir(tmpfile)
        logger.info("Fetch " + loc)
        logger.debug(1, "Running %s", svkcmd)
        runfetchcmd(svkcmd, d, cleanup = [tmpfile])

        os.chdir(os.path.join(tmpfile, os.path.dirname(ud.module)))
        # tar them up to a defined filename
        runfetchcmd("tar -czf %s %s" % (ud.localpath, os.path.basename(ud.module)), d, cleanup = [ud.localpath])

        # cleanup
        bb.utils.prunedir(tmpfile)
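Illustrative aside (not part of the diff): the checkout command assembled above pins to a date unless an explicit rev parameter overrides it; both forms, for a hypothetical host and module:

# Sketch only; values are hypothetical.
svkroot, module = "svk.example.com/depot", "trunk"
print("svk co -r {%s} %s/%s" % ("now", svkroot, module))  # date-pinned form
print("svk co -r %s %s/%s" % ("1234", svkroot, module))   # rev-pinned form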
@@ -1,182 +0,0 @@
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
BitBake 'Fetch' implementation for svn.

"""

# Copyright (C) 2003, 2004 Chris Larson
# Copyright (C) 2004 Marcin Juszkiewicz
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Based on functions from the base bb module, Copyright 2003 Holger Schurig

import os
import sys
import logging
import bb
from bb import data
from bb.fetch2 import FetchMethod
from bb.fetch2 import FetchError
from bb.fetch2 import MissingParameterError
from bb.fetch2 import runfetchcmd
from bb.fetch2 import logger

class Svn(FetchMethod):
    """Class to fetch a module or modules from svn repositories"""
    def supports(self, url, ud, d):
        """
        Check to see if a given url can be fetched with svn.
        """
        return ud.type in ['svn']

    def urldata_init(self, ud, d):
        """
        init svn specific variable within url data
        """
        if not "module" in ud.parm:
            raise MissingParameterError('module', ud.url)

        ud.module = ud.parm["module"]

        # Create paths to svn checkouts
        relpath = self._strip_leading_slashes(ud.path)
        ud.pkgdir = os.path.join(data.expand('${SVNDIR}', d), ud.host, relpath)
        ud.moddir = os.path.join(ud.pkgdir, ud.module)

        ud.setup_revisons(d)

        if 'rev' in ud.parm:
            ud.revision = ud.parm['rev']

        ud.localfile = data.expand('%s_%s_%s_%s_.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.path.replace('/', '.'), ud.revision), d)

    def _buildsvncommand(self, ud, d, command):
        """
        Build up an svn commandline based on ud
        command is "fetch", "update", "info"
        """

        basecmd = data.expand('${FETCHCMD_svn}', d)

        proto = ud.parm.get('proto', 'svn')

        svn_rsh = None
        if proto == "svn+ssh" and "rsh" in ud.parm:
            svn_rsh = ud.parm["rsh"]

        svnroot = ud.host + ud.path

        options = []

        if ud.user:
            options.append("--username %s" % ud.user)

        if ud.pswd:
            options.append("--password %s" % ud.pswd)

        if command is "info":
            svncmd = "%s info %s %s://%s/%s/" % (basecmd, " ".join(options), proto, svnroot, ud.module)
        else:
            suffix = ""
            if ud.revision:
                options.append("-r %s" % ud.revision)
                suffix = "@%s" % (ud.revision)

            if command is "fetch":
                svncmd = "%s co %s %s://%s/%s%s %s" % (basecmd, " ".join(options), proto, svnroot, ud.module, suffix, ud.module)
            elif command is "update":
                svncmd = "%s update %s" % (basecmd, " ".join(options))
            else:
                raise FetchError("Invalid svn command %s" % command, ud.url)

        if svn_rsh:
            svncmd = "svn_RSH=\"%s\" %s" % (svn_rsh, svncmd)

        return svncmd

    def download(self, loc, ud, d):
        """Fetch url"""

        logger.debug(2, "Fetch: checking for module directory '" + ud.moddir + "'")

        if os.access(os.path.join(ud.moddir, '.svn'), os.R_OK):
            svnupdatecmd = self._buildsvncommand(ud, d, "update")
            logger.info("Update " + loc)
            # update sources there
            os.chdir(ud.moddir)
            logger.debug(1, "Running %s", svnupdatecmd)
            bb.fetch2.check_network_access(d, svnupdatecmd, ud.url)
            runfetchcmd(svnupdatecmd, d)
        else:
            svnfetchcmd = self._buildsvncommand(ud, d, "fetch")
            logger.info("Fetch " + loc)
            # check out sources there
            bb.utils.mkdirhier(ud.pkgdir)
            os.chdir(ud.pkgdir)
            logger.debug(1, "Running %s", svnfetchcmd)
            bb.fetch2.check_network_access(d, svnfetchcmd, ud.url)
            runfetchcmd(svnfetchcmd, d)

        scmdata = ud.parm.get("scmdata", "")
        if scmdata == "keep":
            tar_flags = ""
        else:
            tar_flags = "--exclude '.svn'"

        os.chdir(ud.pkgdir)
        # tar them up to a defined filename
        runfetchcmd("tar %s -czf %s %s" % (tar_flags, ud.localpath, ud.module), d, cleanup = [ud.localpath])

    def clean(self, ud, d):
        """ Clean SVN specific files and dirs """

        bb.utils.remove(ud.localpath)
        bb.utils.remove(ud.moddir, True)


    def supports_srcrev(self):
        return True

    def _revision_key(self, url, ud, d, name):
        """
        Return a unique key for the url
        """
        return "svn:" + ud.moddir

    def _latest_revision(self, url, ud, d, name):
        """
        Return the latest upstream revision number
        """
        bb.fetch2.check_network_access(d, self._buildsvncommand(ud, d, "info"))

        output = runfetchcmd("LANG=C LC_ALL=C " + self._buildsvncommand(ud, d, "info"), d, True)

        revision = None
        for line in output.splitlines():
            if "Last Changed Rev" in line:
                revision = line.split(":")[1].strip()

        return revision

    def _sortable_revision(self, url, ud, d):
        """
        Return a sortable revision number which in our case is the revision number
        """

        return self._build_revision(url, ud, d)

    def _build_revision(self, url, ud, d):
        return ud.revision
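Illustrative aside (not part of the diff): for a hypothetical svn://svn.example.com/repos;module=trunk;rev=100 URL with a username set, the fetch branch of _buildsvncommand() above yields:

# Sketch only; all values are hypothetical.
basecmd, proto, svnroot, module, rev = "svn", "svn", "svn.example.com/repos", "trunk", "100"
options = ["--username joe", "-r %s" % rev]
print("%s co %s %s://%s/%s@%s %s" % (basecmd, " ".join(options), proto, svnroot, module, rev, module))
# svn co --username joe -r 100 svn://svn.example.com/repos/trunk@100 trunk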
@@ -1,91 +0,0 @@
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
BitBake 'Fetch' implementations

Classes for obtaining upstream sources for the
BitBake build tools.

"""

# Copyright (C) 2003, 2004 Chris Larson
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Based on functions from the base bb module, Copyright 2003 Holger Schurig

import os
import logging
import bb
import urllib
from bb import data
from bb.fetch2 import FetchMethod
from bb.fetch2 import FetchError
from bb.fetch2 import encodeurl
from bb.fetch2 import decodeurl
from bb.fetch2 import logger
from bb.fetch2 import runfetchcmd

class Wget(FetchMethod):
    """Class to fetch urls via 'wget'"""
    def supports(self, url, ud, d):
        """
        Check to see if a given url can be fetched with wget.
        """
        return ud.type in ['http', 'https', 'ftp']

    def urldata_init(self, ud, d):

        ud.basename = os.path.basename(ud.path)
        ud.localfile = data.expand(urllib.unquote(ud.basename), d)

    def download(self, uri, ud, d, checkonly = False):
        """Fetch urls"""

        def fetch_uri(uri, ud, d):
            if checkonly:
                fetchcmd = data.getVar("CHECKCOMMAND", d, True)
            elif os.path.exists(ud.localpath):
                # file exists, but we didn't complete it.. trying again..
                fetchcmd = data.getVar("RESUMECOMMAND", d, True)
            else:
                fetchcmd = data.getVar("FETCHCOMMAND", d, True)

            uri = uri.split(";")[0]
            uri_decoded = list(decodeurl(uri))
            uri_type = uri_decoded[0]
            uri_host = uri_decoded[1]

            fetchcmd = fetchcmd.replace("${URI}", uri.split(";")[0])
            fetchcmd = fetchcmd.replace("${FILE}", ud.basename)
            logger.info("fetch " + uri)
            logger.debug(2, "executing " + fetchcmd)
            bb.fetch2.check_network_access(d, fetchcmd)
            runfetchcmd(fetchcmd, d)

            # Sanity check since wget can pretend it succeeded when it didn't
            # Also, this used to happen if sourceforge sent us to the mirror page
            if not os.path.exists(ud.localpath) and not checkonly:
                raise FetchError("The fetch command returned success for url %s but %s doesn't exist?!" % (uri, ud.localpath), uri)

        localdata = data.createCopy(d)
        data.setVar('OVERRIDES', "wget:" + data.getVar('OVERRIDES', localdata), localdata)
        data.update_data(localdata)

        fetch_uri(uri, ud, localdata)

        return True

    def checkstatus(self, uri, ud, d):
        return self.download(uri, ud, d, True)
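Illustrative aside (not part of the diff): fetch_uri() above treats FETCHCOMMAND as a template with ${URI} and ${FILE} placeholders. The wget command string below is a plausible setting, not a quoted default:

# Sketch only; command template and URL are hypothetical.
fetchcmd = "wget -t 5 -nv -O ${FILE} ${URI}"
uri, basename = "http://downloads.example.com/foo-1.0.tar.gz", "foo-1.0.tar.gz"
print(fetchcmd.replace("${URI}", uri).replace("${FILE}", basename))
# wget -t 5 -nv -O foo-1.0.tar.gz http://downloads.example.com/foo-1.0.tar.gz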
144 bitbake/lib/bb/manifest.py Normal file
@@ -0,0 +1,144 @@
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (C) 2003, 2004 Chris Larson
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import os, sys
import bb, bb.data

def getfields(line):
    fields = {}
    fieldmap = ( "pkg", "src", "dest", "type", "mode", "uid", "gid", "major", "minor", "start", "inc", "count" )
    for f in xrange(len(fieldmap)):
        fields[fieldmap[f]] = None

    if not line:
        return None

    splitline = line.split()
    if not len(splitline):
        return None

    try:
        for f in xrange(len(fieldmap)):
            if splitline[f] == '-':
                continue
            fields[fieldmap[f]] = splitline[f]
    except IndexError:
        pass
    return fields

def parse (mfile, d):
    manifest = []
    while 1:
        line = mfile.readline()
        if not line:
            break
        if line.startswith("#"):
            continue
        fields = getfields(line)
        if not fields:
            continue
        manifest.append(fields)
    return manifest

def emit (func, manifest, d):
#    str = "%s () {\n" % func
    str = ""
    for line in manifest:
        emittedline = emit_line(func, line, d)
        if not emittedline:
            continue
        str += emittedline + "\n"
#    str += "}\n"
    return str

def mangle (func, line, d):
    import copy
    newline = copy.copy(line)
    src = bb.data.expand(newline["src"], d)

    if src:
        if not os.path.isabs(src):
            src = "${WORKDIR}/" + src

    dest = newline["dest"]
    if not dest:
        return

    if dest.startswith("/"):
        dest = dest[1:]

    if func is "do_install":
        dest = "${D}/" + dest

    elif func is "do_populate":
        dest = "${WORKDIR}/install/" + newline["pkg"] + "/" + dest

    elif func is "do_stage":
        varmap = {}
        varmap["${bindir}"] = "${STAGING_DIR}/${HOST_SYS}/bin"
        varmap["${libdir}"] = "${STAGING_DIR}/${HOST_SYS}/lib"
        varmap["${includedir}"] = "${STAGING_DIR}/${HOST_SYS}/include"
        varmap["${datadir}"] = "${STAGING_DATADIR}"

        matched = 0
        for key in varmap.keys():
            if dest.startswith(key):
                dest = varmap[key] + "/" + dest[len(key):]
                matched = 1
        if not matched:
            newline = None
            return
    else:
        newline = None
        return

    newline["src"] = src
    newline["dest"] = dest
    return newline

def emit_line (func, line, d):
    import copy
    newline = copy.deepcopy(line)
    newline = mangle(func, newline, d)
    if not newline:
        return None

    str = ""
    type = newline["type"]
    mode = newline["mode"]
    src = newline["src"]
    dest = newline["dest"]
    if type is "d":
        str = "install -d "
        if mode:
            str += "-m %s " % mode
        str += dest
    elif type is "f":
        if not src:
            return None
        if dest.endswith("/"):
            str = "install -d "
            str += dest + "\n"
            str += "install "
        else:
            str = "install -D "
        if mode:
            str += "-m %s " % mode
        str += src + " " + dest
    del newline
    return str
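Illustrative aside (not part of the diff): getfields() above maps twelve whitespace-separated columns onto named fields, with '-' meaning unset. A hypothetical manifest line, parsed the same way:

# Sketch only; the manifest line is made up for illustration.
fieldmap = ("pkg", "src", "dest", "type", "mode", "uid", "gid",
            "major", "minor", "start", "inc", "count")
line = "mypkg files/init - f 0755 root root - - - - -"
fields = dict(zip(fieldmap, [None if v == '-' else v for v in line.split()]))
print(fields["pkg"], fields["mode"], fields["dest"])  # mypkg 0755 None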
@@ -27,7 +27,7 @@
    a method pool to do this task.

    This pool will be used to compile and execute the functions. It
    will be smart enough to
    will be smart enough to
    """

from bb.utils import better_compile, better_exec
@@ -43,8 +43,8 @@ def insert_method(modulename, code, fn):
    Add code of a module should be added. The methods
    will be simply added, no checking will be done
    """
    comp = better_compile(code, modulename, fn )
    better_exec(comp, None, code, fn)
    comp = better_compile(code, "<bb>", fn )
    better_exec(comp, __builtins__, code, fn)

    # now some instrumentation
    code = comp.co_names
@@ -59,7 +59,7 @@ def insert_method(modulename, code, fn):
def check_insert_method(modulename, code, fn):
    """
    Add the code if it wasn't added before. The module
    name will be used for that
    name will be used for that

    Variables:
        @modulename a short name e.g. base.bbclass
@@ -81,4 +81,4 @@ def get_parsed_dict():
    """
    shortcut
    """
    return _parsed_methods
    return _parsed_methods

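Illustrative aside (not part of the diff): the insert_method() hunk above changes only the pseudo-filename handed to the compiler and the globals passed at execution; plain compile()/exec shows the underlying mechanism:

# Sketch only; a trivial stand-in for the code bitbake would compile.
code = compile("def f(d):\n    return d * 2", "<bb>", "exec")
ns = {}
exec(code, ns)
print(ns["f"](21))  # 42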
@@ -22,179 +22,108 @@ Message handling infrastructure for bitbake
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import sys
import logging
import collections
from itertools import groupby
import warnings
import bb
import bb.event
import sys, os, re, bb
from bb import utils, event

class BBLogFormatter(logging.Formatter):
    """Formatter which ensures that our 'plain' messages (logging.INFO + 1) are used as is"""
debug_level = {}

    DEBUG3 = logging.DEBUG - 2
    DEBUG2 = logging.DEBUG - 1
    DEBUG = logging.DEBUG
    VERBOSE = logging.INFO - 1
    NOTE = logging.INFO
    PLAIN = logging.INFO + 1
    ERROR = logging.ERROR
    WARNING = logging.WARNING
    CRITICAL = logging.CRITICAL
verbose = False

    levelnames = {
        DEBUG3 : 'DEBUG',
        DEBUG2 : 'DEBUG',
        DEBUG : 'DEBUG',
        VERBOSE: 'NOTE',
        NOTE : 'NOTE',
        PLAIN : '',
        WARNING : 'WARNING',
        ERROR : 'ERROR',
        CRITICAL: 'ERROR',
    }
domain = bb.utils.Enum(
    'Build',
    'Cache',
    'Collection',
    'Data',
    'Depends',
    'Fetcher',
    'Parsing',
    'PersistData',
    'Provider',
    'RunQueue',
    'TaskData',
    'Util')

    def getLevelName(self, levelno):
        try:
            return self.levelnames[levelno]
        except KeyError:
            self.levelnames[levelno] = value = 'Level %d' % levelno
            return value

    def format(self, record):
        record.levelname = self.getLevelName(record.levelno)
        if record.levelno == self.PLAIN:
            return record.getMessage()
        else:
            return logging.Formatter.format(self, record)
class MsgBase(bb.event.Event):
    """Base class for messages"""

class Loggers(dict):
    def __getitem__(self, key):
        if key in self:
            return dict.__getitem__(self, key)
        else:
            log = logging.getLogger("BitBake.%s" % domain._fields[key])
            dict.__setitem__(self, key, log)
            return log
    def __init__(self, msg, d ):
        self._message = msg
        event.Event.__init__(self, d)

class DebugLevel(dict):
    def __getitem__(self, key):
        if key == "default":
            key = domain.Default
        return get_debug_level(key)
class MsgDebug(MsgBase):
    """Debug Message"""

def _NamedTuple(name, fields):
    Tuple = collections.namedtuple(name, " ".join(fields))
    return Tuple(*range(len(fields)))
class MsgNote(MsgBase):
    """Note Message"""

domain = _NamedTuple("Domain", (
    "Default",
    "Build",
    "Cache",
    "Collection",
    "Data",
    "Depends",
    "Fetcher",
    "Parsing",
    "PersistData",
    "Provider",
    "RunQueue",
    "TaskData",
    "Util"))
logger = logging.getLogger("BitBake")
loggers = Loggers()
debug_level = DebugLevel()
class MsgWarn(MsgBase):
    """Warning Message"""

class MsgError(MsgBase):
    """Error Message"""

class MsgFatal(MsgBase):
    """Fatal Message"""

class MsgPlain(MsgBase):
    """General output"""

#
# Message control functions
#

def set_debug_level(level):
    for log in loggers.itervalues():
        log.setLevel(logging.NOTSET)

    if level:
        logger.setLevel(logging.DEBUG - level + 1)
    else:
        logger.setLevel(logging.INFO)

def get_debug_level(msgdomain = domain.Default):
    if not msgdomain:
        level = logger.getEffectiveLevel()
    else:
        level = loggers[msgdomain].getEffectiveLevel()
    return max(0, logging.DEBUG - level + 1)
    bb.msg.debug_level = {}
    for domain in bb.msg.domain:
        bb.msg.debug_level[domain] = level
    bb.msg.debug_level['default'] = level

def set_verbose(level):
    if level:
        logger.setLevel(BBLogFormatter.VERBOSE)
    else:
        logger.setLevel(BBLogFormatter.INFO)
    bb.msg.verbose = level

def set_debug_domains(domainargs):
    for (domainarg, iterator) in groupby(domainargs):
        for index, msgdomain in enumerate(domain._fields):
            if msgdomain == domainarg:
                level = len(tuple(iterator))
                if level:
                    loggers[index].setLevel(logging.DEBUG - level + 1)
                break
        else:
            warn(None, "Logging domain %s is not valid, ignoring" % domainarg)
def set_debug_domains(domains):
    for domain in domains:
        found = False
        for ddomain in bb.msg.domain:
            if domain == str(ddomain):
                bb.msg.debug_level[ddomain] = bb.msg.debug_level[ddomain] + 1
                found = True
        if not found:
            bb.msg.warn(None, "Logging domain %s is not valid, ignoring" % domain)

#
# Message handling functions
#

def debug(level, msgdomain, msg):
    warnings.warn("bb.msg.debug will soon be deprecated in favor of the python 'logging' module",
                  PendingDeprecationWarning, stacklevel=2)
    level = logging.DEBUG - (level - 1)
    if not msgdomain:
        logger.debug(level, msg)
    else:
        loggers[msgdomain].debug(level, msg)
def debug(level, domain, msg, fn = None):
    bb.event.fire(MsgDebug(msg, None))
    if not domain:
        domain = 'default'
    if debug_level[domain] >= level:
        print 'DEBUG: ' + msg

def plain(msg):
    warnings.warn("bb.msg.plain will soon be deprecated in favor of the python 'logging' module",
                  PendingDeprecationWarning, stacklevel=2)
    logger.plain(msg)
def note(level, domain, msg, fn = None):
    bb.event.fire(MsgNote(msg, None))
    if not domain:
        domain = 'default'
    if level == 1 or verbose or debug_level[domain] >= 1:
        print 'NOTE: ' + msg

def note(level, msgdomain, msg):
    warnings.warn("bb.msg.note will soon be deprecated in favor of the python 'logging' module",
                  PendingDeprecationWarning, stacklevel=2)
    if level > 1:
        if msgdomain:
            logger.verbose(msg)
        else:
            loggers[msgdomain].verbose(msg)
    else:
        if msgdomain:
            logger.info(msg)
        else:
            loggers[msgdomain].info(msg)
def warn(domain, msg, fn = None):
    bb.event.fire(MsgWarn(msg, None))
    print 'WARNING: ' + msg

def warn(msgdomain, msg):
    warnings.warn("bb.msg.warn will soon be deprecated in favor of the python 'logging' module",
                  PendingDeprecationWarning, stacklevel=2)
    if not msgdomain:
        logger.warn(msg)
    else:
        loggers[msgdomain].warn(msg)
def error(domain, msg, fn = None):
    bb.event.fire(MsgError(msg, None))
    print 'ERROR: ' + msg

def error(msgdomain, msg):
    warnings.warn("bb.msg.error will soon be deprecated in favor of the python 'logging' module",
                  PendingDeprecationWarning, stacklevel=2)
    if not msgdomain:
        logger.error(msg)
    else:
        loggers[msgdomain].error(msg)

def fatal(msgdomain, msg):
    warnings.warn("bb.msg.fatal will soon be deprecated in favor of raising appropriate exceptions",
                  PendingDeprecationWarning, stacklevel=2)
    if not msgdomain:
        logger.critical(msg)
    else:
        loggers[msgdomain].critical(msg)
def fatal(domain, msg, fn = None):
    bb.event.fire(MsgFatal(msg, None))
    print 'ERROR: ' + msg
    sys.exit(1)

def plain(msg, fn = None):
    bb.event.fire(MsgPlain(msg, None))
    print msg

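Illustrative aside (not part of the diff): a standalone demonstration of the formatter idea above, where PLAIN records (INFO + 1) pass through untouched and everything else gets a level prefix:

# Sketch only; a minimal stand-in for BBLogFormatter.
import logging
PLAIN = logging.INFO + 1
class PlainAwareFormatter(logging.Formatter):
    def format(self, record):
        if record.levelno == PLAIN:
            return record.getMessage()
        return logging.Formatter.format(self, record)
handler = logging.StreamHandler()
handler.setFormatter(PlainAwareFormatter("%(levelname)s: %(message)s"))
log = logging.getLogger("demo")
log.addHandler(handler)
log.setLevel(logging.INFO)
log.info("a NOTE-style message")   # INFO: a NOTE-style message
log.log(PLAIN, "raw output line")  # raw output line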
@@ -24,16 +24,11 @@ File parsers for the BitBake build tools.
#
# Based on functions from the base bb module, Copyright 2003 Holger Schurig

__all__ = [ 'ParseError', 'SkipPackage', 'cached_mtime', 'mark_dependency',
            'supports', 'handle', 'init' ]
handlers = []

import os
import stat
import logging
import bb
import bb.utils
import bb.siggen

logger = logging.getLogger("BitBake.Parsing")
import bb, os

class ParseError(Exception):
    """Exception raised when parsing fails"""
@@ -43,27 +38,23 @@ class SkipPackage(Exception):

__mtime_cache = {}
def cached_mtime(f):
    if f not in __mtime_cache:
        __mtime_cache[f] = os.stat(f)[stat.ST_MTIME]
    if not __mtime_cache.has_key(f):
        __mtime_cache[f] = os.stat(f)[8]
    return __mtime_cache[f]

def cached_mtime_noerror(f):
    if f not in __mtime_cache:
    if not __mtime_cache.has_key(f):
        try:
            __mtime_cache[f] = os.stat(f)[stat.ST_MTIME]
            __mtime_cache[f] = os.stat(f)[8]
        except OSError:
            return 0
    return __mtime_cache[f]

def update_mtime(f):
    __mtime_cache[f] = os.stat(f)[stat.ST_MTIME]
    return __mtime_cache[f]

def mark_dependency(d, f):
    if f.startswith('./'):
        f = "%s/%s" % (os.getcwd(), f[2:])
    deps = bb.data.getVar('__depends', d) or set()
    deps.update([(f, cached_mtime(f))])
    deps = bb.data.getVar('__depends', d) or []
    deps.append( (f, cached_mtime(f)) )
    bb.data.setVar('__depends', deps, d)

def supports(fn, data):
@@ -85,39 +76,5 @@ def init(fn, data):
    if h['supports'](fn):
        return h['init'](data)

def init_parser(d):
    bb.parse.siggen = bb.siggen.init(d)

def resolve_file(fn, d):
    if not os.path.isabs(fn):
        bbpath = bb.data.getVar("BBPATH", d, True)
        newfn = bb.utils.which(bbpath, fn)
        if not newfn:
            raise IOError("file %s not found in %s" % (fn, bbpath))
        fn = newfn

    logger.debug(2, "LOAD %s", fn)
    return fn

# Used by OpenEmbedded metadata
__pkgsplit_cache__={}
def vars_from_file(mypkg, d):
    if not mypkg:
        return (None, None, None)
    if mypkg in __pkgsplit_cache__:
        return __pkgsplit_cache__[mypkg]

    myfile = os.path.splitext(os.path.basename(mypkg))
    parts = myfile[0].split('_')
    __pkgsplit_cache__[mypkg] = parts
    if len(parts) > 3:
        raise ParseError("Unable to generate default variables from the filename: %s (too many underscores)" % mypkg)
    exp = 3 - len(parts)
    tmplist = []
    while exp != 0:
        exp -= 1
        tmplist.append(None)
    parts.extend(tmplist)
    return parts

from bb.parse.parse_py import __version__, ConfHandler, BBHandler
from parse_py import __version__, ConfHandler, BBHandler

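Illustrative aside (not part of the diff): vars_from_file() above derives (PN, PV, PR)-style fields by splitting a recipe basename on underscores and padding to three entries:

# Sketch only; the recipe path is hypothetical.
import os
stem = os.path.splitext(os.path.basename("recipes/foo_1.2_r0.bb"))[0]
parts = stem.split('_')
parts.extend([None] * (3 - len(parts)))
print(parts)  # ['foo', '1.2', 'r0']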
@@ -1,446 +0,0 @@
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
"""
AbstractSyntaxTree classes for the Bitbake language
"""

# Copyright (C) 2003, 2004 Chris Larson
# Copyright (C) 2003, 2004 Phil Blundell
# Copyright (C) 2009 Holger Hans Peter Freyther
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

from __future__ import absolute_import
from future_builtins import filter
import re
import string
import logging
import bb
import itertools
from bb import methodpool
from bb.parse import logger

__parsed_methods__ = bb.methodpool.get_parsed_dict()
_bbversions_re = re.compile(r"\[(?P<from>[0-9]+)-(?P<to>[0-9]+)\]")

class StatementGroup(list):
    def eval(self, data):
        for statement in self:
            statement.eval(data)

class AstNode(object):
    def __init__(self, filename, lineno):
        self.filename = filename
        self.lineno = lineno

class IncludeNode(AstNode):
    def __init__(self, filename, lineno, what_file, force):
        AstNode.__init__(self, filename, lineno)
        self.what_file = what_file
        self.force = force

    def eval(self, data):
        """
        Include the file and evaluate the statements
        """
        s = bb.data.expand(self.what_file, data)
        logger.debug(2, "CONF %s:%s: including %s", self.filename, self.lineno, s)

        # TODO: Cache those includes... maybe not here though
        if self.force:
            bb.parse.ConfHandler.include(self.filename, s, data, "include required")
        else:
            bb.parse.ConfHandler.include(self.filename, s, data, False)

class ExportNode(AstNode):
    def __init__(self, filename, lineno, var):
        AstNode.__init__(self, filename, lineno)
        self.var = var

    def eval(self, data):
        bb.data.setVarFlag(self.var, "export", 1, data)

class DataNode(AstNode):
    """
    Various data related updates. For the sake of sanity
    we have one class doing all this. This means that all
    this need to be re-evaluated... we might be able to do
    that faster with multiple classes.
    """
    def __init__(self, filename, lineno, groupd):
        AstNode.__init__(self, filename, lineno)
        self.groupd = groupd

    def getFunc(self, key, data):
        if 'flag' in self.groupd and self.groupd['flag'] != None:
            return bb.data.getVarFlag(key, self.groupd['flag'], data)
        else:
            return bb.data.getVar(key, data)

    def eval(self, data):
        groupd = self.groupd
        key = groupd["var"]
        if "exp" in groupd and groupd["exp"] != None:
            bb.data.setVarFlag(key, "export", 1, data)
        if "ques" in groupd and groupd["ques"] != None:
            val = self.getFunc(key, data)
            if val == None:
                val = groupd["value"]
        elif "colon" in groupd and groupd["colon"] != None:
            e = data.createCopy()
            bb.data.update_data(e)
            val = bb.data.expand(groupd["value"], e)
        elif "append" in groupd and groupd["append"] != None:
            val = "%s %s" % ((self.getFunc(key, data) or ""), groupd["value"])
        elif "prepend" in groupd and groupd["prepend"] != None:
            val = "%s %s" % (groupd["value"], (self.getFunc(key, data) or ""))
        elif "postdot" in groupd and groupd["postdot"] != None:
            val = "%s%s" % ((self.getFunc(key, data) or ""), groupd["value"])
        elif "predot" in groupd and groupd["predot"] != None:
            val = "%s%s" % (groupd["value"], (self.getFunc(key, data) or ""))
        else:
            val = groupd["value"]

        if 'flag' in groupd and groupd['flag'] != None:
            bb.data.setVarFlag(key, groupd['flag'], val, data)
        elif groupd["lazyques"]:
            bb.data.setVarFlag(key, "defaultval", val, data)
        else:
            bb.data.setVar(key, val, data)

class MethodNode(AstNode):
    def __init__(self, filename, lineno, func_name, body):
        AstNode.__init__(self, filename, lineno)
        self.func_name = func_name
        self.body = body

    def eval(self, data):
        if self.func_name == "__anonymous":
            funcname = ("__anon_%s_%s" % (self.lineno, self.filename.translate(string.maketrans('/.+-', '____'))))
            if not funcname in bb.methodpool._parsed_fns:
                text = "def %s(d):\n" % (funcname) + '\n'.join(self.body)
                bb.methodpool.insert_method(funcname, text, self.filename)
            anonfuncs = bb.data.getVar('__BBANONFUNCS', data) or []
            anonfuncs.append(funcname)
            bb.data.setVar('__BBANONFUNCS', anonfuncs, data)
        else:
            bb.data.setVarFlag(self.func_name, "func", 1, data)
            bb.data.setVar(self.func_name, '\n'.join(self.body), data)

class PythonMethodNode(AstNode):
    def __init__(self, filename, lineno, function, define, body):
        AstNode.__init__(self, filename, lineno)
        self.function = function
        self.define = define
        self.body = body

    def eval(self, data):
        # Note we will add root to parsedmethods after having parse
        # 'this' file. This means we will not parse methods from
        # bb classes twice
        text = '\n'.join(self.body)
        if not bb.methodpool.parsed_module(self.define):
            bb.methodpool.insert_method(self.define, text, self.filename)
        bb.data.setVarFlag(self.function, "func", 1, data)
        bb.data.setVarFlag(self.function, "python", 1, data)
        bb.data.setVar(self.function, text, data)

class MethodFlagsNode(AstNode):
    def __init__(self, filename, lineno, key, m):
        AstNode.__init__(self, filename, lineno)
        self.key = key
        self.m = m

    def eval(self, data):
        if bb.data.getVar(self.key, data):
            # clean up old version of this piece of metadata, as its
            # flags could cause problems
            bb.data.setVarFlag(self.key, 'python', None, data)
            bb.data.setVarFlag(self.key, 'fakeroot', None, data)
        if self.m.group("py") is not None:
            bb.data.setVarFlag(self.key, "python", "1", data)
        else:
            bb.data.delVarFlag(self.key, "python", data)
        if self.m.group("fr") is not None:
            bb.data.setVarFlag(self.key, "fakeroot", "1", data)
        else:
            bb.data.delVarFlag(self.key, "fakeroot", data)

class ExportFuncsNode(AstNode):
    def __init__(self, filename, lineno, fns, classes):
        AstNode.__init__(self, filename, lineno)
        self.n = fns.split()
        self.classes = classes

    def eval(self, data):
        for f in self.n:
            allvars = []
            allvars.append(f)
            allvars.append(self.classes[-1] + "_" + f)

            vars = [[ allvars[0], allvars[1] ]]
            if len(self.classes) > 1 and self.classes[-2] is not None:
                allvars.append(self.classes[-2] + "_" + f)
                vars = []
                vars.append([allvars[2], allvars[1]])
                vars.append([allvars[0], allvars[2]])

            for (var, calledvar) in vars:
                if bb.data.getVar(var, data) and not bb.data.getVarFlag(var, 'export_func', data):
                    continue

                if bb.data.getVar(var, data):
                    bb.data.setVarFlag(var, 'python', None, data)
                    bb.data.setVarFlag(var, 'func', None, data)

                for flag in [ "func", "python" ]:
                    if bb.data.getVarFlag(calledvar, flag, data):
                        bb.data.setVarFlag(var, flag, bb.data.getVarFlag(calledvar, flag, data), data)
                for flag in [ "dirs" ]:
                    if bb.data.getVarFlag(var, flag, data):
                        bb.data.setVarFlag(calledvar, flag, bb.data.getVarFlag(var, flag, data), data)

                if bb.data.getVarFlag(calledvar, "python", data):
                    bb.data.setVar(var, "\tbb.build.exec_func('" + calledvar + "', d)\n", data)
                else:
                    bb.data.setVar(var, "\t" + calledvar + "\n", data)
                bb.data.setVarFlag(var, 'export_func', '1', data)

class AddTaskNode(AstNode):
    def __init__(self, filename, lineno, func, before, after):
        AstNode.__init__(self, filename, lineno)
        self.func = func
        self.before = before
        self.after = after

    def eval(self, data):
        var = self.func
        if self.func[:3] != "do_":
            var = "do_" + self.func

        bb.data.setVarFlag(var, "task", 1, data)
        bbtasks = bb.data.getVar('__BBTASKS', data) or []
        if not var in bbtasks:
            bbtasks.append(var)
        bb.data.setVar('__BBTASKS', bbtasks, data)

        existing = bb.data.getVarFlag(var, "deps", data) or []
        if self.after is not None:
            # set up deps for function
            for entry in self.after.split():
                if entry not in existing:
                    existing.append(entry)
            bb.data.setVarFlag(var, "deps", existing, data)
        if self.before is not None:
            # set up things that depend on this func
            for entry in self.before.split():
                existing = bb.data.getVarFlag(entry, "deps", data) or []
                if var not in existing:
                    bb.data.setVarFlag(entry, "deps", [var] + existing, data)

class BBHandlerNode(AstNode):
    def __init__(self, filename, lineno, fns):
        AstNode.__init__(self, filename, lineno)
        self.hs = fns.split()

    def eval(self, data):
        bbhands = bb.data.getVar('__BBHANDLERS', data) or []
        for h in self.hs:
            bbhands.append(h)
            bb.data.setVarFlag(h, "handler", 1, data)
        bb.data.setVar('__BBHANDLERS', bbhands, data)

class InheritNode(AstNode):
    def __init__(self, filename, lineno, classes):
        AstNode.__init__(self, filename, lineno)
        self.classes = classes

    def eval(self, data):
        bb.parse.BBHandler.inherit(self.classes, data)

def handleInclude(statements, filename, lineno, m, force):
    statements.append(IncludeNode(filename, lineno, m.group(1), force))

def handleExport(statements, filename, lineno, m):
    statements.append(ExportNode(filename, lineno, m.group(1)))

def handleData(statements, filename, lineno, groupd):
    statements.append(DataNode(filename, lineno, groupd))

def handleMethod(statements, filename, lineno, func_name, body):
    statements.append(MethodNode(filename, lineno, func_name, body))

def handlePythonMethod(statements, filename, lineno, funcname, root, body):
    statements.append(PythonMethodNode(filename, lineno, funcname, root, body))

def handleMethodFlags(statements, filename, lineno, key, m):
    statements.append(MethodFlagsNode(filename, lineno, key, m))

def handleExportFuncs(statements, filename, lineno, m, classes):
    statements.append(ExportFuncsNode(filename, lineno, m.group(1), classes))

def handleAddTask(statements, filename, lineno, m):
    func = m.group("func")
    before = m.group("before")
    after = m.group("after")
    if func is None:
        return

    statements.append(AddTaskNode(filename, lineno, func, before, after))

def handleBBHandlers(statements, filename, lineno, m):
    statements.append(BBHandlerNode(filename, lineno, m.group(1)))

def handleInherit(statements, filename, lineno, m):
    classes = m.group(1)
    statements.append(InheritNode(filename, lineno, classes.split()))

def finalize(fn, d, variant = None):
    bb.data.expandKeys(d)
    bb.data.update_data(d)
    code = []
    for funcname in bb.data.getVar("__BBANONFUNCS", d) or []:
        code.append("%s(d)" % funcname)
    bb.utils.simple_exec("\n".join(code), {"d": d})
    bb.data.update_data(d)

    all_handlers = {}
    for var in bb.data.getVar('__BBHANDLERS', d) or []:
        # try to add the handler
        handler = bb.data.getVar(var, d)
        bb.event.register(var, handler)

    tasklist = bb.data.getVar('__BBTASKS', d) or []
    bb.build.add_tasks(tasklist, d)

    bb.parse.siggen.finalise(fn, d, variant)

    bb.event.fire(bb.event.RecipeParsed(fn), d)

def _create_variants(datastores, names, function):
    def create_variant(name, orig_d, arg = None):
        new_d = bb.data.createCopy(orig_d)
        function(arg or name, new_d)
        datastores[name] = new_d

    for variant, variant_d in datastores.items():
        for name in names:
            if not variant:
                # Based on main recipe
                create_variant(name, variant_d)
            else:
                create_variant("%s-%s" % (variant, name), variant_d, name)

def _expand_versions(versions):
    def expand_one(version, start, end):
        for i in xrange(start, end + 1):
            ver = _bbversions_re.sub(str(i), version, 1)
            yield ver

    versions = iter(versions)
    while True:
        try:
            version = next(versions)
        except StopIteration:
            break

        range_ver = _bbversions_re.search(version)
        if not range_ver:
            yield version
        else:
            newversions = expand_one(version, int(range_ver.group("from")),
                                     int(range_ver.group("to")))
            versions = itertools.chain(newversions, versions)

def multi_finalize(fn, d):
    appends = (d.getVar("__BBAPPEND", True) or "").split()
    for append in appends:
        logger.debug(2, "Appending .bbappend file %s to %s", append, fn)
        bb.parse.BBHandler.handle(append, d, True)

    safe_d = d
    d = bb.data.createCopy(safe_d)
    try:
        finalize(fn, d)
    except bb.parse.SkipPackage:
        bb.data.setVar("__SKIPPED", True, d)
    datastores = {"": safe_d}

    versions = (d.getVar("BBVERSIONS", True) or "").split()
    if versions:
        pv = orig_pv = d.getVar("PV", True)
        baseversions = {}

        def verfunc(ver, d, pv_d = None):
            if pv_d is None:
                pv_d = d

            overrides = d.getVar("OVERRIDES", True).split(":")
            pv_d.setVar("PV", ver)
            overrides.append(ver)
            bpv = baseversions.get(ver) or orig_pv
            pv_d.setVar("BPV", bpv)
            overrides.append(bpv)
            d.setVar("OVERRIDES", ":".join(overrides))

        versions = list(_expand_versions(versions))
        for pos, version in enumerate(list(versions)):
            try:
                pv, bpv = version.split(":", 2)
            except ValueError:
                pass
            else:
                versions[pos] = pv
                baseversions[pv] = bpv

        if pv in versions and not baseversions.get(pv):
            versions.remove(pv)
        else:
            pv = versions.pop()

            # This is necessary because our existing main datastore
            # has already been finalized with the old PV, we need one
            # that's been finalized with the new PV.
            d = bb.data.createCopy(safe_d)
            verfunc(pv, d, safe_d)
            try:
                finalize(fn, d)
            except bb.parse.SkipPackage:
                bb.data.setVar("__SKIPPED", True, d)

        _create_variants(datastores, versions, verfunc)

    extended = d.getVar("BBCLASSEXTEND", True) or ""
    if extended:
        pn = d.getVar("PN", True)
        def extendfunc(name, d):
            d.setVar("PN", "%s-%s" % (pn, name))
            bb.parse.BBHandler.inherit([name], d)

        safe_d.setVar("BBCLASSEXTEND", extended)
        _create_variants(datastores, extended.split(), extendfunc)

    for variant, variant_d in datastores.iteritems():
        if variant:
            try:
                finalize(fn, variant_d, variant)
            except bb.parse.SkipPackage:
                bb.data.setVar("__SKIPPED", True, variant_d)

    if len(datastores) > 1:
        variants = filter(None, datastores.iterkeys())
        safe_d.setVar("__VARIANTS", " ".join(variants))

    datastores[""] = d
    return datastores
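Illustrative aside (not part of the diff): _expand_versions() above expands BBVERSIONS range syntax such as 1.[0-2]; a compact standalone equivalent that handles a single range per entry:

# Sketch only; the real generator above also chains multiple ranges.
import re
rng = re.compile(r"\[(?P<from>[0-9]+)-(?P<to>[0-9]+)\]")
def expand(version):
    m = rng.search(version)
    if not m:
        return [version]
    return [rng.sub(str(i), version, 1)
            for i in range(int(m.group("from")), int(m.group("to")) + 1)]
print(expand("1.[0-2]"))  # ['1.0', '1.1', '1.2']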
@@ -11,7 +11,7 @@

# Copyright (C) 2003, 2004  Chris Larson
# Copyright (C) 2003, 2004  Phil Blundell
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
@@ -25,18 +25,12 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

from __future__ import absolute_import
import re, bb, os
import logging
import bb.build, bb.utils
from bb import data
import re, bb, os, sys, time
import bb.fetch, bb.build, bb.utils
from bb import data, fetch, methodpool

from . import ConfHandler
from .. import resolve_file, ast, logger
from .ConfHandler import include, init

# For compatibility
bb.deprecate_import(__name__, "bb.parse", ["vars_from_file"])
from ConfHandler import include, localpath, obtain, init
from bb.parse import ParseError

__func_start_regexp__ = re.compile( r"(((?P<py>python)|(?P<fr>fakeroot))\s*)*(?P<func>[\w\.\-\+\{\}\$]+)?\s*\(\s*\)\s*{$" )
__inherit_regexp__ = re.compile( r"inherit\s+(.+)" )
@@ -45,7 +39,7 @@ __addtask_regexp__ = re.compile("addtask\s+(?P<func>\w+)\s*((before\s*(?P<
__addhandler_regexp__ = re.compile( r"addhandler\s+(.+)" )
__def_regexp__ = re.compile( r"def\s+(\w+).*:" )
__python_func_regexp__ = re.compile( r"(\s+.*)|(^$)" )

__word__ = re.compile(r"\S+")

__infunc__ = ""
__inpython__ = False
@@ -53,8 +47,6 @@ __body__ = []
__classname__ = ""
classes = [ None, ]

cached_statements = {}

# We need to indicate EOF to the feeder. This code is so messy that
# factoring it out to a close_parse_file method is out of question.
# We will use the IN_PYTHON_EOF as an indicator to just close the method
@@ -62,117 +54,176 @@ cached_statements = {}
# The two parts using it are tightly integrated anyway
IN_PYTHON_EOF = -9999999999999


__parsed_methods__ = methodpool.get_parsed_dict()

def supports(fn, d):
    """Return True if fn has a supported extension"""
    return os.path.splitext(fn)[-1] in [".bb", ".bbclass", ".inc"]
    localfn = localpath(fn, d)
    return localfn[-3:] == ".bb" or localfn[-8:] == ".bbclass" or localfn[-4:] == ".inc"

def inherit(files, d):
    __inherit_cache = data.getVar('__inherit_cache', d) or []
    fn = ""
    lineno = 0
    files = data.expand(files, d)
    for file in files:
        file = data.expand(file, d)
        if not os.path.isabs(file) and not file.endswith(".bbclass"):
        if file[0] != "/" and file[-8:] != ".bbclass":
            file = os.path.join('classes', '%s.bbclass' % file)

        if not file in __inherit_cache:
            logger.log(logging.DEBUG -1, "BB %s:%d: inheriting %s", fn, lineno, file)
            bb.msg.debug(2, bb.msg.domain.Parsing, "BB %s:%d: inheriting %s" % (fn, lineno, file))
            __inherit_cache.append( file )
            data.setVar('__inherit_cache', __inherit_cache, d)
            include(fn, file, d, "inherit")
            __inherit_cache = data.getVar('__inherit_cache', d) or []
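Both versions of inherit() resolve a bare class name to a path under classes/ before including it. A standalone sketch of that name-to-path mapping (function name here is illustrative, not part of the tree):

    import os

    def resolve_class(name):
        """How an 'inherit <name>' line maps to a class file: bare names
        become classes/<name>.bbclass; absolute paths and explicit
        .bbclass names pass through unchanged."""
        if not os.path.isabs(name) and not name.endswith(".bbclass"):
            name = os.path.join('classes', '%s.bbclass' % name)
        return name

    print(resolve_class('autotools'))           # classes/autotools.bbclass
    print(resolve_class('/tmp/local.bbclass'))  # /tmp/local.bbclass

The resulting path is then located via BBPATH by the include machinery, so layers can override classes simply by ordering BBPATH.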

def get_statements(filename, absolute_filename, base_name):
    global cached_statements

    try:
        return cached_statements[absolute_filename]
    except KeyError:
        file = open(absolute_filename, 'r')
        statements = ast.StatementGroup()

        lineno = 0
        while True:
            lineno = lineno + 1
            s = file.readline()
            if not s: break
            s = s.rstrip()
            feeder(lineno, s, filename, base_name, statements)
        if __inpython__:
            # add a blank line to close out any python definition
            feeder(IN_PYTHON_EOF, "", filename, base_name, statements)

        if filename.endswith(".bbclass") or filename.endswith(".inc"):
            cached_statements[absolute_filename] = statements
        return statements
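The cache above keeps the parsed statement list only for .bbclass and .inc files, since those are included by many recipes while a .bb file is parsed once. A minimal sketch of that memoization pattern (names are illustrative):

    cached = {}

    def get_parsed(path, parse):
        """Parse once, replay many times for shared metadata files."""
        try:
            return cached[path]
        except KeyError:
            result = parse(path)
            # One-off .bb recipes are not worth keeping in memory.
            if path.endswith(".bbclass") or path.endswith(".inc"):
                cached[path] = result
            return result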

def handle(fn, d, include):
def handle(fn, d, include = 0):
    global __func_start_regexp__, __inherit_regexp__, __export_func_regexp__, __addtask_regexp__, __addhandler_regexp__, __infunc__, __body__, __residue__
    __body__ = []
    __infunc__ = ""
    __classname__ = ""
    __residue__ = []

    if include == 0:
        logger.debug(2, "BB %s: handle(data)", fn)
        bb.msg.debug(2, bb.msg.domain.Parsing, "BB " + fn + ": handle(data)")
    else:
        logger.debug(2, "BB %s: handle(data, include)", fn)
        bb.msg.debug(2, bb.msg.domain.Parsing, "BB " + fn + ": handle(data, include)")

    base_name = os.path.basename(fn)
    (root, ext) = os.path.splitext(base_name)
    (root, ext) = os.path.splitext(os.path.basename(fn))
    base_name = "%s%s" % (root,ext)
    init(d)

    if ext == ".bbclass":
        __classname__ = root
        classes.append(__classname__)
        __inherit_cache = data.getVar('__inherit_cache', d) or []
        if not fn in __inherit_cache:
            __inherit_cache.append(fn)
            data.setVar('__inherit_cache', __inherit_cache, d)

    if include != 0:
        oldfile = data.getVar('FILE', d)
    else:
        oldfile = None

    abs_fn = resolve_file(fn, d)
    fn = obtain(fn, d)
    bbpath = (data.getVar('BBPATH', d, 1) or '').split(':')
    if not os.path.isabs(fn):
        f = None
        for p in bbpath:
            j = os.path.join(p, fn)
            if os.access(j, os.R_OK):
                abs_fn = j
                f = open(j, 'r')
                break
        if f is None:
            raise IOError("file not found")
    else:
        f = open(fn,'r')
        abs_fn = fn

    if ext != ".bbclass":
        bbpath.insert(0, os.path.dirname(abs_fn))
        data.setVar('BBPATH', ":".join(bbpath), d)

    if include:
        bb.parse.mark_dependency(d, abs_fn)

    # actual loading
    statements = get_statements(fn, abs_fn, base_name)

    # DONE WITH PARSING... time to evaluate
    if ext != ".bbclass":
        data.setVar('FILE', fn, d)
        i = (data.getVar("INHERIT", d, 1) or "").split()
        if not "base" in i and __classname__ != "base":
            i[0:0] = ["base"]
        inherit(i, d)

    statements.eval(d)

    lineno = 0
    while 1:
        lineno = lineno + 1
        s = f.readline()
        if not s: break
        s = s.rstrip()
        feeder(lineno, s, fn, base_name, d)
    if __inpython__:
        # add a blank line to close out any python definition
        feeder(IN_PYTHON_EOF, "", fn, base_name, d)
    if ext == ".bbclass":
        classes.remove(__classname__)
    else:
        if include == 0:
            return ast.multi_finalize(fn, d)
            data.expandKeys(d)
            data.update_data(d)
            anonqueue = data.getVar("__anonqueue", d, 1) or []
            body = [x['content'] for x in anonqueue]
            flag = { 'python' : 1, 'func' : 1 }
            data.setVar("__anonfunc", "\n".join(body), d)
            data.setVarFlags("__anonfunc", flag, d)
            from bb import build
            try:
                t = data.getVar('T', d)
                data.setVar('T', '${TMPDIR}/', d)
                build.exec_func("__anonfunc", d)
                data.delVar('T', d)
                if t:
                    data.setVar('T', t, d)
            except Exception, e:
                bb.msg.debug(1, bb.msg.domain.Parsing, "Exception when executing anonymous function: %s" % e)
                raise
            data.delVar("__anonqueue", d)
            data.delVar("__anonfunc", d)
            set_additional_vars(fn, d, include)
            data.update_data(d)

            all_handlers = {}
            for var in data.getVar('__BBHANDLERS', d) or []:
                # try to add the handler
                # if we added it remember the choiche
                handler = data.getVar(var,d)
                if bb.event.register(var,handler) == bb.event.Registered:
                    all_handlers[var] = handler

            tasklist = {}
            for var in data.getVar('__BBTASKS', d) or []:
                if var not in tasklist:
                    tasklist[var] = []
                deps = data.getVarFlag(var, 'deps', d) or []
                for p in deps:
                    if p not in tasklist[var]:
                        tasklist[var].append(p)

                postdeps = data.getVarFlag(var, 'postdeps', d) or []
                for p in postdeps:
                    if p not in tasklist:
                        tasklist[p] = []
                    if var not in tasklist[p]:
                        tasklist[p].append(var)

            bb.build.add_tasks(tasklist, d)

            # now add the handlers
            if not len(all_handlers) == 0:
                data.setVar('__all_handlers__', all_handlers, d)

        bbpath.pop(0)
        if oldfile:
            bb.data.setVar("FILE", oldfile, d)

    # we have parsed the bb class now
    if ext == ".bbclass" or ext == ".inc":
        bb.methodpool.get_parsed_dict()[base_name] = 1
        __parsed_methods__[base_name] = 1

    return d
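The __anonqueue/__anonfunc block in the older handle() above collects every anonymous python function body from a recipe into one synthetic function and executes it once parsing finishes. A recipe-side illustration of the construct it services (BitBake metadata, a hedged sketch with made-up variable names):

    # An anonymous python function runs at the end of parsing and can
    # adjust variables based on what the rest of the recipe set up.
    python __anonymous () {
        if bb.data.getVar('SOME_FEATURE', d, 1) == '1':
            bb.data.setVar('EXTRA_OECONF', '--enable-some-feature', d)
    }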

def feeder(lineno, s, fn, root, statements):
    global __func_start_regexp__, __inherit_regexp__, __export_func_regexp__, __addtask_regexp__, __addhandler_regexp__, __def_regexp__, __python_func_regexp__, __inpython__, __infunc__, __body__, classes, bb, __residue__
def feeder(lineno, s, fn, root, d):
    global __func_start_regexp__, __inherit_regexp__, __export_func_regexp__, __addtask_regexp__, __addhandler_regexp__, __def_regexp__, __python_func_regexp__, __inpython__,__infunc__, __body__, classes, bb, __residue__
    if __infunc__:
        if s == '}':
            __body__.append('')
            ast.handleMethod(statements, fn, lineno, __infunc__, __body__)
            data.setVar(__infunc__, '\n'.join(__body__), d)
            data.setVarFlag(__infunc__, "func", 1, d)
            if __infunc__ == "__anonymous":
                anonqueue = bb.data.getVar("__anonqueue", d) or []
                anonitem = {}
                anonitem["content"] = bb.data.getVar("__anonymous", d)
                anonitem["flags"] = bb.data.getVarFlags("__anonymous", d)
                anonqueue.append(anonitem)
                bb.data.setVar("__anonqueue", anonqueue, d)
                bb.data.delVarFlags("__anonymous", d)
                bb.data.delVar("__anonymous", d)
            __infunc__ = ""
            __body__ = []
        else:
@@ -185,22 +236,28 @@ def feeder(lineno, s, fn, root, statements):
            __body__.append(s)
        return
    else:
            ast.handlePythonMethod(statements, fn, lineno, __inpython__,
                                   root, __body__)
            # Note we will add root to parsedmethods after having parse
            # 'this' file. This means we will not parse methods from
            # bb classes twice
            if not root in __parsed_methods__:
                text = '\n'.join(__body__)
                methodpool.insert_method( root, text, fn )
                funcs = data.getVar('__functions__', d) or {}
                if not funcs.has_key( root ):
                    funcs[root] = text
                else:
                    funcs[root] = "%s\n%s" % (funcs[root], text)

                data.setVar('__functions__', funcs, d)
            __body__ = []
            __inpython__ = False

            if lineno == IN_PYTHON_EOF:
                return

        # fall through

    # Skip empty lines
    if s == '':
        return

    if s[0] == '#':
        if len(__residue__) != 0 and __residue__[0][0] != "#":
            bb.error("There is a comment on line %s of file %s (%s) which is in the middle of a multiline expression.\nBitbake used to ignore these but no longer does so, please fix your metadata as errors are likely as a result of this change." % (lineno, fn, s))
    if s == '' or s[0] == '#': return          # skip comments and empty lines

    if s[-1] == '\\':
        __residue__.append(s[:-1])
@@ -209,46 +266,162 @@ def feeder(lineno, s, fn, root, statements):
    s = "".join(__residue__) + s
    __residue__ = []

    # Skip comments
    if s[0] == '#':
        return

    m = __func_start_regexp__.match(s)
    if m:
        __infunc__ = m.group("func") or "__anonymous"
        ast.handleMethodFlags(statements, fn, lineno, __infunc__, m)
        key = __infunc__
        if data.getVar(key, d):
            # clean up old version of this piece of metadata, as its
            # flags could cause problems
            data.setVarFlag(key, 'python', None, d)
            data.setVarFlag(key, 'fakeroot', None, d)
        if m.group("py") is not None:
            data.setVarFlag(key, "python", "1", d)
        else:
            data.delVarFlag(key, "python", d)
        if m.group("fr") is not None:
            data.setVarFlag(key, "fakeroot", "1", d)
        else:
            data.delVarFlag(key, "fakeroot", d)
        return

    m = __def_regexp__.match(s)
    if m:
        __body__.append(s)
        __inpython__ = m.group(1)
        __inpython__ = True
        return

    m = __export_func_regexp__.match(s)
    if m:
        ast.handleExportFuncs(statements, fn, lineno, m, classes)
        fns = m.group(1)
        n = __word__.findall(fns)
        for f in n:
            allvars = []
            allvars.append(f)
            allvars.append(classes[-1] + "_" + f)

            vars = [[ allvars[0], allvars[1] ]]
            if len(classes) > 1 and classes[-2] is not None:
                allvars.append(classes[-2] + "_" + f)
                vars = []
                vars.append([allvars[2], allvars[1]])
                vars.append([allvars[0], allvars[2]])

            for (var, calledvar) in vars:
                if data.getVar(var, d) and not data.getVarFlag(var, 'export_func', d):
                    continue

                if data.getVar(var, d):
                    data.setVarFlag(var, 'python', None, d)
                    data.setVarFlag(var, 'func', None, d)

                for flag in [ "func", "python" ]:
                    if data.getVarFlag(calledvar, flag, d):
                        data.setVarFlag(var, flag, data.getVarFlag(calledvar, flag, d), d)
                for flag in [ "dirs" ]:
                    if data.getVarFlag(var, flag, d):
                        data.setVarFlag(calledvar, flag, data.getVarFlag(var, flag, d), d)

                if data.getVarFlag(calledvar, "python", d):
                    data.setVar(var, "\tbb.build.exec_func('" + calledvar + "', d)\n", d)
                else:
                    data.setVar(var, "\t" + calledvar + "\n", d)
                data.setVarFlag(var, 'export_func', '1', d)

        return

    m = __addtask_regexp__.match(s)
    if m:
        ast.handleAddTask(statements, fn, lineno, m)
        func = m.group("func")
        before = m.group("before")
        after = m.group("after")
        if func is None:
            return
        var = "do_" + func

        data.setVarFlag(var, "task", 1, d)

        bbtasks = data.getVar('__BBTASKS', d) or []
        bbtasks.append(var)
        data.setVar('__BBTASKS', bbtasks, d)

        if after is not None:
            # set up deps for function
            data.setVarFlag(var, "deps", after.split(), d)
        if before is not None:
            # set up things that depend on this func
            data.setVarFlag(var, "postdeps", before.split(), d)
        return

    m = __addhandler_regexp__.match(s)
    if m:
        ast.handleBBHandlers(statements, fn, lineno, m)
        fns = m.group(1)
        hs = __word__.findall(fns)
        bbhands = data.getVar('__BBHANDLERS', d) or []
        for h in hs:
            bbhands.append(h)
            data.setVarFlag(h, "handler", 1, d)
        data.setVar('__BBHANDLERS', bbhands, d)
        return

    m = __inherit_regexp__.match(s)
    if m:
        ast.handleInherit(statements, fn, lineno, m)

        files = m.group(1)
        n = __word__.findall(files)
        inherit(n, d)
        return

    return ConfHandler.feeder(lineno, s, fn, statements)
    from bb.parse import ConfHandler
    return ConfHandler.feeder(lineno, s, fn, d)
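For reference, the directive forms the feeder above recognizes, written as they appear in recipe metadata (BitBake metadata, an illustrative sketch; the task and handler names are made up). The addtask line below leaves do_printdate with deps of [do_fetch] and postdeps of [do_build] in the older code path:

    addtask printdate after do_fetch before do_build
    addhandler my_event_handler
    inherit autotools

    python do_printdate () {
        import time
        print time.strftime('%Y%m%d', time.gmtime())
    }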

__pkgsplit_cache__={}
def vars_from_file(mypkg, d):
    if not mypkg:
        return (None, None, None)
    if mypkg in __pkgsplit_cache__:
        return __pkgsplit_cache__[mypkg]

    myfile = os.path.splitext(os.path.basename(mypkg))
    parts = myfile[0].split('_')
    __pkgsplit_cache__[mypkg] = parts
    if len(parts) > 3:
        raise ParseError("Unable to generate default variables from the filename: %s (too many underscores)" % mypkg)
    exp = 3 - len(parts)
    tmplist = []
    while exp != 0:
        exp -= 1
        tmplist.append(None)
    parts.extend(tmplist)
    return parts
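A standalone sketch of the filename split performed by vars_from_file above: the recipe name stem is split on underscores into up to three fields, conventionally used as (PN, PV, PR), and missing fields are padded with None. Function name and the second sample filename are hypothetical:

    import os

    def vars_from_filename(path):
        stem = os.path.splitext(os.path.basename(path))[0]
        parts = stem.split('_')
        if len(parts) > 3:
            raise ValueError("too many underscores: %s" % path)
        return parts + [None] * (3 - len(parts))

    print(vars_from_filename('recipes/zlib_1.2.3.bb'))
    # ['zlib', '1.2.3', None]
    print(vars_from_filename('busybox_1.13.2_r0.bb'))
    # ['busybox', '1.13.2', 'r0']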

def set_additional_vars(file, d, include):
    """Deduce rest of variables, e.g. ${A} out of ${SRC_URI}"""

    return
    # Nothing seems to use this variable
    #bb.msg.debug(2, bb.msg.domain.Parsing, "BB %s: set_additional_vars" % file)

    #src_uri = data.getVar('SRC_URI', d, 1)
    #if not src_uri:
    #    return

    #a = (data.getVar('A', d, 1) or '').split()

    #from bb import fetch
    #try:
    #    ud = fetch.init(src_uri.split(), d)
    #    a += fetch.localpaths(d, ud)
    #except fetch.NoMethodError:
    #    pass
    #except bb.MalformedUrl,e:
    #    raise ParseError("Unable to generate local paths for SRC_URI due to malformed uri: %s" % e)
    #del fetch

    #data.setVar('A', " ".join(a), d)


# Add us to the handlers list
from .. import handlers
from bb.parse import handlers
handlers.append({'supports': supports, 'handle': handle, 'init': init})
del handlers
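The registration at the bottom of the file uses the parser-plugin protocol: bb.parse keeps a list of handler dicts and dispatches each file to the first handler whose supports() claims it. A minimal sketch of that dispatch, with assumed names rather than the exact bb.parse internals:

    handlers = []

    def register(supports, handle, init):
        handlers.append({'supports': supports, 'handle': handle, 'init': init})

    def parse(fn, d):
        for h in handlers:
            if h['supports'](fn, d):
                return h['handle'](fn, d, 0)
        raise RuntimeError("not a BitBake file, cannot parse %s" % fn)

This is why BBHandler and ConfHandler can coexist: .conf files are claimed by ConfHandler, while .bb/.bbclass/.inc fall through to BBHandler.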
@@ -10,7 +10,7 @@

# Copyright (C) 2003, 2004  Chris Larson
# Copyright (C) 2003, 2004  Phil Blundell
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
@@ -24,70 +24,129 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import re, bb.data, os
import logging
import bb.utils
from bb.parse import ParseError, resolve_file, ast, logger
import re, bb.data, os, sys
from bb.parse import ParseError

#__config_regexp__ = re.compile( r"(?P<exp>export\s*)?(?P<var>[a-zA-Z0-9\-_+.${}]+)\s*(?P<colon>:)?(?P<ques>\?)?=\s*(?P<apo>['\"]?)(?P<value>.*)(?P=apo)$")
__config_regexp__ = re.compile( r"(?P<exp>export\s*)?(?P<var>[a-zA-Z0-9\-_+.${}/]+)(\[(?P<flag>[a-zA-Z0-9\-_+.]+)\])?\s*((?P<colon>:=)|(?P<lazyques>\?\?=)|(?P<ques>\?=)|(?P<append>\+=)|(?P<prepend>=\+)|(?P<predot>=\.)|(?P<postdot>\.=)|=)\s*(?P<apo>['\"]?)(?P<value>.*)(?P=apo)$")
__config_regexp__ = re.compile( r"(?P<exp>export\s*)?(?P<var>[a-zA-Z0-9\-_+.${}/]+)(\[(?P<flag>[a-zA-Z0-9\-_+.]+)\])?\s*((?P<colon>:=)|(?P<ques>\?=)|(?P<append>\+=)|(?P<prepend>=\+)|(?P<predot>=\.)|(?P<postdot>\.=)|=)\s*(?P<apo>['\"]?)(?P<value>.*)(?P=apo)$")
__include_regexp__ = re.compile( r"include\s+(.+)" )
__require_regexp__ = re.compile( r"require\s+(.+)" )
__export_regexp__ = re.compile( r"export\s+(.+)" )

def init(data):
    topdir = bb.data.getVar('TOPDIR', data)
    if not topdir:
    if not bb.data.getVar('TOPDIR', data):
        bb.data.setVar('TOPDIR', os.getcwd(), data)

    if not bb.data.getVar('BBPATH', data):
        bb.data.setVar('BBPATH', os.path.join(sys.prefix, 'share', 'bitbake'), data)

def supports(fn, d):
    return fn[-5:] == ".conf"
    return localpath(fn, d)[-5:] == ".conf"

def localpath(fn, d):
    if os.path.exists(fn):
        return fn

    if "://" not in fn:
        return fn

    localfn = None
    try:
        localfn = bb.fetch.localpath(fn, d, False)
    except bb.MalformedUrl:
        pass

    if not localfn:
        return fn
    return localfn

def obtain(fn, data):
    import sys, bb
    fn = bb.data.expand(fn, data)
    localfn = bb.data.expand(localpath(fn, data), data)

    if localfn != fn:
        dldir = bb.data.getVar('DL_DIR', data, 1)
        if not dldir:
            bb.msg.debug(1, bb.msg.domain.Parsing, "obtain: DL_DIR not defined")
            return localfn
        bb.mkdirhier(dldir)
        try:
            bb.fetch.init([fn], data)
        except bb.fetch.NoMethodError:
            (type, value, traceback) = sys.exc_info()
            bb.msg.debug(1, bb.msg.domain.Parsing, "obtain: no method: %s" % value)
            return localfn

        try:
            bb.fetch.go(data)
        except bb.fetch.MissingParameterError:
            (type, value, traceback) = sys.exc_info()
            bb.msg.debug(1, bb.msg.domain.Parsing, "obtain: missing parameters: %s" % value)
            return localfn
        except bb.fetch.FetchError:
            (type, value, traceback) = sys.exc_info()
            bb.msg.debug(1, bb.msg.domain.Parsing, "obtain: failed: %s" % value)
            return localfn
    return localfn


def include(oldfn, fn, data, error_out):
    """
    error_out If True a ParseError will be raised if the to be included
              config-files could not be included.

    error_out If True a ParseError will be reaised if the to be included
    """
    if oldfn == fn: # prevent infinite recursion
    if oldfn == fn: # prevent infinate recursion
        return None

    import bb
    fn = bb.data.expand(fn, data)
    oldfn = bb.data.expand(oldfn, data)

    if not os.path.isabs(fn):
        dname = os.path.dirname(oldfn)
        bbpath = "%s:%s" % (dname, bb.data.getVar("BBPATH", data, 1))
        abs_fn = bb.utils.which(bbpath, fn)
        if abs_fn:
            fn = abs_fn

    from bb.parse import handle
    try:
        ret = handle(fn, data, True)
    except IOError:
        if error_out:
            raise ParseError("Could not %(error_out)s file %(fn)s" % vars() )
        logger.debug(2, "CONF file '%s' not found", fn)
        bb.msg.debug(2, bb.msg.domain.Parsing, "CONF file '%s' not found" % fn)

def handle(fn, data, include):
def handle(fn, data, include = 0):
    if include:
        inc_string = "including"
    else:
        inc_string = "reading"
    init(data)

    if include == 0:
        bb.data.inheritFromOS(data)
        oldfile = None
    else:
        oldfile = bb.data.getVar('FILE', data)

    abs_fn = resolve_file(fn, data)
    f = open(abs_fn, 'r')
    fn = obtain(fn, data)
    if not os.path.isabs(fn):
        f = None
        bbpath = bb.data.getVar("BBPATH", data, 1) or []
        for p in bbpath.split(":"):
            currname = os.path.join(p, fn)
            if os.access(currname, os.R_OK):
                f = open(currname, 'r')
                abs_fn = currname
                bb.msg.debug(2, bb.msg.domain.Parsing, "CONF %s %s" % (inc_string, currname))
                break
        if f is None:
            raise IOError("file '%s' not found" % fn)
    else:
        f = open(fn,'r')
        bb.msg.debug(1, bb.msg.domain.Parsing, "CONF %s %s" % (inc_string,fn))
        abs_fn = fn

    if include:
        bb.parse.mark_dependency(data, abs_fn)

    statements = ast.StatementGroup()
    lineno = 0
    bb.data.setVar('FILE', fn, data)
    while True:
    while 1:
        lineno = lineno + 1
        s = f.readline()
        if not s: break
@@ -99,36 +158,66 @@ def handle(fn, data, include):
        s2 = f.readline()[:-1].strip()
        lineno = lineno + 1
        s = s[:-1] + s2
        feeder(lineno, s, fn, statements)
        feeder(lineno, s, fn, data)

    # DONE WITH PARSING... time to evaluate
    bb.data.setVar('FILE', fn, data)
    statements.eval(data)
    if oldfile:
        bb.data.setVar('FILE', oldfile, data)

    return data

def feeder(lineno, s, fn, statements):
def feeder(lineno, s, fn, data):
    def getFunc(groupd, key, data):
        if 'flag' in groupd and groupd['flag'] != None:
            return bb.data.getVarFlag(key, groupd['flag'], data)
        else:
            return bb.data.getVar(key, data)

    m = __config_regexp__.match(s)
    if m:
        groupd = m.groupdict()
        ast.handleData(statements, fn, lineno, groupd)
        key = groupd["var"]
        if "exp" in groupd and groupd["exp"] != None:
            bb.data.setVarFlag(key, "export", 1, data)
        if "ques" in groupd and groupd["ques"] != None:
            val = getFunc(groupd, key, data)
            if val == None:
                val = groupd["value"]
        elif "colon" in groupd and groupd["colon"] != None:
            e = data.createCopy()
            bb.data.update_data(e)
            val = bb.data.expand(groupd["value"], e)
        elif "append" in groupd and groupd["append"] != None:
            val = "%s %s" % ((getFunc(groupd, key, data) or ""), groupd["value"])
        elif "prepend" in groupd and groupd["prepend"] != None:
            val = "%s %s" % (groupd["value"], (getFunc(groupd, key, data) or ""))
        elif "postdot" in groupd and groupd["postdot"] != None:
            val = "%s%s" % ((getFunc(groupd, key, data) or ""), groupd["value"])
        elif "predot" in groupd and groupd["predot"] != None:
            val = "%s%s" % (groupd["value"], (getFunc(groupd, key, data) or ""))
        else:
            val = groupd["value"]
        if 'flag' in groupd and groupd['flag'] != None:
            bb.msg.debug(3, bb.msg.domain.Parsing, "setVarFlag(%s, %s, %s, data)" % (key, groupd['flag'], val))
            bb.data.setVarFlag(key, groupd['flag'], val, data)
        else:
            bb.data.setVar(key, val, data)
        return

    m = __include_regexp__.match(s)
    if m:
        ast.handleInclude(statements, fn, lineno, m, False)
        s = bb.data.expand(m.group(1), data)
        bb.msg.debug(3, bb.msg.domain.Parsing, "CONF %s:%d: including %s" % (fn, lineno, s))
        include(fn, s, data, False)
        return

    m = __require_regexp__.match(s)
    if m:
        ast.handleInclude(statements, fn, lineno, m, True)
        s = bb.data.expand(m.group(1), data)
        include(fn, s, data, "include required")
        return

    m = __export_regexp__.match(s)
    if m:
        ast.handleExport(statements, fn, lineno, m)
        bb.data.setVarFlag(m.group(1), "export", 1, data)
        return

    raise ParseError("%s:%d: unparsed line: '%s'" % (fn, lineno, s));
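For reference, the assignment operators matched by __config_regexp__ and interpreted in feeder() above, written as conf-file metadata (values shown assuming nothing else touches A; note the ??= lazy-default operator appears only in the newer regexp variant):

    A = "1"           # plain assignment
    A ?= "2"          # default; ignored here because A is already set
    A += "3"          # append with a space   -> "1 3"
    A =+ "0"          # prepend with a space  -> "0 1 3"
    A .= "4"          # append, no space      -> "0 1 34"
    A =. "9"          # prepend, no space     -> "90 1 34"
    B := "${A}"       # immediate expansion at parse time -> "90 1 34"
    A[doc] = "..."    # sets the 'doc' flag on A instead of its value
    export A          # marks A for export into the environment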
@@ -25,9 +25,9 @@ File parsers for the BitBake build tools.
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Based on functions from the base bb module, Copyright 2003 Holger Schurig

from __future__ import absolute_import
from . import ConfHandler
from . import BBHandler

__version__ = '1.0'

__all__ = [ 'ConfHandler', 'BBHandler']

import ConfHandler
import BBHandler
@@ -1,12 +1,6 @@
"""BitBake Persistent Data Store

Used to store data in a central location such that other threads/tasks can
access them at some future date.  Acts as a convenience wrapper around sqlite,
currently, providing a key/value store accessed by 'domain'.
"""

# BitBake Persistent Data Store
#
# Copyright (C) 2007        Richard Purdie
# Copyright (C) 2010        Chris Larson <chris_larson@mentor.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
@@ -21,174 +15,96 @@ currently, providing a key/value store accessed by 'domain'.
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import collections
import logging
import os.path
import sys
import warnings
import bb.msg, bb.data, bb.utils
import bb, os

try:
    import sqlite3
except ImportError:
    from pysqlite2 import dbapi2 as sqlite3
    try:
        from pysqlite2 import dbapi2 as sqlite3
    except ImportError:
        bb.msg.fatal(bb.msg.domain.PersistData, "Importing sqlite3 and pysqlite2 failed, please install one of them. Python 2.5 or a 'python-pysqlite2' like package is likely to be what you need.")

sqlversion = sqlite3.sqlite_version_info
if sqlversion[0] < 3 or (sqlversion[0] == 3 and sqlversion[1] < 3):
    raise Exception("sqlite3 version 3.3.0 or later is required.")
    bb.msg.fatal(bb.msg.domain.PersistData, "sqlite3 version 3.3.0 or later is required.")

class PersistData:
    """
    BitBake Persistent Data Store

logger = logging.getLogger("BitBake.PersistData")
    Used to store data in a central location such that other threads/tasks can
    access them at some future date.

    The "domain" is used as a key to isolate each data pool and in this
    implementation corresponds to an SQL table. The SQL table consists of a
    simple key and value pair.

class SQLTable(collections.MutableMapping):
    """Object representing a table/domain in the database"""
    def __init__(self, cursor, table):
        self.cursor = cursor
        self.table = table

        self._execute("CREATE TABLE IF NOT EXISTS %s(key TEXT, value TEXT);"
                      % table)

    def _execute(self, *query):
        """Execute a query, waiting to acquire a lock if necessary"""
        count = 0
        while True:
            try:
                return self.cursor.execute(*query)
            except sqlite3.OperationalError as exc:
                if 'database is locked' in str(exc) and count < 500:
                    count = count + 1
                    continue
                raise

    def __getitem__(self, key):
        data = self._execute("SELECT * from %s where key=?;" %
                             self.table, [key])
        for row in data:
            return row[1]

    def __delitem__(self, key):
        self._execute("DELETE from %s where key=?;" % self.table, [key])

    def __setitem__(self, key, value):
        data = self._execute("SELECT * from %s where key=?;" %
                             self.table, [key])
        exists = len(list(data))
        if exists:
            self._execute("UPDATE %s SET value=? WHERE key=?;" % self.table,
                          [value, key])
        else:
            self._execute("INSERT into %s(key, value) values (?, ?);" %
                          self.table, [key, value])

    def __contains__(self, key):
        return key in set(self)

    def __len__(self):
        data = self._execute("SELECT COUNT(key) FROM %s;" % self.table)
        for row in data:
            return row[0]

    def __iter__(self):
        data = self._execute("SELECT key FROM %s;" % self.table)
        for row in data:
            yield row[0]

    def iteritems(self):
        data = self._execute("SELECT * FROM %s;" % self.table)
        for row in data:
            yield row[0], row[1]

    def itervalues(self):
        data = self._execute("SELECT value FROM %s;" % self.table)
        for row in data:
            yield row[0]


class SQLData(object):
    """Object representing the persistent data"""
    def __init__(self, filename):
        bb.utils.mkdirhier(os.path.dirname(filename))

        self.filename = filename
        self.connection = sqlite3.connect(filename, timeout=30,
                                          isolation_level=None)
        self.cursor = self.connection.cursor()
        self._tables = {}

    def __getitem__(self, table):
        if not isinstance(table, basestring):
            raise TypeError("table argument must be a string, not '%s'" %
                            type(table))

        if table in self._tables:
            return self._tables[table]
        else:
            tableobj = self._tables[table] = SQLTable(self.cursor, table)
            return tableobj

    def __delitem__(self, table):
        if table in self._tables:
            del self._tables[table]
        self.cursor.execute("DROP TABLE IF EXISTS %s;" % table)


class PersistData(object):
    """Deprecated representation of the bitbake persistent data store"""
    Why sqlite? It handles all the locking issues for us.
    """
    def __init__(self, d):
        warnings.warn("Use of PersistData will be deprecated in the future",
                      category=PendingDeprecationWarning,
                      stacklevel=2)
        self.cachedir = bb.data.getVar("CACHE", d, True)
        if self.cachedir in [None, '']:
            bb.msg.fatal(bb.msg.domain.PersistData, "Please set the 'CACHE' variable.")
        try:
            os.stat(self.cachedir)
        except OSError:
            bb.mkdirhier(self.cachedir)

        self.data = persist(d)
        logger.debug(1, "Using '%s' as the persistent data cache",
                     self.data.filename)
        self.cachefile = os.path.join(self.cachedir,"bb_persist_data.sqlite3")
        bb.msg.debug(1, bb.msg.domain.PersistData, "Using '%s' as the persistent data cache" % self.cachefile)

        self.connection = sqlite3.connect(self.cachefile, timeout=5, isolation_level=None)

    def addDomain(self, domain):
        """
        Add a domain (pending deprecation)
        Should be called before any domain is used
        Creates it if it doesn't exist.
        """
        return self.data[domain]
        self.connection.execute("CREATE TABLE IF NOT EXISTS %s(key TEXT, value TEXT);" % domain)

    def delDomain(self, domain):
        """
        Removes a domain and all the data it contains
        """
        del self.data[domain]

    def getKeyValues(self, domain):
        """
        Return a list of key + value pairs for a domain
        """
        return self.data[domain].items()
        self.connection.execute("DROP TABLE IF EXISTS %s;" % domain)

    def getValue(self, domain, key):
        """
        Return the value of a key for a domain
        """
        return self.data[domain][key]
        data = self.connection.execute("SELECT * from %s where key=?;" % domain, [key])
        for row in data:
            return row[1]

    def setValue(self, domain, key, value):
        """
        Sets the value of a key for a domain
        """
        self.data[domain][key] = value
        data = self.connection.execute("SELECT * from %s where key=?;" % domain, [key])
        rows = 0
        for row in data:
            rows = rows + 1
        if rows:
            self._execute("UPDATE %s SET value=? WHERE key=?;" % domain, [value, key])
        else:
            self._execute("INSERT into %s(key, value) values (?, ?);" % domain, [key, value])

    def delValue(self, domain, key):
        """
        Deletes a key/value pair
        """
        del self.data[domain][key]
        self._execute("DELETE from %s where key=?;" % domain, [key])

    def _execute(self, *query):
        while True:
            try:
                self.connection.execute(*query)
                return
            except sqlite3.OperationalError, e:
                if 'database is locked' in str(e):
                    continue
                raise


def persist(d):
    """Convenience factory for construction of SQLData based upon metadata"""
    cachedir = (bb.data.getVar("PERSISTENT_DIR", d, True) or
                bb.data.getVar("CACHE", d, True))
    if not cachedir:
        logger.critical("Please set the 'PERSISTENT_DIR' or 'CACHE' variable")
        sys.exit(1)

    cachefile = os.path.join(cachedir, "bb_persist_data.sqlite3")
    return SQLData(cachefile)
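A usage sketch for the newer store above: persist() returns an SQLData object, indexing it by domain name yields an SQLTable, and the table behaves like a dict backed by sqlite. Here d is assumed to be a datastore with PERSISTENT_DIR or CACHE set, and 'PR' is just an example domain name:

    from bb import persist_data

    data = persist_data.persist(d)
    seen = data["PR"]                  # SQLTable, dict-like
    seen["zlib-1.2.3-r0"] = "1"
    assert "zlib-1.2.3-r0" in seen
    for key, value in seen.iteritems():
        print(key, value)

Using sqlite for this trades speed for safety: as the old docstring notes, the database handles cross-process locking, which is why _execute retries while the database is locked.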
@@ -1,109 +0,0 @@
import logging
import signal
import subprocess

logger = logging.getLogger('BitBake.Process')

def subprocess_setup():
    # Python installs a SIGPIPE handler by default. This is usually not what
    # non-Python subprocesses expect.
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)

class CmdError(RuntimeError):
    def __init__(self, command, msg=None):
        self.command = command
        self.msg = msg

    def __str__(self):
        if not isinstance(self.command, basestring):
            cmd = subprocess.list2cmdline(self.command)
        else:
            cmd = self.command

        msg = "Execution of '%s' failed" % cmd
        if self.msg:
            msg += ': %s' % self.msg
        return msg

class NotFoundError(CmdError):
    def __str__(self):
        return CmdError.__str__(self) + ": command not found"

class ExecutionError(CmdError):
    def __init__(self, command, exitcode, stdout = None, stderr = None):
        CmdError.__init__(self, command)
        self.exitcode = exitcode
        self.stdout = stdout
        self.stderr = stderr

    def __str__(self):
        message = ""
        if self.stderr:
            message += self.stderr
        if self.stdout:
            message += self.stdout
        if message:
            message = ":\n" + message
        return (CmdError.__str__(self) +
                " with exit code %s" % self.exitcode + message)

class Popen(subprocess.Popen):
    defaults = {
        "close_fds": True,
        "preexec_fn": subprocess_setup,
        "stdout": subprocess.PIPE,
        "stderr": subprocess.STDOUT,
        "stdin": subprocess.PIPE,
        "shell": False,
    }

    def __init__(self, *args, **kwargs):
        options = dict(self.defaults)
        options.update(kwargs)
        subprocess.Popen.__init__(self, *args, **options)

def _logged_communicate(pipe, log, input):
    if pipe.stdin:
        if input is not None:
            pipe.stdin.write(input)
        pipe.stdin.close()

    bufsize = 512
    outdata, errdata = [], []
    while pipe.poll() is None:
        if pipe.stdout is not None:
            data = pipe.stdout.read(bufsize)
            if data is not None:
                outdata.append(data)
                log.write(data)

        if pipe.stderr is not None:
            data = pipe.stderr.read(bufsize)
            if data is not None:
                errdata.append(data)
                log.write(data)
    return ''.join(outdata), ''.join(errdata)

def run(cmd, input=None, log=None, **options):
    """Convenience function to run a command and return its output, raising an
    exception when the command fails"""

    if isinstance(cmd, basestring) and not "shell" in options:
        options["shell"] = True

    try:
        pipe = Popen(cmd, **options)
    except OSError, exc:
        if exc.errno == 2:
            raise NotFoundError(cmd)
        else:
            raise CmdError(cmd, exc)

    if log:
        stdout, stderr = _logged_communicate(pipe, log, input)
    else:
        stdout, stderr = pipe.communicate(input)

    if pipe.returncode != 0:
        raise ExecutionError(cmd, pipe.returncode, stdout, stderr)
    return stdout, stderr
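A usage sketch for the run() helper above: string commands run through the shell, argv lists bypass it, and a non-zero exit status raises ExecutionError carrying the captured output:

    from bb import process

    stdout, stderr = process.run("echo hello")     # shell=True implied
    stdout, stderr = process.run(["uname", "-m"])  # argv form, no shell
    try:
        process.run("false")
    except process.ExecutionError as exc:
        print("failed with exit code %s" % exc.exitcode)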
@@ -21,13 +21,10 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import re
import logging
import os, re
from bb import data, utils
import bb

logger = logging.getLogger("BitBake.Provider")

class NoProvider(Exception):
    """Exception raised when no provider of a build dependency can be found"""

@@ -53,27 +50,19 @@ def sortPriorities(pn, dataCache, pkg_pn = None):
        if preference not in priorities[priority]:
            priorities[priority][preference] = []
        priorities[priority][preference].append(f)
    pri_list = priorities.keys()
    pri_list.sort(lambda a, b: a - b)
    tmp_pn = []
    for pri in sorted(priorities, lambda a, b: a - b):
    for pri in pri_list:
        pref_list = priorities[pri].keys()
        pref_list.sort(lambda a, b: b - a)
        tmp_pref = []
        for pref in sorted(priorities[pri], lambda a, b: b - a):
        for pref in pref_list:
            tmp_pref.extend(priorities[pri][pref])
        tmp_pn = [tmp_pref] + tmp_pn

    return tmp_pn

def preferredVersionMatch(pe, pv, pr, preferred_e, preferred_v, preferred_r):
    """
    Check if the version pe,pv,pr is the preferred one.
    If there is preferred version defined and ends with '%', then pv has to start with that version after removing the '%'
    """
    if (pr == preferred_r or preferred_r == None):
        if (pe == preferred_e or preferred_e == None):
            if preferred_v == pv:
                return True
            if preferred_v != None and preferred_v.endswith('%') and pv.startswith(preferred_v[:len(preferred_v)-1]):
                return True
    return False

def findPreferredProvider(pn, cfgData, dataCache, pkg_pn = None, item = None):
    """
@@ -106,8 +95,8 @@ def findPreferredProvider(pn, cfgData, dataCache, pkg_pn = None, item = None):

    for file_set in pkg_pn:
        for f in file_set:
            pe, pv, pr = dataCache.pkg_pepvpr[f]
            if preferredVersionMatch(pe, pv, pr, preferred_e, preferred_v, preferred_r):
            pe,pv,pr = dataCache.pkg_pepvpr[f]
            if preferred_v == pv and (preferred_r == pr or preferred_r == None) and (preferred_e == pe or preferred_e == None):
                preferred_file = f
                preferred_ver = (pe, pv, pr)
                break
@@ -123,9 +112,9 @@ def findPreferredProvider(pn, cfgData, dataCache, pkg_pn = None, item = None):
        if item:
            itemstr = " (for item %s)" % item
        if preferred_file is None:
            logger.info("preferred version %s of %s not available%s", pv_str, pn, itemstr)
            bb.msg.note(1, bb.msg.domain.Provider, "preferred version %s of %s not available%s" % (pv_str, pn, itemstr))
        else:
            logger.debug(1, "selecting %s as PREFERRED_VERSION %s of package %s%s", preferred_file, pv_str, pn, itemstr)
            bb.msg.debug(1, bb.msg.domain.Provider, "selecting %s as PREFERRED_VERSION %s of package %s%s" % (preferred_file, pv_str, pn, itemstr))

    return (preferred_ver, preferred_file)

@@ -139,7 +128,7 @@ def findLatestProvider(pn, cfgData, dataCache, file_set):
    latest_p = 0
    latest_f = None
    for file_name in file_set:
        pe, pv, pr = dataCache.pkg_pepvpr[file_name]
        pe,pv,pr = dataCache.pkg_pepvpr[file_name]
        dp = dataCache.pkg_dp[file_name]

        if (latest is None) or ((latest_p == dp) and (utils.vercmp(latest, (pe, pv, pr)) < 0)) or (dp > latest_p):
@@ -172,14 +161,14 @@ def findBestProvider(pn, cfgData, dataCache, pkg_pn = None, item = None):

def _filterProviders(providers, item, cfgData, dataCache):
    """
    Take a list of providers and filter/reorder according to the
    Take a list of providers and filter/reorder according to the
    environment variables and previous build results
    """
    eligible = []
    preferred_versions = {}
    sortpkg_pn = {}

    # The order of providers depends on the order of the files on the disk
    # The order of providers depends on the order of the files on the disk
    # up to here. Sort pkg_pn to make dependency issues reproducible rather
    # than effectively random.
    providers.sort()
@@ -192,24 +181,24 @@ def _filterProviders(providers, item, cfgData, dataCache):
            pkg_pn[pn] = []
        pkg_pn[pn].append(p)

    logger.debug(1, "providers for %s are: %s", item, pkg_pn.keys())
    bb.msg.debug(1, bb.msg.domain.Provider, "providers for %s are: %s" % (item, pkg_pn.keys()))

    # First add PREFERRED_VERSIONS
    for pn in pkg_pn:
    for pn in pkg_pn.keys():
        sortpkg_pn[pn] = sortPriorities(pn, dataCache, pkg_pn)
        preferred_versions[pn] = findPreferredProvider(pn, cfgData, dataCache, sortpkg_pn[pn], item)
        if preferred_versions[pn][1]:
            eligible.append(preferred_versions[pn][1])

    # Now add latest versions
    for pn in sortpkg_pn:
    # Now add latest verisons
    for pn in pkg_pn.keys():
        if pn in preferred_versions and preferred_versions[pn][1]:
            continue
        preferred_versions[pn] = findLatestProvider(pn, cfgData, dataCache, sortpkg_pn[pn][0])
        eligible.append(preferred_versions[pn][1])

    if len(eligible) == 0:
        logger.error("no eligible providers for %s", item)
        bb.msg.error(bb.msg.domain.Provider, "no eligible providers for %s" % item)
        return 0

    # If pn == item, give it a slight default preference
@@ -224,12 +213,40 @@ def _filterProviders(providers, item, cfgData, dataCache):
            eligible.remove(fn)
            eligible = [fn] + eligible

    # look to see if one of them is already staged, or marked as preferred.
    # if so, bump it to the head of the queue
    for p in providers:
        pn = dataCache.pkg_fn[p]
        pe, pv, pr = dataCache.pkg_pepvpr[p]

        stamp = '%s.do_populate_staging' % dataCache.stamp[p]
        if os.path.exists(stamp):
            (newvers, fn) = preferred_versions[pn]
            if not fn in eligible:
                # package was made ineligible by already-failed check
                continue
            oldver = "%s-%s" % (pv, pr)
            if pe > 0:
                oldver = "%s:%s" % (pe, oldver)
            newver = "%s-%s" % (newvers[1], newvers[2])
            if newvers[0] > 0:
                newver = "%s:%s" % (newvers[0], newver)
            if (newver != oldver):
                extra_chat = "%s (%s) already staged but upgrading to %s to satisfy %s" % (pn, oldver, newver, item)
            else:
                extra_chat = "Selecting already-staged %s (%s) to satisfy %s" % (pn, oldver, item)

            bb.msg.note(2, bb.msg.domain.Provider, "%s" % extra_chat)
            eligible.remove(fn)
            eligible = [fn] + eligible
            break

    return eligible


def filterProviders(providers, item, cfgData, dataCache):
    """
    Take a list of providers and filter/reorder according to the
    Take a list of providers and filter/reorder according to the
    environment variables and previous build results
    Takes a "normal" target item
    """
@@ -245,19 +262,19 @@ def filterProviders(providers, item, cfgData, dataCache):
    for p in eligible:
        pn = dataCache.pkg_fn[p]
        if dataCache.preferred[item] == pn:
            logger.verbose("selecting %s to satisfy %s due to PREFERRED_PROVIDERS", pn, item)
            bb.msg.note(2, bb.msg.domain.Provider, "selecting %s to satisfy %s due to PREFERRED_PROVIDERS" % (pn, item))
            eligible.remove(p)
            eligible = [p] + eligible
            foundUnique = True
            break

    logger.debug(1, "sorted providers for %s are: %s", item, eligible)
    bb.msg.debug(1, bb.msg.domain.Provider, "sorted providers for %s are: %s" % (item, eligible))

    return eligible, foundUnique

def filterProvidersRunTime(providers, item, cfgData, dataCache):
    """
    Take a list of providers and filter/reorder according to the
    Take a list of providers and filter/reorder according to the
    environment variables and previous build results
    Takes a "runtime" target item
    """
@@ -266,37 +283,24 @@ def filterProvidersRunTime(providers, item, cfgData, dataCache):

    # Should use dataCache.preferred here?
    preferred = []
    preferred_vars = []
    pns = {}
    for p in eligible:
        pns[dataCache.pkg_fn[p]] = p
    for p in eligible:
        pn = dataCache.pkg_fn[p]
        provides = dataCache.pn_provides[pn]
        for provide in provides:
            prefervar = bb.data.getVar('PREFERRED_PROVIDER_%s' % provide, cfgData, 1)
            logger.verbose("checking PREFERRED_PROVIDER_%s (value %s) against %s", provide, prefervar, pns.keys())
            if prefervar in pns and pns[prefervar] not in preferred:
                var = "PREFERRED_PROVIDER_%s = %s" % (provide, prefervar)
                logger.verbose("selecting %s to satisfy runtime %s due to %s", prefervar, item, var)
                preferred_vars.append(var)
                pref = pns[prefervar]
                eligible.remove(pref)
                eligible = [pref] + eligible
                preferred.append(pref)
            if prefervar == pn:
                bb.msg.note(2, bb.msg.domain.Provider, "selecting %s to satisfy runtime %s due to PREFERRED_PROVIDERS" % (pn, item))
                eligible.remove(p)
                eligible = [p] + eligible
                preferred.append(p)
                break

    numberPreferred = len(preferred)

    if numberPreferred > 1:
        logger.error("Trying to resolve runtime dependency %s resulted in conflicting PREFERRED_PROVIDER entries being found.\nThe providers found were: %s\nThe PREFERRED_PROVIDER entries resulting in this conflict were: %s", item, preferred, preferred_vars)

    logger.debug(1, "sorted providers for %s are: %s", item, eligible)
    bb.msg.debug(1, bb.msg.domain.Provider, "sorted providers for %s are: %s" % (item, eligible))

    return eligible, numberPreferred

regexp_cache = {}

def getRuntimeProviders(dataCache, rdepend):
    """
    Return any providers of runtime dependency
@@ -304,7 +308,7 @@ def getRuntimeProviders(dataCache, rdepend):
    rproviders = []

    if rdepend in dataCache.rproviders:
        rproviders += dataCache.rproviders[rdepend]
        rproviders += dataCache.rproviders[rdepend]

    if rdepend in dataCache.packages:
        rproviders += dataCache.packages[rdepend]
@@ -314,16 +318,7 @@ def getRuntimeProviders(dataCache, rdepend):

    # Only search dynamic packages if we can't find anything in other variables
    for pattern in dataCache.packages_dynamic:
        pattern = pattern.replace('+', "\+")
        if pattern in regexp_cache:
            regexp = regexp_cache[pattern]
        else:
            try:
                regexp = re.compile(pattern)
            except:
                logger.error("Error parsing regular expression '%s'", pattern)
                raise
            regexp_cache[pattern] = regexp
        regexp = re.compile(pattern)
        if regexp.match(rdepend):
            rproviders += dataCache.packages_dynamic[pattern]


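The preferredVersionMatch() helper introduced above accepts a '%' suffix in PREFERRED_VERSION values as a prefix wildcard. A standalone sketch of just that version comparison:

    def version_match(pv, preferred_v):
        """PREFERRED_VERSION entries may end in '%' to match any suffix."""
        if preferred_v is None or preferred_v == pv:
            return True
        if preferred_v.endswith('%'):
            return pv.startswith(preferred_v[:-1])
        return False

    print(version_match("2.6.34.2", "2.6.34%"))  # True
    print(version_match("2.6.35", "2.6.34%"))    # False

So a configuration line such as PREFERRED_VERSION_linux-yocto = "2.6.34%" keeps matching as the recipe picks up point releases.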
@@ -1,710 +0,0 @@
|
||||
# builtin.py - builtins and utilities definitions for pysh.
|
||||
#
|
||||
# Copyright 2007 Patrick Mezard
|
||||
#
|
||||
# This software may be used and distributed according to the terms
|
||||
# of the GNU General Public License, incorporated herein by reference.
|
||||
|
||||
"""Builtin and internal utilities implementations.
|
||||
|
||||
- Beware not to use python interpreter environment as if it were the shell
|
||||
environment. For instance, commands working directory must be explicitely handled
|
||||
through env['PWD'] instead of relying on python working directory.
|
||||
"""
|
||||
import errno
|
||||
import optparse
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
|
||||
def has_subprocess_bug():
|
||||
return getattr(subprocess, 'list2cmdline') and \
|
||||
( subprocess.list2cmdline(['']) == '' or \
|
||||
subprocess.list2cmdline(['foo|bar']) == 'foo|bar')
|
||||
|
||||
# Detect python bug 1634343: "subprocess swallows empty arguments under win32"
|
||||
# <http://sourceforge.net/tracker/index.php?func=detail&aid=1634343&group_id=5470&atid=105470>
|
||||
# Also detect: "[ 1710802 ] subprocess must escape redirection characters under win32"
|
||||
# <http://sourceforge.net/tracker/index.php?func=detail&aid=1710802&group_id=5470&atid=105470>
|
||||
if has_subprocess_bug():
|
||||
import subprocess_fix
|
||||
subprocess.list2cmdline = subprocess_fix.list2cmdline
|
||||
|
||||
from sherrors import *
|
||||
|
||||
class NonExitingParser(optparse.OptionParser):
|
||||
"""OptionParser default behaviour upon error is to print the error message and
|
||||
exit. Raise a utility error instead.
|
||||
"""
|
||||
def error(self, msg):
|
||||
raise UtilityError(msg)
|
||||
|
||||
#-------------------------------------------------------------------------------
|
||||
# set special builtin
|
||||
#-------------------------------------------------------------------------------
|
||||
OPT_SET = NonExitingParser(usage="set - set or unset options and positional parameters")
|
||||
OPT_SET.add_option( '-f', action='store_true', dest='has_f', default=False,
|
||||
help='The shell shall disable pathname expansion.')
|
||||
OPT_SET.add_option('-e', action='store_true', dest='has_e', default=False,
|
||||
help="""When this option is on, if a simple command fails for any of the \
|
||||
reasons listed in Consequences of Shell Errors or returns an exit status \
|
||||
value >0, and is not part of the compound list following a while, until, \
|
||||
or if keyword, and is not a part of an AND or OR list, and is not a \
|
||||
pipeline preceded by the ! reserved word, then the shell shall immediately \
|
||||
exit.""")
|
||||
OPT_SET.add_option('-x', action='store_true', dest='has_x', default=False,
|
||||
help="""The shell shall write to standard error a trace for each command \
|
||||
after it expands the command and before it executes it. It is unspecified \
|
||||
whether the command that turns tracing off is traced.""")
|
||||
|
||||
def builtin_set(name, args, interp, env, stdin, stdout, stderr, debugflags):
|
||||
if 'debug-utility' in debugflags:
|
||||
print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')
|
||||
|
||||
option, args = OPT_SET.parse_args(args)
|
||||
env = interp.get_env()
|
||||
|
||||
if option.has_f:
|
||||
env.set_opt('-f')
|
||||
if option.has_e:
|
||||
env.set_opt('-e')
|
||||
if option.has_x:
|
||||
env.set_opt('-x')
|
||||
return 0
|
||||
|
||||
#-------------------------------------------------------------------------------
|
||||
# shift special builtin
|
||||
#-------------------------------------------------------------------------------
|
||||
def builtin_shift(name, args, interp, env, stdin, stdout, stderr, debugflags):
|
||||
if 'debug-utility' in debugflags:
|
||||
print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')
|
||||
|
||||
params = interp.get_env().get_positional_args()
|
||||
if args:
|
||||
try:
|
||||
n = int(args[0])
|
||||
if n > len(params):
|
||||
raise ValueError()
|
||||
except ValueError:
|
||||
return 1
|
||||
else:
|
||||
n = 1
|
||||
|
||||
params[:n] = []
|
||||
interp.get_env().set_positional_args(params)
|
||||
return 0
|
||||
|
||||
#-------------------------------------------------------------------------------
|
||||
# export special builtin
|
||||
#-------------------------------------------------------------------------------
|
||||
OPT_EXPORT = NonExitingParser(usage="set - set or unset options and positional parameters")
|
||||
OPT_EXPORT.add_option('-p', action='store_true', dest='has_p', default=False)
|
||||
|
||||
def builtin_export(name, args, interp, env, stdin, stdout, stderr, debugflags):
|
||||
if 'debug-utility' in debugflags:
|
||||
print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')
|
||||
|
||||
option, args = OPT_EXPORT.parse_args(args)
|
||||
if option.has_p:
|
||||
raise NotImplementedError()
|
||||
|
||||
for arg in args:
|
||||
try:
|
||||
name, value = arg.split('=', 1)
|
||||
except ValueError:
|
||||
name, value = arg, None
|
||||
env = interp.get_env().export(name, value)
|
||||
|
||||
return 0
|
||||
|
||||
#-------------------------------------------------------------------------------
|
||||
# return special builtin
|
||||
#-------------------------------------------------------------------------------
|
||||
def builtin_return(name, args, interp, env, stdin, stdout, stderr, debugflags):
|
||||
if 'debug-utility' in debugflags:
|
||||
print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')
|
||||
res = 0
|
||||
if args:
|
||||
try:
|
||||
res = int(args[0])
|
||||
except ValueError:
|
||||
res = 0
|
||||
if not 0<=res<=255:
|
||||
res = 0
|
||||
|
||||
# BUG: should be last executed command exit code
|
||||
raise ReturnSignal(res)
|
||||
|
||||
#-------------------------------------------------------------------------------
|
||||
# trap special builtin
|
||||
#-------------------------------------------------------------------------------
|
||||
def builtin_trap(name, args, interp, env, stdin, stdout, stderr, debugflags):
|
||||
if 'debug-utility' in debugflags:
|
||||
print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')
|
||||
if len(args) < 2:
|
||||
stderr.write('trap: usage: trap [[arg] signal_spec ...]\n')
|
||||
return 2
|
||||
|
||||
action = args[0]
|
||||
for sig in args[1:]:
|
||||
try:
|
||||
env.traps[sig] = action
|
||||
except Exception, e:
|
||||
stderr.write('trap: %s\n' % str(e))
|
||||
return 0

#-------------------------------------------------------------------------------
# unset special builtin
#-------------------------------------------------------------------------------
OPT_UNSET = NonExitingParser("unset - unset values and attributes of variables and functions")
OPT_UNSET.add_option('-f', action='store_true', dest='has_f', default=False)
OPT_UNSET.add_option('-v', action='store_true', dest='has_v', default=False)

def builtin_unset(name, args, interp, env, stdin, stdout, stderr, debugflags):
    if 'debug-utility' in debugflags:
        print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')

    option, args = OPT_UNSET.parse_args(args)

    status = 0
    env = interp.get_env()
    for arg in args:
        try:
            if option.has_f:
                env.remove_function(arg)
            else:
                del env[arg]
        except KeyError:
            pass
        except VarAssignmentError:
            status = 1

    return status

#-------------------------------------------------------------------------------
# wait special builtin
#-------------------------------------------------------------------------------
def builtin_wait(name, args, interp, env, stdin, stdout, stderr, debugflags):
    if 'debug-utility' in debugflags:
        print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')

    return interp.wait([int(arg) for arg in args])

#-------------------------------------------------------------------------------
# cat utility
#-------------------------------------------------------------------------------
def utility_cat(name, args, interp, env, stdin, stdout, stderr, debugflags):
    if 'debug-utility' in debugflags:
        print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')

    if not args:
        args = ['-']

    status = 0
    for arg in args:
        if arg == '-':
            data = stdin.read()
        else:
            path = os.path.join(env['PWD'], arg)
            try:
                f = file(path, 'rb')
                try:
                    data = f.read()
                finally:
                    f.close()
            except IOError, e:
                if e.errno != errno.ENOENT:
                    raise
                status = 1
                continue
        stdout.write(data)
        stdout.flush()
    return status

#-------------------------------------------------------------------------------
# cd utility
#-------------------------------------------------------------------------------
OPT_CD = NonExitingParser("cd - change the working directory")

def utility_cd(name, args, interp, env, stdin, stdout, stderr, debugflags):
    if 'debug-utility' in debugflags:
        print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')

    option, args = OPT_CD.parse_args(args)
    env = interp.get_env()

    directory = None
    printdir = False
    if not args:
        home = env.get('HOME')
        if home is None:
            # HOME is unset, behaviour is unspecified: do nothing
            return 0
        else:
            directory = home
    elif len(args)==1:
        directory = args[0]
        if directory=='-':
            if 'OLDPWD' not in env:
                raise UtilityError("OLDPWD not set")
            printdir = True
            directory = env['OLDPWD']
    else:
        raise UtilityError("too many arguments")

    curpath = None
    # Absolute directories will be handled correctly by the os.path.join call.
    if not directory.startswith('.') and not directory.startswith('..'):
        cdpaths = env.get('CDPATH', '.').split(';')
        for cdpath in cdpaths:
            p = os.path.join(cdpath, directory)
            if os.path.isdir(p):
                curpath = p
                break

    if curpath is None:
        curpath = directory
    # Join with the resolved path, not the raw argument, so that a CDPATH
    # match is not discarded.
    curpath = os.path.join(env['PWD'], curpath)

    env['OLDPWD'] = env['PWD']
    env['PWD'] = curpath
    if printdir:
        stdout.write('%s\n' % curpath)
    return 0
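
# Sketch of the os.path.join property the comment above relies on (POSIX
# semantics; not part of the original module): an absolute second argument
# replaces the first, a relative one is appended.
#
#     import os.path
#     assert os.path.join('/home/user', '/tmp') == '/tmp'
#     assert os.path.join('/home/user', 'src') == '/home/user/src'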

#-------------------------------------------------------------------------------
# colon utility
#-------------------------------------------------------------------------------
def utility_colon(name, args, interp, env, stdin, stdout, stderr, debugflags):
    if 'debug-utility' in debugflags:
        print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')
    return 0

#-------------------------------------------------------------------------------
# echo utility
#-------------------------------------------------------------------------------
def utility_echo(name, args, interp, env, stdin, stdout, stderr, debugflags):
    if 'debug-utility' in debugflags:
        print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')

    # Echo only takes arguments, no options. Use printf if you need fancy stuff.
    output = ' '.join(args) + '\n'
    stdout.write(output)
    stdout.flush()
    return 0

#-------------------------------------------------------------------------------
# egrep utility
#-------------------------------------------------------------------------------
# egrep is usually a shell script.
# Unfortunately, pysh does not support shell scripts *with arguments* right now,
# so the redirection is implemented here, assuming grep is available.
def utility_egrep(name, args, interp, env, stdin, stdout, stderr, debugflags):
    if 'debug-utility' in debugflags:
        print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')

    return run_command('grep', ['-E'] + args, interp, env, stdin, stdout,
                       stderr, debugflags)

#-------------------------------------------------------------------------------
# env utility
#-------------------------------------------------------------------------------
def utility_env(name, args, interp, env, stdin, stdout, stderr, debugflags):
    if 'debug-utility' in debugflags:
        print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')

    if args and args[0]=='-i':
        raise NotImplementedError('env: -i option is not implemented')

    i = 0
    for arg in args:
        if '=' not in arg:
            break
        # Update the current environment
        name, value = arg.split('=', 1)
        env[name] = value
        i += 1

    if args[i:]:
        # Find then execute the specified interpreter
        utility = env.find_in_path(args[i])
        if not utility:
            return 127
        args[i:i+1] = utility
        name = args[i]
        args = args[i+1:]
        try:
            return run_command(name, args, interp, env, stdin, stdout, stderr,
                               debugflags)
        except UtilityError:
            stderr.write('env: failed to execute %s' % ' '.join([name]+args))
            return 126
    else:
        for pair in env.get_variables().iteritems():
            stdout.write('%s=%s\n' % pair)
        return 0

#-------------------------------------------------------------------------------
# exit utility
#-------------------------------------------------------------------------------
def utility_exit(name, args, interp, env, stdin, stdout, stderr, debugflags):
    if 'debug-utility' in debugflags:
        print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')

    res = None
    if args:
        try:
            res = int(args[0])
        except ValueError:
            res = None
        if not 0<=res<=255:
            res = None

    if res is None:
        # BUG: should be last executed command exit code
        res = 0

    raise ExitSignal(res)

#-------------------------------------------------------------------------------
# fgrep utility
#-------------------------------------------------------------------------------
# see egrep
def utility_fgrep(name, args, interp, env, stdin, stdout, stderr, debugflags):
    if 'debug-utility' in debugflags:
        print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')

    return run_command('grep', ['-F'] + args, interp, env, stdin, stdout,
                       stderr, debugflags)

#-------------------------------------------------------------------------------
# gunzip utility
#-------------------------------------------------------------------------------
# see egrep
def utility_gunzip(name, args, interp, env, stdin, stdout, stderr, debugflags):
    if 'debug-utility' in debugflags:
        print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')

    return run_command('gzip', ['-d'] + args, interp, env, stdin, stdout,
                       stderr, debugflags)

#-------------------------------------------------------------------------------
# kill utility
#-------------------------------------------------------------------------------
def utility_kill(name, args, interp, env, stdin, stdout, stderr, debugflags):
    if 'debug-utility' in debugflags:
        print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')

    for arg in args:
        pid = int(arg)
        status = subprocess.call(['pskill', '/T', str(pid)],
                                 shell=True,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
        # pskill is asynchronous, hence the stupid polling loop
        while 1:
            p = subprocess.Popen(['pslist', str(pid)],
                                 shell=True,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT)
            output = p.communicate()[0]
            if ('process %d was not' % pid) in output:
                break
            time.sleep(1)
    return status

#-------------------------------------------------------------------------------
# mkdir utility
#-------------------------------------------------------------------------------
OPT_MKDIR = NonExitingParser("mkdir - make directories.")
OPT_MKDIR.add_option('-p', action='store_true', dest='has_p', default=False)

def utility_mkdir(name, args, interp, env, stdin, stdout, stderr, debugflags):
    if 'debug-utility' in debugflags:
        print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')

    # TODO: implement umask
    # TODO: implement proper utility error report
    option, args = OPT_MKDIR.parse_args(args)
    for arg in args:
        path = os.path.join(env['PWD'], arg)
        if option.has_p:
            try:
                os.makedirs(path)
            except OSError, e:
                # os.makedirs raises OSError (not IOError) on existing paths
                if e.errno != errno.EEXIST:
                    raise
        else:
            os.mkdir(path)
    return 0

#-------------------------------------------------------------------------------
# netstat utility
#-------------------------------------------------------------------------------
def utility_netstat(name, args, interp, env, stdin, stdout, stderr, debugflags):
    # Do you really expect me to implement netstat?
    # This empty form is enough for Mercurial tests since it's
    # supposed to generate nothing upon success. Faking this test
    # is not a big deal either.
    if 'debug-utility' in debugflags:
        print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')
    return 0

#-------------------------------------------------------------------------------
# pwd utility
#-------------------------------------------------------------------------------
OPT_PWD = NonExitingParser("pwd - return working directory name")
OPT_PWD.add_option('-L', action='store_true', dest='has_L', default=True,
    help="""If the PWD environment variable contains an absolute pathname of \
the current directory that does not contain the filenames dot or dot-dot, \
pwd shall write this pathname to standard output. Otherwise, the -L option \
shall behave as the -P option.""")
OPT_PWD.add_option('-P', action='store_true', dest='has_L', default=False,
    help="""The absolute pathname written shall not contain filenames that, in \
the context of the pathname, refer to files of type symbolic link.""")

def utility_pwd(name, args, interp, env, stdin, stdout, stderr, debugflags):
    if 'debug-utility' in debugflags:
        print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')

    option, args = OPT_PWD.parse_args(args)
    stdout.write('%s\n' % env['PWD'])
    return 0

#-------------------------------------------------------------------------------
# printf utility
#-------------------------------------------------------------------------------
RE_UNESCAPE = re.compile(r'(\\x[a-zA-Z0-9]{2}|\\[0-7]{1,3}|\\.)')

def utility_printf(name, args, interp, env, stdin, stdout, stderr, debugflags):
    if 'debug-utility' in debugflags:
        print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')

    def replace(m):
        assert m.group()
        g = m.group()[1:]
        if g.startswith('x'):
            return chr(int(g[1:], 16))
        if len(g) <= 3 and len([c for c in g if c in '01234567']) == len(g):
            # Yay, an octal number
            return chr(int(g, 8))
        return {
            'a': '\a',
            'b': '\b',
            'f': '\f',
            'n': '\n',
            'r': '\r',
            't': '\t',
            'v': '\v',
            '\\': '\\',
        }.get(g)

    # Convert escape sequences
    format = re.sub(RE_UNESCAPE, replace, args[0])
    stdout.write(format % tuple(args[1:]))
    return 0
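
# A quick standalone sketch of the escape handling above (not part of the
# original module): RE_UNESCAPE picks up hex, octal and single-character
# escapes, and replace() maps them to the corresponding bytes, e.g.
#
#     r'\x41'  -> 'A'        (hex)
#     r'\101'  -> 'A'        (octal)
#     r'\n'    -> newline    (single-character table above)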

#-------------------------------------------------------------------------------
# true utility
#-------------------------------------------------------------------------------
def utility_true(name, args, interp, env, stdin, stdout, stderr, debugflags):
    if 'debug-utility' in debugflags:
        print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')
    return 0

#-------------------------------------------------------------------------------
# sed utility
#-------------------------------------------------------------------------------
RE_SED = re.compile(r'^s(.).*\1[a-zA-Z]*$')

# cygwin sed fails with some expressions when they do not end with a single
# space. See the unit tests for details. Interestingly, the same expressions
# work perfectly in a cygwin shell.
def utility_sed(name, args, interp, env, stdin, stdout, stderr, debugflags):
    if 'debug-utility' in debugflags:
        print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')

    # Scan pattern arguments and append a space if necessary
    for i in xrange(len(args)):
        if not RE_SED.search(args[i]):
            continue
        args[i] = args[i] + ' '

    return run_command(name, args, interp, env, stdin, stdout,
                       stderr, debugflags)
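
# Sketch of the workaround above (not part of the original module): RE_SED
# only matches complete substitution expressions, so a trailing space is
# appended to those and nothing else.
#
#     import re
#     RE = re.compile(r'^s(.).*\1[a-zA-Z]*$')
#     assert RE.search('s/foo/bar/')     # gets a space appended
#     assert not RE.search('-e')         # left untouched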

#-------------------------------------------------------------------------------
# sleep utility
#-------------------------------------------------------------------------------
def utility_sleep(name, args, interp, env, stdin, stdout, stderr, debugflags):
    if 'debug-utility' in debugflags:
        print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')
    time.sleep(int(args[0]))
    return 0

#-------------------------------------------------------------------------------
# sort utility
#-------------------------------------------------------------------------------
OPT_SORT = NonExitingParser("sort - sort, merge, or sequence check text files")

def utility_sort(name, args, interp, env, stdin, stdout, stderr, debugflags):

    def sort(path):
        if path == '-':
            lines = stdin.readlines()
        else:
            try:
                f = file(path)
                try:
                    lines = f.readlines()
                finally:
                    f.close()
            except IOError, e:
                stderr.write(str(e) + '\n')
                # Contribute no lines on error; returning an integer would
                # break the "alllines += sort(path)" call below.
                return []

        if lines and lines[-1][-1]!='\n':
            lines[-1] = lines[-1] + '\n'
        return lines

    if 'debug-utility' in debugflags:
        print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')

    option, args = OPT_SORT.parse_args(args)
    alllines = []

    if len(args)<=0:
        args += ['-']

    # Load all file lines
    curdir = os.getcwd()
    try:
        os.chdir(env['PWD'])
        for path in args:
            alllines += sort(path)
    finally:
        os.chdir(curdir)

    alllines.sort()
    for line in alllines:
        stdout.write(line)
    return 0

#-------------------------------------------------------------------------------
# hg utility
#-------------------------------------------------------------------------------

hgcommands = [
    'add',
    'addremove',
    'commit', 'ci',
    'debugrename',
    'debugwalk',
    'falabala', # Dummy command used in a mercurial test
    'incoming',
    'locate',
    'pull',
    'push',
    'qinit',
    'remove', 'rm',
    'rename', 'mv',
    'revert',
    'showconfig',
    'status', 'st',
    'strip',
]

def rewriteslashes(name, args):
    # Several hg commands output file paths, rewrite the separators
    if len(args) > 1 and name.lower().endswith('python') \
       and args[0].endswith('hg'):
        for cmd in hgcommands:
            if cmd in args[1:]:
                return True

    # svn output contains many paths with OS specific separators.
    # Normalize these to unix paths.
    base = os.path.basename(name)
    if base.startswith('svn'):
        return True

    return False

def rewritehg(output):
    if not output:
        return output
    # Rewrite os specific messages
    output = output.replace(': The system cannot find the file specified',
                            ': No such file or directory')
    output = re.sub(': Access is denied.*$', ': Permission denied', output)
    output = output.replace(': No connection could be made because the target machine actively refused it',
                            ': Connection refused')
    return output


def run_command(name, args, interp, env, stdin, stdout,
                stderr, debugflags):
    # Execute the command
    if 'debug-utility' in debugflags:
        print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n')

    hgbin = interp.options().hgbinary
    ishg = hgbin and ('hg' in name or args and 'hg' in args[0])
    unixoutput = 'cygwin' in name or ishg

    exec_env = env.get_variables()
    try:
        # BUG: comparing file descriptors is clearly not a reliable way to
        # tell whether they point to the same underlying object. But within
        # pysh's limited scope this is usually right; we do not expect
        # complicated redirections besides the usual 2>&1.
        # There is still one case we cannot deal with: when stdout and stderr
        # are redirected *by the pysh caller*. This is the reason for the
        # --redirected pysh() option.
        # Now, we want to know whether they are the same because we sometimes
        # need to transform the command output, mostly to remove CR-LF and
        # ensure that the output is unix-like. Cygwin utilities are a special
        # case because they explicitly set their output streams to binary
        # mode, so we have nothing to do. For all other commands, we have to
        # guess whether they are sending text data, in which case the
        # transformation must be done. Again, the NUL character test is
        # unreliable but should be enough for hg tests.
        redirected = stdout.fileno()==stderr.fileno()
        if not redirected:
            p = subprocess.Popen([name] + args, cwd=env['PWD'], env=exec_env,
                stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        else:
            p = subprocess.Popen([name] + args, cwd=env['PWD'], env=exec_env,
                stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        out, err = p.communicate()
    except WindowsError, e:
        raise UtilityError(str(e))

    if not unixoutput:
        def encode(s):
            if '\0' in s:
                return s
            return s.replace('\r\n', '\n')
    else:
        encode = lambda s: s

    if rewriteslashes(name, args):
        encode1_ = encode
        def encode(s):
            s = encode1_(s)
            s = s.replace('\\\\', '\\')
            s = s.replace('\\', '/')
            return s

    if ishg:
        encode2_ = encode
        def encode(s):
            return rewritehg(encode2_(s))

    stdout.write(encode(out))
    if not redirected:
        stderr.write(encode(err))
    return p.returncode
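
# Sketch of the encode() chain above (not part of the original module):
# CR-LF text output is normalized while data containing a NUL byte is
# treated as binary and returned unchanged.
#
#     assert 'a\r\nb'.replace('\r\n', '\n') == 'a\nb'
#     # 'a\0\r\nb' would be passed through untouched by encode()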
File diff suppressed because it is too large
@@ -1,116 +0,0 @@
#! /usr/bin/env python

import sys
from _lsprof import Profiler, profiler_entry

__all__ = ['profile', 'Stats']

def profile(f, *args, **kwds):
    """XXX docstring"""
    p = Profiler()
    p.enable(subcalls=True, builtins=True)
    try:
        f(*args, **kwds)
    finally:
        p.disable()
    return Stats(p.getstats())


class Stats(object):
    """XXX docstring"""

    def __init__(self, data):
        self.data = data

    def sort(self, crit="inlinetime"):
        """XXX docstring"""
        if crit not in profiler_entry.__dict__:
            raise ValueError("Can't sort by %s" % crit)
        self.data.sort(lambda b, a: cmp(getattr(a, crit),
                                        getattr(b, crit)))
        for e in self.data:
            if e.calls:
                e.calls.sort(lambda b, a: cmp(getattr(a, crit),
                                              getattr(b, crit)))

    def pprint(self, top=None, file=None, limit=None, climit=None):
        """XXX docstring"""
        if file is None:
            file = sys.stdout
        d = self.data
        if top is not None:
            d = d[:top]
        cols = "% 12s %12s %11.4f %11.4f %s\n"
        hcols = "% 12s %12s %12s %12s %s\n"
        cols2 = "+%12s %12s %11.4f %11.4f + %s\n"
        file.write(hcols % ("CallCount", "Recursive", "Total(ms)",
                            "Inline(ms)", "module:lineno(function)"))
        count = 0
        for e in d:
            file.write(cols % (e.callcount, e.reccallcount, e.totaltime,
                               e.inlinetime, label(e.code)))
            count += 1
            if limit is not None and count == limit:
                return
            ccount = 0
            if e.calls:
                for se in e.calls:
                    file.write(cols % ("+%s" % se.callcount, se.reccallcount,
                                       se.totaltime, se.inlinetime,
                                       "+%s" % label(se.code)))
                    count += 1
                    ccount += 1
                    if limit is not None and count == limit:
                        return
                    if climit is not None and ccount == climit:
                        break

    def freeze(self):
        """Replace all references to code objects with string
        descriptions; this makes it possible to pickle the instance."""

        # this code is probably rather ickier than it needs to be!
        for i in range(len(self.data)):
            e = self.data[i]
            if not isinstance(e.code, str):
                self.data[i] = type(e)((label(e.code),) + e[1:])
            if e.calls:
                for j in range(len(e.calls)):
                    se = e.calls[j]
                    if not isinstance(se.code, str):
                        e.calls[j] = type(se)((label(se.code),) + se[1:])

_fn2mod = {}

def label(code):
    if isinstance(code, str):
        return code
    try:
        mname = _fn2mod[code.co_filename]
    except KeyError:
        for k, v in sys.modules.items():
            if v is None:
                continue
            if not hasattr(v, '__file__'):
                continue
            if not isinstance(v.__file__, str):
                continue
            if v.__file__.startswith(code.co_filename):
                mname = _fn2mod[code.co_filename] = k
                break
        else:
            mname = _fn2mod[code.co_filename] = '<%s>'%code.co_filename

    return '%s:%d(%s)' % (mname, code.co_firstlineno, code.co_name)


if __name__ == '__main__':
    import os
    sys.argv = sys.argv[1:]
    if not sys.argv:
        print >> sys.stderr, "usage: lsprof.py <script> <arguments...>"
        sys.exit(2)
    sys.path.insert(0, os.path.abspath(os.path.dirname(sys.argv[0])))
    stats = profile(execfile, sys.argv[0], globals(), locals())
    stats.sort()
    stats.pprint()
@@ -1,167 +0,0 @@
# pysh.py - command processing for pysh.
#
# Copyright 2007 Patrick Mezard
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

import optparse
import os
import sys

import interp

SH_OPT = optparse.OptionParser(prog='pysh', usage="%prog [OPTIONS]", version='0.1')
SH_OPT.add_option('-c', action='store_true', dest='command_string', default=None,
    help='A string that shall be interpreted by the shell as one or more commands')
SH_OPT.add_option('--redirect-to', dest='redirect_to', default=None,
    help='Redirect script commands stdout and stderr to the specified file')
# See utility_command in builtin.py about the reason for this flag.
SH_OPT.add_option('--redirected', dest='redirected', action='store_true', default=False,
    help='Tell the interpreter that stdout and stderr are actually the same objects, which is really stdout')
SH_OPT.add_option('--debug-parsing', action='store_true', dest='debug_parsing', default=False,
    help='Trace PLY execution')
SH_OPT.add_option('--debug-tree', action='store_true', dest='debug_tree', default=False,
    help='Display the generated syntax tree.')
SH_OPT.add_option('--debug-cmd', action='store_true', dest='debug_cmd', default=False,
    help='Trace command execution before parameters expansion and exit status.')
SH_OPT.add_option('--debug-utility', action='store_true', dest='debug_utility', default=False,
    help='Trace utility calls, after parameters expansions')
SH_OPT.add_option('--ast', action='store_true', dest='ast', default=False,
    help='Encoded commands to execute in a subprocess')
SH_OPT.add_option('--profile', action='store_true', default=False,
    help='Profile pysh run')


def split_args(args):
    # Separate shell arguments from command ones.
    # Just stop at the first argument not starting with a dash. This is
    # admittedly broken: it mishandles files starting with a dash and may
    # mistake option values for the command file. That is not supposed to
    # happen for now.
    command_index = len(args)
    for i,arg in enumerate(args):
        if not arg.startswith('-'):
            command_index = i
            break

    return args[:command_index], args[command_index:]
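
# A minimal usage sketch (not part of the original module): leading dash
# arguments go to the shell, the remainder to the command.
#
#     shargs, cmdargs = split_args(['--debug-cmd', '-c', 'echo hi'])
#     assert shargs == ['--debug-cmd', '-c'] and cmdargs == ['echo hi']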


def fixenv(env):
    path = env.get('PATH')
    if path is not None:
        parts = path.split(os.pathsep)
        # Remove Windows utilities from PATH, they are useless at best and
        # some of them (find) may be confused with other utilities.
        parts = [p for p in parts if 'system32' not in p.lower()]
        env['PATH'] = os.pathsep.join(parts)
    if env.get('HOME') is None:
        # Several utilities, including cvsps, cannot work without
        # a defined HOME directory.
        env['HOME'] = os.path.expanduser('~')
    return env

def _sh(cwd, shargs, cmdargs, options, debugflags=None, env=None):
    if os.environ.get('PYSH_TEXT') != '1':
        import msvcrt
        for fp in (sys.stdin, sys.stdout, sys.stderr):
            msvcrt.setmode(fp.fileno(), os.O_BINARY)

    hgbin = os.environ.get('PYSH_HGTEXT') != '1'

    if debugflags is None:
        debugflags = []
        if options.debug_parsing:    debugflags.append('debug-parsing')
        if options.debug_utility:    debugflags.append('debug-utility')
        if options.debug_cmd:        debugflags.append('debug-cmd')
        if options.debug_tree:       debugflags.append('debug-tree')

    if env is None:
        env = fixenv(dict(os.environ))
    if cwd is None:
        cwd = os.getcwd()

    if not cmdargs:
        # Nothing to do
        return 0

    ast = None
    command_file = None
    if options.command_string:
        input = cmdargs[0]
        if not options.ast:
            input += '\n'
        else:
            args, input = interp.decodeargs(input), None
            env, ast = args
            cwd = env.get('PWD', cwd)
    else:
        command_file = cmdargs[0]
        arguments = cmdargs[1:]

        prefix = interp.resolve_shebang(command_file, ignoreshell=True)
        if prefix:
            input = ' '.join(prefix + [command_file] + arguments)
        else:
            # Read commands from file
            f = file(command_file)
            try:
                # Trailing newline to help the parser
                input = f.read() + '\n'
            finally:
                f.close()

    redirect = None
    try:
        if options.redirected:
            stdout = sys.stdout
            stderr = stdout
        elif options.redirect_to:
            redirect = open(options.redirect_to, 'wb')
            stdout = redirect
            stderr = redirect
        else:
            stdout = sys.stdout
            stderr = sys.stderr

        # TODO: set arguments to environment variables
        opts = interp.Options()
        opts.hgbinary = hgbin
        ip = interp.Interpreter(cwd, debugflags, stdout=stdout, stderr=stderr,
                                opts=opts)
        try:
            # Export given environment in shell object
            for k,v in env.iteritems():
                ip.get_env().export(k,v)
            return ip.execute_script(input, ast, scriptpath=command_file)
        finally:
            ip.close()
    finally:
        if redirect is not None:
            redirect.close()

def sh(cwd=None, args=None, debugflags=None, env=None):
    if args is None:
        args = sys.argv[1:]
    shargs, cmdargs = split_args(args)
    options, shargs = SH_OPT.parse_args(shargs)

    if options.profile:
        import lsprof
        p = lsprof.Profiler()
        p.enable(subcalls=True)
        try:
            return _sh(cwd, shargs, cmdargs, options, debugflags, env)
        finally:
            p.disable()
            stats = lsprof.Stats(p.getstats())
            stats.sort()
            stats.pprint(top=10, file=sys.stderr, climit=5)
    else:
        return _sh(cwd, shargs, cmdargs, options, debugflags, env)

def main():
    sys.exit(sh())

if __name__=='__main__':
    main()
@@ -1,888 +0,0 @@
# pyshlex.py - PLY compatible lexer for pysh.
#
# Copyright 2007 Patrick Mezard
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

# TODO:
# - review all "char in 'abc'" snippets: the empty string can be matched
# - test line continuations within quoted/expansion strings
# - eof is buggy wrt sublexers
# - the lexer cannot really work in pull mode as it would be required to run
# PLY in pull mode. It was designed to work incrementally and it would not be
# that hard to enable pull mode.
import re
try:
    s = set()
    del s
except NameError:
    # Python < 2.4 fallback: the Set type lives in the "sets" module
    from sets import Set as set

from ply import lex
from sherrors import *

class NeedMore(Exception):
    pass

def is_blank(c):
    return c in (' ', '\t')

_RE_DIGITS = re.compile(r'^\d+$')

def are_digits(s):
    return _RE_DIGITS.search(s) is not None

_OPERATORS = dict([
    ('&&', 'AND_IF'),
    ('||', 'OR_IF'),
    (';;', 'DSEMI'),
    ('<<', 'DLESS'),
    ('>>', 'DGREAT'),
    ('<&', 'LESSAND'),
    ('>&', 'GREATAND'),
    ('<>', 'LESSGREAT'),
    ('<<-', 'DLESSDASH'),
    ('>|', 'CLOBBER'),
    ('&', 'AMP'),
    (';', 'COMMA'),
    ('<', 'LESS'),
    ('>', 'GREATER'),
    ('(', 'LPARENS'),
    (')', 'RPARENS'),
])

#Make a function to silence pychecker "Local variable shadows global"
def make_partial_ops():
    partials = {}
    for k in _OPERATORS:
        for i in range(1, len(k)+1):
            partials[k[:i]] = None
    return partials

_PARTIAL_OPERATORS = make_partial_ops()

def is_partial_op(s):
    """Return True if s matches a non-empty subpart of an operator starting
    at its first character.
    """
    return s in _PARTIAL_OPERATORS

def is_op(s):
    """If s matches an operator, return the operator identifier. Return None
    otherwise.
    """
    return _OPERATORS.get(s)
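
# A minimal sketch of how the two helpers above drive the lexer's
# longest-match operator scan (not part of the original module):
#
#     assert is_op('&&') == 'AND_IF'
#     assert is_op('&') == 'AMP'
#     assert is_partial_op('<<')     # '<<' is a prefix of '<<-'
#     assert is_op('<<<') is None    # not an operator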

_RESERVEDS = dict([
    ('if', 'If'),
    ('then', 'Then'),
    ('else', 'Else'),
    ('elif', 'Elif'),
    ('fi', 'Fi'),
    ('do', 'Do'),
    ('done', 'Done'),
    ('case', 'Case'),
    ('esac', 'Esac'),
    ('while', 'While'),
    ('until', 'Until'),
    ('for', 'For'),
    ('{', 'Lbrace'),
    ('}', 'Rbrace'),
    ('!', 'Bang'),
    ('in', 'In'),
    ('|', 'PIPE'),
])

def get_reserved(s):
    return _RESERVEDS.get(s)

_RE_NAME = re.compile(r'^[0-9a-zA-Z_]+$')

def is_name(s):
    return _RE_NAME.search(s) is not None

def find_chars(seq, chars):
    for i,v in enumerate(seq):
        if v in chars:
            return i,v
    return -1, None

class WordLexer:
    """WordLexer parses quoted or expansion expressions and returns an
    expression tree. The input string can be any well formed sequence
    beginning with a quoting or expansion character. Embedded expressions are
    handled recursively. The resulting tree is made of lists and strings.
    Lists represent quoted or expansion expressions. Each list's first element
    is the opening separator, the last one the closing separator. In-between
    can be any number of strings or lists for sub-expressions. Non
    quoted/expansion expressions can be written as strings or as lists with
    empty strings as starting and ending delimiters.
    """

    NAME_CHARSET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_'
    NAME_CHARSET = dict(zip(NAME_CHARSET, NAME_CHARSET))

    SPECIAL_CHARSET = '@*#?-$!0'

    #Characters which can be escaped depend on the current delimiters
    ESCAPABLE = {
        '`': set(['$', '\\', '`']),
        '"': set(['$', '\\', '`', '"']),
        "'": set(),
    }

    def __init__(self, heredoc = False):
        # _buffer is the unprocessed input characters buffer
        self._buffer = []
        # _stack is empty or contains a quoted list being processed
        # (this is the DFS path to the quoted expression being evaluated).
        self._stack = []
        self._escapable = None
        # True when parsing unquoted here documents
        self._heredoc = heredoc

    def add(self, data, eof=False):
        """Feed the lexer with more data. If the quoted expression can be
        delimited, return a tuple (expr, remaining) containing the expression
        tree and the unconsumed data.
        Otherwise, raise NeedMore.
        """
        self._buffer += list(data)
        self._parse(eof)

        result = self._stack[0]
        remaining = ''.join(self._buffer)
        self._stack = []
        self._buffer = []
        return result, remaining

    def _is_escapable(self, c, delim=None):
        if delim is None:
            if self._heredoc:
                # Backslashes work as if they were double quoted in unquoted
                # here-documents
                delim = '"'
            else:
                if len(self._stack)<=1:
                    return True
                delim = self._stack[-2][0]

        escapables = self.ESCAPABLE.get(delim, None)
        return escapables is None or c in escapables

    def _parse_squote(self, buf, result, eof):
        if not buf:
            raise NeedMore()
        try:
            pos = buf.index("'")
        except ValueError:
            raise NeedMore()
        result[-1] += ''.join(buf[:pos])
        result += ["'"]
        return pos+1, True

    def _parse_bquote(self, buf, result, eof):
        if not buf:
            raise NeedMore()

        if buf[0]=='\n':
            #Remove line continuations
            result[:] = ['', '', '']
        elif self._is_escapable(buf[0]):
            result[-1] += buf[0]
            result += ['']
        else:
            #Keep as such
            result[:] = ['', '\\'+buf[0], '']

        return 1, True

    def _parse_dquote(self, buf, result, eof):
        if not buf:
            raise NeedMore()
        pos, sep = find_chars(buf, '$\\`"')
        if pos==-1:
            raise NeedMore()

        result[-1] += ''.join(buf[:pos])
        if sep=='"':
            result += ['"']
            return pos+1, True
        else:
            #Keep everything until the separator and defer processing
            return pos, False

    def _parse_command(self, buf, result, eof):
        if not buf:
            raise NeedMore()

        chars = '$\\`"\''
        if result[0] == '$(':
            chars += ')'
        pos, sep = find_chars(buf, chars)
        if pos == -1:
            raise NeedMore()

        result[-1] += ''.join(buf[:pos])
        if (result[0]=='$(' and sep==')') or (result[0]=='`' and sep=='`'):
            result += [sep]
            return pos+1, True
        else:
            return pos, False

    def _parse_parameter(self, buf, result, eof):
        if not buf:
            raise NeedMore()

        pos, sep = find_chars(buf, '$\\`"\'}')
        if pos==-1:
            raise NeedMore()

        result[-1] += ''.join(buf[:pos])
        if sep=='}':
            result += [sep]
            return pos+1, True
        else:
            return pos, False

    def _parse_dollar(self, buf, result, eof):
        sep = result[0]
        if sep=='$':
            if not buf:
                #TODO: handle empty $
                raise NeedMore()
            if buf[0]=='(':
                if len(buf)==1:
                    raise NeedMore()

                if buf[1]=='(':
                    result[0] = '$(('
                    buf[:2] = []
                else:
                    result[0] = '$('
                    buf[:1] = []

            elif buf[0]=='{':
                result[0] = '${'
                buf[:1] = []
            else:
                if buf[0] in self.SPECIAL_CHARSET:
                    result[-1] = buf[0]
                    read = 1
                else:
                    for read,c in enumerate(buf):
                        if c not in self.NAME_CHARSET:
                            break
                    else:
                        if not eof:
                            raise NeedMore()
                        read += 1

                    result[-1] += ''.join(buf[0:read])

                if not result[-1]:
                    result[:] = ['', result[0], '']
                else:
                    result += ['']
                return read,True

        sep = result[0]
        if sep=='$(':
            parsefunc = self._parse_command
        elif sep=='${':
            parsefunc = self._parse_parameter
        else:
            raise NotImplementedError()

        pos, closed = parsefunc(buf, result, eof)
        return pos, closed

    def _parse(self, eof):
        buf = self._buffer
        stack = self._stack
        recurse = False

        while 1:
            if not stack or recurse:
                if not buf:
                    raise NeedMore()
                if buf[0] not in ('"\\`$\''):
                    raise ShellSyntaxError('Invalid quoted string sequence')
                stack.append([buf[0], ''])
                buf[:1] = []
                recurse = False

            result = stack[-1]
            if result[0]=="'":
                parsefunc = self._parse_squote
            elif result[0]=='\\':
                parsefunc = self._parse_bquote
            elif result[0]=='"':
                parsefunc = self._parse_dquote
            elif result[0]=='`':
                parsefunc = self._parse_command
            elif result[0][0]=='$':
                parsefunc = self._parse_dollar
            else:
                raise NotImplementedError()

            read, closed = parsefunc(buf, result, eof)

            buf[:read] = []
            if closed:
                if len(stack)>1:
                    #Merge in parent expression
                    parsed = stack.pop()
                    stack[-1] += [parsed]
                    stack[-1] += ['']
                else:
                    break
            else:
                recurse = True
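
# A usage sketch, with the tree shape inferred from the class docstring
# above (not part of the original module): feeding a complete double-quoted
# word returns its expression tree and the unconsumed input, roughly:
#
#     tree, rest = WordLexer().add('"a $b" tail', eof=True)
#     # tree is ['"', 'a ', ['$', 'b', ''], '', '"'] and rest == ' tail'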

def normalize_wordtree(wtree):
    """Fold back every literal sequence (delimited with empty strings) into
    parent sequence.
    """
    def normalize(wtree):
        result = []
        for part in wtree[1:-1]:
            if isinstance(part, list):
                part = normalize(part)
                if part[0]=='':
                    #Move the part content back at current level
                    result += part[1:-1]
                    continue
            elif not part:
                #Remove empty strings
                continue
            result.append(part)
        if not result:
            result = ['']
        return [wtree[0]] + result + [wtree[-1]]

    return normalize(wtree)
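
# Sketch of the fold above (not part of the original module): a literal
# sub-list, delimited by empty strings, is merged back into its parent.
#
#     assert normalize_wordtree(['"', ['', 'ab', ''], '"']) == ['"', 'ab', '"']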

def make_wordtree(token, here_document=False):
    """Parse a delimited token and return a tree similar to the ones returned by
    WordLexer. token may contain any combinations of expansion/quoted fields and
    non-ones.
    """
    tree = ['']
    remaining = token
    delimiters = '\\$`'
    if not here_document:
        delimiters += '\'"'

    while 1:
        pos, sep = find_chars(remaining, delimiters)
        if pos==-1:
            tree += [remaining, '']
            return normalize_wordtree(tree)
        tree.append(remaining[:pos])
        remaining = remaining[pos:]

        try:
            result, remaining = WordLexer(heredoc = here_document).add(remaining, True)
        except NeedMore:
            raise ShellSyntaxError('Invalid token "%s"' % token)
        tree.append(result)


def wordtree_as_string(wtree):
    """Rewrite an expression tree generated by make_wordtree as string."""
    def visit(node, output):
        for child in node:
            if isinstance(child, list):
                visit(child, output)
            else:
                output.append(child)

    output = []
    visit(wtree, output)
    return ''.join(output)


def unquote_wordtree(wtree):
    """Fold the word tree while removing quotes everywhere. Other expansion
    sequences are joined as such.
    """
    def unquote(wtree):
        unquoted = []
        if wtree[0] in ('', "'", '"', '\\'):
            wtree = wtree[1:-1]

        for part in wtree:
            if isinstance(part, list):
                part = unquote(part)
            unquoted.append(part)
        return ''.join(unquoted)

    return unquote(wtree)
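
# Sketch of the round trip (not part of the original module): quoting layers
# are stripped while the word content is preserved.
#
#     assert unquote_wordtree(make_wordtree('"a b"')) == 'a b'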


class HereDocLexer:
    """HereDocLexer delimits whatever comes from the here-document starting
    newline (not included) to the closing delimiter line (included).
    """
    def __init__(self, op, delim):
        assert op in ('<<', '<<-')
        if not delim:
            raise ShellSyntaxError('invalid here document delimiter %s' % str(delim))

        self._op = op
        self._delim = delim
        self._buffer = []
        self._token = []

    def add(self, data, eof):
        """If the here-document was delimited, return a tuple (content, remaining).
        Raise NeedMore() otherwise.
        """
        self._buffer += list(data)
        self._parse(eof)
        token = ''.join(self._token)
        remaining = ''.join(self._buffer)
        self._token, self._buffer = [], []
        return token, remaining

    def _parse(self, eof):
        while 1:
            #Look for first unescaped newline. Quotes may be ignored
            escaped = False
            for i,c in enumerate(self._buffer):
                if escaped:
                    escaped = False
                elif c=='\\':
                    escaped = True
                elif c=='\n':
                    break
            else:
                i = -1

            if i==-1 or self._buffer[i]!='\n':
                if not eof:
                    raise NeedMore()
                #No more data, maybe the last line is closing delimiter
                line = ''.join(self._buffer)
                eol = ''
                self._buffer[:] = []
            else:
                line = ''.join(self._buffer[:i])
                eol = self._buffer[i]
                self._buffer[:i+1] = []

            if self._op=='<<-':
                line = line.lstrip('\t')

            if line==self._delim:
                break

            self._token += [line, eol]
            if i==-1:
                break
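
# A minimal usage sketch (not part of the original module): delimiting a
# small unquoted here-document.
#
#     lexer = HereDocLexer('<<', 'EOF')
#     content, rest = lexer.add('hello\nEOF\n', eof=False)
#     assert content == 'hello\n' and rest == ''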

class Token:
    #TODO: check this is still in use
    OPERATOR = 'OPERATOR'
    WORD = 'WORD'

    def __init__(self):
        self.value = ''
        self.type = None

    def __getitem__(self, key):
        #Behave like a two elements tuple
        if key==0:
            return self.type
        if key==1:
            return self.value
        raise IndexError(key)


class HereDoc:
    def __init__(self, op, name=None):
        self.op = op
        self.name = name
        self.pendings = []

TK_COMMA = 'COMMA'
TK_AMPERSAND = 'AMP'
TK_OP = 'OP'
TK_TOKEN = 'TOKEN'
TK_COMMENT = 'COMMENT'
TK_NEWLINE = 'NEWLINE'
TK_IONUMBER = 'IO_NUMBER'
TK_ASSIGNMENT = 'ASSIGNMENT_WORD'
TK_HERENAME = 'HERENAME'

class Lexer:
    """Main lexer.

    Call add() until the script AST is returned.
    """
    # Here-document handling makes the whole thing more complex because it
    # basically forces tokens to be reordered: here-content must come right
    # after the operator and the here-document name, while some other tokens
    # might be following the here-document expression on the same line.
    #
    # So, here-doc states are basically:
    # *self._state==ST_NORMAL
    #   - self._heredoc.op is None: no here-document
    #   - self._heredoc.op is not None but name is: here-document operator matched,
    #     waiting for the document name/delimiter
    #   - self._heredoc.op and name are not None: here-document is ready, following
    #     tokens are being stored and will be pushed again when the document is
    #     completely parsed.
    # *self._state==ST_HEREDOC
    #   - The here-document is being delimited by self._herelexer. Once it is done
    #     the content is pushed in front of the pending token list then all these
    #     tokens are pushed once again.
    ST_NORMAL = 'ST_NORMAL'
    ST_OP = 'ST_OP'
    ST_BACKSLASH = 'ST_BACKSLASH'
    ST_QUOTED = 'ST_QUOTED'
    ST_COMMENT = 'ST_COMMENT'
    ST_HEREDOC = 'ST_HEREDOC'

    #Match end of backquote strings
    RE_BACKQUOTE_END = re.compile(r'(?<!\\)(`)')

    def __init__(self, parent_state = None):
        self._input = []
        self._pos = 0

        self._token = ''
        self._type = TK_TOKEN

        self._state = self.ST_NORMAL
        self._parent_state = parent_state
        self._wordlexer = None

        self._heredoc = HereDoc(None)
        self._herelexer = None

        ### Following attributes are not used for delimiting tokens and can
        ### safely be changed after here-document detection (see _push_token)

        # Count the number of tokens following a 'For' reserved word. Needed to
        # return an 'In' reserved word if it comes in third place.
        self._for_count = None

    def add(self, data, eof=False):
        """Feed the lexer with data.

        When eof is set to True, returns unconsumed data or raise if the lexer
        is in the middle of a delimiting operation.
        Raise NeedMore otherwise.
        """
        self._input += list(data)
        self._parse(eof)
        self._input[:self._pos] = []
        return ''.join(self._input)

    def _parse(self, eof):
        while self._state:
            if self._pos>=len(self._input):
                if not eof:
                    raise NeedMore()
                elif self._state not in (self.ST_OP, self.ST_QUOTED, self.ST_HEREDOC):
                    #Delimit the current token and leave cleanly
                    self._push_token('')
                    break
                else:
                    #Let the sublexers handle the eof themselves
                    pass

            if self._state==self.ST_NORMAL:
                self._parse_normal()
            elif self._state==self.ST_COMMENT:
                self._parse_comment()
            elif self._state==self.ST_OP:
                self._parse_op(eof)
            elif self._state==self.ST_QUOTED:
                self._parse_quoted(eof)
            elif self._state==self.ST_HEREDOC:
                self._parse_heredoc(eof)
            else:
                assert False, "Unknown state " + str(self._state)

        if self._heredoc.op is not None:
            raise ShellSyntaxError('missing here-document delimiter')

    def _parse_normal(self):
        c = self._input[self._pos]
        if c=='\n':
            self._push_token(c)
            self._token = c
            self._type = TK_NEWLINE
            self._push_token('')
            self._pos += 1
        elif c in ('\\', '\'', '"', '`', '$'):
            self._state = self.ST_QUOTED
        elif is_partial_op(c):
            self._push_token(c)

            self._type = TK_OP
            self._token += c
            self._pos += 1
            self._state = self.ST_OP
        elif is_blank(c):
            self._push_token(c)

            #Discard blanks
            self._pos += 1
        elif self._token:
            self._token += c
            self._pos += 1
        elif c=='#':
            self._state = self.ST_COMMENT
            self._type = TK_COMMENT
            self._pos += 1
        else:
            self._pos += 1
            self._token += c

    def _parse_op(self, eof):
        assert self._token

        while 1:
            if self._pos>=len(self._input):
                if not eof:
                    raise NeedMore()
                c = ''
            else:
                c = self._input[self._pos]

            op = self._token + c
            if c and is_partial_op(op):
                #Still parsing an operator
                self._token = op
                self._pos += 1
            else:
                #End of operator
                self._push_token(c)
                self._state = self.ST_NORMAL
                break

    def _parse_comment(self):
        while 1:
            if self._pos>=len(self._input):
                raise NeedMore()

            c = self._input[self._pos]
            if c=='\n':
                #End of comment, do not consume the end of line
                self._state = self.ST_NORMAL
                break
            else:
                self._token += c
                self._pos += 1

    def _parse_quoted(self, eof):
        """Precondition: the starting backquote/dollar is still in the input queue."""
        if not self._wordlexer:
            self._wordlexer = WordLexer()

        if self._pos<len(self._input):
            #Transfer input queue characters into the subparser
            input = self._input[self._pos:]
            self._pos += len(input)

        wtree, remaining = self._wordlexer.add(input, eof)
        self._wordlexer = None
        self._token += wordtree_as_string(wtree)

        #Put unparsed characters back in the input queue
        if remaining:
            self._input[self._pos:self._pos] = list(remaining)
        self._state = self.ST_NORMAL

    def _parse_heredoc(self, eof):
        assert not self._token

        if self._herelexer is None:
            self._herelexer = HereDocLexer(self._heredoc.op, self._heredoc.name)

        if self._pos<len(self._input):
            #Transfer input queue characters into the subparser
            input = self._input[self._pos:]
            self._pos += len(input)

        self._token, remaining = self._herelexer.add(input, eof)

        #Reset here-document state
        self._herelexer = None
        heredoc, self._heredoc = self._heredoc, HereDoc(None)
        if remaining:
            self._input[self._pos:self._pos] = list(remaining)
        self._state = self.ST_NORMAL

        #Push pending tokens
        heredoc.pendings[:0] = [(self._token, self._type, heredoc.name)]
        for token, type, delim in heredoc.pendings:
            self._token = token
            self._type = type
            self._push_token(delim)

    def _push_token(self, delim):
        if not self._token:
            return 0

        if self._heredoc.op is not None:
            if self._heredoc.name is None:
                #Here-document name
                if self._type!=TK_TOKEN:
                    raise ShellSyntaxError("expecting here-document name, got '%s'" % self._token)
                self._heredoc.name = unquote_wordtree(make_wordtree(self._token))
                self._type = TK_HERENAME
            else:
                #Capture all tokens until the newline starting the here-document
                if self._type==TK_NEWLINE:
                    assert self._state==self.ST_NORMAL
                    self._state = self.ST_HEREDOC

                self._heredoc.pendings.append((self._token, self._type, delim))
                self._token = ''
                self._type = TK_TOKEN
                return 1

        # BEWARE: do not change parser state from here to the end of the function:
        # when parsing between a here-document operator and the end of the line,
        # tokens are stored in self._heredoc.pendings. Therefore, they will not
        # reach the section below.

        #Check operators
        if self._type==TK_OP:
            #False positive because of partial op matching
            op = is_op(self._token)
            if not op:
                self._type = TK_TOKEN
            else:
                #Map to the specific operator
                self._type = op
                if self._token in ('<<', '<<-'):
                    #Done here rather than in _parse_op because there is no need
                    #to change the parser state since we are still waiting for
                    #the here-document name
                    if self._heredoc.op is not None:
                        raise ShellSyntaxError("syntax error near token '%s'" % self._token)
                    assert self._heredoc.op is None
                    self._heredoc.op = self._token

        if self._type==TK_TOKEN:
            if '=' in self._token and not delim:
                if self._token.startswith('='):
                    #Token is a WORD... a TOKEN that is.
                    pass
                else:
                    prev = self._token[:self._token.find('=')]
                    if is_name(prev):
                        self._type = TK_ASSIGNMENT
                    else:
                        #Just a token (unspecified)
                        pass
            else:
                reserved = get_reserved(self._token)
                if reserved is not None:
                    if reserved=='In' and self._for_count!=2:
                        #Sorry, not a reserved word after all
                        pass
                    else:
                        self._type = reserved
                        if reserved in ('For', 'Case'):
                            self._for_count = 0
                elif are_digits(self._token) and delim in ('<', '>'):
                    #Detect IO_NUMBER
                    self._type = TK_IONUMBER
                elif self._token==';':
                    self._type = TK_COMMA
                elif self._token=='&':
                    self._type = TK_AMPERSAND
        elif self._type==TK_COMMENT:
            #Comments are not part of sh grammar, ignore them
            self._token = ''
            self._type = TK_TOKEN
            return 0

        if self._for_count is not None:
            #Track token count in 'For' expression to detect 'In' reserved words.
            #Can only be in third position, no need to go beyond
            self._for_count += 1
            if self._for_count==3:
                self._for_count = None

        self.on_token((self._token, self._type))
        self._token = ''
        self._type = TK_TOKEN
        return 1

    def on_token(self, token):
        raise NotImplementedError


tokens = [
    TK_TOKEN,
# To silence yacc unused token warnings
#    TK_COMMENT,
    TK_NEWLINE,
    TK_IONUMBER,
    TK_ASSIGNMENT,
    TK_HERENAME,
]

#Add specific operators
tokens += _OPERATORS.values()
#Add reserved words
tokens += _RESERVEDS.values()

class PLYLexer(Lexer):
    """Bridge Lexer and PLY lexer interface."""
    def __init__(self):
        Lexer.__init__(self)
        self._tokens = []
        self._current = 0
        self.lineno = 0

    def on_token(self, token):
        value, type = token

        self.lineno = 0
        t = lex.LexToken()
        t.value = value
        t.type = type
        t.lexer = self
        t.lexpos = 0
        t.lineno = 0

        self._tokens.append(t)

    def is_empty(self):
        return not bool(self._tokens)

    #PLY compliant interface
    def token(self):
        if self._current>=len(self._tokens):
            return None
        t = self._tokens[self._current]
        self._current += 1
        return t


def get_tokens(s):
    """Parse the input string and return a tuple (tokens, unprocessed) where
    tokens is a list of parsed tokens and unprocessed is the part of the input
    string left untouched by the lexer.
    """
    lexer = PLYLexer()
    untouched = lexer.add(s, True)
    tokens = []
    while 1:
        token = lexer.token()
        if token is None:
            break
        tokens.append(token)

    tokens = [(t.value, t.type) for t in tokens]
    return tokens, untouched
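
# A usage sketch, with token types inferred from the tables above (not part
# of the original module):
#
#     tokens, remaining = get_tokens('X=1 ;\n')
#     # tokens is roughly [('X=1', 'ASSIGNMENT_WORD'), (';', 'COMMA'),
#     #                    ('\n', 'NEWLINE')] and remaining == ''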
@@ -1,779 +0,0 @@
# pyshyacc.py - PLY grammar definition for pysh
#
# Copyright 2007 Patrick Mezard
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

"""PLY grammar file.
"""
import os.path
import sys

import pyshlex
tokens = pyshlex.tokens

from ply import yacc
import sherrors

class IORedirect:
    def __init__(self, op, filename, io_number=None):
        self.op = op
        self.filename = filename
        self.io_number = io_number

class HereDocument:
    def __init__(self, op, name, content, io_number=None):
        self.op = op
        self.name = name
        self.content = content
        self.io_number = io_number

def make_io_redirect(p):
    """Make an IORedirect instance from the input 'io_redirect' production."""
    name, io_number, io_target = p
    assert name=='io_redirect'

    if io_target[0]=='io_file':
        io_type, io_op, io_file = io_target
        return IORedirect(io_op, io_file, io_number)
    elif io_target[0]=='io_here':
        io_type, io_op, io_name, io_content = io_target
        return HereDocument(io_op, io_name, io_content, io_number)
    else:
        assert False, "Invalid IO redirection token %s" % repr(io_type)

class SimpleCommand:
    """
    assigns contains (name, value) pairs.
    """
    def __init__(self, words, redirs, assigns):
        self.words = list(words)
        self.redirs = list(redirs)
        self.assigns = list(assigns)

class Pipeline:
    def __init__(self, commands, reverse_status=False):
        self.commands = list(commands)
        assert self.commands    #Grammar forbids this
        self.reverse_status = reverse_status

class AndOr:
    def __init__(self, op, left, right):
        self.op = str(op)
        self.left = left
        self.right = right

class ForLoop:
    def __init__(self, name, items, cmds):
        self.name = str(name)
        self.items = list(items)
        self.cmds = list(cmds)

class WhileLoop:
    def __init__(self, condition, cmds):
        self.condition = list(condition)
        self.cmds = list(cmds)

class UntilLoop:
    def __init__(self, condition, cmds):
        self.condition = list(condition)
        self.cmds = list(cmds)

class FunDef:
    def __init__(self, name, body):
        self.name = str(name)
        self.body = body

class BraceGroup:
    def __init__(self, cmds):
        self.cmds = list(cmds)

class IfCond:
    def __init__(self, cond, if_cmds, else_cmds):
        self.cond = list(cond)
        self.if_cmds = if_cmds
        self.else_cmds = else_cmds

class Case:
    def __init__(self, name, items):
        self.name = name
        self.items = items

class SubShell:
    def __init__(self, cmds):
        self.cmds = cmds

class RedirectList:
    def __init__(self, cmd, redirs):
        self.cmd = cmd
        self.redirs = list(redirs)

def get_production(productions, ptype):
    """productions must be a list of production tuples like (name, obj) where
    name is the production string identifier.
    Return the first production named 'ptype'. Raise KeyError if None can be
    found.
    """
    for production in productions:
        if production is not None and production[0]==ptype:
            return production
    raise KeyError(ptype)

#-------------------------------------------------------------------------------
# PLY grammar definition
#-------------------------------------------------------------------------------

def p_multiple_commands(p):
    """multiple_commands : newline_sequence
                         | complete_command
                         | multiple_commands complete_command"""
    if len(p)==2:
        if p[1] is not None:
            p[0] = [p[1]]
        else:
            p[0] = []
    else:
        p[0] = p[1] + [p[2]]

def p_complete_command(p):
    """complete_command : list separator
                        | list"""
    if len(p)==3 and p[2] and p[2][1] == '&':
        p[0] = ('async', p[1])
    else:
        p[0] = p[1]

def p_list(p):
    """list : list separator_op and_or
            | and_or"""
    if len(p)==2:
        p[0] = [p[1]]
    else:
        #if p[2]!=';':
        #    raise NotImplementedError('AND-OR list asynchronous execution is not implemented')
        p[0] = p[1] + [p[3]]

def p_and_or(p):
    """and_or : pipeline
              | and_or AND_IF linebreak pipeline
              | and_or OR_IF linebreak pipeline"""
    if len(p)==2:
        p[0] = p[1]
    else:
        p[0] = ('and_or', AndOr(p[2], p[1], p[4]))

def p_maybe_bang_word(p):
    """maybe_bang_word : Bang"""
    p[0] = ('maybe_bang_word', p[1])

def p_pipeline(p):
    """pipeline : pipe_sequence
                | bang_word pipe_sequence"""
    if len(p)==3:
        p[0] = ('pipeline', Pipeline(p[2][1:], True))
    else:
        p[0] = ('pipeline', Pipeline(p[1][1:]))

def p_pipe_sequence(p):
    """pipe_sequence : command
                     | pipe_sequence PIPE linebreak command"""
    if len(p)==2:
        p[0] = ['pipe_sequence', p[1]]
    else:
        p[0] = p[1] + [p[4]]

def p_command(p):
    """command : simple_command
               | compound_command
               | compound_command redirect_list
               | function_definition"""

    if p[1][0] in ( 'simple_command',
                    'for_clause',
                    'while_clause',
                    'until_clause',
                    'case_clause',
                    'if_clause',
                    'function_definition',
                    'subshell',
                    'brace_group',):
        if len(p) == 2:
            p[0] = p[1]
        else:
            p[0] = ('redirect_list', RedirectList(p[1], p[2][1:]))
    else:
        raise NotImplementedError('%s command is not implemented' % repr(p[1][0]))

def p_compound_command(p):
    """compound_command : brace_group
                        | subshell
                        | for_clause
                        | case_clause
                        | if_clause
                        | while_clause
                        | until_clause"""
    p[0] = p[1]

def p_subshell(p):
    """subshell : LPARENS compound_list RPARENS"""
    p[0] = ('subshell', SubShell(p[2][1:]))

def p_compound_list(p):
    """compound_list : term
                     | newline_list term
                     | term separator
                     | newline_list term separator"""
    productions = p[1:]
    try:
        sep = get_production(productions, 'separator')
        if sep[1]!=';':
            raise NotImplementedError()
    except KeyError:
        pass
    term = get_production(productions, 'term')
    p[0] = ['compound_list'] + term[1:]

def p_term(p):
    """term : term separator and_or
            | and_or"""
    if len(p)==2:
        p[0] = ['term', p[1]]
    else:
        if p[2] is not None and p[2][1] == '&':
            p[0] = ['term', ('async', p[1][1:])] + [p[3]]
        else:
            p[0] = p[1] + [p[3]]

def p_maybe_for_word(p):
    # Rearrange 'For' priority wrt TOKEN. See p_for_word
    """maybe_for_word : For"""
    p[0] = ('maybe_for_word', p[1])

def p_for_clause(p):
    """for_clause : for_word name linebreak do_group
                  | for_word name linebreak in sequential_sep do_group
                  | for_word name linebreak in wordlist sequential_sep do_group"""
    productions = p[1:]
    do_group = get_production(productions, 'do_group')
    try:
        items = get_production(productions, 'in')[1:]
    except KeyError:
        raise NotImplementedError('"in" omission is not implemented')

    try:
        items = get_production(productions, 'wordlist')[1:]
    except KeyError:
        items = []

    name = p[2]
    p[0] = ('for_clause', ForLoop(name, items, do_group[1:]))

def p_name(p):
    """name : token""" #Was NAME instead of token
    p[0] = p[1]

def p_in(p):
    """in : In"""
    p[0] = ('in', p[1])

def p_wordlist(p):
    """wordlist : wordlist token
                | token"""
    if len(p)==2:
        p[0] = ['wordlist', ('TOKEN', p[1])]
    else:
        p[0] = p[1] + [('TOKEN', p[2])]

def p_case_clause(p):
    """case_clause : Case token linebreak in linebreak case_list Esac
                   | Case token linebreak in linebreak case_list_ns Esac
                   | Case token linebreak in linebreak Esac"""
    if len(p) < 8:
        items = []
    else:
        items = p[6][1:]
    name = p[2]
    p[0] = ('case_clause', Case(name, [c[1] for c in items]))

def p_case_list_ns(p):
    """case_list_ns : case_list case_item_ns
                    | case_item_ns"""
    p_case_list(p)

def p_case_list(p):
    """case_list : case_list case_item
                 | case_item"""
    if len(p)==2:
        p[0] = ['case_list', p[1]]
    else:
        p[0] = p[1] + [p[2]]

def p_case_item_ns(p):
    """case_item_ns : pattern RPARENS linebreak
                    | pattern RPARENS compound_list linebreak
                    | LPARENS pattern RPARENS linebreak
                    | LPARENS pattern RPARENS compound_list linebreak"""
    p_case_item(p)

def p_case_item(p):
    """case_item : pattern RPARENS linebreak DSEMI linebreak
                 | pattern RPARENS compound_list DSEMI linebreak
                 | LPARENS pattern RPARENS linebreak DSEMI linebreak
                 | LPARENS pattern RPARENS compound_list DSEMI linebreak"""
    if len(p) < 7:
        name = p[1][1:]
    else:
        name = p[2][1:]

    try:
        cmds = get_production(p[1:], "compound_list")[1:]
    except KeyError:
        cmds = []

    p[0] = ('case_item', (name, cmds))

def p_pattern(p):
    """pattern : token
               | pattern PIPE token"""
    if len(p)==2:
        p[0] = ['pattern', ('TOKEN', p[1])]
    else:
        p[0] = p[1] + [('TOKEN', p[2])]

def p_maybe_if_word(p):
    # Rearrange 'If' priority wrt TOKEN. See p_if_word
    """maybe_if_word : If"""
    p[0] = ('maybe_if_word', p[1])

def p_maybe_then_word(p):
    # Rearrange 'Then' priority wrt TOKEN. See p_then_word
    """maybe_then_word : Then"""
    p[0] = ('maybe_then_word', p[1])

def p_if_clause(p):
    """if_clause : if_word compound_list then_word compound_list else_part Fi
                 | if_word compound_list then_word compound_list Fi"""
    else_part = []
    if len(p)==7:
        else_part = p[5]
    p[0] = ('if_clause', IfCond(p[2][1:], p[4][1:], else_part))

def p_else_part(p):
    """else_part : Elif compound_list then_word compound_list else_part
                 | Elif compound_list then_word compound_list
                 | Else compound_list"""
    if len(p)==3:
        p[0] = p[2][1:]
    else:
        else_part = []
        if len(p)==6:
            else_part = p[5]
        p[0] = ('elif', IfCond(p[2][1:], p[4][1:], else_part))

def p_while_clause(p):
    """while_clause : While compound_list do_group"""
    p[0] = ('while_clause', WhileLoop(p[2][1:], p[3][1:]))

def p_maybe_until_word(p):
    # Rearrange 'Until' priority wrt TOKEN. See p_until_word
    """maybe_until_word : Until"""
    p[0] = ('maybe_until_word', p[1])

def p_until_clause(p):
    """until_clause : until_word compound_list do_group"""
    p[0] = ('until_clause', UntilLoop(p[2][1:], p[3][1:]))

def p_function_definition(p):
    """function_definition : fname LPARENS RPARENS linebreak function_body"""
    p[0] = ('function_definition', FunDef(p[1], p[5]))

def p_function_body(p):
    """function_body : compound_command
                     | compound_command redirect_list"""
    if len(p)!=2:
        raise NotImplementedError('functions redirections lists are not implemented')
    p[0] = p[1]

def p_fname(p):
    """fname : TOKEN""" #Was NAME instead of token
    p[0] = p[1]

def p_brace_group(p):
    """brace_group : Lbrace compound_list Rbrace"""
    p[0] = ('brace_group', BraceGroup(p[2][1:]))

def p_maybe_done_word(p):
    #See p_assignment_word for details.
    """maybe_done_word : Done"""
    p[0] = ('maybe_done_word', p[1])

def p_maybe_do_word(p):
    """maybe_do_word : Do"""
    p[0] = ('maybe_do_word', p[1])

def p_do_group(p):
    """do_group : do_word compound_list done_word"""
    #Do group contains a list of AndOr
    p[0] = ['do_group'] + p[2][1:]

def p_simple_command(p):
    """simple_command : cmd_prefix cmd_word cmd_suffix
                      | cmd_prefix cmd_word
                      | cmd_prefix
                      | cmd_name cmd_suffix
                      | cmd_name"""
    words, redirs, assigns = [], [], []
    for e in p[1:]:
        name = e[0]
        if name in ('cmd_prefix', 'cmd_suffix'):
            for sube in e[1:]:
                subname = sube[0]
                if subname=='io_redirect':
                    redirs.append(make_io_redirect(sube))
                elif subname=='ASSIGNMENT_WORD':
                    assigns.append(sube)
                else:
                    words.append(sube)
        elif name in ('cmd_word', 'cmd_name'):
            words.append(e)

    cmd = SimpleCommand(words, redirs, assigns)
    p[0] = ('simple_command', cmd)

def p_cmd_name(p):
    """cmd_name : TOKEN"""
    p[0] = ('cmd_name', p[1])

def p_cmd_word(p):
    """cmd_word : token"""
    p[0] = ('cmd_word', p[1])

def p_maybe_assignment_word(p):
    #See p_assignment_word for details.
    """maybe_assignment_word : ASSIGNMENT_WORD"""
    p[0] = ('maybe_assignment_word', p[1])

def p_cmd_prefix(p):
    """cmd_prefix : io_redirect
                  | cmd_prefix io_redirect
                  | assignment_word
                  | cmd_prefix assignment_word"""
    try:
        prefix = get_production(p[1:], 'cmd_prefix')
    except KeyError:
        prefix = ['cmd_prefix']

    try:
        value = get_production(p[1:], 'assignment_word')[1]
        value = ('ASSIGNMENT_WORD', value.split('=', 1))
    except KeyError:
        value = get_production(p[1:], 'io_redirect')
    p[0] = prefix + [value]

def p_cmd_suffix(p):
    """cmd_suffix : io_redirect
                  | cmd_suffix io_redirect
                  | token
                  | cmd_suffix token
                  | maybe_for_word
                  | cmd_suffix maybe_for_word
                  | maybe_done_word
                  | cmd_suffix maybe_done_word
                  | maybe_do_word
                  | cmd_suffix maybe_do_word
                  | maybe_until_word
                  | cmd_suffix maybe_until_word
                  | maybe_assignment_word
                  | cmd_suffix maybe_assignment_word
                  | maybe_if_word
                  | cmd_suffix maybe_if_word
                  | maybe_then_word
                  | cmd_suffix maybe_then_word
                  | maybe_bang_word
                  | cmd_suffix maybe_bang_word"""
    try:
        suffix = get_production(p[1:], 'cmd_suffix')
        token = p[2]
    except KeyError:
        suffix = ['cmd_suffix']
        token = p[1]

    if isinstance(token, tuple):
        if token[0]=='io_redirect':
            p[0] = suffix + [token]
        else:
            #Convert maybe_* to TOKEN if necessary
            p[0] = suffix + [('TOKEN', token[1])]
    else:
        p[0] = suffix + [('TOKEN', token)]

def p_redirect_list(p):
    """redirect_list : io_redirect
                     | redirect_list io_redirect"""
    if len(p) == 2:
        p[0] = ['redirect_list', make_io_redirect(p[1])]
    else:
        p[0] = p[1] + [make_io_redirect(p[2])]

def p_io_redirect(p):
    """io_redirect : io_file
                   | IO_NUMBER io_file
                   | io_here
                   | IO_NUMBER io_here"""
    if len(p)==3:
        p[0] = ('io_redirect', p[1], p[2])
    else:
        p[0] = ('io_redirect', None, p[1])

def p_io_file(p):
    #Return the tuple (operator, filename)
    """io_file : LESS filename
               | LESSAND filename
               | GREATER filename
               | GREATAND filename
               | DGREAT filename
               | LESSGREAT filename
               | CLOBBER filename"""
    #Extract the filename from the file
    p[0] = ('io_file', p[1], p[2][1])

def p_filename(p):
    #Return the filename
    """filename : TOKEN"""
    p[0] = ('filename', p[1])

def p_io_here(p):
    """io_here : DLESS here_end
               | DLESSDASH here_end"""
    p[0] = ('io_here', p[1], p[2][1], p[2][2])

def p_here_end(p):
    """here_end : HERENAME TOKEN"""
    p[0] = ('here_document', p[1], p[2])

def p_newline_sequence(p):
    # Nothing in the grammar can handle leading NEWLINE productions, so add
    # this one with the lowest possible priority relatively to newline_list.
    """newline_sequence : newline_list"""
    p[0] = None

def p_newline_list(p):
    """newline_list : NEWLINE
                    | newline_list NEWLINE"""
    p[0] = None

def p_linebreak(p):
    """linebreak : newline_list
                 | empty"""
    p[0] = None

def p_separator_op(p):
    """separator_op : COMMA
                    | AMP"""
    p[0] = p[1]

def p_separator(p):
    """separator : separator_op linebreak
                 | newline_list"""
    if len(p)==2:
        #Ignore newlines
        p[0] = None
    else:
        #Keep the separator operator
        p[0] = ('separator', p[1])

def p_sequential_sep(p):
    """sequential_sep : COMMA linebreak
                      | newline_list"""
    p[0] = None

# Low priority TOKEN => for_word conversion.
# Let maybe_for_word be used as a token when necessary in higher priority
# rules.
def p_for_word(p):
    """for_word : maybe_for_word"""
    p[0] = p[1]

def p_if_word(p):
    """if_word : maybe_if_word"""
    p[0] = p[1]

def p_then_word(p):
    """then_word : maybe_then_word"""
    p[0] = p[1]

def p_done_word(p):
    """done_word : maybe_done_word"""
    p[0] = p[1]

def p_do_word(p):
    """do_word : maybe_do_word"""
    p[0] = p[1]

def p_until_word(p):
    """until_word : maybe_until_word"""
    p[0] = p[1]

def p_assignment_word(p):
    """assignment_word : maybe_assignment_word"""
    p[0] = ('assignment_word', p[1][1])

def p_bang_word(p):
    """bang_word : maybe_bang_word"""
    p[0] = ('bang_word', p[1][1])

def p_token(p):
    """token : TOKEN
             | Fi"""
    p[0] = p[1]

def p_empty(p):
    'empty :'
    p[0] = None

# Error rule for syntax errors
def p_error(p):
    msg = []
    w = msg.append
    w('%r\n' % p)
    w('followed by:\n')
    for i in range(5):
        n = yacc.token()
        if not n:
            break
        w(' %r\n' % n)
    raise sherrors.ShellSyntaxError(''.join(msg))

# Build the parser
try:
    import pyshtables
except ImportError:
    outputdir = os.path.dirname(__file__)
    if not os.access(outputdir, os.W_OK):
        outputdir = ''
    yacc.yacc(tabmodule = 'pyshtables', outputdir = outputdir, debug = 0)
else:
    yacc.yacc(tabmodule = 'pysh.pyshtables', write_tables = 0, debug = 0)


def parse(input, eof=False, debug=False):
    """Parse a whole script at once and return the generated AST and unconsumed
    data in a tuple.

    NOTE: eof is probably meaningless for now, the parser being unable to work
    in pull mode. It should be set to True.
    """
    lexer = pyshlex.PLYLexer()
    remaining = lexer.add(input, eof)
    if lexer.is_empty():
        return [], remaining
    if debug:
        debug = 2
    return yacc.parse(lexer=lexer, debug=debug), remaining

#-------------------------------------------------------------------------------
# AST rendering helpers
#-------------------------------------------------------------------------------

def format_commands(v):
    """Return a tree made of strings and lists. Make command trees easier to
    display.
    """
    if isinstance(v, list):
        return [format_commands(c) for c in v]
    if isinstance(v, tuple):
        if len(v)==2 and isinstance(v[0], str) and not isinstance(v[1], str):
            if v[0] == 'async':
                return ['AsyncList', map(format_commands, v[1])]
            else:
                #Avoid decomposing tuples like ('pipeline', Pipeline(...))
                return format_commands(v[1])
        return format_commands(list(v))
    elif isinstance(v, IfCond):
        name = ['IfCond']
        name += ['if', map(format_commands, v.cond)]
        name += ['then', map(format_commands, v.if_cmds)]
        name += ['else', map(format_commands, v.else_cmds)]
        return name
    elif isinstance(v, ForLoop):
        name = ['ForLoop']
        name += [repr(v.name)+' in ', map(str, v.items)]
        name += ['commands', map(format_commands, v.cmds)]
        return name
    elif isinstance(v, AndOr):
        return [v.op, format_commands(v.left), format_commands(v.right)]
    elif isinstance(v, Pipeline):
        name = 'Pipeline'
        if v.reverse_status:
            name = '!' + name
        return [name, format_commands(v.commands)]
    elif isinstance(v, Case):
        name = ['Case']
        name += [v.name, format_commands(v.items)]
    elif isinstance(v, SimpleCommand):
        name = ['SimpleCommand']
        if v.words:
            name += ['words', map(str, v.words)]
        if v.assigns:
            assigns = [tuple(a[1]) for a in v.assigns]
            name += ['assigns', map(str, assigns)]
        if v.redirs:
            name += ['redirs', map(format_commands, v.redirs)]
        return name
    elif isinstance(v, RedirectList):
        name = ['RedirectList']
        if v.redirs:
            name += ['redirs', map(format_commands, v.redirs)]
        name += ['command', format_commands(v.cmd)]
        return name
    elif isinstance(v, IORedirect):
        return ' '.join(map(str, (v.io_number, v.op, v.filename)))
    elif isinstance(v, HereDocument):
        return ' '.join(map(str, (v.io_number, v.op, repr(v.name), repr(v.content))))
    elif isinstance(v, SubShell):
        return ['SubShell', map(format_commands, v.cmds)]
    else:
        return repr(v)

def print_commands(cmds, output=sys.stdout):
    """Pretty print a command tree."""
    def print_tree(cmd, spaces, output):
        if isinstance(cmd, list):
            for c in cmd:
                print_tree(c, spaces + 3, output)
        else:
            print >>output, ' '*spaces + str(cmd)

    formatted = format_commands(cmds)
    print_tree(formatted, 0, output)


def stringify_commands(cmds):
    """Serialize a command tree as a string.

    Returned string is not pretty and is currently used for unit tests only.
    """
    def stringify(value):
        output = []
        if isinstance(value, list):
            formatted = []
            for v in value:
                formatted.append(stringify(v))
            formatted = ' '.join(formatted)
            output.append(''.join(['<', formatted, '>']))
        else:
            output.append(value)
        return ' '.join(output)

    return stringify(format_commands(cmds))


def visit_commands(cmds, callable):
    """Visit the command tree and execute callable on every Pipeline and
    SimpleCommand instances.
    """
    if isinstance(cmds, (tuple, list)):
        map(lambda c: visit_commands(c,callable), cmds)
    elif isinstance(cmds, (Pipeline, SimpleCommand)):
        callable(cmds)
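The grammar above is driven through parse(); a short sketch of how the AST helpers at the end of the file fit together (a hypothetical driver, assuming the pysh modules are importable):

# Hypothetical driver: parse a script, then render the command tree with
# the helpers defined above.
import pyshyacc

commands, remaining = pyshyacc.parse('if true; then echo yes; fi\n', eof=True)
pyshyacc.print_commands(commands)            # indented tree of IfCond/SimpleCommand
print pyshyacc.stringify_commands(commands)  # compact form used by the unit tests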
@@ -1,41 +0,0 @@
# sherrors.py - shell errors and signals
#
# Copyright 2007 Patrick Mezard
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

"""Define shell exceptions and error codes.
"""

class ShellError(Exception):
    pass

class ShellSyntaxError(ShellError):
    pass

class UtilityError(ShellError):
    """Raised upon utility syntax error (option or operand error)."""
    pass

class ExpansionError(ShellError):
    pass

class CommandNotFound(ShellError):
    """Specified command was not found."""
    pass

class RedirectionError(ShellError):
    pass

class VarAssignmentError(ShellError):
    """Variable assignment error."""
    pass

class ExitSignal(ShellError):
    """Exit signal."""
    pass

class ReturnSignal(ShellError):
    """Exit signal."""
    pass
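Since p_error() in pyshyacc.py raises ShellSyntaxError, callers can trap either the specific class or the common ShellError base; a small sketch (hypothetical caller, and the exact failure mode of an unterminated script is an assumption):

# Hypothetical caller: a parse failure surfaces as ShellSyntaxError, which
# is also a ShellError, so either handler below would catch it.
import pyshyacc, sherrors

try:
    pyshyacc.parse('if true; then', eof=True)   # unterminated 'if'
except sherrors.ShellSyntaxError, e:
    print 'syntax error:', e
except sherrors.ShellError:
    print 'other shell-level failure'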
@@ -1,77 +0,0 @@
# subprocess - Subprocesses with accessible I/O streams
#
# For more information about this module, see PEP 324.
#
# This module should remain compatible with Python 2.2, see PEP 291.
#
# Copyright (c) 2003-2005 by Peter Astrand <astrand@lysator.liu.se>
#
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/2.4/license for licensing details.

def list2cmdline(seq):
    """
    Translate a sequence of arguments into a command line
    string, using the same rules as the MS C runtime:

    1) Arguments are delimited by white space, which is either a
       space or a tab.

    2) A string surrounded by double quotation marks is
       interpreted as a single argument, regardless of white space
       contained within.  A quoted string can be embedded in an
       argument.

    3) A double quotation mark preceded by a backslash is
       interpreted as a literal double quotation mark.

    4) Backslashes are interpreted literally, unless they
       immediately precede a double quotation mark.

    5) If backslashes immediately precede a double quotation mark,
       every pair of backslashes is interpreted as a literal
       backslash.  If the number of backslashes is odd, the last
       backslash escapes the next double quotation mark as
       described in rule 3.
    """

    # See
    # http://msdn.microsoft.com/library/en-us/vccelng/htm/progs_12.asp
    result = []
    needquote = False
    for arg in seq:
        bs_buf = []

        # Add a space to separate this argument from the others
        if result:
            result.append(' ')

        needquote = (" " in arg) or ("\t" in arg) or ("|" in arg) or arg == ""
        if needquote:
            result.append('"')

        for c in arg:
            if c == '\\':
                # Don't know if we need to double yet.
                bs_buf.append(c)
            elif c == '"':
                # Double backspaces.
                result.append('\\' * len(bs_buf)*2)
                bs_buf = []
                result.append('\\"')
            else:
                # Normal char
                if bs_buf:
                    result.extend(bs_buf)
                    bs_buf = []
                result.append(c)

        # Add remaining backspaces, if any.
        if bs_buf:
            result.extend(bs_buf)

        if needquote:
            result.extend(bs_buf)
            result.append('"')

    return ''.join(result)
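The quoting rules in the docstring are easiest to see on concrete input, for example:

# Worked example of the quoting rules implemented above.
print list2cmdline(['copy', 'my file.txt', 'dest'])
# -> copy "my file.txt" dest       (rules 1-2: whitespace forces quoting)
print list2cmdline(['echo', 'say "hi"'])
# -> echo "say \"hi\""             (rule 3: embedded quotes are backslash-escaped)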
File diff suppressed because it is too large
@@ -1,191 +0,0 @@
#
# BitBake 'dummy' Passthrough Server
#
# Copyright (C) 2006 - 2007  Michael 'Mickey' Lauer
# Copyright (C) 2006 - 2008  Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

"""
This module implements a passthrough server for BitBake.

Use register_idle_function() to add a function which the server
calls from within idle_commands when no requests are pending. Make sure
that those functions are non-blocking or else you will introduce latency
in the server's main loop.
"""

import time
import bb
import pickle
import signal

DEBUG = False

import inspect, select

class BitBakeServerCommands():
    def __init__(self, server, cooker):
        self.cooker = cooker
        self.server = server

    def runCommand(self, command):
        """
        Run a cooker command on the server
        """
        #print "Running Command %s" % command
        return self.cooker.command.runCommand(command)

    def terminateServer(self):
        """
        Trigger the server to quit
        """
        self.server.server_exit()
        #print "Server (cooker) exitting"
        return

    def ping(self):
        """
        Dummy method which can be used to check the server is still alive
        """
        return True

eventQueue = []

class BBUIEventQueue:
    class event:
        def __init__(self, parent):
            self.parent = parent
        @staticmethod
        def send(event):
            bb.server.none.eventQueue.append(pickle.loads(event))
        @staticmethod
        def quit():
            return

    def __init__(self, BBServer):
        self.eventQueue = bb.server.none.eventQueue
        self.BBServer = BBServer
        self.EventHandle = bb.event.register_UIHhandler(self)

    def __popEvent(self):
        if len(self.eventQueue) == 0:
            return None
        return self.eventQueue.pop(0)

    def getEvent(self):
        if len(self.eventQueue) == 0:
            self.BBServer.idle_commands(0)
        return self.__popEvent()

    def waitEvent(self, delay):
        event = self.__popEvent()
        if event:
            return event
        self.BBServer.idle_commands(delay)
        return self.__popEvent()

    def queue_event(self, event):
        self.eventQueue.append(event)

    def system_quit( self ):
        bb.event.unregister_UIHhandler(self.EventHandle)

# Dummy signal handler to ensure we break out of sleep upon SIGCHLD
def chldhandler(signum, stackframe):
    pass

class BitBakeServer():
    # remove this when you're done with debugging
    # allow_reuse_address = True

    def __init__(self, cooker):
        self._idlefuns = {}
        self.commands = BitBakeServerCommands(self, cooker)

    def register_idle_function(self, function, data):
        """Register a function to be called while the server is idle"""
        assert hasattr(function, '__call__')
        self._idlefuns[function] = data

    def idle_commands(self, delay):
        #print "Idle queue length %s" % len(self._idlefuns)
        #print "Idle timeout, running idle functions"
        #if len(self._idlefuns) == 0:
        nextsleep = delay
        for function, data in self._idlefuns.items():
            try:
                retval = function(self, data, False)
                #print "Idle function returned %s" % (retval)
                if retval is False:
                    del self._idlefuns[function]
                elif retval is True:
                    nextsleep = None
                elif nextsleep is None:
                    continue
                elif retval < nextsleep:
                    nextsleep = retval
            except SystemExit:
                raise
            except:
                import traceback
                traceback.print_exc()
                self.commands.runCommand(["stateShutdown"])
                pass
        if nextsleep is not None:
            #print "Sleeping for %s (%s)" % (nextsleep, delay)
            signal.signal(signal.SIGCHLD, chldhandler)
            time.sleep(nextsleep)
            signal.signal(signal.SIGCHLD, signal.SIG_DFL)

    def server_exit(self):
        # Tell idle functions we're exiting
        for function, data in self._idlefuns.items():
            try:
                retval = function(self, data, True)
            except:
                pass

class BitbakeServerInfo():
    def __init__(self, server):
        self.server = server
        self.commands = server.commands

class BitBakeServerFork():
    def __init__(self, cooker, server, serverinfo, logfile):
        serverinfo.logfile = logfile
        serverinfo.cooker = cooker
        serverinfo.server = server

class BitbakeUILauch():
    def launch(self, serverinfo, uifunc, *args):
        return bb.cooker.server_main(serverinfo.cooker, uifunc, *args)

class BitBakeServerConnection():
    def __init__(self, serverinfo):
        self.server = serverinfo.server
        self.connection = serverinfo.commands
        self.events = bb.server.none.BBUIEventQueue(self.server)
        for event in bb.event.ui_queue:
            self.events.queue_event(event)

    def terminate(self):
        try:
            self.events.system_quit()
        except:
            pass
        try:
            self.connection.terminateServer()
        except:
            pass
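The idle-function contract implied by idle_commands() above: each registered callback is invoked as function(server, data, abort) and returns False to deregister itself, True to be polled again without sleeping, or a delay in seconds. A sketch of a conforming callback (hypothetical, not part of this tree):

# Hypothetical idle callback: poll a few times, then deregister.
def poll_queue(server, data, abort):
    if abort or data['remaining'] <= 0:
        return False              # deregister this idle function
    data['remaining'] -= 1
    return 0.5                    # ask to be called again in ~0.5s

# server = BitBakeServer(cooker)
# server.register_idle_function(poll_queue, {'remaining': 10})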
@@ -1,273 +0,0 @@
#
# BitBake XMLRPC Server
#
# Copyright (C) 2006 - 2007  Michael 'Mickey' Lauer
# Copyright (C) 2006 - 2008  Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

"""
This module implements an xmlrpc server for BitBake.

Use this by deriving a class from BitBakeXMLRPCServer and then adding
methods which you want to "export" via XMLRPC. If the methods have the
prefix xmlrpc_, then registering those function will happen automatically,
if not, you need to call register_function.

Use register_idle_function() to add a function which the xmlrpc server
calls from within server_forever when no requests are pending. Make sure
that those functions are non-blocking or else you will introduce latency
in the server's main loop.
"""

import bb
import xmlrpclib, sys
from bb import daemonize
from bb.ui import uievent

DEBUG = False

from SimpleXMLRPCServer import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
import inspect, select

if sys.hexversion < 0x020600F0:
    print("Sorry, python 2.6 or later is required for bitbake's XMLRPC mode")
    sys.exit(1)

##
# The xmlrpclib.Transport class has undergone various changes in Python 2.7
# which break BitBake's XMLRPC implementation.
# To work around this we subclass Transport and have a copy/paste of method
# implementations from Python 2.6.6's xmlrpclib.
#
# Upstream Python bug is #8194 (http://bugs.python.org/issue8194)
# This bug is relevant for Python 2.7.0 and 2.7.1 but was fixed for
# Python > 2.7.2
##

class BBTransport(xmlrpclib.Transport):
    def request(self, host, handler, request_body, verbose=0):
        h = self.make_connection(host)
        if verbose:
            h.set_debuglevel(1)

        self.send_request(h, handler, request_body)
        self.send_host(h, host)
        self.send_user_agent(h)
        self.send_content(h, request_body)

        errcode, errmsg, headers = h.getreply()

        if errcode != 200:
            raise ProtocolError(
                host + handler,
                errcode, errmsg,
                headers
                )

        self.verbose = verbose

        try:
            sock = h._conn.sock
        except AttributeError:
            sock = None

        return self._parse_response(h.getfile(), sock)

    def make_connection(self, host):
        import httplib
        host, extra_headers, x509 = self.get_host_info(host)
        return httplib.HTTP(host)

    def _parse_response(self, file, sock):
        p, u = self.getparser()

        while 1:
            if sock:
                response = sock.recv(1024)
            else:
                response = file.read(1024)
            if not response:
                break
            if self.verbose:
                print "body:", repr(response)
            p.feed(response)

        file.close()
        p.close()

        return u.close()

def _create_server(host, port):
    # Python 2.7.0 and 2.7.1 have a buggy Transport implementation
    # For those versions of Python, and only those versions, use our
    # own copy/paste BBTransport class.
    if (2, 7, 0) <= sys.version_info < (2, 7, 2):
        t = BBTransport()
        s = xmlrpclib.Server("http://%s:%d/" % (host, port), transport=t, allow_none=True)
    else:
        s = xmlrpclib.Server("http://%s:%d/" % (host, port), allow_none=True)

    return s

class BitBakeServerCommands():
    def __init__(self, server, cooker):
        self.cooker = cooker
        self.server = server

    def registerEventHandler(self, host, port):
        """
        Register a remote UI Event Handler
        """
        s = _create_server(host, port)

        return bb.event.register_UIHhandler(s)

    def unregisterEventHandler(self, handlerNum):
        """
        Unregister a remote UI Event Handler
        """
        return bb.event.unregister_UIHhandler(handlerNum)

    def runCommand(self, command):
        """
        Run a cooker command on the server
        """
        return self.cooker.command.runCommand(command)

    def terminateServer(self):
        """
        Trigger the server to quit
        """
        self.server.quit = True
        print("Server (cooker) exitting")
        return

    def ping(self):
        """
        Dummy method which can be used to check the server is still alive
        """
        return True

class BitBakeServer(SimpleXMLRPCServer):
    # remove this when you're done with debugging
    # allow_reuse_address = True

    def __init__(self, cooker, interface = ("localhost", 0)):
        """
        Constructor
        """
        SimpleXMLRPCServer.__init__(self, interface,
                                    requestHandler=SimpleXMLRPCRequestHandler,
                                    logRequests=False, allow_none=True)
        self._idlefuns = {}
        self.host, self.port = self.socket.getsockname()
        #self.register_introspection_functions()
        commands = BitBakeServerCommands(self, cooker)
        self.autoregister_all_functions(commands, "")
        self.cooker = cooker

    def autoregister_all_functions(self, context, prefix):
        """
        Convenience method for registering all functions in the scope
        of this class that start with a common prefix
        """
        methodlist = inspect.getmembers(context, inspect.ismethod)
        for name, method in methodlist:
            if name.startswith(prefix):
                self.register_function(method, name[len(prefix):])

    def register_idle_function(self, function, data):
        """Register a function to be called while the server is idle"""
        assert hasattr(function, '__call__')
        self._idlefuns[function] = data

    def serve_forever(self):
        bb.cooker.server_main(self.cooker, self._serve_forever)

    def _serve_forever(self):
        """
        Serve Requests. Overloaded to honor a quit command
        """
        self.quit = False
        self.timeout = 0 # Run Idle calls for our first callback
        while not self.quit:
            #print "Idle queue length %s" % len(self._idlefuns)
            self.handle_request()
            #print "Idle timeout, running idle functions"
            nextsleep = None
            for function, data in self._idlefuns.items():
                try:
                    retval = function(self, data, False)
                    if retval is False:
                        del self._idlefuns[function]
                    elif retval is True:
                        nextsleep = 0
                    elif nextsleep is 0:
                        continue
                    elif nextsleep is None:
                        nextsleep = retval
                    elif retval < nextsleep:
                        nextsleep = retval
                except SystemExit:
                    raise
                except:
                    import traceback
                    traceback.print_exc()
                    pass
            if nextsleep is None and len(self._idlefuns) > 0:
                nextsleep = 0
            self.timeout = nextsleep
        # Tell idle functions we're exiting
        for function, data in self._idlefuns.items():
            try:
                retval = function(self, data, True)
            except:
                pass

        self.server_close()
        return

class BitbakeServerInfo():
    def __init__(self, server):
        self.host = server.host
        self.port = server.port

class BitBakeServerFork():
    def __init__(self, cooker, server, serverinfo, logfile):
        daemonize.createDaemon(server.serve_forever, logfile)

class BitbakeUILauch():
    def launch(self, serverinfo, uifunc, *args):
        return uifunc(*args)

class BitBakeServerConnection():
    def __init__(self, serverinfo):
        self.connection = _create_server(serverinfo.host, serverinfo.port)
        self.events = uievent.BBUIEventQueue(self.connection)
        for event in bb.event.ui_queue:
            self.events.queue_event(event)

    def terminate(self):
        # Don't wait for server indefinitely
        import socket
        socket.setdefaulttimeout(2)
        try:
            self.events.system_quit()
        except:
            pass
        try:
            self.connection.terminateServer()
        except:
            pass
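On the client side the connection object wraps an xmlrpclib proxy, so exported commands are invoked like local methods; roughly (a sketch assuming a running server instance):

# Hypothetical client-side use of the XMLRPC server above.
serverinfo = BitbakeServerInfo(server)       # records server.host / server.port
connection = BitBakeServerConnection(serverinfo)
print connection.connection.ping()           # -> True while the cooker is alive
connection.terminate()                       # detach UI handler, then quit server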
@@ -52,14 +52,12 @@ PROBLEMS:
# Import and setup global variables
##########################################################################

from __future__ import print_function
from functools import reduce
try:
    set
except NameError:
    from sets import Set as set
import sys, os, readline, socket, httplib, urllib, commands, popen2, shlex, Queue, fnmatch
from bb import data, parse, build, cache, taskdata, runqueue, providers as Providers
import sys, os, readline, socket, httplib, urllib, commands, popen2, copy, shlex, Queue, fnmatch
from bb import data, parse, build, fatal, cache, taskdata, runqueue, providers as Providers

__version__ = "0.5.3.1"
__credits__ = """BitBake Shell Version %s (C) 2005 Michael 'Mickey' Lauer <mickey@Vanille.de>
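The paired import lines above, and the paired print lines in the hunks that follow, are the two sides of the print-function migration: the newer tree imports print_function so that print() calls work on Python 2.6, while the older tree keeps print statements. For illustration:

# Illustration of the migration pattern shown in these hunks.
from __future__ import print_function
print("SHELL: This command needs to parse bbfiles...")  # newer form
# print "SHELL: ..."   # older statement form; a SyntaxError once the
#                      # __future__ import above is in effect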
@@ -100,7 +98,7 @@ class BitBakeShellCommands:
    def _checkParsed( self ):
        if not parsed:
            print("SHELL: This command needs to parse bbfiles...")
            print "SHELL: This command needs to parse bbfiles..."
            self.parse( None )

    def _findProvider( self, item ):
@@ -121,37 +119,40 @@ class BitBakeShellCommands:
        """Register a new name for a command"""
        new, old = params
        if not old in cmds:
            print("ERROR: Command '%s' not known" % old)
            print "ERROR: Command '%s' not known" % old
        else:
            cmds[new] = cmds[old]
            print("OK")
            print "OK"
    alias.usage = "<alias> <command>"

    def buffer( self, params ):
        """Dump specified output buffer"""
        index = params[0]
        print(self._shell.myout.buffer( int( index ) ))
        print self._shell.myout.buffer( int( index ) )
    buffer.usage = "<index>"

    def buffers( self, params ):
        """Show the available output buffers"""
        commands = self._shell.myout.bufferedCommands()
        if not commands:
            print("SHELL: No buffered commands available yet. Start doing something.")
            print "SHELL: No buffered commands available yet. Start doing something."
        else:
            print("="*35, "Available Output Buffers", "="*27)
            print "="*35, "Available Output Buffers", "="*27
            for index, cmd in enumerate( commands ):
                print("| %s %s" % ( str( index ).ljust( 3 ), cmd ))
            print("="*88)
                print "| %s %s" % ( str( index ).ljust( 3 ), cmd )
            print "="*88

    def build( self, params, cmd = "build" ):
        """Build a providee"""
        global last_exception
        globexpr = params[0]
        self._checkParsed()
        names = globfilter( cooker.status.pkg_pn, globexpr )
        names = globfilter( cooker.status.pkg_pn.keys(), globexpr )
        if len( names ) == 0: names = [ globexpr ]
        print("SHELL: Building %s" % ' '.join( names ))
        print "SHELL: Building %s" % ' '.join( names )

        oldcmd = cooker.configuration.cmd
        cooker.configuration.cmd = cmd

        td = taskdata.TaskData(cooker.configuration.abort)
        localdata = data.createCopy(cooker.configuration.data)
@@ -167,25 +168,28 @@ class BitBakeShellCommands:
            if len(providers) == 0:
                raise Providers.NoProvider

            tasks.append([name, "do_%s" % cmd])
            tasks.append([name, "do_%s" % cooker.configuration.cmd])

            td.add_unresolved(localdata, cooker.status)


            rq = runqueue.RunQueue(cooker, localdata, cooker.status, td, tasks)
            rq.prepare_runqueue()
            rq.execute_runqueue()

        except Providers.NoProvider:
            print("ERROR: No Provider")
            print "ERROR: No Provider"
            last_exception = Providers.NoProvider

        except runqueue.TaskFailure as fnids:
        except runqueue.TaskFailure, fnids:
            for fnid in fnids:
                print "ERROR: '%s' failed" % td.fn_index[fnid]
            last_exception = runqueue.TaskFailure

        except build.FuncFailed as e:
            print("ERROR: Couldn't build '%s'" % names)
        except build.EventException, e:
            print "ERROR: Couldn't build '%s'" % names
            last_exception = e

        cooker.configuration.cmd = oldcmd

    build.usage = "<providee>"
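Reduced to its skeleton, the execution flow in build() above is: create a TaskData, resolve targets into [name, task] pairs, then hand everything to a RunQueue (a sketch with error handling elided; the target name is a stand-in):

# Skeleton of the flow in build() above; 'busybox' is a stand-in target.
td = taskdata.TaskData(cooker.configuration.abort)
localdata = data.createCopy(cooker.configuration.data)
tasks = [['busybox', 'do_build']]

td.add_unresolved(localdata, cooker.status)
rq = runqueue.RunQueue(cooker, localdata, cooker.status, td, tasks)
rq.prepare_runqueue()
rq.execute_runqueue()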
@@ -204,11 +208,6 @@ class BitBakeShellCommands:
        self.build( params, "configure" )
    configure.usage = "<providee>"

    def install( self, params ):
        """Execute 'install' on a providee"""
        self.build( params, "install" )
    install.usage = "<providee>"

    def edit( self, params ):
        """Call $EDITOR on a providee"""
        name = params[0]
@@ -216,7 +215,7 @@ class BitBakeShellCommands:
        if bbfile is not None:
            os.system( "%s %s" % ( os.environ.get( "EDITOR", "vi" ), bbfile ) )
        else:
            print("ERROR: Nothing provides '%s'" % name)
            print "ERROR: Nothing provides '%s'" % name
    edit.usage = "<providee>"

    def environment( self, params ):
@@ -239,16 +238,34 @@ class BitBakeShellCommands:
        global last_exception
        name = params[0]
        bf = completeFilePath( name )
        print("SHELL: Calling '%s' on '%s'" % ( cmd, bf ))
        print "SHELL: Calling '%s' on '%s'" % ( cmd, bf )

        oldcmd = cooker.configuration.cmd
        cooker.configuration.cmd = cmd

        thisdata = data.createCopy(cooker.configuration.data)
        data.update_data(thisdata)
        data.expandKeys(thisdata)

        try:
            cooker.buildFile(bf, cmd)
            bbfile_data = parse.handle( bf, thisdata )
        except parse.ParseError:
            print("ERROR: Unable to open or parse '%s'" % bf)
        except build.FuncFailed as e:
            print("ERROR: Couldn't build '%s'" % name)
            last_exception = e
            print "ERROR: Unable to open or parse '%s'" % bf
        else:
            # Remove stamp for target if force mode active
            if cooker.configuration.force:
                bb.msg.note(2, bb.msg.domain.RunQueue, "Remove stamp %s, %s" % (cmd, bf))
                bb.build.del_stamp('do_%s' % cmd, bbfile_data)

            item = data.getVar('PN', bbfile_data, 1)
            data.setVar( "_task_cache", [], bbfile_data ) # force
            try:
                cooker.tryBuildPackage( os.path.abspath( bf ), item, cmd, bbfile_data, True )
            except build.EventException, e:
                print "ERROR: Couldn't build '%s'" % name
                last_exception = e

        cooker.configuration.cmd = oldcmd
    fileBuild.usage = "<bbfile>"

    def fileClean( self, params ):
@@ -270,60 +287,64 @@ class BitBakeShellCommands:
    def fileReparse( self, params ):
        """(re)Parse a bb file"""
        bbfile = params[0]
        print("SHELL: Parsing '%s'" % bbfile)
        print "SHELL: Parsing '%s'" % bbfile
        parse.update_mtime( bbfile )
        cooker.parser.reparse(bbfile)
        cooker.bb_cache.cacheValidUpdate(bbfile)
        fromCache = cooker.bb_cache.loadData(bbfile, cooker.configuration.data)
        cooker.bb_cache.sync()
        if False: #fromCache:
            print("SHELL: File has not been updated, not reparsing")
            print "SHELL: File has not been updated, not reparsing"
        else:
            print("SHELL: Parsed")
            print "SHELL: Parsed"
    fileReparse.usage = "<bbfile>"

    def abort( self, params ):
        """Toggle abort task execution flag (see bitbake -k)"""
        cooker.configuration.abort = not cooker.configuration.abort
        print("SHELL: Abort Flag is now '%s'" % repr( cooker.configuration.abort ))
        print "SHELL: Abort Flag is now '%s'" % repr( cooker.configuration.abort )

    def force( self, params ):
        """Toggle force task execution flag (see bitbake -f)"""
        cooker.configuration.force = not cooker.configuration.force
        print("SHELL: Force Flag is now '%s'" % repr( cooker.configuration.force ))
        print "SHELL: Force Flag is now '%s'" % repr( cooker.configuration.force )

    def help( self, params ):
        """Show a comprehensive list of commands and their purpose"""
        print("="*30, "Available Commands", "="*30)
        for cmd in sorted(cmds):
            function, numparams, usage, helptext = cmds[cmd]
            print("| %s | %s" % (usage.ljust(30), helptext))
        print("="*78)
        print "="*30, "Available Commands", "="*30
        allcmds = cmds.keys()
        allcmds.sort()
        for cmd in allcmds:
            function,numparams,usage,helptext = cmds[cmd]
            print "| %s | %s" % (usage.ljust(30), helptext)
        print "="*78

    def lastError( self, params ):
        """Show the reason or log that was produced by the last BitBake event exception"""
        if last_exception is None:
            print("SHELL: No Errors yet (Phew)...")
            print "SHELL: No Errors yet (Phew)..."
        else:
            reason, event = last_exception.args
            print("SHELL: Reason for the last error: '%s'" % reason)
            print "SHELL: Reason for the last error: '%s'" % reason
            if ':' in reason:
                msg, filename = reason.split( ':' )
                filename = filename.strip()
                print("SHELL: Dumping log file for last error:")
                print "SHELL: Dumping log file for last error:"
                try:
                    print(open( filename ).read())
                    print open( filename ).read()
                except IOError:
                    print("ERROR: Couldn't open '%s'" % filename)
                    print "ERROR: Couldn't open '%s'" % filename

    def match( self, params ):
        """Dump all files or providers matching a glob expression"""
        what, globexpr = params
        if what == "files":
            self._checkParsed()
            for key in globfilter( cooker.status.pkg_fn, globexpr ): print(key)
            for key in globfilter( cooker.status.pkg_fn.keys(), globexpr ): print key
        elif what == "providers":
            self._checkParsed()
            for key in globfilter( cooker.status.pkg_pn, globexpr ): print(key)
            for key in globfilter( cooker.status.pkg_pn.keys(), globexpr ): print key
        else:
            print("Usage: match %s" % self.print_.usage)
            print "Usage: match %s" % self.print_.usage
    match.usage = "<files|providers> <glob>"

    def new( self, params ):
@@ -333,15 +354,15 @@ class BitBakeShellCommands:
        fulldirname = "%s/%s" % ( packages, dirname )

        if not os.path.exists( fulldirname ):
            print("SHELL: Creating '%s'" % fulldirname)
            print "SHELL: Creating '%s'" % fulldirname
            os.mkdir( fulldirname )
        if os.path.exists( fulldirname ) and os.path.isdir( fulldirname ):
            if os.path.exists( "%s/%s" % ( fulldirname, filename ) ):
                print("SHELL: ERROR: %s/%s already exists" % ( fulldirname, filename ))
                print "SHELL: ERROR: %s/%s already exists" % ( fulldirname, filename )
                return False
            print("SHELL: Creating '%s/%s'" % ( fulldirname, filename ))
            print "SHELL: Creating '%s/%s'" % ( fulldirname, filename )
            newpackage = open( "%s/%s" % ( fulldirname, filename ), "w" )
            print("""DESCRIPTION = ""
            print >>newpackage,"""DESCRIPTION = ""
SECTION = ""
AUTHOR = ""
HOMEPAGE = ""
@@ -368,7 +389,7 @@ SRC_URI = ""
#do_install() {
#
#}
""", file=newpackage)
"""
            newpackage.close()
            os.system( "%s %s/%s" % ( os.environ.get( "EDITOR" ), fulldirname, filename ) )
    new.usage = "<directory> <filename>"
@@ -388,14 +409,14 @@ SRC_URI = ""
    def pasteLog( self, params ):
        """Send the last event exception error log (if there is one) to http://rafb.net/paste"""
        if last_exception is None:
            print("SHELL: No Errors yet (Phew)...")
            print "SHELL: No Errors yet (Phew)..."
        else:
            reason, event = last_exception.args
            print("SHELL: Reason for the last error: '%s'" % reason)
            print "SHELL: Reason for the last error: '%s'" % reason
            if ':' in reason:
                msg, filename = reason.split( ':' )
                filename = filename.strip()
                print("SHELL: Pasting log file to pastebin...")
                print "SHELL: Pasting log file to pastebin..."

                file = open( filename ).read()
                sendToPastebin( "contents of " + filename, file )
@@ -417,23 +438,23 @@ SRC_URI = ""
            cooker.buildDepgraph()
            global parsed
            parsed = True
            print()
            print

    def reparse( self, params ):
        """(re)Parse a providee's bb file"""
        bbfile = self._findProvider( params[0] )
        if bbfile is not None:
            print("SHELL: Found bbfile '%s' for '%s'" % ( bbfile, params[0] ))
            print "SHELL: Found bbfile '%s' for '%s'" % ( bbfile, params[0] )
            self.fileReparse( [ bbfile ] )
        else:
            print("ERROR: Nothing provides '%s'" % params[0])
            print "ERROR: Nothing provides '%s'" % params[0]
    reparse.usage = "<providee>"

    def getvar( self, params ):
        """Dump the contents of an outer BitBake environment variable"""
        var = params[0]
        value = data.getVar( var, cooker.configuration.data, 1 )
        print(value)
        print value
    getvar.usage = "<variable>"

    def peek( self, params ):
@@ -441,11 +462,11 @@ SRC_URI = ""
        name, var = params
        bbfile = self._findProvider( name )
        if bbfile is not None:
            the_data = cache.Cache.loadDataFull(bbfile, cooker.configuration.data)
            the_data = cooker.bb_cache.loadDataFull(bbfile, cooker.configuration.data)
            value = the_data.getVar( var, 1 )
            print(value)
            print value
        else:
            print("ERROR: Nothing provides '%s'" % name)
            print "ERROR: Nothing provides '%s'" % name
    peek.usage = "<providee> <variable>"

    def poke( self, params ):
@@ -453,7 +474,7 @@ SRC_URI = ""
        name, var, value = params
        bbfile = self._findProvider( name )
        if bbfile is not None:
            print("ERROR: Sorry, this functionality is currently broken")
            print "ERROR: Sorry, this functionality is currently broken"
            #d = cooker.pkgdata[bbfile]
            #data.setVar( var, value, d )
@@ -461,7 +482,7 @@ SRC_URI = ""
            #cooker.pkgdata.setDirty(bbfile, d)
            #print "OK"
        else:
            print("ERROR: Nothing provides '%s'" % name)
            print "ERROR: Nothing provides '%s'" % name
    poke.usage = "<providee> <variable> <value>"

    def print_( self, params ):
@@ -469,12 +490,12 @@ SRC_URI = ""
        what = params[0]
        if what == "files":
            self._checkParsed()
            for key in cooker.status.pkg_fn: print(key)
            for key in cooker.status.pkg_fn.keys(): print key
        elif what == "providers":
            self._checkParsed()
            for key in cooker.status.providers: print(key)
            for key in cooker.status.providers.keys(): print key
        else:
            print("Usage: print %s" % self.print_.usage)
            print "Usage: print %s" % self.print_.usage
    print_.usage = "<files|providers>"

    def python( self, params ):
@@ -486,7 +507,7 @@ SRC_URI = ""
|
||||
interpreter.interact( "SHELL: Expert Mode - BitBake Python %s\nType 'help' for more information, press CTRL-D to switch back to BBSHELL." % sys.version )
|
||||
|
||||
def showdata( self, params ):
|
||||
"""Execute 'showdata' on a providee"""
|
||||
"""Show the parsed metadata for a given providee"""
|
||||
cooker.showEnvironment(None, params)
|
||||
showdata.usage = "<providee>"
|
||||
|
||||
@@ -494,7 +515,7 @@ SRC_URI = ""
|
||||
"""Set an outer BitBake environment variable"""
|
||||
var, value = params
|
||||
data.setVar( var, value, cooker.configuration.data )
|
||||
print("OK")
|
||||
print "OK"
|
||||
setVar.usage = "<variable> <value>"
|
||||
|
||||
def rebuild( self, params ):
|
||||
@@ -506,27 +527,27 @@ SRC_URI = ""
|
||||
def shell( self, params ):
|
||||
"""Execute a shell command and dump the output"""
|
||||
if params != "":
|
||||
print(commands.getoutput( " ".join( params ) ))
|
||||
print commands.getoutput( " ".join( params ) )
|
||||
shell.usage = "<...>"
|
||||
|
||||
def stage( self, params ):
|
||||
"""Execute 'stage' on a providee"""
|
||||
self.build( params, "populate_staging" )
|
||||
self.build( params, "stage" )
|
||||
stage.usage = "<providee>"
|
||||
|
||||
def status( self, params ):
|
||||
"""<just for testing>"""
|
||||
print("-" * 78)
|
||||
print("building list = '%s'" % cooker.building_list)
|
||||
print("build path = '%s'" % cooker.build_path)
|
||||
print("consider_msgs_cache = '%s'" % cooker.consider_msgs_cache)
|
||||
print("build stats = '%s'" % cooker.stats)
|
||||
if last_exception is not None: print("last_exception = '%s'" % repr( last_exception.args ))
|
||||
print("memory output contents = '%s'" % self._shell.myout._buffer)
|
||||
print "-" * 78
|
||||
print "building list = '%s'" % cooker.building_list
|
||||
print "build path = '%s'" % cooker.build_path
|
||||
print "consider_msgs_cache = '%s'" % cooker.consider_msgs_cache
|
||||
print "build stats = '%s'" % cooker.stats
|
||||
if last_exception is not None: print "last_exception = '%s'" % repr( last_exception.args )
|
||||
print "memory output contents = '%s'" % self._shell.myout._buffer
|
||||
|
||||
def test( self, params ):
|
||||
"""<just for testing>"""
|
||||
print("testCommand called with '%s'" % params)
|
||||
print "testCommand called with '%s'" % params
|
||||
|
||||
def unpack( self, params ):
|
||||
"""Execute 'unpack' on a providee"""
|
||||
@@ -551,12 +572,12 @@ SRC_URI = ""
|
||||
try:
|
||||
providers = cooker.status.providers[item]
|
||||
except KeyError:
|
||||
print("SHELL: ERROR: Nothing provides", preferred)
|
||||
print "SHELL: ERROR: Nothing provides", preferred
|
||||
else:
|
||||
for provider in providers:
|
||||
if provider == pf: provider = " (***) %s" % provider
|
||||
else: provider = " %s" % provider
|
||||
print(provider)
|
||||
print provider
|
||||
which.usage = "<providee>"
|
||||
|
||||
##########################################################################
|
||||
@@ -565,9 +586,8 @@ SRC_URI = ""
|
||||
|
||||
def completeFilePath( bbfile ):
|
||||
"""Get the complete bbfile path"""
|
||||
if not cooker.status: return bbfile
|
||||
if not cooker.status.pkg_fn: return bbfile
|
||||
for key in cooker.status.pkg_fn:
|
||||
for key in cooker.status.pkg_fn.keys():
|
||||
if key.endswith( bbfile ):
|
||||
return key
|
||||
return bbfile
|
||||
@@ -581,7 +601,7 @@ def sendToPastebin( desc, content ):
|
||||
mydata["nick"] = "%s@%s" % ( os.environ.get( "USER", "unknown" ), socket.gethostname() or "unknown" )
|
||||
mydata["text"] = content
|
||||
params = urllib.urlencode( mydata )
|
||||
headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
|
||||
headers = {"Content-type": "application/x-www-form-urlencoded","Accept": "text/plain"}
|
||||
|
||||
host = "rafb.net"
|
||||
conn = httplib.HTTPConnection( "%s:80" % host )
|
||||
@@ -592,9 +612,9 @@ def sendToPastebin( desc, content ):
|
||||
|
||||
if response.status == 302:
|
||||
location = response.getheader( "location" ) or "unknown"
|
||||
print("SHELL: Pasted to http://%s%s" % ( host, location ))
|
||||
print "SHELL: Pasted to http://%s%s" % ( host, location )
|
||||
else:
|
||||
print("ERROR: %s %s" % ( response.status, response.reason ))
|
||||
print "ERROR: %s %s" % ( response.status, response.reason )
|
||||
|
||||
def completer( text, state ):
|
||||
"""Return a possible readline completion"""
|
||||
@@ -611,7 +631,7 @@ def completer( text, state ):
|
||||
allmatches = cooker.configuration.data.keys()
|
||||
elif u == "<bbfile>":
|
||||
if cooker.status.pkg_fn is None: allmatches = [ "(No Matches Available. Parsed yet?)" ]
|
||||
else: allmatches = [ x.split("/")[-1] for x in cooker.status.pkg_fn ]
|
||||
else: allmatches = [ x.split("/")[-1] for x in cooker.status.pkg_fn.keys() ]
|
||||
elif u == "<providee>":
|
||||
if cooker.status.pkg_fn is None: allmatches = [ "(No Matches Available. Parsed yet?)" ]
|
||||
else: allmatches = cooker.status.providers.iterkeys()
|
||||
@@ -641,7 +661,7 @@ def columnize( alist, width = 80 ):
|
||||
return reduce(lambda line, word, width=width: '%s%s%s' %
|
||||
(line,
|
||||
' \n'[(len(line[line.rfind('\n')+1:])
|
||||
+ len(word.split('\n', 1)[0]
|
||||
+ len(word.split('\n',1)[0]
|
||||
) >= width)],
|
||||
word),
|
||||
alist
|
||||
@@ -716,7 +736,7 @@ class BitBakeShell:
|
||||
except IOError:
|
||||
pass # It doesn't exist yet.
|
||||
|
||||
print(__credits__)
|
||||
print __credits__
|
||||
|
||||
def cleanup( self ):
|
||||
"""Write readline history and clean up resources"""
|
||||
@@ -724,7 +744,7 @@ class BitBakeShell:
|
||||
try:
|
||||
readline.write_history_file( self.historyfilename )
|
||||
except:
|
||||
print("SHELL: Unable to save command history")
|
||||
print "SHELL: Unable to save command history"
|
||||
|
||||
def registerCommand( self, command, function, numparams = 0, usage = "", helptext = "" ):
|
||||
"""Register a command"""
|
||||
@@ -738,11 +758,11 @@ class BitBakeShell:
|
||||
try:
|
||||
function, numparams, usage, helptext = cmds[command]
|
||||
except KeyError:
|
||||
print("SHELL: ERROR: '%s' command is not a valid command." % command)
|
||||
print "SHELL: ERROR: '%s' command is not a valid command." % command
|
||||
self.myout.removeLast()
|
||||
else:
|
||||
if (numparams != -1) and (not len( params ) == numparams):
|
||||
print("Usage: '%s'" % usage)
|
||||
print "Usage: '%s'" % usage
|
||||
return
|
||||
|
||||
result = function( self.commands, params )
|
||||
@@ -757,7 +777,7 @@ class BitBakeShell:
|
||||
if not cmdline:
|
||||
continue
|
||||
if "|" in cmdline:
|
||||
print("ERROR: '|' in startup file is not allowed. Ignoring line")
|
||||
print "ERROR: '|' in startup file is not allowed. Ignoring line"
|
||||
continue
|
||||
self.commandQ.put( cmdline.strip() )
|
||||
|
||||
@@ -799,10 +819,10 @@ class BitBakeShell:
|
||||
sys.stdout.write( pipe.fromchild.read() )
|
||||
#
|
||||
except EOFError:
|
||||
print()
|
||||
print
|
||||
return
|
||||
except KeyboardInterrupt:
|
||||
print()
|
||||
print
|
||||
|
||||
##########################################################################
|
||||
# Start function - called from the BitBake command line utility
|
||||
@@ -817,4 +837,4 @@ def start( aCooker ):
|
||||
bbshell.cleanup()
|
||||
|
||||
if __name__ == "__main__":
|
||||
print("SHELL: Sorry, this program should only be called by BitBake.")
|
||||
print "SHELL: Sorry, this program should only be called by BitBake."
|
||||
|
||||
@@ -1,298 +0,0 @@
import hashlib
import logging
import re
import bb.data

logger = logging.getLogger('BitBake.SigGen')

try:
    import cPickle as pickle
except ImportError:
    import pickle
    logger.info('Importing cPickle failed. Falling back to a very slow implementation.')

def init(d):
    siggens = [obj for obj in globals().itervalues()
               if type(obj) is type and issubclass(obj, SignatureGenerator)]

    desired = bb.data.getVar("BB_SIGNATURE_HANDLER", d, True) or "noop"
    for sg in siggens:
        if desired == sg.name:
            return sg(d)
            break
    else:
        logger.error("Invalid signature generator '%s', using default 'noop'\n"
                     "Available generators: %s",
                     ', '.join(obj.name for obj in siggens))
        return SignatureGenerator(d)

class SignatureGenerator(object):
    """
    """
    name = "noop"

    def __init__(self, data):
        return

    def finalise(self, fn, d, varient):
        return

    def get_taskhash(self, fn, task, deps, dataCache):
        return 0

    def set_taskdata(self, hashes, deps):
        return

    def stampfile(self, stampbase, file_name, taskname, extrainfo):
        return ("%s.%s.%s" % (stampbase, taskname, extrainfo)).rstrip('.')

class SignatureGeneratorBasic(SignatureGenerator):
    """
    """
    name = "basic"

    def __init__(self, data):
        self.basehash = {}
        self.taskhash = {}
        self.taskdeps = {}
        self.runtaskdeps = {}
        self.gendeps = {}
        self.lookupcache = {}
        self.basewhitelist = set((data.getVar("BB_HASHBASE_WHITELIST", True) or "").split())
        self.taskwhitelist = data.getVar("BB_HASHTASK_WHITELIST", True) or None

        if self.taskwhitelist:
            self.twl = re.compile(self.taskwhitelist)
        else:
            self.twl = None

    def _build_data(self, fn, d):

        tasklist, gendeps = bb.data.generate_dependencies(d)

        taskdeps = {}
        basehash = {}
        lookupcache = {}

        for task in tasklist:
            data = d.getVar(task, False)
            lookupcache[task] = data

            newdeps = gendeps[task]
            seen = set()
            while newdeps:
                nextdeps = newdeps
                seen |= nextdeps
                newdeps = set()
                for dep in nextdeps:
                    if dep in self.basewhitelist:
                        continue
                    newdeps |= gendeps[dep]
                newdeps -= seen

            alldeps = seen - self.basewhitelist

            for dep in sorted(alldeps):
                if dep in lookupcache:
                    var = lookupcache[dep]
                else:
                    var = d.getVar(dep, False)
                    lookupcache[dep] = var
                if var:
                    data = data + var
            if data is None:
                bb.error("Task %s from %s seems to be empty?!" % (task, fn))
            self.basehash[fn + "." + task] = hashlib.md5(data).hexdigest()
            taskdeps[task] = sorted(alldeps)

        self.taskdeps[fn] = taskdeps
        self.gendeps[fn] = gendeps
        self.lookupcache[fn] = lookupcache

        return taskdeps

    def finalise(self, fn, d, variant):

        if variant:
            fn = "virtual:" + variant + ":" + fn

        taskdeps = self._build_data(fn, d)

        #Slow but can be useful for debugging mismatched basehashes
        #for task in self.taskdeps[fn]:
        #    self.dump_sigtask(fn, task, d.getVar("STAMP", True), False)

        for task in taskdeps:
            d.setVar("BB_BASEHASH_task-%s" % task, self.basehash[fn + "." + task])

    def get_taskhash(self, fn, task, deps, dataCache):
        k = fn + "." + task
        data = dataCache.basetaskhash[k]
        self.runtaskdeps[k] = []
        for dep in sorted(deps):
            # We only manipulate the dependencies for packages not in the whitelist
            if self.twl and not self.twl.search(dataCache.pkg_fn[fn]):
                # then process the actual dependencies
                dep_fn = re.search("(?P<fn>.*)\..*", dep).group('fn')
                if self.twl.search(dataCache.pkg_fn[dep_fn]):
                    continue
            if dep not in self.taskhash:
                bb.fatal("%s is not in taskhash, caller isn't calling in dependency order?", dep)
            data = data + self.taskhash[dep]
            self.runtaskdeps[k].append(dep)
        h = hashlib.md5(data).hexdigest()
        self.taskhash[k] = h
        #d.setVar("BB_TASKHASH_task-%s" % task, taskhash[task])
        return h

    def set_taskdata(self, hashes, deps):
        self.runtaskdeps = deps
        self.taskhash = hashes

    def dump_sigtask(self, fn, task, stampbase, runtime):
        k = fn + "." + task
        if runtime == "customfile":
            sigfile = stampbase
        elif runtime:
            sigfile = stampbase + "." + task + ".sigdata" + "." + self.taskhash[k]
        else:
            sigfile = stampbase + "." + task + ".sigbasedata" + "." + self.basehash[k]

        bb.utils.mkdirhier(os.path.dirname(sigfile))

        data = {}
        data['basewhitelist'] = self.basewhitelist
        data['taskwhitelist'] = self.taskwhitelist
        data['taskdeps'] = self.taskdeps[fn][task]
        data['basehash'] = self.basehash[k]
        data['gendeps'] = {}
        data['varvals'] = {}
        data['varvals'][task] = self.lookupcache[fn][task]
        for dep in self.taskdeps[fn][task]:
            if dep in self.basewhitelist:
                continue
            data['gendeps'][dep] = self.gendeps[fn][dep]
            data['varvals'][dep] = self.lookupcache[fn][dep]

        if runtime:
            data['runtaskdeps'] = self.runtaskdeps[k]
            data['runtaskhashes'] = {}
            for dep in data['runtaskdeps']:
                data['runtaskhashes'][dep] = self.taskhash[dep]

        p = pickle.Pickler(file(sigfile, "wb"), -1)
        p.dump(data)

    def dump_sigs(self, dataCache):
        for fn in self.taskdeps:
            for task in self.taskdeps[fn]:
                k = fn + "." + task
                if k not in self.taskhash:
                    continue
                if dataCache.basetaskhash[k] != self.basehash[k]:
                    bb.error("Bitbake's cached basehash does not match the one we just generated (%s)!" % k)
                    bb.error("The mismatched hashes were %s and %s" % (dataCache.basetaskhash[k], self.basehash[k]))
                self.dump_sigtask(fn, task, dataCache.stamp[fn], True)

class SignatureGeneratorBasicHash(SignatureGeneratorBasic):
    name = "basichash"

    def stampfile(self, stampbase, fn, taskname, extrainfo):
        if taskname != "do_setscene" and taskname.endswith("_setscene"):
            k = fn + "." + taskname[:-9]
        else:
            k = fn + "." + taskname
        h = self.taskhash[k]
        return ("%s.%s.%s.%s" % (stampbase, taskname, h, extrainfo)).rstrip('.')

def dump_this_task(outfile, d):
    import bb.parse
    fn = d.getVar("BB_FILENAME", True)
    task = "do_" + d.getVar("BB_CURRENTTASK", True)
    bb.parse.siggen.dump_sigtask(fn, task, outfile, "customfile")

def compare_sigfiles(a, b):
    p1 = pickle.Unpickler(file(a, "rb"))
    a_data = p1.load()
    p2 = pickle.Unpickler(file(b, "rb"))
    b_data = p2.load()

    def dict_diff(a, b):
        sa = set(a.keys())
        sb = set(b.keys())
        common = sa & sb
        changed = set()
        for i in common:
            if a[i] != b[i]:
                changed.add(i)
        added = sa - sb
        removed = sb - sa
        return changed, added, removed

    if 'basewhitelist' in a_data and a_data['basewhitelist'] != b_data['basewhitelist']:
        print "basewhitelist changed from %s to %s" % (a_data['basewhitelist'], b_data['basewhitelist'])

    if 'taskwhitelist' in a_data and a_data['taskwhitelist'] != b_data['taskwhitelist']:
        print "taskwhitelist changed from %s to %s" % (a_data['taskwhitelist'], b_data['taskwhitelist'])

    if a_data['taskdeps'] != b_data['taskdeps']:
        print "Task dependencies changed from %s to %s" % (sorted(a_data['taskdeps']), sorted(b_data['taskdeps']))

    if a_data['basehash'] != b_data['basehash']:
        print "basehash changed from %s to %s" % (a_data['basehash'], b_data['basehash'])

    changed, added, removed = dict_diff(a_data['gendeps'], b_data['gendeps'])
    if changed:
        for dep in changed:
            print "List of dependencies for variable %s changed from %s to %s" % (dep, a_data['gendeps'][dep], b_data['gendeps'][dep])
    if added:
        for dep in added:
            print "Dependency on variable %s was added" % (dep)
    if removed:
        for dep in removed:
            print "Dependency on Variable %s was removed" % (dep)


    changed, added, removed = dict_diff(a_data['varvals'], b_data['varvals'])
    if changed:
        for dep in changed:
            print "Variable %s value changed from %s to %s" % (dep, a_data['varvals'][dep], b_data['varvals'][dep])

    if 'runtaskhashes' in a_data and 'runtaskhashes' in b_data:
        changed, added, removed = dict_diff(a_data['runtaskhashes'], b_data['runtaskhashes'])
        if added:
            for dep in added:
                print "Dependency on task %s was added" % (dep)
        if removed:
            for dep in removed:
                print "Dependency on task %s was removed" % (dep)
        if changed:
            for dep in changed:
                print "Hash for dependent task %s changed from %s to %s" % (dep, a_data['runtaskhashes'][dep], b_data['runtaskhashes'][dep])
    elif 'runtaskdeps' in a_data and 'runtaskdeps' in b_data and sorted(a_data['runtaskdeps']) != sorted(b_data['runtaskdeps']):
        print "Tasks this task depends on changed from %s to %s" % (sorted(a_data['runtaskdeps']), sorted(b_data['runtaskdeps']))

def dump_sigfile(a):
    p1 = pickle.Unpickler(file(a, "rb"))
    a_data = p1.load()

    print "basewhitelist: %s" % (a_data['basewhitelist'])

    print "taskwhitelist: %s" % (a_data['taskwhitelist'])

    print "Task dependencies: %s" % (sorted(a_data['taskdeps']))

    print "basehash: %s" % (a_data['basehash'])

    for dep in a_data['gendeps']:
        print "List of dependencies for variable %s is %s" % (dep, a_data['gendeps'][dep])

    for dep in a_data['varvals']:
        print "Variable %s value is %s" % (dep, a_data['varvals'][dep])

    if 'runtaskdeps' in a_data:
        print "Tasks this task depends on: %s" % (a_data['runtaskdeps'])

    if 'runtaskhashes' in a_data:
        for dep in a_data['runtaskhashes']:
            print "Hash for dependent task %s is %s" % (dep, a_data['runtaskhashes'][dep])
@@ -23,25 +23,14 @@ Task data collection and handling
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import logging
import re
import bb

logger = logging.getLogger("BitBake.TaskData")

def re_match_strings(target, strings):
    """
    Whether or not the string 'target' matches
    any one string of the strings which can be regular expression string
    """
    return any(name == target or re.match(name, target)
               for name in strings)
from bb import data, event, mkdirhier, utils
import bb, os

class TaskData:
    """
    BitBake Task Data implementation
    """
    def __init__(self, abort = True, tryaltconfigs = False):
    def __init__(self, abort = True):
        self.build_names_index = []
        self.run_names_index = []
        self.fn_index = []
@@ -68,7 +57,6 @@ class TaskData:
        self.failed_fnids = []

        self.abort = abort
        self.tryaltconfigs = tryaltconfigs

    def getbuild_id(self, name):
        """
@@ -83,7 +71,7 @@ class TaskData:

    def getrun_id(self, name):
        """
        Return an ID number for the run target name.
        Return an ID number for the run target name.
        If it doesn't exist, create one.
        """
        if not name in self.run_names_index:
@@ -94,7 +82,7 @@ class TaskData:

    def getfn_id(self, name):
        """
        Return an ID number for the filename.
        Return an ID number for the filename.
        If it doesn't exist, create one.
        """
        if not name in self.fn_index:
@@ -103,16 +91,6 @@ class TaskData:

        return self.fn_index.index(name)

    def gettask_ids(self, fnid):
        """
        Return an array of the ID numbers matching a given fnid.
        """
        ids = []
        if fnid in self.tasks_lookup:
            for task in self.tasks_lookup[fnid]:
                ids.append(self.tasks_lookup[fnid][task])
        return ids

    def gettask_id(self, fn, task, create = True):
        """
        Return an ID number for the task matching fn and task.
@@ -146,6 +124,7 @@ class TaskData:
        Add tasks for a given fn to the database
        """

        task_graph = dataCache.task_queues[fn]
        task_deps = dataCache.task_deps[fn]

        fnid = self.getfn_id(fn)
@@ -157,11 +136,11 @@ class TaskData:
        if fnid in self.tasks_fnid:
            return

        for task in task_deps['tasks']:
        for task in task_graph.allnodes():

            # Work out task dependencies
            parentids = []
            for dep in task_deps['parents'][task]:
            for dep in task_graph.getparents(task):
                parentid = self.gettask_id(fn, dep)
                parentids.append(parentid)
            taskid = self.gettask_id(fn, task)
@@ -172,16 +151,14 @@ class TaskData:
                ids = []
                for dep in task_deps['depends'][task].split():
                    if dep:
                        if ":" not in dep:
                            bb.msg.fatal(bb.msg.domain.TaskData, "Error, dependency %s does not contain ':' character\n. Task 'depends' should be specified in the form 'packagename:task'" % (dep, fn))
                        ids.append(((self.getbuild_id(dep.split(":")[0])), dep.split(":")[1]))
                        ids.append(str(self.getbuild_id(dep.split(":")[0])) + ":" + dep.split(":")[1])
                self.tasks_idepends[taskid].extend(ids)

        # Work out build dependencies
        if not fnid in self.depids:
            dependids = {}
            for depend in dataCache.deps[fn]:
                logger.debug(2, "Added dependency %s for %s", depend, fn)
                bb.msg.debug(2, bb.msg.domain.TaskData, "Added dependency %s for %s" % (depend, fn))
                dependids[self.getbuild_id(depend)] = None
            self.depids[fnid] = dependids.keys()

@@ -192,11 +169,11 @@ class TaskData:
            rrecs = dataCache.runrecs[fn]
            for package in rdepends:
                for rdepend in rdepends[package]:
                    logger.debug(2, "Added runtime dependency %s for %s", rdepend, fn)
                    bb.msg.debug(2, bb.msg.domain.TaskData, "Added runtime dependency %s for %s" % (rdepend, fn))
                    rdependids[self.getrun_id(rdepend)] = None
            for package in rrecs:
                for rdepend in rrecs[package]:
                    logger.debug(2, "Added runtime recommendation %s for %s", rdepend, fn)
                    bb.msg.debug(2, bb.msg.domain.TaskData, "Added runtime recommendation %s for %s" % (rdepend, fn))
                    rdependids[self.getrun_id(rdepend)] = None
            self.rdepids[fnid] = rdependids.keys()

@@ -270,12 +247,12 @@ class TaskData:

    def get_unresolved_build_targets(self, dataCache):
        """
        Return a list of build targets who's providers
        Return a list of build targets who's providers
        are unknown.
        """
        unresolved = []
        for target in self.build_names_index:
            if re_match_strings(target, dataCache.ignored_dependencies):
            if target in dataCache.ignored_dependencies:
                continue
            if self.build_names_index.index(target) in self.failed_deps:
                continue
@@ -285,12 +262,12 @@ class TaskData:

    def get_unresolved_run_targets(self, dataCache):
        """
        Return a list of runtime targets who's providers
        Return a list of runtime targets who's providers
        are unknown.
        """
        unresolved = []
        for target in self.run_names_index:
            if re_match_strings(target, dataCache.ignored_dependencies):
            if target in dataCache.ignored_dependencies:
                continue
            if self.run_names_index.index(target) in self.failed_rdeps:
                continue
@@ -303,7 +280,7 @@ class TaskData:
        Return a list of providers of item
        """
        targetid = self.getbuild_id(item)


        return self.build_targets[targetid]

    def get_dependees(self, itemid):
@@ -353,23 +330,26 @@ class TaskData:
                self.add_provider_internal(cfgData, dataCache, item)
            except bb.providers.NoProvider:
                if self.abort:
                    bb.msg.error(bb.msg.domain.Provider, "Nothing PROVIDES '%s' (but '%s' DEPENDS on or otherwise requires it)" % (item, self.get_dependees_str(item)))
                    raise
                self.remove_buildtarget(self.getbuild_id(item))
                targetid = self.getbuild_id(item)
                self.remove_buildtarget(targetid)

        self.mark_external_target(item)

    def add_provider_internal(self, cfgData, dataCache, item):
        """
        Add the providers of item to the task data
        Mark entries were specifically added externally as against dependencies
        Mark entries were specifically added externally as against dependencies
        added internally during dependency resolution
        """

        if re_match_strings(item, dataCache.ignored_dependencies):
        if item in dataCache.ignored_dependencies:
            return

        if not item in dataCache.providers:
            bb.event.fire(bb.event.NoProvider(item, dependees=self.get_rdependees_str(item)), cfgData)
            bb.msg.note(2, bb.msg.domain.Provider, "Nothing PROVIDES '%s' (but '%s' DEPENDS on or otherwise requires it)" % (item, self.get_dependees_str(item)))
            bb.event.fire(bb.event.NoProvider(item, cfgData))
            raise bb.providers.NoProvider(item)

        if self.have_build_target(item):
@@ -378,10 +358,15 @@ class TaskData:
        all_p = dataCache.providers[item]

        eligible, foundUnique = bb.providers.filterProviders(all_p, item, cfgData, dataCache)
        eligible = [p for p in eligible if not self.getfn_id(p) in self.failed_fnids]

        for p in eligible:
            fnid = self.getfn_id(p)
            if fnid in self.failed_fnids:
                eligible.remove(p)

        if not eligible:
            bb.event.fire(bb.event.NoProvider(item, dependees=self.get_dependees_str(item)), cfgData)
            bb.msg.note(2, bb.msg.domain.Provider, "No buildable provider PROVIDES '%s' but '%s' DEPENDS on or otherwise requires it. Enable debugging and see earlier logs to find unbuildable providers." % (item, self.get_dependees_str(item)))
            bb.event.fire(bb.event.NoProvider(item, cfgData))
            raise bb.providers.NoProvider(item)

        if len(eligible) > 1 and foundUnique == False:
@@ -389,14 +374,16 @@ class TaskData:
                providers_list = []
                for fn in eligible:
                    providers_list.append(dataCache.pkg_fn[fn])
                bb.event.fire(bb.event.MultipleProviders(item, providers_list), cfgData)
                bb.msg.note(1, bb.msg.domain.Provider, "multiple providers are available for %s (%s);" % (item, ", ".join(providers_list)))
                bb.msg.note(1, bb.msg.domain.Provider, "consider defining PREFERRED_PROVIDER_%s" % item)
                bb.event.fire(bb.event.MultipleProviders(item, providers_list, cfgData))
                self.consider_msgs_cache.append(item)

        for fn in eligible:
            fnid = self.getfn_id(fn)
            if fnid in self.failed_fnids:
                continue
            logger.debug(2, "adding %s to satisfy %s", fn, item)
            bb.msg.debug(2, bb.msg.domain.Provider, "adding %s to satisfy %s" % (fn, item))
            self.add_build_target(fn, item)
            self.add_tasks(fn, dataCache)

@@ -409,7 +396,7 @@ class TaskData:
        (takes item names from RDEPENDS/PACKAGES namespace)
        """

        if re_match_strings(item, dataCache.ignored_dependencies):
        if item in dataCache.ignored_dependencies:
            return

        if self.have_runtime_target(item):
@@ -418,14 +405,20 @@ class TaskData:
        all_p = bb.providers.getRuntimeProviders(dataCache, item)

        if not all_p:
            bb.event.fire(bb.event.NoProvider(item, runtime=True, dependees=self.get_rdependees_str(item)), cfgData)
            bb.msg.error(bb.msg.domain.Provider, "'%s' RDEPENDS/RRECOMMENDS or otherwise requires the runtime entity '%s' but it wasn't found in any PACKAGE or RPROVIDES variables" % (self.get_rdependees_str(item), item))
            bb.event.fire(bb.event.NoProvider(item, cfgData, runtime=True))
            raise bb.providers.NoRProvider(item)

        eligible, numberPreferred = bb.providers.filterProvidersRunTime(all_p, item, cfgData, dataCache)
        eligible = [p for p in eligible if not self.getfn_id(p) in self.failed_fnids]

        for p in eligible:
            fnid = self.getfn_id(p)
            if fnid in self.failed_fnids:
                eligible.remove(p)

        if not eligible:
            bb.event.fire(bb.event.NoProvider(item, runtime=True, dependees=self.get_rdependees_str(item)), cfgData)
            bb.msg.error(bb.msg.domain.Provider, "'%s' RDEPENDS/RRECOMMENDS or otherwise requires the runtime entity '%s' but it wasn't found in any PACKAGE or RPROVIDES variables of any buildable targets.\nEnable debugging and see earlier logs to find unbuildable targets." % (self.get_rdependees_str(item), item))
            bb.event.fire(bb.event.NoProvider(item, cfgData, runtime=True))
            raise bb.providers.NoRProvider(item)

        if len(eligible) > 1 and numberPreferred == 0:
@@ -433,7 +426,9 @@ class TaskData:
                providers_list = []
                for fn in eligible:
                    providers_list.append(dataCache.pkg_fn[fn])
                bb.event.fire(bb.event.MultipleProviders(item, providers_list, runtime=True), cfgData)
                bb.msg.note(2, bb.msg.domain.Provider, "multiple providers are available for runtime %s (%s);" % (item, ", ".join(providers_list)))
                bb.msg.note(2, bb.msg.domain.Provider, "consider defining a PREFERRED_PROVIDER entry to match runtime %s" % item)
                bb.event.fire(bb.event.MultipleProviders(item,providers_list, cfgData, runtime=True))
                self.consider_msgs_cache.append(item)

        if numberPreferred > 1:
@@ -441,7 +436,9 @@ class TaskData:
                providers_list = []
                for fn in eligible:
                    providers_list.append(dataCache.pkg_fn[fn])
                bb.event.fire(bb.event.MultipleProviders(item, providers_list, runtime=True), cfgData)
                bb.msg.note(2, bb.msg.domain.Provider, "multiple providers are available for runtime %s (top %s entries preferred) (%s);" % (item, numberPreferred, ", ".join(providers_list)))
                bb.msg.note(2, bb.msg.domain.Provider, "consider defining only one PREFERRED_PROVIDER entry to match runtime %s" % item)
                bb.event.fire(bb.event.MultipleProviders(item,providers_list, cfgData, runtime=True))
                self.consider_msgs_cache.append(item)

        # run through the list until we find one that we can build
@@ -449,7 +446,7 @@ class TaskData:
            fnid = self.getfn_id(fn)
            if fnid in self.failed_fnids:
                continue
            logger.debug(2, "adding '%s' to satisfy runtime '%s'", fn, item)
            bb.msg.debug(2, bb.msg.domain.Provider, "adding '%s' to satisfy runtime '%s'" % (fn, item))
            self.add_runtime_target(fn, item)
            self.add_tasks(fn, dataCache)

@@ -462,7 +459,9 @@ class TaskData:
        """
        if fnid in self.failed_fnids:
            return
        logger.debug(1, "File '%s' is unbuildable, removing...", self.fn_index[fnid])
        if not missing_list:
            missing_list = [fnid]
        bb.msg.debug(1, bb.msg.domain.Provider, "File '%s' is unbuildable, removing..." % self.fn_index[fnid])
        self.failed_fnids.append(fnid)
        for target in self.build_targets:
            if fnid in self.build_targets[target]:
@@ -484,21 +483,14 @@ class TaskData:
            missing_list = [self.build_names_index[targetid]]
        else:
            missing_list = [self.build_names_index[targetid]] + missing_list
        logger.verbose("Target '%s' is unbuildable, removing...\nMissing or unbuildable dependency chain was: %s", self.build_names_index[targetid], missing_list)
        bb.msg.note(2, bb.msg.domain.Provider, "Target '%s' is unbuildable, removing...\nMissing or unbuildable dependency chain was: %s" % (self.build_names_index[targetid], missing_list))
        self.failed_deps.append(targetid)
        dependees = self.get_dependees(targetid)
        for fnid in dependees:
            self.fail_fnid(fnid, missing_list)
        for taskid in xrange(len(self.tasks_idepends)):
            idepends = self.tasks_idepends[taskid]
            for (idependid, idependtask) in idepends:
                if idependid == targetid:
                    self.fail_fnid(self.tasks_fnid[taskid], missing_list)

        if self.abort and targetid in self.external_targets:
            target = self.build_names_index[targetid]
            logger.error("Required build target '%s' has no buildable providers.\nMissing or unbuildable dependency chain was: %s", target, missing_list)
            raise bb.providers.NoProvider(target)
            bb.msg.error(bb.msg.domain.Provider, "Required build target '%s' has no buildable providers.\nMissing or unbuildable dependency chain was: %s" % (self.build_names_index[targetid], missing_list))
            raise bb.providers.NoProvider

    def remove_runtarget(self, targetid, missing_list = []):
        """
@@ -510,7 +502,7 @@ class TaskData:
        else:
            missing_list = [self.run_names_index[targetid]] + missing_list

        logger.info("Runtime target '%s' is unbuildable, removing...\nMissing or unbuildable dependency chain was: %s", self.run_names_index[targetid], missing_list)
        bb.msg.note(1, bb.msg.domain.Provider, "Runtime target '%s' is unbuildable, removing...\nMissing or unbuildable dependency chain was: %s" % (self.run_names_index[targetid], missing_list))
        self.failed_rdeps.append(targetid)
        dependees = self.get_rdependees(targetid)
        for fnid in dependees:
@@ -520,8 +512,8 @@ class TaskData:
        """
        Resolve all unresolved build and runtime targets
        """
        logger.info("Resolving any missing task queue dependencies")
        while True:
        bb.msg.note(1, bb.msg.domain.TaskData, "Resolving any missing task queue dependencies")
        while 1:
            added = 0
            for target in self.get_unresolved_build_targets(dataCache):
                try:
@@ -530,6 +522,7 @@ class TaskData:
                except bb.providers.NoProvider:
                    targetid = self.getbuild_id(target)
                    if self.abort and targetid in self.external_targets:
                        bb.msg.error(bb.msg.domain.Provider, "Nothing PROVIDES '%s' (but '%s' DEPENDS on or otherwise requires it)" % (target, self.get_dependees_str(target)))
                        raise
                    self.remove_buildtarget(targetid)
            for target in self.get_unresolved_run_targets(dataCache):
@@ -538,7 +531,7 @@ class TaskData:
                    added = added + 1
                except bb.providers.NoRProvider:
                    self.remove_runtarget(self.getrun_id(target))
            logger.debug(1, "Resolved " + str(added) + " extra dependencies")
            bb.msg.debug(1, bb.msg.domain.TaskData, "Resolved " + str(added) + " extra dependecies")
            if added == 0:
                break
        # self.dump_data()
@@ -547,40 +540,42 @@ class TaskData:
        """
        Dump some debug information on the internal data structures
        """
        logger.debug(3, "build_names:")
        logger.debug(3, ", ".join(self.build_names_index))
        bb.msg.debug(3, bb.msg.domain.TaskData, "build_names:")
        bb.msg.debug(3, bb.msg.domain.TaskData, ", ".join(self.build_names_index))

        logger.debug(3, "run_names:")
        logger.debug(3, ", ".join(self.run_names_index))
        bb.msg.debug(3, bb.msg.domain.TaskData, "run_names:")
        bb.msg.debug(3, bb.msg.domain.TaskData, ", ".join(self.run_names_index))

        logger.debug(3, "build_targets:")
        for buildid in xrange(len(self.build_names_index)):
        bb.msg.debug(3, bb.msg.domain.TaskData, "build_targets:")
        for buildid in range(len(self.build_names_index)):
            target = self.build_names_index[buildid]
            targets = "None"
            if buildid in self.build_targets:
                targets = self.build_targets[buildid]
            logger.debug(3, " (%s)%s: %s", buildid, target, targets)
            bb.msg.debug(3, bb.msg.domain.TaskData, " (%s)%s: %s" % (buildid, target, targets))

        logger.debug(3, "run_targets:")
        for runid in xrange(len(self.run_names_index)):
        bb.msg.debug(3, bb.msg.domain.TaskData, "run_targets:")
        for runid in range(len(self.run_names_index)):
            target = self.run_names_index[runid]
            targets = "None"
            if runid in self.run_targets:
                targets = self.run_targets[runid]
            logger.debug(3, " (%s)%s: %s", runid, target, targets)
            bb.msg.debug(3, bb.msg.domain.TaskData, " (%s)%s: %s" % (runid, target, targets))

        logger.debug(3, "tasks:")
        for task in xrange(len(self.tasks_name)):
            logger.debug(3, " (%s)%s - %s: %s",
                         task,
                         self.fn_index[self.tasks_fnid[task]],
                         self.tasks_name[task],
                         self.tasks_tdepends[task])
        bb.msg.debug(3, bb.msg.domain.TaskData, "tasks:")
        for task in range(len(self.tasks_name)):
            bb.msg.debug(3, bb.msg.domain.TaskData, " (%s)%s - %s: %s" % (
                task,
                self.fn_index[self.tasks_fnid[task]],
                self.tasks_name[task],
                self.tasks_tdepends[task]))

        logger.debug(3, "dependency ids (per fn):")
        bb.msg.debug(3, bb.msg.domain.TaskData, "dependency ids (per fn):")
        for fnid in self.depids:
            logger.debug(3, " %s %s: %s", fnid, self.fn_index[fnid], self.depids[fnid])
            bb.msg.debug(3, bb.msg.domain.TaskData, " %s %s: %s" % (fnid, self.fn_index[fnid], self.depids[fnid]))

        logger.debug(3, "runtime dependency ids (per fn):")
        bb.msg.debug(3, bb.msg.domain.TaskData, "runtime dependency ids (per fn):")
        for fnid in self.rdepids:
            logger.debug(3, " %s %s: %s", fnid, self.fn_index[fnid], self.rdepids[fnid])
            bb.msg.debug(3, bb.msg.domain.TaskData, " %s %s: %s" % (fnid, self.fn_index[fnid], self.rdepids[fnid]))
@@ -1,17 +0,0 @@
#
# BitBake UI Implementation
#
# Copyright (C) 2006-2007 Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
@@ -1,17 +0,0 @@
#
# Gtk+ UI pieces for BitBake
#
# Copyright (C) 2006-2007 Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
@@ -1,455 +0,0 @@
|
||||
#
|
||||
# BitBake Graphical GTK User Interface
|
||||
#
|
||||
# Copyright (C) 2008 Intel Corporation
|
||||
#
|
||||
# Authored by Rob Bradford <rob@linux.intel.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License version 2 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License along
|
||||
# with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
import gtk
|
||||
import gobject
|
||||
import threading
|
||||
import os
|
||||
import datetime
|
||||
import time
|
||||
|
||||
class BuildConfiguration:
|
||||
""" Represents a potential *or* historic *or* concrete build. It
|
||||
encompasses all the things that we need to tell bitbake to do to make it
|
||||
build what we want it to build.
|
||||
|
||||
It also stored the metadata URL and the set of possible machines (and the
|
||||
distros / images / uris for these. Apart from the metdata URL these are
|
||||
not serialised to file (since they may be transient). In some ways this
|
||||
functionality might be shifted to the loader class."""
|
||||
|
||||
def __init__ (self):
|
||||
self.metadata_url = None
|
||||
|
||||
# Tuple of (distros, image, urls)
|
||||
self.machine_options = {}
|
||||
|
||||
self.machine = None
|
||||
self.distro = None
|
||||
self.image = None
|
||||
self.urls = []
|
||||
self.extra_urls = []
|
||||
self.extra_pkgs = []
|
||||
|
||||
def get_machines_model (self):
|
||||
model = gtk.ListStore (gobject.TYPE_STRING)
|
||||
for machine in self.machine_options.keys():
|
||||
model.append ([machine])
|
||||
|
||||
return model
|
||||
|
||||
def get_distro_and_images_models (self, machine):
|
||||
distro_model = gtk.ListStore (gobject.TYPE_STRING)
|
||||
|
||||
for distro in self.machine_options[machine][0]:
|
||||
distro_model.append ([distro])
|
||||
|
||||
image_model = gtk.ListStore (gobject.TYPE_STRING)
|
||||
|
||||
for image in self.machine_options[machine][1]:
|
||||
image_model.append ([image])
|
||||
|
||||
return (distro_model, image_model)
|
||||
|
||||
def get_repos (self):
|
||||
self.urls = self.machine_options[self.machine][2]
|
||||
return self.urls
|
||||
|
||||
# It might be a lot lot better if we stored these in like, bitbake conf
|
||||
# file format.
|
||||
@staticmethod
|
||||
def load_from_file (filename):
|
||||
|
||||
conf = BuildConfiguration()
|
||||
with open(filename, "r") as f:
|
||||
for line in f:
|
||||
data = line.split (";")[1]
|
||||
if (line.startswith ("metadata-url;")):
|
||||
conf.metadata_url = data.strip()
|
||||
continue
|
||||
if (line.startswith ("url;")):
|
||||
conf.urls += [data.strip()]
|
||||
continue
|
||||
if (line.startswith ("extra-url;")):
|
||||
conf.extra_urls += [data.strip()]
|
||||
continue
|
||||
if (line.startswith ("machine;")):
|
||||
conf.machine = data.strip()
|
||||
continue
|
||||
if (line.startswith ("distribution;")):
|
||||
conf.distro = data.strip()
|
||||
continue
|
||||
if (line.startswith ("image;")):
|
||||
conf.image = data.strip()
|
||||
continue
|
||||
|
||||
return conf
|
||||
|
||||
# Serialise to a file. This is part of the build process and we use this
|
||||
# to be able to repeat a given build (using the same set of parameters)
|
||||
# but also so that we can include the details of the image / machine /
|
||||
# distro in the build manager tree view.
|
||||
def write_to_file (self, filename):
|
||||
f = open (filename, "w")
|
||||
|
||||
lines = []
|
||||
|
||||
if (self.metadata_url):
|
||||
lines += ["metadata-url;%s\n" % (self.metadata_url)]
|
||||
|
||||
for url in self.urls:
|
||||
lines += ["url;%s\n" % (url)]
|
||||
|
||||
for url in self.extra_urls:
|
||||
lines += ["extra-url;%s\n" % (url)]
|
||||
|
||||
if (self.machine):
|
||||
lines += ["machine;%s\n" % (self.machine)]
|
||||
|
||||
if (self.distro):
|
||||
lines += ["distribution;%s\n" % (self.distro)]
|
||||
|
||||
if (self.image):
|
||||
lines += ["image;%s\n" % (self.image)]
|
||||
|
||||
f.writelines (lines)
|
||||
f.close ()
|
||||
|
||||
class BuildResult(gobject.GObject):
|
||||
""" Represents an historic build. Perhaps not successful. But it includes
|
||||
things such as the files that are in the directory (the output from the
|
||||
build) as well as a deserialised BuildConfiguration file that is stored in
|
||||
".conf" in the directory for the build.
|
||||
|
||||
This is GObject so that it can be included in the TreeStore."""
|
||||
|
||||
(STATE_COMPLETE, STATE_FAILED, STATE_ONGOING) = \
|
||||
(0, 1, 2)
|
||||
|
||||
def __init__ (self, parent, identifier):
|
||||
gobject.GObject.__init__ (self)
|
||||
self.date = None
|
||||
|
||||
self.files = []
|
||||
self.status = None
|
||||
self.identifier = identifier
|
||||
self.path = os.path.join (parent, identifier)
|
||||
|
||||
# Extract the date, since the directory name is of the
|
||||
# format build-<year><month><day>-<ordinal> we can easily
|
||||
# pull it out.
|
||||
# TODO: Better to stat a file?
|
||||
(_, date, revision) = identifier.split ("-")
|
||||
print(date)
|
||||
|
||||
year = int (date[0:4])
|
||||
month = int (date[4:6])
|
||||
day = int (date[6:8])
|
||||
|
||||
self.date = datetime.date (year, month, day)
|
||||
|
||||
self.conf = None
|
||||
|
||||
# By default builds are STATE_FAILED unless we find a "complete" file
|
||||
# in which case they are STATE_COMPLETE
|
||||
self.state = BuildResult.STATE_FAILED
|
||||
for file in os.listdir (self.path):
|
||||
if (file.startswith (".conf")):
|
||||
conffile = os.path.join (self.path, file)
|
||||
self.conf = BuildConfiguration.load_from_file (conffile)
|
||||
elif (file.startswith ("complete")):
|
||||
self.state = BuildResult.STATE_COMPLETE
|
||||
else:
|
||||
self.add_file (file)
|
||||
|
||||
def add_file (self, file):
|
||||
# Just add the file for now. Don't care about the type.
|
||||
self.files += [(file, None)]
|
||||
|
||||
class BuildManagerModel (gtk.TreeStore):
|
||||
""" Model for the BuildManagerTreeView. This derives from gtk.TreeStore
|
||||
but it abstracts nicely what the columns mean and the setup of the columns
|
||||
in the model. """
|
||||
|
||||
(COL_IDENT, COL_DESC, COL_MACHINE, COL_DISTRO, COL_BUILD_RESULT, COL_DATE, COL_STATE) = \
|
||||
(0, 1, 2, 3, 4, 5, 6)
|
||||
|
||||
def __init__ (self):
|
||||
gtk.TreeStore.__init__ (self,
|
||||
gobject.TYPE_STRING,
|
||||
gobject.TYPE_STRING,
|
||||
gobject.TYPE_STRING,
|
||||
gobject.TYPE_STRING,
|
||||
gobject.TYPE_OBJECT,
|
||||
gobject.TYPE_INT64,
|
||||
gobject.TYPE_INT)
|
||||
|
||||
class BuildManager (gobject.GObject):
|
||||
""" This class manages the historic builds that have been found in the
|
||||
"results" directory but is also used for starting a new build."""
|
||||
|
||||
__gsignals__ = {
|
||||
'population-finished' : (gobject.SIGNAL_RUN_LAST,
|
||||
gobject.TYPE_NONE,
|
||||
()),
|
||||
'populate-error' : (gobject.SIGNAL_RUN_LAST,
|
||||
gobject.TYPE_NONE,
|
||||
())
|
||||
}
|
||||
|
||||
def update_build_result (self, result, iter):
|
||||
# Convert the date into something we can sort by.
|
||||
date = long (time.mktime (result.date.timetuple()))
|
||||
|
||||
# Add a top level entry for the build
|
||||
|
||||
self.model.set (iter,
|
||||
BuildManagerModel.COL_IDENT, result.identifier,
|
||||
BuildManagerModel.COL_DESC, result.conf.image,
|
||||
BuildManagerModel.COL_MACHINE, result.conf.machine,
|
||||
BuildManagerModel.COL_DISTRO, result.conf.distro,
|
||||
BuildManagerModel.COL_BUILD_RESULT, result,
|
||||
BuildManagerModel.COL_DATE, date,
|
||||
BuildManagerModel.COL_STATE, result.state)
|
||||
|
||||
# And then we use the files in the directory as the children for the
|
||||
# top level iter.
|
||||
for file in result.files:
|
||||
self.model.append (iter, (None, file[0], None, None, None, date, -1))
|
||||
|
||||
# This function is called as an idle by the BuildManagerPopulaterThread
|
||||
def add_build_result (self, result):
|
||||
gtk.gdk.threads_enter()
|
||||
self.known_builds += [result]
|
||||
|
||||
self.update_build_result (result, self.model.append (None))
|
||||
|
||||
gtk.gdk.threads_leave()
|
||||
|
||||
def notify_build_finished (self):
|
||||
# This is a bit of a hack. If we have a running build running then we
|
||||
# will have a row in the model in STATE_ONGOING. Find it and make it
|
||||
# as if it was a proper historic build (well, it is completed now....)
|
||||
|
||||
# We need to use the iters here rather than the Python iterator
|
||||
# interface to the model since we need to pass it into
|
||||
# update_build_result
|
||||
|
||||
iter = self.model.get_iter_first()
|
||||
|
||||
while (iter):
|
||||
(ident, state) = self.model.get(iter,
|
||||
BuildManagerModel.COL_IDENT,
|
||||
BuildManagerModel.COL_STATE)
|
||||
|
||||
if state == BuildResult.STATE_ONGOING:
|
||||
result = BuildResult (self.results_directory, ident)
|
||||
self.update_build_result (result, iter)
|
||||
iter = self.model.iter_next(iter)
|
||||
|
||||
def notify_build_succeeded (self):
|
||||
# Write the "complete" file so that when we create the BuildResult
|
||||
# object we put into the model
|
||||
|
||||
complete_file_path = os.path.join (self.cur_build_directory, "complete")
|
||||
f = file (complete_file_path, "w")
|
||||
f.close()
|
||||
self.notify_build_finished()
|
||||
|
||||
def notify_build_failed (self):
|
||||
# Without a "complete" file then this will mark the build as failed:
|
||||
self.notify_build_finished()
|
||||
|
||||
# This function is called as an idle
|
||||
def emit_population_finished_signal (self):
|
||||
gtk.gdk.threads_enter()
|
||||
self.emit ("population-finished")
|
||||
gtk.gdk.threads_leave()
|
||||
|
||||
class BuildManagerPopulaterThread (threading.Thread):
|
||||
def __init__ (self, manager, directory):
|
||||
threading.Thread.__init__ (self)
|
||||
self.manager = manager
|
||||
self.directory = directory
|
||||
|
||||
def run (self):
|
||||
# For each of the "build-<...>" directories ..
|
||||
|
||||
if os.path.exists (self.directory):
|
||||
for directory in os.listdir (self.directory):
|
||||
|
||||
if not directory.startswith ("build-"):
|
||||
continue
|
||||
|
||||
build_result = BuildResult (self.directory, directory)
|
||||
self.manager.add_build_result (build_result)
|
||||
|
||||
gobject.idle_add (BuildManager.emit_population_finished_signal,
|
||||
self.manager)
|
||||
|
||||
def __init__ (self, server, results_directory):
|
||||
gobject.GObject.__init__ (self)
|
||||
|
||||
# The builds that we've found from walking the result directory
|
||||
self.known_builds = []
|
||||
|
||||
# Save out the bitbake server, we need this for issuing commands to
|
||||
# the cooker:
|
||||
self.server = server
|
||||
|
||||
# The TreeStore that we use
|
||||
self.model = BuildManagerModel ()
|
||||
|
||||
# The results directory is where we create (and look for) the
|
||||
# build-<xyz>-<n> directories. We need to populate ourselves from
|
||||
# directory
|
||||
self.results_directory = results_directory
|
||||
self.populate_from_directory (self.results_directory)
|
||||
|
||||
def populate_from_directory (self, directory):
|
||||
thread = BuildManager.BuildManagerPopulaterThread (self, directory)
|
||||
thread.start()
|
||||
|
||||
# Come up with the name for the next build ident by combining "build-"
|
||||
# with the date formatted as yyyymmdd and then an ordinal. We do this by
|
||||
# an optimistic algorithm incrementing the ordinal if we find that it
|
||||
# already exists.
|
||||
def get_next_build_ident (self):
|
||||
today = datetime.date.today ()
|
||||
datestr = str (today.year) + str (today.month) + str (today.day)
|
||||
|
||||
revision = 0
|
||||
test_name = "build-%s-%d" % (datestr, revision)
|
||||
test_path = os.path.join (self.results_directory, test_name)
|
||||
|
||||
while (os.path.exists (test_path)):
|
||||
revision += 1
|
||||
test_name = "build-%s-%d" % (datestr, revision)
|
||||
test_path = os.path.join (self.results_directory, test_name)
|
||||
|
||||
return test_name
|
||||
|
||||
# Take a BuildConfiguration and then try and build it based on the
|
||||
# parameters of that configuration. S
|
||||
def do_build (self, conf):
|
||||
server = self.server
|
||||
|
||||
# Work out the build directory. Note we actually create the
|
||||
# directories here since we need to write the ".conf" file. Otherwise
|
||||
# we could have relied on bitbake's builder thread to actually make
|
||||
# the directories as it proceeds with the build.
|
||||
ident = self.get_next_build_ident ()
|
||||
build_directory = os.path.join (self.results_directory,
|
||||
ident)
|
||||
self.cur_build_directory = build_directory
|
||||
os.makedirs (build_directory)
|
||||
|
||||
        conffile = os.path.join (build_directory, ".conf")
        conf.write_to_file (conffile)

        # Add a row to the model representing this ongoing build. It's kind of
        # a fake entry. If this build completes or fails then this gets updated
        # with the real information, like the historic builds.
        date = long (time.time())
        self.model.append (None, (ident, conf.image, conf.machine, conf.distro,
                                  None, date, BuildResult.STATE_ONGOING))
        try:
            server.runCommand(["setVariable", "BUILD_IMAGES_FROM_FEEDS", 1])
            server.runCommand(["setVariable", "MACHINE", conf.machine])
            server.runCommand(["setVariable", "DISTRO", conf.distro])
            server.runCommand(["setVariable", "PACKAGE_CLASSES", "package_ipk"])
            server.runCommand(["setVariable", "BBFILES", \
                """${OEROOT}/meta/packages/*/*.bb ${OEROOT}/meta-moblin/packages/*/*.bb"""])
            server.runCommand(["setVariable", "TMPDIR", "${OEROOT}/build/tmp"])
            server.runCommand(["setVariable", "IPK_FEED_URIS", \
                " ".join(conf.get_repos())])
            server.runCommand(["setVariable", "DEPLOY_DIR_IMAGE",
                build_directory])
            server.runCommand(["buildTargets", [conf.image], "rootfs"])

        except Exception as e:
            print(e)

class BuildManagerTreeView (gtk.TreeView):
    """ The tree view for the build manager. This shows the historic builds
    and so forth. """

    # We use this function to control what goes in the cell since we store
    # the date in the model as seconds since the epoch (for sorting) and so we
    # need to make it human readable.
    def date_format_custom_cell_data_func (self, col, cell, model, iter):
        date = model.get (iter, BuildManagerModel.COL_DATE)[0]
        datestr = time.strftime("%A %d %B %Y", time.localtime(date))
        cell.set_property ("text", datestr)

    # This format function controls what goes in the cell. We use it to map
    # the integer state to a string and also to colourise the text.
    def state_format_custom_cell_data_fun (self, col, cell, model, iter):
        state = model.get (iter, BuildManagerModel.COL_STATE)[0]

        if (state == BuildResult.STATE_ONGOING):
            cell.set_property ("text", "Active")
            cell.set_property ("foreground", "#000000")
        elif (state == BuildResult.STATE_FAILED):
            cell.set_property ("text", "Failed")
            cell.set_property ("foreground", "#ff0000")
        elif (state == BuildResult.STATE_COMPLETE):
            cell.set_property ("text", "Complete")
            cell.set_property ("foreground", "#00ff00")
        else:
            cell.set_property ("text", "")

    def __init__ (self):
        gtk.TreeView.__init__(self)

        # Description column
        renderer = gtk.CellRendererText ()
        col = gtk.TreeViewColumn (None, renderer,
                                  text=BuildManagerModel.COL_DESC)
        self.append_column (col)

        # Machine column
        renderer = gtk.CellRendererText ()
        col = gtk.TreeViewColumn ("Machine", renderer,
                                  text=BuildManagerModel.COL_MACHINE)
        self.append_column (col)

        # Distribution column
        renderer = gtk.CellRendererText ()
        col = gtk.TreeViewColumn ("Distribution", renderer,
                                  text=BuildManagerModel.COL_DISTRO)
        self.append_column (col)

        # Date column (using a custom cell data function that turns the
        # stored epoch timestamp into a human readable string)
        renderer = gtk.CellRendererText ()
        col = gtk.TreeViewColumn ("Date", renderer,
                                  text=BuildManagerModel.COL_DATE)
        self.append_column (col)
        col.set_cell_data_func (renderer,
                                self.date_format_custom_cell_data_func)

        # Status column
        renderer = gtk.CellRendererText ()
        col = gtk.TreeViewColumn ("Status", renderer,
                                  text = BuildManagerModel.COL_STATE)
        self.append_column (col)
        col.set_cell_data_func (renderer,
                                self.state_format_custom_cell_data_fun)
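For reference, a minimal sketch of how this tree view might be wired up. BuildManagerModel is assumed from the rest of this file; the window setup is illustrative only:

    import gtk

    # Hypothetical wiring: show the tree view over its model in a window.
    model = BuildManagerModel ()
    view = BuildManagerTreeView ()
    view.set_model (model)

    window = gtk.Window ()
    scroll = gtk.ScrolledWindow ()
    scroll.add (view)
    window.add (scroll)
    window.show_all ()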
@@ -1,137 +0,0 @@
#
# BitBake Graphical GTK User Interface
#
# Copyright (C) 2011 Intel Corporation
#
# Authored by Joshua Lock <josh@linux.intel.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import gobject

import bb  # needed for the bb.event/bb.command isinstance checks below

from bb.ui.crumbs.progress import ProgressBar

progress_total = 0

class HobHandler(gobject.GObject):
    """
    This object does BitBake event handling for the hob gui.
    """
    __gsignals__ = {
        "machines-updated" : (gobject.SIGNAL_RUN_LAST,
                              gobject.TYPE_NONE,
                              (gobject.TYPE_PYOBJECT,)),
        "distros-updated"  : (gobject.SIGNAL_RUN_LAST,
                              gobject.TYPE_NONE,
                              (gobject.TYPE_PYOBJECT,)),
        "generating-data"  : (gobject.SIGNAL_RUN_LAST,
                              gobject.TYPE_NONE,
                              ()),
        "data-generated"   : (gobject.SIGNAL_RUN_LAST,
                              gobject.TYPE_NONE,
                              ())
    }

    def __init__(self, taskmodel, server):
        gobject.GObject.__init__(self)

        self.model = taskmodel
        self.server = server
        self.current_command = None
        self.building = False

        self.command_map = {
            "findConfigFilesDistro"  : ("findConfigFiles", "MACHINE", "findConfigFilesMachine"),
            "findConfigFilesMachine" : ("generateTargetsTree", "classes/image.bbclass", None),
            "generateTargetsTree"    : (None, None, None),
        }

    def run_next_command(self):
        # FIXME: this is ugly and I *will* replace it
        if self.current_command:
            next_cmd = self.command_map[self.current_command]
            command = next_cmd[0]
            argument = next_cmd[1]
            self.current_command = next_cmd[2]
            if command == "generateTargetsTree":
                self.emit("generating-data")
            self.server.runCommand([command, argument])

    def handle_event(self, event, running_build, pbar=None):
        if not event:
            return

        # If we're running a build, use the RunningBuild event handler
        if self.building:
            running_build.handle_event(event)
        elif isinstance(event, bb.event.TargetsTreeGenerated):
            self.emit("data-generated")
            if event._model:
                self.model.populate(event._model)

        elif isinstance(event, bb.event.ConfigFilesFound):
            var = event._variable
            if var == "distro":
                distros = event._values
                distros.sort()
                self.emit("distros-updated", distros)
            elif var == "machine":
                machines = event._values
                machines.sort()
                self.emit("machines-updated", machines)

        elif isinstance(event, bb.command.CommandCompleted):
            self.run_next_command()
        elif isinstance(event, bb.event.CacheLoadStarted) and pbar:
            pbar.set_title("Loading cache")
            bb.ui.crumbs.hobeventhandler.progress_total = event.total
            pbar.update(0, bb.ui.crumbs.hobeventhandler.progress_total)
        elif isinstance(event, bb.event.CacheLoadProgress) and pbar:
            pbar.update(event.current, bb.ui.crumbs.hobeventhandler.progress_total)
        elif isinstance(event, bb.event.CacheLoadCompleted) and pbar:
            pbar.update(bb.ui.crumbs.hobeventhandler.progress_total, bb.ui.crumbs.hobeventhandler.progress_total)
        elif isinstance(event, bb.event.ParseStarted) and pbar:
            pbar.set_title("Processing recipes")
            bb.ui.crumbs.hobeventhandler.progress_total = event.total
            pbar.update(0, bb.ui.crumbs.hobeventhandler.progress_total)
        elif isinstance(event, bb.event.ParseProgress) and pbar:
            pbar.update(event.current, bb.ui.crumbs.hobeventhandler.progress_total)
        elif isinstance(event, bb.event.ParseCompleted) and pbar:
            pbar.hide()

        return

    def event_handle_idle_func (self, eventHandler, running_build, pbar):
        # Consume as many messages as we can in the time available to us
        event = eventHandler.getEvent()
        while event:
            self.handle_event(event, running_build, pbar)
            event = eventHandler.getEvent()
        return True

    def set_machine(self, machine):
        self.server.runCommand(["setVariable", "MACHINE", machine])
        self.current_command = "findConfigFilesMachine"
        self.run_next_command()

    def set_distro(self, distro):
        self.server.runCommand(["setVariable", "DISTRO", distro])

    def run_build(self, targets):
        self.building = True
        self.server.runCommand(["buildTargets", targets, "build"])

    def cancel_build(self):
        # Note: this may not be the right way to stop an in-progress build
        self.server.runCommand(["stateStop"])
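A minimal sketch of how a UI might drive this handler, assuming a task model, BitBake server connection, event handler, RunningBuild and progress bar obtained elsewhere (all names illustrative):

    import gobject

    def on_machines_updated(handler, machines):
        print(machines)  # e.g. refresh a combo box instead

    handler = HobHandler(taskmodel, server)
    handler.connect("machines-updated", on_machines_updated)

    # Poll the BitBake event queue from the glib main loop;
    # event_handle_idle_func returns True so it keeps being rescheduled.
    gobject.timeout_add(100, handler.event_handle_idle_func,
                        eventHandler, running_build, pbar)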
@@ -1,20 +0,0 @@
import gtk

class ProgressBar(gtk.Dialog):
    def __init__(self, parent):
        gtk.Dialog.__init__(self, flags=(gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT))
        self.set_title("Parsing metadata, please wait...")
        self.set_default_size(500, 0)
        self.set_transient_for(parent)
        self.progress = gtk.ProgressBar()
        self.vbox.pack_start(self.progress)
        self.show_all()

    def update(self, x, y):
        self.progress.set_fraction(float(x)/float(y))
        self.progress.set_text("%2d %%" % (x*100/y))

    def pulse(self):
        self.progress.set_text("Loading...")
        self.progress.pulse()
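Usage is straightforward; a minimal sketch (parent is assumed to be an existing gtk.Window):

    pbar = ProgressBar(parent)
    pbar.set_title("Loading cache")  # the title doubles as the status text
    pbar.update(30, 100)             # shows 30 %
    pbar.hide()                      # inherited from gtk.Widget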
@@ -1,606 +0,0 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE glade-interface SYSTEM "glade-2.0.dtd">
<!--Generated with glade3 3.4.5 on Mon Nov 10 12:24:12 2008 -->
<glade-interface>
<widget class="GtkDialog" id="build_dialog">
<property name="title" translatable="yes">Start a build</property>
<property name="window_position">GTK_WIN_POS_CENTER_ON_PARENT</property>
<property name="type_hint">GDK_WINDOW_TYPE_HINT_DIALOG</property>
<property name="has_separator">False</property>
<child internal-child="vbox">
<widget class="GtkVBox" id="dialog-vbox1">
<property name="visible">True</property>
<property name="spacing">2</property>
<child>
<widget class="GtkTable" id="build_table">
<property name="visible">True</property>
<property name="border_width">6</property>
<property name="n_rows">7</property>
<property name="n_columns">3</property>
<property name="column_spacing">5</property>
<property name="row_spacing">6</property>
<child>
<widget class="GtkAlignment" id="status_alignment">
<property name="visible">True</property>
<property name="left_padding">12</property>
<child>
<widget class="GtkHBox" id="status_hbox">
<property name="spacing">6</property>
<child>
<widget class="GtkImage" id="status_image">
<property name="visible">True</property>
<property name="no_show_all">True</property>
<property name="xalign">0</property>
<property name="stock">gtk-dialog-error</property>
</widget>
<packing>
<property name="expand">False</property>
<property name="fill">False</property>
</packing>
</child>
<child>
<widget class="GtkLabel" id="status_label">
<property name="visible">True</property>
<property name="xalign">0</property>
<property name="label" translatable="yes">If you see this text something is wrong...</property>
<property name="use_markup">True</property>
<property name="use_underline">True</property>
</widget>
<packing>
<property name="position">1</property>
</packing>
</child>
</widget>
</child>
</widget>
<packing>
<property name="right_attach">3</property>
<property name="top_attach">2</property>
<property name="bottom_attach">3</property>
</packing>
</child>
<child>
<widget class="GtkLabel" id="label2">
<property name="visible">True</property>
<property name="xalign">0</property>
<property name="label" translatable="yes">&lt;b&gt;Build configuration&lt;/b&gt;</property>
<property name="use_markup">True</property>
</widget>
<packing>
<property name="right_attach">3</property>
<property name="top_attach">3</property>
<property name="bottom_attach">4</property>
<property name="y_options"></property>
</packing>
</child>
<child>
<widget class="GtkComboBox" id="image_combo">
<property name="visible">True</property>
<property name="sensitive">False</property>
</widget>
<packing>
<property name="left_attach">1</property>
<property name="right_attach">2</property>
<property name="top_attach">6</property>
<property name="bottom_attach">7</property>
<property name="y_options"></property>
</packing>
</child>
<child>
<widget class="GtkLabel" id="image_label">
<property name="visible">True</property>
<property name="sensitive">False</property>
<property name="xalign">0</property>
<property name="xpad">12</property>
<property name="label" translatable="yes">Image:</property>
</widget>
<packing>
<property name="top_attach">6</property>
<property name="bottom_attach">7</property>
<property name="y_options"></property>
</packing>
</child>
<child>
<widget class="GtkComboBox" id="distribution_combo">
<property name="visible">True</property>
<property name="sensitive">False</property>
</widget>
<packing>
<property name="left_attach">1</property>
<property name="right_attach">2</property>
<property name="top_attach">5</property>
<property name="bottom_attach">6</property>
<property name="y_options"></property>
</packing>
</child>
<child>
<widget class="GtkLabel" id="distribution_label">
<property name="visible">True</property>
<property name="sensitive">False</property>
<property name="xalign">0</property>
<property name="xpad">12</property>
<property name="label" translatable="yes">Distribution:</property>
</widget>
<packing>
<property name="top_attach">5</property>
<property name="bottom_attach">6</property>
<property name="y_options"></property>
</packing>
</child>
<child>
<widget class="GtkComboBox" id="machine_combo">
<property name="visible">True</property>
<property name="sensitive">False</property>
</widget>
<packing>
<property name="left_attach">1</property>
<property name="right_attach">2</property>
<property name="top_attach">4</property>
<property name="bottom_attach">5</property>
<property name="y_options"></property>
</packing>
</child>
<child>
<widget class="GtkLabel" id="machine_label">
<property name="visible">True</property>
<property name="sensitive">False</property>
<property name="xalign">0</property>
<property name="xpad">12</property>
<property name="label" translatable="yes">Machine:</property>
</widget>
<packing>
<property name="top_attach">4</property>
<property name="bottom_attach">5</property>
<property name="y_options"></property>
</packing>
</child>
<child>
<widget class="GtkButton" id="refresh_button">
<property name="visible">True</property>
<property name="sensitive">False</property>
<property name="can_focus">True</property>
<property name="receives_default">True</property>
<property name="label" translatable="yes">gtk-refresh</property>
<property name="use_stock">True</property>
<property name="response_id">0</property>
</widget>
<packing>
<property name="left_attach">2</property>
<property name="right_attach">3</property>
<property name="top_attach">1</property>
<property name="bottom_attach">2</property>
<property name="y_options"></property>
</packing>
</child>
<child>
<widget class="GtkEntry" id="location_entry">
<property name="visible">True</property>
<property name="can_focus">True</property>
<property name="width_chars">32</property>
</widget>
<packing>
<property name="left_attach">1</property>
<property name="right_attach">2</property>
<property name="top_attach">1</property>
<property name="bottom_attach">2</property>
<property name="y_options"></property>
</packing>
</child>
<child>
<widget class="GtkLabel" id="label3">
<property name="visible">True</property>
<property name="xalign">0</property>
<property name="xpad">12</property>
<property name="label" translatable="yes">Location:</property>
</widget>
<packing>
<property name="top_attach">1</property>
<property name="bottom_attach">2</property>
<property name="y_options"></property>
</packing>
</child>
<child>
<widget class="GtkLabel" id="label1">
<property name="visible">True</property>
<property name="xalign">0</property>
<property name="label" translatable="yes">&lt;b&gt;Repository&lt;/b&gt;</property>
<property name="use_markup">True</property>
</widget>
<packing>
<property name="right_attach">3</property>
<property name="y_options"></property>
</packing>
</child>
<child>
<widget class="GtkAlignment" id="alignment1">
<property name="visible">True</property>
<child>
<placeholder/>
</child>
</widget>
<packing>
<property name="left_attach">2</property>
<property name="right_attach">3</property>
<property name="top_attach">4</property>
<property name="bottom_attach">5</property>
<property name="y_options"></property>
</packing>
</child>
<child>
<widget class="GtkAlignment" id="alignment2">
<property name="visible">True</property>
<child>
<placeholder/>
</child>
</widget>
<packing>
<property name="left_attach">2</property>
<property name="right_attach">3</property>
<property name="top_attach">5</property>
<property name="bottom_attach">6</property>
<property name="y_options"></property>
</packing>
</child>
<child>
<widget class="GtkAlignment" id="alignment3">
<property name="visible">True</property>
<child>
<placeholder/>
</child>
</widget>
<packing>
<property name="left_attach">2</property>
<property name="right_attach">3</property>
<property name="top_attach">6</property>
<property name="bottom_attach">7</property>
<property name="y_options"></property>
</packing>
</child>
</widget>
<packing>
<property name="position">1</property>
</packing>
</child>
<child internal-child="action_area">
<widget class="GtkHButtonBox" id="dialog-action_area1">
<property name="visible">True</property>
<property name="layout_style">GTK_BUTTONBOX_END</property>
<child>
<placeholder/>
</child>
<child>
<placeholder/>
</child>
<child>
<placeholder/>
</child>
</widget>
<packing>
<property name="expand">False</property>
<property name="pack_type">GTK_PACK_END</property>
</packing>
</child>
</widget>
</child>
</widget>
<widget class="GtkDialog" id="dialog2">
<property name="window_position">GTK_WIN_POS_CENTER_ON_PARENT</property>
<property name="type_hint">GDK_WINDOW_TYPE_HINT_DIALOG</property>
<property name="has_separator">False</property>
<child internal-child="vbox">
<widget class="GtkVBox" id="dialog-vbox2">
<property name="visible">True</property>
<property name="spacing">2</property>
<child>
<widget class="GtkTable" id="table2">
<property name="visible">True</property>
<property name="border_width">6</property>
<property name="n_rows">7</property>
<property name="n_columns">3</property>
<property name="column_spacing">6</property>
<property name="row_spacing">6</property>
<child>
<widget class="GtkLabel" id="label7">
<property name="visible">True</property>
<property name="xalign">0</property>
<property name="label" translatable="yes">&lt;b&gt;Repositories&lt;/b&gt;</property>
<property name="use_markup">True</property>
</widget>
<packing>
<property name="right_attach">3</property>
<property name="y_options"></property>
</packing>
</child>
<child>
<widget class="GtkAlignment" id="alignment4">
<property name="visible">True</property>
<property name="xalign">0</property>
<property name="left_padding">12</property>
<child>
<widget class="GtkScrolledWindow" id="scrolledwindow1">
<property name="visible">True</property>
<property name="can_focus">True</property>
<property name="hscrollbar_policy">GTK_POLICY_AUTOMATIC</property>
<property name="vscrollbar_policy">GTK_POLICY_AUTOMATIC</property>
<child>
<widget class="GtkTreeView" id="treeview1">
<property name="visible">True</property>
<property name="can_focus">True</property>
<property name="headers_clickable">True</property>
</widget>
</child>
</widget>
</child>
</widget>
<packing>
<property name="right_attach">3</property>
<property name="top_attach">2</property>
<property name="bottom_attach">3</property>
<property name="y_options"></property>
</packing>
</child>
<child>
<widget class="GtkEntry" id="entry1">
<property name="visible">True</property>
<property name="can_focus">True</property>
</widget>
<packing>
<property name="left_attach">1</property>
<property name="right_attach">3</property>
<property name="top_attach">1</property>
<property name="bottom_attach">2</property>
<property name="y_options"></property>
</packing>
</child>
<child>
<widget class="GtkLabel" id="label9">
<property name="visible">True</property>
<property name="xalign">0</property>
<property name="label" translatable="yes">&lt;b&gt;Additional packages&lt;/b&gt;</property>
<property name="use_markup">True</property>
</widget>
<packing>
<property name="right_attach">3</property>
<property name="top_attach">4</property>
<property name="bottom_attach">5</property>
<property name="y_options"></property>
</packing>
</child>
<child>
<widget class="GtkAlignment" id="alignment6">
<property name="visible">True</property>
<property name="xalign">0</property>
<property name="xscale">0</property>
<child>
<widget class="GtkLabel" id="label8">
<property name="visible">True</property>
<property name="xalign">0</property>
<property name="yalign">0</property>
<property name="xpad">12</property>
<property name="label" translatable="yes">Location: </property>
</widget>
</child>
</widget>
<packing>
<property name="top_attach">1</property>
<property name="bottom_attach">2</property>
<property name="y_options"></property>
</packing>
</child>
<child>
<widget class="GtkAlignment" id="alignment7">
<property name="visible">True</property>
<property name="xalign">1</property>
<property name="xscale">0</property>
<child>
<widget class="GtkHButtonBox" id="hbuttonbox1">
<property name="visible">True</property>
<property name="spacing">5</property>
<child>
<widget class="GtkButton" id="button7">
<property name="visible">True</property>
<property name="can_focus">True</property>
<property name="receives_default">True</property>
<property name="label" translatable="yes">gtk-remove</property>
<property name="use_stock">True</property>
<property name="response_id">0</property>
</widget>
</child>
<child>
<widget class="GtkButton" id="button6">
<property name="visible">True</property>
<property name="can_focus">True</property>
<property name="receives_default">True</property>
<property name="label" translatable="yes">gtk-edit</property>
<property name="use_stock">True</property>
<property name="response_id">0</property>
</widget>
<packing>
<property name="position">1</property>
</packing>
</child>
<child>
<widget class="GtkButton" id="button5">
<property name="visible">True</property>
<property name="can_focus">True</property>
<property name="receives_default">True</property>
<property name="label" translatable="yes">gtk-add</property>
<property name="use_stock">True</property>
<property name="response_id">0</property>
</widget>
<packing>
<property name="position">2</property>
</packing>
</child>
</widget>
</child>
</widget>
<packing>
<property name="left_attach">1</property>
<property name="right_attach">3</property>
<property name="top_attach">3</property>
<property name="bottom_attach">4</property>
<property name="y_options"></property>
</packing>
</child>
<child>
<widget class="GtkAlignment" id="alignment5">
<property name="visible">True</property>
<child>
<placeholder/>
</child>
</widget>
<packing>
<property name="top_attach">3</property>
<property name="bottom_attach">4</property>
<property name="y_options"></property>
</packing>
</child>
<child>
<widget class="GtkLabel" id="label10">
<property name="visible">True</property>
<property name="xalign">0</property>
<property name="yalign">0</property>
<property name="xpad">12</property>
<property name="label" translatable="yes">Search:</property>
</widget>
<packing>
<property name="top_attach">5</property>
<property name="bottom_attach">6</property>
<property name="y_options"></property>
</packing>
</child>
<child>
<widget class="GtkEntry" id="entry2">
<property name="visible">True</property>
<property name="can_focus">True</property>
</widget>
<packing>
<property name="left_attach">1</property>
<property name="right_attach">3</property>
<property name="top_attach">5</property>
<property name="bottom_attach">6</property>
<property name="y_options"></property>
</packing>
</child>
<child>
<widget class="GtkAlignment" id="alignment8">
<property name="visible">True</property>
<property name="xalign">0</property>
<property name="left_padding">12</property>
<child>
<widget class="GtkScrolledWindow" id="scrolledwindow2">
<property name="visible">True</property>
<property name="can_focus">True</property>
<property name="hscrollbar_policy">GTK_POLICY_AUTOMATIC</property>
<property name="vscrollbar_policy">GTK_POLICY_AUTOMATIC</property>
<child>
<widget class="GtkTreeView" id="treeview2">
<property name="visible">True</property>
<property name="can_focus">True</property>
<property name="headers_clickable">True</property>
</widget>
</child>
</widget>
</child>
</widget>
<packing>
<property name="right_attach">3</property>
<property name="top_attach">6</property>
<property name="bottom_attach">7</property>
<property name="y_options"></property>
</packing>
</child>
</widget>
<packing>
<property name="position">1</property>
</packing>
</child>
<child internal-child="action_area">
<widget class="GtkHButtonBox" id="dialog-action_area2">
<property name="visible">True</property>
<property name="layout_style">GTK_BUTTONBOX_END</property>
<child>
<widget class="GtkButton" id="button4">
<property name="visible">True</property>
<property name="can_focus">True</property>
<property name="receives_default">True</property>
<property name="label" translatable="yes">gtk-close</property>
<property name="use_stock">True</property>
<property name="response_id">0</property>
</widget>
</child>
</widget>
<packing>
<property name="expand">False</property>
<property name="pack_type">GTK_PACK_END</property>
</packing>
</child>
</widget>
</child>
</widget>
<widget class="GtkWindow" id="main_window">
<child>
<widget class="GtkVBox" id="main_window_vbox">
<property name="visible">True</property>
<child>
<widget class="GtkToolbar" id="main_toolbar">
<property name="visible">True</property>
<child>
<widget class="GtkToolButton" id="main_toolbutton_build">
<property name="visible">True</property>
<property name="label" translatable="yes">Build</property>
<property name="stock_id">gtk-execute</property>
</widget>
<packing>
<property name="expand">False</property>
</packing>
</child>
</widget>
<packing>
<property name="expand">False</property>
</packing>
</child>
<child>
<widget class="GtkVPaned" id="vpaned1">
<property name="visible">True</property>
<property name="can_focus">True</property>
<child>
<widget class="GtkScrolledWindow" id="results_scrolledwindow">
<property name="visible">True</property>
<property name="can_focus">True</property>
<property name="hscrollbar_policy">GTK_POLICY_AUTOMATIC</property>
<property name="vscrollbar_policy">GTK_POLICY_AUTOMATIC</property>
<child>
<placeholder/>
</child>
</widget>
<packing>
<property name="resize">False</property>
<property name="shrink">True</property>
</packing>
</child>
<child>
<widget class="GtkScrolledWindow" id="progress_scrolledwindow">
<property name="visible">True</property>
<property name="can_focus">True</property>
<property name="hscrollbar_policy">GTK_POLICY_AUTOMATIC</property>
<property name="vscrollbar_policy">GTK_POLICY_AUTOMATIC</property>
<child>
<placeholder/>
</child>
</widget>
<packing>
<property name="resize">True</property>
<property name="shrink">True</property>
</packing>
</child>
</widget>
<packing>
<property name="position">1</property>
</packing>
</child>
</widget>
</child>
</widget>
</glade-interface>
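A sketch of how a glade UI like the one above is typically loaded with PyGTK's glade bindings; the file name is an assumption, while the widget ids come from the XML:

    import gtk
    import gtk.glade

    xml = gtk.glade.XML("hob.glade")  # file name assumed
    main_window = xml.get_widget("main_window")
    build_button = xml.get_widget("main_toolbutton_build")
    build_button.connect("clicked", lambda button: gtk.main_quit())
    main_window.show_all()
    gtk.main()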
@@ -1,311 +0,0 @@
#
# BitBake Graphical GTK User Interface
#
# Copyright (C) 2008 Intel Corporation
#
# Authored by Rob Bradford <rob@linux.intel.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import gtk
import gobject
import logging
import os
import time
import urllib
import urllib2

import bb  # needed for the bb.build/bb.event isinstance checks below

class Colors(object):
    OK = "#ffffff"
    RUNNING = "#aaffaa"
    WARNING = "#f88017"
    ERROR = "#ffaaaa"

class RunningBuildModel (gtk.TreeStore):
    (COL_LOG, COL_PACKAGE, COL_TASK, COL_MESSAGE, COL_ICON, COL_COLOR, COL_NUM_ACTIVE) = range(7)

    def __init__ (self):
        gtk.TreeStore.__init__ (self,
                                gobject.TYPE_STRING,
                                gobject.TYPE_STRING,
                                gobject.TYPE_STRING,
                                gobject.TYPE_STRING,
                                gobject.TYPE_STRING,
                                gobject.TYPE_STRING,
                                gobject.TYPE_INT)

class RunningBuild (gobject.GObject):
    __gsignals__ = {
        'build-succeeded' : (gobject.SIGNAL_RUN_LAST,
                             gobject.TYPE_NONE,
                             ()),
        'build-failed' : (gobject.SIGNAL_RUN_LAST,
                          gobject.TYPE_NONE,
                          ())
    }
    pids_to_task = {}
    tasks_to_iter = {}

    def __init__ (self):
        gobject.GObject.__init__ (self)
        self.model = RunningBuildModel()

    def handle_event (self, event, pbar=None):
        # Handle an event from the event queue, this may result in updating
        # the model and thus the UI. Or it may be to tell us that the build
        # has finished successfully (or not, as the case may be.)

        parent = None
        pid = 0
        package = None
        task = None

        # If we have a pid attached to this message/event try and get the
        # (package, task) pair for it. If we get that then get the parent iter
        # for the message.
        if hasattr(event, 'pid'):
            pid = event.pid
        if hasattr(event, 'process'):
            pid = event.process

        if pid and pid in self.pids_to_task:
            (package, task) = self.pids_to_task[pid]
            parent = self.tasks_to_iter[(package, task)]

        if isinstance(event, logging.LogRecord):
            if event.msg.startswith ("Running task"):
                return # don't add these to the list

            if event.levelno >= logging.ERROR:
                icon = "dialog-error"
                color = Colors.ERROR
            elif event.levelno >= logging.WARNING:
                icon = "dialog-warning"
                color = Colors.WARNING
            else:
                icon = None
                color = Colors.OK

            # if we know which package we belong to, we'll append onto its list.
            # otherwise, we'll jump to the top of the master list
            if parent:
                tree_add = self.model.append
            else:
                tree_add = self.model.prepend
            tree_add(parent,
                     (None,
                      package,
                      task,
                      event.getMessage(),
                      icon,
                      color,
                      0))

        elif isinstance(event, bb.build.TaskStarted):
            (package, task) = (event._package, event._task)

            # Save out this PID.
            self.pids_to_task[pid] = (package, task)

            # Check if we already have this package in our model. If so then
            # that can be the parent for the task. Otherwise we create a new
            # top level for the package.
            if ((package, None) in self.tasks_to_iter):
                parent = self.tasks_to_iter[(package, None)]
            else:
                parent = self.model.prepend(None, (None,
                                                   package,
                                                   None,
                                                   "Package: %s" % (package),
                                                   None,
                                                   Colors.OK,
                                                   0))
                self.tasks_to_iter[(package, None)] = parent

            # Because this parent package now has an active child mark it as
            # such.
            # @todo if parent is already in error, don't mark it green
            self.model.set(parent, self.model.COL_ICON, "gtk-execute",
                           self.model.COL_COLOR, Colors.RUNNING)

            # Add an entry in the model for this task
            i = self.model.append (parent, (None,
                                            package,
                                            task,
                                            "Task: %s" % (task),
                                            "gtk-execute",
                                            Colors.RUNNING,
                                            0))

            # update the parent's active task count
            num_active = self.model.get(parent, self.model.COL_NUM_ACTIVE)[0] + 1
            self.model.set(parent, self.model.COL_NUM_ACTIVE, num_active)

            # Save out the iter so that we can find it when we have a message
            # that we need to attach to a task.
            self.tasks_to_iter[(package, task)] = i

        elif isinstance(event, bb.build.TaskBase):
            current = self.tasks_to_iter[(package, task)]
            parent = self.tasks_to_iter[(package, None)]

            # remove this task from the parent's active count
            num_active = self.model.get(parent, self.model.COL_NUM_ACTIVE)[0] - 1
            self.model.set(parent, self.model.COL_NUM_ACTIVE, num_active)

            if isinstance(event, bb.build.TaskFailed):
                # Mark the task and parent as failed
                icon = "dialog-error"
                color = Colors.ERROR

                logfile = event.logfile
                if logfile and os.path.exists(logfile):
                    with open(logfile) as f:
                        logdata = f.read()
                        self.model.append(current, ('pastebin', None, None, logdata, 'gtk-error', Colors.OK, 0))

                for i in (current, parent):
                    self.model.set(i, self.model.COL_ICON, icon,
                                   self.model.COL_COLOR, color)
            else:
                icon = None
                color = Colors.OK

                # Mark the task as inactive
                self.model.set(current, self.model.COL_ICON, icon,
                               self.model.COL_COLOR, color)

                # Mark the parent package as inactive, but make sure to
                # preserve error and active states
                i = self.tasks_to_iter[(package, None)]
                if self.model.get(parent, self.model.COL_ICON) != 'dialog-error':
                    self.model.set(parent, self.model.COL_ICON, icon)
                    if num_active == 0:
                        self.model.set(parent, self.model.COL_COLOR, Colors.OK)

            # Clear the iters and the pids since when the task goes away the
            # pid will no longer be used for messages
            del self.tasks_to_iter[(package, task)]
            del self.pids_to_task[pid]

        elif isinstance(event, bb.event.BuildStarted):
            self.model.prepend(None, (None,
                                      None,
                                      None,
                                      "Build Started (%s)" % time.strftime('%m/%d/%Y %H:%M:%S'),
                                      None,
                                      Colors.OK,
                                      0))
        elif isinstance(event, bb.event.BuildCompleted):
            failures = int (event._failures)
            self.model.prepend(None, (None,
                                      None,
                                      None,
                                      "Build Completed (%s)" % time.strftime('%m/%d/%Y %H:%M:%S'),
                                      None,
                                      Colors.OK,
                                      0))

            # Emit the appropriate signal depending on the number of failures
            if (failures >= 1):
                self.emit ("build-failed")
            else:
                self.emit ("build-succeeded")

        elif isinstance(event, bb.event.CacheLoadStarted) and pbar:
            pbar.set_title("Loading cache")
            self.progress_total = event.total
            pbar.update(0, self.progress_total)
        elif isinstance(event, bb.event.CacheLoadProgress) and pbar:
            pbar.update(event.current, self.progress_total)
        elif isinstance(event, bb.event.CacheLoadCompleted) and pbar:
            pbar.update(self.progress_total, self.progress_total)

        elif isinstance(event, bb.event.ParseStarted) and pbar:
            pbar.set_title("Processing recipes")
            self.progress_total = event.total
            pbar.update(0, self.progress_total)
        elif isinstance(event, bb.event.ParseProgress) and pbar:
            pbar.update(event.current, self.progress_total)
        elif isinstance(event, bb.event.ParseCompleted) and pbar:
            pbar.hide()

        return


def do_pastebin(text):
    url = 'http://pastebin.com/api_public.php'
    params = {'paste_code': text, 'paste_format': 'text'}

    req = urllib2.Request(url, urllib.urlencode(params))
    response = urllib2.urlopen(req)
    paste_url = response.read()

    return paste_url
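A quick sketch of calling it directly; note that this posts to the historical pastebin.com public API, so treat the endpoint as illustrative:

    log_text = "example build log"  # illustrative
    print do_pastebin(log_text)     # prints the URL of the new paste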

class RunningBuildTreeView (gtk.TreeView):
    __gsignals__ = {
        "button_press_event" : "override"
    }
    def __init__ (self):
        gtk.TreeView.__init__ (self)

        # The icon that indicates whether we're building or failed.
        renderer = gtk.CellRendererPixbuf ()
        col = gtk.TreeViewColumn ("Status", renderer)
        col.add_attribute (renderer, "icon-name", 4)
        self.append_column (col)

        # The message of the build.
        self.message_renderer = gtk.CellRendererText ()
        self.message_column = gtk.TreeViewColumn ("Message", self.message_renderer, text=3)
        self.message_column.add_attribute(self.message_renderer, 'background', 5)
        self.message_renderer.set_property('editable', 5)
        self.append_column (self.message_column)

    def do_button_press_event(self, event):
        gtk.TreeView.do_button_press_event(self, event)

        if event.button == 3:
            selection = super(RunningBuildTreeView, self).get_selection()
            (model, iter) = selection.get_selected()
            if iter is not None:
                can_paste = model.get(iter, model.COL_LOG)[0]
                if can_paste == 'pastebin':
                    # build a simple menu with a pastebin option
                    menu = gtk.Menu()
                    menuitem = gtk.MenuItem("Send log to pastebin")
                    menu.append(menuitem)
                    menuitem.connect("activate", self.pastebin_handler, (model, iter))
                    menuitem.show()
                    menu.show()
                    menu.popup(None, None, None, event.button, event.time)

    def pastebin_handler(self, widget, data):
        """
        Send the log data to pastebin, then add the new paste url to the
        clipboard.
        """
        (model, iter) = data
        paste_url = do_pastebin(model.get(iter, model.COL_MESSAGE)[0])

        # @todo Provide visual feedback to the user that it is done and that
        # it worked.
        print paste_url

        clipboard = gtk.clipboard_get()
        clipboard.set_text(paste_url)
        clipboard.store()
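A minimal sketch of wiring the model and view together; the BitBake event source is assumed to be polled elsewhere and fed into handle_event():

    build = RunningBuild()
    view = RunningBuildTreeView()
    view.set_model(build.model)

    def on_failed(running_build):
        print("build failed")

    build.connect("build-failed", on_failed)
    # for each event pulled off the BitBake queue:
    #     build.handle_event(event)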
@@ -1,346 +0,0 @@
#
# BitBake Graphical GTK User Interface
#
# Copyright (C) 2011 Intel Corporation
#
# Authored by Joshua Lock <josh@linux.intel.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import gtk
import gobject

class TaskListModel(gtk.ListStore):
    """
    This class defines a gtk.ListStore subclass which will convert the output
    of the bb.event.TargetsTreeGenerated event into a gtk.ListStore whilst also
    providing convenience functions to access gtk.TreeModel subclasses which
    provide filtered views of the data.
    """
    (COL_NAME, COL_DESC, COL_LIC, COL_GROUP, COL_DEPS, COL_BINB, COL_TYPE, COL_INC) = range(8)

    __gsignals__ = {
        "tasklist-populated" : (gobject.SIGNAL_RUN_LAST,
                                gobject.TYPE_NONE,
                                ())
    }

    def __init__(self):
        self.contents = None
        self.tasks = None
        self.packages = None
        self.images = None

        gtk.ListStore.__init__ (self,
                                gobject.TYPE_STRING,
                                gobject.TYPE_STRING,
                                gobject.TYPE_STRING,
                                gobject.TYPE_STRING,
                                gobject.TYPE_STRING,
                                gobject.TYPE_STRING,
                                gobject.TYPE_STRING,
                                gobject.TYPE_BOOLEAN)

    """
    Create, if required, and return a filtered gtk.TreeModel
    containing only the items which are to be included in the
    image
    """
    def contents_model(self):
        if not self.contents:
            self.contents = self.filter_new()
            self.contents.set_visible_column(self.COL_INC)
        return self.contents

    """
    Helper function to determine whether an item is a task
    """
    def task_model_filter(self, model, it):
        if model.get_value(it, self.COL_TYPE) == 'task':
            return True
        else:
            return False

    """
    Create, if required, and return a filtered gtk.TreeModel
    containing only the items which are tasks
    """
    def tasks_model(self):
        if not self.tasks:
            self.tasks = self.filter_new()
            self.tasks.set_visible_func(self.task_model_filter)
        return self.tasks

    """
    Helper function to determine whether an item is an image
    """
    def image_model_filter(self, model, it):
        if model.get_value(it, self.COL_TYPE) == 'image':
            return True
        else:
            return False

    """
    Create, if required, and return a filtered gtk.TreeModel
    containing only the items which are images
    """
    def images_model(self):
        if not self.images:
            self.images = self.filter_new()
            self.images.set_visible_func(self.image_model_filter)
        return self.images

    """
    Helper function to determine whether an item is a package
    """
    def package_model_filter(self, model, it):
        if model.get_value(it, self.COL_TYPE) == 'package':
            return True
        else:
            return False

    """
    Create, if required, and return a filtered gtk.TreeModel
    containing only the items which are packages
    """
    def packages_model(self):
        if not self.packages:
            self.packages = self.filter_new()
            self.packages.set_visible_func(self.package_model_filter)
        return self.packages

    """
    The populate() function takes as input the data from a
    bb.event.TargetsTreeGenerated event and populates the TaskList.
    Once the population is done it emits gsignal tasklist-populated
    to notify any listeners that the model is ready
    """
    def populate(self, event_model):
        for item in event_model["pn"]:
            atype = 'package'
            name = item
            summary = event_model["pn"][item]["summary"]
            license = event_model["pn"][item]["license"]
            group = event_model["pn"][item]["section"]

            depends = event_model["depends"].get(item, "")
            rdepends = event_model["rdepends-pn"].get(item, "")
            depends = depends + rdepends
            depends = self.squish(depends)
            deps = " ".join(depends)

            if name.count('task-') > 0:
                atype = 'task'
            elif name.count('-image-') > 0:
                atype = 'image'

            self.set(self.append(), self.COL_NAME, name, self.COL_DESC, summary,
                     self.COL_LIC, license, self.COL_GROUP, group,
                     self.COL_DEPS, deps, self.COL_BINB, "",
                     self.COL_TYPE, atype, self.COL_INC, False)

        self.emit("tasklist-populated")

    """
    squish lst so that it doesn't contain any duplicates
    """
    def squish(self, lst):
        seen = {}
        for l in lst:
            seen[l] = 1
        return seen.keys()

    """
    Mark the item at path as not included
    NOTE:
    path should be a gtk.TreeModelPath into self (not a filtered model)
    """
    def remove_item_path(self, path):
        self[path][self.COL_BINB] = ""
        self[path][self.COL_INC] = False

    """
    Mark the item at path as not included, then recursively update any
    items which depended on it or were brought in by it
    """
    def mark(self, path):
        name = self[path][self.COL_NAME]
        it = self.get_iter_first()
        #print("Removing %s" % name)

        self.remove_item_path(path)

        # Remove all dependent packages, update binb
        while it:
            path = self.get_path(it)
            # FIXME: need to ensure partial name matching doesn't happen, regexp?
            if self[path][self.COL_INC] and self[path][self.COL_DEPS].count(name):
                #print("%s depended on %s, marking for removal" % (self[path][self.COL_NAME], name))
                # found a dependency, remove it
                self.mark(path)
            if self[path][self.COL_INC] and self[path][self.COL_BINB].count(name):
                binb = self.find_alt_dependency(self[path][self.COL_NAME])
                #print("%s was brought in by %s, binb set to %s" % (self[path][self.COL_NAME], name, binb))
                self[path][self.COL_BINB] = binb
            it = self.iter_next(it)

    """
    Sweep up any items whose binb (brought in by) entry is now empty
    """
    def sweep_up(self):
        removals = []
        it = self.get_iter_first()

        while it:
            path = self.get_path(it)
            binb = self[path][self.COL_BINB]
            if binb == "" or binb is None:
                #print("Sweeping up %s" % self[path][self.COL_NAME])
                if not path in removals:
                    removals.append(path)
            it = self.iter_next(it)

        while removals:
            path = removals.pop()
            self.mark(path)

    """
    Remove an item from the contents
    """
    def remove_item(self, path):
        self.mark(path)
        self.sweep_up()

    """
    Find the name of an item in the image contents which depends on the
    item called name. Returns either a dependent item name (str) or "".
    """
    def find_alt_dependency(self, name):
        it = self.get_iter_first()
        while it:
            # iterate all items in the model
            path = self.get_path(it)
            deps = self[path][self.COL_DEPS]
            itname = self[path][self.COL_NAME]
            inc = self[path][self.COL_INC]
            if itname != name and inc and deps.count(name) > 0:
                # if this item depends on the item, return this item's name
                #print("%s depends on %s" % (itname, name))
                return itname
            it = self.iter_next(it)
        return ""

    """
    Convert a path in self to a path in the filtered contents model
    """
    def contents_path_for_path(self, path):
        return self.contents.convert_child_path_to_path(path)

    """
    Check the self.contents gtk.TreeModel for an item
    where COL_NAME matches item_name
    Returns True if a match is found, False otherwise
    """
    def contents_includes_name(self, item_name):
        it = self.contents.get_iter_first()
        while it:
            path = self.contents.get_path(it)
            if self.contents[path][self.COL_NAME] == item_name:
                return True
            it = self.contents.iter_next(it)
        return False

    """
    Add this item, and any of its dependencies, to the image contents
    """
    def include_item(self, item_path, binb=""):
        name = self[item_path][self.COL_NAME]
        deps = self[item_path][self.COL_DEPS]
        cur_inc = self[item_path][self.COL_INC]
        #print("Adding %s for %s dependency" % (name, binb))
        if not cur_inc:
            self[item_path][self.COL_INC] = True
            self[item_path][self.COL_BINB] = binb
        if deps:
            #print("Dependencies of %s are %s" % (name, deps))
            # add all of the deps and set their binb to this item
            for dep in deps.split(" "):
                # FIXME: this skipping of virtuals can't be right? Unless we choose only to
                # show target packages? In which case we should handle this server side...
                # If the contents model doesn't already contain dep, add it
                if not dep.startswith("virtual") and not self.contents_includes_name(dep):
                    path = self.find_path_for_item(dep)
                    if path:
                        self.include_item(path, name)
                    else:
                        pass

    """
    Find the model path for the item_name
    Returns the path in the model or None
    """
    def find_path_for_item(self, item_name):
        it = self.get_iter_first()
        path = None
        while it:
            path = self.get_path(it)
            if (self[path][self.COL_NAME] == item_name):
                return path
            else:
                it = self.iter_next(it)
        return None

    """
    Empty self.contents by setting the include flag of each entry to False
    """
    def reset(self):
        it = self.contents.get_iter_first()
        while it:
            path = self.contents.get_path(it)
            opath = self.contents.convert_path_to_child_path(path)
            self[opath][self.COL_INC] = False
            self[opath][self.COL_BINB] = ""
            # As we've just removed the first item...
            it = self.contents.get_iter_first()

    """
    Returns True if one of the selected tasks is an image, False otherwise
    """
    def targets_contains_image(self):
        it = self.images.get_iter_first()
        while it:
            path = self.images.get_path(it)
            inc = self.images[path][self.COL_INC]
            if inc:
                return True
            it = self.images.iter_next(it)
        return False

    """
    Return a list of all selected items which are not -native or -cross
    """
    def get_targets(self):
        tasks = []

        it = self.contents.get_iter_first()
        while it:
            path = self.contents.get_path(it)
            name = self.contents[path][self.COL_NAME]
            stype = self.contents[path][self.COL_TYPE]
            if not name.count('-native') and not name.count('-cross'):
                tasks.append(name)
            it = self.contents.iter_next(it)
        return tasks
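A sketch of the intended flow, assuming the model dictionary delivered by bb.event.TargetsTreeGenerated as described above (the image name is illustrative):

    model = TaskListModel()
    contents = model.contents_model()  # create the filtered view up front

    def on_populated(model):
        path = model.find_path_for_item("example-image")
        if path:
            model.include_item(path)
        print(model.get_targets())

    model.connect("tasklist-populated", on_populated)
    # fed from the BitBake event handler:
    #     model.populate(event._model)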
@@ -1,307 +0,0 @@
|
||||
#
|
||||
# BitBake Graphical GTK based Dependency Explorer
|
||||
#
|
||||
# Copyright (C) 2007 Ross Burton
|
||||
# Copyright (C) 2007 - 2008 Richard Purdie
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License version 2 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License along
|
||||
# with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
import gobject
|
||||
import gtk
|
||||
import Queue
|
||||
import threading
|
||||
import xmlrpclib
|
||||
import bb
|
||||
import bb.event
|
||||
from bb.ui.crumbs.progress import ProgressBar
|
||||
|
||||
# Package Model
|
||||
(COL_PKG_NAME) = (0)
|
||||
|
||||
# Dependency Model
|
||||
(TYPE_DEP, TYPE_RDEP) = (0, 1)
|
||||
(COL_DEP_TYPE, COL_DEP_PARENT, COL_DEP_PACKAGE) = (0, 1, 2)
|
||||
|
||||
|
||||
class PackageDepView(gtk.TreeView):
|
||||
def __init__(self, model, dep_type, label):
|
||||
gtk.TreeView.__init__(self)
|
||||
self.current = None
|
||||
self.dep_type = dep_type
|
||||
self.filter_model = model.filter_new()
|
||||
self.filter_model.set_visible_func(self._filter)
|
||||
self.set_model(self.filter_model)
|
||||
#self.connect("row-activated", self.on_package_activated, COL_DEP_PACKAGE)
|
||||
self.append_column(gtk.TreeViewColumn(label, gtk.CellRendererText(), text=COL_DEP_PACKAGE))
|
||||
|
||||
def _filter(self, model, iter):
|
||||
(this_type, package) = model.get(iter, COL_DEP_TYPE, COL_DEP_PARENT)
|
||||
if this_type != self.dep_type: return False
|
||||
return package == self.current
|
||||
|
||||
def set_current_package(self, package):
|
||||
self.current = package
|
||||
self.filter_model.refilter()
|
||||
|
||||
|
||||
class PackageReverseDepView(gtk.TreeView):
|
||||
def __init__(self, model, label):
|
||||
gtk.TreeView.__init__(self)
|
||||
self.current = None
|
||||
self.filter_model = model.filter_new()
|
||||
self.filter_model.set_visible_func(self._filter)
|
||||
self.set_model(self.filter_model)
|
||||
self.append_column(gtk.TreeViewColumn(label, gtk.CellRendererText(), text=COL_DEP_PARENT))
|
||||
|
||||
def _filter(self, model, iter):
|
||||
package = model.get_value(iter, COL_DEP_PACKAGE)
|
||||
return package == self.current
|
||||
|
||||
def set_current_package(self, package):
|
||||
self.current = package
|
||||
self.filter_model.refilter()
|
||||
|
||||
|
||||
class DepExplorer(gtk.Window):
|
||||
def __init__(self):
|
||||
gtk.Window.__init__(self)
|
||||
self.set_title("Dependency Explorer")
|
||||
self.set_default_size(500, 500)
|
||||
self.connect("delete-event", gtk.main_quit)
|
||||
|
||||
# Create the data models
|
||||
self.pkg_model = gtk.ListStore(gobject.TYPE_STRING)
|
||||
self.pkg_model.set_sort_column_id(COL_PKG_NAME, gtk.SORT_ASCENDING)
|
||||
self.depends_model = gtk.ListStore(gobject.TYPE_INT, gobject.TYPE_STRING, gobject.TYPE_STRING)
|
||||
self.depends_model.set_sort_column_id(COL_DEP_PACKAGE, gtk.SORT_ASCENDING)
|
||||
|
||||
pane = gtk.HPaned()
|
||||
pane.set_position(250)
|
||||
self.add(pane)
|
||||
|
||||
# The master list of packages
|
||||
scrolled = gtk.ScrolledWindow()
|
||||
scrolled.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
|
||||
scrolled.set_shadow_type(gtk.SHADOW_IN)
|
||||
|
||||
self.pkg_treeview = gtk.TreeView(self.pkg_model)
|
||||
self.pkg_treeview.get_selection().connect("changed", self.on_cursor_changed)
|
||||
column = gtk.TreeViewColumn("Package", gtk.CellRendererText(), text=COL_PKG_NAME)
|
||||
self.pkg_treeview.append_column(column)
|
||||
pane.add1(scrolled)
|
||||
scrolled.add(self.pkg_treeview)
|
||||
|
||||
box = gtk.VBox(homogeneous=True, spacing=4)
|
||||
|
||||
# Runtime Depends
|
||||
scrolled = gtk.ScrolledWindow()
|
||||
scrolled.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
|
||||
scrolled.set_shadow_type(gtk.SHADOW_IN)
|
||||
self.rdep_treeview = PackageDepView(self.depends_model, TYPE_RDEP, "Runtime Depends")
|
||||
self.rdep_treeview.connect("row-activated", self.on_package_activated, COL_DEP_PACKAGE)
|
||||
scrolled.add(self.rdep_treeview)
|
||||
box.add(scrolled)
|
||||
|
||||
# Build Depends
|
||||
scrolled = gtk.ScrolledWindow()
|
||||
scrolled.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
|
||||
scrolled.set_shadow_type(gtk.SHADOW_IN)
|
||||
self.dep_treeview = PackageDepView(self.depends_model, TYPE_DEP, "Build Depends")
|
||||
self.dep_treeview.connect("row-activated", self.on_package_activated, COL_DEP_PACKAGE)
|
||||
scrolled.add(self.dep_treeview)
|
||||
box.add(scrolled)
|
||||
pane.add2(box)
|
||||
|
||||
# Reverse Depends
|
||||
scrolled = gtk.ScrolledWindow()
|
||||
scrolled.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
|
||||
scrolled.set_shadow_type(gtk.SHADOW_IN)
|
||||
self.revdep_treeview = PackageReverseDepView(self.depends_model, "Reverse Depends")
|
||||
self.revdep_treeview.connect("row-activated", self.on_package_activated, COL_DEP_PARENT)
|
||||
scrolled.add(self.revdep_treeview)
|
||||
box.add(scrolled)
|
||||
pane.add2(box)
|
||||
|
||||
self.show_all()
|
||||
|
||||
def on_package_activated(self, treeview, path, column, data_col):
|
||||
model = treeview.get_model()
|
||||
package = model.get_value(model.get_iter(path), data_col)
|
||||
|
||||
pkg_path = []
|
||||
def finder(model, path, iter, needle):
|
||||
package = model.get_value(iter, COL_PKG_NAME)
|
||||
if package == needle:
|
||||
pkg_path.append(path)
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
self.pkg_model.foreach(finder, package)
|
||||
if pkg_path:
|
||||
self.pkg_treeview.get_selection().select_path(pkg_path[0])
|
||||
self.pkg_treeview.scroll_to_cell(pkg_path[0])
|
||||
|
||||
def on_cursor_changed(self, selection):
(model, it) = selection.get_selected()
if it is None:
current_package = None
else:
current_package = model.get_value(it, COL_PKG_NAME)
self.rdep_treeview.set_current_package(current_package)
|
||||
self.dep_treeview.set_current_package(current_package)
|
||||
self.revdep_treeview.set_current_package(current_package)
|
||||
|
||||
|
||||
def parse(depgraph, pkg_model, depends_model):
|
||||
for package in depgraph["pn"]:
|
||||
pkg_model.set(pkg_model.append(), COL_PKG_NAME, package)
|
||||
|
||||
for package in depgraph["depends"]:
|
||||
for depend in depgraph["depends"][package]:
|
||||
depends_model.set (depends_model.append(),
|
||||
COL_DEP_TYPE, TYPE_DEP,
|
||||
COL_DEP_PARENT, package,
|
||||
COL_DEP_PACKAGE, depend)
|
||||
|
||||
for package in depgraph["rdepends-pn"]:
|
||||
for rdepend in depgraph["rdepends-pn"][package]:
|
||||
depends_model.set (depends_model.append(),
|
||||
COL_DEP_TYPE, TYPE_RDEP,
|
||||
COL_DEP_PARENT, package,
|
||||
COL_DEP_PACKAGE, rdepend)
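# For reference, the depgraph consumed above is assumed (from these lookups)
# to be shaped roughly like:
#   {"pn": [...], "depends": {pkg: [dep, ...]}, "rdepends-pn": {pkg: [rdep, ...]}}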
|
||||
|
||||
|
||||
class gtkthread(threading.Thread):
|
||||
quit = threading.Event()
|
||||
def __init__(self, shutdown):
|
||||
threading.Thread.__init__(self)
|
||||
self.setDaemon(True)
|
||||
self.shutdown = shutdown
|
||||
|
||||
def run(self):
|
||||
gobject.threads_init()
|
||||
gtk.gdk.threads_init()
|
||||
gtk.main()
|
||||
gtkthread.quit.set()
|
||||
|
||||
|
||||
def main(server, eventHandler):
|
||||
try:
|
||||
cmdline = server.runCommand(["getCmdLineAction"])
|
||||
if not cmdline or cmdline[0] != "generateDotGraph":
|
||||
print("This UI is only compatible with the -g option")
|
||||
return
|
||||
ret = server.runCommand(["generateDepTreeEvent", cmdline[1], cmdline[2]])
|
||||
if ret != True:
|
||||
print("Couldn't run command! %s" % ret)
|
||||
return
|
||||
except xmlrpclib.Fault as x:
|
||||
print("XMLRPC Fault getting commandline:\n %s" % x)
|
||||
return
|
||||
|
||||
shutdown = 0
|
||||
|
||||
gtkgui = gtkthread(shutdown)
|
||||
gtkgui.start()
|
||||
|
||||
gtk.gdk.threads_enter()
|
||||
dep = DepExplorer()
|
||||
pbar = ProgressBar(dep)
|
||||
pbar.connect("delete-event", gtk.main_quit)
|
||||
gtk.gdk.threads_leave()
|
||||
|
||||
progress_total = 0
|
||||
while True:
|
||||
try:
|
||||
event = eventHandler.waitEvent(0.25)
|
||||
if gtkthread.quit.isSet():
|
||||
server.runCommand(["stateStop"])
|
||||
break
|
||||
|
||||
if event is None:
|
||||
continue
|
||||
|
||||
if isinstance(event, bb.event.CacheLoadStarted):
|
||||
progress_total = event.total
|
||||
gtk.gdk.threads_enter()
|
||||
pbar.set_title("Loading Cache")
|
||||
pbar.update(0, progress_total)
|
||||
gtk.gdk.threads_leave()
|
||||
|
||||
if isinstance(event, bb.event.CacheLoadProgress):
|
||||
x = event.current
|
||||
gtk.gdk.threads_enter()
|
||||
pbar.update(x, progress_total)
|
||||
gtk.gdk.threads_leave()
|
||||
continue
|
||||
|
||||
if isinstance(event, bb.event.CacheLoadCompleted):
|
||||
gtk.gdk.threads_enter()
|
||||
pbar.update(progress_total, progress_total)
|
||||
gtk.gdk.threads_leave()
|
||||
continue
|
||||
|
||||
if isinstance(event, bb.event.ParseStarted):
|
||||
progress_total = event.total
|
||||
gtk.gdk.threads_enter()
|
||||
pbar.set_title("Processing recipes")
|
||||
pbar.update(0, progress_total)
|
||||
gtk.gdk.threads_leave()
|
||||
|
||||
if isinstance(event, bb.event.ParseProgress):
|
||||
x = event.current
|
||||
gtk.gdk.threads_enter()
|
||||
pbar.update(x, progress_total)
|
||||
gtk.gdk.threads_leave()
|
||||
continue
|
||||
|
||||
if isinstance(event, bb.event.ParseCompleted):
|
||||
pbar.hide()
|
||||
continue
|
||||
|
||||
if isinstance(event, bb.event.DepTreeGenerated):
|
||||
gtk.gdk.threads_enter()
|
||||
parse(event._depgraph, dep.pkg_model, dep.depends_model)
|
||||
gtk.gdk.threads_leave()
|
||||
|
||||
if isinstance(event, bb.command.CommandCompleted):
|
||||
continue
|
||||
|
||||
if isinstance(event, bb.command.CommandFailed):
|
||||
print("Command execution failed: %s" % event.error)
|
||||
return event.exitcode
|
||||
|
||||
if isinstance(event, bb.command.CommandExit):
|
||||
return event.exitcode
|
||||
|
||||
if isinstance(event, bb.cooker.CookerExit):
|
||||
break
|
||||
|
||||
continue
|
||||
except EnvironmentError as ioerror:
|
||||
# ignore interrupted io
|
||||
if ioerror.args[0] == 4:
|
||||
pass
|
||||
except KeyboardInterrupt:
|
||||
if shutdown == 2:
|
||||
print("\nThird Keyboard Interrupt, exit.\n")
|
||||
break
|
||||
if shutdown == 1:
|
||||
print("\nSecond Keyboard Interrupt, stopping...\n")
|
||||
server.runCommand(["stateStop"])
|
||||
if shutdown == 0:
|
||||
print("\nKeyboard Interrupt, closing down...\n")
|
||||
server.runCommand(["stateShutdown"])
|
||||
shutdown = shutdown + 1
|
||||
pass
|
||||
@@ -1,112 +0,0 @@
|
||||
#
|
||||
# BitBake Graphical GTK User Interface
|
||||
#
|
||||
# Copyright (C) 2008 Intel Corporation
|
||||
#
|
||||
# Authored by Rob Bradford <rob@linux.intel.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License version 2 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License along
|
||||
# with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
import gobject
|
||||
import gtk
|
||||
import xmlrpclib
|
||||
from bb.ui.crumbs.runningbuild import RunningBuildTreeView, RunningBuild
|
||||
from bb.ui.crumbs.progress import ProgressBar
|
||||
|
||||
import Queue
|
||||
|
||||
|
||||
def event_handle_idle_func (eventHandler, build, pbar):
|
||||
|
||||
# Consume as many messages as we can in the time available to us
|
||||
event = eventHandler.getEvent()
|
||||
while event:
|
||||
build.handle_event (event, pbar)
|
||||
event = eventHandler.getEvent()
|
||||
|
||||
return True
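# Returning True keeps this callback registered with the gobject main loop so
# it fires again on the next timeout; returning False would remove it.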
|
||||
|
||||
def scroll_tv_cb (model, path, iter, view):
|
||||
view.scroll_to_cell (path)
|
||||
|
||||
|
||||
# @todo hook these into the GUI so the user has feedback...
|
||||
def running_build_failed_cb (running_build):
|
||||
pass
|
||||
|
||||
|
||||
def running_build_succeeded_cb (running_build):
|
||||
pass
|
||||
|
||||
|
||||
class MainWindow (gtk.Window):
|
||||
def __init__ (self):
|
||||
gtk.Window.__init__ (self, gtk.WINDOW_TOPLEVEL)
|
||||
|
||||
# Setup tree view and the scrolled window
|
||||
scrolled_window = gtk.ScrolledWindow ()
|
||||
self.add (scrolled_window)
|
||||
self.cur_build_tv = RunningBuildTreeView()
|
||||
self.connect("delete-event", gtk.main_quit)
|
||||
self.set_default_size(640, 480)
|
||||
scrolled_window.add (self.cur_build_tv)
|
||||
|
||||
|
||||
def main (server, eventHandler):
|
||||
gobject.threads_init()
|
||||
gtk.gdk.threads_init()
|
||||
|
||||
window = MainWindow ()
|
||||
window.show_all ()
|
||||
pbar = ProgressBar(window)
|
||||
pbar.connect("delete-event", gtk.main_quit)
|
||||
|
||||
# Create the object for the current build
|
||||
running_build = RunningBuild ()
|
||||
window.cur_build_tv.set_model (running_build.model)
|
||||
running_build.model.connect("row-inserted", scroll_tv_cb, window.cur_build_tv)
|
||||
running_build.connect ("build-succeeded", running_build_succeeded_cb)
|
||||
running_build.connect ("build-failed", running_build_failed_cb)
|
||||
|
||||
try:
|
||||
cmdline = server.runCommand(["getCmdLineAction"])
|
||||
if not cmdline:
|
||||
return 1
|
||||
ret = server.runCommand(cmdline)
|
||||
if ret != True:
|
||||
print("Couldn't run command! %s" % ret)
|
||||
return 1
|
||||
except xmlrpclib.Fault as x:
|
||||
print("XMLRPC Fault getting commandline:\n %s" % x)
|
||||
return 1
|
||||
|
||||
# Use a timeout function for probing the event queue to find out if we
|
||||
# have a message waiting for us.
|
||||
gobject.timeout_add (100,
|
||||
event_handle_idle_func,
|
||||
eventHandler,
|
||||
running_build,
|
||||
pbar)
|
||||
|
||||
try:
|
||||
gtk.main()
|
||||
except EnvironmentError as ioerror:
|
||||
# ignore interrupted io
|
||||
if ioerror.args[0] == 4:
|
||||
pass
|
||||
except KeyboardInterrupt:
|
||||
pass
|
||||
finally:
|
||||
server.runCommand(["stateStop"])
|
||||
|
||||
@@ -1,596 +0,0 @@
|
||||
#
|
||||
# BitBake Graphical GTK User Interface
|
||||
#
|
||||
# Copyright (C) 2011 Intel Corporation
|
||||
#
|
||||
# Authored by Joshua Lock <josh@linux.intel.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License version 2 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License along
|
||||
# with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
import gobject
|
||||
import gtk
|
||||
from bb.ui.crumbs.progress import ProgressBar
|
||||
from bb.ui.crumbs.tasklistmodel import TaskListModel
|
||||
from bb.ui.crumbs.hobeventhandler import HobHandler
|
||||
from bb.ui.crumbs.runningbuild import RunningBuildTreeView, RunningBuild
|
||||
import xmlrpclib
|
||||
import logging
|
||||
import Queue
|
||||
|
||||
class MainWindow (gtk.Window):
|
||||
|
||||
def __init__(self, taskmodel, handler, curr_mach=None, curr_distro=None):
|
||||
gtk.Window.__init__(self, gtk.WINDOW_TOPLEVEL)
|
||||
self.model = taskmodel
|
||||
self.model.connect("tasklist-populated", self.update_model)
|
||||
self.curr_mach = curr_mach
|
||||
self.curr_distro = curr_distro
|
||||
self.handler = handler
|
||||
self.set_border_width(10)
|
||||
self.connect("delete-event", gtk.main_quit)
|
||||
self.set_title("BitBake Image Creator")
|
||||
self.set_default_size(700, 600)
|
||||
|
||||
self.build = RunningBuild()
|
||||
self.build.connect("build-succeeded", self.running_build_succeeded_cb)
|
||||
self.build.connect("build-failed", self.running_build_failed_cb)
|
||||
|
||||
createview = self.create_build_gui()
|
||||
buildview = self.view_build_gui()
|
||||
self.nb = gtk.Notebook()
|
||||
self.nb.append_page(createview)
|
||||
self.nb.append_page(buildview)
|
||||
self.nb.set_current_page(0)
|
||||
self.nb.set_show_tabs(False)
|
||||
self.add(self.nb)
|
||||
self.generating = False
|
||||
|
||||
def scroll_tv_cb(self, model, path, it, view):
|
||||
view.scroll_to_cell(path)
|
||||
|
||||
def running_build_failed_cb(self, running_build):
|
||||
# FIXME: handle this
|
||||
return
|
||||
|
||||
def running_build_succeeded_cb(self, running_build):
|
||||
label = gtk.Label("Build completed, start another build?")
|
||||
dialog = gtk.Dialog("Build complete",
|
||||
self,
|
||||
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
|
||||
(gtk.STOCK_NO, gtk.RESPONSE_NO,
|
||||
gtk.STOCK_YES, gtk.RESPONSE_YES))
|
||||
dialog.vbox.pack_start(label)
|
||||
label.show()
|
||||
response = dialog.run()
|
||||
dialog.destroy()
|
||||
if not response == gtk.RESPONSE_YES:
|
||||
self.model.reset() # NOTE: really?
|
||||
self.nb.set_current_page(0)
|
||||
return
|
||||
|
||||
def machine_combo_changed_cb(self, combo, handler):
|
||||
mach = combo.get_active_text()
|
||||
if mach != self.curr_mach:
|
||||
self.curr_mach = mach
|
||||
handler.set_machine(mach)
|
||||
|
||||
def update_machines(self, handler, machines):
|
||||
active = 0
|
||||
for machine in machines:
|
||||
self.machine_combo.append_text(machine)
|
||||
if machine == self.curr_mach:
|
||||
self.machine_combo.set_active(active)
|
||||
active = active + 1
|
||||
self.machine_combo.connect("changed", self.machine_combo_changed_cb, handler)
|
||||
|
||||
def update_distros(self, handler, distros):
|
||||
# FIXME: when we add UI for changing distro this will be used
|
||||
return
|
||||
|
||||
def data_generated(self, handler):
|
||||
self.generating = False
|
||||
|
||||
def spin_idle_func(self, pbar):
|
||||
if self.generating:
|
||||
pbar.pulse()
|
||||
return True
|
||||
else:
|
||||
pbar.hide()
|
||||
return False
|
||||
|
||||
def busy(self, handler):
|
||||
self.generating = True
|
||||
pbar = ProgressBar(self)
|
||||
pbar.connect("delete-event", gtk.main_quit) # NOTE: questionable...
|
||||
pbar.pulse()
|
||||
gobject.timeout_add (200,
|
||||
self.spin_idle_func,
|
||||
pbar)
|
||||
|
||||
def update_model(self, model):
|
||||
pkgsaz_model = gtk.TreeModelSort(self.model.packages_model())
|
||||
pkgsaz_model.set_sort_column_id(self.model.COL_NAME, gtk.SORT_ASCENDING)
|
||||
self.pkgsaz_tree.set_model(pkgsaz_model)
|
||||
|
||||
# FIXME: need to implement a custom sort function, as otherwise the column
|
||||
# is re-ordered when toggling the inclusion state (COL_INC)
|
||||
pkgsgrp_model = gtk.TreeModelSort(self.model.packages_model())
|
||||
pkgsgrp_model.set_sort_column_id(self.model.COL_GROUP, gtk.SORT_ASCENDING)
|
||||
self.pkgsgrp_tree.set_model(pkgsgrp_model)
|
||||
|
||||
self.contents_tree.set_model(self.model.contents_model())
|
||||
self.images_tree.set_model(self.model.images_model())
|
||||
self.tasks_tree.set_model(self.model.tasks_model())
|
||||
|
||||
def reset_clicked_cb(self, button):
|
||||
label = gtk.Label("Are you sure you want to reset the image contents?")
|
||||
dialog = gtk.Dialog("Confirm reset", self,
|
||||
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
|
||||
(gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT,
|
||||
gtk.STOCK_OK, gtk.RESPONSE_ACCEPT))
|
||||
dialog.vbox.pack_start(label)
|
||||
label.show()
|
||||
response = dialog.run()
|
||||
dialog.destroy()
|
||||
if (response == gtk.RESPONSE_ACCEPT):
|
||||
self.model.reset()
|
||||
return
|
||||
|
||||
def bake_clicked_cb(self, button):
|
||||
if not self.model.targets_contains_image():
|
||||
label = gtk.Label("No image was selected. Just build the selected packages?")
|
||||
dialog = gtk.Dialog("Warning, no image selected",
|
||||
self,
|
||||
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
|
||||
(gtk.STOCK_NO, gtk.RESPONSE_NO,
|
||||
gtk.STOCK_YES, gtk.RESPONSE_YES))
|
||||
dialog.vbox.pack_start(label)
|
||||
label.show()
|
||||
response = dialog.run()
|
||||
dialog.destroy()
|
||||
if not response == gtk.RESPONSE_YES:
|
||||
return
|
||||
|
||||
# Note: We could "squash" the targets list to only include things not brought in by an image
|
||||
task_list = self.model.get_targets()
|
||||
if len(task_list):
|
||||
tasks = " ".join(task_list)
|
||||
# TODO: show a confirmation dialog
|
||||
print("Including these extra tasks in IMAGE_INSTALL: %s" % tasks)
|
||||
else:
|
||||
return
|
||||
|
||||
self.nb.set_current_page(1)
|
||||
self.handler.run_build(task_list)
|
||||
|
||||
return
|
||||
|
||||
def advanced_expander_cb(self, expander, param):
|
||||
return
|
||||
|
||||
def images(self):
|
||||
self.images_tree = gtk.TreeView()
|
||||
self.images_tree.set_headers_visible(True)
|
||||
self.images_tree.set_headers_clickable(False)
|
||||
self.images_tree.set_enable_search(True)
|
||||
self.images_tree.set_search_column(0)
|
||||
self.images_tree.get_selection().set_mode(gtk.SELECTION_NONE)
|
||||
|
||||
col = gtk.TreeViewColumn('Package')
|
||||
col1 = gtk.TreeViewColumn('Description')
|
||||
col2 = gtk.TreeViewColumn('License')
|
||||
col3 = gtk.TreeViewColumn('Include')
|
||||
col3.set_resizable(False)
|
||||
|
||||
self.images_tree.append_column(col)
|
||||
self.images_tree.append_column(col1)
|
||||
self.images_tree.append_column(col2)
|
||||
self.images_tree.append_column(col3)
|
||||
|
||||
cell = gtk.CellRendererText()
|
||||
cell1 = gtk.CellRendererText()
|
||||
cell2 = gtk.CellRendererText()
|
||||
cell3 = gtk.CellRendererToggle()
|
||||
cell3.set_property('activatable', True)
|
||||
cell3.connect("toggled", self.toggle_include_cb, self.images_tree)
|
||||
|
||||
col.pack_start(cell, True)
|
||||
col1.pack_start(cell1, True)
|
||||
col2.pack_start(cell2, True)
|
||||
col3.pack_start(cell3, True)
|
||||
|
||||
col.set_attributes(cell, text=self.model.COL_NAME)
|
||||
col1.set_attributes(cell1, text=self.model.COL_DESC)
|
||||
col2.set_attributes(cell2, text=self.model.COL_LIC)
|
||||
col3.set_attributes(cell3, active=self.model.COL_INC)
|
||||
|
||||
self.images_tree.show()
|
||||
|
||||
scroll = gtk.ScrolledWindow()
|
||||
scroll.set_policy(gtk.POLICY_NEVER, gtk.POLICY_ALWAYS)
|
||||
scroll.set_shadow_type(gtk.SHADOW_IN)
|
||||
scroll.add(self.images_tree)
|
||||
|
||||
return scroll
|
||||
|
||||
def toggle_package(self, path, model):
|
||||
# Convert path to path in original model
|
||||
opath = model.convert_path_to_child_path(path)
|
||||
# current include status
|
||||
inc = self.model[opath][self.model.COL_INC]
|
||||
if inc:
|
||||
self.model.mark(opath)
|
||||
self.model.sweep_up()
|
||||
#self.model.remove_package_full(cpath)
|
||||
else:
|
||||
self.model.include_item(opath)
|
||||
return
|
||||
|
||||
def remove_package_cb(self, cell, path):
|
||||
model = self.model.contents_model()
|
||||
label = gtk.Label("Are you sure you want to remove this item?")
|
||||
dialog = gtk.Dialog("Confirm removal", self,
|
||||
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
|
||||
(gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT,
|
||||
gtk.STOCK_OK, gtk.RESPONSE_ACCEPT))
|
||||
dialog.vbox.pack_start(label)
|
||||
label.show()
|
||||
response = dialog.run()
|
||||
dialog.destroy()
|
||||
if (response == gtk.RESPONSE_ACCEPT):
|
||||
self.toggle_package(path, model)
|
||||
|
||||
def toggle_include_cb(self, cell, path, tv):
|
||||
model = tv.get_model()
|
||||
self.toggle_package(path, model)
|
||||
|
||||
def toggle_pkg_include_cb(self, cell, path, tv):
|
||||
# there's an extra layer of models in the packages case.
|
||||
sort_model = tv.get_model()
|
||||
cpath = sort_model.convert_path_to_child_path(path)
|
||||
self.toggle_package(cpath, sort_model.get_model())
|
||||
|
||||
def pkgsaz(self):
|
||||
self.pkgsaz_tree = gtk.TreeView()
|
||||
self.pkgsaz_tree.set_headers_visible(True)
|
||||
self.pkgsaz_tree.set_headers_clickable(True)
|
||||
self.pkgsaz_tree.set_enable_search(True)
|
||||
self.pkgsaz_tree.set_search_column(0)
|
||||
self.pkgsaz_tree.get_selection().set_mode(gtk.SELECTION_NONE)
|
||||
|
||||
col = gtk.TreeViewColumn('Package')
|
||||
col1 = gtk.TreeViewColumn('Description')
|
||||
col1.set_resizable(True)
|
||||
col2 = gtk.TreeViewColumn('License')
|
||||
col2.set_resizable(True)
|
||||
col3 = gtk.TreeViewColumn('Group')
|
||||
col4 = gtk.TreeViewColumn('Include')
|
||||
col4.set_resizable(False)
|
||||
|
||||
self.pkgsaz_tree.append_column(col)
|
||||
self.pkgsaz_tree.append_column(col1)
|
||||
self.pkgsaz_tree.append_column(col2)
|
||||
self.pkgsaz_tree.append_column(col3)
|
||||
self.pkgsaz_tree.append_column(col4)
|
||||
|
||||
cell = gtk.CellRendererText()
|
||||
cell1 = gtk.CellRendererText()
|
||||
cell1.set_property('width-chars', 20)
|
||||
cell2 = gtk.CellRendererText()
|
||||
cell2.set_property('width-chars', 20)
|
||||
cell3 = gtk.CellRendererText()
|
||||
cell4 = gtk.CellRendererToggle()
|
||||
cell4.set_property('activatable', True)
|
||||
cell4.connect("toggled", self.toggle_pkg_include_cb, self.pkgsaz_tree)
|
||||
|
||||
col.pack_start(cell, True)
|
||||
col1.pack_start(cell1, True)
|
||||
col2.pack_start(cell2, True)
|
||||
col3.pack_start(cell3, True)
|
||||
col4.pack_start(cell4, True)
|
||||
|
||||
col.set_attributes(cell, text=self.model.COL_NAME)
|
||||
col1.set_attributes(cell1, text=self.model.COL_DESC)
|
||||
col2.set_attributes(cell2, text=self.model.COL_LIC)
|
||||
col3.set_attributes(cell3, text=self.model.COL_GROUP)
|
||||
col4.set_attributes(cell4, active=self.model.COL_INC)
|
||||
|
||||
self.pkgsaz_tree.show()
|
||||
|
||||
scroll = gtk.ScrolledWindow()
|
||||
scroll.set_policy(gtk.POLICY_NEVER, gtk.POLICY_ALWAYS)
|
||||
scroll.set_shadow_type(gtk.SHADOW_IN)
|
||||
scroll.add(self.pkgsaz_tree)
|
||||
|
||||
return scroll
|
||||
|
||||
def pkgsgrp(self):
|
||||
self.pkgsgrp_tree = gtk.TreeView()
|
||||
self.pkgsgrp_tree.set_headers_visible(True)
|
||||
self.pkgsgrp_tree.set_headers_clickable(False)
|
||||
self.pkgsgrp_tree.set_enable_search(True)
|
||||
self.pkgsgrp_tree.set_search_column(0)
|
||||
self.pkgsgrp_tree.get_selection().set_mode(gtk.SELECTION_NONE)
|
||||
|
||||
col = gtk.TreeViewColumn('Package')
|
||||
col1 = gtk.TreeViewColumn('Description')
|
||||
col1.set_resizable(True)
|
||||
col2 = gtk.TreeViewColumn('License')
|
||||
col2.set_resizable(True)
|
||||
col3 = gtk.TreeViewColumn('Group')
|
||||
col4 = gtk.TreeViewColumn('Include')
|
||||
col4.set_resizable(False)
|
||||
|
||||
self.pkgsgrp_tree.append_column(col)
|
||||
self.pkgsgrp_tree.append_column(col1)
|
||||
self.pkgsgrp_tree.append_column(col2)
|
||||
self.pkgsgrp_tree.append_column(col3)
|
||||
self.pkgsgrp_tree.append_column(col4)
|
||||
|
||||
cell = gtk.CellRendererText()
|
||||
cell1 = gtk.CellRendererText()
|
||||
cell1.set_property('width-chars', 20)
|
||||
cell2 = gtk.CellRendererText()
|
||||
cell2.set_property('width-chars', 20)
|
||||
cell3 = gtk.CellRendererText()
|
||||
cell4 = gtk.CellRendererToggle()
|
||||
cell4.set_property("activatable", True)
|
||||
cell4.connect("toggled", self.toggle_pkg_include_cb, self.pkgsgrp_tree)
|
||||
|
||||
col.pack_start(cell, True)
|
||||
col1.pack_start(cell1, True)
|
||||
col2.pack_start(cell2, True)
|
||||
col3.pack_start(cell3, True)
|
||||
col4.pack_start(cell4, True)
|
||||
|
||||
col.set_attributes(cell, text=self.model.COL_NAME)
|
||||
col1.set_attributes(cell1, text=self.model.COL_DESC)
|
||||
col2.set_attributes(cell2, text=self.model.COL_LIC)
|
||||
col3.set_attributes(cell3, text=self.model.COL_GROUP)
|
||||
col4.set_attributes(cell4, active=self.model.COL_INC)
|
||||
|
||||
self.pkgsgrp_tree.show()
|
||||
|
||||
scroll = gtk.ScrolledWindow()
|
||||
scroll.set_policy(gtk.POLICY_NEVER, gtk.POLICY_ALWAYS)
|
||||
scroll.set_shadow_type(gtk.SHADOW_IN)
|
||||
scroll.add(self.pkgsgrp_tree)
|
||||
|
||||
return scroll
|
||||
|
||||
def tasks(self):
|
||||
self.tasks_tree = gtk.TreeView()
|
||||
self.tasks_tree.set_headers_visible(True)
|
||||
self.tasks_tree.set_headers_clickable(False)
|
||||
self.tasks_tree.set_enable_search(True)
|
||||
self.tasks_tree.set_search_column(0)
|
||||
self.tasks_tree.get_selection().set_mode(gtk.SELECTION_NONE)
|
||||
|
||||
col = gtk.TreeViewColumn('Package')
|
||||
col1 = gtk.TreeViewColumn('Description')
|
||||
col2 = gtk.TreeViewColumn('Include')
|
||||
col2.set_resizable(False)
|
||||
|
||||
self.tasks_tree.append_column(col)
|
||||
self.tasks_tree.append_column(col1)
|
||||
self.tasks_tree.append_column(col2)
|
||||
|
||||
cell = gtk.CellRendererText()
|
||||
cell1 = gtk.CellRendererText()
|
||||
cell2 = gtk.CellRendererToggle()
|
||||
cell2.set_property('activatable', True)
|
||||
cell2.connect("toggled", self.toggle_include_cb, self.tasks_tree)
|
||||
|
||||
col.pack_start(cell, True)
|
||||
col1.pack_start(cell1, True)
|
||||
col2.pack_start(cell2, True)
|
||||
|
||||
col.set_attributes(cell, text=self.model.COL_NAME)
|
||||
col1.set_attributes(cell1, text=self.model.COL_DESC)
|
||||
col2.set_attributes(cell2, active=self.model.COL_INC)
|
||||
|
||||
self.tasks_tree.show()
|
||||
|
||||
scroll = gtk.ScrolledWindow()
|
||||
scroll.set_policy(gtk.POLICY_NEVER, gtk.POLICY_ALWAYS)
|
||||
scroll.set_shadow_type(gtk.SHADOW_IN)
|
||||
scroll.add(self.tasks_tree)
|
||||
|
||||
return scroll
|
||||
|
||||
def cancel_build(self, button):
|
||||
label = gtk.Label("Do you really want to stop this build?")
|
||||
dialog = gtk.Dialog("Cancel build",
|
||||
self,
|
||||
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
|
||||
(gtk.STOCK_NO, gtk.RESPONSE_NO,
|
||||
gtk.STOCK_YES, gtk.RESPONSE_YES))
|
||||
dialog.vbox.pack_start(label)
|
||||
label.show()
|
||||
response = dialog.run()
|
||||
dialog.destroy()
|
||||
if response == gtk.RESPONSE_YES:
|
||||
self.handler.cancel_build()
|
||||
return
|
||||
|
||||
def view_build_gui(self):
|
||||
vbox = gtk.VBox(False, 6)
|
||||
vbox.show()
|
||||
build_tv = RunningBuildTreeView()
|
||||
build_tv.show()
|
||||
build_tv.set_model(self.build.model)
|
||||
self.build.model.connect("row-inserted", self.scroll_tv_cb, build_tv)
|
||||
scrolled_view = gtk.ScrolledWindow ()
|
||||
scrolled_view.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
|
||||
scrolled_view.add(build_tv)
|
||||
scrolled_view.show()
|
||||
vbox.pack_start(scrolled_view, expand=True, fill=True)
|
||||
hbox = gtk.HBox(False, 6)
|
||||
hbox.show()
|
||||
vbox.pack_start(hbox, expand=False, fill=False)
|
||||
cancel = gtk.Button(stock=gtk.STOCK_CANCEL)
|
||||
cancel.connect("clicked", self.cancel_build)
|
||||
cancel.show()
|
||||
hbox.pack_end(cancel, expand=False, fill=False)
|
||||
|
||||
return vbox
|
||||
|
||||
def create_build_gui(self):
|
||||
vbox = gtk.VBox(False, 6)
|
||||
vbox.show()
|
||||
hbox = gtk.HBox(False, 6)
|
||||
hbox.show()
|
||||
vbox.pack_start(hbox, expand=False, fill=False)
|
||||
|
||||
label = gtk.Label("Machine:")
|
||||
label.show()
|
||||
hbox.pack_start(label, expand=False, fill=False, padding=6)
|
||||
self.machine_combo = gtk.combo_box_new_text()
|
||||
self.machine_combo.set_active(0)
|
||||
self.machine_combo.show()
|
||||
self.machine_combo.set_tooltip_text("Selects the architecture of the target board for which you would like to build an image.")
|
||||
hbox.pack_start(self.machine_combo, expand=False, fill=False, padding=6)
|
||||
|
||||
ins = gtk.Notebook()
|
||||
vbox.pack_start(ins, expand=True, fill=True)
|
||||
ins.set_show_tabs(True)
|
||||
label = gtk.Label("Images")
|
||||
label.show()
|
||||
ins.append_page(self.images(), tab_label=label)
|
||||
label = gtk.Label("Tasks")
|
||||
label.show()
|
||||
ins.append_page(self.tasks(), tab_label=label)
|
||||
label = gtk.Label("Packages (by Group)")
|
||||
label.show()
|
||||
ins.append_page(self.pkgsgrp(), tab_label=label)
|
||||
label = gtk.Label("Packages (by Name)")
|
||||
label.show()
|
||||
ins.append_page(self.pkgsaz(), tab_label=label)
|
||||
ins.set_current_page(0)
|
||||
ins.show_all()
|
||||
|
||||
hbox = gtk.HBox()
|
||||
hbox.show()
|
||||
vbox.pack_start(hbox, expand=False, fill=False)
|
||||
label = gtk.Label("Image contents:")
|
||||
label.show()
|
||||
hbox.pack_start(label, expand=False, fill=False, padding=6)
|
||||
con = self.contents()
|
||||
con.show()
|
||||
vbox.pack_start(con, expand=True, fill=True)
|
||||
|
||||
#advanced = gtk.Expander(label="Advanced")
|
||||
#advanced.connect("notify::expanded", self.advanced_expander_cb)
|
||||
#advanced.show()
|
||||
#vbox.pack_start(advanced, expand=False, fill=False)
|
||||
|
||||
hbox = gtk.HBox()
|
||||
hbox.show()
|
||||
vbox.pack_start(hbox, expand=False, fill=False)
|
||||
bake = gtk.Button("Bake")
|
||||
bake.connect("clicked", self.bake_clicked_cb)
|
||||
bake.show()
|
||||
hbox.pack_end(bake, expand=False, fill=False, padding=6)
|
||||
reset = gtk.Button("Reset")
|
||||
reset.connect("clicked", self.reset_clicked_cb)
|
||||
reset.show()
|
||||
hbox.pack_end(reset, expand=False, fill=False, padding=6)
|
||||
|
||||
return vbox
|
||||
|
||||
def contents(self):
|
||||
self.contents_tree = gtk.TreeView()
|
||||
self.contents_tree.set_headers_visible(True)
|
||||
self.contents_tree.get_selection().set_mode(gtk.SELECTION_NONE)
|
||||
|
||||
# allow searching in the package column
|
||||
self.contents_tree.set_search_column(0)
|
||||
|
||||
col = gtk.TreeViewColumn('Package')
|
||||
col.set_sort_column_id(0)
|
||||
col1 = gtk.TreeViewColumn('Brought in by')
|
||||
col1.set_resizable(True)
|
||||
col2 = gtk.TreeViewColumn('Remove')
|
||||
col2.set_expand(False)
|
||||
|
||||
self.contents_tree.append_column(col)
|
||||
self.contents_tree.append_column(col1)
|
||||
self.contents_tree.append_column(col2)
|
||||
|
||||
cell = gtk.CellRendererText()
|
||||
cell1 = gtk.CellRendererText()
|
||||
cell1.set_property('width-chars', 20)
|
||||
cell2 = gtk.CellRendererToggle()
|
||||
cell2.connect("toggled", self.remove_package_cb)
|
||||
|
||||
col.pack_start(cell, True)
|
||||
col1.pack_start(cell1, True)
|
||||
col2.pack_start(cell2, True)
|
||||
|
||||
col.set_attributes(cell, text=self.model.COL_NAME)
|
||||
col1.set_attributes(cell1, text=self.model.COL_BINB)
|
||||
col2.set_attributes(cell2, active=self.model.COL_INC)
|
||||
|
||||
self.contents_tree.show()
|
||||
|
||||
scroll = gtk.ScrolledWindow()
|
||||
scroll.set_policy(gtk.POLICY_NEVER, gtk.POLICY_ALWAYS)
|
||||
scroll.set_shadow_type(gtk.SHADOW_IN)
|
||||
scroll.add(self.contents_tree)
|
||||
|
||||
return scroll
|
||||
|
||||
def main (server, eventHandler):
|
||||
gobject.threads_init()
|
||||
gtk.gdk.threads_init()
|
||||
|
||||
taskmodel = TaskListModel()
|
||||
handler = HobHandler(taskmodel, server)
|
||||
mach = server.runCommand(["getVariable", "MACHINE"])
|
||||
distro = server.runCommand(["getVariable", "DISTRO"])
|
||||
|
||||
window = MainWindow(taskmodel, handler, mach, distro)
|
||||
window.show_all ()
|
||||
handler.connect("machines-updated", window.update_machines)
|
||||
handler.connect("distros-updated", window.update_distros)
|
||||
handler.connect("generating-data", window.busy)
|
||||
handler.connect("data-generated", window.data_generated)
|
||||
pbar = ProgressBar(window)
|
||||
pbar.connect("delete-event", gtk.main_quit)
|
||||
|
||||
try:
# kick the whole thing off
handler.current_command = "findConfigFilesDistro"
server.runCommand(["findConfigFiles", "DISTRO"])
except xmlrpclib.Fault as x:
print("XMLRPC Fault getting commandline:\n %s" % x)
return 1
|
||||
# This timeout function regularly probes the event queue to find out if we
|
||||
# have any messages waiting for us.
|
||||
gobject.timeout_add (100,
|
||||
handler.event_handle_idle_func,
|
||||
eventHandler,
|
||||
window.build,
|
||||
pbar)
|
||||
|
||||
try:
|
||||
gtk.main()
|
||||
except EnvironmentError as ioerror:
|
||||
# ignore interrupted io
|
||||
if ioerror.args[0] == 4:
|
||||
pass
|
||||
finally:
|
||||
server.runCommand(["stateStop"])
|
||||
|
||||
@@ -1,248 +0,0 @@
|
||||
#
|
||||
# BitBake (No)TTY UI Implementation
|
||||
#
|
||||
# Handling output to TTYs or files (no TTY)
|
||||
#
|
||||
# Copyright (C) 2006-2007 Richard Purdie
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License version 2 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License along
|
||||
# with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
from __future__ import division
|
||||
|
||||
import os
|
||||
import sys
|
||||
import xmlrpclib
|
||||
import logging
|
||||
import progressbar
|
||||
import bb.msg
|
||||
from bb.ui import uihelper
|
||||
|
||||
logger = logging.getLogger("BitBake")
|
||||
interactive = sys.stdout.isatty()
|
||||
|
||||
class BBProgress(progressbar.ProgressBar):
|
||||
def __init__(self, msg, maxval):
|
||||
self.msg = msg
|
||||
widgets = [progressbar.Percentage(), ' ', progressbar.Bar(), ' ',
|
||||
progressbar.ETA()]
|
||||
|
||||
progressbar.ProgressBar.__init__(self, maxval, [self.msg + ": "] + widgets)
|
||||
|
||||
class NonInteractiveProgress(object):
|
||||
fobj = sys.stdout
|
||||
|
||||
def __init__(self, msg, maxval):
|
||||
self.msg = msg
|
||||
self.maxval = maxval
|
||||
|
||||
def start(self):
|
||||
self.fobj.write("%s..." % self.msg)
|
||||
self.fobj.flush()
|
||||
return self
|
||||
|
||||
def update(self, value):
|
||||
pass
|
||||
|
||||
def finish(self):
|
||||
self.fobj.write("done.\n")
|
||||
self.fobj.flush()
|
||||
|
||||
def new_progress(msg, maxval):
|
||||
if interactive:
|
||||
return BBProgress(msg, maxval)
|
||||
else:
|
||||
return NonInteractiveProgress(msg, maxval)
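# Both classes expose the same minimal interface, so callers can use either
# transparently, e.g. (illustrative):
#   p = new_progress("Parsing recipes", total).start(); p.update(n); p.finish()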
|
||||
|
||||
def main(server, eventHandler):
|
||||
|
||||
# Get values of variables which control our output
|
||||
includelogs = server.runCommand(["getVariable", "BBINCLUDELOGS"])
|
||||
loglines = server.runCommand(["getVariable", "BBINCLUDELOGS_LINES"])
|
||||
|
||||
helper = uihelper.BBUIHelper()
|
||||
|
||||
console = logging.StreamHandler(sys.stdout)
|
||||
format = bb.msg.BBLogFormatter("%(levelname)s: %(message)s")
|
||||
console.setFormatter(format)
|
||||
logger.addHandler(console)
|
||||
|
||||
try:
|
||||
cmdline = server.runCommand(["getCmdLineAction"])
|
||||
if not cmdline:
|
||||
return 1
|
||||
ret = server.runCommand(cmdline)
|
||||
if ret != True:
|
||||
print("Couldn't run command! %s" % ret)
|
||||
return 1
|
||||
except xmlrpclib.Fault as x:
|
||||
print("XMLRPC Fault getting commandline:\n %s" % x)
|
||||
return 1
|
||||
|
||||
|
||||
parseprogress = None
|
||||
cacheprogress = None
|
||||
shutdown = 0
|
||||
return_value = 0
|
||||
while True:
|
||||
try:
|
||||
event = eventHandler.waitEvent(0.25)
|
||||
if event is None:
|
||||
continue
|
||||
helper.eventHandler(event)
|
||||
if isinstance(event, bb.runqueue.runQueueExitWait):
|
||||
if not shutdown:
|
||||
shutdown = 1
|
||||
if shutdown and helper.needUpdate:
|
||||
activetasks, failedtasks = helper.getTasks()
|
||||
if activetasks:
|
||||
print("Waiting for %s active tasks to finish:" % len(activetasks))
|
||||
for tasknum, task in enumerate(activetasks):
|
||||
print("%s: %s (pid %s)" % (tasknum, activetasks[task]["title"], task))
|
||||
|
||||
if isinstance(event, logging.LogRecord):
|
||||
if event.levelno >= format.ERROR:
|
||||
return_value = 1
|
||||
# For "normal" logging conditions, don't show note logs from tasks
|
||||
# but do show them if the user has changed the default log level to
|
||||
# include verbose/debug messages
|
||||
if logger.getEffectiveLevel() > format.VERBOSE:
|
||||
if event.taskpid != 0 and event.levelno <= format.NOTE:
|
||||
continue
|
||||
logger.handle(event)
|
||||
continue
|
||||
|
||||
if isinstance(event, bb.build.TaskFailed):
|
||||
return_value = 1
|
||||
logfile = event.logfile
|
||||
if logfile and os.path.exists(logfile):
|
||||
print("ERROR: Logfile of failure stored in: %s" % logfile)
|
||||
if includelogs:
print("Log data follows:")
|
||||
f = open(logfile, "r")
|
||||
lines = []
|
||||
while True:
|
||||
l = f.readline()
|
||||
if l == '':
|
||||
break
|
||||
l = l.rstrip()
|
||||
if loglines:
|
||||
lines.append(' | %s' % l)
|
||||
if len(lines) > int(loglines):
|
||||
lines.pop(0)
|
||||
else:
|
||||
print('| %s' % l)
|
||||
f.close()
|
||||
if lines:
|
||||
for line in lines:
|
||||
print(line)
|
||||
if isinstance(event, bb.build.TaskBase):
|
||||
logger.info(event._message)
|
||||
continue
|
||||
if isinstance(event, bb.event.ParseStarted):
|
||||
parseprogress = new_progress("Parsing recipes", event.total).start()
|
||||
continue
|
||||
if isinstance(event, bb.event.ParseProgress):
|
||||
parseprogress.update(event.current)
|
||||
continue
|
||||
if isinstance(event, bb.event.ParseCompleted):
|
||||
parseprogress.finish()
|
||||
print(("Parsing of %d .bb files complete (%d cached, %d parsed). %d targets, %d skipped, %d masked, %d errors."
|
||||
% ( event.total, event.cached, event.parsed, event.virtuals, event.skipped, event.masked, event.errors)))
|
||||
continue
|
||||
|
||||
if isinstance(event, bb.event.CacheLoadStarted):
|
||||
cacheprogress = new_progress("Loading cache", event.total).start()
|
||||
continue
|
||||
if isinstance(event, bb.event.CacheLoadProgress):
|
||||
cacheprogress.update(event.current)
|
||||
continue
|
||||
if isinstance(event, bb.event.CacheLoadCompleted):
|
||||
cacheprogress.finish()
|
||||
print("Loaded %d entries from dependency cache." % event.num_entries)
|
||||
continue
|
||||
|
||||
if isinstance(event, bb.command.CommandCompleted):
|
||||
break
|
||||
if isinstance(event, bb.command.CommandFailed):
|
||||
return_value = event.exitcode
|
||||
logger.error("Command execution failed: %s", event.error)
|
||||
break
|
||||
if isinstance(event, bb.command.CommandExit):
|
||||
return_value = event.exitcode
|
||||
continue
|
||||
if isinstance(event, bb.cooker.CookerExit):
|
||||
break
|
||||
if isinstance(event, bb.event.MultipleProviders):
|
||||
logger.info("multiple providers are available for %s%s (%s)", event._is_runtime and "runtime " or "",
|
||||
event._item,
|
||||
", ".join(event._candidates))
|
||||
logger.info("consider defining a PREFERRED_PROVIDER entry to match %s", event._item)
|
||||
continue
|
||||
if isinstance(event, bb.event.NoProvider):
|
||||
if event._runtime:
|
||||
r = "R"
|
||||
else:
|
||||
r = ""
|
||||
|
||||
if event._dependees:
|
||||
logger.error("Nothing %sPROVIDES '%s' (but %s %sDEPENDS on or otherwise requires it)", r, event._item, ", ".join(event._dependees), r)
|
||||
else:
|
||||
logger.error("Nothing %sPROVIDES '%s'", r, event._item)
|
||||
continue
|
||||
|
||||
if isinstance(event, bb.runqueue.runQueueTaskStarted):
|
||||
if event.noexec:
|
||||
tasktype = 'noexec task'
|
||||
else:
|
||||
tasktype = 'task'
|
||||
logger.info("Running %s %s of %s (ID: %s, %s)",
|
||||
tasktype,
|
||||
event.stats.completed + event.stats.active +
|
||||
event.stats.failed + 1,
|
||||
event.stats.total, event.taskid, event.taskstring)
|
||||
continue
|
||||
|
||||
if isinstance(event, bb.runqueue.runQueueTaskFailed):
|
||||
logger.error("Task %s (%s) failed with exit code '%s'",
|
||||
event.taskid, event.taskstring, event.exitcode)
|
||||
continue
|
||||
|
||||
# ignore
|
||||
if isinstance(event, (bb.event.BuildBase,
|
||||
bb.event.StampUpdate,
|
||||
bb.event.ConfigParsed,
|
||||
bb.event.RecipeParsed,
|
||||
bb.runqueue.runQueueEvent,
|
||||
bb.runqueue.runQueueExitWait)):
|
||||
continue
|
||||
|
||||
logger.error("Unknown event: %s", event)
|
||||
|
||||
except EnvironmentError as ioerror:
|
||||
# ignore interrupted io
|
||||
if ioerror.args[0] == 4:
|
||||
pass
|
||||
except KeyboardInterrupt:
|
||||
if shutdown == 2:
|
||||
print("\nThird Keyboard Interrupt, exit.\n")
|
||||
break
|
||||
if shutdown == 1:
|
||||
print("\nSecond Keyboard Interrupt, stopping...\n")
|
||||
server.runCommand(["stateStop"])
|
||||
if shutdown == 0:
|
||||
print("\nKeyboard Interrupt, closing down...\n")
|
||||
server.runCommand(["stateShutdown"])
|
||||
shutdown = shutdown + 1
|
||||
pass
|
||||
return return_value
|
||||
@@ -1,352 +0,0 @@
|
||||
#
|
||||
# BitBake Curses UI Implementation
|
||||
#
|
||||
# Implements an ncurses frontend for the BitBake utility.
|
||||
#
|
||||
# Copyright (C) 2006 Michael 'Mickey' Lauer
|
||||
# Copyright (C) 2006-2007 Richard Purdie
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License version 2 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License along
|
||||
# with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
"""
|
||||
We have the following windows:
|
||||
|
||||
1.) Main Window: Shows what we are ultimately building and how far we are. Includes status bar
|
||||
2.) Thread Activity Window: Shows one status line for every concurrent bitbake thread.
|
||||
3.) Command Line Window: Contains an interactive command line where you can interact w/ Bitbake.
|
||||
|
||||
Basic window layout is like that:
|
||||
|
||||
|---------------------------------------------------------|
|
||||
| <Main Window> | <Thread Activity Window> |
|
||||
| | 0: foo do_compile complete|
|
||||
| Building Gtk+-2.6.10 | 1: bar do_patch complete |
|
||||
| Status: 60% | ... |
|
||||
| | ... |
|
||||
| | ... |
|
||||
|---------------------------------------------------------|
|
||||
|<Command Line Window> |
|
||||
|>>> which virtual/kernel |
|
||||
|openzaurus-kernel |
|
||||
|>>> _ |
|
||||
|---------------------------------------------------------|
|
||||
|
||||
"""
|
||||
|
||||
|
||||
from __future__ import division
|
||||
import logging
|
||||
import os, sys, curses, itertools, time
|
||||
import bb
|
||||
import xmlrpclib
|
||||
from bb import ui
|
||||
from bb.ui import uihelper
|
||||
|
||||
parsespin = itertools.cycle( r'|/-\\' )
|
||||
|
||||
X = 0
|
||||
Y = 1
|
||||
WIDTH = 2
|
||||
HEIGHT = 3
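# X/Y/WIDTH/HEIGHT index the (x, y, width, height) tuple each Window stores
# in self.dimensions below.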
|
||||
|
||||
MAXSTATUSLENGTH = 32
|
||||
|
||||
class NCursesUI:
|
||||
"""
|
||||
NCurses UI Class
|
||||
"""
|
||||
class Window:
|
||||
"""Base Window Class"""
|
||||
def __init__( self, x, y, width, height, fg=curses.COLOR_BLACK, bg=curses.COLOR_WHITE ):
|
||||
self.win = curses.newwin( height, width, y, x )
|
||||
self.dimensions = ( x, y, width, height )
|
||||
"""
|
||||
if curses.has_colors():
|
||||
color = 1
|
||||
curses.init_pair( color, fg, bg )
|
||||
self.win.bkgdset( ord(' '), curses.color_pair(color) )
|
||||
else:
|
||||
self.win.bkgdset( ord(' '), curses.A_BOLD )
|
||||
"""
|
||||
self.erase()
|
||||
self.setScrolling()
|
||||
self.win.noutrefresh()
|
||||
|
||||
def erase( self ):
|
||||
self.win.erase()
|
||||
|
||||
def setScrolling( self, b = True ):
|
||||
self.win.scrollok( b )
|
||||
self.win.idlok( b )
|
||||
|
||||
def setBoxed( self ):
|
||||
self.boxed = True
|
||||
self.win.box()
|
||||
self.win.noutrefresh()
|
||||
|
||||
def setText( self, x, y, text, *args ):
|
||||
self.win.addstr( y, x, text, *args )
|
||||
self.win.noutrefresh()
|
||||
|
||||
def appendText( self, text, *args ):
|
||||
self.win.addstr( text, *args )
|
||||
self.win.noutrefresh()
|
||||
|
||||
def drawHline( self, y ):
|
||||
self.win.hline( y, 0, curses.ACS_HLINE, self.dimensions[WIDTH] )
|
||||
self.win.noutrefresh()
|
||||
|
||||
class DecoratedWindow( Window ):
|
||||
"""Base class for windows with a box and a title bar"""
|
||||
def __init__( self, title, x, y, width, height, fg=curses.COLOR_BLACK, bg=curses.COLOR_WHITE ):
|
||||
NCursesUI.Window.__init__( self, x+1, y+3, width-2, height-4, fg, bg )
|
||||
self.decoration = NCursesUI.Window( x, y, width, height, fg, bg )
|
||||
self.decoration.setBoxed()
|
||||
self.decoration.win.hline( 2, 1, curses.ACS_HLINE, width-2 )
|
||||
self.setTitle( title )
|
||||
|
||||
def setTitle( self, title ):
|
||||
self.decoration.setText( 1, 1, title.center( self.dimensions[WIDTH]-2 ), curses.A_BOLD )
|
||||
|
||||
#-------------------------------------------------------------------------#
|
||||
# class TitleWindow( Window ):
|
||||
#-------------------------------------------------------------------------#
|
||||
# """Title Window"""
|
||||
# def __init__( self, x, y, width, height ):
|
||||
# NCursesUI.Window.__init__( self, x, y, width, height )
|
||||
# version = bb.__version__
|
||||
# title = "BitBake %s" % version
|
||||
# credit = "(C) 2003-2007 Team BitBake"
|
||||
# #self.win.hline( 2, 1, curses.ACS_HLINE, width-2 )
|
||||
# self.win.border()
|
||||
# self.setText( 1, 1, title.center( self.dimensions[WIDTH]-2 ), curses.A_BOLD )
|
||||
# self.setText( 1, 2, credit.center( self.dimensions[WIDTH]-2 ), curses.A_BOLD )
|
||||
|
||||
#-------------------------------------------------------------------------#
|
||||
class ThreadActivityWindow( DecoratedWindow ):
|
||||
#-------------------------------------------------------------------------#
|
||||
"""Thread Activity Window"""
|
||||
def __init__( self, x, y, width, height ):
|
||||
NCursesUI.DecoratedWindow.__init__( self, "Thread Activity", x, y, width, height )
|
||||
|
||||
def setStatus( self, thread, text ):
|
||||
line = "%02d: %s" % ( thread, text )
|
||||
width = self.dimensions[WIDTH]
|
||||
if ( len(line) > width ):
|
||||
line = line[:width-3] + "..."
|
||||
else:
|
||||
line = line.ljust( width )
|
||||
self.setText( 0, thread, line )
|
||||
|
||||
#-------------------------------------------------------------------------#
|
||||
class MainWindow( DecoratedWindow ):
|
||||
#-------------------------------------------------------------------------#
|
||||
"""Main Window"""
|
||||
def __init__( self, x, y, width, height ):
|
||||
self.StatusPosition = width - MAXSTATUSLENGTH
|
||||
NCursesUI.DecoratedWindow.__init__( self, None, x, y, width, height )
|
||||
curses.nl()
|
||||
|
||||
def setTitle( self, title ):
|
||||
title = "BitBake %s" % bb.__version__
|
||||
self.decoration.setText( 2, 1, title, curses.A_BOLD )
|
||||
self.decoration.setText( self.StatusPosition - 8, 1, "Status:", curses.A_BOLD )
|
||||
|
||||
def setStatus(self, status):
|
||||
while len(status) < MAXSTATUSLENGTH:
|
||||
status = status + " "
|
||||
self.decoration.setText( self.StatusPosition, 1, status, curses.A_BOLD )
|
||||
|
||||
|
||||
#-------------------------------------------------------------------------#
|
||||
class ShellOutputWindow( DecoratedWindow ):
|
||||
#-------------------------------------------------------------------------#
|
||||
"""Interactive Command Line Output"""
|
||||
def __init__( self, x, y, width, height ):
|
||||
NCursesUI.DecoratedWindow.__init__( self, "Command Line Window", x, y, width, height )
|
||||
|
||||
#-------------------------------------------------------------------------#
|
||||
class ShellInputWindow( Window ):
|
||||
#-------------------------------------------------------------------------#
|
||||
"""Interactive Command Line Input"""
|
||||
def __init__( self, x, y, width, height ):
|
||||
NCursesUI.Window.__init__( self, x, y, width, height )
|
||||
|
||||
# TODO: move this import to the top of the file: from curses.textpad import Textbox
# self.textbox = Textbox( self.win )
|
||||
# t = threading.Thread()
|
||||
# t.run = self.textbox.edit
|
||||
# t.start()
|
||||
|
||||
#-------------------------------------------------------------------------#
|
||||
def main(self, stdscr, server, eventHandler):
|
||||
#-------------------------------------------------------------------------#
|
||||
height, width = stdscr.getmaxyx()
|
||||
|
||||
# for now split it like that:
|
||||
# MAIN_y + THREAD_y = 2/3 screen at the top
|
||||
# MAIN_x = 2/3 left, THREAD_y = 1/3 right
|
||||
# CLI_y = 1/3 of screen at the bottom
|
||||
# CLI_x = full
|
||||
|
||||
main_left = 0
|
||||
main_top = 0
|
||||
main_height = ( height // 3 * 2 )
|
||||
main_width = ( width // 3 ) * 2
|
||||
clo_left = main_left
|
||||
clo_top = main_top + main_height
|
||||
clo_height = height - main_height - main_top - 1
|
||||
clo_width = width
|
||||
cli_left = main_left
|
||||
cli_top = clo_top + clo_height
|
||||
cli_height = 1
|
||||
cli_width = width
|
||||
thread_left = main_left + main_width
|
||||
thread_top = main_top
|
||||
thread_height = main_height
|
||||
thread_width = width - main_width
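# Worked example (illustrative): on an 80x24 terminal this yields a 52x16 main
# window top-left, a 28x16 thread activity window top-right, an 80x7 command
# output window below them, and a single 80x1 input line at the bottom.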
|
||||
|
||||
#tw = self.TitleWindow( 0, 0, width, main_top )
|
||||
mw = self.MainWindow( main_left, main_top, main_width, main_height )
|
||||
taw = self.ThreadActivityWindow( thread_left, thread_top, thread_width, thread_height )
|
||||
clo = self.ShellOutputWindow( clo_left, clo_top, clo_width, clo_height )
|
||||
cli = self.ShellInputWindow( cli_left, cli_top, cli_width, cli_height )
|
||||
cli.setText( 0, 0, "BB>" )
|
||||
|
||||
mw.setStatus("Idle")
|
||||
|
||||
helper = uihelper.BBUIHelper()
|
||||
shutdown = 0
|
||||
|
||||
try:
|
||||
cmdline = server.runCommand(["getCmdLineAction"])
|
||||
if not cmdline:
|
||||
return
|
||||
ret = server.runCommand(cmdline)
|
||||
if ret != True:
|
||||
print("Couldn't run command! %s" % ret)
return
|
||||
except xmlrpclib.Fault as x:
|
||||
print("XMLRPC Fault getting commandline:\n %s" % x)
|
||||
return
|
||||
|
||||
exitflag = False
|
||||
while not exitflag:
|
||||
try:
|
||||
event = eventHandler.waitEvent(0.25)
|
||||
if not event:
|
||||
continue
|
||||
|
||||
helper.eventHandler(event)
|
||||
if isinstance(event, bb.build.TaskBase):
|
||||
mw.appendText("NOTE: %s\n" % event._message)
|
||||
if isinstance(event, logging.LogRecord):
|
||||
mw.appendText(logging.getLevelName(event.levelno) + ': ' + event.getMessage() + '\n')
|
||||
|
||||
if isinstance(event, bb.event.CacheLoadStarted):
|
||||
self.parse_total = event.total
|
||||
if isinstance(event, bb.event.CacheLoadProgress):
|
||||
x = event.current
|
||||
y = self.parse_total
|
||||
mw.setStatus("Loading Cache: %s [%2d %%]" % ( next(parsespin), x*100/y ) )
|
||||
if isinstance(event, bb.event.CacheLoadCompleted):
|
||||
mw.setStatus("Idle")
|
||||
mw.appendText("Loaded %d entries from dependency cache.\n"
|
||||
% ( event.num_entries))
|
||||
|
||||
if isinstance(event, bb.event.ParseStarted):
|
||||
self.parse_total = event.total
|
||||
if isinstance(event, bb.event.ParseProgress):
|
||||
x = event.current
|
||||
y = self.parse_total
|
||||
mw.setStatus("Parsing Recipes: %s [%2d %%]" % ( next(parsespin), x*100/y ) )
|
||||
if isinstance(event, bb.event.ParseCompleted):
|
||||
mw.setStatus("Idle")
|
||||
mw.appendText("Parsing finished. %d cached, %d parsed, %d skipped, %d masked.\n"
|
||||
% ( event.cached, event.parsed, event.skipped, event.masked ))
|
||||
|
||||
# if isinstance(event, bb.build.TaskFailed):
|
||||
# if event.logfile:
|
||||
# if data.getVar("BBINCLUDELOGS", d):
|
||||
# bb.msg.error(bb.msg.domain.Build, "log data follows (%s)" % logfile)
|
||||
# number_of_lines = data.getVar("BBINCLUDELOGS_LINES", d)
|
||||
# if number_of_lines:
|
||||
# os.system('tail -n%s %s' % (number_of_lines, logfile))
|
||||
# else:
|
||||
# f = open(logfile, "r")
|
||||
# while True:
|
||||
# l = f.readline()
|
||||
# if l == '':
|
||||
# break
|
||||
# l = l.rstrip()
|
||||
# print '| %s' % l
|
||||
# f.close()
|
||||
# else:
|
||||
# bb.msg.error(bb.msg.domain.Build, "see log in %s" % logfile)
|
||||
|
||||
if isinstance(event, bb.command.CommandCompleted):
|
||||
# stop so the user can see the result of the build, but
|
||||
# also allow them to exit now with a single ^C
|
||||
shutdown = 2
|
||||
if isinstance(event, bb.command.CommandFailed):
|
||||
mw.appendText("Command execution failed: %s" % event.error)
|
||||
time.sleep(2)
|
||||
exitflag = True
|
||||
if isinstance(event, bb.command.CommandExit):
|
||||
exitflag = True
|
||||
if isinstance(event, bb.cooker.CookerExit):
|
||||
exitflag = True
|
||||
|
||||
if helper.needUpdate:
|
||||
activetasks, failedtasks = helper.getTasks()
|
||||
taw.erase()
|
||||
taw.setText(0, 0, "")
|
||||
if activetasks:
|
||||
taw.appendText("Active Tasks:\n")
|
||||
for task in activetasks.itervalues():
|
||||
taw.appendText(task["title"] + '\n')
|
||||
if failedtasks:
|
||||
taw.appendText("Failed Tasks:\n")
|
||||
for task in failedtasks:
|
||||
taw.appendText(task["title"] + '\n')
|
||||
|
||||
curses.doupdate()
|
||||
except EnvironmentError as ioerror:
|
||||
# ignore interrupted io
|
||||
if ioerror.args[0] == 4:
|
||||
pass
|
||||
|
||||
except KeyboardInterrupt:
|
||||
if shutdown == 2:
|
||||
mw.appendText("Third Keyboard Interrupt, exit.\n")
|
||||
exitflag = True
|
||||
if shutdown == 1:
|
||||
mw.appendText("Second Keyboard Interrupt, stopping...\n")
|
||||
server.runCommand(["stateStop"])
|
||||
if shutdown == 0:
|
||||
mw.appendText("Keyboard Interrupt, closing down...\n")
|
||||
server.runCommand(["stateShutdown"])
|
||||
shutdown = shutdown + 1
|
||||
pass
|
||||
|
||||
def main(server, eventHandler):
|
||||
if not os.isatty(sys.stdout.fileno()):
|
||||
print("FATAL: Unable to run 'ncurses' UI without a TTY.")
|
||||
return
|
||||
ui = NCursesUI()
|
||||
try:
|
||||
curses.wrapper(ui.main, server, eventHandler)
|
||||
except:
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
@@ -1,425 +0,0 @@
|
||||
#
|
||||
# BitBake Graphical GTK User Interface
|
||||
#
|
||||
# Copyright (C) 2008 Intel Corporation
|
||||
#
|
||||
# Authored by Rob Bradford <rob@linux.intel.com>
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License version 2 as
|
||||
# published by the Free Software Foundation.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License along
|
||||
# with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
import gtk
|
||||
import gobject
|
||||
import gtk.glade
|
||||
import threading
|
||||
import urllib2
|
||||
import os
|
||||
import contextlib
|
||||
|
||||
from bb.ui.crumbs.buildmanager import BuildManager, BuildConfiguration
|
||||
from bb.ui.crumbs.buildmanager import BuildManagerTreeView
|
||||
|
||||
from bb.ui.crumbs.runningbuild import RunningBuild, RunningBuildTreeView
|
||||
|
||||
# The metadata loader is used by the BuildSetupDialog to download the
|
||||
# available options to populate the dialog
|
||||
class MetaDataLoader(gobject.GObject):
|
||||
""" This class provides the mechanism for loading the metadata (the
|
||||
fetching and parsing) from a given URL. The metadata encompasses details on
what machines are available, the distributions and images available for each
machine, and the URIs to use when building a given machine."""
__gsignals__ = {
|
||||
'success' : (gobject.SIGNAL_RUN_LAST,
|
||||
gobject.TYPE_NONE,
|
||||
()),
|
||||
'error' : (gobject.SIGNAL_RUN_LAST,
|
||||
gobject.TYPE_NONE,
|
||||
(gobject.TYPE_STRING,))
|
||||
}
|
||||
|
||||
# We use these little helper functions to ensure that we take the gdk lock
|
||||
# when emitting the signal. These functions are called as idles (so that
|
||||
# they happen in the gtk / main thread's main loop).
|
||||
def emit_error_signal (self, remark):
|
||||
gtk.gdk.threads_enter()
|
||||
self.emit ("error", remark)
|
||||
gtk.gdk.threads_leave()
|
||||
|
||||
def emit_success_signal (self):
|
||||
gtk.gdk.threads_enter()
|
||||
self.emit ("success")
|
||||
gtk.gdk.threads_leave()
|
||||
|
||||
def __init__ (self):
|
||||
gobject.GObject.__init__ (self)
|
||||
|
||||
class LoaderThread(threading.Thread):
|
||||
""" This class provides an asynchronous loader for the metadata (by
|
||||
using threads and signals). This is useful since the metadata may be
|
||||
at a remote URL."""
|
||||
class LoaderImportException (Exception):
|
||||
pass
|
||||
|
||||
def __init__(self, loader, url):
|
||||
threading.Thread.__init__ (self)
|
||||
self.url = url
|
||||
self.loader = loader
|
||||
|
||||
def run (self):
|
||||
result = {}
|
||||
try:
|
||||
with contextlib.closing (urllib2.urlopen (self.url)) as f:
|
||||
# Parse the metadata format. The format is....
|
||||
# <machine>;<default distro>|<distro>...;<default image>|<image>...;<type##url>|...
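# A hypothetical example line (illustrative only, not from the original source):
#   qemuarm;poky|poky-bleeding;core-image-minimal|core-image-sato;release##http://example.com/images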
|
||||
for line in f:
|
||||
components = line.split(";")
|
||||
if (len (components) < 4):
|
||||
raise MetaDataLoader.LoaderThread.LoaderImportException
|
||||
machine = components[0]
|
||||
distros = components[1].split("|")
|
||||
images = components[2].split("|")
|
||||
urls = components[3].split("|")
|
||||
|
||||
result[machine] = (distros, images, urls)
|
||||
|
||||
# Create an object representing this *potential*
|
||||
# configuration. It can become concrete if the machine, distro
|
||||
# and image are all chosen in the UI
|
||||
configuration = BuildConfiguration()
|
||||
configuration.metadata_url = self.url
|
||||
configuration.machine_options = result
|
||||
self.loader.configuration = configuration
|
||||
|
||||
# Emit that we've actually got a configuration
|
||||
gobject.idle_add (MetaDataLoader.emit_success_signal,
|
||||
self.loader)
|
||||
|
||||
except MetaDataLoader.LoaderThread.LoaderImportException as e:
|
||||
gobject.idle_add (MetaDataLoader.emit_error_signal, self.loader,
|
||||
"Repository metadata corrupt")
|
||||
except Exception as e:
|
||||
gobject.idle_add (MetaDataLoader.emit_error_signal, self.loader,
|
||||
"Unable to download repository metadata")
|
||||
print(e)
|
||||
|
||||
def try_fetch_from_url (self, url):
|
||||
# Try and download the metadata. Firing a signal if successful
|
||||
thread = MetaDataLoader.LoaderThread(self, url)
|
||||
thread.start()
|
||||
|
||||
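
# A doctest-style sketch of the metadata format documented above (not part of
# the original file; the sample line and its values are invented):
#
#     >>> line = "qemuarm;poky|poky-bleeding;poky-image-minimal|poky-image-sato;svn##http://example.com/repo"
#     >>> machine, distros, images, urls = line.split(";")[:4]
#     >>> machine
#     'qemuarm'
#     >>> distros.split("|")[0]   # the first entry is the default distro
#     'poky'
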
class BuildSetupDialog (gtk.Dialog):
    RESPONSE_BUILD = 1

    # A little helper method that just sets the states on the widgets based on
    # whether we've got good metadata or not.
    def set_configurable (self, configurable):
        if (self.configurable == configurable):
            return

        self.configurable = configurable
        for widget in self.conf_widgets:
            widget.set_sensitive (configurable)

        if not configurable:
            self.machine_combo.set_active (-1)
            self.distribution_combo.set_active (-1)
            self.image_combo.set_active (-1)

    # GTK widget callbacks
    def refresh_button_clicked (self, button):
        # Refresh button clicked.

        url = self.location_entry.get_chars (0, -1)
        self.loader.try_fetch_from_url(url)

    def repository_entry_editable_changed (self, entry):
        if (len (entry.get_chars (0, -1)) > 0):
            self.refresh_button.set_sensitive (True)
        else:
            self.refresh_button.set_sensitive (False)
            self.clear_status_message()

        # If we were previously configurable we are no longer since the
        # location entry has been changed
        self.set_configurable (False)

    def machine_combo_changed (self, combobox):
        active_iter = combobox.get_active_iter()

        if not active_iter:
            return

        model = combobox.get_model()

        if model:
            chosen_machine = model.get (active_iter, 0)[0]

            (distros_model, images_model) = \
                self.loader.configuration.get_distro_and_images_models (chosen_machine)

            self.distribution_combo.set_model (distros_model)
            self.image_combo.set_model (images_model)

    # Callbacks from the loader
    def loader_success_cb (self, loader):
        self.status_image.set_from_icon_name ("info",
            gtk.ICON_SIZE_BUTTON)
        self.status_image.show()
        self.status_label.set_label ("Repository metadata successfully downloaded")

        # Set the models on the combo boxes based on the models generated from
        # the configuration that the loader has created

        # We just need to set the machine here, that then determines the
        # distro and image options. Cunning huh? :-)

        self.configuration = self.loader.configuration
        model = self.configuration.get_machines_model ()
        self.machine_combo.set_model (model)

        self.set_configurable (True)

    def loader_error_cb (self, loader, message):
        self.status_image.set_from_icon_name ("error",
            gtk.ICON_SIZE_BUTTON)
        self.status_image.show()
        self.status_label.set_text ("Error downloading repository metadata")
        for widget in self.conf_widgets:
            widget.set_sensitive (False)

    def clear_status_message (self):
        self.status_image.hide()
        self.status_label.set_label (
            """<i>Enter the repository location and press _Refresh</i>""")

    def __init__ (self):
        gtk.Dialog.__init__ (self)

        # Cancel
        self.add_button (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL)

        # Build
        button = gtk.Button ("_Build", None, True)
        image = gtk.Image ()
        image.set_from_stock (gtk.STOCK_EXECUTE, gtk.ICON_SIZE_BUTTON)
        button.set_image (image)
        self.add_action_widget (button, BuildSetupDialog.RESPONSE_BUILD)
        button.show_all ()

        # Pull in *just* the table from the Glade XML data.
        gxml = gtk.glade.XML (os.path.dirname(__file__) + "/crumbs/puccho.glade",
            root = "build_table")
        table = gxml.get_widget ("build_table")
        self.vbox.pack_start (table, True, False, 0)

        # Grab all the widgets that we need to turn on/off when we refresh...
        self.conf_widgets = []
        self.conf_widgets += [gxml.get_widget ("machine_label")]
        self.conf_widgets += [gxml.get_widget ("distribution_label")]
        self.conf_widgets += [gxml.get_widget ("image_label")]
        self.conf_widgets += [gxml.get_widget ("machine_combo")]
        self.conf_widgets += [gxml.get_widget ("distribution_combo")]
        self.conf_widgets += [gxml.get_widget ("image_combo")]

        # Grab the status widgets
        self.status_image = gxml.get_widget ("status_image")
        self.status_label = gxml.get_widget ("status_label")

        # Grab the refresh button and connect to the clicked signal
        self.refresh_button = gxml.get_widget ("refresh_button")
        self.refresh_button.connect ("clicked", self.refresh_button_clicked)

        # Grab the location entry and connect to editable::changed
        self.location_entry = gxml.get_widget ("location_entry")
        self.location_entry.connect ("changed",
            self.repository_entry_editable_changed)

        # Grab the machine combo and hook onto the changed signal. This then
        # allows us to populate the distro and image combos
        self.machine_combo = gxml.get_widget ("machine_combo")
        self.machine_combo.connect ("changed", self.machine_combo_changed)

        # Setup the combo
        cell = gtk.CellRendererText()
        self.machine_combo.pack_start(cell, True)
        self.machine_combo.add_attribute(cell, 'text', 0)

        # Grab the distro and image combos. We need these to populate with
        # models once the machine is chosen
        self.distribution_combo = gxml.get_widget ("distribution_combo")
        cell = gtk.CellRendererText()
        self.distribution_combo.pack_start(cell, True)
        self.distribution_combo.add_attribute(cell, 'text', 0)

        self.image_combo = gxml.get_widget ("image_combo")
        cell = gtk.CellRendererText()
        self.image_combo.pack_start(cell, True)
        self.image_combo.add_attribute(cell, 'text', 0)

        # Put the default descriptive text in the status box
        self.clear_status_message()

        # Mark as non-configurable; this just greys out the widgets the
        # user can't yet use
        self.configurable = False
        self.set_configurable(False)

        # Show the table
        table.show_all ()

        # The loader and some signals connected to it to update the status
        # area
        self.loader = MetaDataLoader()
        self.loader.connect ("success", self.loader_success_cb)
        self.loader.connect ("error", self.loader_error_cb)

    def update_configuration (self):
        """ A poorly named function but it updates the internal configuration
        from the widgets. This can make that configuration concrete and can
        thus be used for building """
        # Extract the chosen machine from the combo
        model = self.machine_combo.get_model()
        active_iter = self.machine_combo.get_active_iter()
        if (active_iter):
            self.configuration.machine = model.get(active_iter, 0)[0]

        # Extract the chosen distro from the combo
        model = self.distribution_combo.get_model()
        active_iter = self.distribution_combo.get_active_iter()
        if (active_iter):
            self.configuration.distro = model.get(active_iter, 0)[0]

        # Extract the chosen image from the combo
        model = self.image_combo.get_model()
        active_iter = self.image_combo.get_active_iter()
        if (active_iter):
            self.configuration.image = model.get(active_iter, 0)[0]
# This function operates to pull events out from the event queue and then push
# them into the RunningBuild (which then updates the progress tree view).
#
# TODO: Should be a method on the RunningBuild class
def event_handle_timeout (eventHandler, build):
    # Consume as many messages as we can ...
    event = eventHandler.getEvent()
    while event:
        build.handle_event (event)
        event = eventHandler.getEvent()
    return True

class MainWindow (gtk.Window):

    # Callback that gets fired when the user hits a button in the
    # BuildSetupDialog.
    def build_dialog_box_response_cb (self, dialog, response_id):
        conf = None
        if (response_id == BuildSetupDialog.RESPONSE_BUILD):
            dialog.update_configuration()
            print(dialog.configuration.machine, dialog.configuration.distro, \
                dialog.configuration.image)
            conf = dialog.configuration

        dialog.destroy()

        if conf:
            self.manager.do_build (conf)

    def build_button_clicked_cb (self, button):
        dialog = BuildSetupDialog ()

        # For some unknown reason Dialog.run causes nice little deadlocks ... :-(
        dialog.connect ("response", self.build_dialog_box_response_cb)
        dialog.show()

    def __init__ (self):
        gtk.Window.__init__ (self)

        # Pull in *just* the main vbox from the Glade XML data and then pack
        # that inside the window
        gxml = gtk.glade.XML (os.path.dirname(__file__) + "/crumbs/puccho.glade",
            root = "main_window_vbox")
        vbox = gxml.get_widget ("main_window_vbox")
        self.add (vbox)

        # Create the tree views for the build manager view and the progress view
        self.build_manager_view = BuildManagerTreeView()
        self.running_build_view = RunningBuildTreeView()

        # Grab the scrolled windows that we put the tree views into
        self.results_scrolledwindow = gxml.get_widget ("results_scrolledwindow")
        self.progress_scrolledwindow = gxml.get_widget ("progress_scrolledwindow")

        # Put the tree views inside ...
        self.results_scrolledwindow.add (self.build_manager_view)
        self.progress_scrolledwindow.add (self.running_build_view)

        # Hook up the build button...
        self.build_button = gxml.get_widget ("main_toolbutton_build")
        self.build_button.connect ("clicked", self.build_button_clicked_cb)

# I'm not very happy about the current ownership of the RunningBuild. I have
# my suspicions that this object should be held by the BuildManager since we
# care about the signals in the manager

def running_build_succeeded_cb (running_build, manager):
    # Notify the manager that a build has succeeded. This is necessary as part
    # of the 'hack' that we use for making the row in the model / view
    # representing the ongoing build change into a row representing the
    # completed build. Since we know only one build can be running at a time
    # we can handle this.

    # FIXME: Refactor all this so that the RunningBuild is owned by the
    # BuildManager. It can then hook onto the signals directly and drive
    # interesting things it cares about.
    manager.notify_build_succeeded ()
    print("build succeeded")

def running_build_failed_cb (running_build, manager):
    # As above
    print("build failed")
    manager.notify_build_failed ()

def main (server, eventHandler):
    # Initialise threading...
    gobject.threads_init()
    gtk.gdk.threads_init()

    main_window = MainWindow ()
    main_window.show_all ()

    # Set up the build manager stuff in general
    builds_dir = os.path.join (os.getcwd(), "results")
    manager = BuildManager (server, builds_dir)
    main_window.build_manager_view.set_model (manager.model)

    # Do the running build setup
    running_build = RunningBuild ()
    main_window.running_build_view.set_model (running_build.model)
    running_build.connect ("build-succeeded", running_build_succeeded_cb,
        manager)
    running_build.connect ("build-failed", running_build_failed_cb, manager)

    # We need to save the manager into the MainWindow so that the toolbar
    # button can use it.
    # FIXME: Refactor ?
    main_window.manager = manager

    # Use a timeout function for probing the event queue to find out if we
    # have a message waiting for us.
    gobject.timeout_add (200,
        event_handle_timeout,
        eventHandler,
        running_build)

    gtk.main()
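
# A note on the polling pattern above (illustrative, not part of the original
# file): gobject.timeout_add() keeps re-invoking its callback for as long as
# the callback returns True, which is why event_handle_timeout() ends with
# "return True". A callback that returns False (or None) is unscheduled:
#
#     source_id = gobject.timeout_add (200, event_handle_timeout,
#         eventHandler, running_build)
#     # ... or remove it explicitly with gobject.source_remove (source_id)
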
@@ -1,127 +0,0 @@
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (C) 2006 - 2007  Michael 'Mickey' Lauer
# Copyright (C) 2006 - 2007  Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.


"""
Use this class to fork off a thread to receive event callbacks from the bitbake
server and queue them for the UI to process. This process must be used to avoid
client/server deadlocks.
"""

import socket, threading, pickle
from SimpleXMLRPCServer import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler

class BBUIEventQueue:
    def __init__(self, BBServer):

        self.eventQueue = []
        self.eventQueueLock = threading.Lock()
        self.eventQueueNotify = threading.Event()

        self.BBServer = BBServer

        self.t = threading.Thread()
        self.t.setDaemon(True)
        self.t.run = self.startCallbackHandler
        self.t.start()

    def getEvent(self):

        self.eventQueueLock.acquire()

        if len(self.eventQueue) == 0:
            self.eventQueueLock.release()
            return None

        item = self.eventQueue.pop(0)

        if len(self.eventQueue) == 0:
            self.eventQueueNotify.clear()

        self.eventQueueLock.release()
        return item

    def waitEvent(self, delay):
        self.eventQueueNotify.wait(delay)
        return self.getEvent()

    def queue_event(self, event):
        self.eventQueueLock.acquire()
        self.eventQueue.append(event)
        self.eventQueueNotify.set()
        self.eventQueueLock.release()

    def send_event(self, event):
        self.queue_event(pickle.loads(event))

    def startCallbackHandler(self):

        server = UIXMLRPCServer()
        self.host, self.port = server.socket.getsockname()

        server.register_function( self.system_quit, "event.quit" )
        server.register_function( self.send_event, "event.send" )
        server.socket.settimeout(1)

        self.EventHandle = self.BBServer.registerEventHandler(self.host, self.port)

        self.server = server
        while not server.quit:
            server.handle_request()
        server.server_close()

    def system_quit( self ):
        """
        Shut down the callback thread
        """
        try:
            self.BBServer.unregisterEventHandler(self.EventHandle)
        except:
            pass
        self.server.quit = True

class UIXMLRPCServer (SimpleXMLRPCServer):

    def __init__( self, interface = ("localhost", 0) ):
        self.quit = False
        SimpleXMLRPCServer.__init__( self,
            interface,
            requestHandler=SimpleXMLRPCRequestHandler,
            logRequests=False, allow_none=True)

    def get_request(self):
        while not self.quit:
            try:
                sock, addr = self.socket.accept()
                sock.settimeout(1)
                return (sock, addr)
            except socket.timeout:
                pass
        return (None, None)

    def close_request(self, request):
        if request is None:
            return
        SimpleXMLRPCServer.close_request(self, request)

    def process_request(self, request, client_address):
        if request is None:
            return
        SimpleXMLRPCServer.process_request(self, request, client_address)
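
# A minimal consumer sketch for the queue above (illustrative, not part of
# the original file; 'server' and 'handle' are placeholders):
#
#     queue = BBUIEventQueue(server)     # server: a connected BitBake server proxy
#     while True:
#         event = queue.waitEvent(0.25)  # block for at most 250ms
#         if event is not None:
#             handle(event)              # hand the event to the UI
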
@@ -1,42 +0,0 @@
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (C) 2006 - 2007  Michael 'Mickey' Lauer
# Copyright (C) 2006 - 2007  Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import bb.build

class BBUIHelper:
    def __init__(self):
        self.needUpdate = False
        self.running_tasks = {}
        self.failed_tasks = []

    def eventHandler(self, event):
        if isinstance(event, bb.build.TaskStarted):
            self.running_tasks[event.pid] = { 'title' : "%s %s" % (event._package, event._task) }
            self.needUpdate = True
        if isinstance(event, bb.build.TaskSucceeded):
            del self.running_tasks[event.pid]
            self.needUpdate = True
        if isinstance(event, bb.build.TaskFailed):
            del self.running_tasks[event.pid]
            self.failed_tasks.append( { 'title' : "%s %s" % (event._package, event._task)})
            self.needUpdate = True

    def getTasks(self):
        self.needUpdate = False
        return (self.running_tasks, self.failed_tasks)
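
# An illustrative polling sketch (not part of the original file): a frontend
# feeds every event it receives into the helper, then redraws only when
# needUpdate has been set:
#
#     helper = BBUIHelper()
#     helper.eventHandler(event)         # called once per incoming event
#     if helper.needUpdate:
#         running, failed = helper.getTasks()
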
@@ -19,51 +19,32 @@ BitBake Utility Functions
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import re, fcntl, os, string, stat, shutil, time
import sys
import errno
import logging
import bb
import bb.msg
from commands import getstatusoutput
from contextlib import contextmanager
digits = "0123456789"
ascii_letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"

logger = logging.getLogger("BitBake.Util")

# Version comparison
separators = ".-"

# Context used in better_exec, eval
_context = {
    "os": os,
    "bb": bb,
    "time": time,
}
import re, fcntl, os

def explode_version(s):
    r = []
    alpha_regexp = re.compile('^([a-zA-Z]+)(.*)$')
    numeric_regexp = re.compile('^(\d+)(.*)$')
    while (s != ''):
        if s[0] in string.digits:
        if s[0] in digits:
            m = numeric_regexp.match(s)
            r.append(int(m.group(1)))
            s = m.group(2)
            continue
        if s[0] in string.letters:
        if s[0] in ascii_letters:
            m = alpha_regexp.match(s)
            r.append(m.group(1))
            s = m.group(2)
            continue
        r.append(s[0])
        s = s[1:]
    return r
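
# A worked example for explode_version() above (doctest-style sketch, not
# part of the original file):
#
#     >>> explode_version("1.2rc3")
#     [1, '.', 2, 'rc', 3]
#
# Digit runs become ints, letter runs stay strings, and separator characters
# are kept as single-character elements for vercmp_part() to inspect.
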
def vercmp_part(a, b):
    va = explode_version(a)
    vb = explode_version(b)
    sa = False
    sb = False
    while True:
        if va == []:
            ca = None
@@ -75,16 +56,6 @@ def vercmp_part(a, b):
            cb = vb.pop(0)
        if ca == None and cb == None:
            return 0

        if isinstance(ca, basestring):
            sa = ca in separators
        if isinstance(cb, basestring):
            sb = cb in separators
        if sa and not sb:
            return -1
        if not sa and sb:
            return 1

        if ca > cb:
            return 1
        if ca < cb:
@@ -94,138 +65,13 @@ def vercmp(ta, tb):
    (ea, va, ra) = ta
    (eb, vb, rb) = tb

    r = int(ea or 0) - int(eb or 0)
    r = int(ea)-int(eb)
    if (r == 0):
        r = vercmp_part(va, vb)
    if (r == 0):
        r = vercmp_part(ra, rb)
    return r

_package_weights_ = {"pre":-2, "p":0, "alpha":-4, "beta":-3, "rc":-1}    # dicts are unordered
_package_ends_ = ["pre", "p", "alpha", "beta", "rc", "cvs", "bk", "HEAD" ]   # so we need an ordered list

def relparse(myver):
    """Parses the last elements of a version number into a triplet, that can
    later be compared.
    """

    number = 0
    p1 = 0
    p2 = 0
    mynewver = myver.split('_')
    if len(mynewver) == 2:
        # a suffix from _package_weights_
        number = float(mynewver[0])
        match = 0
        for x in _package_ends_:
            elen = len(x)
            if mynewver[1][:elen] == x:
                match = 1
                p1 = _package_weights_[x]
                try:
                    p2 = float(mynewver[1][elen:])
                except:
                    p2 = 0
                break
        if not match:
            # normal number or number with letter at end
            divider = len(myver)-1
            if myver[divider:] not in "1234567890":
                # letter at end
                p1 = ord(myver[divider:])
                number = float(myver[0:divider])
            else:
                number = float(myver)
    else:
        # normal number or number with letter at end
        divider = len(myver)-1
        if myver[divider:] not in "1234567890":
            # letter at end
            p1 = ord(myver[divider:])
            number = float(myver[0:divider])
        else:
            number = float(myver)
    return [number, p1, p2]

__vercmp_cache__ = {}

def vercmp_string(val1, val2):
    """This takes two version strings and returns an integer to tell you whether
    the versions are the same, val1>val2 or val2>val1.
    """

    # quick short-circuit
    if val1 == val2:
        return 0
    valkey = val1 + " " + val2

    # cache lookup
    try:
        return __vercmp_cache__[valkey]
    except KeyError:
        try:
            return - __vercmp_cache__[val2 + " " + val1]
        except KeyError:
            pass

    # consider 1_p2 vs 1.1
    # after expansion will become (1_p2,0) vs (1,1)
    # then 1_p2 is compared with 1 before 0 is compared with 1
    # to solve the bug we need to convert it to (1,0_p2)
    # by splitting the _prepart part and adding it back _after_ expansion

    val1_prepart = val2_prepart = ''
    if val1.count('_'):
        val1, val1_prepart = val1.split('_', 1)
    if val2.count('_'):
        val2, val2_prepart = val2.split('_', 1)

    # replace '-' by '.'
    # FIXME: Is it needed? can val1/2 contain '-'?

    val1 = val1.split("-")
    if len(val1) == 2:
        val1[0] = val1[0] + "." + val1[1]
    val2 = val2.split("-")
    if len(val2) == 2:
        val2[0] = val2[0] + "." + val2[1]

    val1 = val1[0].split('.')
    val2 = val2[0].split('.')

    # add back decimal point so that .03 does not become "3" !
    for x in xrange(1, len(val1)):
        if val1[x][0] == '0' :
            val1[x] = '.' + val1[x]
    for x in xrange(1, len(val2)):
        if val2[x][0] == '0' :
            val2[x] = '.' + val2[x]

    # extend version numbers
    if len(val2) < len(val1):
        val2.extend(["0"]*(len(val1)-len(val2)))
    elif len(val1) < len(val2):
        val1.extend(["0"]*(len(val2)-len(val1)))

    # add back _prepart tails
    if val1_prepart:
        val1[-1] += '_' + val1_prepart
    if val2_prepart:
        val2[-1] += '_' + val2_prepart
    # The above code will extend version numbers out so they
    # have the same number of digits.
    for x in xrange(0, len(val1)):
        cmp1 = relparse(val1[x])
        cmp2 = relparse(val2[x])
        for y in xrange(0, 3):
            myret = cmp1[y] - cmp2[y]
            if myret != 0:
                __vercmp_cache__[valkey] = myret
                return myret
    __vercmp_cache__[valkey] = 0
    return 0
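
# A behaviour sketch for vercmp_string() (doctest-style, not part of the
# original file; return values are only meaningful by sign):
#
#     >>> vercmp_string("1.0", "1.0")
#     0
#     >>> vercmp_string("1.0", "1.1") < 0    # 1.0 sorts before 1.1
#     True
#     >>> vercmp_string("1_p2", "1.1") < 0   # the _prepart handling above
#     True
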
def explode_deps(s):
    """
    Take an RDEPENDS style string of format:
@@ -239,616 +85,155 @@ def explode_deps(s):
    for i in l:
        if i[0] == '(':
            flag = True
            #j = []
        if not flag:
            j = []
        if flag:
            j.append(i)
        else:
            r.append(i)
        #else:
        #    j.append(i)
        if flag and i.endswith(')'):
            flag = False
            # Ignore version
            #r[-1] += ' ' + ' '.join(j)
    return r

def explode_dep_versions(s):
    """
    Take an RDEPENDS style string of format:
    "DEPEND1 (optional version) DEPEND2 (optional version) ..."
    and return a dictionary of dependencies and versions.
    """
    r = {}
    l = s.replace(",", "").split()
    lastdep = None
    lastver = ""
    inversion = False
    for i in l:
        if i[0] == '(':
            inversion = True
            lastver = i[1:] or ""
            #j = []
        elif inversion and i.endswith(')'):
            inversion = False
            lastver = lastver + " " + (i[:-1] or "")
            r[lastdep] = lastver
        elif not inversion:
            r[i] = None
            lastdep = i
            lastver = ""
        elif inversion:
            lastver = lastver + " " + i

    return r
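
# A worked example for explode_dep_versions() (doctest-style sketch, not part
# of the original file; dict ordering may vary on Python 2):
#
#     >>> explode_dep_versions("foo (>= 1.2) bar")
#     {'foo': '>= 1.2', 'bar': None}
#
# join_deps() below performs the inverse, producing "foo (>= 1.2), bar".
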
def join_deps(deps, commasep=True):
    """
    Take the result from explode_dep_versions and generate a dependency string
    """
    result = []
    for dep in deps:
        if deps[dep]:
            result.append(dep + " (" + deps[dep] + ")")
        else:
            result.append(dep)
    if commasep:
        return ", ".join(result)
    else:
        return " ".join(result)

def _print_trace(body, line):
    """
    Print the Environment of a Text Body
    """
    # print the environment of the method
    min_line = max(1, line-4)
    max_line = min(line + 4, len(body))
    for i in xrange(min_line, max_line + 1):
        if line == i:
            logger.error(' *** %.4d:%s', i, body[i-1])
        else:
            logger.error('     %.4d:%s', i, body[i-1])
import bb

def better_compile(text, file, realfile, mode = "exec"):
    # print the environment of the method
    bb.msg.error(bb.msg.domain.Util, "Printing the environment of the function")
    min_line = max(1,line-4)
    max_line = min(line+4,len(body)-1)
    for i in range(min_line,max_line+1):
        bb.msg.error(bb.msg.domain.Util, "\t%.4d:%s" % (i, body[i-1]) )


def better_compile(text, file, realfile):
    """
    A better compile method. This method
    will print the offending lines.
    """
    try:
        return compile(text, file, mode)
    except Exception as e:
        return compile(text, file, "exec")
    except Exception, e:
        import bb,sys

        # split the text into lines again
        body = text.split('\n')
        logger.error("Error in compiling python function in %s", realfile)
        logger.error(str(e))
        if e.lineno:
            logger.error("The lines leading to this error were:")
            logger.error("\t%d:%s:'%s'", e.lineno, e.__class__.__name__, body[e.lineno-1])
            _print_trace(body, e.lineno)
        else:
            logger.error("The function causing this error was:")
            for line in body:
                logger.error(line)
        bb.msg.error(bb.msg.domain.Util, "Error in compiling: ", realfile)
        bb.msg.error(bb.msg.domain.Util, "The lines resulting into this error were:")
        bb.msg.error(bb.msg.domain.Util, "\t%d:%s:'%s'" % (e.lineno, e.__class__.__name__, body[e.lineno-1]))

        raise
        _print_trace(body, e.lineno)

def better_exec(code, context, text, realfile = "<code>"):
        # exit now
        sys.exit(1)

def better_exec(code, context, text, realfile):
    """
    Similar to better_compile, better_exec will
    print the lines that are responsible for the
    error.
    """
    import bb.parse
    if not hasattr(code, "co_filename"):
        code = better_compile(code, realfile, realfile)
    import bb,sys
    try:
        exec(code, _context, context)
    except Exception:
        (t, value, tb) = sys.exc_info()
        exec code in context
    except:
        (t,value,tb) = sys.exc_info()

        if t in [bb.parse.SkipPackage, bb.build.FuncFailed]:
            raise

        # print the Header of the Error Message
        bb.msg.error(bb.msg.domain.Util, "Error in executing: ", realfile)
        bb.msg.error(bb.msg.domain.Util, "Exception:%s Message:%s" % (t,value) )

        # let us find the line number now
        while tb.tb_next:
            tb = tb.tb_next

        import traceback
        exception = traceback.format_exception_only(t, value)
        logger.error('Error executing a python function in %s:\n%s',
                     realfile, ''.join(exception))

        # Strip 'us' from the stack (better_exec call)
        tb = tb.tb_next

        textarray = text.split('\n')
        linefailed = traceback.tb_lineno(tb)

        tbextract = traceback.extract_tb(tb)
        tbformat = "\n".join(traceback.format_list(tbextract))
        logger.error("The stack trace of python calls that resulted in this exception/failure was:")
        for line in tbformat.split('\n'):
            logger.error(line)

        logger.error("The code that was being executed was:")
        _print_trace(textarray, linefailed)
        logger.error("(file: '%s', lineno: %s, function: %s)", tbextract[0][0], tbextract[0][1], tbextract[0][2])

        # See if this is a function we constructed and has calls back into other functions in
        # "text". If so, try and improve the context of the error by diving down the trace
        level = 0
        nexttb = tb.tb_next
        while nexttb is not None:
            if tbextract[level][0] == tbextract[level+1][0] and tbextract[level+1][2] == tbextract[level][0]:
                _print_trace(textarray, tbextract[level+1][1])
                logger.error("(file: '%s', lineno: %s, function: %s)", tbextract[level+1][0], tbextract[level+1][1], tbextract[level+1][2])
            else:
                break
            nexttb = tb.tb_next
            level = level + 1
        line = traceback.tb_lineno(tb)

        _print_trace( text.split('\n'), line )

        raise

def simple_exec(code, context):
    exec(code, _context, context)
def Enum(*names):
    """
    A simple class to give Enum support
    """

def better_eval(source, locals):
    return eval(source, _context, locals)
    assert names, "Empty enums are not supported"

@contextmanager
def fileslocked(files):
    """Context manager for locking and unlocking file locks."""
    locks = []
    if files:
        for lockfile in files:
            locks.append(bb.utils.lockfile(lockfile))
    class EnumClass(object):
        __slots__ = names
        def __iter__(self):        return iter(constants)
        def __len__(self):         return len(constants)
        def __getitem__(self, i):  return constants[i]
        def __repr__(self):        return 'Enum' + str(names)
        def __str__(self):         return 'enum ' + str(constants)

    yield
    class EnumValue(object):
        __slots__ = ('__value')
        def __init__(self, value): self.__value = value
        Value = property(lambda self: self.__value)
        EnumType = property(lambda self: EnumType)
        def __hash__(self):        return hash(self.__value)
        def __cmp__(self, other):
            # C fans might want to remove the following assertion
            # to make all enums comparable by ordinal value {;))
            assert self.EnumType is other.EnumType, "Only values from the same enum are comparable"
            return cmp(self.__value, other.__value)
        def __invert__(self):      return constants[maximum - self.__value]
        def __nonzero__(self):     return bool(self.__value)
        def __repr__(self):        return str(names[self.__value])

    for lock in locks:
        bb.utils.unlockfile(lock)
    maximum = len(names) - 1
    constants = [None] * len(names)
    for i, each in enumerate(names):
        val = EnumValue(i)
        setattr(EnumClass, each, val)
        constants[i] = val
    constants = tuple(constants)
    EnumType = EnumClass()
    return EnumType

def lockfile(name, shared=False):
def lockfile(name):
    """
    Use the file fn as a lock file, return when the lock has been acquired.
    Returns a variable to pass to unlockfile().
    """
    dirname = os.path.dirname(name)
    mkdirhier(dirname)

    if not os.access(dirname, os.W_OK):
        logger.error("Unable to acquire lock '%s', directory is not writable",
                     name)
        sys.exit(1)

    op = fcntl.LOCK_EX
    if shared:
        op = fcntl.LOCK_SH

    while True:
        # If we leave the lockfiles lying around there is no problem
        # but we should clean up after ourselves. This gives potential
        # for races though. To work around this, when we acquire the lock
        # we check the file we locked was still the lock file on disk
        # by comparing inode numbers. If they don't match or the lockfile
        # no longer exists, we start again.

        # This implementation is unfair since the last person to request the
        # lock is the most likely to win it.

        try:
            lf = open(name, 'a+')
            fileno = lf.fileno()
            fcntl.flock(fileno, op)
            statinfo = os.fstat(fileno)
            if os.path.exists(lf.name):
                statinfo2 = os.stat(lf.name)
                if statinfo.st_ino == statinfo2.st_ino:
                    return lf
            lf.close()
        except Exception:
            continue
        lf = open(name, "a+")
        fcntl.flock(lf.fileno(), fcntl.LOCK_EX)
        statinfo = os.fstat(lf.fileno())
        if os.path.exists(lf.name):
            statinfo2 = os.stat(lf.name)
            if statinfo.st_ino == statinfo2.st_ino:
                return lf
        # File no longer exists or changed, retry
        lf.close()

def unlockfile(lf):
    """
    Unlock a file locked using lockfile()
    """
    try:
        # If we had a shared lock, we need to promote to exclusive before
        # removing the lockfile. Attempt this, ignore failures.
        fcntl.flock(lf.fileno(), fcntl.LOCK_EX|fcntl.LOCK_NB)
        os.unlink(lf.name)
    except (IOError, OSError):
        pass
    fcntl.flock(lf.fileno(), fcntl.LOCK_UN)
    lf.close()
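
# A usage sketch for the lock helpers above (illustrative, not part of the
# original file; the lock path is invented):
#
#     lock = lockfile("/tmp/example.lock")
#     try:
#         pass    # ... critical section ...
#     finally:
#         unlockfile(lock)
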
def md5_file(filename):
    """
    Return the hex string representation of the MD5 checksum of filename.
    """
    try:
        import hashlib
        m = hashlib.md5()
    except ImportError:
        import md5
        m = md5.new()

    for line in open(filename):
        m.update(line)
    return m.hexdigest()

def sha256_file(filename):
    """
    Return the hex string representation of the 256-bit SHA checksum of
    filename.  On Python 2.4 this will return None, so callers will need to
    handle that by either skipping SHA checks, or running a standalone sha256sum
    binary.
    """
    try:
        import hashlib
    except ImportError:
        return None

    s = hashlib.sha256()
    for line in open(filename):
        s.update(line)
    return s.hexdigest()
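
# A usage sketch for the checksum helpers above (illustrative, not part of
# the original file). Both iterate the file line by line; an empty file, for
# example, gives the well-known empty MD5 digest:
#
#     >>> md5_file("/dev/null")
#     'd41d8cd98f00b204e9800998ecf8427e'
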
def preserved_envvars_exported():
    """Variables which are taken from the environment and placed in and exported
    from the metadata"""
    return [
        'BB_TASKHASH',
        'HOME',
        'LOGNAME',
        'PATH',
        'PWD',
        'SHELL',
        'TERM',
        'USER',
        'USERNAME',
    ]

def preserved_envvars_exported_interactive():
    """Variables which are taken from the environment and placed in and exported
    from the metadata, for interactive tasks"""
    return [
        'COLORTERM',
        'DBUS_SESSION_BUS_ADDRESS',
        'DESKTOP_SESSION',
        'DESKTOP_STARTUP_ID',
        'DISPLAY',
        'GNOME_KEYRING_PID',
        'GNOME_KEYRING_SOCKET',
        'GPG_AGENT_INFO',
        'GTK_RC_FILES',
        'SESSION_MANAGER',
        'KRB5CCNAME',
        'SSH_AUTH_SOCK',
        'XAUTHORITY',
        'XDG_DATA_DIRS',
        'XDG_SESSION_COOKIE',
    ]

def preserved_envvars():
    """Variables which are taken from the environment and placed in the metadata"""
    v = [
        'BBPATH',
        'BB_PRESERVE_ENV',
        'BB_ENV_WHITELIST',
        'BB_ENV_EXTRAWHITE',
        'LANG',
        '_',
    ]
    return v + preserved_envvars_exported() + preserved_envvars_exported_interactive()

def filter_environment(good_vars):
    """
    Create a pristine environment for bitbake. This will remove variables that
    are not known and may influence the build in a negative way.
    """

    removed_vars = []
    for key in os.environ.keys():
        if key in good_vars:
            continue

        removed_vars.append(key)
        os.unsetenv(key)
        del os.environ[key]

    if len(removed_vars):
        logger.debug(1, "Removed the following variables from the environment: %s", ", ".join(removed_vars))

    return removed_vars

def create_interactive_env(d):
    for k in preserved_envvars_exported_interactive():
        os.setenv(k, bb.data.getVar(k, d, True))

def clean_environment():
    """
    Clean up any spurious environment variables. This will remove any
    variables the user hasn't chosen to preserve.
    """
    if 'BB_PRESERVE_ENV' not in os.environ:
        if 'BB_ENV_WHITELIST' in os.environ:
            good_vars = os.environ['BB_ENV_WHITELIST'].split()
        else:
            good_vars = preserved_envvars()
        if 'BB_ENV_EXTRAWHITE' in os.environ:
            good_vars.extend(os.environ['BB_ENV_EXTRAWHITE'].split())
        filter_environment(good_vars)
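
# An illustrative shell-level sketch of the whitelist logic above (not part
# of the original file): extra variables survive the scrub only if named in
# BB_ENV_EXTRAWHITE, e.g.
#
#     $ export BB_ENV_EXTRAWHITE="http_proxy https_proxy"
#     $ bitbake <target>
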
def empty_environment():
    """
    Remove all variables from the environment.
    """
    for s in os.environ.keys():
        os.unsetenv(s)
        del os.environ[s]

def build_environment(d):
    """
    Build an environment from all exported variables.
    """
    import bb.data
    for var in bb.data.keys(d):
        export = bb.data.getVarFlag(var, "export", d)
        if export:
            os.environ[var] = bb.data.getVar(var, d, True) or ""

def remove(path, recurse=False):
    """Equivalent to rm -f or rm -rf"""
    if not path:
        return
    import os, errno, shutil, glob
    for name in glob.glob(path):
        try:
            os.unlink(name)
        except OSError as exc:
            if recurse and exc.errno == errno.EISDIR:
                shutil.rmtree(name)
            elif exc.errno != errno.ENOENT:
                raise

def prunedir(topdir):
    # Delete everything reachable from the directory named in 'topdir'.
    # CAUTION:  This is dangerous!
    for root, dirs, files in os.walk(topdir, topdown = False):
        for name in files:
            os.remove(os.path.join(root, name))
        for name in dirs:
            if os.path.islink(os.path.join(root, name)):
                os.remove(os.path.join(root, name))
            else:
                os.rmdir(os.path.join(root, name))
    os.rmdir(topdir)

#
# Could also use return re.compile("(%s)" % "|".join(map(re.escape, suffixes))).sub(lambda mo: "", var)
# but that's possibly insane and suffixes is probably going to be small
#
def prune_suffix(var, suffixes, d):
    # See if var ends with any of the suffixes listed and
    # remove it if found
    for suffix in suffixes:
        if var.endswith(suffix):
            return var.replace(suffix, "")
    return var

def mkdirhier(directory):
    """Create a directory like 'mkdir -p', but does not complain if
    the directory already exists, unlike os.makedirs
    """

    try:
        os.makedirs(directory)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise e

def movefile(src, dest, newmtime = None, sstat = None):
    """Moves a file from src to dest, preserving all permissions and
    attributes; mtime will be preserved even when moving across
    filesystems.  Returns true on success and false on failure. Move is
    atomic.
    """

    #print "movefile(" + src + "," + dest + "," + str(newmtime) + "," + str(sstat) + ")"
    try:
        if not sstat:
            sstat = os.lstat(src)
    except Exception as e:
        print("movefile: Stating source file failed...", e)
        return None

    destexists = 1
    try:
        dstat = os.lstat(dest)
    except:
        dstat = os.lstat(os.path.dirname(dest))
        destexists = 0

    if destexists:
        if stat.S_ISLNK(dstat[stat.ST_MODE]):
            try:
                os.unlink(dest)
                destexists = 0
            except Exception as e:
                pass

    if stat.S_ISLNK(sstat[stat.ST_MODE]):
        try:
            target = os.readlink(src)
            if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
                os.unlink(dest)
            os.symlink(target, dest)
            #os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
            os.unlink(src)
            return os.lstat(dest)
        except Exception as e:
            print("movefile: failed to properly create symlink:", dest, "->", target, e)
            return None

    renamefailed = 1
    if sstat[stat.ST_DEV] == dstat[stat.ST_DEV]:
        try:
            os.rename(src, dest)
            renamefailed = 0
        except Exception as e:
            if e[0] != errno.EXDEV:
                # Some random error.
                print("movefile: Failed to move", src, "to", dest, e)
                return None
            # Invalid cross-device-link 'bind' mounted or actually Cross-Device

    if renamefailed:
        didcopy = 0
        if stat.S_ISREG(sstat[stat.ST_MODE]):
            try: # For safety copy then move it over.
                shutil.copyfile(src, dest + "#new")
                os.rename(dest + "#new", dest)
                didcopy = 1
            except Exception as e:
                print('movefile: copy', src, '->', dest, 'failed.', e)
                return None
        else:
            #we don't yet handle special, so we need to fall back to /bin/mv
            a = getstatusoutput("/bin/mv -f " + "'" + src + "' '" + dest + "'")
            if a[0] != 0:
                print("movefile: Failed to move special file:" + src + "' to '" + dest + "'", a)
                return None # failure
        try:
            if didcopy:
                os.lchown(dest, sstat[stat.ST_UID], sstat[stat.ST_GID])
                os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
                os.unlink(src)
        except Exception as e:
            print("movefile: Failed to chown/chmod/unlink", dest, e)
            return None

    if newmtime:
        os.utime(dest, (newmtime, newmtime))
    else:
        os.utime(dest, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
        newmtime = sstat[stat.ST_MTIME]
    return newmtime

def copyfile(src, dest, newmtime = None, sstat = None):
    """
    Copies a file from src to dest, preserving all permissions and
    attributes; mtime will be preserved even when moving across
    filesystems.  Returns true on success and false on failure.
    """
    #print "copyfile(" + src + "," + dest + "," + str(newmtime) + "," + str(sstat) + ")"
    try:
        if not sstat:
            sstat = os.lstat(src)
    except Exception as e:
        print("copyfile: Stating source file failed...", e)
        return False

    destexists = 1
    try:
        dstat = os.lstat(dest)
    except:
        dstat = os.lstat(os.path.dirname(dest))
        destexists = 0

    if destexists:
        if stat.S_ISLNK(dstat[stat.ST_MODE]):
            try:
                os.unlink(dest)
                destexists = 0
            except Exception as e:
                pass

    if stat.S_ISLNK(sstat[stat.ST_MODE]):
        try:
            target = os.readlink(src)
            if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
                os.unlink(dest)
            os.symlink(target, dest)
            #os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
            return os.lstat(dest)
        except Exception as e:
            print("copyfile: failed to properly create symlink:", dest, "->", target, e)
            return False

    if stat.S_ISREG(sstat[stat.ST_MODE]):
        try:
            srcchown = False
            if not os.access(src, os.R_OK):
                # Make sure we can read it
                srcchown = True
                os.chmod(src, sstat[stat.ST_MODE] | stat.S_IRUSR)

            # For safety copy then move it over.
            shutil.copyfile(src, dest + "#new")
            os.rename(dest + "#new", dest)
        except Exception as e:
            print('copyfile: copy', src, '->', dest, 'failed.', e)
            return False
        finally:
            if srcchown:
                os.chmod(src, sstat[stat.ST_MODE])
                os.utime(src, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))

    else:
        #we don't yet handle special, so we need to fall back to /bin/cp
        a = getstatusoutput("/bin/cp -f " + "'" + src + "' '" + dest + "'")
        if a[0] != 0:
            print("copyfile: Failed to copy special file:" + src + "' to '" + dest + "'", a)
            return False # failure
    try:
        os.lchown(dest, sstat[stat.ST_UID], sstat[stat.ST_GID])
        os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
    except Exception as e:
        print("copyfile: Failed to chown/chmod/unlink", dest, e)
        return False

    if newmtime:
        os.utime(dest, (newmtime, newmtime))
    else:
        os.utime(dest, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
        newmtime = sstat[stat.ST_MTIME]
    return newmtime

def which(path, item, direction = 0):
    """
    Locate a file in a PATH
    """

    paths = (path or "").split(':')
    if direction != 0:
        paths.reverse()

    for p in paths:
        next = os.path.join(p, item)
        if os.path.exists(next):
            return next

    return ""
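
# A worked example for which() (illustrative, not part of the original file;
# it assumes /bin/sh exists and /usr/local/bin/sh does not):
#
#     >>> which("/usr/local/bin:/bin", "sh")
#     '/bin/sh'
#
# A non-zero 'direction' reverses the search, so the entry nearest the end of
# the path wins instead of the first.
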
def init_logger(logger, verbose, debug, debug_domains):
    """
    Set verbosity and debug levels in the logger
    """

    if debug:
        bb.msg.set_debug_level(debug)
    elif verbose:
        bb.msg.set_verbose(True)
    else:
        bb.msg.set_debug_level(0)

    if debug_domains:
        bb.msg.set_debug_domains(debug_domains)

def to_boolean(string, default=None):
    if not string:
        return default

    normalized = string.lower()
    if normalized in ("y", "yes", "1", "true"):
        return True
    elif normalized in ("n", "no", "0", "false"):
        return False
    else:
        raise ValueError("Invalid value for to_boolean: %s" % string)
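
# A behaviour sketch for to_boolean() (doctest-style, not part of the
# original file):
#
#     >>> to_boolean("Yes")
#     True
#     >>> to_boolean("0")
#     False
#     >>> to_boolean(None, default=False)
#     False
#     >>> to_boolean("maybe")
#     Traceback (most recent call last):
#         ...
#     ValueError: Invalid value for to_boolean: maybe
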
@@ -1,570 +0,0 @@
# -*- coding: utf-8 -*-
"""
    codegen
    ~~~~~~~

    Extension to ast that allows ast -> python code generation.

    :copyright: Copyright 2008 by Armin Ronacher.
    :license: BSD.
"""
from ast import *

BOOLOP_SYMBOLS = {
    And:        'and',
    Or:         'or'
}

BINOP_SYMBOLS = {
    Add:        '+',
    Sub:        '-',
    Mult:       '*',
    Div:        '/',
    FloorDiv:   '//',
    Mod:        '%',
    LShift:     '<<',
    RShift:     '>>',
    BitOr:      '|',
    BitAnd:     '&',
    BitXor:     '^'
}

CMPOP_SYMBOLS = {
    Eq:         '==',
    Gt:         '>',
    GtE:        '>=',
    In:         'in',
    Is:         'is',
    IsNot:      'is not',
    Lt:         '<',
    LtE:        '<=',
    NotEq:      '!=',
    NotIn:      'not in'
}

UNARYOP_SYMBOLS = {
    Invert:     '~',
    Not:        'not',
    UAdd:       '+',
    USub:       '-'
}

ALL_SYMBOLS = {}
ALL_SYMBOLS.update(BOOLOP_SYMBOLS)
ALL_SYMBOLS.update(BINOP_SYMBOLS)
ALL_SYMBOLS.update(CMPOP_SYMBOLS)
ALL_SYMBOLS.update(UNARYOP_SYMBOLS)

def to_source(node, indent_with=' ' * 4, add_line_information=False):
    """This function can convert a node tree back into python sourcecode.
    This is useful for debugging purposes, especially if you're dealing with
    custom asts not generated by python itself.

    It could be that the sourcecode is evaluable when the AST itself is not
    compilable / evaluable.  The reason for this is that the AST contains some
    more data than regular sourcecode does, which is dropped during
    conversion.

    Each level of indentation is replaced with `indent_with`.  Per default this
    parameter is equal to four spaces as suggested by PEP 8, but it might be
    adjusted to match the application's styleguide.

    If `add_line_information` is set to `True` comments for the line numbers
    of the nodes are added to the output.  This can be used to spot wrong line
    number information of statement nodes.
    """
    generator = SourceGenerator(indent_with, add_line_information)
    generator.visit(node)
    return ''.join(generator.result)
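
# A usage sketch for to_source() (illustrative, not part of the original
# file; it relies on visitor methods of the full module, only part of which
# is reproduced below):
#
#     >>> import ast
#     >>> to_source(ast.parse("x = 40 + 2"))
#     'x = 40 + 2'
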
class SourceGenerator(NodeVisitor):
|
||||
"""This visitor is able to transform a well formed syntax tree into python
|
||||
sourcecode. For more details have a look at the docstring of the
|
||||
`node_to_source` function.
|
||||
"""
|
||||
|
||||
def __init__(self, indent_with, add_line_information=False):
|
||||
self.result = []
|
||||
self.indent_with = indent_with
|
||||
self.add_line_information = add_line_information
|
||||
self.indentation = 0
|
||||
self.new_lines = 0
|
||||
|
||||
def write(self, x):
|
||||
if self.new_lines:
|
||||
if self.result:
|
||||
self.result.append('\n' * self.new_lines)
|
||||
self.result.append(self.indent_with * self.indentation)
|
||||
self.new_lines = 0
|
||||
self.result.append(x)
|
||||
|
||||
def newline(self, node=None, extra=0):
|
||||
self.new_lines = max(self.new_lines, 1 + extra)
|
||||
if node is not None and self.add_line_information:
|
||||
self.write('# line: %s' % node.lineno)
|
||||
self.new_lines = 1
|
||||
|
||||
def body(self, statements):
|
||||
self.new_line = True
|
||||
self.indentation += 1
|
||||
for stmt in statements:
|
||||
self.visit(stmt)
|
||||
self.indentation -= 1
|
||||
|
||||
def body_or_else(self, node):
|
||||
self.body(node.body)
|
||||
if node.orelse:
|
||||
self.newline()
|
||||
self.write('else:')
|
||||
self.body(node.orelse)
|
||||
|
||||
def signature(self, node):
|
||||
want_comma = []
|
||||
def write_comma():
|
||||
if want_comma:
|
||||
self.write(', ')
|
||||
else:
|
||||
want_comma.append(True)
|
||||
|
||||
padding = [None] * (len(node.args) - len(node.defaults))
|
||||
for arg, default in zip(node.args, padding + node.defaults):
|
||||
write_comma()
|
||||
self.visit(arg)
|
||||
if default is not None:
|
||||
self.write('=')
|
||||
self.visit(default)
|
||||
if node.vararg is not None:
|
||||
write_comma()
|
||||
self.write('*' + node.vararg)
|
||||
if node.kwarg is not None:
|
||||
write_comma()
|
||||
self.write('**' + node.kwarg)
|
||||
|
||||
def decorators(self, node):
|
||||
for decorator in node.decorator_list:
|
||||
self.newline(decorator)
|
||||
self.write('@')
|
||||
self.visit(decorator)
|
||||
|
||||
# Statements
|
||||
|
||||
def visit_Assign(self, node):
|
||||
self.newline(node)
|
||||
for idx, target in enumerate(node.targets):
|
||||
if idx:
|
||||
self.write(', ')
|
||||
self.visit(target)
|
||||
self.write(' = ')
|
||||
self.visit(node.value)
|
||||
|
||||
def visit_AugAssign(self, node):
|
||||
self.newline(node)
|
||||
self.visit(node.target)
|
||||
self.write(BINOP_SYMBOLS[type(node.op)] + '=')
|
||||
self.visit(node.value)
|
||||
|
||||
def visit_ImportFrom(self, node):
|
||||
self.newline(node)
|
||||
self.write('from %s%s import ' % ('.' * node.level, node.module))
|
||||
for idx, item in enumerate(node.names):
|
||||
if idx:
|
||||
self.write(', ')
|
||||
self.write(item)
|
||||
|
||||
def visit_Import(self, node):
|
||||
self.newline(node)
|
||||
for item in node.names:
|
||||
self.write('import ')
|
||||
self.visit(item)
|
||||
|
||||
def visit_Expr(self, node):
|
||||
self.newline(node)
|
||||
self.generic_visit(node)
|
||||
|
||||
def visit_FunctionDef(self, node):
|
||||
self.newline(extra=1)
|
||||
self.decorators(node)
|
||||
self.newline(node)
|
||||
self.write('def %s(' % node.name)
|
||||
self.signature(node.args)
|
||||
self.write('):')
|
||||
self.body(node.body)
|
||||
|
||||
def visit_ClassDef(self, node):
|
||||
have_args = []
|
||||
def paren_or_comma():
|
||||
if have_args:
|
||||
self.write(', ')
|
||||
else:
|
||||
have_args.append(True)
|
||||
self.write('(')
|
||||
|
||||
self.newline(extra=2)
|
||||
self.decorators(node)
|
||||
self.newline(node)
|
||||
self.write('class %s' % node.name)
|
||||
for base in node.bases:
|
||||
paren_or_comma()
|
||||
self.visit(base)
|
||||
# XXX: the if here is used to keep this module compatible
|
||||
# with python 2.6.
|
||||
if hasattr(node, 'keywords'):
|
||||
for keyword in node.keywords:
|
||||
paren_or_comma()
|
||||
self.write(keyword.arg + '=')
|
||||
self.visit(keyword.value)
|
||||
if node.starargs is not None:
|
||||
paren_or_comma()
|
||||
self.write('*')
|
||||
self.visit(node.starargs)
|
||||
if node.kwargs is not None:
|
||||
paren_or_comma()
|
||||
self.write('**')
|
||||
self.visit(node.kwargs)
|
||||
self.write(have_args and '):' or ':')
|
||||
self.body(node.body)
|
||||
|
||||
def visit_If(self, node):
|
||||
self.newline(node)
|
||||
self.write('if ')
|
||||
self.visit(node.test)
|
||||
self.write(':')
|
||||
self.body(node.body)
|
||||
while True:
|
||||
else_ = node.orelse
|
||||
if len(else_) == 1 and isinstance(else_[0], If):
|
||||
node = else_[0]
|
||||
self.newline()
|
||||
self.write('elif ')
|
||||
self.visit(node.test)
|
||||
self.write(':')
|
||||
self.body(node.body)
|
||||
else:
|
||||
self.newline()
|
||||
self.write('else:')
|
||||
self.body(else_)
|
||||
break
|
||||
|
||||
def visit_For(self, node):
|
||||
self.newline(node)
|
||||
self.write('for ')
|
||||
self.visit(node.target)
|
||||
self.write(' in ')
|
||||
self.visit(node.iter)
|
||||
self.write(':')
|
||||
self.body_or_else(node)
|
||||
|
||||
def visit_While(self, node):
|
||||
self.newline(node)
|
||||
self.write('while ')
|
||||
self.visit(node.test)
|
||||
self.write(':')
|
||||
self.body_or_else(node)
|
||||
|
||||
def visit_With(self, node):
|
||||
self.newline(node)
|
||||
self.write('with ')
|
||||
self.visit(node.context_expr)
|
||||
if node.optional_vars is not None:
|
||||
self.write(' as ')
|
||||
self.visit(node.optional_vars)
|
||||
self.write(':')
|
||||
self.body(node.body)
|
||||
|
||||
def visit_Pass(self, node):
|
||||
self.newline(node)
|
||||
self.write('pass')
|
||||
|
||||
    def visit_Print(self, node):
        # XXX: python 2.6 only
        self.newline(node)
        self.write('print ')
        want_comma = False
        if node.dest is not None:
            self.write(' >> ')
            self.visit(node.dest)
            want_comma = True
        for value in node.values:
            if want_comma:
                self.write(', ')
            self.visit(value)
            want_comma = True
        if not node.nl:
            self.write(',')

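    # ast.Print only exists for 2.x source; under Python 3 a print()
    # call parses as an ordinary ast.Call and is handled by visit_Call
    # below.
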
    def visit_Delete(self, node):
        self.newline(node)
        self.write('del ')
        # the comma-separated targets live in node.targets
        for idx, target in enumerate(node.targets):
            if idx:
                self.write(', ')
            self.visit(target)

    def visit_TryExcept(self, node):
        self.newline(node)
        self.write('try:')
        self.body(node.body)
        for handler in node.handlers:
            self.visit(handler)

    def visit_TryFinally(self, node):
        self.newline(node)
        self.write('try:')
        self.body(node.body)
        self.newline(node)
        self.write('finally:')
        self.body(node.finalbody)

    def visit_Global(self, node):
        self.newline(node)
        self.write('global ' + ', '.join(node.names))

    def visit_Nonlocal(self, node):
        self.newline(node)
        self.write('nonlocal ' + ', '.join(node.names))

    def visit_Return(self, node):
        self.newline(node)
        # node.value is None for a bare 'return'
        if node.value is None:
            self.write('return')
        else:
            self.write('return ')
            self.visit(node.value)

    def visit_Break(self, node):
        self.newline(node)
        self.write('break')

    def visit_Continue(self, node):
        self.newline(node)
        self.write('continue')

    def visit_Raise(self, node):
        # XXX: Python 2.6 / 3.0 compatibility
        self.newline(node)
        self.write('raise')
        if hasattr(node, 'exc') and node.exc is not None:
            self.write(' ')
            self.visit(node.exc)
            if node.cause is not None:
                self.write(' from ')
                self.visit(node.cause)
        elif hasattr(node, 'type') and node.type is not None:
            self.write(' ')
            self.visit(node.type)
            if node.inst is not None:
                self.write(', ')
                self.visit(node.inst)
            if node.tback is not None:
                self.write(', ')
                self.visit(node.tback)

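    # The hasattr() probes above let one visitor serve both AST shapes:
    # 3.x Raise nodes carry exc/cause while 2.x nodes carry
    # type/inst/tback.
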
    # Expressions

    def visit_Attribute(self, node):
        self.visit(node.value)
        self.write('.' + node.attr)

    def visit_Call(self, node):
        want_comma = []
        def write_comma():
            if want_comma:
                self.write(', ')
            else:
                want_comma.append(True)

        self.visit(node.func)
        self.write('(')
        for arg in node.args:
            write_comma()
            self.visit(arg)
        for keyword in node.keywords:
            write_comma()
            self.write(keyword.arg + '=')
            self.visit(keyword.value)
        if node.starargs is not None:
            write_comma()
            self.write('*')
            self.visit(node.starargs)
        if node.kwargs is not None:
            write_comma()
            self.write('**')
            self.visit(node.kwargs)
        self.write(')')

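    # want_comma is a list rather than a boolean because the nested
    # write_comma() closure could not rebind an outer local under
    # Python 2 (no 'nonlocal' there); mutating a list sidesteps that.
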
    def visit_Name(self, node):
        self.write(node.id)

    def visit_Str(self, node):
        self.write(repr(node.s))

    def visit_Bytes(self, node):
        self.write(repr(node.s))

    def visit_Num(self, node):
        self.write(repr(node.n))

    def visit_Tuple(self, node):
        self.write('(')
        idx = -1
        for idx, item in enumerate(node.elts):
            if idx:
                self.write(', ')
            self.visit(item)
        self.write(idx and ')' or ',)')

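    # idx stays -1 for an empty tuple and is 0 after a single element,
    # so the final expression appends ',)' only in the one-element
    # case: () -> '()', (1,) -> '(1,)', (1, 2) -> '(1, 2)'.
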
    def sequence_visit(left, right):
        def visit(self, node):
            self.write(left)
            for idx, item in enumerate(node.elts):
                if idx:
                    self.write(', ')
                self.visit(item)
            self.write(right)
        return visit

    visit_List = sequence_visit('[', ']')
    visit_Set = sequence_visit('{', '}')
    del sequence_visit

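    # sequence_visit runs once, at class-definition time, to stamp out
    # the List and Set visitors from a shared template; it is deleted
    # immediately afterwards so it never becomes a method itself.
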
    def visit_Dict(self, node):
        self.write('{')
        for idx, (key, value) in enumerate(zip(node.keys, node.values)):
            if idx:
                self.write(', ')
            self.visit(key)
            self.write(': ')
            self.visit(value)
        self.write('}')

    def visit_BinOp(self, node):
        self.visit(node.left)
        self.write(' %s ' % BINOP_SYMBOLS[type(node.op)])
        self.visit(node.right)

    def visit_BoolOp(self, node):
        self.write('(')
        for idx, value in enumerate(node.values):
            if idx:
                self.write(' %s ' % BOOLOP_SYMBOLS[type(node.op)])
            self.visit(value)
        self.write(')')

    def visit_Compare(self, node):
        self.write('(')
        self.visit(node.left)
        for op, right in zip(node.ops, node.comparators):
            self.write(' %s ' % CMPOP_SYMBOLS[type(op)])
            self.visit(right)
        self.write(')')

    def visit_UnaryOp(self, node):
        self.write('(')
        op = UNARYOP_SYMBOLS[type(node.op)]
        self.write(op)
        if op == 'not':
            self.write(' ')
        self.visit(node.operand)
        self.write(')')

    def visit_Subscript(self, node):
        self.visit(node.value)
        self.write('[')
        self.visit(node.slice)
        self.write(']')

    def visit_Slice(self, node):
        if node.lower is not None:
            self.visit(node.lower)
        self.write(':')
        if node.upper is not None:
            self.visit(node.upper)
        if node.step is not None:
            self.write(':')
            if not (isinstance(node.step, Name) and node.step.id == 'None'):
                self.visit(node.step)

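    # A step that is literally the name None is reduced to a bare
    # trailing ':', keeping output like 'a[1:2:]' rather than
    # 'a[1:2:None]'.
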
    def visit_ExtSlice(self, node):
        for idx, item in enumerate(node.dims):
            if idx:
                self.write(', ')
            self.visit(item)

    def visit_Yield(self, node):
        self.write('yield')
        # node.value is None for a bare 'yield'
        if node.value is not None:
            self.write(' ')
            self.visit(node.value)

    def visit_Lambda(self, node):
        self.write('lambda ')
        self.signature(node.args)
        self.write(': ')
        self.visit(node.body)

    def visit_Ellipsis(self, node):
        self.write('Ellipsis')

    def generator_visit(left, right):
        def visit(self, node):
            self.write(left)
            self.visit(node.elt)
            for comprehension in node.generators:
                self.visit(comprehension)
            self.write(right)
        return visit

    visit_ListComp = generator_visit('[', ']')
    visit_GeneratorExp = generator_visit('(', ')')
    visit_SetComp = generator_visit('{', '}')
    del generator_visit

    def visit_DictComp(self, node):
        self.write('{')
        self.visit(node.key)
        self.write(': ')
        self.visit(node.value)
        for comprehension in node.generators:
            self.visit(comprehension)
        self.write('}')

    def visit_IfExp(self, node):
        self.visit(node.body)
        self.write(' if ')
        self.visit(node.test)
        self.write(' else ')
        self.visit(node.orelse)

    def visit_Starred(self, node):
        self.write('*')
        self.visit(node.value)

    def visit_Repr(self, node):
        # XXX: python 2.6 only
        self.write('`')
        self.visit(node.value)
        self.write('`')

    # Helper Nodes

    def visit_alias(self, node):
        self.write(node.name)
        if node.asname is not None:
            self.write(' as ' + node.asname)

    def visit_comprehension(self, node):
        self.write(' for ')
        self.visit(node.target)
        self.write(' in ')
        self.visit(node.iter)
        if node.ifs:
            for if_ in node.ifs:
                self.write(' if ')
                self.visit(if_)

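    # Emits the ' for target in iter' tail of a comprehension plus any
    # ' if cond' filters, e.g. the generators of '[x for x in xs if x]'
    # append ' for x in xs if x' after the element expression.
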
    def visit_excepthandler(self, node):
        self.newline(node)
        self.write('except')
        if node.type is not None:
            self.write(' ')
            self.visit(node.type)
        if node.name is not None:
            self.write(' as ')
            self.visit(node.name)
        self.write(':')
        self.body(node.body)

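# A minimal round-trip sketch. It assumes the module also defines the
# usual to_source() convenience wrapper around this generator class;
# neither name is shown in this hunk, so treat both as assumptions:
#
#     import ast
#     tree = ast.parse('del a, b')
#     print(to_source(tree))        # hypothetical helper -> 'del a, b'
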
@@ -1,4 +0,0 @@
# PLY package
# Author: David Beazley (dave@dabeaz.com)

__all__ = ['lex','yacc']