Compare commits


2 Commits

Author: Richard Purdie
SHA1: 08290c6003
Message: self-hosted-image: Update poky revision to point at the 1.2 release branch
(From OE-Core rev: 00256125873ff6f1630743a712e882e5f473a9d2)
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
Date: 2012-04-18 15:59:56 +01:00

Author: Elizabeth Flanagan
SHA1: 729e7f774c
Message: distro.conf: Flipping for denzil
Flipping values in distro.conf for upcoming release
Signed-off-by: Elizabeth Flanagan <elizabeth.flanagan@intel.com>
Date: 2012-04-18 15:55:32 +01:00
2079 changed files with 141004 additions and 56999 deletions

.gitignore

@@ -1,8 +1,10 @@
bitbake
*.pyc
*.pyo
/*.patch
build*/
build*/conf/local.conf
build*/conf/bblayers.conf
build*/downloads
build*/tmp/
build*/sstate-cache
pyshtables.py
pstage/
scripts/oe-git-proxy-socks

README

@@ -18,7 +18,7 @@ e.g. for the hardware support. Poky is in turn a component of the Yocto Project.
The Yocto Project has extensive documentation about the system including a
reference manual which can be found at:
http://yoctoproject.org/documentation
http://yoctoproject.org/community/documentation
OpenEmbedded-Core is a layer containing the core metadata for current versions
of OpenEmbedded. It is distro-less (can build a functional image with
@@ -27,23 +27,3 @@ DISTRO = "") and contains only emulated machine support.
For information about OpenEmbedded, see the OpenEmbedded website:
http://www.openembedded.org/
Where to Send Patches
=====================
As Poky is an integration repository, patches against the various components
should be sent to their respective upstreams.
bitbake:
bitbake-devel@lists.openembedded.org
meta-yocto:
poky@yoctoproject.org
Most everything else should be sent to the OpenEmbedded Core mailing list. If
in doubt, check the oe-core git repository for the content you intend to modify.
Before sending, be sure the patches apply cleanly to the current oe-core git
repository.
openembedded-core@lists.openembedded.org
Note: The scripts directory should be treated with extra care as it is a mix
of oe-core and poky-specific files.


@@ -40,7 +40,7 @@ from bb import cooker
from bb import ui
from bb import server
__version__ = "1.15.3"
__version__ = "1.15.1"
logger = logging.getLogger("BitBake")
@@ -56,11 +56,10 @@ class BBConfiguration(object):
def get_ui(config):
if not config.ui:
# modify 'ui' attribute because it is also read by cooker
config.ui = os.environ.get('BITBAKE_UI', 'knotty')
interface = config.ui
if config.ui:
interface = config.ui
else:
interface = 'knotty'
try:
# Dynamically load the UI based on the ui name. Although we
@@ -70,7 +69,7 @@ def get_ui(config):
return getattr(module, interface).main
except AttributeError:
sys.exit("FATAL: Invalid user interface '%s' specified.\n"
"Valid interfaces: depexp, goggle, ncurses, hob, knotty [default]." % interface)
"Valid interfaces: depexp, goggle, ncurses, hob, knotty [default], knotty2." % interface)
# Display bitbake/OE warnings via the BitBake.Warnings logger, ignoring others"""
@@ -118,9 +117,6 @@ Default BBFILES are the .bb files in the current directory.""")
parser.add_option("-c", "--cmd", help = "Specify task to execute. Note that this only executes the specified task for the providee and the packages it depends on, i.e. 'compile' does not implicitly call stage for the dependencies (IOW: use only if you know what you are doing). Depending on the base.bbclass a listtasks tasks is defined and will show available tasks",
action = "store", dest = "cmd")
parser.add_option("-C", "--clear-stamp", help = "Invalidate the stamp for the specified cmd such as 'compile' and run the default task for the specified target(s)",
action = "store", dest = "invalidate_stamp")
parser.add_option("-r", "--read", help = "read the specified file before bitbake.conf",
action = "append", dest = "prefile", default = [])
@@ -142,13 +138,13 @@ Default BBFILES are the .bb files in the current directory.""")
parser.add_option("-p", "--parse-only", help = "quit after parsing the BB files (developers only)",
action = "store_true", dest = "parse_only", default = False)
parser.add_option("-s", "--show-versions", help = "show current and preferred versions of all recipes",
parser.add_option("-s", "--show-versions", help = "show current and preferred versions of all packages",
action = "store_true", dest = "show_versions", default = False)
parser.add_option("-e", "--environment", help = "show the global or per-package environment (this is what used to be bbread)",
action = "store_true", dest = "show_environment", default = False)
parser.add_option("-g", "--graphviz", help = "emit the dependency trees of the specified packages in the dot syntax, and the pn-buildlist to show the build list",
parser.add_option("-g", "--graphviz", help = "emit the dependency trees of the specified packages in the dot syntax",
action = "store_true", dest = "dot_graph", default = False)
parser.add_option("-I", "--ignore-deps", help = """Assume these dependencies don't exist and are already provided (equivalent to ASSUME_PROVIDED). Useful to make dependency graphs more appealing""",


@@ -86,13 +86,9 @@ class Commands(cmd.Cmd):
self.cooker_data = self.cooker.status
self.cooker_data.appends = self.cooker.appendlist
def check_prepare_cooker(self, config_only = False):
def check_prepare_cooker(self):
if not self.cooker_data:
if config_only:
self.cooker.parseConfiguration()
self.cooker_data = self.cooker.status
else:
self.prepare_cooker()
self.prepare_cooker()
def default(self, line):
"""Handle unrecognised commands"""
@@ -117,7 +113,8 @@ class Commands(cmd.Cmd):
def do_show_layers(self, args):
"""show current configured layers"""
self.check_prepare_cooker(config_only = True)
self.check_prepare_cooker()
logger.plain('')
logger.plain("%s %s %s" % ("layer".ljust(20), "path".ljust(40), "priority"))
logger.plain('=' * 74)
for layerdir in self.bblayers:
@@ -141,7 +138,7 @@ class Commands(cmd.Cmd):
def do_show_overlayed(self, args):
"""list overlayed recipes (where the same recipe exists in another layer)
"""list overlayed recipes (where the same recipe exists in another layer that has a higher layer priority)
usage: show-overlayed [-f] [-s]
@@ -521,7 +518,7 @@ Recipes are listed with the bbappends that apply to them as subitems.
logger.plain('No append files found')
return
logger.plain('=== Appended recipes ===')
logger.plain('State of append files:')
pnlist = list(self.cooker_data.pkg_pn.keys())
pnlist.sort()


@@ -1,38 +0,0 @@
#!/usr/bin/env python
#
# Copyright (C) 2012 Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os
import sys, logging
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)), 'lib'))
import unittest
try:
import bb
except RuntimeError as exc:
sys.exit(str(exc))
tests = ["bb.tests.codeparser",
"bb.tests.cow",
"bb.tests.data",
"bb.tests.fetch",
"bb.tests.utils"]
for t in tests:
__import__(t)
unittest.main(argv=["bitbake-selftest"] + tests)


@@ -1,120 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2012 Wind River Systems, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname( \
os.path.abspath(__file__))), 'lib'))
try:
import bb
except RuntimeError as exc:
sys.exit(str(exc))
import gtk
import optparse
import pygtk
from bb.ui.crumbs.hig import DeployImageDialog, ImageSelectionDialog, CrumbsMessageDialog
from bb.ui.crumbs.hobwidget import HobAltButton, HobButton
# I put all the fs bitbake supported here. Need more test.
DEPLOYABLE_IMAGE_TYPES = ["jffs2", "cramfs", "ext2", "ext3", "btrfs", "squashfs", "ubi", "vmdk"]
Title = "USB Image Writer"
class DeployWindow(gtk.Window):
def __init__(self, image_path=''):
super(DeployWindow, self).__init__()
if len(image_path) > 0:
valid = True
if not os.path.exists(image_path):
valid = False
lbl = "<b>Invalid image file path: %s.</b>\nPress <b>Select Image</b> to select an image." % image_path
else:
image_path = os.path.abspath(image_path)
extend_name = os.path.splitext(image_path)[1][1:]
if extend_name not in DEPLOYABLE_IMAGE_TYPES:
valid = False
lbl = "<b>Undeployable imge type: %s</b>\nPress <b>Select Image</b> to select an image." % extend_name
if not valid:
image_path = ''
crumbs_dialog = CrumbsMessageDialog(self, lbl, gtk.STOCK_DIALOG_INFO)
button = crumbs_dialog.add_button("Close", gtk.RESPONSE_OK)
HobButton.style_button(button)
crumbs_dialog.run()
crumbs_dialog.destroy()
self.deploy_dialog = DeployImageDialog(Title, image_path, self,
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT
| gtk.DIALOG_NO_SEPARATOR, None, standalone=True)
close_button = self.deploy_dialog.add_button("Close", gtk.RESPONSE_NO)
HobAltButton.style_button(close_button)
close_button.connect('clicked', gtk.main_quit)
write_button = self.deploy_dialog.add_button("Write USB image", gtk.RESPONSE_YES)
HobAltButton.style_button(write_button)
self.deploy_dialog.connect('select_image_clicked', self.select_image_clicked_cb)
self.deploy_dialog.connect('destroy', gtk.main_quit)
response = self.deploy_dialog.show()
def select_image_clicked_cb(self, dialog):
cwd = os.getcwd()
dialog = ImageSelectionDialog(cwd, DEPLOYABLE_IMAGE_TYPES, Title, self, gtk.FILE_CHOOSER_ACTION_SAVE )
button = dialog.add_button("Cancel", gtk.RESPONSE_NO)
HobAltButton.style_button(button)
button = dialog.add_button("Open", gtk.RESPONSE_YES)
HobAltButton.style_button(button)
response = dialog.run()
if response == gtk.RESPONSE_YES:
if not dialog.image_names:
lbl = "<b>No selections made</b>\nClicked the radio button to select a image."
crumbs_dialog = CrumbsMessageDialog(self, lbl, gtk.STOCK_DIALOG_INFO)
button = crumbs_dialog.add_button("Close", gtk.RESPONSE_OK)
HobButton.style_button(button)
crumbs_dialog.run()
crumbs_dialog.destroy()
dialog.destroy()
return
# get the full path of image
image_path = os.path.join(dialog.image_folder, dialog.image_names[0])
self.deploy_dialog.set_image_text_buffer(image_path)
self.deploy_dialog.set_image_path(image_path)
dialog.destroy()
def main():
parser = optparse.OptionParser(
usage = """%prog [-h] [image_file]
%prog writes bootable images to USB devices. You can
provide the image file on the command line or select it using the GUI.""")
options, args = parser.parse_args(sys.argv)
image_file = args[1] if len(args) > 1 else ''
dw = DeployWindow(image_file)
if __name__ == '__main__':
try:
main()
gtk.main()
except Exception:
import traceback
traceback.print_exc(3)


@@ -1,68 +0,0 @@
#!/usr/bin/env python
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Copyright (C) 2012 Wind River Systems, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# This is used for dumping the bb_cache.dat, the output format is:
# recipe_path PN PV PACKAGES
#
import os
import sys
import warnings
# For importing bb.cache
sys.path.insert(0, os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), '../lib'))
from bb.cache import CoreRecipeInfo
import cPickle as pickle
def main(argv=None):
"""
Get the mapping for the target recipe.
"""
if len(argv) != 1:
print >>sys.stderr, "Error, need one argument!"
return 2
cachefile = argv[0]
with open(cachefile, "rb") as cachefile:
pickled = pickle.Unpickler(cachefile)
while cachefile:
try:
key = pickled.load()
val = pickled.load()
except Exception:
break
if isinstance(val, CoreRecipeInfo) and (not val.skipped):
pn = val.pn
# Filter out the native recipes.
if key.startswith('virtual:native:') or pn.endswith("-native"):
continue
# 1.0 is the default version for a no PV recipe.
if val.__dict__.has_key("pv"):
pv = val.pv
else:
pv = "1.0"
print("%s %s %s %s" % (key, pn, pv, ' '.join(val.packages)))
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))


@@ -103,13 +103,7 @@ Show debug logging for the specified logging domains
.TP
.B \-P, \-\-profile
profile the command and print a report
.SH ENVIRONMENT VARIABLES
bitbake uses the following environment variables to control its
operation:
.TP
.B BITBAKE_UI
The bitbake user interface; overridden by the \fB-u\fP commandline option.
.SH AUTHORS
BitBake was written by


@@ -228,7 +228,7 @@ addtask printdate before do_build</screen></para>
<para>'nostamp' - don't generate a stamp file for a task. This means the task is always rexecuted.</para>
<para>'fakeroot' - this task needs to be run in a fakeroot environment, obtained by adding the variables in FAKEROOTENV to the environment.</para>
<para>'umask' - the umask to run the task under.</para>
<para> For the 'deptask', 'rdeptask', 'depends', 'rdepends' and 'recrdeptask' flags please see the dependencies section.</para>
<para> For the 'deptask', 'rdeptask', 'recdeptask' and 'recrdeptask' flags please see the dependencies section.</para>
</section>
<section>
@@ -308,35 +308,37 @@ SRC_URI_append_1.0.7+ = "file://some_patch_which_the_new_versions_need.patch;pat
</section>
<section>
<title>Dependency handling</title>
<para>BitBake handles dependencies at the task level since to allow for efficient operation with multiple processed executing in parallel. A robust method of specifying task dependencies is therefore needed. </para>
<para>BitBake 1.7.x onwards works with the metadata at the task level since this is optimal when dealing with multiple threads of execution. A robust method of specifing task dependencies is therefore needed. </para>
<section>
<title>Dependencies internal to the .bb file</title>
<para>Where the dependencies are internal to a given .bb file, the dependencies are handled by the previously detailed addtask directive.</para>
</section>
<section>
<title>Build Dependencies</title>
<title>DEPENDS</title>
<para>DEPENDS lists build time dependencies. The 'deptask' flag for tasks is used to signify the task of each item listed in DEPENDS which must have completed before that task can be executed.</para>
<para><screen>do_configure[deptask] = "do_populate_staging"</screen></para>
<para>means the do_populate_staging task of each item in DEPENDS must have completed before do_configure can execute.</para>
</section>
<section>
<title>Runtime Dependencies</title>
<para>The PACKAGES variable lists runtime packages and each of these can have RDEPENDS and RRECOMMENDS runtime dependencies. The 'rdeptask' flag for tasks is used to signify the task of each item runtime dependency which must have completed before that task can be executed.</para>
<title>RDEPENDS</title>
<para>RDEPENDS lists runtime dependencies. The 'rdeptask' flag for tasks is used to signify the task of each item listed in RDEPENDS which must have completed before that task can be executed.</para>
<para><screen>do_package_write[rdeptask] = "do_package"</screen></para>
<para>means the do_package task of each item in RDEPENDS must have completed before do_package_write can execute.</para>
</section>
<section>
<title>Recursive Dependencies</title>
<para>These are specified with the 'recrdeptask' flag which is used signify the task(s) of dependencies which must have completed before that task can be executed. It works by looking though the build and runtime dependencies of the current recipe as well as any inter-task dependencies the task has, then adding a dependency on the listed task. It will then recurse through the dependencies of those tasks and so on.</para>
<para>It may be desireable to recurse not just through the dependencies of those tasks but through the build and runtime dependencies of dependent tasks too. If that is the case, the taskname itself should be referenced in the task list, e.g. do_a[recrdeptask] = "do_a do_b".</para>
<title>Recursive DEPENDS</title>
<para>These are specified with the 'recdeptask' flag and is used signify the task(s) of each DEPENDS which must have completed before that task can be executed. It applies recursively so the DEPENDS of each item in the original DEPENDS must be met and so on.</para>
</section>
<section>
<title>Recursive RDEPENDS</title>
<para>These are specified with the 'recrdeptask' flag and is used signify the task(s) of each RDEPENDS which must have completed before that task can be executed. It applies recursively so the RDEPENDS of each item in the original RDEPENDS must be met and so on. It also runs all DEPENDS first.</para>
</section>
<section>
<title>Inter task</title>
<para>The 'depends' flag for tasks is a more generic form of which allows an interdependency on specific tasks rather than specifying the data in DEPENDS.</para>
<para>The 'depends' flag for tasks is a more generic form of which allows an interdependency on specific tasks rather than specifying the data in DEPENDS or RDEPENDS.</para>
<para><screen>do_patch[depends] = "quilt-native:do_populate_staging"</screen></para>
<para>means the do_populate_staging task of the target quilt-native must have completed before the do_patch can execute.</para>
<para>The 'rdepends' flag works in a similar way but takes targets in the runtime namespace instead of the build time dependency namespace.</para>
</section>
</section>


@@ -21,7 +21,7 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
__version__ = "1.15.3"
__version__ = "1.15.1"
import sys
if sys.version_info < (2, 6, 0):


@@ -72,7 +72,7 @@ class TaskBase(event.Event):
self._task = t
self._package = d.getVar("PF", True)
event.Event.__init__(self)
self._message = "recipe %s: task %s: %s" % (d.getVar("PF", True), t, self.getDisplayName())
self._message = "package %s: task %s: %s" % (d.getVar("PF", True), t, self.getDisplayName())
def getTask(self):
return self._task
@@ -135,8 +135,7 @@ class LogTee(object):
def __repr__(self):
return '<LogTee {0}>'.format(self.name)
def flush(self):
self.outfile.flush()
def exec_func(func, d, dirs = None):
"""Execute an BB 'function'"""
@@ -175,19 +174,8 @@ def exec_func(func, d, dirs = None):
lockfiles = None
tempdir = data.getVar('T', d, 1)
# or func allows items to be executed outside of the normal
# task set, such as buildhistory
task = data.getVar('BB_RUNTASK', d, 1) or func
if task == func:
taskfunc = task
else:
taskfunc = "%s.%s" % (task, func)
runfmt = data.getVar('BB_RUNFMT', d, 1) or "run.{func}.{pid}"
runfn = runfmt.format(taskfunc=taskfunc, task=task, func=func, pid=os.getpid())
runfile = os.path.join(tempdir, runfn)
bb.utils.mkdirhier(os.path.dirname(runfile))
bb.utils.mkdirhier(tempdir)
runfile = os.path.join(tempdir, 'run.{0}.{1}'.format(func, os.getpid()))
with bb.utils.fileslocked(lockfiles):
if ispython:
@@ -218,8 +206,6 @@ def exec_func_python(func, d, runfile, cwd=None):
olddir = None
os.chdir(cwd)
bb.debug(2, "Executing python function %s" % func)
try:
comp = utils.better_compile(code, func, bbfile)
utils.better_exec(comp, {"d": d}, code, bbfile)
@@ -229,15 +215,13 @@ def exec_func_python(func, d, runfile, cwd=None):
raise FuncFailed(func, None)
finally:
bb.debug(2, "Python function %s finished" % func)
if cwd and olddir:
try:
os.chdir(olddir)
except OSError:
pass
def exec_func_shell(func, d, runfile, cwd=None):
def exec_func_shell(function, d, runfile, cwd=None):
"""Execute a shell function from the metadata
Note on directory behavior. The 'dirs' varflag should contain a list
@@ -250,18 +234,18 @@ def exec_func_shell(func, d, runfile, cwd=None):
with open(runfile, 'w') as script:
script.write('#!/bin/sh -e\n')
data.emit_func(func, script, d)
data.emit_func(function, script, d)
if bb.msg.loggerVerboseLogs:
script.write("set -x\n")
if cwd:
script.write("cd %s\n" % cwd)
script.write("%s\n" % func)
script.write("%s\n" % function)
os.chmod(runfile, 0775)
cmd = runfile
if d.getVarFlag(func, 'fakeroot'):
if d.getVarFlag(function, 'fakeroot'):
fakerootcmd = d.getVar('FAKEROOT', True)
if fakerootcmd:
cmd = [fakerootcmd, runfile]
@@ -271,15 +255,11 @@ def exec_func_shell(func, d, runfile, cwd=None):
else:
logfile = sys.stdout
bb.debug(2, "Executing shell function %s" % func)
try:
bb.process.run(cmd, shell=False, stdin=NULL, log=logfile)
except bb.process.CmdError:
logfn = d.getVar('BB_LOGFILE', True)
raise FuncFailed(func, logfn)
bb.debug(2, "Shell function %s finished" % func)
raise FuncFailed(function, logfn)
def _task_data(fn, task, d):
localdata = data.createCopy(d)
@@ -310,23 +290,8 @@ def _exec_task(fn, task, d, quieterr):
bb.fatal("T variable not set, unable to build")
bb.utils.mkdirhier(tempdir)
# Determine the logfile to generate
logfmt = localdata.getVar('BB_LOGFMT', True) or 'log.{task}.{pid}'
logbase = logfmt.format(task=task, pid=os.getpid())
# Document the order of the tasks...
logorder = os.path.join(tempdir, 'log.task_order')
try:
logorderfile = file(logorder, 'a')
except OSError:
logger.exception("Opening log file '%s'", logorder)
pass
logorderfile.write('{0} ({1}): {2}\n'.format(task, os.getpid(), logbase))
logorderfile.close()
# Setup the courtesy link to the logfn
loglink = os.path.join(tempdir, 'log.{0}'.format(task))
logbase = 'log.{0}.{1}'.format(task, os.getpid())
logfn = os.path.join(tempdir, logbase)
if loglink:
bb.utils.remove(loglink)
@@ -349,7 +314,6 @@ def _exec_task(fn, task, d, quieterr):
# Handle logfiles
si = file('/dev/null', 'r')
try:
bb.utils.mkdirhier(os.path.dirname(logfn))
logfile = file(logfn, 'w')
except OSError:
logger.exception("Opening log file '%s'", logfn)
@@ -376,7 +340,6 @@ def _exec_task(fn, task, d, quieterr):
bblogger.addHandler(errchk)
localdata.setVar('BB_LOGFILE', logfn)
localdata.setVar('BB_RUNTASK', task)
event.fire(TaskStarted(task, localdata), localdata)
try:
@@ -464,9 +427,7 @@ def stamp_internal(taskname, d, file_name):
stamp = bb.parse.siggen.stampfile(stamp, file_name, taskname, extrainfo)
stampdir = os.path.dirname(stamp)
if bb.parse.cached_mtime_noerror(stampdir) == 0:
bb.utils.mkdirhier(stampdir)
bb.utils.mkdirhier(os.path.dirname(stamp))
return stamp
@@ -497,24 +458,6 @@ def del_stamp(task, d, file_name = None):
stamp = stamp_internal(task, d, file_name)
bb.utils.remove(stamp)
def write_taint(task, d, file_name = None):
"""
Creates a "taint" file which will force the specified task and its
dependents to be re-run the next time by influencing the value of its
taskhash.
(d can be a data dict or dataCache)
"""
import uuid
if file_name:
taintfn = d.stamp[file_name] + '.' + task + '.taint'
else:
taintfn = d.getVar('STAMP', True) + '.' + task + '.taint'
bb.utils.mkdirhier(os.path.dirname(taintfn))
# The specific content of the taint file is not really important,
# we just need it to be random, so a random UUID is used
with open(taintfn, 'w') as taintf:
taintf.write(str(uuid.uuid4()))
def stampfile(taskname, d, file_name = None):
"""
Return the stamp for a given task
@@ -546,7 +489,6 @@ def add_tasks(tasklist, d):
deptask = data.expand(flags[name], d)
task_deps[name][task] = deptask
getTask('depends')
getTask('rdepends')
getTask('deptask')
getTask('rdeptask')
getTask('recrdeptask')


@@ -1,12 +1,11 @@
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# BitBake Cache implementation
# BitBake 'Event' implementation
#
# Caching of bitbake variables before task execution
# Copyright (C) 2006 Richard Purdie
# Copyright (C) 2012 Intel Corporation
# but small sections based on code from bin/bitbake:
# Copyright (C) 2003, 2004 Chris Larson
@@ -43,7 +42,7 @@ except ImportError:
logger.info("Importing cPickle failed. "
"Falling back to a very slow implementation.")
__cache_version__ = "144"
__cache_version__ = "143"
def getCacheFile(path, filename, data_hash):
return os.path.join(path, filename + "." + data_hash)
@@ -76,13 +75,9 @@ class RecipeInfoCommon(object):
for task in tasks)
@classmethod
def flaglist(cls, flag, varlist, metadata, squash=False):
out_dict = dict((var, metadata.getVarFlag(var, flag, True))
def flaglist(cls, flag, varlist, metadata):
return dict((var, metadata.getVarFlag(var, flag, True))
for var in varlist)
if squash:
return dict((k,v) for (k,v) in out_dict.iteritems() if v)
else:
return out_dict
@classmethod
def getvar(cls, var, metadata):
@@ -132,7 +127,6 @@ class CoreRecipeInfo(RecipeInfoCommon):
self.stamp = self.getvar('STAMP', metadata)
self.stamp_base = self.flaglist('stamp-base', self.tasks, metadata)
self.stamp_extrainfo = self.flaglist('stamp-extra-info', self.tasks, metadata)
self.file_checksums = self.flaglist('file-checksums', self.tasks, metadata, True)
self.packages_dynamic = self.listvar('PACKAGES_DYNAMIC', metadata)
self.depends = self.depvar('DEPENDS', metadata)
self.provides = self.depvar('PROVIDES', metadata)
@@ -159,7 +153,6 @@ class CoreRecipeInfo(RecipeInfoCommon):
cachedata.stamp = {}
cachedata.stamp_base = {}
cachedata.stamp_extrainfo = {}
cachedata.file_checksums = {}
cachedata.fn_provides = {}
cachedata.pn_provides = defaultdict(list)
cachedata.all_depends = []
@@ -191,7 +184,6 @@ class CoreRecipeInfo(RecipeInfoCommon):
cachedata.stamp[fn] = self.stamp
cachedata.stamp_base[fn] = self.stamp_base
cachedata.stamp_extrainfo[fn] = self.stamp_extrainfo
cachedata.file_checksums[fn] = self.file_checksums
provides = [self.pn]
for provide in self.provides:
@@ -711,115 +703,4 @@ class CacheData(object):
for info in info_array:
info.add_cacheData(self, fn)
class MultiProcessCache(object):
"""
BitBake multi-process cache implementation
Used by the codeparser & file checksum caches
"""
def __init__(self):
self.cachefile = None
self.cachedata = self.create_cachedata()
self.cachedata_extras = self.create_cachedata()
def init_cache(self, d):
cachedir = (d.getVar("PERSISTENT_DIR", True) or
d.getVar("CACHE", True))
if cachedir in [None, '']:
return
bb.utils.mkdirhier(cachedir)
self.cachefile = os.path.join(cachedir, self.__class__.cache_file_name)
logger.debug(1, "Using cache in '%s'", self.cachefile)
try:
p = pickle.Unpickler(file(self.cachefile, "rb"))
data, version = p.load()
except:
return
if version != self.__class__.CACHE_VERSION:
return
self.cachedata = data
def internSet(self, items):
new = set()
for i in items:
new.add(intern(i))
return new
def compress_keys(self, data):
# Override in subclasses if desired
return
def create_cachedata(self):
data = [{}]
return data
def save_extras(self, d):
if not self.cachefile:
return
glf = bb.utils.lockfile(self.cachefile + ".lock", shared=True)
i = os.getpid()
lf = None
while not lf:
lf = bb.utils.lockfile(self.cachefile + ".lock." + str(i), retry=False)
if not lf or os.path.exists(self.cachefile + "-" + str(i)):
if lf:
bb.utils.unlockfile(lf)
lf = None
i = i + 1
continue
p = pickle.Pickler(file(self.cachefile + "-" + str(i), "wb"), -1)
p.dump([self.cachedata_extras, self.__class__.CACHE_VERSION])
bb.utils.unlockfile(lf)
bb.utils.unlockfile(glf)
def merge_data(self, source, dest):
for j in range(0,len(dest)):
for h in source[j]:
if h not in dest[j]:
dest[j][h] = source[j][h]
def save_merge(self, d):
if not self.cachefile:
return
glf = bb.utils.lockfile(self.cachefile + ".lock")
try:
p = pickle.Unpickler(file(self.cachefile, "rb"))
data, version = p.load()
except (IOError, EOFError):
data, version = None, None
if version != self.__class__.CACHE_VERSION:
data = self.create_cachedata()
for f in [y for y in os.listdir(os.path.dirname(self.cachefile)) if y.startswith(os.path.basename(self.cachefile) + '-')]:
f = os.path.join(os.path.dirname(self.cachefile), f)
try:
p = pickle.Unpickler(file(f, "rb"))
extradata, version = p.load()
except (IOError, EOFError):
extradata, version = self.create_cachedata(), None
if version != self.__class__.CACHE_VERSION:
continue
self.merge_data(extradata, data)
os.unlink(f)
self.compress_keys(data)
p = pickle.Pickler(file(self.cachefile, "wb"), -1)
p.dump([data, self.__class__.CACHE_VERSION])
bb.utils.unlockfile(glf)


@@ -1,90 +0,0 @@
# Local file checksum cache implementation
#
# Copyright (C) 2012 Intel Corporation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os
import stat
import bb.utils
import logging
from bb.cache import MultiProcessCache
logger = logging.getLogger("BitBake.Cache")
try:
import cPickle as pickle
except ImportError:
import pickle
logger.info("Importing cPickle failed. "
"Falling back to a very slow implementation.")
# mtime cache (non-persistent)
# based upon the assumption that files do not change during bitbake run
class FileMtimeCache(object):
cache = {}
def cached_mtime(self, f):
if f not in self.cache:
self.cache[f] = os.stat(f)[stat.ST_MTIME]
return self.cache[f]
def cached_mtime_noerror(self, f):
if f not in self.cache:
try:
self.cache[f] = os.stat(f)[stat.ST_MTIME]
except OSError:
return 0
return self.cache[f]
def update_mtime(self, f):
self.cache[f] = os.stat(f)[stat.ST_MTIME]
return self.cache[f]
def clear(self):
self.cache.clear()
# Checksum + mtime cache (persistent)
class FileChecksumCache(MultiProcessCache):
cache_file_name = "local_file_checksum_cache.dat"
CACHE_VERSION = 1
def __init__(self):
self.mtime_cache = FileMtimeCache()
MultiProcessCache.__init__(self)
def get_checksum(self, f):
entry = self.cachedata[0].get(f)
cmtime = self.mtime_cache.cached_mtime(f)
if entry:
(mtime, hashval) = entry
if cmtime == mtime:
return hashval
else:
bb.debug(2, "file %s changed mtime, recompute checksum" % f)
hashval = bb.utils.md5_file(f)
self.cachedata_extras[0][f] = (cmtime, hashval)
return hashval
def merge_data(self, source, dest):
for h in source[0]:
if h in dest:
(smtime, _) = source[0][h]
(dmtime, _) = dest[0][h]
if smtime > dmtime:
dest[0][h] = source[0][h]
else:
dest[0][h] = source[0][h]


@@ -5,10 +5,10 @@ import os.path
import bb.utils, bb.data
from itertools import chain
from pysh import pyshyacc, pyshlex, sherrors
from bb.cache import MultiProcessCache
logger = logging.getLogger('BitBake.CodeParser')
PARSERCACHE_VERSION = 2
try:
import cPickle as pickle
@@ -32,56 +32,133 @@ def check_indent(codestr):
return codestr
pythonparsecache = {}
shellparsecache = {}
pythonparsecacheextras = {}
shellparsecacheextras = {}
class CodeParserCache(MultiProcessCache):
cache_file_name = "bb_codeparser.dat"
CACHE_VERSION = 2
def __init__(self):
MultiProcessCache.__init__(self)
self.pythoncache = self.cachedata[0]
self.shellcache = self.cachedata[1]
self.pythoncacheextras = self.cachedata_extras[0]
self.shellcacheextras = self.cachedata_extras[1]
def init_cache(self, d):
MultiProcessCache.init_cache(self, d)
# cachedata gets re-assigned in the parent
self.pythoncache = self.cachedata[0]
self.shellcache = self.cachedata[1]
def compress_keys(self, data):
# When the dicts are originally created, python calls intern() on the set keys
# which significantly improves memory usage. Sadly the pickle/unpickle process
# doesn't call intern() on the keys and results in the same strings being duplicated
# in memory. This also means pickle will save the same string multiple times in
# the cache file. By interning the data here, the cache file shrinks dramatically
# meaning faster load times and the reloaded cache files also consume much less
# memory. This is worth any performance hit from this loops and the use of the
# intern() data storage.
# Python 3.x may behave better in this area
for h in data[0]:
data[0][h]["refs"] = self.internSet(data[0][h]["refs"])
data[0][h]["execs"] = self.internSet(data[0][h]["execs"])
for h in data[1]:
data[1][h]["execs"] = self.internSet(data[1][h]["execs"])
return
def create_cachedata(self):
data = [{}, {}]
return data
codeparsercache = CodeParserCache()
def parser_cachefile(d):
cachedir = (d.getVar("PERSISTENT_DIR", True) or
d.getVar("CACHE", True))
if cachedir in [None, '']:
return None
bb.utils.mkdirhier(cachedir)
cachefile = os.path.join(cachedir, "bb_codeparser.dat")
logger.debug(1, "Using cache in '%s' for codeparser cache", cachefile)
return cachefile
def parser_cache_init(d):
codeparsercache.init_cache(d)
global pythonparsecache
global shellparsecache
cachefile = parser_cachefile(d)
if not cachefile:
return
try:
p = pickle.Unpickler(file(cachefile, "rb"))
data, version = p.load()
except:
return
if version != PARSERCACHE_VERSION:
return
pythonparsecache = data[0]
shellparsecache = data[1]
def parser_cache_save(d):
codeparsercache.save_extras(d)
cachefile = parser_cachefile(d)
if not cachefile:
return
glf = bb.utils.lockfile(cachefile + ".lock", shared=True)
i = os.getpid()
lf = None
while not lf:
shellcache = {}
pythoncache = {}
lf = bb.utils.lockfile(cachefile + ".lock." + str(i), retry=False)
if not lf or os.path.exists(cachefile + "-" + str(i)):
if lf:
bb.utils.unlockfile(lf)
lf = None
i = i + 1
continue
shellcache = shellparsecacheextras
pythoncache = pythonparsecacheextras
p = pickle.Pickler(file(cachefile + "-" + str(i), "wb"), -1)
p.dump([[pythoncache, shellcache], PARSERCACHE_VERSION])
bb.utils.unlockfile(lf)
bb.utils.unlockfile(glf)
def internSet(items):
new = set()
for i in items:
new.add(intern(i))
return new
def parser_cache_savemerge(d):
codeparsercache.save_merge(d)
cachefile = parser_cachefile(d)
if not cachefile:
return
glf = bb.utils.lockfile(cachefile + ".lock")
try:
p = pickle.Unpickler(file(cachefile, "rb"))
data, version = p.load()
except (IOError, EOFError):
data, version = None, None
if version != PARSERCACHE_VERSION:
data = [{}, {}]
for f in [y for y in os.listdir(os.path.dirname(cachefile)) if y.startswith(os.path.basename(cachefile) + '-')]:
f = os.path.join(os.path.dirname(cachefile), f)
try:
p = pickle.Unpickler(file(f, "rb"))
extradata, version = p.load()
except (IOError, EOFError):
extradata, version = [{}, {}], None
if version != PARSERCACHE_VERSION:
continue
for h in extradata[0]:
if h not in data[0]:
data[0][h] = extradata[0][h]
for h in extradata[1]:
if h not in data[1]:
data[1][h] = extradata[1][h]
os.unlink(f)
# When the dicts are originally created, python calls intern() on the set keys
# which significantly improves memory usage. Sadly the pickle/unpickle process
# doesn't call intern() on the keys and results in the same strings being duplicated
# in memory. This also means pickle will save the same string multiple times in
# the cache file. By interning the data here, the cache file shrinks dramatically
# meaning faster load times and the reloaded cache files also consume much less
# memory. This is worth any performance hit from this loops and the use of the
# intern() data storage.
# Python 3.x may behave better in this area
for h in data[0]:
data[0][h]["refs"] = internSet(data[0][h]["refs"])
data[0][h]["execs"] = internSet(data[0][h]["execs"])
for h in data[1]:
data[1][h]["execs"] = internSet(data[1][h]["execs"])
p = pickle.Pickler(file(cachefile, "wb"), -1)
p.dump([data, PARSERCACHE_VERSION])
bb.utils.unlockfile(glf)
Logger = logging.getLoggerClass()
class BufferedLogger(Logger):
@@ -158,14 +235,14 @@ class PythonParser():
def parse_python(self, node):
h = hash(str(node))
if h in codeparsercache.pythoncache:
self.references = codeparsercache.pythoncache[h]["refs"]
self.execs = codeparsercache.pythoncache[h]["execs"]
if h in pythonparsecache:
self.references = pythonparsecache[h]["refs"]
self.execs = pythonparsecache[h]["execs"]
return
if h in codeparsercache.pythoncacheextras:
self.references = codeparsercache.pythoncacheextras[h]["refs"]
self.execs = codeparsercache.pythoncacheextras[h]["execs"]
if h in pythonparsecacheextras:
self.references = pythonparsecacheextras[h]["refs"]
self.execs = pythonparsecacheextras[h]["execs"]
return
@@ -179,9 +256,9 @@ class PythonParser():
self.references.update(self.var_references)
self.references.update(self.var_execs)
codeparsercache.pythoncacheextras[h] = {}
codeparsercache.pythoncacheextras[h]["refs"] = self.references
codeparsercache.pythoncacheextras[h]["execs"] = self.execs
pythonparsecacheextras[h] = {}
pythonparsecacheextras[h]["refs"] = self.references
pythonparsecacheextras[h]["execs"] = self.execs
class ShellParser():
def __init__(self, name, log):
@@ -199,12 +276,12 @@ class ShellParser():
h = hash(str(value))
if h in codeparsercache.shellcache:
self.execs = codeparsercache.shellcache[h]["execs"]
if h in shellparsecache:
self.execs = shellparsecache[h]["execs"]
return self.execs
if h in codeparsercache.shellcacheextras:
self.execs = codeparsercache.shellcacheextras[h]["execs"]
if h in shellparsecacheextras:
self.execs = shellparsecacheextras[h]["execs"]
return self.execs
try:
@@ -216,8 +293,8 @@ class ShellParser():
self.process_tokens(token)
self.execs = set(cmd for cmd in self.allexecs if cmd not in self.funcdefs)
codeparsercache.shellcacheextras[h] = {}
codeparsercache.shellcacheextras[h]["execs"] = self.execs
shellparsecacheextras[h] = {}
shellparsecacheextras[h]["execs"] = self.execs
return self.execs


@@ -158,7 +158,6 @@ class BBCooker:
#
self.configuration.event_data = bb.data.createCopy(self.configuration.data)
bb.data.update_data(self.configuration.event_data)
bb.parse.init_parser(self.configuration.event_data)
# TOSTOP must not be set or our children will hang when they output
fd = sys.stdout.fileno()
@@ -219,12 +218,6 @@ class BBCooker:
nice = int(nice) - curnice
buildlog.verbose("Renice to %s " % os.nice(nice))
if self.status:
del self.status
self.status = bb.cache.CacheData(self.caches_array)
self.handleCollections( self.configuration.data.getVar("BBFILE_COLLECTIONS", True) )
def parseCommandLine(self):
# Parse any commandline into actions
self.commandlineAction = {'action':None, 'msg':None}
@@ -304,6 +297,8 @@ class BBCooker:
# Parse the configuration here. We need to do it explicitly here since
# this showEnvironment() code path doesn't use the cache
self.parseConfiguration()
self.status = bb.cache.CacheData(self.caches_array)
self.handleCollections( self.configuration.data.getVar("BBFILE_COLLECTIONS", True) )
fn, cls = bb.cache.Cache.virtualfn2realfn(buildfile)
fn = self.matchFile(fn)
@@ -539,15 +534,11 @@ class BBCooker:
# Prints a flattened form of package-depends below where subpackages of a package are merged into the main pn
depends_file = file('pn-depends.dot', 'w' )
buildlist_file = file('pn-buildlist', 'w' )
print("digraph depends {", file=depends_file)
for pn in depgraph["pn"]:
fn = depgraph["pn"][pn]["filename"]
version = depgraph["pn"][pn]["version"]
print('"%s" [label="%s %s\\n%s"]' % (pn, pn, version, fn), file=depends_file)
print("%s" % pn, file=buildlist_file)
buildlist_file.close()
logger.info("PN build list saved to 'pn-buildlist'")
for pn in depgraph["depends"]:
for depend in depgraph["depends"][pn]:
print('"%s" -> "%s"' % (pn, depend), file=depends_file)
@@ -994,12 +985,12 @@ class BBCooker:
"""
Find the .bb files which match the expression in 'buildfile'.
"""
if bf.startswith("/") or bf.startswith("../"):
bf = os.path.abspath(bf)
filelist, masked = self.collect_bbfiles()
try:
os.stat(bf)
bf = os.path.abspath(bf)
return [bf]
except OSError:
regexp = re.compile(bf)
@@ -1039,6 +1030,8 @@ class BBCooker:
# Parse the configuration here. We need to do it explicitly here since
# buildFile() doesn't use the cache
self.parseConfiguration()
self.status = bb.cache.CacheData(self.caches_array)
self.handleCollections( self.configuration.data.getVar("BBFILE_COLLECTIONS", True) )
# If we are told to do the None task then query the default task
if (task == None):
@@ -1060,10 +1053,6 @@ class BBCooker:
info_array = infos[fn]
except KeyError:
bb.fatal("%s does not exist" % fn)
if info_array[0].skipped:
bb.fatal("%s was skipped: %s" % (fn, info_array[0].skipreason))
self.status.add_from_recipeinfo(fn, info_array)
# Tweak some variables
@@ -1077,10 +1066,10 @@ class BBCooker:
self.status.rundeps[fn] = []
self.status.runrecs[fn] = []
# Invalidate task for target if force mode active
# Remove stamp for target if force mode active
if self.configuration.force:
logger.verbose("Invalidate task %s, %s", task, fn)
bb.parse.siggen.invalidate_task('do_%s' % task, self.status, fn)
logger.verbose("Remove stamp %s, %s", task, fn)
bb.build.del_stamp('do_%s' % task, self.status, fn)
# Setup taskdata structure
taskdata = bb.taskdata.TaskData(self.configuration.abort)
@@ -1186,18 +1175,24 @@ class BBCooker:
return
if self.state in (state.shutdown, state.stop):
self.parser.shutdown(clean=False, force = True)
self.parser.shutdown(clean=False)
sys.exit(1)
if self.state != state.parsing:
self.parseConfiguration ()
if self.status:
del self.status
self.status = bb.cache.CacheData(self.caches_array)
ignore = self.configuration.data.getVar("ASSUME_PROVIDED", True) or ""
self.status.ignored_dependencies = set(ignore.split())
for dep in self.configuration.extra_assume_provided:
self.status.ignored_dependencies.add(dep)
self.handleCollections( self.configuration.data.getVar("BBFILE_COLLECTIONS", True) )
(filelist, masked) = self.collect_bbfiles()
self.configuration.data.renameVar("__depends", "__base_depends")
@@ -1206,8 +1201,6 @@ class BBCooker:
if not self.parser.parse_next():
collectlog.debug(1, "parsing complete")
if self.parser.error:
sys.exit(1)
self.show_appends_with_no_recipes()
self.buildDepgraph()
self.state = state.running
@@ -1577,7 +1570,6 @@ class CookerParser(object):
def init():
Parser.cfg = self.cfgdata
multiprocessing.util.Finalize(None, bb.codeparser.parser_cache_save, args=(self.cfgdata,), exitpriority=1)
multiprocessing.util.Finalize(None, bb.fetch.fetcher_parse_save, args=(self.cfgdata,), exitpriority=1)
self.feeder_quit = multiprocessing.Queue(maxsize=1)
self.parser_quit = multiprocessing.Queue(maxsize=self.num_processes)
@@ -1604,7 +1596,6 @@ class CookerParser(object):
self.skipped, self.masked,
self.virtuals, self.error,
self.total)
bb.event.fire(event, self.cfgdata)
self.feeder_quit.put(None)
for process in self.processes:
@@ -1617,20 +1608,16 @@ class CookerParser(object):
self.parser_quit.put(None)
self.jobs.cancel_join_thread()
sys.exit(1)
for process in self.processes:
if force:
process.join(.1)
process.terminate()
else:
process.join()
process.join()
self.feeder.join()
sync = threading.Thread(target=self.bb_cache.sync)
sync.start()
multiprocessing.util.Finalize(None, sync.join, exitpriority=-100)
bb.codeparser.parser_cache_savemerge(self.cooker.configuration.data)
bb.fetch.fetcher_parse_done(self.cooker.configuration.data)
def load_cached(self):
for filename, appends in self.fromcache:
@@ -1654,47 +1641,26 @@ class CookerParser(object):
yield result
def parse_next(self):
result = []
parsed = None
try:
parsed, result = self.results.next()
except StopIteration:
self.shutdown()
return False
except bb.BBHandledException as exc:
self.error += 1
logger.error('Failed to parse recipe: %s' % exc.recipe)
self.shutdown(clean=False)
return False
except ParsingFailure as exc:
self.error += 1
logger.error('Unable to parse %s: %s' %
(exc.recipe, bb.exceptions.to_string(exc.realexception)))
self.shutdown(clean=False)
return False
except bb.parse.ParseError as exc:
self.error += 1
except (bb.parse.ParseError, bb.data_smart.ExpansionError) as exc:
logger.error(str(exc))
self.shutdown(clean=False)
return False
except bb.data_smart.ExpansionError as exc:
self.error += 1
_, value, _ = sys.exc_info()
logger.error('ExpansionError during parsing %s: %s', value.recipe, str(exc))
self.shutdown(clean=False)
return False
except SyntaxError as exc:
self.error += 1
logger.error('Unable to parse %s', exc.recipe)
self.shutdown(clean=False)
return False
except Exception as exc:
self.error += 1
etype, value, tb = sys.exc_info()
logger.error('Unable to parse %s', value.recipe,
exc_info=(etype, value, exc.traceback))
self.shutdown(clean=False)
return False
self.current += 1
self.virtuals += len(result)


@@ -279,20 +279,13 @@ def build_dependencies(key, keys, shelldeps, vardepvals, d):
deps = set()
vardeps = d.getVarFlag(key, "vardeps", True)
try:
if key[-1] == ']':
vf = key[:-1].split('[')
value = d.getVarFlag(vf[0], vf[1], False)
else:
value = d.getVar(key, False)
value = d.getVar(key, False)
if key in vardepvals:
value = d.getVarFlag(key, "vardepvalue", True)
elif d.getVarFlag(key, "func"):
if d.getVarFlag(key, "python"):
parsedvar = d.expandWithRefs(value, key)
parser = bb.codeparser.PythonParser(key, logger)
if parsedvar.value and "\t" in parsedvar.value:
logger.warn("Variable %s contains tabs, please remove these (%s)" % (key, d.getVar("FILE", True)))
parser.parse_python(parsedvar.value)
deps = deps | parser.references
else:
@@ -308,19 +301,6 @@ def build_dependencies(key, keys, shelldeps, vardepvals, d):
parser = d.expandWithRefs(value, key)
deps |= parser.references
deps = deps | (keys & parser.execs)
# Add varflags, assuming an exclusion list is set
varflagsexcl = d.getVar('BB_SIGNATURE_EXCLUDE_FLAGS', True)
if varflagsexcl:
varfdeps = []
varflags = d.getVarFlags(key)
if varflags:
for f in varflags:
if f not in varflagsexcl:
varfdeps.append('%s[%s]' % (key, f))
if varfdeps:
deps |= set(varfdeps)
deps |= set((vardeps or "").split())
deps -= set((d.getVarFlag(key, "vardepsexclude", True) or "").split())
except:


@@ -39,7 +39,7 @@ from bb.COW import COWDictBase
logger = logging.getLogger("BitBake.Data")
__setvar_keyword__ = ["_append", "_prepend"]
__setvar_regexp__ = re.compile('(?P<base>.*?)(?P<keyword>_append|_prepend)(_(?P<add>.*))?$')
__setvar_regexp__ = re.compile('(?P<base>.*?)(?P<keyword>_append|_prepend)(_(?P<add>.*))?')
__expand_var_regexp__ = re.compile(r"\${[^{}]+}")
__expand_python_regexp__ = re.compile(r"\${@.+?}")
@@ -102,10 +102,7 @@ class ExpansionError(Exception):
self.expression = expression
self.variablename = varname
self.exception = exception
if varname:
self.msg = "Failure expanding variable %s, expression was %s which triggered exception %s: %s" % (varname, expression, type(exception).__name__, exception)
else:
self.msg = "Failure expanding expression %s which triggered exception %s: %s" % (expression, type(exception).__name__, exception)
self.msg = "Failure expanding variable %s, expression was %s which triggered exception %s: %s" % (varname, expression, type(exception).__name__, exception)
Exception.__init__(self, self.msg)
self.args = (varname, expression, exception)
def __str__(self):
@@ -198,12 +195,7 @@ class DataSmart(MutableMapping):
for append in appends:
keep = []
for (a, o) in self.getVarFlag(append, op) or []:
match = True
if o:
for o2 in o.split("_"):
if not o2 in overrides:
match = False
if not match:
if o and not o in overrides:
keep.append((a ,o))
continue
@@ -280,10 +272,10 @@ class DataSmart(MutableMapping):
self._seen_overrides[override].add( var )
# setting var
self.dict[var]["_content"] = value
self.dict[var]["content"] = value
def getVar(self, var, expand=False, noweakdefault=False):
value = self.getVarFlag(var, "_content", False, noweakdefault)
value = self.getVarFlag(var, "content", False, noweakdefault)
# Call expand() separately to make use of the expand cache
if expand and value:
@@ -340,7 +332,7 @@ class DataSmart(MutableMapping):
if local_var:
if flag in local_var:
value = copy.copy(local_var[flag])
elif flag == "_content" and "defaultval" in local_var and not noweakdefault:
elif flag == "content" and "defaultval" in local_var and not noweakdefault:
value = copy.copy(local_var["defaultval"])
if expand and value:
value = self.expand(value, None)
@@ -369,7 +361,7 @@ class DataSmart(MutableMapping):
self._makeShadowCopy(var)
for i in flags:
if i == "_content":
if i == "content":
continue
self.dict[var][i] = flags[i]
@@ -379,7 +371,7 @@ class DataSmart(MutableMapping):
if local_var:
for i in local_var:
if i.startswith("_"):
if i == "content":
continue
flags[i] = local_var[i]
@@ -396,10 +388,10 @@ class DataSmart(MutableMapping):
content = None
# try to save the content
if "_content" in self.dict[var]:
content = self.dict[var]["_content"]
if "content" in self.dict[var]:
content = self.dict[var]["content"]
self.dict[var] = {}
self.dict[var]["_content"] = content
self.dict[var]["content"] = content
else:
del self.dict[var]


@@ -175,7 +175,7 @@ def register(name, handler):
_handlers[name] = noop
return
env = {}
bb.utils.better_exec(code, env)
bb.utils.simple_exec(code, env)
func = bb.utils.better_eval(name, env)
_handlers[name] = func
else:
@@ -312,14 +312,6 @@ class BuildCompleted(BuildBase, OperationCompleted):
OperationCompleted.__init__(self, total, "Building Failed")
BuildBase.__init__(self, n, p, failures)
class DiskFull(Event):
"""Disk full case build aborted"""
def __init__(self, dev, type, freespace, mountpoint):
Event.__init__(self)
self._dev = dev
self._type = type
self._free = freespace
self._mountpoint = mountpoint
class NoProvider(Event):
"""No Provider for an Event"""
@@ -525,21 +517,3 @@ class PackageInfo(Event):
def __init__(self, pkginfolist):
Event.__init__(self)
self._pkginfolist = pkginfolist
class SanityCheck(Event):
"""
Event to issue sanity check
"""
class SanityCheckPassed(Event):
"""
Event to indicate sanity check is passed
"""
class SanityCheckFailed(Event):
"""
Event to indicate sanity check has failed
"""
def __init__(self, msg):
Event.__init__(self)
self._msg = msg


@@ -32,14 +32,7 @@ class TracebackEntry(namedtuple.abc):
def _get_frame_args(frame):
"""Get the formatted arguments and class (if available) for a frame"""
arginfo = inspect.getargvalues(frame)
try:
if not arginfo.args:
return '', None
# There have been reports from the field of python 2.6 which doesn't
# return a namedtuple here but simply a tuple so fallback gracefully if
# args isn't present.
except AttributeError:
if not arginfo.args:
return '', None
firstarg = arginfo.args[0]


@@ -8,7 +8,6 @@ BitBake build tools.
"""
# Copyright (C) 2003, 2004 Chris Larson
# Copyright (C) 2012 Intel Corporation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
@@ -29,13 +28,10 @@ from __future__ import absolute_import
from __future__ import print_function
import os, re
import logging
import urllib
import bb.persist_data, bb.utils
import bb.checksum
from bb import data
__version__ = "2"
_checksum_cache = bb.checksum.FileChecksumCache()
logger = logging.getLogger("BitBake.Fetcher")
@@ -67,12 +63,6 @@ class FetchError(BBFetchException):
BBFetchException.__init__(self, msg)
self.args = (message, url)
class ChecksumError(FetchError):
"""Exception when mismatched checksum encountered"""
class NoChecksumError(FetchError):
"""Exception when no checksum is specified, but BB_STRICT_CHECKSUM is set"""
class UnpackError(BBFetchException):
"""General fetcher exception when something happens incorrectly when unpacking"""
def __init__(self, message, url):
@@ -109,15 +99,12 @@ class ParameterError(BBFetchException):
class NetworkAccess(BBFetchException):
"""Exception raised when network access is disabled but it is required."""
def __init__(self, url, cmd):
msg = "Network access disabled through BB_NO_NETWORK but access requested with command %s (for url %s)" % (cmd, url)
msg = "Network access disabled through BB_NO_NETWORK but access rquested with command %s (for url %s)" % (cmd, url)
self.url = url
self.cmd = cmd
BBFetchException.__init__(self, msg)
self.args = (url, cmd)
class NonLocalMethod(Exception):
def __init__(self):
Exception.__init__(self)
def decodeurl(url):
"""Decodes an URL into the tokens (scheme, network location, path,
@@ -157,14 +144,14 @@ def decodeurl(url):
s1, s2 = s.split('=')
p[s1] = s2
return type, host, urllib.unquote(path), user, pswd, p
return (type, host, path, user, pswd, p)
def encodeurl(decoded):
"""Encodes a URL from tokens (scheme, network location, path,
user, password, parameters).
"""
type, host, path, user, pswd, p = decoded
(type, host, path, user, pswd, p) = decoded
if not path:
raise MissingParameterError('path', "encoded from the data %s" % str(decoded))
@@ -178,64 +165,43 @@ def encodeurl(decoded):
url += "@"
if host and type != "file":
url += "%s" % host
url += "%s" % urllib.quote(path)
url += "%s" % path
if p:
for parm in p:
url += ";%s=%s" % (parm, p[parm])
return url
def uri_replace(ud, uri_find, uri_replace, replacements, d):
def uri_replace(ud, uri_find, uri_replace, d):
if not ud.url or not uri_find or not uri_replace:
logger.error("uri_replace: passed an undefined value, not replacing")
return None
logger.debug(1, "uri_replace: passed an undefined value, not replacing")
uri_decoded = list(decodeurl(ud.url))
uri_find_decoded = list(decodeurl(uri_find))
uri_replace_decoded = list(decodeurl(uri_replace))
logger.debug(2, "For url %s comparing %s to %s" % (uri_decoded, uri_find_decoded, uri_replace_decoded))
result_decoded = ['', '', '', '', '', {}]
for loc, i in enumerate(uri_find_decoded):
for i in uri_find_decoded:
loc = uri_find_decoded.index(i)
result_decoded[loc] = uri_decoded[loc]
regexp = i
if loc == 0 and regexp and not regexp.endswith("$"):
# Leaving the type unanchored can mean "https" matching "file" can become "files"
# which is clearly undesirable.
regexp += "$"
if loc == 5:
# Handle URL parameters
if i:
# Any specified URL parameters must match
for k in uri_replace_decoded[loc]:
if uri_decoded[loc][k] != uri_replace_decoded[loc][k]:
return None
# Overwrite any specified replacement parameters
for k in uri_replace_decoded[loc]:
result_decoded[loc][k] = uri_replace_decoded[loc][k]
elif (re.match(regexp, uri_decoded[loc])):
if not uri_replace_decoded[loc]:
result_decoded[loc] = ""
if isinstance(i, basestring):
if (re.match(i, uri_decoded[loc])):
if not uri_replace_decoded[loc]:
result_decoded[loc] = ""
else:
result_decoded[loc] = re.sub(i, uri_replace_decoded[loc], uri_decoded[loc])
if uri_find_decoded.index(i) == 2:
basename = None
if ud.mirrortarball:
basename = os.path.basename(ud.mirrortarball)
elif ud.localpath:
basename = os.path.basename(ud.localpath)
if basename and result_decoded[loc].endswith("/"):
result_decoded[loc] = os.path.dirname(result_decoded[loc])
if basename and not result_decoded[loc].endswith(basename):
result_decoded[loc] = os.path.join(result_decoded[loc], basename)
else:
for k in replacements:
uri_replace_decoded[loc] = uri_replace_decoded[loc].replace(k, replacements[k])
#bb.note("%s %s %s" % (regexp, uri_replace_decoded[loc], uri_decoded[loc]))
result_decoded[loc] = re.sub(regexp, uri_replace_decoded[loc], uri_decoded[loc])
if loc == 2:
# Handle path manipulations
basename = None
if uri_decoded[0] != uri_replace_decoded[0] and ud.mirrortarball:
# If the source and destination url types differ, must be a mirrortarball mapping
basename = os.path.basename(ud.mirrortarball)
# Kill parameters, they make no sense for mirror tarballs
uri_decoded[5] = {}
elif ud.localpath and ud.method.supports_checksum(ud):
basename = os.path.basename(ud.localpath)
if basename and not result_decoded[loc].endswith(basename):
result_decoded[loc] = os.path.join(result_decoded[loc], basename)
else:
return None
return ud.url
result = encodeurl(result_decoded)
if result == ud.url:
return None
logger.debug(2, "For url %s returning %s" % (ud.url, result))
return result
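The rewritten uri_replace matches each decoded component of the original URL against the corresponding component of the mirror's find pattern, then substitutes the replace pattern after expanding placeholders such as TYPE, HOST, PATH and BASENAME. A minimal standalone sketch of that idea (modern Python, hypothetical helper names rather than BitBake's API):

import re
from urllib.parse import urlparse

def rewrite_uri(url, find, replace, replacements):
    """Toy mirror rewrite: match each component of url against find and
    rebuild it from replace, expanding placeholder keys first."""
    u, f, r = urlparse(url), urlparse(find), urlparse(replace)
    out = []
    for value, pattern, repl in [(u.scheme, f.scheme, r.scheme),
                                 (u.netloc, f.netloc, r.netloc),
                                 (u.path, f.path, r.path)]:
        if pattern and not pattern.endswith("$"):
            pattern += "$"          # anchor so "http" cannot partially rewrite "https"
        if pattern and not re.match(pattern, value):
            return None             # this mirror line does not apply
        for key, val in replacements.items():
            repl = repl.replace(key, val)
        out.append(re.sub(pattern, repl, value) if pattern else value)
    return "%s://%s%s" % (out[0], out[1], out[2])

url = "http://example.org/pub/foo-1.0.tar.gz"
print(rewrite_uri(url, "http://example.org/.*", "http://mirror.local/sources/BASENAME",
                  {"BASENAME": url.rsplit("/", 1)[-1]}))
# -> http://mirror.local/sources/foo-1.0.tar.gz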
@@ -263,18 +229,10 @@ def fetcher_init(d):
else:
raise FetchError("Invalid SRCREV cache policy of: %s" % srcrev_policy)
_checksum_cache.init_cache(d)
for m in methods:
if hasattr(m, "init"):
m.init(d)
def fetcher_parse_save(d):
_checksum_cache.save_extras(d)
def fetcher_parse_done(d):
_checksum_cache.save_merge(d)
def fetcher_compare_revisions(d):
"""
Compare the revisions in the persistant cache with current values and
@@ -301,37 +259,39 @@ def verify_checksum(u, ud, d):
"""
verify the MD5 and SHA256 checksum for downloaded src
Raises a FetchError if one or both of the SRC_URI checksums do not match
the downloaded file, or if BB_STRICT_CHECKSUM is set and there are no
checksums specified.
return value:
- True: a checksum matched
- False: neither checksum matched
if checksum is missing in recipes file, "BB_STRICT_CHECKSUM" decide the return value.
if BB_STRICT_CHECKSUM = "1" then return false as unmatched, otherwise return true as
matched
"""
if not ud.method.supports_checksum(ud):
if not ud.type in ["http", "https", "ftp", "ftps"]:
return
md5data = bb.utils.md5_file(ud.localpath)
sha256data = bb.utils.sha256_file(ud.localpath)
if ud.method.recommends_checksum(ud):
# If strict checking enabled and neither sum defined, raise error
strict = d.getVar("BB_STRICT_CHECKSUM", True) or None
if (strict and ud.md5_expected == None and ud.sha256_expected == None):
raise NoChecksumError('No checksum specified for %s, please add at least one to the recipe:\n'
'SRC_URI[%s] = "%s"\nSRC_URI[%s] = "%s"' %
(ud.localpath, ud.md5_name, md5data,
ud.sha256_name, sha256data), u)
# If strict checking enabled and neither sum defined, raise error
strict = d.getVar("BB_STRICT_CHECKSUM", True) or None
if (strict and ud.md5_expected == None and ud.sha256_expected == None):
raise FetchError('No checksum specified for %s, please add at least one to the recipe:\n'
'SRC_URI[%s] = "%s"\nSRC_URI[%s] = "%s"' %
(ud.localpath, ud.md5_name, md5data,
ud.sha256_name, sha256data), u)
# Log missing sums so user can more easily add them
if ud.md5_expected == None:
logger.warn('Missing md5 SRC_URI checksum for %s, consider adding to the recipe:\n'
'SRC_URI[%s] = "%s"',
ud.localpath, ud.md5_name, md5data)
# Log missing sums so user can more easily add them
if ud.md5_expected == None:
logger.warn('Missing md5 SRC_URI checksum for %s, consider adding to the recipe:\n'
'SRC_URI[%s] = "%s"',
ud.localpath, ud.md5_name, md5data)
if ud.sha256_expected == None:
logger.warn('Missing sha256 SRC_URI checksum for %s, consider adding to the recipe:\n'
'SRC_URI[%s] = "%s"',
ud.localpath, ud.sha256_name, sha256data)
if ud.sha256_expected == None:
logger.warn('Missing sha256 SRC_URI checksum for %s, consider adding to the recipe:\n'
'SRC_URI[%s] = "%s"',
ud.localpath, ud.sha256_name, sha256data)
md5mismatch = False
sha256mismatch = False
@@ -345,20 +305,14 @@ def verify_checksum(u, ud, d):
# We want to alert the user if a checksum is defined in the recipe but
# it does not match.
msg = ""
mismatch = False
if md5mismatch and ud.md5_expected:
msg = msg + "\nFile: '%s' has %s checksum %s when %s was expected" % (ud.localpath, 'md5', md5data, ud.md5_expected)
mismatch = True;
if sha256mismatch and ud.sha256_expected:
msg = msg + "\nFile: '%s' has %s checksum %s when %s was expected" % (ud.localpath, 'sha256', sha256data, ud.sha256_expected)
mismatch = True;
if mismatch:
msg = msg + '\nYour checksums:\nSRC_URI[%s] = "%s"\nSRC_URI[%s] = "%s"' % (ud.md5_name, md5data, ud.sha256_name, sha256data)
if len(msg):
raise ChecksumError('Checksum mismatch!%s' % msg, u)
raise FetchError('Checksum mismatch!%s' % msg, u)
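For reference, the check that verify_checksum performs reduces to hashing the downloaded file and comparing the digests against any md5sum/sha256sum values given in SRC_URI, warning or failing when none are provided. A minimal sketch using plain file paths and a boolean in place of the datastore and BB_STRICT_CHECKSUM:

import hashlib

def file_digest(path, algo):
    h = hashlib.new(algo)
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(8192), b""):
            h.update(chunk)
    return h.hexdigest()

def verify(path, md5_expected=None, sha256_expected=None, strict=False):
    md5 = file_digest(path, "md5")
    sha256 = file_digest(path, "sha256")
    if md5_expected is None and sha256_expected is None:
        msg = ('No checksum specified, add e.g.\n'
               'SRC_URI[md5sum] = "%s"\nSRC_URI[sha256sum] = "%s"' % (md5, sha256))
        if strict:
            raise ValueError(msg)       # mirrors the no-checksum error path
        print("WARNING: " + msg)        # otherwise just log the missing sums
        return
    errors = []
    if md5_expected and md5 != md5_expected:
        errors.append("md5 is %s, expected %s" % (md5, md5_expected))
    if sha256_expected and sha256 != sha256_expected:
        errors.append("sha256 is %s, expected %s" % (sha256, sha256_expected))
    if errors:
        raise ValueError("Checksum mismatch!\n" + "\n".join(errors))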
def update_stamp(u, ud, d):
@@ -467,10 +421,11 @@ def runfetchcmd(cmd, d, quiet = False, cleanup = []):
success = True
except bb.process.NotFoundError as e:
error_message = "Fetch command %s" % (e.command)
except bb.process.ExecutionError as e:
error_message = "Fetch command %s failed with exit code %s, output:\nSTDOUT: %s\nSTDERR: %s" % (e.command, e.exitcode, e.stdout, e.stderr)
except bb.process.CmdError as e:
error_message = "Fetch command %s could not be run:\n%s" % (e.command, e.msg)
except bb.process.ExecutionError as e:
error_message = "Fetch command %s failed with exit code %s, output:\n%s" % (e.command, e.exitcode, e.stderr)
if not success:
for f in cleanup:
try:
@@ -491,107 +446,6 @@ def check_network_access(d, info = "", url = None):
else:
logger.debug(1, "Fetcher accessed the network with the command %s" % info)
def build_mirroruris(origud, mirrors, ld):
uris = []
uds = []
replacements = {}
replacements["TYPE"] = origud.type
replacements["HOST"] = origud.host
replacements["PATH"] = origud.path
replacements["BASENAME"] = origud.path.split("/")[-1]
replacements["MIRRORNAME"] = origud.host.replace(':','.') + origud.path.replace('/', '.').replace('*', '.')
def adduri(uri, ud, uris, uds):
for line in mirrors:
try:
(find, replace) = line
except ValueError:
continue
newuri = uri_replace(ud, find, replace, replacements, ld)
if not newuri or newuri in uris or newuri == origud.url:
continue
try:
newud = FetchData(newuri, ld)
newud.setup_localpath(ld)
except bb.fetch2.BBFetchException as e:
logger.debug(1, "Mirror fetch failure for url %s (original url: %s)" % (newuri, origud.url))
logger.debug(1, str(e))
try:
ud.method.clean(ud, ld)
except UnboundLocalError:
pass
continue
uris.append(newuri)
uds.append(newud)
adduri(newuri, newud, uris, uds)
adduri(None, origud, uris, uds)
return uris, uds
def try_mirror_url(newuri, origud, ud, ld, check = False):
# Return of None or a value means we're finished
# False means try another url
try:
if check:
found = ud.method.checkstatus(newuri, ud, ld)
if found:
return found
return False
os.chdir(ld.getVar("DL_DIR", True))
if not os.path.exists(ud.donestamp) or ud.method.need_update(newuri, ud, ld):
ud.method.download(newuri, ud, ld)
if hasattr(ud.method,"build_mirror_data"):
ud.method.build_mirror_data(newuri, ud, ld)
if not ud.localpath or not os.path.exists(ud.localpath):
return False
if ud.localpath == origud.localpath:
return ud.localpath
# We may be obtaining a mirror tarball which needs further processing by the real fetcher
# If that tarball is a local file:// we need to provide a symlink to it
dldir = ld.getVar("DL_DIR", True)
if origud.mirrortarball and os.path.basename(ud.localpath) == os.path.basename(origud.mirrortarball) \
and os.path.basename(ud.localpath) != os.path.basename(origud.localpath):
open(ud.donestamp, 'w').close()
dest = os.path.join(dldir, os.path.basename(ud.localpath))
if not os.path.exists(dest):
os.symlink(ud.localpath, dest)
return None
# Otherwise the result is a local file:// and we symlink to it
if not os.path.exists(origud.localpath):
if os.path.islink(origud.localpath):
# Broken symbolic link
os.unlink(origud.localpath)
os.symlink(ud.localpath, origud.localpath)
update_stamp(newuri, origud, ld)
return ud.localpath
except bb.fetch2.NetworkAccess:
raise
except bb.fetch2.BBFetchException as e:
if isinstance(e, ChecksumError):
logger.warn("Mirror checksum failure for url %s (original url: %s)\nCleaning and trying again." % (newuri, origud.url))
logger.warn(str(e))
elif isinstance(e, NoChecksumError):
raise
else:
logger.debug(1, "Mirror fetch failure for url %s (original url: %s)" % (newuri, origud.url))
logger.debug(1, str(e))
try:
ud.method.clean(ud, ld)
except UnboundLocalError:
pass
return False
def try_mirrors(d, origud, mirrors, check = False):
"""
Try to use a mirrored version of the sources.
@@ -602,13 +456,62 @@ def try_mirrors(d, origud, mirrors, check = False):
mirrors is the list of mirrors we're going to try
"""
ld = d.createCopy()
for line in mirrors:
try:
(find, replace) = line
except ValueError:
continue
newuri = uri_replace(origud, find, replace, ld)
if newuri == origud.url:
continue
try:
ud = FetchData(newuri, ld)
ud.setup_localpath(ld)
uris, uds = build_mirroruris(origud, mirrors, ld)
if check:
found = ud.method.checkstatus(newuri, ud, ld)
if found:
return found
continue
for index, uri in enumerate(uris):
ret = try_mirror_url(uri, origud, uds[index], ld, check)
if ret != False:
return ret
if not os.path.exists(ud.donestamp) or ud.method.need_update(newuri, ud, ld):
ud.method.download(newuri, ud, ld)
if os.path.exists(ud.localpath):
open(ud.donestamp, 'w').close()
if hasattr(ud.method,"build_mirror_data"):
ud.method.build_mirror_data(newuri, ud, ld)
if not ud.localpath or not os.path.exists(ud.localpath):
continue
if ud.localpath == origud.localpath:
return ud.localpath
# We may be obtaining a mirror tarball which needs further processing by the real fetcher
# If that tarball is a local file:// we need to provide a symlink to it
dldir = ld.getVar("DL_DIR", True)
if os.path.basename(ud.localpath) != os.path.basename(origud.localpath):
dest = os.path.join(dldir, os.path.basename(ud.localpath))
if not os.path.exists(dest):
os.symlink(ud.localpath, dest)
return None
# Otherwise the result is a local file:// and we symlink to it
if not os.path.exists(origud.localpath):
os.symlink(ud.localpath, origud.localpath)
return ud.localpath
except bb.fetch2.NetworkAccess:
raise
except bb.fetch2.BBFetchException as e:
logger.debug(1, "Mirror fetch failure for url %s (original url: %s)" % (newuri, origud.url))
logger.debug(1, str(e))
try:
if os.path.isfile(ud.localpath):
bb.utils.remove(ud.localpath)
except UnboundLocalError:
pass
continue
return None
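The newer code above splits mirror handling in two: build_mirroruris expands every applicable mirror line into concrete candidate URLs (following chains of mirrors and skipping duplicates), and try_mirrors then walks those candidates, stopping at the first one that yields a result. A schematic version of that control flow, with hypothetical rewrite/fetch callables standing in for the FetchData machinery:

def build_candidates(url, mirror_lines, rewrite):
    """Expand mirror lines into candidate URLs, following chains
    (a mirror of a mirror) and skipping duplicates."""
    seen, queue, out = {url}, [url], []
    while queue:
        current = queue.pop(0)
        for find, replace in mirror_lines:
            candidate = rewrite(current, find, replace)
            if candidate and candidate not in seen:
                seen.add(candidate)
                out.append(candidate)
                queue.append(candidate)
    return out

def try_mirrors(url, mirror_lines, rewrite, fetch_one):
    """Return the first successful download, or None if every mirror fails."""
    for candidate in build_candidates(url, mirror_lines, rewrite):
        try:
            result = fetch_one(candidate)    # e.g. download into DL_DIR
        except Exception:
            continue                         # log and fall through to the next mirror
        if result:
            return result
    return None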
def srcrev_internal_helper(ud, d, name):
@@ -642,85 +545,11 @@ def srcrev_internal_helper(ud, d, name):
return rev
def get_checksum_file_list(d):
""" Get a list of files checksum in SRC_URI
Returns the resolved local paths of all local file entries in
SRC_URI as a space-separated string
"""
fetch = Fetch([], d, cache = False, localonly = True)
dl_dir = d.getVar('DL_DIR', True)
filelist = []
for u in fetch.urls:
ud = fetch.ud[u]
if ud and isinstance(ud.method, local.Local):
ud.setup_localpath(d)
f = ud.localpath
if f.startswith(dl_dir):
# The local fetcher's behaviour is to return a path under DL_DIR if it couldn't find the file anywhere else
if os.path.exists(f):
bb.warn("Getting checksum for %s SRC_URI entry %s: file not found except in DL_DIR" % (d.getVar('PN', True), os.path.basename(f)))
else:
bb.warn("Unable to get checksum for %s SRC_URI entry %s: file could not be found" % (d.getVar('PN', True), os.path.basename(f)))
continue
filelist.append(f)
return " ".join(filelist)
def get_file_checksums(filelist, pn):
"""Get a list of the checksums for a list of local files
Returns the checksums for a list of local files, caching the results as
it proceeds
"""
def checksum_file(f):
try:
checksum = _checksum_cache.get_checksum(f)
except OSError as e:
import traceback
bb.warn("Unable to get checksum for %s SRC_URI entry %s: %s" % (pn, os.path.basename(f), e))
return None
return checksum
checksums = []
for pth in filelist.split():
checksum = None
if '*' in pth:
# Handle globs
import glob
for f in glob.glob(pth):
checksum = checksum_file(f)
if checksum:
checksums.append((f, checksum))
elif os.path.isdir(pth):
# Handle directories
for root, dirs, files in os.walk(pth):
for name in files:
fullpth = os.path.join(root, name)
checksum = checksum_file(fullpth)
if checksum:
checksums.append((fullpth, checksum))
else:
checksum = checksum_file(pth)
if checksum:
checksums.append((pth, checksum))
checksums.sort()
return checksums
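get_file_checksums walks each local SRC_URI entry, expanding globs and recursing into directories, and records a checksum per file (through a persistent cache in the real code). The core of that walk, with sha256 standing in for the cached checksum, looks roughly like this:

import glob
import hashlib
import os

def checksum_file(path):
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(8192), b""):
            h.update(chunk)
    return h.hexdigest()

def file_checksums(filelist):
    """Return sorted (path, checksum) pairs for files, globs and directories."""
    checksums = []
    for pth in filelist.split():
        if '*' in pth:
            paths = glob.glob(pth)
        elif os.path.isdir(pth):
            paths = [os.path.join(root, name)
                     for root, dirs, files in os.walk(pth) for name in files]
        else:
            paths = [pth]
        for f in paths:
            try:
                checksums.append((f, checksum_file(f)))
            except OSError as e:
                print("WARNING: unable to checksum %s: %s" % (f, e))
    checksums.sort()
    return checksums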
class FetchData(object):
"""
A class which represents the fetcher state for a given URI.
"""
def __init__(self, url, d, localonly = False):
def __init__(self, url, d):
# localpath is the location of a downloaded result. If not set, the file is local.
self.donestamp = None
self.localfile = ""
@@ -745,14 +574,10 @@ class FetchData(object):
self.sha256_name = "sha256sum"
if self.md5_name in self.parm:
self.md5_expected = self.parm[self.md5_name]
elif self.type not in ["http", "https", "ftp", "ftps"]:
self.md5_expected = None
else:
self.md5_expected = d.getVarFlag("SRC_URI", self.md5_name)
if self.sha256_name in self.parm:
self.sha256_expected = self.parm[self.sha256_name]
elif self.type not in ["http", "https", "ftp", "ftps"]:
self.sha256_expected = None
else:
self.sha256_expected = d.getVarFlag("SRC_URI", self.sha256_name)
@@ -767,13 +592,6 @@ class FetchData(object):
if not self.method:
raise NoMethodError(url)
if localonly and not isinstance(self.method, local.Local):
raise NonLocalMethod()
if self.parm.get("proto", None) and "protocol" not in self.parm:
logger.warn('Consider updating %s recipe to use "protocol" not "proto" in SRC_URI.', d.getVar('PN', True))
self.parm["protocol"] = self.parm.get("proto", None)
if hasattr(self.method, "urldata_init"):
self.method.urldata_init(self, d)
@@ -838,26 +656,6 @@ class FetchMethod(object):
"""
return os.path.join(data.getVar("DL_DIR", d, True), urldata.localfile)
def supports_checksum(self, urldata):
"""
Is localpath something that can be represented by a checksum?
"""
# We cannot compute checksums for directories
if os.path.isdir(urldata.localpath) == True:
return False
if urldata.localpath.find("*") != -1:
return False
return True
def recommends_checksum(self, urldata):
"""
Is the backend on where checksumming is recommended (should warnings
by displayed if there is no checksum)?
"""
return False
def _strip_leading_slashes(self, relpath):
"""
Remove leading slash as os.path.join can't cope
@@ -908,7 +706,7 @@ class FetchMethod(object):
dots = file.split(".")
if dots[-1] in ['gz', 'bz2', 'Z']:
efile = os.path.join(rootdir, os.path.basename('.'.join(dots[0:-1])))
efile = os.path.join(data.getVar('WORKDIR', True),os.path.basename('.'.join(dots[0:-1])))
else:
efile = file
cmd = None
@@ -984,9 +782,7 @@ class FetchMethod(object):
bb.utils.mkdirhier(newdir)
os.chdir(newdir)
path = data.getVar('PATH', True)
if path:
cmd = "PATH=\"%s\" %s" % (path, cmd)
cmd = "PATH=\"%s\" %s" % (data.getVar('PATH', True), cmd)
bb.note("Unpacking %s to %s/" % (file, os.getcwd()))
ret = subprocess.call(cmd, preexec_fn=subprocess_setup, shell=True)
@@ -1097,10 +893,7 @@ class FetchMethod(object):
return "%s-%s" % (key, d.getVar("PN", True) or "")
class Fetch(object):
def __init__(self, urls, d, cache = True, localonly = False):
if localonly and cache:
raise Exception("bb.fetch2.Fetch.__init__: cannot set cache and localonly at same time")
def __init__(self, urls, d, cache = True):
if len(urls) == 0:
urls = d.getVar("SRC_URI", True).split()
self.urls = urls
@@ -1108,19 +901,14 @@ class Fetch(object):
self.ud = {}
fn = d.getVar('FILE', True)
if cache and fn and fn in urldata_cache:
if cache and fn in urldata_cache:
self.ud = urldata_cache[fn]
for url in urls:
if url not in self.ud:
try:
self.ud[url] = FetchData(url, d, localonly)
except NonLocalMethod:
if localonly:
self.ud[url] = None
pass
self.ud[url] = FetchData(url, d)
if fn and cache:
if cache:
urldata_cache[fn] = self.ud
def localpath(self, url):
@@ -1174,8 +962,6 @@ class Fetch(object):
if premirroronly:
self.d.setVar("BB_NO_NETWORK", "1")
os.chdir(self.d.getVar("DL_DIR", True))
firsterr = None
if not localpath and ((not os.path.exists(ud.donestamp)) or m.need_update(u, ud, self.d)):
try:
@@ -1192,16 +978,12 @@ class Fetch(object):
raise
except BBFetchException as e:
if isinstance(e, ChecksumError):
logger.warn("Checksum error encountered with download (will attempt other sources): %s" % str(e))
if isinstance(e, NoChecksumError):
raise
else:
logger.warn('Failed to fetch URL %s, attempting MIRRORS if available' % u)
logger.debug(1, str(e))
logger.warn('Failed to fetch URL %s' % u)
logger.debug(1, str(e))
firsterr = e
# Remove any incomplete fetch
m.clean(ud, self.d)
if os.path.isfile(ud.localpath):
bb.utils.remove(ud.localpath)
logger.debug(1, "Trying MIRRORS")
mirrors = mirror_from_string(self.d.getVar('MIRRORS', True))
localpath = try_mirrors (self.d, ud, mirrors)
@@ -1213,11 +995,6 @@ class Fetch(object):
update_stamp(u, ud, self.d)
except BBFetchException as e:
if isinstance(e, NoChecksumError):
logger.error("%s" % str(e))
raise
finally:
bb.utils.unlockfile(lf)
@@ -1244,7 +1021,7 @@ class Fetch(object):
except:
# Finally, try checking uri, u, from MIRRORS
mirrors = mirror_from_string(self.d.getVar('MIRRORS', True))
ret = try_mirrors(self.d, ud, mirrors, True)
ret = try_mirrors (self.d, ud, mirrors, True)
if not ret:
raise FetchError("URL %s doesn't work" % u, u)


@@ -60,7 +60,7 @@ class Bzr(FetchMethod):
basecmd = data.expand('${FETCHCMD_bzr}', d)
proto = ud.parm.get('protocol', 'http')
proto = ud.parm.get('proto', 'http')
bzrroot = ud.host + ud.path
@@ -73,7 +73,7 @@ class Bzr(FetchMethod):
options.append("-r %s" % ud.revision)
if command == "fetch":
bzrcmd = "%s branch %s %s://%s" % (basecmd, " ".join(options), proto, bzrroot)
bzrcmd = "%s co %s %s://%s" % (basecmd, " ".join(options), proto, bzrroot)
elif command == "update":
bzrcmd = "%s pull %s --overwrite" % (basecmd, " ".join(options))
else:


@@ -111,9 +111,15 @@ class Cvs(FetchMethod):
if ud.tag:
options.append("-r %s" % ud.tag)
cvsbasecmd = d.getVar("FETCHCMD_cvs", True)
cvscmd = cvsbasecmd + "'-d" + cvsroot + "' co " + " ".join(options) + " " + ud.module
cvsupdatecmd = cvsbasecmd + "'-d" + cvsroot + "' update -d -P " + " ".join(options)
localdata = data.createCopy(d)
data.setVar('OVERRIDES', "cvs:%s" % data.getVar('OVERRIDES', localdata), localdata)
data.update_data(localdata)
data.setVar('CVSROOT', cvsroot, localdata)
data.setVar('CVSCOOPTS', " ".join(options), localdata)
data.setVar('CVSMODULE', ud.module, localdata)
cvscmd = data.getVar('FETCHCOMMAND', localdata, True)
cvsupdatecmd = data.getVar('UPDATECOMMAND', localdata, True)
if cvs_rsh:
cvscmd = "CVS_RSH=\"%s\" %s" % (cvs_rsh, cvscmd)


@@ -82,9 +82,6 @@ class Git(FetchMethod):
"""
return ud.type in ['git']
def supports_checksum(self, urldata):
return False
def urldata_init(self, ud, d):
"""
init git specific variable within url data
@@ -126,11 +123,10 @@ class Git(FetchMethod):
for name in ud.names:
# Ensure anything that doesn't look like a sha256 checksum/revision is translated into one
if not ud.revisions[name] or len(ud.revisions[name]) != 40 or (False in [c in "abcdef0123456789" for c in ud.revisions[name]]):
if ud.revisions[name]:
ud.branches[name] = ud.revisions[name]
ud.branches[name] = ud.revisions[name]
ud.revisions[name] = self.latest_revision(ud.url, ud, d, name)
gitsrcname = '%s%s' % (ud.host.replace(':','.'), ud.path.replace('/', '.').replace('*', '.'))
gitsrcname = '%s%s' % (ud.host.replace(':','.'), ud.path.replace('/', '.'))
# for rebaseable git repo, it is necessary to keep mirror tar ball
# per revision, so that even the revision disappears from the
# upstream repo in the future, the mirror will remain intact and still
@@ -139,9 +135,8 @@ class Git(FetchMethod):
for name in ud.names:
gitsrcname = gitsrcname + '_' + ud.revisions[name]
ud.mirrortarball = 'git2_%s.tar.gz' % (gitsrcname)
ud.fullmirror = os.path.join(d.getVar("DL_DIR", True), ud.mirrortarball)
gitdir = d.getVar("GITDIR", True) or (d.getVar("DL_DIR", True) + "/git2/")
ud.clonedir = os.path.join(gitdir, gitsrcname)
ud.fullmirror = os.path.join(data.getVar("DL_DIR", d, True), ud.mirrortarball)
ud.clonedir = os.path.join(data.expand('${GITDIR}', d), gitsrcname)
ud.localfile = ud.clonedir
@@ -188,12 +183,8 @@ class Git(FetchMethod):
# If the repo still doesn't exist, fallback to cloning it
if not os.path.exists(ud.clonedir):
# We do this since git will use a "-l" option automatically for local urls where possible
if repourl.startswith("file://"):
repourl = repourl[7:]
clone_cmd = "%s clone --bare --mirror %s %s" % (ud.basecmd, repourl, ud.clonedir)
if ud.proto.lower() != 'file':
bb.fetch2.check_network_access(d, clone_cmd)
bb.fetch2.check_network_access(d, clone_cmd)
runfetchcmd(clone_cmd, d)
os.chdir(ud.clonedir)
@@ -204,14 +195,14 @@ class Git(FetchMethod):
needupdate = True
if needupdate:
try:
runfetchcmd("%s remote prune origin" % ud.basecmd, d)
runfetchcmd("%s remote rm origin" % ud.basecmd, d)
except bb.fetch2.FetchError:
logger.debug(1, "No Origin")
runfetchcmd("%s remote add --mirror=fetch origin %s" % (ud.basecmd, repourl), d)
fetch_cmd = "%s fetch -f --prune %s refs/*:refs/*" % (ud.basecmd, repourl)
if ud.proto.lower() != 'file':
bb.fetch2.check_network_access(d, fetch_cmd, ud.url)
bb.fetch2.check_network_access(d, fetch_cmd, ud.url)
runfetchcmd(fetch_cmd, d)
runfetchcmd("%s prune-packed" % ud.basecmd, d)
runfetchcmd("%s pack-redundant --all | xargs -r rm" % ud.basecmd, d)
@@ -245,22 +236,7 @@ class Git(FetchMethod):
if ud.bareclone:
cloneflags += " --mirror"
# Versions of git prior to 1.7.9.2 have issues where foo.git and foo get confused
# and you end up with some horrible union of the two when you attempt to clone it
# The least invasive workaround seems to be a symlink to the real directory to
# fool git into ignoring any .git version that may also be present.
#
# The issue is fixed in more recent versions of git so we can drop this hack in future
# when that version becomes common enough.
clonedir = ud.clonedir
if not ud.path.endswith(".git"):
indirectiondir = destdir[:-1] + ".indirectionsymlink"
if os.path.exists(indirectiondir):
os.remove(indirectiondir)
os.symlink(ud.clonedir, indirectiondir)
clonedir = indirectiondir
runfetchcmd("git clone %s %s/ %s" % (cloneflags, clonedir, destdir), d)
runfetchcmd("git clone %s %s/ %s" % (cloneflags, ud.clonedir, destdir), d)
if not ud.nocheckout:
os.chdir(destdir)
if subdir != "":
@@ -305,8 +281,7 @@ class Git(FetchMethod):
basecmd = data.getVar("FETCHCMD_git", d, True) or "git"
cmd = "%s ls-remote %s://%s%s%s %s" % \
(basecmd, ud.proto, username, ud.host, ud.path, ud.branches[name])
if ud.proto.lower() != 'file':
bb.fetch2.check_network_access(d, cmd)
bb.fetch2.check_network_access(d, cmd)
output = runfetchcmd(cmd, d, True)
if not output:
raise bb.fetch2.FetchError("The command %s gave empty output unexpectedly" % cmd, url)
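One of the git fetcher hunks above documents a workaround for git versions prior to 1.7.9.2, where cloning a local path such as foo can be confused with a sibling foo.git: the fetcher clones through a throwaway symlink so git only ever sees the intended repository. In isolation the trick looks like this (illustrative paths, not the fetcher's real variables):

import os
import subprocess

def clone_via_indirection(clonedir, destdir, cloneflags=""):
    """Clone 'clonedir' into 'destdir', going through a symlink so that an
    adjacent '<clonedir>.git' directory cannot confuse older git versions."""
    clonesrc = clonedir
    if not clonedir.endswith(".git"):
        indirection = destdir.rstrip("/") + ".indirectionsymlink"
        if os.path.lexists(indirection):
            os.remove(indirection)
        os.symlink(clonedir, indirection)
        clonesrc = indirection
    subprocess.check_call("git clone %s %s/ %s" % (cloneflags, clonesrc, destdir),
                          shell=True)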


@@ -82,7 +82,7 @@ class Hg(FetchMethod):
basecmd = data.expand('${FETCHCMD_hg}', d)
proto = ud.parm.get('protocol', 'http')
proto = ud.parm.get('proto', 'http')
host = ud.host
if proto == "file":


@@ -26,12 +26,10 @@ BitBake build tools.
# Based on functions from the base bb module, Copyright 2003 Holger Schurig
import os
import urllib
import bb
import bb.utils
from bb import data
from bb.fetch2 import FetchMethod, FetchError
from bb.fetch2 import logger
from bb.fetch2 import FetchMethod
class Local(FetchMethod):
def supports(self, url, urldata, d):
@@ -42,31 +40,27 @@ class Local(FetchMethod):
def urldata_init(self, ud, d):
# We don't set localfile as for this fetcher the file is already local!
ud.decodedurl = urllib.unquote(ud.url.split("://")[1].split(";")[0])
ud.basename = os.path.basename(ud.decodedurl)
ud.basename = os.path.basename(ud.url.split("://")[1].split(";")[0])
return
def localpath(self, url, urldata, d):
"""
Return the local filename of a given url assuming a successful fetch.
"""
path = urldata.decodedurl
path = url.split("://")[1]
path = path.split(";")[0]
newpath = path
if path[0] != "/":
filespath = data.getVar('FILESPATH', d, True)
if filespath:
logger.debug(2, "Searching for %s in paths: \n%s" % (path, "\n ".join(filespath.split(":"))))
newpath = bb.utils.which(filespath, path)
if not newpath:
filesdir = data.getVar('FILESDIR', d, True)
if filesdir:
logger.debug(2, "Searching for %s in path: %s" % (path, filesdir))
newpath = os.path.join(filesdir, path)
if not os.path.exists(newpath) and path.find("*") == -1:
dldirfile = os.path.join(d.getVar("DL_DIR", True), path)
logger.debug(2, "Defaulting to %s for %s" % (dldirfile, path))
bb.utils.mkdirhier(os.path.dirname(dldirfile))
return dldirfile
if not os.path.exists(newpath) and path.find("*") == -1:
dldirfile = os.path.join(data.getVar("DL_DIR", d, True), os.path.basename(path))
return dldirfile
return newpath
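The local fetcher's localpath resolution, shown above in both its newer and older forms, boils down to: absolute paths are used as-is, relative paths are searched along FILESPATH (and the legacy FILESDIR), and anything still unresolved that is not a glob falls back to a location under DL_DIR. A compact sketch of that search order using plain strings instead of the datastore:

import os

def which(colon_paths, filename):
    """Return the first existing 'dir/filename' from a colon-separated path list."""
    for d in colon_paths.split(":"):
        candidate = os.path.join(d, filename)
        if os.path.exists(candidate):
            return candidate
    return ""

def resolve_local(path, filespath="", filesdir="", dl_dir="downloads"):
    if path.startswith("/"):
        return path
    newpath = which(filespath, path) if filespath else ""
    if not newpath and filesdir:
        candidate = os.path.join(filesdir, path)
        newpath = candidate if os.path.exists(candidate) else ""
    if not newpath and "*" not in path:
        return os.path.join(dl_dir, os.path.basename(path))  # last-resort location
    return newpath or path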
def need_update(self, url, ud, d):
@@ -79,20 +73,7 @@ class Local(FetchMethod):
def download(self, url, urldata, d):
"""Fetch urls (no-op for Local method)"""
# no need to fetch local files, we'll deal with them in place.
if self.supports_checksum(urldata) and not os.path.exists(urldata.localpath):
locations = []
filespath = data.getVar('FILESPATH', d, True)
if filespath:
locations = filespath.split(":")
filesdir = data.getVar('FILESDIR', d, True)
if filesdir:
locations.append(filesdir)
locations.append(d.getVar("DL_DIR", True))
msg = "Unable to find file " + url + " anywhere. The paths that were searched were:\n " + "\n ".join(locations)
raise FetchError(msg)
return True
return 1
def checkstatus(self, url, urldata, d):
"""


@@ -57,7 +57,7 @@ class Osc(FetchMethod):
basecmd = data.expand('${FETCHCMD_osc}', d)
proto = ud.parm.get('protocol', 'ocs')
proto = ud.parm.get('proto', 'ocs')
options = []


@@ -27,7 +27,6 @@ BitBake build tools.
from future_builtins import zip
import os
import subprocess
import logging
import bb
from bb import data
@@ -91,8 +90,8 @@ class Perforce(FetchMethod):
p4cmd = data.getVar('FETCHCOMMAND_p4', d, True)
logger.debug(1, "Running %s%s changes -m 1 %s", p4cmd, p4opt, depot)
p4file, errors = bb.process.run("%s%s changes -m 1 %s" % (p4cmd, p4opt, depot))
cset = p4file.strip()
p4file = os.popen("%s%s changes -m 1 %s" % (p4cmd, p4opt, depot))
cset = p4file.readline().strip()
logger.debug(1, "READ %s", cset)
if not cset:
return -1
@@ -155,8 +154,8 @@ class Perforce(FetchMethod):
logger.debug(2, "Fetch: creating temporary directory")
bb.utils.mkdirhier(data.expand('${WORKDIR}', localdata))
data.setVar('TMPBASE', data.expand('${WORKDIR}/oep4.XXXXXX', localdata), localdata)
tmpfile, errors = bb.process.run(data.getVar('MKTEMPDIRCMD', localdata, True) or "false")
tmpfile = tmpfile.strip()
tmppipe = os.popen(data.getVar('MKTEMPDIRCMD', localdata, True) or "false")
tmpfile = tmppipe.readline().strip()
if not tmpfile:
raise FetchError("Fetch: unable to create temporary directory.. make sure 'mktemp' is in the PATH.", loc)
@@ -169,8 +168,7 @@ class Perforce(FetchMethod):
os.chdir(tmpfile)
logger.info("Fetch " + loc)
logger.info("%s%s files %s", p4cmd, p4opt, depot)
p4file, errors = bb.process.run("%s%s files %s" % (p4cmd, p4opt, depot))
p4file = p4file.strip()
p4file = os.popen("%s%s files %s" % (p4cmd, p4opt, depot))
if not p4file:
raise FetchError("Fetch: unable to get the P4 files from %s" % depot, loc)
@@ -186,7 +184,7 @@ class Perforce(FetchMethod):
dest = list[0][len(path)+1:]
where = dest.find("#")
subprocess.call("%s%s print -o %s/%s %s" % (p4cmd, p4opt, module, dest[:where], list[0]), shell=True)
os.system("%s%s print -o %s/%s %s" % (p4cmd, p4opt, module, dest[:where], list[0]))
count = count + 1
if count == 0:


@@ -69,9 +69,6 @@ class SSH(FetchMethod):
def supports(self, url, urldata, d):
return __pattern__.match(url) != None
def supports_checksum(self, urldata):
return False
def localpath(self, url, urldata, d):
m = __pattern__.match(urldata.url)
path = m.group('path')


@@ -77,8 +77,8 @@ class Svk(FetchMethod):
logger.debug(2, "Fetch: creating temporary directory")
bb.utils.mkdirhier(data.expand('${WORKDIR}', localdata))
data.setVar('TMPBASE', data.expand('${WORKDIR}/oesvk.XXXXXX', localdata), localdata)
tmpfile, errors = bb.process.run(data.getVar('MKTEMPDIRCMD', localdata, True) or "false")
tmpfile = tmpfile.strip()
tmppipe = os.popen(data.getVar('MKTEMPDIRCMD', localdata, True) or "false")
tmpfile = tmppipe.readline().strip()
if not tmpfile:
logger.error()
raise FetchError("Fetch: unable to create temporary directory.. make sure 'mktemp' is in the PATH.", loc)


@@ -49,8 +49,6 @@ class Svn(FetchMethod):
if not "module" in ud.parm:
raise MissingParameterError('module', ud.url)
ud.basecmd = d.getVar('FETCHCMD_svn', True)
ud.module = ud.parm["module"]
# Create paths to svn checkouts
@@ -71,7 +69,9 @@ class Svn(FetchMethod):
command is "fetch", "update", "info"
"""
proto = ud.parm.get('protocol', 'svn')
basecmd = data.expand('${FETCHCMD_svn}', d)
proto = ud.parm.get('proto', 'svn')
svn_rsh = None
if proto == "svn+ssh" and "rsh" in ud.parm:
@@ -88,7 +88,7 @@ class Svn(FetchMethod):
options.append("--password %s" % ud.pswd)
if command == "info":
svncmd = "%s info %s %s://%s/%s/" % (ud.basecmd, " ".join(options), proto, svnroot, ud.module)
svncmd = "%s info %s %s://%s/%s/" % (basecmd, " ".join(options), proto, svnroot, ud.module)
else:
suffix = ""
if ud.revision:
@@ -96,9 +96,9 @@ class Svn(FetchMethod):
suffix = "@%s" % (ud.revision)
if command == "fetch":
svncmd = "%s co %s %s://%s/%s%s %s" % (ud.basecmd, " ".join(options), proto, svnroot, ud.module, suffix, ud.module)
svncmd = "%s co %s %s://%s/%s%s %s" % (basecmd, " ".join(options), proto, svnroot, ud.module, suffix, ud.module)
elif command == "update":
svncmd = "%s update %s" % (ud.basecmd, " ".join(options))
svncmd = "%s update %s" % (basecmd, " ".join(options))
else:
raise FetchError("Invalid svn command %s" % command, ud.url)
@@ -117,11 +117,6 @@ class Svn(FetchMethod):
logger.info("Update " + loc)
# update sources there
os.chdir(ud.moddir)
# We need to attempt to run svn upgrade first in case its an older working format
try:
runfetchcmd(ud.basecmd + " upgrade", d)
except FetchError:
pass
logger.debug(1, "Running %s", svnupdatecmd)
bb.fetch2.check_network_access(d, svnupdatecmd, ud.url)
runfetchcmd(svnupdatecmd, d)


@@ -45,52 +45,47 @@ class Wget(FetchMethod):
"""
return ud.type in ['http', 'https', 'ftp']
def recommends_checksum(self, urldata):
return True
def urldata_init(self, ud, d):
if 'downloadfilename' in ud.parm:
ud.basename = ud.parm['downloadfilename']
else:
ud.basename = os.path.basename(ud.path)
ud.basename = os.path.basename(ud.path)
ud.localfile = data.expand(urllib.unquote(ud.basename), d)
def download(self, uri, ud, d, checkonly = False):
"""Fetch urls"""
basecmd = d.getVar("FETCHCMD_wget", True) or "/usr/bin/env wget -t 2 -T 30 -nv --passive-ftp --no-check-certificate"
def fetch_uri(uri, ud, d):
if checkonly:
fetchcmd = data.getVar("CHECKCOMMAND", d, True)
elif os.path.exists(ud.localpath):
# file exists, but we didnt complete it.. trying again..
fetchcmd = data.getVar("RESUMECOMMAND", d, True)
else:
fetchcmd = data.getVar("FETCHCOMMAND", d, True)
if 'downloadfilename' in ud.parm:
basecmd += " -O ${DL_DIR}/" + ud.localfile
uri = uri.split(";")[0]
uri_decoded = list(decodeurl(uri))
uri_type = uri_decoded[0]
uri_host = uri_decoded[1]
if checkonly:
fetchcmd = d.getVar("CHECKCOMMAND_wget", True) or d.expand(basecmd + " -c -P ${DL_DIR} '${URI}'")
elif os.path.exists(ud.localpath):
# file exists, but we didnt complete it.. trying again..
fetchcmd = d.getVar("RESUMECOMMAND_wget", True) or d.expand(basecmd + " --spider -P ${DL_DIR} '${URI}'")
else:
fetchcmd = d.getVar("FETCHCOMMAND_wget", True) or d.expand(basecmd + " -P ${DL_DIR} '${URI}'")
fetchcmd = fetchcmd.replace("${URI}", uri.split(";")[0])
fetchcmd = fetchcmd.replace("${FILE}", ud.basename)
if not checkonly:
logger.info("fetch " + uri)
logger.debug(2, "executing " + fetchcmd)
bb.fetch2.check_network_access(d, fetchcmd)
runfetchcmd(fetchcmd, d, quiet=checkonly)
uri = uri.split(";")[0]
uri_decoded = list(decodeurl(uri))
uri_type = uri_decoded[0]
uri_host = uri_decoded[1]
# Sanity check since wget can pretend it succeed when it didn't
# Also, this used to happen if sourceforge sent us to the mirror page
if not os.path.exists(ud.localpath) and not checkonly:
raise FetchError("The fetch command returned success for url %s but %s doesn't exist?!" % (uri, ud.localpath), uri)
fetchcmd = fetchcmd.replace("${URI}", uri.split(";")[0])
fetchcmd = fetchcmd.replace("${FILE}", ud.basename)
if not checkonly:
logger.info("fetch " + uri)
logger.debug(2, "executing " + fetchcmd)
bb.fetch2.check_network_access(d, fetchcmd)
runfetchcmd(fetchcmd, d, quiet=checkonly)
# Sanity check since wget can pretend it succeed when it didn't
# Also, this used to happen if sourceforge sent us to the mirror page
if not os.path.exists(ud.localpath) and not checkonly:
raise FetchError("The fetch command returned success for url %s but %s doesn't exist?!" % (uri, ud.localpath), uri)
localdata = data.createCopy(d)
data.setVar('OVERRIDES', "wget:" + data.getVar('OVERRIDES', localdata), localdata)
data.update_data(localdata)
fetch_uri(uri, ud, localdata)
return True
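Both versions of the wget download method above perform the same three-way selection: a spider-style probe for checkstatus, a resumed download when a partial file already exists, and a fresh download otherwise, followed by a sanity check because wget can report success without producing the file. A sketch of that decision logic (the command templates here are illustrative; the real ones come from FETCHCOMMAND/RESUMECOMMAND/CHECKCOMMAND or the built-in basecmd):

import os
import subprocess

BASECMD = "wget -t 2 -T 30 -nv --passive-ftp --no-check-certificate"

def build_wget_command(uri, localpath, dl_dir, checkonly=False):
    if checkonly:
        template = BASECMD + " --spider '%(uri)s'"           # just probe the URL
    elif os.path.exists(localpath):
        template = BASECMD + " -c -P %(dl_dir)s '%(uri)s'"   # resume a partial download
    else:
        template = BASECMD + " -P %(dl_dir)s '%(uri)s'"      # fresh download
    return template % {"uri": uri.split(";")[0], "dl_dir": dl_dir}

def fetch(uri, localpath, dl_dir, checkonly=False):
    cmd = build_wget_command(uri, localpath, dl_dir, checkonly)
    subprocess.check_call(cmd, shell=True)
    if not checkonly and not os.path.exists(localpath):
        # wget can pretend it succeeded, e.g. when handed a mirror landing page
        raise RuntimeError("fetch of %s succeeded but %s is missing" % (uri, localpath))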
def checkstatus(self, uri, ud, d):


@@ -33,7 +33,9 @@
from bb.utils import better_compile, better_exec
from bb import error
# A dict of function names we have seen
# A dict of modules we have handled
# it is the number of .bbclasses + x in size
_parsed_methods = { }
_parsed_fns = { }
def insert_method(modulename, code, fn):
@@ -50,22 +52,33 @@ def insert_method(modulename, code, fn):
if name in ['None', 'False']:
continue
elif name in _parsed_fns and not _parsed_fns[name] == modulename:
error("The function %s defined in %s was already declared in %s. BitBake has a global python function namespace so shared functions should be declared in a common include file rather than being duplicated, or if the functions are different, please use different function names." % (name, modulename, _parsed_fns[name]))
error( "Error Method already seen: %s in' %s' now in '%s'" % (name, _parsed_fns[name], modulename))
else:
_parsed_fns[name] = modulename
# A dict of modules the parser has finished with
_parsed_methods = {}
def check_insert_method(modulename, code, fn):
"""
Add the code if it wasn't added before. The module
name will be used for that
Variables:
@modulename a short name e.g. base.bbclass
@code The actual python code
@fn The filename from the outer file
"""
if not modulename in _parsed_methods:
return insert_method(modulename, code, fn)
_parsed_methods[modulename] = 1
def parsed_module(modulename):
"""
Has module been parsed?
Inform me file xyz was parsed
"""
return modulename in _parsed_methods
def set_parsed_module(modulename):
"""
Set module as parsed
"""
_parsed_methods[modulename] = True
def get_parsed_dict():
"""
shortcut
"""
return _parsed_methods
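methodpool is essentially a global registry: code is recorded per module name, and a second dict guards against the same function name being defined from two different files, since BitBake has a single python function namespace. A sketch of just that bookkeeping (compilation of the code itself omitted):

_parsed_fns = {}       # function name -> module that defined it
_parsed_methods = {}   # module name -> parsed flag

def insert_method(modulename, names):
    """Record which module defined which function names, flagging duplicates."""
    for name in names:
        previous = _parsed_fns.get(name)
        if previous is not None and previous != modulename:
            raise RuntimeError("function %s from %s already declared in %s"
                               % (name, modulename, previous))
        _parsed_fns[name] = modulename
    _parsed_methods[modulename] = True

def parsed_module(modulename):
    return modulename in _parsed_methods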


@@ -176,7 +176,6 @@ class diskMonitor:
def __init__(self, configuration):
self.enableMonitor = False
self.configuration = configuration
BBDirs = configuration.getVar("BB_DISKMON_DIRS", True) or None
if BBDirs:
@@ -220,12 +219,10 @@ class diskMonitor:
logger.error("No new tasks can be excuted since the disk space monitor action is \"STOPTASKS\"!")
self.checked[dev] = True
rq.finish_runqueue(False)
bb.event.fire(bb.event.DiskFull(dev, 'disk', freeSpace, self.devDict[dev][1]), self.configuration)
elif self.devDict[dev][0] == "ABORT" and not self.checked[dev]:
logger.error("Immediately abort since the disk space monitor action is \"ABORT\"!")
self.checked[dev] = True
rq.finish_runqueue(True)
bb.event.fire(bb.event.DiskFull(dev, 'disk', freeSpace, self.devDict[dev][1]), self.configuration)
# The free inodes, float point number
freeInode = st.f_favail
@@ -240,10 +237,8 @@ class diskMonitor:
logger.error("No new tasks can be excuted since the disk space monitor action is \"STOPTASKS\"!")
self.checked[dev] = True
rq.finish_runqueue(False)
bb.event.fire(bb.event.DiskFull(dev, 'inode', freeSpace, self.devDict[dev][1]), self.configuration)
elif self.devDict[dev][0] == "ABORT" and not self.checked[dev]:
logger.error("Immediately abort since the disk space monitor action is \"ABORT\"!")
self.checked[dev] = True
rq.finish_runqueue(True)
bb.event.fire(bb.event.DiskFull(dev, 'inode', freeSpace, self.devDict[dev][1]), self.configuration)
return
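With or without the DiskFull event removed in this hunk, the monitor's check follows the same pattern: read free space and free inodes with statvfs and, when a threshold from BB_DISKMON_DIRS is crossed, either stop scheduling new tasks or abort the build. A standalone sketch of that check (thresholds and return values are illustrative):

import os

def check_disk(path, min_bytes, min_inodes, action="STOPTASKS"):
    """Return 'OK', 'STOPTASKS' or 'ABORT' depending on free space/inodes."""
    st = os.statvfs(path)
    free_bytes = st.f_bavail * st.f_frsize
    free_inodes = st.f_favail
    if free_bytes >= min_bytes and free_inodes >= min_inodes:
        return "OK"
    if action == "ABORT":
        print("ERROR: aborting, %s has only %d bytes / %d inodes free"
              % (path, free_bytes, free_inodes))
        return "ABORT"
    print("ERROR: no new tasks will be scheduled, %s is low on space" % path)
    return "STOPTASKS"

# e.g. check_disk("/tmp", 1024**3, 100000, action="ABORT")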


@@ -31,6 +31,7 @@ import itertools
from bb import methodpool
from bb.parse import logger
__parsed_methods__ = bb.methodpool.get_parsed_dict()
_bbversions_re = re.compile(r"\[(?P<from>[0-9]+)-(?P<to>[0-9]+)\]")
class StatementGroup(list):
@@ -125,25 +126,23 @@ class MethodNode(AstNode):
self.body = body
def eval(self, data):
text = '\n'.join(self.body)
if self.func_name == "__anonymous":
funcname = ("__anon_%s_%s" % (self.lineno, self.filename.translate(string.maketrans('/.+-', '____'))))
if not funcname in bb.methodpool._parsed_fns:
text = "def %s(d):\n" % (funcname) + text
text = "def %s(d):\n" % (funcname) + '\n'.join(self.body)
bb.methodpool.insert_method(funcname, text, self.filename)
anonfuncs = data.getVar('__BBANONFUNCS') or []
anonfuncs.append(funcname)
data.setVar('__BBANONFUNCS', anonfuncs)
data.setVar(funcname, text)
else:
data.setVarFlag(self.func_name, "func", 1)
data.setVar(self.func_name, text)
data.setVar(self.func_name, '\n'.join(self.body))
class PythonMethodNode(AstNode):
def __init__(self, filename, lineno, function, modulename, body):
def __init__(self, filename, lineno, function, define, body):
AstNode.__init__(self, filename, lineno)
self.function = function
self.modulename = modulename
self.define = define
self.body = body
def eval(self, data):
@@ -151,8 +150,8 @@ class PythonMethodNode(AstNode):
# 'this' file. This means we will not parse methods from
# bb classes twice
text = '\n'.join(self.body)
if not bb.methodpool.parsed_module(self.modulename):
bb.methodpool.insert_method(self.modulename, text, self.filename)
if not bb.methodpool.parsed_module(self.define):
bb.methodpool.insert_method(self.define, text, self.filename)
data.setVarFlag(self.function, "func", 1)
data.setVarFlag(self.function, "python", 1)
data.setVar(self.function, text)
@@ -213,9 +212,9 @@ class ExportFuncsNode(AstNode):
data.setVarFlag(calledvar, flag, data.getVarFlag(var, flag))
if data.getVarFlag(calledvar, "python"):
data.setVar(var, " bb.build.exec_func('" + calledvar + "', d)\n")
data.setVar(var, "\tbb.build.exec_func('" + calledvar + "', d)\n")
else:
data.setVar(var, " " + calledvar + "\n")
data.setVar(var, "\t" + calledvar + "\n")
data.setVarFlag(var, 'export_func', '1')
class AddTaskNode(AstNode):
@@ -282,8 +281,8 @@ def handleData(statements, filename, lineno, groupd):
def handleMethod(statements, filename, lineno, func_name, body):
statements.append(MethodNode(filename, lineno, func_name, body))
def handlePythonMethod(statements, filename, lineno, funcname, modulename, body):
statements.append(PythonMethodNode(filename, lineno, funcname, modulename, body))
def handlePythonMethod(statements, filename, lineno, funcname, root, body):
statements.append(PythonMethodNode(filename, lineno, funcname, root, body))
def handleMethodFlags(statements, filename, lineno, key, m):
statements.append(MethodFlagsNode(filename, lineno, key, m))
@@ -321,7 +320,7 @@ def finalize(fn, d, variant = None):
code = []
for funcname in d.getVar("__BBANONFUNCS") or []:
code.append("%s(d)" % funcname)
bb.utils.better_exec("\n".join(code), {"d": d})
bb.utils.simple_exec("\n".join(code), {"d": d})
bb.data.update_data(d)
tasklist = d.getVar('__BBTASKS') or []


@@ -69,7 +69,7 @@ def supports(fn, d):
return os.path.splitext(fn)[-1] in [".bb", ".bbclass", ".inc"]
def inherit(files, fn, lineno, d):
__inherit_cache = d.getVar('__inherit_cache') or []
__inherit_cache = data.getVar('__inherit_cache', d) or []
files = d.expand(files).split()
for file in files:
if not os.path.isabs(file) and not file.endswith(".bbclass"):
@@ -80,7 +80,7 @@ def inherit(files, fn, lineno, d):
__inherit_cache.append( file )
data.setVar('__inherit_cache', __inherit_cache, d)
include(fn, file, lineno, d, "inherit")
__inherit_cache = d.getVar('__inherit_cache') or []
__inherit_cache = data.getVar('__inherit_cache', d) or []
def get_statements(filename, absolute_filename, base_name):
global cached_statements
@@ -126,13 +126,13 @@ def handle(fn, d, include):
if ext == ".bbclass":
__classname__ = root
classes.append(__classname__)
__inherit_cache = d.getVar('__inherit_cache') or []
__inherit_cache = data.getVar('__inherit_cache', d) or []
if not fn in __inherit_cache:
__inherit_cache.append(fn)
data.setVar('__inherit_cache', __inherit_cache, d)
if include != 0:
oldfile = d.getVar('FILE')
oldfile = data.getVar('FILE', d)
else:
oldfile = None
@@ -161,7 +161,7 @@ def handle(fn, d, include):
# we have parsed the bb class now
if ext == ".bbclass" or ext == ".inc":
bb.methodpool.set_parsed_module(base_name)
bb.methodpool.get_parsed_dict()[base_name] = 1
return d


@@ -1,8 +1,6 @@
import logging
import signal
import subprocess
import errno
import select
logger = logging.getLogger('BitBake.Process')
@@ -70,38 +68,20 @@ def _logged_communicate(pipe, log, input):
pipe.stdin.write(input)
pipe.stdin.close()
bufsize = 512
outdata, errdata = [], []
rin = []
while pipe.poll() is None:
if pipe.stdout is not None:
data = pipe.stdout.read(bufsize)
if data is not None:
outdata.append(data)
log.write(data)
if pipe.stdout is not None:
bb.utils.nonblockingfd(pipe.stdout.fileno())
rin.append(pipe.stdout)
if pipe.stderr is not None:
bb.utils.nonblockingfd(pipe.stderr.fileno())
rin.append(pipe.stderr)
try:
while pipe.poll() is None:
rlist = rin
try:
r,w,e = select.select (rlist, [], [])
except OSError, e:
if e.errno != errno.EINTR:
raise
if pipe.stdout in r:
data = pipe.stdout.read()
if data is not None:
outdata.append(data)
log.write(data)
if pipe.stderr in r:
data = pipe.stderr.read()
if data is not None:
errdata.append(data)
log.write(data)
finally:
log.flush()
if pipe.stderr is not None:
data = pipe.stderr.read(bufsize)
if data is not None:
errdata.append(data)
log.write(data)
return ''.join(outdata), ''.join(errdata)
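The select-based loop being removed here exists so that a child writing heavily to both stdout and stderr cannot deadlock the parent on a full pipe, which the simpler read(bufsize) loop can do. A minimal version of the non-blocking pattern on POSIX, using only the standard library:

import fcntl
import os
import select
import subprocess

def run_logged(cmd):
    """Run cmd, draining stdout and stderr concurrently via select()."""
    pipe = subprocess.Popen(cmd, shell=True,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    streams = [pipe.stdout, pipe.stderr]
    for s in streams:
        flags = fcntl.fcntl(s.fileno(), fcntl.F_GETFL)
        fcntl.fcntl(s.fileno(), fcntl.F_SETFL, flags | os.O_NONBLOCK)
    out, err = [], []
    while pipe.poll() is None:
        readable, _, _ = select.select(streams, [], [], 0.1)
        if pipe.stdout in readable:
            data = pipe.stdout.read()
            if data:
                out.append(data)
        if pipe.stderr in readable:
            data = pipe.stderr.read()
            if data:
                err.append(data)
    # drain anything buffered after the child exited
    for s, buf in ((pipe.stdout, out), (pipe.stderr, err)):
        data = s.read()
        if data:
            buf.append(data)
    return b"".join(out), b"".join(err), pipe.returncode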
def run(cmd, input=None, log=None, **options):


@@ -35,8 +35,6 @@ class NoProvider(bb.BBHandledException):
class NoRProvider(bb.BBHandledException):
"""Exception raised when no provider of a runtime dependency can be found"""
class MultipleRProvider(bb.BBHandledException):
"""Exception raised when multiple providers of a runtime dependency can be found"""
def findProviders(cfgData, dataCache, pkg_pn = None):
"""


@@ -375,8 +375,9 @@ class RunQueueData:
"""
runq_build = []
recursivetasks = {}
recursivetasksselfref = set()
recursive_tdepends = {}
runq_recrdepends = []
tdepends_fnid = {}
taskData = self.taskData
@@ -405,10 +406,11 @@ class RunQueueData:
depdata = taskData.build_targets[depid][0]
if depdata is None:
continue
dep = taskData.fn_index[depdata]
for taskname in tasknames:
taskid = taskData.gettask_id_fromfnid(depdata, taskname)
taskid = taskData.gettask_id(dep, taskname, False)
if taskid is not None:
depends.add(taskid)
depends.append(taskid)
def add_runtime_dependencies(depids, tasknames, depends):
for depid in depids:
@@ -417,20 +419,15 @@ class RunQueueData:
depdata = taskData.run_targets[depid][0]
if depdata is None:
continue
dep = taskData.fn_index[depdata]
for taskname in tasknames:
taskid = taskData.gettask_id_fromfnid(depdata, taskname)
taskid = taskData.gettask_id(dep, taskname, False)
if taskid is not None:
depends.add(taskid)
def add_resolved_dependencies(depids, tasknames, depends):
for depid in depids:
for taskname in tasknames:
taskid = taskData.gettask_id_fromfnid(depid, taskname)
if taskid is not None:
depends.add(taskid)
depends.append(taskid)
for task in xrange(len(taskData.tasks_name)):
depends = set()
depends = []
recrdepends = []
fnid = taskData.tasks_fnid[task]
fn = taskData.fn_index[fnid]
task_deps = self.dataCache.task_deps[fn]
@@ -442,7 +439,7 @@ class RunQueueData:
# Resolve task internal dependencies
#
# e.g. addtask before X after Y
depends = set(taskData.tasks_tdepends[task])
depends = taskData.tasks_tdepends[task]
# Resolve 'deptask' dependencies
#
@@ -457,91 +454,99 @@ class RunQueueData:
# e.g. do_sometask[rdeptask] = "do_someothertask"
# (makes sure sometask runs after someothertask of all RDEPENDS)
if 'rdeptask' in task_deps and taskData.tasks_name[task] in task_deps['rdeptask']:
tasknames = task_deps['rdeptask'][taskData.tasks_name[task]].split()
add_runtime_dependencies(taskData.rdepids[fnid], tasknames, depends)
taskname = task_deps['rdeptask'][taskData.tasks_name[task]]
add_runtime_dependencies(taskData.rdepids[fnid], [taskname], depends)
# Resolve inter-task dependencies
#
# e.g. do_sometask[depends] = "targetname:do_someothertask"
# (makes sure sometask runs after targetname's someothertask)
if fnid not in tdepends_fnid:
tdepends_fnid[fnid] = set()
idepends = taskData.tasks_idepends[task]
for (depid, idependtask) in idepends:
if depid in taskData.build_targets:
# Won't be in build_targets if ASSUME_PROVIDED
depdata = taskData.build_targets[depid][0]
if depdata is not None:
taskid = taskData.gettask_id_fromfnid(depdata, idependtask)
dep = taskData.fn_index[depdata]
taskid = taskData.gettask_id(dep, idependtask, False)
if taskid is None:
bb.msg.fatal("RunQueue", "Task %s in %s depends upon non-existent task %s in %s" % (taskData.tasks_name[task], fn, idependtask, dep))
depends.add(taskid)
irdepends = taskData.tasks_irdepends[task]
for (depid, idependtask) in irdepends:
if depid in taskData.run_targets:
# Won't be in run_targets if ASSUME_PROVIDED
depdata = taskData.run_targets[depid][0]
if depdata is not None:
taskid = taskData.gettask_id_fromfnid(depdata, idependtask)
if taskid is None:
bb.msg.fatal("RunQueue", "Task %s in %s rdepends upon non-existent task %s in %s" % (taskData.tasks_name[task], fn, idependtask, dep))
depends.add(taskid)
depends.append(taskid)
if depdata != fnid:
tdepends_fnid[fnid].add(taskid)
# Resolve recursive 'recrdeptask' dependencies (Part A)
# Resolve recursive 'recrdeptask' dependencies (A)
#
# e.g. do_sometask[recrdeptask] = "do_someothertask"
# (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
# We cover the recursive part of the dependencies below
if 'recrdeptask' in task_deps and taskData.tasks_name[task] in task_deps['recrdeptask']:
tasknames = task_deps['recrdeptask'][taskData.tasks_name[task]].split()
recursivetasks[task] = tasknames
add_build_dependencies(taskData.depids[fnid], tasknames, depends)
add_runtime_dependencies(taskData.rdepids[fnid], tasknames, depends)
if taskData.tasks_name[task] in tasknames:
recursivetasksselfref.add(task)
for taskname in task_deps['recrdeptask'][taskData.tasks_name[task]].split():
recrdepends.append(taskname)
add_build_dependencies(taskData.depids[fnid], [taskname], depends)
add_runtime_dependencies(taskData.rdepids[fnid], [taskname], depends)
# Remove all self references
if task in depends:
newdep = []
logger.debug(2, "Task %s (%s %s) contains self reference! %s", task, taskData.fn_index[taskData.tasks_fnid[task]], taskData.tasks_name[task], depends)
for dep in depends:
if task != dep:
newdep.append(dep)
depends = newdep
self.runq_fnid.append(taskData.tasks_fnid[task])
self.runq_task.append(taskData.tasks_name[task])
self.runq_depends.append(depends)
self.runq_depends.append(set(depends))
self.runq_revdeps.append(set())
self.runq_hash.append("")
runq_build.append(0)
runq_recrdepends.append(recrdepends)
# Resolve recursive 'recrdeptask' dependencies (Part B)
#
# Build a list of recursive cumulative dependencies for each fnid
# We do this by fnid, since if A depends on some task in B
# we're interested in later tasks B's fnid might have but B itself
# doesn't depend on
#
# Algorithm is O(tasks) + O(tasks)*O(fnids)
#
reccumdepends = {}
for task in xrange(len(self.runq_fnid)):
fnid = self.runq_fnid[task]
if fnid not in reccumdepends:
if fnid in tdepends_fnid:
reccumdepends[fnid] = tdepends_fnid[fnid]
else:
reccumdepends[fnid] = set()
reccumdepends[fnid].update(self.runq_depends[task])
for task in xrange(len(self.runq_fnid)):
taskfnid = self.runq_fnid[task]
for fnid in reccumdepends:
if task in reccumdepends[fnid]:
reccumdepends[fnid].add(task)
if taskfnid in reccumdepends:
reccumdepends[fnid].update(reccumdepends[taskfnid])
# Resolve recursive 'recrdeptask' dependencies (B)
#
# e.g. do_sometask[recrdeptask] = "do_someothertask"
# (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively)
# We need to do this separately since we need all of self.runq_depends to be complete before this is processed
extradeps = {}
for task in recursivetasks:
extradeps[task] = set(self.runq_depends[task])
tasknames = recursivetasks[task]
seendeps = set()
seenfnid = []
def generate_recdeps(t):
newdeps = set()
add_resolved_dependencies([taskData.tasks_fnid[t]], tasknames, newdeps)
extradeps[task].update(newdeps)
seendeps.add(t)
newdeps.add(t)
for i in newdeps:
for n in self.runq_depends[i]:
if n not in seendeps:
generate_recdeps(n)
generate_recdeps(task)
# Remove circular references so that do_a[recrdeptask] = "do_a do_b" can work
for task in recursivetasks:
extradeps[task].difference_update(recursivetasksselfref)
for task in xrange(len(taskData.tasks_name)):
# Add in extra dependencies
if task in extradeps:
self.runq_depends[task] = extradeps[task]
# Remove all self references
if task in self.runq_depends[task]:
logger.debug(2, "Task %s (%s %s) contains self reference! %s", task, taskData.fn_index[taskData.tasks_fnid[task]], taskData.tasks_name[task], self.runq_depends[task])
self.runq_depends[task].remove(task)
for task in xrange(len(self.runq_fnid)):
if len(runq_recrdepends[task]) > 0:
taskfnid = self.runq_fnid[task]
for dep in reccumdepends[taskfnid]:
# Ignore self references
if dep == task:
continue
for taskname in runq_recrdepends[task]:
if taskData.tasks_name[dep] == taskname:
self.runq_depends[task].add(dep)
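Both variants of the recrdeptask handling above ultimately need, for each task, the transitive closure of its dependencies, so that the named tasks (e.g. do_someothertask) of everything in that closure can be added as extra dependencies. Stripped of the fnid/taskid indirection, the closure is a plain graph walk:

def transitive_deps(task, depends):
    """Return all tasks reachable from 'task' through the 'depends' mapping."""
    seen = set()
    stack = [task]
    while stack:
        current = stack.pop()
        for dep in depends.get(current, ()):
            if dep not in seen:
                seen.add(dep)
                stack.append(dep)
    seen.discard(task)          # drop the self reference, as the runqueue does
    return seen

depends = {"a": {"b"}, "b": {"c"}, "c": set()}
print(sorted(transitive_deps("a", depends)))   # ['b', 'c']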
# Step B - Mark all active tasks
#
@@ -700,28 +705,6 @@ class RunQueueData:
continue
self.runq_setscene.append(task)
def invalidate_task(fn, taskname, error_nostamp):
taskdep = self.dataCache.task_deps[fn]
if 'nostamp' in taskdep and taskname in taskdep['nostamp']:
if error_nostamp:
bb.fatal("Task %s is marked nostamp, cannot invalidate this task" % taskname)
else:
bb.debug(1, "Task %s is marked nostamp, cannot invalidate this task" % taskname)
else:
logger.verbose("Invalidate task %s, %s", taskname, fn)
bb.parse.siggen.invalidate_task(taskname, self.dataCache, fn)
# Invalidate task if force mode active
if self.cooker.configuration.force:
for (fn, target) in self.target_pairs:
invalidate_task(fn, target, False)
# Invalidate task if invalidate mode active
if self.cooker.configuration.invalidate_stamp:
for (fn, target) in self.target_pairs:
for st in self.cooker.configuration.invalidate_stamp.split(','):
invalidate_task(fn, "do_%s" % st, True)
# Iterate over the task list and call into the siggen code
dealtwith = set()
todeal = set(range(len(self.runq_fnid)))
@@ -748,6 +731,12 @@ class RunQueueData:
deps.append(depidentifier)
self.hash_deps[identifier] = deps
# Remove stamps for targets if force mode active
if self.cooker.configuration.force:
for (fn, target) in self.target_pairs:
logger.verbose("Remove stamp %s, %s", target, fn)
bb.build.del_stamp(target, self.dataCache, fn)
return len(self.runq_fnid)
def dump_data(self, taskQueue):
@@ -792,7 +781,101 @@ class RunQueue:
self.rqexe = None
def check_stamp_task(self, task, taskname = None, recurse = False, cache = None):
def check_stamps(self):
unchecked = {}
current = []
notcurrent = []
buildable = []
if self.stamppolicy == "perfile":
fulldeptree = False
else:
fulldeptree = True
stampwhitelist = []
if self.stamppolicy == "whitelist":
stampwhitelist = self.rqdata.stampfnwhitelist
for task in xrange(len(self.rqdata.runq_fnid)):
unchecked[task] = ""
if len(self.rqdata.runq_depends[task]) == 0:
buildable.append(task)
def check_buildable(self, task, buildable):
for revdep in self.rqdata.runq_revdeps[task]:
alldeps = 1
for dep in self.rqdata.runq_depends[revdep]:
if dep in unchecked:
alldeps = 0
if alldeps == 1:
if revdep in unchecked:
buildable.append(revdep)
for task in xrange(len(self.rqdata.runq_fnid)):
if task not in unchecked:
continue
fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
taskname = self.rqdata.runq_task[task]
stampfile = bb.build.stampfile(taskname, self.rqdata.dataCache, fn)
# If the stamp is missing it's not current
if not os.access(stampfile, os.F_OK):
del unchecked[task]
notcurrent.append(task)
check_buildable(self, task, buildable)
continue
# If it's a 'nostamp' task, it's not current
taskdep = self.rqdata.dataCache.task_deps[fn]
if 'nostamp' in taskdep and task in taskdep['nostamp']:
del unchecked[task]
notcurrent.append(task)
check_buildable(self, task, buildable)
continue
while (len(buildable) > 0):
nextbuildable = []
for task in buildable:
if task in unchecked:
fn = self.taskData.fn_index[self.rqdata.runq_fnid[task]]
taskname = self.rqdata.runq_task[task]
stampfile = bb.build.stampfile(taskname, self.rqdata.dataCache, fn)
iscurrent = True
t1 = os.stat(stampfile)[stat.ST_MTIME]
for dep in self.rqdata.runq_depends[task]:
if iscurrent:
fn2 = self.taskData.fn_index[self.rqdata.runq_fnid[dep]]
taskname2 = self.rqdata.runq_task[dep]
stampfile2 = bb.build.stampfile(taskname2, self.rqdata.dataCache, fn2)
if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist):
if dep in notcurrent:
iscurrent = False
else:
t2 = os.stat(stampfile2)[stat.ST_MTIME]
if t1 < t2:
iscurrent = False
del unchecked[task]
if iscurrent:
current.append(task)
else:
notcurrent.append(task)
check_buildable(self, task, nextbuildable)
buildable = nextbuildable
#for task in range(len(self.runq_fnid)):
# fn = self.taskData.fn_index[self.runq_fnid[task]]
# taskname = self.runq_task[task]
# print "%s %s.%s" % (task, taskname, fn)
#print "Unchecked: %s" % unchecked
#print "Current: %s" % current
#print "Not current: %s" % notcurrent
if len(unchecked) > 0:
bb.msg.fatal("RunQueue", "check_stamps fatal internal error")
return current
def check_stamp_task(self, task, taskname = None, recurse = False):
def get_timestamp(f):
try:
if not os.access(f, os.F_OK):
@@ -828,9 +911,6 @@ class RunQueue:
if taskname != "do_setscene" and taskname.endswith("_setscene"):
return True
if cache is None:
cache = {}
iscurrent = True
t1 = get_timestamp(stampfile)
for dep in self.rqdata.runq_depends[task]:
@@ -851,18 +931,10 @@ class RunQueue:
logger.debug(2, 'Stampfile %s < %s', stampfile, stampfile2)
iscurrent = False
if recurse and iscurrent:
if dep in cache:
iscurrent = cache[dep]
if not iscurrent:
logger.debug(2, 'Stampfile for dependency %s:%s invalid (cached)' % (fn2, taskname2))
else:
iscurrent = self.check_stamp_task(dep, recurse=True, cache=cache)
cache[dep] = iscurrent
if recurse:
cache[task] = iscurrent
iscurrent = self.check_stamp_task(dep, recurse=True)
return iscurrent
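check_stamp_task decides whether a task can be skipped by comparing stamp file mtimes: a task is current only if its own stamp exists and is not older than the stamps of everything it depends on, recursing (and, in the removed variant, memoising) as needed. A simplified sketch of that comparison, assuming an acyclic dependency graph:

import os

def stamp_current(task, stampfile_of, depends, cache=None):
    """True if task's stamp exists and is not older than any dependency stamp."""
    if cache is None:
        cache = {}
    if task in cache:
        return cache[task]
    own = stampfile_of(task)
    if not os.path.exists(own):
        cache[task] = False
        return False
    t1 = os.stat(own).st_mtime
    current = True
    for dep in depends.get(task, ()):
        depstamp = stampfile_of(dep)
        if not os.path.exists(depstamp) or os.stat(depstamp).st_mtime > t1:
            current = False
            break
        if not stamp_current(dep, stampfile_of, depends, cache):
            current = False
            break
    cache[task] = current
    return current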
def _execute_runqueue(self):
def execute_runqueue(self):
"""
Run the tasks in a queue prepared by rqdata.prepare()
Upon failure, optionally try to recover the build using any alternate providers
@@ -926,17 +998,6 @@ class RunQueue:
# Loop
return retval
def execute_runqueue(self):
# Catch unexpected exceptions and ensure we exit when an error occurs, not loop.
try:
return self._execute_runqueue()
except bb.runqueue.TaskFailure:
raise
except:
logger.error("An uncaught exception occured in runqueue, please see the failure below:")
self.state = runQueueComplete
raise
def finish_runqueue(self, now = False):
if not self.rqexe:
return
@@ -980,36 +1041,23 @@ class RunQueueExecute:
self.build_stamps = {}
self.failed_fnids = []
self.stampcache = {}
def runqueue_process_waitpid(self):
"""
Return none is there are no processes awaiting result collection, otherwise
collect the process exit codes and close the information pipe.
"""
pid, status = os.waitpid(-1, os.WNOHANG)
if pid == 0 or os.WIFSTOPPED(status):
result = os.waitpid(-1, os.WNOHANG)
if result[0] == 0 and result[1] == 0:
return None
if os.WIFEXITED(status):
status = os.WEXITSTATUS(status)
elif os.WIFSIGNALED(status):
# Per shell conventions for $?, when a process exits due to
# a signal, we return an exit code of 128 + SIGNUM
status = 128 + os.WTERMSIG(status)
task = self.build_pids[pid]
del self.build_pids[pid]
self.build_pipes[pid].close()
del self.build_pipes[pid]
# self.build_stamps[pid] may not exist when use shared work directory.
if pid in self.build_stamps:
del self.build_stamps[pid]
if status != 0:
self.task_fail(task, status)
task = self.build_pids[result[0]]
del self.build_pids[result[0]]
self.build_pipes[result[0]].close()
del self.build_pipes[result[0]]
# self.build_stamps[result[0]] may not exist when use shared work directory.
if result[0] in self.build_stamps.keys():
del self.build_stamps[result[0]]
if result[1] != 0:
self.task_fail(task, result[1]>>8)
else:
self.task_complete(task)
return True
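The reworked runqueue_process_waitpid above decodes the raw status word from os.waitpid: a normal exit yields WEXITSTATUS, and death by signal is reported as 128 plus the signal number, matching shell conventions for $?. That decoding on its own:

import os

def reap_one():
    """Collect one finished child, returning (pid, exitcode) or None."""
    try:
        pid, status = os.waitpid(-1, os.WNOHANG)
    except ChildProcessError:
        return None                       # no children to wait for
    if pid == 0 or os.WIFSTOPPED(status):
        return None                       # nothing has exited yet
    if os.WIFEXITED(status):
        exitcode = os.WEXITSTATUS(status)
    elif os.WIFSIGNALED(status):
        exitcode = 128 + os.WTERMSIG(status)   # shell-style $? for signals
    else:
        exitcode = -1
    return pid, exitcode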
@@ -1116,6 +1164,8 @@ class RunQueueExecute:
os.umask(umask)
self.cooker.configuration.data.setVar("BB_WORKERCONTEXT", "1")
self.cooker.configuration.data.setVar("__RUNQUEUE_DO_NOT_USE_EXTERNALLY", self)
self.cooker.configuration.data.setVar("__RUNQUEUE_DO_NOT_USE_EXTERNALLY2", fn)
bb.parse.siggen.set_taskdata(self.rqdata.hashes, self.rqdata.hash_deps)
ret = 0
try:
@@ -1173,8 +1223,6 @@ class RunQueueExecuteTasks(RunQueueExecute):
self.stats = RunQueueStats(len(self.rqdata.runq_fnid))
self.stampcache = {}
# Mark initial buildable tasks
for task in xrange(self.stats.total):
self.runq_running.append(0)
@@ -1183,7 +1231,7 @@ class RunQueueExecuteTasks(RunQueueExecute):
self.runq_buildable.append(1)
else:
self.runq_buildable.append(0)
if len(self.rqdata.runq_revdeps[task]) > 0 and self.rqdata.runq_revdeps[task].issubset(self.rq.scenequeue_covered) and task not in self.rq.scenequeue_notcovered:
if len(self.rqdata.runq_revdeps[task]) > 0 and self.rqdata.runq_revdeps[task].issubset(self.rq.scenequeue_covered):
self.rq.scenequeue_covered.add(task)
found = True
@@ -1194,7 +1242,7 @@ class RunQueueExecuteTasks(RunQueueExecute):
continue
logger.debug(1, 'Considering %s (%s): %s' % (task, self.rqdata.get_user_idstring(task), str(self.rqdata.runq_revdeps[task])))
if len(self.rqdata.runq_revdeps[task]) > 0 and self.rqdata.runq_revdeps[task].issubset(self.rq.scenequeue_covered) and task not in self.rq.scenequeue_notcovered:
if len(self.rqdata.runq_revdeps[task]) > 0 and self.rqdata.runq_revdeps[task].issubset(self.rq.scenequeue_covered):
ok = True
for revdep in self.rqdata.runq_revdeps[task]:
if self.rqdata.runq_fnid[task] != self.rqdata.runq_fnid[revdep]:
@@ -1211,30 +1259,9 @@ class RunQueueExecuteTasks(RunQueueExecute):
# Allow the metadata to elect for setscene tasks to run anyway
covered_remove = set()
if self.rq.setsceneverify:
invalidtasks = []
for task in xrange(len(self.rqdata.runq_task)):
fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
taskname = self.rqdata.runq_task[task]
taskdep = self.rqdata.dataCache.task_deps[fn]
if 'noexec' in taskdep and taskname in taskdep['noexec']:
continue
if self.rq.check_stamp_task(task, taskname + "_setscene", cache=self.stampcache):
logger.debug(2, 'Setscene stamp current for task %s(%s)', task, self.rqdata.get_user_idstring(task))
continue
if self.rq.check_stamp_task(task, taskname, recurse = True, cache=self.stampcache):
logger.debug(2, 'Normal stamp current for task %s(%s)', task, self.rqdata.get_user_idstring(task))
continue
invalidtasks.append(task)
call = self.rq.setsceneverify + "(covered, tasknames, fnids, fns, d, invalidtasks=invalidtasks)"
call2 = self.rq.setsceneverify + "(covered, tasknames, fnids, fns, d)"
locs = { "covered" : self.rq.scenequeue_covered, "tasknames" : self.rqdata.runq_task, "fnids" : self.rqdata.runq_fnid, "fns" : self.rqdata.taskData.fn_index, "d" : self.cooker.configuration.data, "invalidtasks" : invalidtasks }
# Backwards compatibility with older versions without invalidtasks
try:
covered_remove = bb.utils.better_eval(call, locs)
except TypeError:
covered_remove = bb.utils.better_eval(call2, locs)
call = self.rq.setsceneverify + "(covered, tasknames, fnids, fns, d)"
locs = { "covered" : self.rq.scenequeue_covered, "tasknames" : self.rqdata.runq_task, "fnids" : self.rqdata.runq_fnid, "fns" : self.rqdata.taskData.fn_index, "d" : self.cooker.configuration.data }
covered_remove = bb.utils.better_eval(call, locs)
for task in covered_remove:
fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[task]]
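The try/except TypeError fallback in the setsceneverify block above is a handy pattern for extending a hook's signature without breaking metadata written against the older form: attempt the call with the new keyword first, and retry without it if the hook does not accept it. A small illustrative sketch with hypothetical hook functions (the real hook is looked up by name and run through bb.utils.better_eval, as shown above):

    def legacy_hook(covered, d):
        return set()                                   # old signature: no invalidtasks

    def newer_hook(covered, d, invalidtasks=None):
        return set(covered) & set(invalidtasks or [])  # new signature uses invalidtasks

    def run_verify(hook, covered, d, invalidtasks):
        try:
            return hook(covered, d, invalidtasks=invalidtasks)
        except TypeError:
            # The hook predates 'invalidtasks'; retry with the legacy call.
            return hook(covered, d)

    print(run_verify(legacy_hook, {1, 2, 3}, None, [2]))   # empty set, legacy path
    print(run_verify(newer_hook, {1, 2, 3}, None, [2]))    # {2}, new path

One caveat, true of the original as well: an unrelated TypeError raised inside a new-style hook will also trigger the legacy retry.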
@@ -1346,7 +1373,7 @@ class RunQueueExecuteTasks(RunQueueExecute):
self.task_skip(task)
return True
if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
if self.rq.check_stamp_task(task, taskname):
logger.debug(2, "Stamp current task %s (%s)", task,
self.rqdata.get_user_idstring(task))
self.task_skip(task)
@@ -1487,7 +1514,7 @@ class RunQueueExecuteScenequeue(RunQueueExecute):
dep = self.rqdata.taskData.fn_index[depdata]
taskid = self.rqdata.get_task_id(self.rqdata.taskData.getfn_id(dep), idependtask.replace("_setscene", ""))
if taskid is None:
bb.msg.fatal("RunQueue", "Task %s:%s depends upon non-existent task %s:%s" % (self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[realid]], self.rqdata.taskData.tasks_name[realid], dep, idependtask))
bb.msg.fatal("RunQueue", "Task %s depends upon non-existent task %s:%s" % (self.rqdata.taskData.tasks_name[realid], dep, idependtask))
sq_revdeps_squash[self.rqdata.runq_setscene.index(task)].add(self.rqdata.runq_setscene.index(taskid))
# Have to zero this to avoid circular dependencies
@@ -1530,18 +1557,12 @@ class RunQueueExecuteScenequeue(RunQueueExecute):
bb.build.make_stamp(taskname + "_setscene", self.rqdata.dataCache, fn)
continue
if self.rq.check_stamp_task(realtask, taskname + "_setscene", cache=self.stampcache):
if self.rq.check_stamp_task(realtask, taskname + "_setscene"):
logger.debug(2, 'Setscene stamp current for task %s(%s)', task, self.rqdata.get_user_idstring(realtask))
stamppresent.append(task)
self.task_skip(task)
continue
if self.rq.check_stamp_task(realtask, taskname, recurse = True, cache=self.stampcache):
logger.debug(2, 'Normal stamp current for task %s(%s)', task, self.rqdata.get_user_idstring(realtask))
stamppresent.append(task)
self.task_skip(task)
continue
sq_fn.append(fn)
sq_hashfn.append(self.rqdata.dataCache.hashfn[fn])
sq_hash.append(self.rqdata.runq_hash[realtask])
@@ -1629,7 +1650,7 @@ class RunQueueExecuteScenequeue(RunQueueExecute):
fn = self.rqdata.taskData.fn_index[self.rqdata.runq_fnid[realtask]]
taskname = self.rqdata.runq_task[realtask] + "_setscene"
if self.rq.check_stamp_task(realtask, self.rqdata.runq_task[realtask], recurse = True, cache=self.stampcache):
if self.rq.check_stamp_task(realtask, self.rqdata.runq_task[realtask], recurse = True):
logger.debug(2, 'Stamp for underlying task %s(%s) is current, so skipping setscene variant',
task, self.rqdata.get_user_idstring(realtask))
self.task_failoutright(task)
@@ -1641,7 +1662,7 @@ class RunQueueExecuteScenequeue(RunQueueExecute):
self.task_failoutright(task)
return True
if self.rq.check_stamp_task(realtask, taskname, cache=self.stampcache):
if self.rq.check_stamp_task(realtask, taskname):
logger.debug(2, 'Setscene stamp current task %s(%s), so skip it and its dependencies',
task, self.rqdata.get_user_idstring(realtask))
self.task_skip(task)
@@ -1672,9 +1693,6 @@ class RunQueueExecuteScenequeue(RunQueueExecute):
self.rq.scenequeue_covered = set()
for task in oldcovered:
self.rq.scenequeue_covered.add(self.rqdata.runq_setscene[task])
self.rq.scenequeue_notcovered = set()
for task in self.scenequeue_notcovered:
self.rq.scenequeue_notcovered.add(self.rqdata.runq_setscene[task])
logger.debug(1, 'We can skip tasks %s', sorted(self.rq.scenequeue_covered))
@@ -1758,6 +1776,15 @@ class runQueueTaskCompleted(runQueueEvent):
Event notifying a task completed
"""
def check_stamp_fn(fn, taskname, d):
rqexe = d.getVar("__RUNQUEUE_DO_NOT_USE_EXTERNALLY")
fn = d.getVar("__RUNQUEUE_DO_NOT_USE_EXTERNALLY2")
fnid = rqexe.rqdata.taskData.getfn_id(fn)
taskid = rqexe.rqdata.get_task_id(fnid, taskname)
if taskid is not None:
return rqexe.rq.check_stamp_task(taskid)
return None
class runQueuePipe():
"""
Abstraction for a pipe between a worker thread and the server
@@ -1765,7 +1792,7 @@ class runQueuePipe():
def __init__(self, pipein, pipeout, d):
self.input = pipein
pipeout.close()
bb.utils.nonblockingfd(self.input)
fcntl.fcntl(self.input, fcntl.F_SETFL, fcntl.fcntl(self.input, fcntl.F_GETFL) | os.O_NONBLOCK)
self.queue = ""
self.d = d

View File

@@ -2,7 +2,6 @@ import hashlib
import logging
import os
import re
import tempfile
import bb.data
logger = logging.getLogger('BitBake.SigGen')
@@ -51,10 +50,6 @@ class SignatureGenerator(object):
def dump_sigtask(self, fn, task, stampbase, runtime):
return
def invalidate_task(self, task, d, fn):
bb.build.del_stamp(task, d, fn)
class SignatureGeneratorBasic(SignatureGenerator):
"""
"""
@@ -65,7 +60,6 @@ class SignatureGeneratorBasic(SignatureGenerator):
self.taskhash = {}
self.taskdeps = {}
self.runtaskdeps = {}
self.file_checksum_values = {}
self.gendeps = {}
self.lookupcache = {}
self.pkgnameextract = re.compile("(?P<fn>.*)\..*")
@@ -113,10 +107,6 @@ class SignatureGeneratorBasic(SignatureGenerator):
data = data + dep
if dep in lookupcache:
var = lookupcache[dep]
elif dep[-1] == ']':
vf = dep[:-1].split('[')
var = d.getVarFlag(vf[0], vf[1], False)
lookupcache[dep] = var
else:
var = d.getVar(dep, False)
lookupcache[dep] = var
@@ -158,20 +148,10 @@ class SignatureGeneratorBasic(SignatureGenerator):
return False
return True
def read_taint(self, fn, task, stampbase):
taint = None
try:
with open(stampbase + '.' + task + '.taint', 'r') as taintf:
taint = taintf.read()
except IOError:
pass
return taint
def get_taskhash(self, fn, task, deps, dataCache):
k = fn + "." + task
data = dataCache.basetaskhash[k]
self.runtaskdeps[k] = []
self.file_checksum_values[k] = {}
recipename = dataCache.pkg_fn[fn]
for dep in sorted(deps, key=clean_basepath):
depname = dataCache.pkg_fn[self.pkgnameextract.search(dep).group('fn')]
@@ -181,17 +161,6 @@ class SignatureGeneratorBasic(SignatureGenerator):
bb.fatal("%s is not in taskhash, caller isn't calling in dependency order?", dep)
data = data + self.taskhash[dep]
self.runtaskdeps[k].append(dep)
if task in dataCache.file_checksums[fn]:
checksums = bb.fetch2.get_file_checksums(dataCache.file_checksums[fn][task], recipename)
for (f,cs) in checksums:
self.file_checksum_values[k][f] = cs
data = data + cs
taint = self.read_taint(fn, task, dataCache.stamp[fn])
if taint:
data = data + taint
h = hashlib.md5(data).hexdigest()
self.taskhash[k] = h
#d.setVar("BB_TASKHASH_task-%s" % task, taskhash[task])
@@ -228,29 +197,12 @@ class SignatureGeneratorBasic(SignatureGenerator):
if runtime and k in self.taskhash:
data['runtaskdeps'] = self.runtaskdeps[k]
data['file_checksum_values'] = self.file_checksum_values[k]
data['runtaskhashes'] = {}
for dep in data['runtaskdeps']:
data['runtaskhashes'][dep] = self.taskhash[dep]
taint = self.read_taint(fn, task, stampbase)
if taint:
data['taint'] = taint
fd, tmpfile = tempfile.mkstemp(dir=os.path.dirname(sigfile), prefix="sigtask.")
try:
with os.fdopen(fd, "wb") as stream:
p = pickle.dump(data, stream, -1)
stream.flush()
os.fsync(fd)
os.chmod(tmpfile, 0664)
os.rename(tmpfile, sigfile)
except (OSError, IOError), err:
try:
os.unlink(tmpfile)
except OSError:
pass
raise err
p = pickle.Pickler(file(sigfile, "wb"), -1)
p.dump(data)
def dump_sigs(self, dataCache):
for fn in self.taskdeps:
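One of the two dump_sigtask variants shown above writes the signature file via tempfile.mkstemp() followed by os.rename(), the standard recipe for atomic replacement: readers only ever see either the previous complete file or the new complete file, because rename is atomic within a single filesystem. A generic sketch of that pattern; the function name and example path are made up:

    import os
    import pickle
    import tempfile

    def atomic_pickle_dump(obj, path):
        fd, tmpfile = tempfile.mkstemp(dir=os.path.dirname(path) or ".", prefix="sigtask.")
        try:
            with os.fdopen(fd, "wb") as stream:
                pickle.dump(obj, stream, -1)
                stream.flush()
                os.fsync(stream.fileno())  # make sure the data is on disk before renaming
            os.rename(tmpfile, path)       # atomically replace any earlier file
        except (OSError, IOError):
            try:
                os.unlink(tmpfile)         # don't leave the temporary file behind
            except OSError:
                pass
            raise

    atomic_pickle_dump({"basehash": "0123abcd"}, "example.sigdata")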
@@ -278,10 +230,6 @@ class SignatureGeneratorBasicHash(SignatureGeneratorBasic):
h = self.basehash[k]
return ("%s.%s.%s.%s" % (stampbase, taskname, h, extrainfo)).rstrip('.')
def invalidate_task(self, task, d, fn):
bb.note("Tainting hash to force rebuild of task %s, %s" % (fn, task))
bb.build.write_taint(task, d, fn)
def dump_this_task(outfile, d):
import bb.parse
fn = d.getVar("BB_FILENAME", True)
@@ -302,9 +250,9 @@ def clean_basepaths(a):
return b
def compare_sigfiles(a, b):
p1 = pickle.Unpickler(open(a, "rb"))
p1 = pickle.Unpickler(file(a, "rb"))
a_data = p1.load()
p2 = pickle.Unpickler(open(b, "rb"))
p2 = pickle.Unpickler(file(b, "rb"))
b_data = p2.load()
def dict_diff(a, b, whitelist=set()):
@@ -354,18 +302,6 @@ def compare_sigfiles(a, b):
for dep in changed:
print "Variable %s value changed from %s to %s" % (dep, a_data['varvals'][dep], b_data['varvals'][dep])
changed, added, removed = dict_diff(a_data['file_checksum_values'], b_data['file_checksum_values'])
if changed:
for f in changed:
print "Checksum for file %s changed from %s to %s" % (f, a_data['file_checksum_values'][f], b_data['file_checksum_values'][f])
if added:
for f in added:
print "Dependency on checksum of file %s was added" % (f)
if removed:
for f in removed:
print "Dependency on checksum of file %s was removed" % (f)
if 'runtaskhashes' in a_data and 'runtaskhashes' in b_data:
a = clean_basepaths(a_data['runtaskhashes'])
b = clean_basepaths(b_data['runtaskhashes'])
@@ -394,15 +330,8 @@ def compare_sigfiles(a, b):
for dep in changed:
print "Hash for dependent task %s changed from %s to %s" % (dep, a[dep], b[dep])
a_taint = a_data.get('taint', None)
b_taint = b_data.get('taint', None)
if a_taint != b_taint:
print "Taint (by forced/invalidated task) changed from %s to %s" % (a_taint, b_taint)
def dump_sigfile(a):
p1 = pickle.Unpickler(open(a, "rb"))
p1 = pickle.Unpickler(file(a, "rb"))
a_data = p1.load()
print "basewhitelist: %s" % (a_data['basewhitelist'])
@@ -422,12 +351,6 @@ def dump_sigfile(a):
if 'runtaskdeps' in a_data:
print "Tasks this task depends on: %s" % (a_data['runtaskdeps'])
if 'file_checksum_values' in a_data:
print "This task depends on the checksums of files: %s" % (a_data['file_checksum_values'])
if 'runtaskhashes' in a_data:
for dep in a_data['runtaskhashes']:
print "Hash for dependent task %s is %s" % (dep, a_data['runtaskhashes'][dep])
if 'taint' in a_data:
print "Tainted (by forced/invalidated task): %s" % a_data['taint']

View File

@@ -55,7 +55,6 @@ class TaskData:
self.tasks_name = []
self.tasks_tdepends = []
self.tasks_idepends = []
self.tasks_irdepends = []
# Cache to speed up task ID lookups
self.tasks_lookup = {}
@@ -116,16 +115,6 @@ class TaskData:
ids.append(self.tasks_lookup[fnid][task])
return ids
def gettask_id_fromfnid(self, fnid, task):
"""
Return an ID number for the task matching fnid and task.
"""
if fnid in self.tasks_lookup:
if task in self.tasks_lookup[fnid]:
return self.tasks_lookup[fnid][task]
return None
def gettask_id(self, fn, task, create = True):
"""
Return an ID number for the task matching fn and task.
@@ -145,7 +134,6 @@ class TaskData:
self.tasks_fnid.append(fnid)
self.tasks_tdepends.append([])
self.tasks_idepends.append([])
self.tasks_irdepends.append([])
listid = len(self.tasks_name) - 1
@@ -176,9 +164,6 @@ class TaskData:
# Work out task dependencies
parentids = []
for dep in task_deps['parents'][task]:
if dep not in task_deps['tasks']:
bb.debug(2, "Not adding dependeny of %s on %s since %s does not exist" % (task, dep, dep))
continue
parentid = self.gettask_id(fn, dep)
parentids.append(parentid)
taskid = self.gettask_id(fn, task)
@@ -193,15 +178,6 @@ class TaskData:
bb.msg.fatal("TaskData", "Error for %s, dependency %s does not contain ':' character\n. Task 'depends' should be specified in the form 'packagename:task'" % (fn, dep))
ids.append(((self.getbuild_id(dep.split(":")[0])), dep.split(":")[1]))
self.tasks_idepends[taskid].extend(ids)
if 'rdepends' in task_deps and task in task_deps['rdepends']:
ids = []
for dep in task_deps['rdepends'][task].split():
if dep:
if ":" not in dep:
bb.msg.fatal("TaskData", "Error for %s, dependency %s does not contain ':' character\n. Task 'rdepends' should be specified in the form 'packagename:task'" % (fn, dep))
ids.append(((self.getrun_id(dep.split(":")[0])), dep.split(":")[1]))
self.tasks_irdepends[taskid].extend(ids)
# Work out build dependencies
if not fnid in self.depids:
@@ -485,7 +461,6 @@ class TaskData:
providers_list.append(dataCache.pkg_fn[fn])
bb.event.fire(bb.event.MultipleProviders(item, providers_list, runtime=True), cfgData)
self.consider_msgs_cache.append(item)
raise bb.providers.MultipleRProvider(item)
# run through the list until we find one that we can build
for fn in eligible:
@@ -558,11 +533,6 @@ class TaskData:
dependees = self.get_rdependees(targetid)
for fnid in dependees:
self.fail_fnid(fnid, missing_list)
for taskid in xrange(len(self.tasks_irdepends)):
irdepends = self.tasks_irdepends[taskid]
for (idependid, idependtask) in irdepends:
if idependid == targetid:
self.fail_fnid(self.tasks_fnid[taskid], missing_list)
def add_unresolved(self, cfgData, dataCache):
"""
@@ -584,7 +554,7 @@ class TaskData:
try:
self.add_rprovider(cfgData, dataCache, target)
added = added + 1
except (bb.providers.NoRProvider, bb.providers.MultipleRProvider):
except bb.providers.NoRProvider:
self.remove_runtarget(self.getrun_id(target))
logger.debug(1, "Resolved " + str(added) + " extra dependencies")
if added == 0:

View File

@@ -1,369 +0,0 @@
#
# BitBake Test for codeparser.py
#
# Copyright (C) 2010 Chris Larson
# Copyright (C) 2012 Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import unittest
import logging
import bb
logger = logging.getLogger('BitBake.TestCodeParser')
import bb.data
class ReferenceTest(unittest.TestCase):
def setUp(self):
self.d = bb.data.init()
def setEmptyVars(self, varlist):
for k in varlist:
self.d.setVar(k, "")
def setValues(self, values):
for k, v in values.items():
self.d.setVar(k, v)
def assertReferences(self, refs):
self.assertEqual(self.references, refs)
def assertExecs(self, execs):
self.assertEqual(self.execs, execs)
class VariableReferenceTest(ReferenceTest):
def parseExpression(self, exp):
parsedvar = self.d.expandWithRefs(exp, None)
self.references = parsedvar.references
def test_simple_reference(self):
self.setEmptyVars(["FOO"])
self.parseExpression("${FOO}")
self.assertReferences(set(["FOO"]))
def test_nested_reference(self):
self.setEmptyVars(["BAR"])
self.d.setVar("FOO", "BAR")
self.parseExpression("${${FOO}}")
self.assertReferences(set(["FOO", "BAR"]))
def test_python_reference(self):
self.setEmptyVars(["BAR"])
self.parseExpression("${@bb.data.getVar('BAR', d, True) + 'foo'}")
self.assertReferences(set(["BAR"]))
class ShellReferenceTest(ReferenceTest):
def parseExpression(self, exp):
parsedvar = self.d.expandWithRefs(exp, None)
parser = bb.codeparser.ShellParser("ParserTest", logger)
parser.parse_shell(parsedvar.value)
self.references = parsedvar.references
self.execs = parser.execs
def test_quotes_inside_assign(self):
self.parseExpression('foo=foo"bar"baz')
self.assertReferences(set([]))
def test_quotes_inside_arg(self):
self.parseExpression('sed s#"bar baz"#"alpha beta"#g')
self.assertExecs(set(["sed"]))
def test_arg_continuation(self):
self.parseExpression("sed -i -e s,foo,bar,g \\\n *.pc")
self.assertExecs(set(["sed"]))
def test_dollar_in_quoted(self):
self.parseExpression('sed -i -e "foo$" *.pc')
self.assertExecs(set(["sed"]))
def test_quotes_inside_arg_continuation(self):
self.setEmptyVars(["bindir", "D", "libdir"])
self.parseExpression("""
sed -i -e s#"moc_location=.*$"#"moc_location=${bindir}/moc4"# \\
-e s#"uic_location=.*$"#"uic_location=${bindir}/uic4"# \\
${D}${libdir}/pkgconfig/*.pc
""")
self.assertReferences(set(["bindir", "D", "libdir"]))
def test_assign_subshell_expansion(self):
self.parseExpression("foo=$(echo bar)")
self.assertExecs(set(["echo"]))
def test_shell_unexpanded(self):
self.setEmptyVars(["QT_BASE_NAME"])
self.parseExpression('echo "${QT_BASE_NAME}"')
self.assertExecs(set(["echo"]))
self.assertReferences(set(["QT_BASE_NAME"]))
def test_incomplete_varexp_single_quotes(self):
self.parseExpression("sed -i -e 's:IP{:I${:g' $pc")
self.assertExecs(set(["sed"]))
def test_until(self):
self.parseExpression("until false; do echo true; done")
self.assertExecs(set(["false", "echo"]))
self.assertReferences(set())
def test_case(self):
self.parseExpression("""
case $foo in
*)
bar
;;
esac
""")
self.assertExecs(set(["bar"]))
self.assertReferences(set())
def test_assign_exec(self):
self.parseExpression("a=b c='foo bar' alpha 1 2 3")
self.assertExecs(set(["alpha"]))
def test_redirect_to_file(self):
self.setEmptyVars(["foo"])
self.parseExpression("echo foo >${foo}/bar")
self.assertExecs(set(["echo"]))
self.assertReferences(set(["foo"]))
def test_heredoc(self):
self.setEmptyVars(["theta"])
self.parseExpression("""
cat <<END
alpha
beta
${theta}
END
""")
self.assertReferences(set(["theta"]))
def test_redirect_from_heredoc(self):
v = ["B", "SHADOW_MAILDIR", "SHADOW_MAILFILE", "SHADOW_UTMPDIR", "SHADOW_LOGDIR", "bindir"]
self.setEmptyVars(v)
self.parseExpression("""
cat <<END >${B}/cachedpaths
shadow_cv_maildir=${SHADOW_MAILDIR}
shadow_cv_mailfile=${SHADOW_MAILFILE}
shadow_cv_utmpdir=${SHADOW_UTMPDIR}
shadow_cv_logdir=${SHADOW_LOGDIR}
shadow_cv_passwd_dir=${bindir}
END
""")
self.assertReferences(set(v))
self.assertExecs(set(["cat"]))
# def test_incomplete_command_expansion(self):
# self.assertRaises(reftracker.ShellSyntaxError, reftracker.execs,
# bbvalue.shparse("cp foo`", self.d), self.d)
# def test_rogue_dollarsign(self):
# self.setValues({"D" : "/tmp"})
# self.parseExpression("install -d ${D}$")
# self.assertReferences(set(["D"]))
# self.assertExecs(set(["install"]))
class PythonReferenceTest(ReferenceTest):
def setUp(self):
self.d = bb.data.init()
if hasattr(bb.utils, "_context"):
self.context = bb.utils._context
else:
import __builtin__
self.context = __builtin__.__dict__
def parseExpression(self, exp):
parsedvar = self.d.expandWithRefs(exp, None)
parser = bb.codeparser.PythonParser("ParserTest", logger)
parser.parse_python(parsedvar.value)
self.references = parsedvar.references | parser.references
self.execs = parser.execs
@staticmethod
def indent(value):
"""Python Snippets have to be indented, python values don't have to
be. These unit tests are testing snippets."""
return " " + value
def test_getvar_reference(self):
self.parseExpression("bb.data.getVar('foo', d, True)")
self.assertReferences(set(["foo"]))
self.assertExecs(set())
def test_getvar_computed_reference(self):
self.parseExpression("bb.data.getVar('f' + 'o' + 'o', d, True)")
self.assertReferences(set())
self.assertExecs(set())
def test_getvar_exec_reference(self):
self.parseExpression("eval('bb.data.getVar(\"foo\", d, True)')")
self.assertReferences(set())
self.assertExecs(set(["eval"]))
def test_var_reference(self):
self.context["foo"] = lambda x: x
self.setEmptyVars(["FOO"])
self.parseExpression("foo('${FOO}')")
self.assertReferences(set(["FOO"]))
self.assertExecs(set(["foo"]))
del self.context["foo"]
def test_var_exec(self):
for etype in ("func", "task"):
self.d.setVar("do_something", "echo 'hi mom! ${FOO}'")
self.d.setVarFlag("do_something", etype, True)
self.parseExpression("bb.build.exec_func('do_something', d)")
self.assertReferences(set(["do_something"]))
def test_function_reference(self):
self.context["testfunc"] = lambda msg: bb.msg.note(1, None, msg)
self.d.setVar("FOO", "Hello, World!")
self.parseExpression("testfunc('${FOO}')")
self.assertReferences(set(["FOO"]))
self.assertExecs(set(["testfunc"]))
del self.context["testfunc"]
def test_qualified_function_reference(self):
self.parseExpression("time.time()")
self.assertExecs(set(["time.time"]))
def test_qualified_function_reference_2(self):
self.parseExpression("os.path.dirname('/foo/bar')")
self.assertExecs(set(["os.path.dirname"]))
def test_qualified_function_reference_nested(self):
self.parseExpression("time.strftime('%Y%m%d',time.gmtime())")
self.assertExecs(set(["time.strftime", "time.gmtime"]))
def test_function_reference_chained(self):
self.context["testget"] = lambda: "\tstrip me "
self.parseExpression("testget().strip()")
self.assertExecs(set(["testget"]))
del self.context["testget"]
class DependencyReferenceTest(ReferenceTest):
pydata = """
bb.data.getVar('somevar', d, True)
def test(d):
foo = 'bar %s' % 'foo'
def test2(d):
d.getVar(foo, True)
d.getVar('bar', False)
test2(d)
def a():
\"\"\"some
stuff
\"\"\"
return "heh"
test(d)
bb.data.expand(bb.data.getVar("something", False, d), d)
bb.data.expand("${inexpand} somethingelse", d)
bb.data.getVar(a(), d, False)
"""
def test_python(self):
self.d.setVar("FOO", self.pydata)
self.setEmptyVars(["inexpand", "a", "test2", "test"])
self.d.setVarFlags("FOO", {"func": True, "python": True})
deps, values = bb.data.build_dependencies("FOO", set(self.d.keys()), set(), set(), self.d)
self.assertEquals(deps, set(["somevar", "bar", "something", "inexpand", "test", "test2", "a"]))
shelldata = """
foo () {
bar
}
{
echo baz
$(heh)
eval `moo`
}
a=b
c=d
(
true && false
test -f foo
testval=something
$testval
) || aiee
! inverted
echo ${somevar}
case foo in
bar)
echo bar
;;
baz)
echo baz
;;
foo*)
echo foo
;;
esac
"""
def test_shell(self):
execs = ["bar", "echo", "heh", "moo", "true", "aiee"]
self.d.setVar("somevar", "heh")
self.d.setVar("inverted", "echo inverted...")
self.d.setVarFlag("inverted", "func", True)
self.d.setVar("FOO", self.shelldata)
self.d.setVarFlags("FOO", {"func": True})
self.setEmptyVars(execs)
deps, values = bb.data.build_dependencies("FOO", set(self.d.keys()), set(), set(), self.d)
self.assertEquals(deps, set(["somevar", "inverted"] + execs))
def test_vardeps(self):
self.d.setVar("oe_libinstall", "echo test")
self.d.setVar("FOO", "foo=oe_libinstall; eval $foo")
self.d.setVarFlag("FOO", "vardeps", "oe_libinstall")
deps, values = bb.data.build_dependencies("FOO", set(self.d.keys()), set(), set(), self.d)
self.assertEquals(deps, set(["oe_libinstall"]))
def test_vardeps_expand(self):
self.d.setVar("oe_libinstall", "echo test")
self.d.setVar("FOO", "foo=oe_libinstall; eval $foo")
self.d.setVarFlag("FOO", "vardeps", "${@'oe_libinstall'}")
deps, values = bb.data.build_dependencies("FOO", set(self.d.keys()), set(), set(), self.d)
self.assertEquals(deps, set(["oe_libinstall"]))
#Currently no wildcard support
#def test_vardeps_wildcards(self):
# self.d.setVar("oe_libinstall", "echo test")
# self.d.setVar("FOO", "foo=oe_libinstall; eval $foo")
# self.d.setVarFlag("FOO", "vardeps", "oe_*")
# self.assertEquals(deps, set(["oe_libinstall"]))

View File

@@ -1,134 +0,0 @@
#
# BitBake Tests for Copy-on-Write (cow.py)
#
# Copyright 2006 Holger Freyther <freyther@handhelds.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import unittest
import os
class COWTestCase(unittest.TestCase):
"""
Test case for the COW module from mithro
"""
def testGetSet(self):
"""
Test and set
"""
from bb.COW import COWDictBase
a = COWDictBase.copy()
self.assertEquals(False, a.has_key('a'))
a['a'] = 'a'
a['b'] = 'b'
self.assertEquals(True, a.has_key('a'))
self.assertEquals(True, a.has_key('b'))
self.assertEquals('a', a['a'] )
self.assertEquals('b', a['b'] )
def testCopyCopy(self):
"""
Test the copy of copies
"""
from bb.COW import COWDictBase
# create two COW dict 'instances'
b = COWDictBase.copy()
c = COWDictBase.copy()
# assign some keys to one instance, some keys to another
b['a'] = 10
b['c'] = 20
c['a'] = 30
# test separation of the two instances
self.assertEquals(False, c.has_key('c'))
self.assertEquals(30, c['a'])
self.assertEquals(10, b['a'])
# test copy
b_2 = b.copy()
c_2 = c.copy()
self.assertEquals(False, c_2.has_key('c'))
self.assertEquals(10, b_2['a'])
b_2['d'] = 40
self.assertEquals(False, c_2.has_key('d'))
self.assertEquals(True, b_2.has_key('d'))
self.assertEquals(40, b_2['d'])
self.assertEquals(False, b.has_key('d'))
self.assertEquals(False, c.has_key('d'))
c_2['d'] = 30
self.assertEquals(True, c_2.has_key('d'))
self.assertEquals(True, b_2.has_key('d'))
self.assertEquals(30, c_2['d'])
self.assertEquals(40, b_2['d'])
self.assertEquals(False, b.has_key('d'))
self.assertEquals(False, c.has_key('d'))
# test copy of the copy
c_3 = c_2.copy()
b_3 = b_2.copy()
b_3_2 = b_2.copy()
c_3['e'] = 4711
self.assertEquals(4711, c_3['e'])
self.assertEquals(False, c_2.has_key('e'))
self.assertEquals(False, b_3.has_key('e'))
self.assertEquals(False, b_3_2.has_key('e'))
self.assertEquals(False, b_2.has_key('e'))
b_3['e'] = 'viel'
self.assertEquals('viel', b_3['e'])
self.assertEquals(4711, c_3['e'])
self.assertEquals(False, c_2.has_key('e'))
self.assertEquals(True, b_3.has_key('e'))
self.assertEquals(False, b_3_2.has_key('e'))
self.assertEquals(False, b_2.has_key('e'))
def testCow(self):
from bb.COW import COWDictBase
c = COWDictBase.copy()
c['123'] = 1027
c['other'] = 4711
c['d'] = { 'abc' : 10, 'bcd' : 20 }
copy = c.copy()
self.assertEquals(1027, c['123'])
self.assertEquals(4711, c['other'])
self.assertEquals({'abc':10, 'bcd':20}, c['d'])
self.assertEquals(1027, copy['123'])
self.assertEquals(4711, copy['other'])
self.assertEquals({'abc':10, 'bcd':20}, copy['d'])
# cow it now
copy['123'] = 1028
copy['other'] = 4712
copy['d']['abc'] = 20
self.assertEquals(1027, c['123'])
self.assertEquals(4711, c['other'])
self.assertEquals({'abc':10, 'bcd':20}, c['d'])
self.assertEquals(1028, copy['123'])
self.assertEquals(4712, copy['other'])
self.assertEquals({'abc':20, 'bcd':20}, copy['d'])

View File

@@ -1,252 +0,0 @@
#
# BitBake Tests for the Data Store (data.py/data_smart.py)
#
# Copyright (C) 2010 Chris Larson
# Copyright (C) 2012 Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import unittest
import bb
import bb.data
class DataExpansions(unittest.TestCase):
def setUp(self):
self.d = bb.data.init()
self.d["foo"] = "value of foo"
self.d["bar"] = "value of bar"
self.d["value of foo"] = "value of 'value of foo'"
def test_one_var(self):
val = self.d.expand("${foo}")
self.assertEqual(str(val), "value of foo")
def test_indirect_one_var(self):
val = self.d.expand("${${foo}}")
self.assertEqual(str(val), "value of 'value of foo'")
def test_indirect_and_another(self):
val = self.d.expand("${${foo}} ${bar}")
self.assertEqual(str(val), "value of 'value of foo' value of bar")
def test_python_snippet(self):
val = self.d.expand("${@5*12}")
self.assertEqual(str(val), "60")
def test_expand_in_python_snippet(self):
val = self.d.expand("${@'boo ' + '${foo}'}")
self.assertEqual(str(val), "boo value of foo")
def test_python_snippet_getvar(self):
val = self.d.expand("${@d.getVar('foo', True) + ' ${bar}'}")
self.assertEqual(str(val), "value of foo value of bar")
def test_python_snippet_syntax_error(self):
self.d.setVar("FOO", "${@foo = 5}")
self.assertRaises(bb.data_smart.ExpansionError, self.d.getVar, "FOO", True)
def test_python_snippet_runtime_error(self):
self.d.setVar("FOO", "${@int('test')}")
self.assertRaises(bb.data_smart.ExpansionError, self.d.getVar, "FOO", True)
def test_python_snippet_error_path(self):
self.d.setVar("FOO", "foo value ${BAR}")
self.d.setVar("BAR", "bar value ${@int('test')}")
self.assertRaises(bb.data_smart.ExpansionError, self.d.getVar, "FOO", True)
def test_value_containing_value(self):
val = self.d.expand("${@d.getVar('foo', True) + ' ${bar}'}")
self.assertEqual(str(val), "value of foo value of bar")
def test_reference_undefined_var(self):
val = self.d.expand("${undefinedvar} meh")
self.assertEqual(str(val), "${undefinedvar} meh")
def test_double_reference(self):
self.d.setVar("BAR", "bar value")
self.d.setVar("FOO", "${BAR} foo ${BAR}")
val = self.d.getVar("FOO", True)
self.assertEqual(str(val), "bar value foo bar value")
def test_direct_recursion(self):
self.d.setVar("FOO", "${FOO}")
self.assertRaises(bb.data_smart.ExpansionError, self.d.getVar, "FOO", True)
def test_indirect_recursion(self):
self.d.setVar("FOO", "${BAR}")
self.d.setVar("BAR", "${BAZ}")
self.d.setVar("BAZ", "${FOO}")
self.assertRaises(bb.data_smart.ExpansionError, self.d.getVar, "FOO", True)
def test_recursion_exception(self):
self.d.setVar("FOO", "${BAR}")
self.d.setVar("BAR", "${${@'FOO'}}")
self.assertRaises(bb.data_smart.ExpansionError, self.d.getVar, "FOO", True)
def test_incomplete_varexp_single_quotes(self):
self.d.setVar("FOO", "sed -i -e 's:IP{:I${:g' $pc")
val = self.d.getVar("FOO", True)
self.assertEqual(str(val), "sed -i -e 's:IP{:I${:g' $pc")
def test_nonstring(self):
self.d.setVar("TEST", 5)
val = self.d.getVar("TEST", True)
self.assertEqual(str(val), "5")
def test_rename(self):
self.d.renameVar("foo", "newfoo")
self.assertEqual(self.d.getVar("newfoo"), "value of foo")
self.assertEqual(self.d.getVar("foo"), None)
def test_deletion(self):
self.d.delVar("foo")
self.assertEqual(self.d.getVar("foo"), None)
def test_keys(self):
keys = self.d.keys()
self.assertEqual(keys, ['value of foo', 'foo', 'bar'])
class TestNestedExpansions(unittest.TestCase):
def setUp(self):
self.d = bb.data.init()
self.d["foo"] = "foo"
self.d["bar"] = "bar"
self.d["value of foobar"] = "187"
def test_refs(self):
val = self.d.expand("${value of ${foo}${bar}}")
self.assertEqual(str(val), "187")
#def test_python_refs(self):
# val = self.d.expand("${@${@3}**2 + ${@4}**2}")
# self.assertEqual(str(val), "25")
def test_ref_in_python_ref(self):
val = self.d.expand("${@'${foo}' + 'bar'}")
self.assertEqual(str(val), "foobar")
def test_python_ref_in_ref(self):
val = self.d.expand("${${@'f'+'o'+'o'}}")
self.assertEqual(str(val), "foo")
def test_deep_nesting(self):
depth = 100
val = self.d.expand("${" * depth + "foo" + "}" * depth)
self.assertEqual(str(val), "foo")
#def test_deep_python_nesting(self):
# depth = 50
# val = self.d.expand("${@" * depth + "1" + "+1}" * depth)
# self.assertEqual(str(val), str(depth + 1))
def test_mixed(self):
val = self.d.expand("${value of ${@('${foo}'+'bar')[0:3]}${${@'BAR'.lower()}}}")
self.assertEqual(str(val), "187")
def test_runtime(self):
val = self.d.expand("${${@'value of' + ' f'+'o'+'o'+'b'+'a'+'r'}}")
self.assertEqual(str(val), "187")
class TestMemoize(unittest.TestCase):
def test_memoized(self):
d = bb.data.init()
d.setVar("FOO", "bar")
self.assertTrue(d.getVar("FOO") is d.getVar("FOO"))
def test_not_memoized(self):
d1 = bb.data.init()
d2 = bb.data.init()
d1.setVar("FOO", "bar")
d2.setVar("FOO", "bar2")
self.assertTrue(d1.getVar("FOO") is not d2.getVar("FOO"))
def test_changed_after_memoized(self):
d = bb.data.init()
d.setVar("foo", "value of foo")
self.assertEqual(str(d.getVar("foo")), "value of foo")
d.setVar("foo", "second value of foo")
self.assertEqual(str(d.getVar("foo")), "second value of foo")
def test_same_value(self):
d = bb.data.init()
d.setVar("foo", "value of")
d.setVar("bar", "value of")
self.assertEqual(d.getVar("foo"),
d.getVar("bar"))
class TestConcat(unittest.TestCase):
def setUp(self):
self.d = bb.data.init()
self.d.setVar("FOO", "foo")
self.d.setVar("VAL", "val")
self.d.setVar("BAR", "bar")
def test_prepend(self):
self.d.setVar("TEST", "${VAL}")
self.d.prependVar("TEST", "${FOO}:")
self.assertEqual(self.d.getVar("TEST", True), "foo:val")
def test_append(self):
self.d.setVar("TEST", "${VAL}")
self.d.appendVar("TEST", ":${BAR}")
self.assertEqual(self.d.getVar("TEST", True), "val:bar")
def test_multiple_append(self):
self.d.setVar("TEST", "${VAL}")
self.d.prependVar("TEST", "${FOO}:")
self.d.appendVar("TEST", ":val2")
self.d.appendVar("TEST", ":${BAR}")
self.assertEqual(self.d.getVar("TEST", True), "foo:val:val2:bar")
class TestOverrides(unittest.TestCase):
def setUp(self):
self.d = bb.data.init()
self.d.setVar("OVERRIDES", "foo:bar:local")
self.d.setVar("TEST", "testvalue")
def test_no_override(self):
bb.data.update_data(self.d)
self.assertEqual(self.d.getVar("TEST", True), "testvalue")
def test_one_override(self):
self.d.setVar("TEST_bar", "testvalue2")
bb.data.update_data(self.d)
self.assertEqual(self.d.getVar("TEST", True), "testvalue2")
def test_multiple_override(self):
self.d.setVar("TEST_bar", "testvalue2")
self.d.setVar("TEST_local", "testvalue3")
self.d.setVar("TEST_foo", "testvalue4")
bb.data.update_data(self.d)
self.assertEqual(self.d.getVar("TEST", True), "testvalue3")
class TestFlags(unittest.TestCase):
def setUp(self):
self.d = bb.data.init()
self.d.setVar("foo", "value of foo")
self.d.setVarFlag("foo", "flag1", "value of flag1")
self.d.setVarFlag("foo", "flag2", "value of flag2")
def test_setflag(self):
self.assertEqual(self.d.getVarFlag("foo", "flag1"), "value of flag1")
self.assertEqual(self.d.getVarFlag("foo", "flag2"), "value of flag2")
def test_delflag(self):
self.d.delVarFlag("foo", "flag2")
self.assertEqual(self.d.getVarFlag("foo", "flag1"), "value of flag1")
self.assertEqual(self.d.getVarFlag("foo", "flag2"), None)

View File

@@ -1,191 +0,0 @@
#
# BitBake Tests for the Fetcher (fetch2/)
#
# Copyright (C) 2012 Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import unittest
import tempfile
import subprocess
import os
import bb
class FetcherTest(unittest.TestCase):
replaceuris = {
("git://git.invalid.infradead.org/mtd-utils.git;tag=1234567890123456789012345678901234567890", "git://.*/.*", "http://somewhere.org/somedir/")
: "http://somewhere.org/somedir/git2_git.invalid.infradead.org.mtd-utils.git.tar.gz",
("git://git.invalid.infradead.org/mtd-utils.git;tag=1234567890123456789012345678901234567890", "git://.*/([^/]+/)*([^/]*)", "git://somewhere.org/somedir/\\2;protocol=http")
: "git://somewhere.org/somedir/mtd-utils.git;tag=1234567890123456789012345678901234567890;protocol=http",
("git://git.invalid.infradead.org/foo/mtd-utils.git;tag=1234567890123456789012345678901234567890", "git://.*/([^/]+/)*([^/]*)", "git://somewhere.org/somedir/\\2;protocol=http")
: "git://somewhere.org/somedir/mtd-utils.git;tag=1234567890123456789012345678901234567890;protocol=http",
("git://git.invalid.infradead.org/foo/mtd-utils.git;tag=1234567890123456789012345678901234567890", "git://.*/([^/]+/)*([^/]*)", "git://somewhere.org/\\2;protocol=http")
: "git://somewhere.org/mtd-utils.git;tag=1234567890123456789012345678901234567890;protocol=http",
("git://someserver.org/bitbake;tag=1234567890123456789012345678901234567890", "git://someserver.org/bitbake", "git://git.openembedded.org/bitbake")
: "git://git.openembedded.org/bitbake;tag=1234567890123456789012345678901234567890",
("file://sstate-xyz.tgz", "file://.*", "file:///somewhere/1234/sstate-cache")
: "file:///somewhere/1234/sstate-cache/sstate-xyz.tgz",
("file://sstate-xyz.tgz", "file://.*", "file:///somewhere/1234/sstate-cache/")
: "file:///somewhere/1234/sstate-cache/sstate-xyz.tgz",
("http://somewhere.org/somedir1/somedir2/somefile_1.2.3.tar.gz", "http://.*/.*", "http://somewhere2.org/somedir3")
: "http://somewhere2.org/somedir3/somefile_1.2.3.tar.gz",
("http://somewhere.org/somedir1/somefile_1.2.3.tar.gz", "http://somewhere.org/somedir1/somefile_1.2.3.tar.gz", "http://somewhere2.org/somedir3/somefile_1.2.3.tar.gz")
: "http://somewhere2.org/somedir3/somefile_1.2.3.tar.gz",
("http://www.apache.org/dist/subversion/subversion-1.7.1.tar.bz2", "http://www.apache.org/dist", "http://archive.apache.org/dist")
: "http://archive.apache.org/dist/subversion/subversion-1.7.1.tar.bz2",
("http://www.apache.org/dist/subversion/subversion-1.7.1.tar.bz2", "http://.*/.*", "file:///somepath/downloads/")
: "file:///somepath/downloads/subversion-1.7.1.tar.bz2",
("git://git.invalid.infradead.org/mtd-utils.git;tag=1234567890123456789012345678901234567890", "git://.*/.*", "git://somewhere.org/somedir/BASENAME;protocol=http")
: "git://somewhere.org/somedir/mtd-utils.git;tag=1234567890123456789012345678901234567890;protocol=http",
("git://git.invalid.infradead.org/foo/mtd-utils.git;tag=1234567890123456789012345678901234567890", "git://.*/.*", "git://somewhere.org/somedir/BASENAME;protocol=http")
: "git://somewhere.org/somedir/mtd-utils.git;tag=1234567890123456789012345678901234567890;protocol=http",
("git://git.invalid.infradead.org/foo/mtd-utils.git;tag=1234567890123456789012345678901234567890", "git://.*/.*", "git://somewhere.org/somedir/MIRRORNAME;protocol=http")
: "git://somewhere.org/somedir/git.invalid.infradead.org.foo.mtd-utils.git;tag=1234567890123456789012345678901234567890;protocol=http",
#Renaming files doesn't work
#("http://somewhere.org/somedir1/somefile_1.2.3.tar.gz", "http://somewhere.org/somedir1/somefile_1.2.3.tar.gz", "http://somewhere2.org/somedir3/somefile_2.3.4.tar.gz") : "http://somewhere2.org/somedir3/somefile_2.3.4.tar.gz"
#("file://sstate-xyz.tgz", "file://.*/.*", "file:///somewhere/1234/sstate-cache") : "file:///somewhere/1234/sstate-cache/sstate-xyz.tgz",
}
mirrorvar = "http://.*/.* file:///somepath/downloads/ \n" \
"git://someserver.org/bitbake git://git.openembedded.org/bitbake \n" \
"https://.*/.* file:///someotherpath/downloads/ \n" \
"http://.*/.* file:///someotherpath/downloads/ \n"
def setUp(self):
self.d = bb.data.init()
self.tempdir = tempfile.mkdtemp()
self.dldir = os.path.join(self.tempdir, "download")
os.mkdir(self.dldir)
self.d.setVar("DL_DIR", self.dldir)
self.unpackdir = os.path.join(self.tempdir, "unpacked")
os.mkdir(self.unpackdir)
persistdir = os.path.join(self.tempdir, "persistdata")
self.d.setVar("PERSISTENT_DIR", persistdir)
def tearDown(self):
bb.utils.prunedir(self.tempdir)
def test_fetch(self):
fetcher = bb.fetch.Fetch(["http://downloads.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz", "http://downloads.yoctoproject.org/releases/bitbake/bitbake-1.1.tar.gz"], self.d)
fetcher.download()
self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.0.tar.gz"), 57749)
self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.1.tar.gz"), 57892)
self.d.setVar("BB_NO_NETWORK", "1")
fetcher = bb.fetch.Fetch(["http://downloads.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz", "http://downloads.yoctoproject.org/releases/bitbake/bitbake-1.1.tar.gz"], self.d)
fetcher.download()
fetcher.unpack(self.unpackdir)
self.assertEqual(len(os.listdir(self.unpackdir + "/bitbake-1.0/")), 9)
self.assertEqual(len(os.listdir(self.unpackdir + "/bitbake-1.1/")), 9)
def test_fetch_mirror(self):
self.d.setVar("MIRRORS", "http://.*/.* http://downloads.yoctoproject.org/releases/bitbake")
fetcher = bb.fetch.Fetch(["http://invalid.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz"], self.d)
fetcher.download()
self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.0.tar.gz"), 57749)
def test_fetch_premirror(self):
self.d.setVar("PREMIRRORS", "http://.*/.* http://downloads.yoctoproject.org/releases/bitbake")
fetcher = bb.fetch.Fetch(["http://invalid.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz"], self.d)
fetcher.download()
self.assertEqual(os.path.getsize(self.dldir + "/bitbake-1.0.tar.gz"), 57749)
def gitfetcher(self, url1, url2):
def checkrevision(self, fetcher):
fetcher.unpack(self.unpackdir)
revision = subprocess.check_output("git rev-parse HEAD", shell=True, cwd=self.unpackdir + "/git").strip()
self.assertEqual(revision, "270a05b0b4ba0959fe0624d2a4885d7b70426da5")
self.d.setVar("BB_GENERATE_MIRROR_TARBALLS", "1")
self.d.setVar("SRCREV", "270a05b0b4ba0959fe0624d2a4885d7b70426da5")
fetcher = bb.fetch.Fetch([url1], self.d)
fetcher.download()
checkrevision(self, fetcher)
# Wipe out the dldir clone and the unpacked source, turn off the network and check mirror tarball works
bb.utils.prunedir(self.dldir + "/git2/")
bb.utils.prunedir(self.unpackdir)
self.d.setVar("BB_NO_NETWORK", "1")
fetcher = bb.fetch.Fetch([url2], self.d)
fetcher.download()
checkrevision(self, fetcher)
def test_gitfetch(self):
url1 = url2 = "git://git.openembedded.org/bitbake"
self.gitfetcher(url1, url2)
def test_gitfetch_premirror(self):
url1 = "git://git.openembedded.org/bitbake"
url2 = "git://someserver.org/bitbake"
self.d.setVar("PREMIRRORS", "git://someserver.org/bitbake git://git.openembedded.org/bitbake \n")
self.gitfetcher(url1, url2)
def test_gitfetch_premirror2(self):
url1 = url2 = "git://someserver.org/bitbake"
self.d.setVar("PREMIRRORS", "git://someserver.org/bitbake git://git.openembedded.org/bitbake \n")
self.gitfetcher(url1, url2)
def test_gitfetch_premirror3(self):
realurl = "git://git.openembedded.org/bitbake"
dummyurl = "git://someserver.org/bitbake"
self.sourcedir = self.unpackdir.replace("unpacked", "sourcemirror.git")
os.chdir(self.tempdir)
subprocess.check_output("git clone %s %s 2> /dev/null" % (realurl, self.sourcedir), shell=True)
self.d.setVar("PREMIRRORS", "%s git://%s;protocol=file \n" % (dummyurl, self.sourcedir))
self.gitfetcher(dummyurl, dummyurl)
def test_urireplace(self):
for k, v in self.replaceuris.items():
ud = bb.fetch.FetchData(k[0], self.d)
ud.setup_localpath(self.d)
mirrors = bb.fetch2.mirror_from_string("%s %s" % (k[1], k[2]))
newuris, uds = bb.fetch2.build_mirroruris(ud, mirrors, self.d)
self.assertEqual([v], newuris)
def test_urilist1(self):
fetcher = bb.fetch.FetchData("http://downloads.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz", self.d)
mirrors = bb.fetch2.mirror_from_string(self.mirrorvar)
uris, uds = bb.fetch2.build_mirroruris(fetcher, mirrors, self.d)
self.assertEqual(uris, ['file:///somepath/downloads/bitbake-1.0.tar.gz', 'file:///someotherpath/downloads/bitbake-1.0.tar.gz'])
def test_urilist2(self):
# Catch https:// -> file:// bug
fetcher = bb.fetch.FetchData("https://downloads.yoctoproject.org/releases/bitbake/bitbake-1.0.tar.gz", self.d)
mirrors = bb.fetch2.mirror_from_string(self.mirrorvar)
uris, uds = bb.fetch2.build_mirroruris(fetcher, mirrors, self.d)
self.assertEqual(uris, ['file:///someotherpath/downloads/bitbake-1.0.tar.gz'])
class URLHandle(unittest.TestCase):
datatable = {
"http://www.google.com/index.html" : ('http', 'www.google.com', '/index.html', '', '', {}),
"cvs://anoncvs@cvs.handhelds.org/cvs;module=familiar/dist/ipkg" : ('cvs', 'cvs.handhelds.org', '/cvs', 'anoncvs', '', {'module': 'familiar/dist/ipkg'}),
"cvs://anoncvs:anonymous@cvs.handhelds.org/cvs;tag=V0-99-81;module=familiar/dist/ipkg" : ('cvs', 'cvs.handhelds.org', '/cvs', 'anoncvs', 'anonymous', {'tag': 'V0-99-81', 'module': 'familiar/dist/ipkg'})
}
def test_decodeurl(self):
for k, v in self.datatable.items():
result = bb.fetch.decodeurl(k)
self.assertEqual(result, v)
def test_encodeurl(self):
for k, v in self.datatable.items():
result = bb.fetch.encodeurl(v)
self.assertEqual(result, k)

View File

@@ -1,36 +0,0 @@
#
# BitBake Tests for utils.py
#
# Copyright (C) 2012 Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import unittest
import bb
class VerCmpString(unittest.TestCase):
def test_vercmpstring(self):
result = bb.utils.vercmp_string('1', '2')
self.assertTrue(result < 0)
result = bb.utils.vercmp_string('2', '1')
self.assertTrue(result > 0)
result = bb.utils.vercmp_string('1', '1.0')
self.assertTrue(result < 0)
result = bb.utils.vercmp_string('1', '1.1')
self.assertTrue(result < 0)
result = bb.utils.vercmp_string('1.1', '1_p2')
self.assertTrue(result < 0)

View File

@@ -23,13 +23,11 @@
import gtk
import pango
import gobject
import bb.process
from bb.ui.crumbs.progressbar import HobProgressBar
from bb.ui.crumbs.hobwidget import hic, HobNotebook, HobAltButton, HobWarpCellRendererText, HobButton, HobInfoButton
from bb.ui.crumbs.hobwidget import hic, HobNotebook, HobAltButton, HobWarpCellRendererText
from bb.ui.crumbs.runningbuild import RunningBuildTreeView
from bb.ui.crumbs.runningbuild import BuildFailureTreeView
from bb.ui.crumbs.hobpages import HobPage
from bb.ui.crumbs.hobcolor import HobColors
class BuildConfigurationTreeView(gtk.TreeView):
def __init__ (self):
@@ -98,10 +96,11 @@ class BuildConfigurationTreeView(gtk.TreeView):
for path in src_config_info.layers:
import os, os.path
if os.path.exists(path):
branch = bb.process.run('cd %s; git branch | grep "^* " | tr -d "* "' % path)[0]
if branch:
branch = branch.strip('\n')
f = os.popen('cd %s; git branch 2>&1 | grep "^* " | tr -d "* "' % path)
if f:
branch = f.readline().lstrip('\n').rstrip('\n')
vars.append(self.set_vars("Branch:", branch))
f.close()
break
self.set_config_model(vars)
@@ -145,7 +144,7 @@ class BuildDetailsPage (HobPage):
self.scrolled_view_config = gtk.ScrolledWindow ()
self.scrolled_view_config.set_policy(gtk.POLICY_NEVER, gtk.POLICY_ALWAYS)
self.scrolled_view_config.add(self.config_tv)
self.notebook.append_page(self.scrolled_view_config, "Build configuration")
self.notebook.append_page(self.scrolled_view_config, gtk.Label("Build configuration"))
self.failure_tv = BuildFailureTreeView()
self.failure_model = self.builder.handler.build.model.failure_model()
@@ -153,19 +152,19 @@ class BuildDetailsPage (HobPage):
self.scrolled_view_failure = gtk.ScrolledWindow ()
self.scrolled_view_failure.set_policy(gtk.POLICY_NEVER, gtk.POLICY_ALWAYS)
self.scrolled_view_failure.add(self.failure_tv)
self.notebook.append_page(self.scrolled_view_failure, "Issues")
self.notebook.append_page(self.scrolled_view_failure, gtk.Label("Issues"))
self.build_tv = RunningBuildTreeView(readonly=True, hob=True)
self.build_tv.set_model(self.builder.handler.build.model)
self.scrolled_view_build = gtk.ScrolledWindow ()
self.scrolled_view_build.set_policy(gtk.POLICY_NEVER, gtk.POLICY_ALWAYS)
self.scrolled_view_build.add(self.build_tv)
self.notebook.append_page(self.scrolled_view_build, "Log")
self.notebook.append_page(self.scrolled_view_build, gtk.Label("Log"))
self.builder.handler.build.model.connect_after("row-changed", self.scroll_to_present_row, self.scrolled_view_build.get_vadjustment(), self.build_tv)
self.button_box = gtk.HBox(False, 6)
self.back_button = HobAltButton('<< Back')
self.back_button = HobAltButton("<< Back to image configuration")
self.back_button.connect("clicked", self.back_button_clicked_cb)
self.button_box.pack_start(self.back_button, expand=False, fill=False)
@@ -199,70 +198,6 @@ class BuildDetailsPage (HobPage):
for child in children:
self.remove(child)
def add_build_fail_top_bar(self, actions, log_file=None):
primary_action = "Edit %s" % actions
self.notebook.set_page("Issues")
color = HobColors.ERROR
build_fail_top = gtk.EventBox()
build_fail_top.set_size_request(-1, 200)
build_fail_top.modify_bg(gtk.STATE_NORMAL, gtk.gdk.color_parse(color))
build_fail_tab = gtk.Table(14, 46, True)
build_fail_top.add(build_fail_tab)
icon = gtk.Image()
icon_pix_buffer = gtk.gdk.pixbuf_new_from_file(hic.ICON_INDI_ERROR_FILE)
icon.set_from_pixbuf(icon_pix_buffer)
build_fail_tab.attach(icon, 1, 4, 0, 6)
label = gtk.Label()
label.set_alignment(0.0, 0.5)
label.set_markup("<span size='x-large'><b>%s</b></span>" % self.title)
build_fail_tab.attach(label, 4, 26, 0, 6)
label = gtk.Label()
label.set_alignment(0.0, 0.5)
label.set_markup("<span size='medium'>Check the \"Issues\" information for more details</span>")
build_fail_tab.attach(label, 4, 40, 4, 9)
# create button 'Edit packages'
action_button = HobButton(primary_action)
action_button.set_size_request(-1, 40)
action_button.connect('clicked', self.failure_primary_action_button_clicked_cb, primary_action)
build_fail_tab.attach(action_button, 4, 13, 9, 12)
if log_file:
open_log_button = HobAltButton("Open log")
open_log_button.set_relief(gtk.RELIEF_HALF)
open_log_button.connect('clicked', self.failure_open_log_button_clicked_cb, log_file)
build_fail_tab.attach(open_log_button, 14, 23, 9, 12)
attach_pos = (24 if log_file else 14)
file_bug_button = HobAltButton('File a bug')
file_bug_button.set_relief(gtk.RELIEF_HALF)
file_bug_button.connect('clicked', self.failure_activate_file_bug_link_cb)
build_fail_tab.attach(file_bug_button, attach_pos, attach_pos + 9, 9, 12)
return build_fail_top
def show_fail_page(self, title, action_names):
self._remove_all_widget()
self.title = "Hob cannot build your %s" % title
self.build_fail_bar = self.add_build_fail_top_bar(action_names, self.builder.current_logfile)
self.pack_start(self.group_align, expand=True, fill=True)
self.box_group_area.pack_start(self.build_fail_bar, expand=False, fill=False)
self.box_group_area.pack_start(self.vbox, expand=True, fill=True)
self.vbox.pack_start(self.notebook, expand=True, fill=True)
self.box_group_area.pack_end(self.button_box, expand=False, fill=False)
self.show_all()
self.back_button.hide()
def show_page(self, step):
self._remove_all_widget()
if step == self.builder.PACKAGE_GENERATING or step == self.builder.FAST_IMAGE_GENERATING:
@@ -316,18 +251,3 @@ class BuildDetailsPage (HobPage):
def show_configurations(self, configurations, params):
self.config_tv.show(configurations, params)
def failure_primary_action_button_clicked_cb(self, button, action):
if "Edit recipes" in action:
self.builder.show_recipes()
elif "Edit packages" in action:
self.builder.show_packages()
elif "Edit image configuration" in action:
self.builder.show_configuration()
def failure_open_log_button_clicked_cb(self, button, log_file):
if log_file:
os.system("xdg-open /%s" % log_file)
def failure_activate_file_bug_link_cb(self, button):
button.child.emit('activate-link', "http://bugzilla.yoctoproject.org")

View File

@@ -26,8 +26,6 @@ import copy
import os
import subprocess
import shlex
import re
import logging
from bb.ui.crumbs.template import TemplateMgr
from bb.ui.crumbs.imageconfigurationpage import ImageConfigurationPage
from bb.ui.crumbs.recipeselectionpage import RecipeSelectionPage
@@ -41,49 +39,9 @@ from bb.ui.crumbs.hig import CrumbsMessageDialog, ImageSelectionDialog, \
from bb.ui.crumbs.persistenttooltip import PersistentTooltip
import bb.ui.crumbs.utils
hobVer = 20120808
class Configuration:
'''Represents the data structure of configuration.'''
@classmethod
def parse_proxy_string(cls, proxy):
pattern = "^\s*((http|https|ftp|git|cvs)://)?((\S+):(\S+)@)?(\S+):(\d+)/?"
match = re.search(pattern, proxy)
if match:
return match.group(2), match.group(4), match.group(5), match.group(6), match.group(7)
else:
return None, None, None, "", ""
@classmethod
def make_host_string(cls, prot, user, passwd, host, default_prot=""):
if host == None or host == "":
return ""
passwd = passwd or ""
if user != None and user != "":
if prot == None or prot == "":
prot = default_prot
return prot + "://" + user + ":" + passwd + "@" + host
else:
if prot == None or prot == "":
return host
else:
return prot + "://" + host
@classmethod
def make_port_string(cls, port):
port = port or ""
return port
@classmethod
def make_proxy_string(cls, prot, user, passwd, host, port, default_prot=""):
if host == None or host == "" or port == None or port == "":
return ""
return Configuration.make_host_string(prot, user, passwd, host, default_prot) + ":" + Configuration.make_port_string(port)
def __init__(self):
self.curr_mach = ""
# settings
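For reference, the parse_proxy_string() regex shown above splits a proxy URL into protocol, user, password, host and port via groups 2, 4, 5, 6 and 7. A quick demonstration with a made-up proxy URL (not a real host):

    import re

    pattern = r"^\s*((http|https|ftp|git|cvs)://)?((\S+):(\S+)@)?(\S+):(\d+)/?"
    m = re.search(pattern, "http://alice:secret@proxy.example.com:8080/")
    if m:
        prot, user, passwd, host, port = (m.group(2), m.group(4), m.group(5),
                                          m.group(6), m.group(7))
        print("%s %s %s %s %s" % (prot, user, passwd, host, port))
        # -> http alice secret proxy.example.com 8080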
@@ -109,43 +67,15 @@ class Configuration:
self.default_task = "build"
# proxy settings
self.enable_proxy = None
self.same_proxy = False
self.proxies = {
"http" : [None, None, None, "", ""], # protocol : [prot, user, passwd, host, port]
"https" : [None, None, None, "", ""],
"ftp" : [None, None, None, "", ""],
"git" : [None, None, None, "", ""],
"cvs" : [None, None, None, "", ""],
}
self.all_proxy = self.http_proxy = self.ftp_proxy = self.https_proxy = ""
self.git_proxy_host = self.git_proxy_port = ""
self.cvs_proxy_host = self.cvs_proxy_port = ""
def clear_selection(self):
self.selected_image = None
self.selected_recipes = []
self.selected_packages = []
def split_proxy(self, protocol, proxy):
entry = []
prot, user, passwd, host, port = Configuration.parse_proxy_string(proxy)
entry.append(prot)
entry.append(user)
entry.append(passwd)
entry.append(host)
entry.append(port)
self.proxies[protocol] = entry
def combine_proxy(self, protocol):
entry = self.proxies[protocol]
return Configuration.make_proxy_string(entry[0], entry[1], entry[2], entry[3], entry[4], protocol)
def combine_host_only(self, protocol):
entry = self.proxies[protocol]
return Configuration.make_host_string(entry[0], entry[1], entry[2], entry[3], protocol)
def combine_port_only(self, protocol):
entry = self.proxies[protocol]
return Configuration.make_port_string(entry[4])
def update(self, params):
# settings
self.curr_distro = params["distro"]
@@ -169,14 +99,14 @@ class Configuration:
self.default_task = params["default_task"]
# proxy settings
self.enable_proxy = params["http_proxy"] != "" or params["https_proxy"] != "" or params["ftp_proxy"] != "" \
or params["git_proxy_host"] != "" or params["git_proxy_port"] != "" \
or params["cvs_proxy_host"] != "" or params["cvs_proxy_port"] != ""
self.split_proxy("http", params["http_proxy"])
self.split_proxy("https", params["https_proxy"])
self.split_proxy("ftp", params["ftp_proxy"])
self.split_proxy("git", params["git_proxy_host"] + ":" + params["git_proxy_port"])
self.split_proxy("cvs", params["cvs_proxy_host"] + ":" + params["cvs_proxy_port"])
self.all_proxy = params["all_proxy"]
self.http_proxy = params["http_proxy"]
self.ftp_proxy = params["ftp_proxy"]
self.https_proxy = params["https_proxy"]
self.git_proxy_host = params["git_proxy_host"]
self.git_proxy_port = params["git_proxy_port"]
self.cvs_proxy_host = params["cvs_proxy_host"]
self.cvs_proxy_port = params["cvs_proxy_port"]
def load(self, template):
self.curr_mach = template.getVar("MACHINE")
@@ -216,16 +146,16 @@ class Configuration:
self.selected_recipes = template.getVar("DEPENDS").split()
self.selected_packages = template.getVar("IMAGE_INSTALL").split()
# proxy
self.enable_proxy = eval(template.getVar("enable_proxy"))
self.same_proxy = eval(template.getVar("use_same_proxy"))
self.split_proxy("http", template.getVar("http_proxy"))
self.split_proxy("https", template.getVar("https_proxy"))
self.split_proxy("ftp", template.getVar("ftp_proxy"))
self.split_proxy("git", template.getVar("GIT_PROXY_HOST") + ":" + template.getVar("GIT_PROXY_PORT"))
self.split_proxy("cvs", template.getVar("CVS_PROXY_HOST") + ":" + template.getVar("CVS_PROXY_PORT"))
self.all_proxy = template.getVar("all_proxy")
self.http_proxy = template.getVar("http_proxy")
self.ftp_proxy = template.getVar("ftp_proxy")
self.https_proxy = template.getVar("https_proxy")
self.git_proxy_host = template.getVar("GIT_PROXY_HOST")
self.git_proxy_port = template.getVar("GIT_PROXY_PORT")
self.cvs_proxy_host = template.getVar("CVS_PROXY_HOST")
self.cvs_proxy_port = template.getVar("CVS_PROXY_PORT")
def save(self, template, defaults=False):
template.setVar("VERSION", "%s" % hobVer)
# bblayers.conf
template.setVar("BBLAYERS", " ".join(self.layers))
# local.conf
@@ -253,15 +183,14 @@ class Configuration:
template.setVar("DEPENDS", self.selected_recipes)
template.setVar("IMAGE_INSTALL", self.user_selected_packages)
# proxy
template.setVar("enable_proxy", self.enable_proxy)
template.setVar("use_same_proxy", self.same_proxy)
template.setVar("http_proxy", self.combine_proxy("http"))
template.setVar("https_proxy", self.combine_proxy("https"))
template.setVar("ftp_proxy", self.combine_proxy("ftp"))
template.setVar("GIT_PROXY_HOST", self.combine_host_only("git"))
template.setVar("GIT_PROXY_PORT", self.combine_port_only("git"))
template.setVar("CVS_PROXY_HOST", self.combine_host_only("cvs"))
template.setVar("CVS_PROXY_PORT", self.combine_port_only("cvs"))
template.setVar("all_proxy", self.all_proxy)
template.setVar("http_proxy", self.http_proxy)
template.setVar("ftp_proxy", self.ftp_proxy)
template.setVar("https_proxy", self.https_proxy)
template.setVar("GIT_PROXY_HOST", self.git_proxy_host)
template.setVar("GIT_PROXY_PORT", self.git_proxy_port)
template.setVar("CVS_PROXY_HOST", self.cvs_proxy_host)
template.setVar("CVS_PROXY_PORT", self.cvs_proxy_port)
class Parameters:
'''Represents other variables like available machines, etc.'''
@@ -283,8 +212,7 @@ class Parameters:
self.all_sdk_machines = []
self.all_layers = []
self.image_names = []
self.image_white_pattern = ""
self.image_black_pattern = ""
self.enable_proxy = False
# for build log to show
self.bb_version = ""
@@ -302,9 +230,6 @@ class Parameters:
self.runnable_machine_patterns = params["runnable_machine_patterns"].split()
self.deployable_image_types = params["deployable_image_types"].split()
self.tmpdir = params["tmpdir"]
self.image_white_pattern = params["image_white_pattern"]
self.image_black_pattern = params["image_black_pattern"]
self.kernel_image_type = params["kernel_image_type"]
# for build log to show
self.bb_version = params["bb_version"]
self.target_arch = params["target_arch"]
@@ -376,15 +301,6 @@ class Builder(gtk.Window):
END_NOOP : None,
}
@classmethod
def interpret_markup(cls, msg):
msg = msg.replace('&', '&amp;')
msg = msg.replace('<', '&lt;')
msg = msg.replace('>', '&gt;')
msg = msg.replace('"', '&quot;')
msg = msg.replace("'", "&acute;")
return msg
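interpret_markup() escapes the characters that Pango markup treats specially, so arbitrary BitBake messages can be embedded in dialog labels. A standalone sketch with a hypothetical message:

# Sketch of the escaping performed by interpret_markup() (sample message is hypothetical).
def interpret_markup(msg):
    # '&' must be escaped first so the other entities are not double-escaped;
    # the original maps "'" to "&acute;", which is kept here as-is.
    for raw, escaped in (('&', '&amp;'), ('<', '&lt;'), ('>', '&gt;'),
                         ('"', '&quot;'), ("'", '&acute;')):
        msg = msg.replace(raw, escaped)
    return msg

print(interpret_markup('Nothing PROVIDES "foo" & <bar>'))
# Nothing PROVIDES &quot;foo&quot; &amp; &lt;bar&gt;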
def __init__(self, hobHandler, recipe_model, package_model):
super(Builder, self).__init__()
@@ -396,11 +312,6 @@ class Builder(gtk.Window):
self.template = None
# logger
self.logger = logging.getLogger("BitBake")
self.consolelog = None
self.current_logfile = None
# configuration and parameters
self.configuration = Configuration()
self.parameters = Parameters()
@@ -436,18 +347,13 @@ class Builder(gtk.Window):
self.handler.build.connect("build-started", self.handler_build_started_cb)
self.handler.build.connect("build-succeeded", self.handler_build_succeeded_cb)
self.handler.build.connect("build-failed", self.handler_build_failed_cb)
self.handler.build.connect("build-aborted", self.handler_build_aborted_cb)
self.handler.build.connect("task-started", self.handler_task_started_cb)
self.handler.build.connect("log-error", self.handler_build_failure_cb)
self.handler.build.connect("log", self.handler_build_log_cb)
self.handler.build.connect("no-provider", self.handler_no_provider_cb)
self.handler.connect("generating-data", self.handler_generating_data_cb)
self.handler.connect("data-generated", self.handler_data_generated_cb)
self.handler.connect("command-succeeded", self.handler_command_succeeded_cb)
self.handler.connect("command-failed", self.handler_command_failed_cb)
self.handler.connect("sanity-failed", self.handler_sanity_failed_cb)
self.handler.connect("recipe-populated", self.handler_recipe_populated_cb)
self.handler.connect("package-populated", self.handler_package_populated_cb)
self.handler.set_config_filter(hob_conf_filter)
@@ -489,7 +395,7 @@ class Builder(gtk.Window):
def initiate_new_build_async(self):
self.switch_page(self.MACHINE_SELECTION)
if self.load_template(TemplateMgr.convert_to_template_pathfilename("default", ".hob/")) == False:
if self.load_template(TemplateMgr.convert_to_template_pathfilename("default", ".hob/")) == None:
self.handler.init_cooker()
self.handler.set_extra_inherit("image_types")
self.handler.generate_configuration()
@@ -499,49 +405,37 @@ class Builder(gtk.Window):
self.set_user_config()
self.handler.generate_configuration()
def sanity_check(self):
self.handler.trigger_sanity_check()
def populate_recipe_package_info_async(self):
self.switch_page(self.RCPPKGINFO_POPULATING)
# Parse recipes
self.set_user_config()
self.handler.generate_recipes()
def generate_packages_async(self, log = False):
def generate_packages_async(self):
self.switch_page(self.PACKAGE_GENERATING)
if log:
self.current_logfile = self.handler.get_logfile()
self.do_log(self.current_logfile)
# Build packages
_, all_recipes = self.recipe_model.get_selected_recipes()
self.set_user_config()
self.handler.reset_build()
self.handler.generate_packages(all_recipes, self.configuration.default_task)
def fast_generate_image_async(self, log = False):
def fast_generate_image_async(self):
self.switch_page(self.FAST_IMAGE_GENERATING)
if log:
self.current_logfile = self.handler.get_logfile()
self.do_log(self.current_logfile)
# Build packages
_, all_recipes = self.recipe_model.get_selected_recipes()
self.set_user_config()
self.handler.reset_build()
self.handler.generate_packages(all_recipes, self.configuration.default_task)
def generate_image_async(self, cont = False):
def generate_image_async(self):
self.switch_page(self.IMAGE_GENERATING)
self.handler.reset_build()
if not cont:
self.current_logfile = self.handler.get_logfile()
self.do_log(self.current_logfile)
# Build image
self.set_user_config()
toolchain_packages = []
if self.configuration.toolchain_build:
toolchain_packages = self.package_model.get_selected_packages_toolchain()
if self.configuration.selected_image == self.recipe_model.__custom_image__:
if self.configuration.selected_image == self.recipe_model.__dummy_image__:
packages = self.package_model.get_selected_packages()
image = self.hob_image
else:
@@ -567,16 +461,9 @@ class Builder(gtk.Window):
def load_template(self, path):
if not os.path.isfile(path):
return False
return None
self.template = TemplateMgr()
# check compatibility
tempVer = self.template.getVersion(path)
if not tempVer or int(tempVer) < hobVer:
self.template.destroy()
self.template = None
return False
try:
self.template.load(path)
self.configuration.load(self.template)
@@ -640,29 +527,17 @@ class Builder(gtk.Window):
self.image_configuration_page.show_baseimg_selected()
elif next_step == self.RECIPE_SELECTION:
if self.recipe_model.get_selected_image() == self.recipe_model.__custom_image__:
self.recipe_details_page.set_recipe_curr_tab(self.recipe_details_page.ALL)
else:
self.recipe_details_page.set_recipe_curr_tab(self.recipe_details_page.INCLUDED)
pass
elif next_step == self.PACKAGE_SELECTION:
if self.recipe_model.get_selected_image() == self.recipe_model.__custom_image__:
self.package_details_page.set_packages_curr_tab(self.package_details_page.ALL)
else:
self.package_details_page.set_packages_curr_tab(self.package_details_page.INCLUDED)
self.package_details_page.show_page(self.current_logfile)
pass
elif next_step == self.PACKAGE_GENERATING or next_step == self.FAST_IMAGE_GENERATING:
# both PACKAGE_GENERATING and FAST_IMAGE_GENERATING share the same page
# both PACKAGE_GENEATING and FAST_IMAGE_GENERATING share the same page
self.build_details_page.show_page(next_step)
elif next_step == self.PACKAGE_GENERATED:
if self.recipe_model.get_selected_image() == self.recipe_model.__custom_image__:
self.package_details_page.set_packages_curr_tab(self.package_details_page.ALL)
else:
self.package_details_page.set_packages_curr_tab(self.package_details_page.INCLUDED)
self.package_details_page.show_page(self.current_logfile)
pass
elif next_step == self.IMAGE_GENERATING:
# after packages are generated, selected_packages need to
@@ -700,18 +575,13 @@ class Builder(gtk.Window):
self.handler.set_extra_inherit("packageinfo")
self.handler.set_extra_inherit("image_types")
# set proxies
if self.configuration.enable_proxy == True:
self.handler.set_http_proxy(self.configuration.combine_proxy("http"))
self.handler.set_https_proxy(self.configuration.combine_proxy("https"))
self.handler.set_ftp_proxy(self.configuration.combine_proxy("ftp"))
self.handler.set_git_proxy(self.configuration.combine_host_only("git"), self.configuration.combine_port_only("git"))
self.handler.set_cvs_proxy(self.configuration.combine_host_only("cvs"), self.configuration.combine_port_only("cvs"))
elif self.configuration.enable_proxy == False:
self.handler.set_http_proxy("")
self.handler.set_https_proxy("")
self.handler.set_ftp_proxy("")
self.handler.set_git_proxy("", "")
self.handler.set_cvs_proxy("", "")
if self.parameters.enable_proxy:
self.handler.set_http_proxy(self.configuration.http_proxy)
self.handler.set_https_proxy(self.configuration.https_proxy)
self.handler.set_ftp_proxy(self.configuration.ftp_proxy)
self.handler.set_all_proxy(self.configuration.all_proxy)
self.handler.set_git_proxy(self.configuration.git_proxy_host, self.configuration.git_proxy_port)
self.handler.set_cvs_proxy(self.configuration.cvs_proxy_host, self.configuration.cvs_proxy_port)
def update_recipe_model(self, selected_image, selected_recipes):
self.recipe_model.set_selected_image(selected_image)
@@ -748,8 +618,6 @@ class Builder(gtk.Window):
def handler_command_succeeded_cb(self, handler, initcmd):
if initcmd == self.handler.GENERATE_CONFIGURATION:
self.update_configuration_parameters(self.get_parameters_sync())
self.sanity_check()
elif initcmd == self.handler.SANITY_CHECK:
self.image_configuration_page.switch_machine_combo()
elif initcmd in [self.handler.GENERATE_RECIPES,
self.handler.GENERATE_PACKAGES,
@@ -764,11 +632,11 @@ class Builder(gtk.Window):
self.rcppkglist_populated()
if self.current_step == self.FAST_IMAGE_GENERATING:
self.generate_image_async(True)
self.generate_image_async()
def show_error_dialog(self, msg):
lbl = "<b>Error</b>\n"
lbl = lbl + "%s\n\n" % Builder.interpret_markup(msg)
lbl = lbl + "%s\n\n" % msg
dialog = CrumbsMessageDialog(self, lbl, gtk.STOCK_DIALOG_ERROR)
button = dialog.add_button("Close", gtk.RESPONSE_OK)
HobButton.style_button(button)
@@ -777,22 +645,18 @@ class Builder(gtk.Window):
def handler_command_failed_cb(self, handler, msg):
if msg:
msg = msg.replace("your local.conf", "Settings")
self.show_error_dialog(msg)
self.reset()
def handler_sanity_failed_cb(self, handler, msg):
msg = msg.replace("your local.conf", "Settings")
self.show_error_dialog(msg)
self.reset()
def window_sensitive(self, sensitive):
self.image_configuration_page.machine_combo.set_sensitive(sensitive)
self.image_configuration_page.machine_combo.child.set_sensitive(sensitive)
self.image_configuration_page.image_combo.set_sensitive(sensitive)
self.image_configuration_page.image_combo.child.set_sensitive(sensitive)
self.image_configuration_page.layer_button.set_sensitive(sensitive)
self.image_configuration_page.layer_info_icon.set_sensitive(sensitive)
self.image_configuration_page.toolbar.set_sensitive(sensitive)
self.image_configuration_page.view_recipes_button.set_sensitive(sensitive)
self.image_configuration_page.view_packages_button.set_sensitive(sensitive)
self.image_configuration_page.config_build_button.set_sensitive(sensitive)
self.recipe_details_page.set_sensitive(sensitive)
@@ -819,7 +683,7 @@ class Builder(gtk.Window):
selected_packages = self.configuration.selected_packages[:]
self.image_configuration_page.update_image_combo(self.recipe_model, selected_image)
self.image_configuration_page.update_image_desc()
self.image_configuration_page.update_image_desc(selected_image)
self.update_recipe_model(selected_image, selected_recipes)
self.update_package_model(selected_packages)
@@ -829,12 +693,6 @@ class Builder(gtk.Window):
def packagelist_changed_cb(self, package_model):
self.package_details_page.refresh_selection()
def handler_recipe_populated_cb(self, handler):
self.image_configuration_page.update_progress_bar("Populated recipes", 0.99)
def handler_package_populated_cb(self, handler):
self.image_configuration_page.update_progress_bar("Populated packages", 1.0)
def handler_parsing_started_cb(self, handler, message):
if self.current_step != self.RCPPKGINFO_POPULATING:
return
@@ -854,7 +712,7 @@ class Builder(gtk.Window):
fraction = message["current"] * 1.0/message["total"]
if message["eventname"] == "TreeDataPreparationProgress":
fraction = 0.6 + 0.38 * fraction
fraction = 0.6 + 0.4 * fraction
else:
fraction = 0.6 * fraction
self.image_configuration_page.update_progress_bar(message["title"], fraction)
@@ -864,7 +722,7 @@ class Builder(gtk.Window):
return
if message["eventname"] == "TreeDataPreparationCompleted":
fraction = 0.98
fraction = 1.0
else:
fraction = 0.6
self.image_configuration_page.update_progress_bar(message["title"], fraction)
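The two callbacks above map parsing progress into the lower 60% of the progress bar and tree-data preparation into the remaining band (0.38/0.98 on one side of the change, 0.4/1.0 on the other). A small sketch using the 0.38 variant:

# Sketch of the two-phase progress mapping above (0.38/0.98 variant).
def map_progress(eventname, current, total):
    fraction = current * 1.0 / total
    if eventname == "TreeDataPreparationProgress":
        return 0.6 + 0.38 * fraction   # tree data preparation fills the 0.6 .. 0.98 band
    return 0.6 * fraction              # parsing fills the 0.0 .. 0.6 band

print(map_progress("ParseProgress", 50, 100))                # ~0.3
print(map_progress("TreeDataPreparationProgress", 50, 100))  # ~0.79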
@@ -889,7 +747,7 @@ class Builder(gtk.Window):
fraction = 1.0
self.parameters.image_names = []
selected_image = self.recipe_model.get_selected_image()
if selected_image == self.recipe_model.__custom_image__:
if selected_image == self.recipe_model.__dummy_image__:
linkname = 'hob-image-' + self.configuration.curr_mach
else:
linkname = selected_image + '-' + self.configuration.curr_mach
@@ -915,20 +773,12 @@ class Builder(gtk.Window):
message = "Build stopped: "
fraction = self.build_details_page.progress_bar.get_fraction()
else:
fail_to_next_edit = ""
if self.current_step == self.FAST_IMAGE_GENERATING:
fail_to_next_edit = "image configuration"
fraction = 0.9
elif self.current_step == self.IMAGE_GENERATING:
if self.previous_step == self.FAST_IMAGE_GENERATING:
fail_to_next_edit = "image configuration"
else:
fail_to_next_edit = "packages"
fraction = 1.0
elif self.current_step == self.PACKAGE_GENERATING:
fail_to_next_edit = "recipes"
fraction = 1.0
self.build_details_page.show_fail_page(fail_to_next_edit.split(' ')[0], fail_to_next_edit)
status = "fail"
message = "Build failed: "
self.build_details_page.update_progress_bar(message, fraction, status)
@@ -947,11 +797,8 @@ class Builder(gtk.Window):
def handler_build_failed_cb(self, running_build):
self.build_failed()
def handler_build_aborted_cb(self, running_build):
self.build_failed()
def handler_no_provider_cb(self, running_build, msg):
dialog = CrumbsMessageDialog(self, Builder.interpret_markup(msg), gtk.STOCK_DIALOG_INFO)
dialog = CrumbsMessageDialog(self, msg, gtk.STOCK_DIALOG_INFO)
button = dialog.add_button("Close", gtk.RESPONSE_OK)
HobButton.style_button(button)
dialog.run()
@@ -989,10 +836,6 @@ class Builder(gtk.Window):
def handler_build_failure_cb(self, running_build):
self.build_details_page.show_issues()
def handler_build_log_cb(self, running_build, func, obj):
if hasattr(self.logger, func):
getattr(self.logger, func)(obj)
def destroy_window_cb(self, widget, event):
if not self.sensitive:
return True
@@ -1022,7 +865,7 @@ class Builder(gtk.Window):
dialog.run()
dialog.destroy()
return
self.generate_packages_async(True)
self.generate_packages_async()
def build_image(self):
selected_packages = self.package_model.get_selected_packages()
@@ -1035,14 +878,14 @@ class Builder(gtk.Window):
dialog.run()
dialog.destroy()
return
self.generate_image_async(True)
self.generate_image_async()
def just_bake(self):
selected_image = self.recipe_model.get_selected_image()
selected_packages = self.package_model.get_selected_packages() or []
# If no base image and no selected packages don't build anything
if not (selected_packages or selected_image != self.recipe_model.__custom_image__):
if not (selected_packages or selected_image != self.recipe_model.__dummy_image__):
lbl = "<b>No selections made</b>\nYou have not made any selections"
lbl = lbl + " so there isn't anything to bake at this time."
dialog = CrumbsMessageDialog(self, lbl, gtk.STOCK_DIALOG_INFO)
@@ -1052,7 +895,7 @@ class Builder(gtk.Window):
dialog.destroy()
return
self.fast_generate_image_async(True)
self.fast_generate_image_async()
def show_binb_dialog(self, binb):
markup = "<b>Brought in by:</b>\n%s" % binb
@@ -1148,6 +991,7 @@ class Builder(gtk.Window):
all_distros = self.parameters.all_distros,
all_sdk_machines = self.parameters.all_sdk_machines,
max_threads = self.parameters.max_threads,
enable_proxy = self.parameters.enable_proxy,
parent = self,
flags = gtk.DIALOG_MODAL
| gtk.DIALOG_DESTROY_WITH_PARENT
@@ -1159,6 +1003,7 @@ class Builder(gtk.Window):
response = dialog.run()
settings_changed = False
if response == gtk.RESPONSE_YES:
self.parameters.enable_proxy = dialog.enable_proxy
self.configuration = dialog.configuration
self.save_defaults() # remember settings
settings_changed = dialog.settings_changed
@@ -1197,7 +1042,16 @@ class Builder(gtk.Window):
response = dialog.run()
dialog.destroy()
def show_load_kernel_dialog(self):
def runqemu_image(self, image_name):
if not image_name:
lbl = "<b>Please select an image to launch in QEMU.</b>"
dialog = CrumbsMessageDialog(self, lbl, gtk.STOCK_DIALOG_INFO)
button = dialog.add_button("Close", gtk.RESPONSE_OK)
HobButton.style_button(button)
dialog.run()
dialog.destroy()
return
dialog = gtk.FileChooserDialog("Load Kernel Files", self,
gtk.FILE_CHOOSER_ACTION_SAVE)
button = dialog.add_button("Cancel", gtk.RESPONSE_NO)
@@ -1212,50 +1066,35 @@ class Builder(gtk.Window):
dialog.set_current_folder(self.parameters.image_addr)
response = dialog.run()
kernel_path = ""
if response == gtk.RESPONSE_YES:
kernel_path = dialog.get_filename()
image_path = os.path.join(self.parameters.image_addr, image_name)
dialog.destroy()
return kernel_path
def runqemu_image(self, image_name, kernel_name):
if not image_name or not kernel_name:
lbl = "<b>Please select an %s to launch in QEMU.</b>" % ("kernel" if image_name else "image")
dialog = CrumbsMessageDialog(self, lbl, gtk.STOCK_DIALOG_INFO)
button = dialog.add_button("Close", gtk.RESPONSE_OK)
HobButton.style_button(button)
dialog.run()
dialog.destroy()
return
kernel_path = os.path.join(self.parameters.image_addr, kernel_name)
image_path = os.path.join(self.parameters.image_addr, image_name)
source_env_path = os.path.join(self.parameters.core_base, "oe-init-build-env")
tmp_path = self.parameters.tmpdir
cmdline = bb.ui.crumbs.utils.which_terminal()
if os.path.exists(image_path) and os.path.exists(kernel_path) \
and os.path.exists(source_env_path) and os.path.exists(tmp_path) \
and cmdline:
cmdline += "\' bash -c \"export OE_TMPDIR=" + tmp_path + "; "
cmdline += "source " + source_env_path + " " + os.getcwd() + "; "
cmdline += "runqemu " + kernel_path + " " + image_path + "\"\'"
subprocess.Popen(shlex.split(cmdline))
else:
lbl = "<b>Path error</b>\nOne of your paths is wrong,"
lbl = lbl + " please make sure the following paths exist:\n"
lbl = lbl + "image path:" + image_path + "\n"
lbl = lbl + "kernel path:" + kernel_path + "\n"
lbl = lbl + "source environment path:" + source_env_path + "\n"
lbl = lbl + "tmp path: " + tmp_path + "."
lbl = lbl + "You may be missing either xterm or vte for terminal services."
dialog = CrumbsMessageDialog(self, lbl, gtk.STOCK_DIALOG_ERROR)
button = dialog.add_button("Close", gtk.RESPONSE_OK)
HobButton.style_button(button)
dialog.run()
dialog.destroy()
if response == gtk.RESPONSE_YES:
source_env_path = os.path.join(self.parameters.core_base, "oe-init-build-env")
tmp_path = self.parameters.tmpdir
cmdline = bb.ui.crumbs.utils.which_terminal()
if os.path.exists(image_path) and os.path.exists(kernel_path) \
and os.path.exists(source_env_path) and os.path.exists(tmp_path) \
and cmdline:
cmdline += "\' bash -c \"export OE_TMPDIR=" + tmp_path + "; "
cmdline += "source " + source_env_path + " " + os.getcwd() + "; "
cmdline += "runqemu " + kernel_path + " " + image_path + "\"\'"
subprocess.Popen(shlex.split(cmdline))
else:
lbl = "<b>Path error</b>\nOne of your paths is wrong,"
lbl = lbl + " please make sure the following paths exist:\n"
lbl = lbl + "image path:" + image_path + "\n"
lbl = lbl + "kernel path:" + kernel_path + "\n"
lbl = lbl + "source environment path:" + source_env_path + "\n"
lbl = lbl + "tmp path: " + tmp_path + "."
lbl = lbl + "You may be missing either xterm or vte for terminal services."
dialog = CrumbsMessageDialog(self, lbl, gtk.STOCK_DIALOG_ERROR)
button = dialog.add_button("Close", gtk.RESPONSE_OK)
HobButton.style_button(button)
dialog.run()
dialog.destroy()
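Both branches above build the same shell snippet and hand it to a terminal emulator found by which_terminal(). A sketch of just the snippet that ends up inside bash -c; every path below is a hypothetical placeholder:

import os

# Sketch of the shell snippet runqemu_image() wraps in bash -c and runs in xterm/vte.
tmp_path = "/path/to/build/tmp"                                  # hypothetical
source_env_path = "/path/to/poky/oe-init-build-env"              # hypothetical
kernel_path = "/path/to/images/bzImage-qemux86.bin"              # hypothetical
image_path = "/path/to/images/core-image-minimal-qemux86.ext3"   # hypothetical

snippet = ("export OE_TMPDIR=" + tmp_path + "; " +
           "source " + source_env_path + " " + os.getcwd() + "; " +
           "runqemu " + kernel_path + " " + image_path)
print(snippet)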
def show_packages(self, ask=True):
_, selected_recipes = self.recipe_model.get_selected_recipes()
@@ -1271,7 +1110,7 @@ class Builder(gtk.Window):
response = dialog.run()
dialog.destroy()
if response == gtk.RESPONSE_YES:
self.generate_packages_async(True)
self.generate_packages_async()
else:
self.switch_page(self.PACKAGE_SELECTION)
else:
@@ -1280,9 +1119,6 @@ class Builder(gtk.Window):
def show_recipes(self):
self.switch_page(self.RECIPE_SELECTION)
def show_image_details(self):
self.switch_page(self.IMAGE_GENERATED)
def show_configuration(self):
self.switch_page(self.BASEIMG_SELECTED)
@@ -1322,16 +1158,3 @@ class Builder(gtk.Window):
self.cancel_build_sync()
elif response == gtk.RESPONSE_YES:
self.cancel_build_sync(True)
def do_log(self, consolelogfile = None):
if consolelogfile:
bb.utils.mkdirhier(os.path.dirname(consolelogfile))
if self.consolelog:
self.logger.removeHandler(self.consolelog)
self.consolelog = None
self.consolelog = logging.FileHandler(consolelogfile)
bb.msg.addDefaultlogFilter(self.consolelog)
format = bb.msg.BBLogFormatter("%(levelname)s: %(message)s")
self.consolelog.setFormatter(format)
self.logger.addHandler(self.consolelog)

View File

@@ -20,20 +20,17 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import glob
import gtk
import gobject
import hashlib
import os
import re
import shlex
import subprocess
import tempfile
import shlex
from bb.ui.crumbs.hobcolor import HobColors
from bb.ui.crumbs.hobwidget import hcc, hic, HobViewTable, HobInfoButton, HobButton, HobAltButton, HobIconChecker
from bb.ui.crumbs.progressbar import HobProgressBar
import bb.ui.crumbs.utils
import bb.process
"""
The following are convenience classes for implementing GNOME HIG compliant
@@ -66,7 +63,7 @@ class CrumbsMessageDialog(CrumbsDialog):
"""
def __init__(self, parent=None, label="", icon=gtk.STOCK_INFO):
super(CrumbsMessageDialog, self).__init__("", parent, gtk.DIALOG_DESTROY_WITH_PARENT)
self.set_border_width(6)
self.vbox.set_property("spacing", 12)
self.action_area.set_property("spacing", 12)
@@ -140,8 +137,6 @@ class AdvancedSettingDialog (CrumbsDialog):
def entry_widget_select_path_cb(self, action, parent, entry):
dialog = gtk.FileChooserDialog("", parent,
gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER)
text = entry.get_text()
dialog.set_current_folder(text if len(text) > 0 else os.getcwd())
button = dialog.add_button("Cancel", gtk.RESPONSE_NO)
HobAltButton.style_button(button)
button = dialog.add_button("Open", gtk.RESPONSE_YES)
@@ -177,45 +172,6 @@ class AdvancedSettingDialog (CrumbsDialog):
hbox.show_all()
return hbox, entry
def details_cb(self, button, parent, protocol):
dialog = ProxyDetailsDialog(title = protocol.upper() + " Proxy Details",
user = self.configuration.proxies[protocol][1],
passwd = self.configuration.proxies[protocol][2],
parent = parent,
flags = gtk.DIALOG_MODAL
| gtk.DIALOG_DESTROY_WITH_PARENT
| gtk.DIALOG_NO_SEPARATOR)
dialog.add_button(gtk.STOCK_CLOSE, gtk.RESPONSE_OK)
response = dialog.run()
if response == gtk.RESPONSE_OK:
self.configuration.proxies[protocol][1] = dialog.user
self.configuration.proxies[protocol][2] = dialog.passwd
self.refresh_proxy_components()
dialog.destroy()
def gen_proxy_entry_widget(self, protocol, parent, need_button=True):
hbox = gtk.HBox(False, 12)
label = gtk.Label(protocol.upper() + " proxy")
hbox.pack_start(label, expand=True, fill=False, padding=24)
proxy_entry = gtk.Entry()
proxy_entry.set_size_request(300, -1)
hbox.pack_start(proxy_entry, expand=False, fill=False)
hbox.pack_start(gtk.Label(":"), expand=False, fill=False)
port_entry = gtk.Entry()
port_entry.set_size_request(60, -1)
hbox.pack_start(port_entry, expand=False, fill=False)
details_button = HobAltButton("Details")
details_button.connect("clicked", self.details_cb, parent, protocol)
hbox.pack_start(details_button, expand=False, fill=False)
hbox.show_all()
return hbox, proxy_entry, port_entry, details_button
def rootfs_combo_changed_cb(self, rootfs_combo, all_package_format, check_hbox):
combo_item = self.rootfs_combo.get_active_text()
for child in check_hbox.get_children():
@@ -353,7 +309,7 @@ class AdvancedSettingDialog (CrumbsDialog):
def __init__(self, title, configuration, all_image_types,
all_package_formats, all_distros, all_sdk_machines,
max_threads, parent, flags, buttons=None):
max_threads, enable_proxy, parent, flags, buttons=None):
super(AdvancedSettingDialog, self).__init__(title, parent, flags, buttons)
# class members from other objects
@@ -364,6 +320,7 @@ class AdvancedSettingDialog (CrumbsDialog):
self.all_distros = all_distros
self.all_sdk_machines = all_sdk_machines
self.max_threads = max_threads
self.enable_proxy = enable_proxy
# class members for internal use
self.distro_combo = None
@@ -399,10 +356,6 @@ class AdvancedSettingDialog (CrumbsDialog):
data += ("SDK_MACHINE: " + self._get_sorted_value(self.configuration.curr_sdk_machine))
data += ("TOOLCHAIN_BUILD: " + self._get_sorted_value(self.configuration.toolchain_build))
data += ("IMAGE_FSTYPES: " + self._get_sorted_value(self.configuration.image_fstypes))
data += ("ENABLE_PROXY: " + self._get_sorted_value(self.configuration.enable_proxy))
if self.configuration.enable_proxy:
for protocol in self.configuration.proxies.keys():
data += (protocol + ": " + self._get_sorted_value(self.configuration.combine_proxy(protocol)))
for key in self.configuration.extra_setting.keys():
data += (key + ": " + self._get_sorted_value(self.configuration.extra_setting[key]))
return hashlib.md5(data).hexdigest()
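config_md5() reduces the whole settings page to one digest; response_cb() later compares a fresh digest against the one taken when the dialog opened to decide whether anything changed. A minimal sketch of that idiom with hypothetical keys:

import hashlib

# Sketch of the md5-based change detection used by AdvancedSettingDialog (values hypothetical).
def config_md5(settings):
    data = ""
    for key in sorted(settings.keys()):
        data += key + ": " + str(settings[key]) + "\n"
    return hashlib.md5(data.encode("utf-8")).hexdigest()

settings = {"PACKAGE_FORMAT": "rpm", "IMAGE_FSTYPES": "ext3 jffs2"}
baseline = config_md5(settings)

settings["PACKAGE_FORMAT"] = "ipk"                    # the user edits a setting in the dialog
settings_changed = (config_md5(settings) != baseline)
print(settings_changed)                               # True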
@@ -567,56 +520,60 @@ class AdvancedSettingDialog (CrumbsDialog):
sub_vbox = gtk.VBox(False, 6)
advanced_vbox.pack_start(sub_vbox, expand=False, fill=False)
label = self.gen_label_widget("<span weight=\"bold\">Set the proxies that will be used during fetching source code</span>")
tooltip = "Set the proxies that will be used during fetching source code or set none for direct the Internet connection"
info = HobInfoButton(tooltip, self)
hbox = gtk.HBox(False, 12)
hbox.pack_start(label, expand=True, fill=True)
hbox.pack_start(info, expand=False, fill=False)
sub_vbox.pack_start(hbox, expand=False, fill=False)
self.direct_checkbox = gtk.RadioButton(None, "Direct internet connection")
self.direct_checkbox.set_tooltip_text("Check this box to connect the Internet directly without any proxy")
self.direct_checkbox.set_active(not self.configuration.enable_proxy)
sub_vbox.pack_start(self.direct_checkbox, expand=False, fill=False)
self.proxy_checkbox = gtk.RadioButton(self.direct_checkbox, "Manual proxy configuration")
self.proxy_checkbox = gtk.CheckButton("Enable proxy")
self.proxy_checkbox.set_tooltip_text("Check this box to setup the proxy you specified")
self.proxy_checkbox.set_active(self.configuration.enable_proxy)
self.proxy_checkbox.set_active(self.enable_proxy)
self.proxy_checkbox.connect("toggled", self.proxy_checkbox_toggled_cb)
sub_vbox.pack_start(self.proxy_checkbox, expand=False, fill=False)
self.same_checkbox = gtk.CheckButton("Use the same proxy for all protocols")
self.same_checkbox.set_tooltip_text("Use the same proxy as the first proxy i.e. http proxy for all protocols")
self.same_checkbox.set_active(self.configuration.same_proxy)
hbox = gtk.HBox(False, 12)
hbox.pack_start(self.same_checkbox, expand=False, fill=False, padding=24)
sub_vbox.pack_start(hbox, expand=False, fill=False)
proxy_widget, self.http_proxy, self.http_proxy_port, self.http_proxy_details = self.gen_proxy_entry_widget(
"http", self, True)
label = self.gen_label_widget("<span weight=\"bold\">Set all proxy:</span>")
tooltip = "Set the all proxy that will be used if the proxy for a URL isn't specified."
proxy_widget, self.all_proxy_text = self.gen_entry_widget(self.configuration.all_proxy, self, tooltip, False)
self.all_proxy_text.set_editable(self.enable_proxy)
self.all_proxy_text.set_sensitive(self.enable_proxy)
sub_vbox.pack_start(label, expand=False, fill=False)
sub_vbox.pack_start(proxy_widget, expand=False, fill=False)
proxy_widget, self.https_proxy, self.https_proxy_port, self.https_proxy_details = self.gen_proxy_entry_widget(
"https", self, True)
label = self.gen_label_widget("<span weight=\"bold\">Set http proxy:</span>")
tooltip = "Set the http proxy that will be used in do_fetch() source code"
proxy_widget, self.http_proxy_text = self.gen_entry_widget(self.configuration.http_proxy, self, tooltip, False)
self.http_proxy_text.set_editable(self.enable_proxy)
self.http_proxy_text.set_sensitive(self.enable_proxy)
sub_vbox.pack_start(label, expand=False, fill=False)
sub_vbox.pack_start(proxy_widget, expand=False, fill=False)
proxy_widget, self.ftp_proxy, self.ftp_proxy_port, self.ftp_proxy_details = self.gen_proxy_entry_widget(
"ftp", self, True)
label = self.gen_label_widget("<span weight=\"bold\">Set https proxy:</span>")
tooltip = "Set the https proxy that will be used in do_fetch() source code"
proxy_widget, self.https_proxy_text = self.gen_entry_widget(self.configuration.https_proxy, self, tooltip, False)
self.https_proxy_text.set_editable(self.enable_proxy)
self.https_proxy_text.set_sensitive(self.enable_proxy)
sub_vbox.pack_start(label, expand=False, fill=False)
sub_vbox.pack_start(proxy_widget, expand=False, fill=False)
proxy_widget, self.git_proxy, self.git_proxy_port, self.git_proxy_details = self.gen_proxy_entry_widget(
"git", self, True)
label = self.gen_label_widget("<span weight=\"bold\">Set ftp proxy:</span>")
tooltip = "Set the ftp proxy that will be used in do_fetch() source code"
proxy_widget, self.ftp_proxy_text = self.gen_entry_widget(self.configuration.ftp_proxy, self, tooltip, False)
self.ftp_proxy_text.set_editable(self.enable_proxy)
self.ftp_proxy_text.set_sensitive(self.enable_proxy)
sub_vbox.pack_start(label, expand=False, fill=False)
sub_vbox.pack_start(proxy_widget, expand=False, fill=False)
proxy_widget, self.cvs_proxy, self.cvs_proxy_port, self.cvs_proxy_details = self.gen_proxy_entry_widget(
"cvs", self, True)
label = self.gen_label_widget("<span weight=\"bold\">Set git proxy:</span>")
tooltip = "Set the git proxy that will be used in do_fetch() source code"
proxy_widget, self.git_proxy_text = self.gen_entry_widget(self.configuration.git_proxy_host + ':' + self.configuration.git_proxy_port, self, tooltip, False)
self.git_proxy_text.set_editable(self.enable_proxy)
self.git_proxy_text.set_sensitive(self.enable_proxy)
sub_vbox.pack_start(label, expand=False, fill=False)
sub_vbox.pack_start(proxy_widget, expand=False, fill=False)
self.direct_checkbox.connect("toggled", self.proxy_checkbox_toggled_cb)
self.proxy_checkbox.connect("toggled", self.proxy_checkbox_toggled_cb)
self.same_checkbox.connect("toggled", self.same_checkbox_toggled_cb)
label = self.gen_label_widget("<span weight=\"bold\">Set cvs proxy:</span>")
tooltip = "Set the cvs proxy that will be used in do_fetch() source code"
proxy_widget, self.cvs_proxy_text = self.gen_entry_widget(self.configuration.cvs_proxy_host + ':' + self.configuration.cvs_proxy_port, self, tooltip, False)
self.cvs_proxy_text.set_editable(self.enable_proxy)
self.cvs_proxy_text.set_sensitive(self.enable_proxy)
sub_vbox.pack_start(label, expand=False, fill=False)
sub_vbox.pack_start(proxy_widget, expand=False, fill=False)
self.refresh_proxy_components()
return advanced_vbox
def create_others_page(self):
@@ -633,59 +590,20 @@ class AdvancedSettingDialog (CrumbsDialog):
return advanced_vbox
def refresh_proxy_components(self):
self.same_checkbox.set_sensitive(self.configuration.enable_proxy)
self.http_proxy.set_text(self.configuration.combine_host_only("http"))
self.http_proxy.set_editable(self.configuration.enable_proxy)
self.http_proxy.set_sensitive(self.configuration.enable_proxy)
self.http_proxy_port.set_text(self.configuration.combine_port_only("http"))
self.http_proxy_port.set_editable(self.configuration.enable_proxy)
self.http_proxy_port.set_sensitive(self.configuration.enable_proxy)
self.http_proxy_details.set_sensitive(self.configuration.enable_proxy)
self.https_proxy.set_text(self.configuration.combine_host_only("https"))
self.https_proxy.set_editable(self.configuration.enable_proxy and (not self.configuration.same_proxy))
self.https_proxy.set_sensitive(self.configuration.enable_proxy and (not self.configuration.same_proxy))
self.https_proxy_port.set_text(self.configuration.combine_port_only("https"))
self.https_proxy_port.set_editable(self.configuration.enable_proxy and (not self.configuration.same_proxy))
self.https_proxy_port.set_sensitive(self.configuration.enable_proxy and (not self.configuration.same_proxy))
self.https_proxy_details.set_sensitive(self.configuration.enable_proxy and (not self.configuration.same_proxy))
self.ftp_proxy.set_text(self.configuration.combine_host_only("ftp"))
self.ftp_proxy.set_editable(self.configuration.enable_proxy and (not self.configuration.same_proxy))
self.ftp_proxy.set_sensitive(self.configuration.enable_proxy and (not self.configuration.same_proxy))
self.ftp_proxy_port.set_text(self.configuration.combine_port_only("ftp"))
self.ftp_proxy_port.set_editable(self.configuration.enable_proxy and (not self.configuration.same_proxy))
self.ftp_proxy_port.set_sensitive(self.configuration.enable_proxy and (not self.configuration.same_proxy))
self.ftp_proxy_details.set_sensitive(self.configuration.enable_proxy and (not self.configuration.same_proxy))
self.git_proxy.set_text(self.configuration.combine_host_only("git"))
self.git_proxy.set_editable(self.configuration.enable_proxy and (not self.configuration.same_proxy))
self.git_proxy.set_sensitive(self.configuration.enable_proxy and (not self.configuration.same_proxy))
self.git_proxy_port.set_text(self.configuration.combine_port_only("git"))
self.git_proxy_port.set_editable(self.configuration.enable_proxy and (not self.configuration.same_proxy))
self.git_proxy_port.set_sensitive(self.configuration.enable_proxy and (not self.configuration.same_proxy))
self.git_proxy_details.set_sensitive(self.configuration.enable_proxy and (not self.configuration.same_proxy))
self.cvs_proxy.set_text(self.configuration.combine_host_only("cvs"))
self.cvs_proxy.set_editable(self.configuration.enable_proxy and (not self.configuration.same_proxy))
self.cvs_proxy.set_sensitive(self.configuration.enable_proxy and (not self.configuration.same_proxy))
self.cvs_proxy_port.set_text(self.configuration.combine_port_only("cvs"))
self.cvs_proxy_port.set_editable(self.configuration.enable_proxy and (not self.configuration.same_proxy))
self.cvs_proxy_port.set_sensitive(self.configuration.enable_proxy and (not self.configuration.same_proxy))
self.cvs_proxy_details.set_sensitive(self.configuration.enable_proxy and (not self.configuration.same_proxy))
def proxy_checkbox_toggled_cb(self, button):
self.configuration.enable_proxy = self.proxy_checkbox.get_active()
if not self.configuration.enable_proxy:
self.configuration.same_proxy = False
self.same_checkbox.set_active(self.configuration.same_proxy)
self.refresh_proxy_components()
def same_checkbox_toggled_cb(self, button):
self.configuration.same_proxy = self.same_checkbox.get_active()
self.refresh_proxy_components()
self.enable_proxy = self.proxy_checkbox.get_active()
self.all_proxy_text.set_editable(self.enable_proxy)
self.all_proxy_text.set_sensitive(self.enable_proxy)
self.http_proxy_text.set_editable(self.enable_proxy)
self.http_proxy_text.set_sensitive(self.enable_proxy)
self.https_proxy_text.set_editable(self.enable_proxy)
self.https_proxy_text.set_sensitive(self.enable_proxy)
self.ftp_proxy_text.set_editable(self.enable_proxy)
self.ftp_proxy_text.set_sensitive(self.enable_proxy)
self.git_proxy_text.set_editable(self.enable_proxy)
self.git_proxy_text.set_sensitive(self.enable_proxy)
self.cvs_proxy_text.set_editable(self.enable_proxy)
self.cvs_proxy_text.set_sensitive(self.enable_proxy)
def response_cb(self, dialog, response_id):
package_format = []
@@ -729,17 +647,12 @@ class AdvancedSettingDialog (CrumbsDialog):
self.configuration.extra_setting[key] = value
it = self.setting_store.iter_next(it)
self.configuration.split_proxy("http", self.http_proxy.get_text() + ":" + self.http_proxy_port.get_text())
if self.configuration.same_proxy:
self.configuration.split_proxy("https", self.http_proxy.get_text() + ":" + self.http_proxy_port.get_text())
self.configuration.split_proxy("ftp", self.http_proxy.get_text() + ":" + self.http_proxy_port.get_text())
self.configuration.split_proxy("git", self.http_proxy.get_text() + ":" + self.http_proxy_port.get_text())
self.configuration.split_proxy("cvs", self.http_proxy.get_text() + ":" + self.http_proxy_port.get_text())
else:
self.configuration.split_proxy("https", self.https_proxy.get_text() + ":" + self.https_proxy_port.get_text())
self.configuration.split_proxy("ftp", self.ftp_proxy.get_text() + ":" + self.ftp_proxy_port.get_text())
self.configuration.split_proxy("git", self.git_proxy.get_text() + ":" + self.git_proxy_port.get_text())
self.configuration.split_proxy("cvs", self.cvs_proxy.get_text() + ":" + self.cvs_proxy_port.get_text())
self.configuration.all_proxy = self.all_proxy_text.get_text()
self.configuration.http_proxy = self.http_proxy_text.get_text()
self.configuration.https_proxy = self.https_proxy_text.get_text()
self.configuration.ftp_proxy = self.ftp_proxy_text.get_text()
self.configuration.git_proxy_host, self.configuration.git_proxy_port = self.git_proxy_text.get_text().split(':')
self.configuration.cvs_proxy_host, self.configuration.cvs_proxy_port = self.cvs_proxy_text.get_text().split(':')
md5 = self.config_md5()
self.settings_changed = (self.md5 != md5)
@@ -751,28 +664,21 @@ class DeployImageDialog (CrumbsDialog):
__dummy_usb__ = "--select a usb drive--"
def __init__(self, title, image_path, parent, flags, buttons=None, standalone=False):
def __init__(self, title, image_path, parent, flags, buttons=None):
super(DeployImageDialog, self).__init__(title, parent, flags, buttons)
self.image_path = image_path
self.standalone = standalone
self.create_visual_elements()
self.connect("response", self.response_cb)
def create_visual_elements(self):
self.set_size_request(600, 400)
label = gtk.Label()
label.set_alignment(0.0, 0.5)
markup = "<span font_desc='12'>The image to be written into usb drive:</span>"
label.set_markup(markup)
self.vbox.pack_start(label, expand=False, fill=False, padding=2)
table = gtk.Table(2, 10, False)
table.set_col_spacings(5)
table.set_row_spacings(5)
self.vbox.pack_start(table, expand=True, fill=True)
scroll = gtk.ScrolledWindow()
scroll.set_policy(gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC)
scroll.set_shadow_type(gtk.SHADOW_IN)
@@ -780,31 +686,11 @@ class DeployImageDialog (CrumbsDialog):
tv.set_editable(False)
tv.set_wrap_mode(gtk.WRAP_WORD)
tv.set_cursor_visible(False)
self.buf = gtk.TextBuffer()
self.buf.set_text(self.image_path)
tv.set_buffer(self.buf)
buf = gtk.TextBuffer()
buf.set_text(self.image_path)
tv.set_buffer(buf)
scroll.add(tv)
table.attach(scroll, 0, 10, 0, 1)
# There are 2 ways to use DeployImageDialog
# One way is that called by HOB when the 'Deploy Image' button is clicked
# The other way is that called by a standalone script.
# Following block of codes handles the latter way. It adds a 'Select Image' button and
# emit a signal when the button is clicked.
if self.standalone:
gobject.signal_new("select_image_clicked", self, gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE, ())
icon = gtk.Image()
pix_buffer = gtk.gdk.pixbuf_new_from_file(hic.ICON_IMAGES_DISPLAY_FILE)
icon.set_from_pixbuf(pix_buffer)
button = gtk.Button("Select Image")
button.set_image(icon)
button.set_size_request(140, 50)
table.attach(button, 9, 10, 1, 2, gtk.FILL, 0, 0, 0)
button.connect("clicked", self.select_image_button_clicked_cb)
separator = gtk.HSeparator()
self.vbox.pack_start(separator, expand=False, fill=False, padding=10)
self.vbox.pack_start(scroll, expand=True, fill=True)
self.usb_desc = gtk.Label()
self.usb_desc.set_alignment(0.0, 0.5)
@@ -819,7 +705,7 @@ class DeployImageDialog (CrumbsDialog):
for usb in self.find_all_usb_devices():
self.usb_combo.append_text("/dev/" + usb)
self.usb_combo.set_active(0)
self.vbox.pack_start(self.usb_combo, expand=False, fill=False)
self.vbox.pack_start(self.usb_combo, expand=True, fill=True)
self.vbox.pack_start(self.usb_desc, expand=False, fill=False, padding=2)
self.progress_bar = HobProgressBar()
@@ -830,19 +716,12 @@ class DeployImageDialog (CrumbsDialog):
self.vbox.show_all()
self.progress_bar.hide()
def set_image_text_buffer(self, image_path):
self.buf.set_text(image_path)
def set_image_path(self, image_path):
self.image_path = image_path
def popen_read(self, cmd):
tmpout, errors = bb.process.run("%s" % cmd)
return tmpout.strip()
return os.popen("%s 2>/dev/null" % cmd).read().strip()
def find_all_usb_devices(self):
usb_devs = [ os.readlink(u)
for u in glob.glob('/dev/disk/by-id/usb*')
for u in self.popen_read('ls /dev/disk/by-id/usb*').split()
if not re.search(r'part\d+', u) ]
return [ '%s' % u[u.rfind('/')+1:] for u in usb_devs ]
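find_all_usb_devices() resolves the /dev/disk/by-id/usb* symlinks, skips partition entries, and keeps only the trailing device names. A standalone sketch; the printed result is only an example:

import glob
import os
import re

# Sketch of find_all_usb_devices(): whole-disk USB devices, partition links filtered out.
def find_all_usb_devices():
    usb_devs = [os.readlink(u)
                for u in glob.glob('/dev/disk/by-id/usb*')
                if not re.search(r'part\d+', u)]
    # readlink() gives paths like '../../sdb'; keep only the device name.
    return [u[u.rfind('/') + 1:] for u in usb_devs]

print(find_all_usb_devices())   # e.g. ['sdb'] with one USB disk plugged in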
@@ -851,9 +730,6 @@ class DeployImageDialog (CrumbsDialog):
(self.popen_read('cat /sys/class/block/%s/device/vendor' % dev),
self.popen_read('cat /sys/class/block/%s/device/model' % dev))
def select_image_button_clicked_cb(self, button):
self.emit('select_image_clicked')
def usb_combo_changed_cb(self, usb_combo):
combo_item = self.usb_combo.get_active_text()
if not combo_item or combo_item == self.__dummy_usb__:
@@ -865,32 +741,12 @@ class DeployImageDialog (CrumbsDialog):
def response_cb(self, dialog, response_id):
if response_id == gtk.RESPONSE_YES:
lbl = ''
combo_item = self.usb_combo.get_active_text()
if combo_item and combo_item != self.__dummy_usb__ and self.image_path:
if combo_item and combo_item != self.__dummy_usb__:
cmdline = bb.ui.crumbs.utils.which_terminal()
if cmdline:
tmpfile = tempfile.NamedTemporaryFile()
cmdline += "\"sudo dd if=" + self.image_path + \
" of=" + combo_item + "; echo $? > " + tmpfile.name + "\""
subprocess.call(shlex.split(cmdline))
if int(tmpfile.readline().strip()) == 0:
lbl = "<b>Deploy image successfully.</b>"
else:
lbl = "<b>Failed to deploy image.</b>\nPlease check image <b>%s</b> exists and USB device <b>%s</b> is writable." % (self.image_path, combo_item)
tmpfile.close()
else:
if not self.image_path:
lbl = "<b>No selection made.</b>\nYou have not selected an image to deploy."
else:
lbl = "<b>No selection made.</b>\nYou have not selected a USB device."
if len(lbl):
crumbs_dialog = CrumbsMessageDialog(self, lbl, gtk.STOCK_DIALOG_INFO)
button = crumbs_dialog.add_button("Close", gtk.RESPONSE_OK)
HobButton.style_button(button)
crumbs_dialog.run()
crumbs_dialog.destroy()
cmdline += "\"sudo dd if=" + self.image_path + " of=" + combo_item + "\""
subprocess.Popen(args=shlex.split(cmdline))
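The deploy step runs dd inside a terminal so the user can answer the sudo prompt; one side of the hunk above additionally captures dd's exit status through a temporary file so the dialog can report success or failure. A sketch of that variant; the terminal prefix, image path and device are hypothetical:

import shlex
import subprocess
import tempfile

# Sketch of the 'Deploy image' command, with exit-status capture as on one side of the hunk above.
def deploy_image(terminal, image_path, device):
    status_file = tempfile.NamedTemporaryFile()
    # dd runs in a terminal so the user can type the sudo password; its exit status
    # is redirected into a temp file that the dialog reads back afterwards.
    cmdline = terminal + "\"sudo dd if=" + image_path + \
              " of=" + device + "; echo $? > " + status_file.name + "\""
    subprocess.call(shlex.split(cmdline))
    ok = int(status_file.readline().strip()) == 0
    status_file.close()
    return ok

# deploy_image('xterm -e ', '/path/to/core-image-minimal.hddimg', '/dev/sdb')  # hypothetical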
def update_progress_bar(self, title, fraction, status=None):
self.progress_bar.update(fraction)
@@ -1094,7 +950,7 @@ class LayerSelectionDialog (CrumbsDialog):
# create visual elements on the dialog
self.create_visual_elements()
self.connect("response", self.response_cb)
def create_visual_elements(self):
layer_widget, self.layer_store = self.gen_layer_widget(self.layers, self.all_layers, self, None)
layer_widget.set_size_request(450, 250)
@@ -1208,13 +1064,12 @@ class ImageSelectionDialog (CrumbsDialog):
self.image_table = HobViewTable(self.__columns__)
self.image_table.set_size_request(-1, 300)
self.image_table.connect("toggled", self.toggled_cb)
self.image_table.connect_group_selection(self.table_selected_cb)
self.image_table.connect("row-activated", self.row_actived_cb)
self.vbox.pack_start(self.image_table, expand=True, fill=True)
self.show_all()
def change_image_cb(self, model, path, columnid):
def toggled_cb(self, table, cell, path, columnid, tree):
model = tree.get_model()
if not model:
return
iter = model.get_iter_first()
@@ -1225,24 +1080,9 @@ class ImageSelectionDialog (CrumbsDialog):
model[path][columnid] = True
def toggled_cb(self, table, cell, path, columnid, tree):
model = tree.get_model()
self.change_image_cb(model, path, columnid)
def table_selected_cb(self, selection):
model, paths = selection.get_selected_rows()
if paths:
self.change_image_cb(model, paths[0], 1)
def row_actived_cb(self, tab, model, path):
self.change_image_cb(model, path, 1)
self.emit('response', gtk.RESPONSE_YES)
def select_path_cb(self, action, parent, entry):
dialog = gtk.FileChooserDialog("", parent,
gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER)
text = entry.get_text()
dialog.set_current_folder(text if len(text) > 0 else os.getcwd())
button = dialog.add_button("Cancel", gtk.RESPONSE_NO)
HobAltButton.style_button(button)
button = dialog.add_button("Open", gtk.RESPONSE_YES)
@@ -1269,7 +1109,7 @@ class ImageSelectionDialog (CrumbsDialog):
if f.endswith('.' + real_image_type):
imageset.add(f.rsplit('.' + real_image_type)[0].rsplit('.rootfs')[0])
self.image_list.append(f)
for image in imageset:
self.image_store.set(self.image_store.append(), 0, image, 1, False)
@@ -1285,65 +1125,5 @@ class ImageSelectionDialog (CrumbsDialog):
for f in self.image_list:
if f.startswith(self.image_store[path][0] + '.'):
self.image_names.append(f)
break
break
iter = self.image_store.iter_next(iter)
class ProxyDetailsDialog (CrumbsDialog):
def __init__(self, title, user, passwd, parent, flags, buttons=None):
super(ProxyDetailsDialog, self).__init__(title, parent, flags, buttons)
self.connect("response", self.response_cb)
self.auth = not (user == None or passwd == None or user == "")
self.user = user or ""
self.passwd = passwd or ""
# create visual elements on the dialog
self.create_visual_elements()
def create_visual_elements(self):
self.auth_checkbox = gtk.CheckButton("Use authentication")
self.auth_checkbox.set_tooltip_text("Check this box to set the username and the password")
self.auth_checkbox.set_active(self.auth)
self.auth_checkbox.connect("toggled", self.auth_checkbox_toggled_cb)
self.vbox.pack_start(self.auth_checkbox, expand=False, fill=False)
hbox = gtk.HBox(False, 6)
self.user_label = gtk.Label("Username:")
self.user_text = gtk.Entry()
self.user_text.set_text(self.user)
hbox.pack_start(self.user_label, expand=False, fill=False)
hbox.pack_end(self.user_text, expand=False, fill=False)
self.vbox.pack_start(hbox, expand=False, fill=False)
hbox = gtk.HBox(False, 6)
self.passwd_label = gtk.Label("Password:")
self.passwd_text = gtk.Entry()
self.passwd_text.set_text(self.passwd)
hbox.pack_start(self.passwd_label, expand=False, fill=False)
hbox.pack_end(self.passwd_text, expand=False, fill=False)
self.vbox.pack_start(hbox, expand=False, fill=False)
self.refresh_auth_components()
self.show_all()
def refresh_auth_components(self):
self.user_label.set_sensitive(self.auth)
self.user_text.set_editable(self.auth)
self.user_text.set_sensitive(self.auth)
self.passwd_label.set_sensitive(self.auth)
self.passwd_text.set_editable(self.auth)
self.passwd_text.set_sensitive(self.auth)
def auth_checkbox_toggled_cb(self, button):
self.auth = self.auth_checkbox.get_active()
self.refresh_auth_components()
def response_cb(self, dialog, response_id):
if response_id == gtk.RESPONSE_OK:
if self.auth:
self.user = self.user_text.get_text()
self.passwd = self.passwd_text.get_text()
else:
self.user = None
self.passwd = None

View File

@@ -42,9 +42,6 @@ class HobHandler(gobject.GObject):
"command-failed" : (gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE,
(gobject.TYPE_STRING,)),
"sanity-failed" : (gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE,
(gobject.TYPE_STRING,)),
"generating-data" : (gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE,
()),
@@ -60,16 +57,10 @@ class HobHandler(gobject.GObject):
"parsing-completed" : (gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE,
(gobject.TYPE_PYOBJECT,)),
"recipe-populated" : (gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE,
()),
"package-populated" : (gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE,
()),
}
(GENERATE_CONFIGURATION, GENERATE_RECIPES, GENERATE_PACKAGES, GENERATE_IMAGE, POPULATE_PACKAGEINFO, SANITY_CHECK) = range(6)
(SUB_PATH_LAYERS, SUB_FILES_DISTRO, SUB_FILES_MACH, SUB_FILES_SDKMACH, SUB_MATCH_CLASS, SUB_PARSE_CONFIG, SUB_SANITY_CHECK, SUB_GNERATE_TGTS, SUB_GENERATE_PKGINFO, SUB_BUILD_RECIPES, SUB_BUILD_IMAGE) = range(11)
(GENERATE_CONFIGURATION, GENERATE_RECIPES, GENERATE_PACKAGES, GENERATE_IMAGE, POPULATE_PACKAGEINFO) = range(5)
(SUB_PATH_LAYERS, SUB_FILES_DISTRO, SUB_FILES_MACH, SUB_FILES_SDKMACH, SUB_MATCH_CLASS, SUB_PARSE_CONFIG, SUB_GNERATE_TGTS, SUB_GENERATE_PKGINFO, SUB_BUILD_RECIPES, SUB_BUILD_IMAGE) = range(10)
def __init__(self, server, recipe_model, package_model):
super(HobHandler, self).__init__()
@@ -138,8 +129,6 @@ class HobHandler(gobject.GObject):
self.runCommand(["generateTargetsTree", "classes/image.bbclass", []])
elif next_command == self.SUB_GENERATE_PKGINFO:
self.runCommand(["triggerEvent", "bb.event.RequestPackageInfo()"])
elif next_command == self.SUB_SANITY_CHECK:
self.runCommand(["triggerEvent", "bb.event.SanityCheck()"])
elif next_command == self.SUB_BUILD_RECIPES:
self.clear_busy()
self.building = True
@@ -167,15 +156,8 @@ class HobHandler(gobject.GObject):
if isinstance(event, bb.event.PackageInfo):
self.package_model.populate(event._pkginfolist)
self.emit("package-populated")
self.run_next_command()
elif isinstance(event, bb.event.SanityCheckPassed):
self.run_next_command()
elif isinstance(event, bb.event.SanityCheckFailed):
self.emit("sanity-failed", event._msg)
elif isinstance(event, logging.LogRecord):
if event.levelno >= logging.ERROR:
self.error_msg += event.msg + '\n'
@@ -184,7 +166,6 @@ class HobHandler(gobject.GObject):
self.current_phase = "data generation"
if event._model:
self.recipe_model.populate(event._model)
self.emit("recipe-populated")
elif isinstance(event, bb.event.ConfigFilesFound):
self.current_phase = "configuration lookup"
var = event._variable
@@ -213,8 +194,6 @@ class HobHandler(gobject.GObject):
self.clear_busy()
self.emit("command-failed", self.error_msg)
self.error_msg = ""
if self.building:
self.building = False
elif isinstance(event, (bb.event.ParseStarted,
bb.event.CacheLoadStarted,
bb.event.TreeDataPreparationStarted,
@@ -318,6 +297,9 @@ class HobHandler(gobject.GObject):
def set_ftp_proxy(self, ftp_proxy):
self.runCommand(["setVariable", "ftp_proxy", ftp_proxy])
def set_all_proxy(self, all_proxy):
self.runCommand(["setVariable", "all_proxy", all_proxy])
def set_git_proxy(self, host, port):
self.runCommand(["setVariable", "GIT_PROXY_HOST", host])
self.runCommand(["setVariable", "GIT_PROXY_PORT", port])
@@ -330,10 +312,6 @@ class HobHandler(gobject.GObject):
self.commands_async.append(self.SUB_GENERATE_PKGINFO)
self.run_next_command(self.POPULATE_PACKAGEINFO)
def trigger_sanity_check(self):
self.commands_async.append(self.SUB_SANITY_CHECK)
self.run_next_command(self.SANITY_CHECK)
def generate_configuration(self):
self.commands_async.append(self.SUB_PARSE_CONFIG)
self.commands_async.append(self.SUB_PATH_LAYERS)
@@ -347,7 +325,7 @@ class HobHandler(gobject.GObject):
self.commands_async.append(self.SUB_PARSE_CONFIG)
self.commands_async.append(self.SUB_GNERATE_TGTS)
self.run_next_command(self.GENERATE_RECIPES)
def generate_packages(self, tgts, default_task="build"):
targets = []
targets.extend(tgts)
@@ -390,9 +368,6 @@ class HobHandler(gobject.GObject):
def reset_build(self):
self.build.reset()
def get_logfile(self):
return self.server.runCommand(["getVariable", "BB_CONSOLELOG"])
def _remove_redundant(self, string):
ret = []
for i in string.split():
@@ -495,7 +470,6 @@ class HobHandler(gobject.GObject):
params["runnable_image_types"] = self._remove_redundant(self.runCommand(["getVariable", "RUNNABLE_IMAGE_TYPES"]) or "")
params["runnable_machine_patterns"] = self._remove_redundant(self.runCommand(["getVariable", "RUNNABLE_MACHINE_PATTERNS"]) or "")
params["deployable_image_types"] = self._remove_redundant(self.runCommand(["getVariable", "DEPLOYABLE_IMAGE_TYPES"]) or "")
params["kernel_image_type"] = self.runCommand(["getVariable", "KERNEL_IMAGETYPE"]) or ""
params["tmpdir"] = self.runCommand(["getVariable", "TMPDIR"]) or ""
params["distro_version"] = self.runCommand(["getVariable", "DISTRO_VERSION"]) or ""
params["target_os"] = self.runCommand(["getVariable", "TARGET_OS"]) or ""
@@ -511,10 +485,9 @@ class HobHandler(gobject.GObject):
params["http_proxy"] = self.runCommand(["getVariable", "http_proxy"]) or ""
params["ftp_proxy"] = self.runCommand(["getVariable", "ftp_proxy"]) or ""
params["https_proxy"] = self.runCommand(["getVariable", "https_proxy"]) or ""
params["all_proxy"] = self.runCommand(["getVariable", "all_proxy"]) or ""
params["cvs_proxy_host"] = self.runCommand(["getVariable", "CVS_PROXY_HOST"]) or ""
params["cvs_proxy_port"] = self.runCommand(["getVariable", "CVS_PROXY_PORT"]) or ""
params["image_white_pattern"] = self.runCommand(["getVariable", "BBUI_IMAGE_WHITE_PATTERN"]) or ""
params["image_black_pattern"] = self.runCommand(["getVariable", "BBUI_IMAGE_BLACK_PATTERN"]) or ""
return params

View File

@@ -34,7 +34,7 @@ class PackageListModel(gtk.TreeStore):
providing convenience functions to access gtk.TreeModel subclasses which
provide filtered views of the data.
"""
(COL_NAME, COL_VER, COL_REV, COL_RNM, COL_SEC, COL_SUM, COL_RDEP, COL_RPROV, COL_SIZE, COL_BINB, COL_INC, COL_FADE_INC, COL_FONT) = range(13)
(COL_NAME, COL_VER, COL_REV, COL_RNM, COL_SEC, COL_SUM, COL_RDEP, COL_RPROV, COL_SIZE, COL_BINB, COL_INC, COL_FADE_INC) = range(12)
__gsignals__ = {
"package-selection-changed" : (gobject.SIGNAL_RUN_LAST,
@@ -65,8 +65,7 @@ class PackageListModel(gtk.TreeStore):
gobject.TYPE_STRING,
gobject.TYPE_STRING,
gobject.TYPE_BOOLEAN,
gobject.TYPE_BOOLEAN,
gobject.TYPE_STRING)
gobject.TYPE_BOOLEAN)
"""
@@ -190,7 +189,7 @@ class PackageListModel(gtk.TreeStore):
self.COL_SEC, section, self.COL_SUM, summary,
self.COL_RDEP, rdep + ' ' + rrec,
self.COL_RPROV, rprov, self.COL_SIZE, size,
self.COL_BINB, "", self.COL_INC, False, self.COL_FONT, '10')
self.COL_BINB, "", self.COL_INC, False)
"""
Check whether the item at item_path is included or not
@@ -456,7 +455,7 @@ class RecipeListModel(gtk.ListStore):
"""
(COL_NAME, COL_DESC, COL_LIC, COL_GROUP, COL_DEPS, COL_BINB, COL_TYPE, COL_INC, COL_IMG, COL_INSTALL, COL_PN, COL_FADE_INC) = range(12)
__custom_image__ = "Create your own image"
__dummy_image__ = "Create your own image"
__gsignals__ = {
"recipe-selection-changed" : (gobject.SIGNAL_RUN_LAST,
@@ -565,13 +564,14 @@ class RecipeListModel(gtk.ListStore):
self.clear()
# dummy image for prompt
self.set(self.append(), self.COL_NAME, self.__custom_image__,
self.COL_DESC, "Use 'Edit image' to customize recipes and packages " \
"to be included in your image ",
self.set(self.append(), self.COL_NAME, self.__dummy_image__,
self.COL_DESC, "Use the 'View recipes' and 'View packages' " \
"options to select what you want to include " \
"in your image.",
self.COL_LIC, "", self.COL_GROUP, "",
self.COL_DEPS, "", self.COL_BINB, "",
self.COL_TYPE, "image", self.COL_INC, False,
self.COL_IMG, False, self.COL_INSTALL, "", self.COL_PN, self.__custom_image__)
self.COL_IMG, False, self.COL_INSTALL, "", self.COL_PN, self.__dummy_image__)
for item in event_model["pn"]:
name = item


@@ -23,7 +23,6 @@ import os
import os.path
import sys
import pango, pangocairo
import cairo
import math
from bb.ui.crumbs.hobcolor import HobColors
@@ -89,7 +88,6 @@ class hcc:
"cpio.xz" : ["cpio.xz"],
"vmdk" : ["vmdk"],
"cpio.lzma" : ["cpio.lzma"],
"elf" : ["elf"],
}
class HobViewTable (gtk.VBox):
@@ -121,7 +119,6 @@ class HobViewTable (gtk.VBox):
self.table_tree.set_headers_clickable(True)
self.table_tree.set_enable_search(True)
self.table_tree.set_rules_hint(True)
self.table_tree.set_enable_tree_lines(True)
self.table_tree.get_selection().set_mode(gtk.SELECTION_SINGLE)
self.toggle_columns = []
self.table_tree.connect("row-activated", self.row_activated_cb)
@@ -143,8 +140,6 @@ class HobViewTable (gtk.VBox):
cell = gtk.CellRendererText()
col.pack_start(cell, True)
col.set_attributes(cell, text=column['col_id'])
if 'col_t_id' in column.keys():
col.add_attribute(cell, 'font', column['col_t_id'])
elif column['col_style'] == 'check toggle':
cell = HobCellRendererToggle()
cell.set_property('activatable', True)
@@ -154,8 +149,6 @@ class HobViewTable (gtk.VBox):
col.pack_end(cell, True)
col.set_attributes(cell, active=column['col_id'])
self.toggle_columns.append(column['col_name'])
if 'col_group' in column.keys():
col.set_cell_data_func(cell, self.set_group_number_cb)
elif column['col_style'] == 'radio toggle':
cell = gtk.CellRendererToggle()
cell.set_property('activatable', True)
@@ -169,8 +162,6 @@ class HobViewTable (gtk.VBox):
cell = gtk.CellRendererText()
col.pack_start(cell, True)
col.set_cell_data_func(cell, self.display_binb_cb, column['col_id'])
if 'col_t_id' in column.keys():
col.add_attribute(cell, 'font', column['col_t_id'])
scroll = gtk.ScrolledWindow()
scroll.set_policy(gtk.POLICY_NEVER, gtk.POLICY_ALWAYS)
@@ -213,15 +204,6 @@ class HobViewTable (gtk.VBox):
def stop_cell_fadeinout_cb(self, ctrl, cell, tree):
self.emit("cell-fadeinout-stopped", ctrl, cell, tree)
def set_group_number_cb(self, col, cell, model, iter):
if model and (model.iter_parent(iter) == None):
cell.cell_attr["number_of_children"] = model.iter_n_children(iter)
else:
cell.cell_attr["number_of_children"] = 0
def connect_group_selection(self, cb_func):
self.table_tree.get_selection().connect("changed", cb_func)
"""
A method to calculate a softened value for the colour of widget when in the
provided state.
@@ -398,95 +380,363 @@ class HobInfoButton(gtk.EventBox):
def mouse_out_cb(self, widget, event):
self.image.set_from_file(hic.ICON_INFO_DISPLAY_FILE)
class HobIndicator(gtk.DrawingArea):
def __init__(self, count):
gtk.DrawingArea.__init__(self)
# Set no window for transparent background
self.set_has_window(False)
self.set_size_request(38,38)
# We need to pass through button clicks
self.add_events(gtk.gdk.BUTTON_PRESS_MASK | gtk.gdk.BUTTON_RELEASE_MASK)
class HobTabBar(gtk.DrawingArea):
__gsignals__ = {
"blank-area-changed" : (gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE,
(gobject.TYPE_INT,
gobject.TYPE_INT,
gobject.TYPE_INT,
gobject.TYPE_INT,)),
self.connect('expose-event', self.expose)
"tab-switched" : (gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE,
(gobject.TYPE_INT,)),
}
self.count = count
self.color = HobColors.GRAY
def expose(self, widget, event):
if self.count and self.count > 0:
ctx = widget.window.cairo_create()
x, y, w, h = self.allocation
ctx.set_operator(cairo.OPERATOR_OVER)
ctx.set_source_color(gtk.gdk.color_parse(self.color))
ctx.translate(w/2, h/2)
ctx.arc(x, y, min(w,h)/2 - 2, 0, 2*math.pi)
ctx.fill_preserve()
layout = self.create_pango_layout(str(self.count))
textw, texth = layout.get_pixel_size()
x = (w/2)-(textw/2) + x
y = (h/2) - (texth/2) + y
ctx.move_to(x, y)
self.window.draw_layout(self.style.light_gc[gtk.STATE_NORMAL], int(x), int(y), layout)
def set_count(self, count):
self.count = count
def set_active(self, active):
if active:
self.color = HobColors.DEEP_RED
else:
self.color = HobColors.GRAY
class HobTabLabel(gtk.HBox):
def __init__(self, text, count=0):
gtk.HBox.__init__(self, False, 0)
self.indicator = HobIndicator(count)
self.indicator.show()
self.pack_end(self.indicator, False, False)
self.lbl = gtk.Label(text)
self.lbl.set_alignment(0.0, 0.5)
self.lbl.show()
self.pack_end(self.lbl, True, True, 6)
def set_count(self, count):
self.indicator.set_count(count)
def set_active(self, active=True):
self.indicator.set_active(active)
class HobNotebook(gtk.Notebook):
def __init__(self):
gtk.Notebook.__init__(self)
self.set_property('homogeneous', True)
gtk.DrawingArea.__init__(self)
self.children = []
self.pages = []
self.tab_width = 140
self.tab_height = 52
self.tab_x = 10
self.tab_y = 0
self.width = 500
self.height = 53
self.tab_w_ratio = 140 * 1.0/500
self.tab_h_ratio = 52 * 1.0/53
self.set_size_request(self.width, self.height)
self.current_child = None
self.font = self.get_style().font_desc
self.font.set_size(pango.SCALE * 13)
self.update_children_text_layout_and_bg_color()
self.blank_rectangle = None
self.tab_pressed = False
self.set_property('can-focus', True)
self.set_events(gtk.gdk.EXPOSURE_MASK | gtk.gdk.POINTER_MOTION_MASK |
gtk.gdk.BUTTON1_MOTION_MASK | gtk.gdk.BUTTON_PRESS_MASK |
gtk.gdk.BUTTON_RELEASE_MASK)
self.connect("expose-event", self.on_draw)
self.connect("button-press-event", self.button_pressed_cb)
self.connect("button-release-event", self.button_released_cb)
self.connect("query-tooltip", self.query_tooltip_cb)
self.show_all()
def button_released_cb(self, widget, event):
self.tab_pressed = False
self.queue_draw()
def button_pressed_cb(self, widget, event):
if event.type == gtk.gdk._2BUTTON_PRESS:
return
result = False
if self.is_focus() or event.type == gtk.gdk.BUTTON_PRESS:
x, y = event.get_coords()
# check which tab be clicked
for child in self.children:
if (child["x"] < x) and (x < child["x"] + self.tab_width) \
and (child["y"] < y) and (y < child["y"] + self.tab_height):
self.current_child = child
result = True
self.grab_focus()
break
# check the blank area is focus in or not
if (self.blank_rectangle) and (self.blank_rectangle.x > 0) and (self.blank_rectangle.y > 0):
if (self.blank_rectangle.x < x) and (x < self.blank_rectangle.x + self.blank_rectangle.width) \
and (self.blank_rectangle.y < y) and (y < self.blank_rectangle.y + self.blank_rectangle.height):
self.grab_focus()
if result == True:
page = self.current_child["toggled_page"]
self.emit("tab-switched", page)
self.tab_pressed = True
self.queue_draw()
def update_children_size(self):
# calculate the size of tabs
self.tab_width = int(self.width * self.tab_w_ratio)
self.tab_height = int(self.height * self.tab_h_ratio)
for i, child in enumerate(self.children):
child["x"] = self.tab_x + i * self.tab_width
child["y"] = self.tab_y
if self.blank_rectangle:
self.resize_blank_rectangle()
def resize_blank_rectangle(self):
width = self.width - self.tab_width * len(self.children) - self.tab_x
x = self.tab_x + self.tab_width * len(self.children)
hpadding = vpadding = 5
self.blank_rectangle = self.set_blank_size(x + hpadding, self.tab_y + vpadding,
width - 2 * hpadding, self.tab_height - 2 * vpadding)
def update_children_text_layout_and_bg_color(self):
style = self.get_style().copy()
color = style.base[gtk.STATE_NORMAL]
for child in self.children:
pangolayout = self.create_pango_layout(child["title"])
pangolayout.set_font_description(self.font)
child["title_layout"] = pangolayout
child["r"] = color.red
child["g"] = color.green
child["b"] = color.blue
def append_tab_child(self, title, page, tooltip=""):
num = len(self.children) + 1
self.tab_width = self.tab_width * len(self.children) / num
i = 0
for i, child in enumerate(self.children):
child["x"] = self.tab_x + i * self.tab_width
i += 1
x = self.tab_x + i * self.tab_width
y = self.tab_y
pangolayout = self.create_pango_layout(title)
pangolayout.set_font_description(self.font)
color = self.style.base[gtk.STATE_NORMAL]
new_one = {
"x" : x,
"y" : y,
"r" : color.red,
"g" : color.green,
"b" : color.blue,
"title_layout" : pangolayout,
"toggled_page" : page,
"title" : title,
"indicator_show" : False,
"indicator_number" : 0,
"tooltip_markup" : tooltip,
}
self.children.append(new_one)
if tooltip and (not self.props.has_tooltip):
self.props.has_tooltip = True
# set the default current child
if not self.current_child:
self.current_child = new_one
def on_draw(self, widget, event):
cr = widget.window.cairo_create()
self.width = self.allocation.width
self.height = self.allocation.height
self.update_children_size()
self.draw_background(cr)
self.draw_toggled_tab(cr)
for child in self.children:
if child["indicator_show"] == True:
self.draw_indicator(cr, child)
self.draw_tab_text(cr)
def draw_background(self, cr):
style = self.get_style()
if self.is_focus():
cr.set_source_color(style.base[gtk.STATE_SELECTED])
else:
cr.set_source_color(style.base[gtk.STATE_NORMAL])
y = 6
h = self.height - 6 - 1
gap = 1
w = self.children[0]["x"]
cr.set_source_color(gtk.gdk.color_parse(HobColors.GRAY))
cr.rectangle(0, y, w - gap, h) # start rectangle
cr.fill()
cr.set_source_color(style.base[gtk.STATE_NORMAL])
cr.rectangle(w - gap, y, w, h) #first gap
cr.fill()
w = self.tab_width
for child in self.children:
x = child["x"]
cr.set_source_color(gtk.gdk.color_parse(HobColors.GRAY))
cr.rectangle(x, y, w - gap, h) # tab rectangle
cr.fill()
cr.set_source_color(style.base[gtk.STATE_NORMAL])
cr.rectangle(x + w - gap, y, w, h) # gap
cr.fill()
cr.set_source_color(gtk.gdk.color_parse(HobColors.GRAY))
cr.rectangle(x + w, y, self.width - x - w, h) # last rectangle
cr.fill()
def draw_tab_text(self, cr):
style = self.get_style()
for child in self.children:
pangolayout = child["title_layout"]
if pangolayout:
fontw, fonth = pangolayout.get_pixel_size()
# center pos
off_x = (self.tab_width - fontw) / 2
off_y = (self.tab_height - fonth) / 2
x = child["x"] + off_x
y = child["y"] + off_y
if not child == self.current_child:
self.window.draw_layout(self.style.fg_gc[gtk.STATE_NORMAL], int(x), int(y), pangolayout, gtk.gdk.Color(HobColors.WHITE))
else:
self.window.draw_layout(self.style.fg_gc[gtk.STATE_NORMAL], int(x), int(y), pangolayout)
def draw_toggled_tab(self, cr):
if not self.current_child:
return
x = self.current_child["x"]
y = self.current_child["y"]
width = self.tab_width
height = self.tab_height
style = self.get_style()
color = style.base[gtk.STATE_NORMAL]
r = height / 10
if self.tab_pressed == True:
for xoff, yoff, c1, c2 in [(1, 0, HobColors.SLIGHT_DARK, HobColors.DARK), (2, 0, HobColors.GRAY, HobColors.LIGHT_GRAY)]:
cr.set_source_color(gtk.gdk.color_parse(c1))
cr.move_to(x + xoff, y + height + yoff)
cr.line_to(x + xoff, r + yoff)
cr.arc(x + r + xoff, y + r + yoff, r, math.pi, 1.5*math.pi)
cr.move_to(x + r + xoff, y + yoff)
cr.line_to(x + width - r + xoff, y + yoff)
cr.arc(x + width - r + xoff, y + r + yoff, r, 1.5*math.pi, 2*math.pi)
cr.stroke()
cr.set_source_color(gtk.gdk.color_parse(c2))
cr.move_to(x + width + xoff, r + yoff)
cr.line_to(x + width + xoff, y + height + yoff)
cr.line_to(x + xoff, y + height + yoff)
cr.stroke()
x = x + 2
y = y + 2
cr.set_source_rgba(color.red, color.green, color.blue, 1)
cr.move_to(x + r, y)
cr.line_to(x + width - r , y)
cr.arc(x + width - r, y + r, r, 1.5*math.pi, 2*math.pi)
cr.move_to(x + width, r)
cr.line_to(x + width, y + height)
cr.line_to(x, y + height)
cr.line_to(x, r)
cr.arc(x + r, y + r, r, math.pi, 1.5*math.pi)
cr.fill()
def draw_indicator(self, cr, child):
text = ("%d" % child["indicator_number"])
layout = self.create_pango_layout(text)
layout.set_font_description(self.font)
textw, texth = layout.get_pixel_size()
# draw the back round area
tab_x = child["x"]
tab_y = child["y"]
dest_w = int(32 * self.tab_w_ratio)
dest_h = int(32 * self.tab_h_ratio)
if dest_h < self.tab_height:
dest_w = dest_h
# x position is offset(tab_width*3/4 - icon_width/2) + start_pos(tab_x)
x = tab_x + self.tab_width * 3/4 - dest_w/2
y = tab_y + self.tab_height/2 - dest_h/2
r = min(dest_w, dest_h)/2
if not child == self.current_child:
color = cr.set_source_color(gtk.gdk.color_parse(HobColors.DEEP_RED))
else:
color = cr.set_source_color(gtk.gdk.color_parse(HobColors.GRAY))
# check round back area can contain the text or not
back_round_can_contain_width = float(2 * r * 0.707)
if float(textw) > back_round_can_contain_width:
xoff = (textw - int(back_round_can_contain_width)) / 2
cr.move_to(x + r - xoff, y + r + r)
cr.arc((x + r - xoff), (y + r), r, 0.5*math.pi, 1.5*math.pi)
cr.fill() # left half round
cr.rectangle((x + r - xoff), y, 2 * xoff, 2 * r)
cr.fill() # center rectangle
cr.arc((x + r + xoff), (y + r), r, 1.5*math.pi, 0.5*math.pi)
cr.fill() # right half round
else:
cr.arc((x + r), (y + r), r, 0, 2*math.pi)
cr.fill()
# draw the number text
x = x + (dest_w/2)-(textw/2)
y = y + (dest_h/2) - (texth/2)
cr.move_to(x, y)
self.window.draw_layout(self.style.fg_gc[gtk.STATE_NORMAL], int(x), int(y), layout, gtk.gdk.Color(HobColors.WHITE))
def show_indicator_icon(self, child, number):
child["indicator_show"] = True
child["indicator_number"] = number
self.queue_draw()
def hide_indicator_icon(self, child):
child["indicator_show"] = False
self.queue_draw()
def set_blank_size(self, x, y, w, h):
if not self.blank_rectangle or self.blank_rectangle.x != x or self.blank_rectangle.width != w:
self.emit("blank-area-changed", x, y, w, h)
return gtk.gdk.Rectangle(x, y, w, h)
def query_tooltip_cb(self, widget, x, y, keyboardtip, tooltip):
if keyboardtip or (not tooltip):
return False
# check which tab be clicked
for child in self.children:
if (child["x"] < x) and (x < child["x"] + self.tab_width) \
and (child["y"] < y) and (y < child["y"] + self.tab_height):
tooltip.set_markup(child["tooltip_markup"])
return True
return False
class HobNotebook(gtk.VBox):
def __init__(self):
gtk.VBox.__init__(self, False, 0)
self.notebook = gtk.Notebook()
self.notebook.set_property('homogeneous', True)
self.notebook.set_property('show-tabs', False)
self.tabbar = HobTabBar()
self.tabbar.connect("tab-switched", self.tab_switched_cb)
self.notebook.connect("page-added", self.page_added_cb)
self.notebook.connect("page-removed", self.page_removed_cb)
self.search = None
self.search_name = ""
self.connect("switch-page", self.page_changed_cb)
self.tb = gtk.Table(1, 100, False)
self.hbox= gtk.HBox(False, 0)
self.hbox.pack_start(self.tabbar, True, True)
self.tb.attach(self.hbox, 0, 100, 0, 1)
self.pack_start(self.tb, False, False)
self.pack_start(self.notebook)
self.show_all()
def page_changed_cb(self, nb, page, page_num):
for p, lbl in enumerate(self.pages):
if p == page_num:
lbl.set_active()
else:
lbl.set_active(False)
def append_page(self, child, tab_label, tab_tooltip=None):
label = HobTabLabel(tab_label)
if tab_tooltip:
label.set_tooltip_text(tab_tooltip)
label.set_active(False)
self.pages.append(label)
gtk.Notebook.append_page(self, child, label)
def append_page(self, child, tab_label):
self.notebook.set_current_page(self.notebook.append_page(child, tab_label))
def set_entry(self, name="Search:"):
for child in self.tb.get_children():
if child:
self.tb.remove(child)
hbox_entry = gtk.HBox(False, 0)
hbox_entry.show()
self.search = gtk.Entry()
self.search_name = name
style = self.search.get_style()
@@ -497,20 +747,59 @@ class HobNotebook(gtk.Notebook):
self.search.set_icon_from_stock(gtk.ENTRY_ICON_SECONDARY, gtk.STOCK_CLEAR)
self.search.connect("icon-release", self.set_search_entry_clear_cb)
self.search.show()
self.align = gtk.Alignment(xalign=1.0, yalign=0.7)
self.align.add(self.search)
self.align.show()
hbox_entry.pack_end(self.align, False, False)
self.tabbar.resize_blank_rectangle()
self.tb.attach(hbox_entry, 75, 100, 0, 1, xpadding=5)
self.tb.attach(self.hbox, 0, 100, 0, 1)
self.tabbar.connect("blank-area-changed", self.blank_area_resize_cb)
self.search.connect("focus-in-event", self.set_search_entry_editable_cb)
self.search.connect("focus-out-event", self.set_search_entry_reset_cb)
self.set_action_widget(self.search, gtk.PACK_END)
self.tb.show()
def show_indicator_icon(self, title, number):
for child in self.pages:
if child.lbl.get_label() == title:
child.set_count(number)
for child in self.tabbar.children:
if child["toggled_page"] == -1:
continue
if child["title"] == title:
self.tabbar.show_indicator_icon(child, number)
def hide_indicator_icon(self, title):
for child in self.pages:
if child.lbl.get_label() == title:
child.set_count(0)
for child in self.tabbar.children:
if child["toggled_page"] == -1:
continue
if child["title"] == title:
self.tabbar.hide_indicator_icon(child)
def tab_switched_cb(self, widget, page):
self.notebook.set_current_page(page)
def page_added_cb(self, notebook, notebook_child, page):
if not notebook:
return
title = notebook.get_tab_label_text(notebook_child)
label = notebook.get_tab_label(notebook_child)
tooltip_markup = label.get_tooltip_markup()
if not title:
return
for child in self.tabbar.children:
if child["title"] == title:
child["toggled_page"] = page
return
self.tabbar.append_tab_child(title, page, tooltip_markup)
def page_removed_cb(self, notebook, notebook_child, page, title=""):
for child in self.tabbar.children:
if child["title"] == title:
child["toggled_page"] = -1
def blank_area_resize_cb(self, widget, request_x, request_y, request_width, request_height):
self.search.set_size_request(request_width, request_height)
def set_search_entry_editable_cb(self, search, event):
search.set_editable(True)
@@ -530,14 +819,7 @@ class HobNotebook(gtk.Notebook):
self.reset_entry(search)
def set_search_entry_clear_cb(self, search, icon_pos, event):
if search.get_editable() == True:
search.set_text("")
def set_page(self, title):
for child in self.pages:
if child.lbl.get_label() == title:
child.grab_focus()
self.set_current_page(self.page_num(child))
self.reset_entry(search)
class HobWarpCellRendererText(gtk.CellRendererText):
def __init__(self, col_number):
@@ -775,7 +1057,7 @@ class HobCellRendererPixbuf(gtk.CellRendererPixbuf):
if self.control.is_active():
self.control.on_draw_pixbuf_cb(pix, window.cairo_create(), x, y, w, h, True)
else:
self.control.start_run(200, 0, 0, 1000, 150, tree)
self.control.start_run(200, 0, 0, 1000, 200, tree)
else:
self.control.remove_running_cell_area(cell_area)
self.control.on_draw_pixbuf_cb(pix, window.cairo_create(), x, y, w, h, False)
@@ -802,17 +1084,11 @@ class HobCellRendererToggle(gtk.CellRendererToggle):
gtk.CellRendererToggle.__init__(self)
self.ctrl = HobCellRendererController(is_draw_row=True)
self.ctrl.running_mode = self.ctrl.MODE_ONE_SHORT
self.cell_attr = {"fadeout": False, "number_of_children": 0}
self.cell_attr = {"fadeout": False}
def do_render(self, window, widget, background_area, cell_area, expose_area, flags):
if (not self.ctrl) or (not widget):
return
if flags & gtk.CELL_RENDERER_SELECTED:
state = gtk.STATE_SELECTED
else:
state = gtk.STATE_NORMAL
if self.ctrl.is_active():
path = widget.get_path_at_pos(cell_area.x + cell_area.width/2, cell_area.y + cell_area.height/2)
# sometimes the parameters of cell_area will be a negative number,such as pull up down the scroll bar
@@ -821,23 +1097,14 @@ class HobCellRendererToggle(gtk.CellRendererToggle):
path = path[0]
if path in self.ctrl.running_cell_areas:
cr = window.cairo_create()
color = widget.get_style().base[state]
color = gtk.gdk.Color(HobColors.WHITE)
row_x, _, row_width, _ = widget.get_visible_rect()
border_y = self.get_property("ypad")
self.ctrl.on_draw_fadeinout_cb(cr, color, row_x, cell_area.y - border_y, row_width, \
cell_area.height + border_y * 2, self.cell_attr["fadeout"])
# draw number of a group
if self.cell_attr["number_of_children"]:
text = "%d pkg" % self.cell_attr["number_of_children"]
pangolayout = widget.create_pango_layout(text)
textw, texth = pangolayout.get_pixel_size()
x = cell_area.x + (cell_area.width/2) - (textw/2)
y = cell_area.y + (cell_area.height/2) - (texth/2)
widget.style.paint_layout(window, state, True, cell_area, widget, "checkbox", x, y, pangolayout)
else:
return gtk.CellRendererToggle.do_render(self, window, widget, background_area, cell_area, expose_area, flags)
return gtk.CellRendererToggle.do_render(self, window, widget, background_area, cell_area, expose_area, flags)
'''delay: normally delay time is 1000ms
cell_list: whilch cells need to be render


@@ -22,7 +22,6 @@
import gtk
import glib
import re
from bb.ui.crumbs.progressbar import HobProgressBar
from bb.ui.crumbs.hobcolor import HobColors
from bb.ui.crumbs.hobwidget import hic, HobImageButton, HobInfoButton, HobAltButton, HobButton
@@ -34,9 +33,6 @@ from bb.ui.crumbs.hobpages import HobPage
#
class ImageConfigurationPage (HobPage):
__dummy_machine__ = "--select a machine--"
__dummy_image__ = "--select a base image--"
def __init__(self, builder):
super(ImageConfigurationPage, self).__init__(builder, "Image configuration")
@@ -135,10 +131,8 @@ class ImageConfigurationPage (HobPage):
self._pack_components(pack_config_build_button = True)
self.set_config_machine_layout(show_progress_bar = False)
self.set_config_baseimg_layout()
self.set_rcppkg_layout()
self.show_all()
if self.builder.recipe_model.get_selected_image() == self.builder.recipe_model.__custom_image__:
self.just_bake_button.hide()
self.or_label.hide()
def create_config_machine(self):
self.machine_title = gtk.Label()
@@ -153,6 +147,7 @@ class ImageConfigurationPage (HobPage):
self.machine_title_desc.set_markup(mark)
self.machine_combo = gtk.combo_box_new_text()
self.machine_combo.set_wrap_width(1)
self.machine_combo.connect("changed", self.machine_combo_changed_cb)
icon_file = hic.ICON_LAYERS_DISPLAY_FILE
@@ -201,14 +196,29 @@ class ImageConfigurationPage (HobPage):
self.image_title_desc.set_markup(mark)
self.image_combo = gtk.combo_box_new_text()
self.image_combo.set_wrap_width(1)
self.image_combo_id = self.image_combo.connect("changed", self.image_combo_changed_cb)
self.image_desc = gtk.Label()
self.image_desc.set_alignment(0.0, 0.5)
self.image_desc.set_size_request(360, -1)
self.image_desc.set_justify(gtk.JUSTIFY_LEFT)
self.image_desc.set_line_wrap(True)
# button to view recipes
icon_file = hic.ICON_RCIPE_DISPLAY_FILE
hover_file = hic.ICON_RCIPE_HOVER_FILE
self.view_recipes_button = HobImageButton("View recipes",
"Add/remove recipes and tasks",
icon_file, hover_file)
self.view_recipes_button.connect("clicked", self.view_recipes_button_clicked_cb)
# button to view packages
icon_file = hic.ICON_PACKAGES_DISPLAY_FILE
hover_file = hic.ICON_PACKAGES_HOVER_FILE
self.view_packages_button = HobImageButton("View packages",
"Add/remove previously built packages",
icon_file, hover_file)
self.view_packages_button.connect("clicked", self.view_packages_button_clicked_cb)
self.image_separator = gtk.HSeparator()
def set_config_baseimg_layout(self):
@@ -218,27 +228,29 @@ class ImageConfigurationPage (HobPage):
self.gtable.attach(self.image_desc, 13, 38, 23, 28)
self.gtable.attach(self.image_separator, 0, 40, 35, 36)
def set_rcppkg_layout(self):
self.gtable.attach(self.view_recipes_button, 0, 20, 28, 33)
self.gtable.attach(self.view_packages_button, 20, 40, 28, 33)
def create_config_build_button(self):
# Create the "Build packages" and "Build image" buttons at the bottom
button_box = gtk.HBox(False, 6)
# create button "Build image"
self.just_bake_button = HobButton("Build image")
self.just_bake_button.set_size_request(205, 49)
self.just_bake_button.set_tooltip_text("Build target image")
self.just_bake_button.connect("clicked", self.just_bake_button_clicked_cb)
button_box.pack_end(self.just_bake_button, expand=False, fill=False)
just_bake_button = HobButton("Build image")
just_bake_button.set_size_request(205, 49)
just_bake_button.set_tooltip_text("Build target image")
just_bake_button.connect("clicked", self.just_bake_button_clicked_cb)
button_box.pack_end(just_bake_button, expand=False, fill=False)
# create separator label
self.or_label = gtk.Label(" or ")
button_box.pack_end(self.or_label, expand=False, fill=False)
label = gtk.Label(" or ")
button_box.pack_end(label, expand=False, fill=False)
# create button "Edit Image"
self.edit_image_button = HobButton("Edit image")
self.edit_image_button.set_size_request(205, 49)
self.edit_image_button.set_tooltip_text("Edit target image")
self.edit_image_button.connect("clicked", self.edit_image_button_clicked_cb)
button_box.pack_end(self.edit_image_button, expand=False, fill=False)
# create button "Build Packages"
build_packages_button = HobAltButton("Build packages")
build_packages_button.connect("clicked", self.build_packages_button_clicked_cb)
build_packages_button.set_tooltip_text("Build recipes into packages")
button_box.pack_end(build_packages_button, expand=False, fill=False)
return button_box
@@ -247,15 +259,9 @@ class ImageConfigurationPage (HobPage):
def machine_combo_changed_cb(self, machine_combo):
combo_item = machine_combo.get_active_text()
if not combo_item or combo_item == self.__dummy_machine__:
if not combo_item:
return
# remove __dummy_machine__ item from the store list after first user selection
# because it is no longer valid
combo_store = machine_combo.get_model()
if len(combo_store) and (combo_store[0][0] == self.__dummy_machine__):
machine_combo.remove_text(0)
self.builder.configuration.curr_mach = combo_item
if self.machine_combo_changed_by_manual:
self.builder.configuration.clear_selection()
@@ -266,13 +272,13 @@ class ImageConfigurationPage (HobPage):
self.builder.populate_recipe_package_info_async()
def update_machine_combo(self):
all_machines = [self.__dummy_machine__] + self.builder.parameters.all_machines
all_machines = self.builder.parameters.all_machines
model = self.machine_combo.get_model()
model.clear()
for machine in all_machines:
self.machine_combo.append_text(machine)
self.machine_combo.set_active(0)
self.machine_combo.set_active(-1)
def switch_machine_combo(self):
self.machine_combo_changed_by_manual = False
@@ -283,15 +289,10 @@ class ImageConfigurationPage (HobPage):
self.machine_combo.set_active(active)
return
active += 1
self.machine_combo.set_active(-1)
if model[0][0] != self.__dummy_machine__:
self.machine_combo.insert_text(0, self.__dummy_machine__)
self.machine_combo.set_active(0)
def update_image_desc(self):
def update_image_desc(self, selected_image):
desc = ""
selected_image = self.image_combo.get_active_text()
if selected_image and selected_image in self.builder.recipe_model.pn_path.keys():
image_path = self.builder.recipe_model.pn_path[selected_image]
image_iter = self.builder.recipe_model.get_iter(image_path)
@@ -308,15 +309,9 @@ class ImageConfigurationPage (HobPage):
def image_combo_changed_cb(self, combo):
self.builder.window_sensitive(False)
selected_image = self.image_combo.get_active_text()
if not selected_image or (selected_image == self.__dummy_image__):
if not selected_image:
return
# remove __dummy_image__ item from the store list after first user selection
# because it is no longer valid
combo_store = combo.get_model()
if len(combo_store) and (combo_store[0][0] == self.__dummy_image__):
combo.remove_text(0)
self.builder.customized = False
selected_recipes = []
@@ -324,17 +319,13 @@ class ImageConfigurationPage (HobPage):
image_path = self.builder.recipe_model.pn_path[selected_image]
image_iter = self.builder.recipe_model.get_iter(image_path)
selected_packages = self.builder.recipe_model.get_value(image_iter, self.builder.recipe_model.COL_INSTALL).split()
self.update_image_desc()
self.update_image_desc(selected_image)
self.builder.recipe_model.reset()
self.builder.package_model.reset()
self.show_baseimg_selected()
if selected_image == self.builder.recipe_model.__custom_image__:
self.just_bake_button.hide()
self.or_label.hide()
glib.idle_add(self.image_combo_changed_idle_cb, selected_image, selected_recipes, selected_packages)
def _image_combo_connect_signal(self):
@@ -351,61 +342,32 @@ class ImageConfigurationPage (HobPage):
# populate image combo
filter = {RecipeListModel.COL_TYPE : ['image']}
image_model = recipe_model.tree_model(filter)
active = 0
cnt = 1
white_pattern = []
if self.builder.parameters.image_white_pattern:
for i in self.builder.parameters.image_white_pattern.split():
white_pattern.append(re.compile(i))
black_pattern = []
if self.builder.parameters.image_black_pattern:
for i in self.builder.parameters.image_black_pattern.split():
black_pattern.append(re.compile(i))
active = -1
cnt = 0
it = image_model.get_iter_first()
self._image_combo_disconnect_signal()
model = self.image_combo.get_model()
model.clear()
# Set a indicator text to combo store when first open
self.image_combo.append_text(self.__dummy_image__)
# append and set active
while it:
path = image_model.get_path(it)
it = image_model.iter_next(it)
image_name = image_model[path][recipe_model.COL_NAME]
if image_name == self.builder.recipe_model.__custom_image__:
if image_name == self.builder.recipe_model.__dummy_image__:
continue
if black_pattern:
allow = True
for pattern in black_pattern:
if pattern.search(image_name):
allow = False
break
elif white_pattern:
allow = False
for pattern in white_pattern:
if pattern.search(image_name):
allow = True
break
else:
allow = True
if allow:
self.image_combo.append_text(image_name)
if image_name == selected_image:
active = cnt
cnt = cnt + 1
self.image_combo.append_text(self.builder.recipe_model.__custom_image__)
if selected_image == self.builder.recipe_model.__custom_image__:
self.image_combo.append_text(image_name)
if image_name == selected_image:
active = cnt
cnt = cnt + 1
self.image_combo.append_text(self.builder.recipe_model.__dummy_image__)
if selected_image == self.builder.recipe_model.__dummy_image__:
active = cnt
self.image_combo.set_active(-1)
self.image_combo.set_active(active)
if active != 0:
if active != -1:
self.show_baseimg_selected()
self._image_combo_connect_signal()
@@ -414,11 +376,17 @@ class ImageConfigurationPage (HobPage):
# Create a layer selection dialog
self.builder.show_layer_selection_dialog()
def view_recipes_button_clicked_cb(self, button):
self.builder.show_recipes()
def view_packages_button_clicked_cb(self, button):
self.builder.show_packages()
def just_bake_button_clicked_cb(self, button):
self.builder.just_bake()
def edit_image_button_clicked_cb(self, button):
self.builder.show_recipes()
def build_packages_button_clicked_cb(self, button):
self.builder.build_packages()
def template_button_clicked_cb(self, button):
response, path = self.builder.show_load_template_dialog()


@@ -25,15 +25,34 @@ import gtk
from bb.ui.crumbs.hobcolor import HobColors
from bb.ui.crumbs.hobwidget import hic, HobViewTable, HobAltButton, HobButton
from bb.ui.crumbs.hobpages import HobPage
import subprocess
from bb.ui.crumbs.hig import CrumbsDialog
#
# ImageDetailsPage
#
class ImageDetailsPage (HobPage):
__columns__ = [{
'col_name' : 'Image name',
'col_id' : 0,
'col_style': 'text',
'col_min' : 500,
'col_max' : 500
}, {
'col_name' : 'Image size',
'col_id' : 1,
'col_style': 'text',
'col_min' : 100,
'col_max' : 100
}, {
'col_name' : 'Select',
'col_id' : 2,
'col_style': 'radio toggle',
'col_min' : 100,
'col_max' : 100
}]
class DetailBox (gtk.EventBox):
def __init__(self, widget = None, varlist = None, vallist = None, icon = None, button = None, button2=None, color = HobColors.LIGHT_GRAY):
def __init__(self, widget = None, varlist = None, vallist = None, icon = None, button = None, color = HobColors.LIGHT_GRAY):
gtk.EventBox.__init__(self)
# set color
@@ -42,41 +61,33 @@ class ImageDetailsPage (HobPage):
self.set_style(style)
self.hbox = gtk.HBox()
self.hbox.set_border_width(10)
self.hbox.set_border_width(15)
self.add(self.hbox)
total_rows = 0
if widget:
total_rows = 10
if varlist and vallist:
row = 1
elif varlist and vallist:
# pack the icon and the text on the left
total_rows += len(varlist)
self.table = gtk.Table(total_rows, 20, True)
self.table.set_row_spacings(6)
row = len(varlist)
self.table = gtk.Table(row, 20, True)
self.table.set_size_request(100, -1)
self.hbox.pack_start(self.table, expand=True, fill=True, padding=15)
colid = 0
rowid = 0
self.line_widgets = {}
if icon:
self.table.attach(icon, colid, colid + 2, 0, 1)
colid = colid + 2
if widget:
self.table.attach(widget, colid, 20, 0, 10)
rowid = 10
if varlist and vallist:
for row in range(rowid, total_rows):
index = row - rowid
self.line_widgets[varlist[index]] = self.text2label(varlist[index], vallist[index])
self.table.attach(self.line_widgets[varlist[index]], colid, 20, row, row + 1)
self.table.attach(widget, colid, 20, 0, 1)
elif varlist and vallist:
for line in range(0, row):
self.line_widgets[varlist[line]] = self.text2label(varlist[line], vallist[line])
self.table.attach(self.line_widgets[varlist[line]], colid, 20, line, line + 1)
# pack the button on the right
if button:
self.bbox = gtk.VBox()
self.bbox.pack_start(button, expand=True, fill=True)
if button2:
self.bbox.pack_start(button2, expand=True, fill=True)
self.hbox.pack_end(self.bbox, expand=False, fill=False)
self.hbox.pack_end(button, expand=False, fill=False)
def update_line_widgets(self, variable, value):
if len(self.line_widgets) == 0:
@@ -85,23 +96,9 @@ class ImageDetailsPage (HobPage):
return
self.line_widgets[variable].set_markup(self.format_line(variable, value))
def wrap_line(self, inputs):
# wrap the long text of inputs
wrap_width_chars = 75
outputs = ""
tmps = inputs
less_chars = len(inputs)
while (less_chars - wrap_width_chars) > 0:
less_chars -= wrap_width_chars
outputs += tmps[:wrap_width_chars] + "\n "
tmps = inputs[less_chars:]
outputs += tmps
return outputs
def format_line(self, variable, value):
wraped_value = self.wrap_line(value)
markup = "<span weight=\'bold\'>%s</span>" % variable
markup += "<span weight=\'normal\' foreground=\'#1c1c1c\' font_desc=\'14px\'>%s</span>" % wraped_value
markup += "<span weight=\'normal\' foreground=\'#1c1c1c\' font_desc=\'14px\'>%s</span>" % value
return markup
def text2label(self, variable, value):
@@ -115,7 +112,7 @@ class ImageDetailsPage (HobPage):
def __init__(self, builder):
super(ImageDetailsPage, self).__init__(builder, "Image details")
self.image_store = []
self.image_store = gtk.ListStore(gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_BOOLEAN)
self.button_ids = {}
self.details_bottom_buttons = gtk.HBox(False, 6)
self.create_visual_elements()
@@ -160,30 +157,27 @@ class ImageDetailsPage (HobPage):
self.details_bottom_buttons.remove(child)
def show_page(self, step):
self.build_succeeded = (step == self.builder.IMAGE_GENERATED)
build_succeeded = (step == self.builder.IMAGE_GENERATED)
image_addr = self.builder.parameters.image_addr
image_names = self.builder.parameters.image_names
if self.build_succeeded:
if build_succeeded:
machine = self.builder.configuration.curr_mach
base_image = self.builder.recipe_model.get_selected_image()
layers = self.builder.configuration.layers
pkg_num = "%s" % len(self.builder.package_model.get_selected_packages())
log_file = self.builder.current_logfile
else:
pkg_num = "N/A"
log_file = None
# remove
for button_id, button in self.button_ids.items():
button.disconnect(button_id)
self._remove_all_widget()
# repack
self.pack_start(self.details_top_buttons, expand=False, fill=False)
self.pack_start(self.group_align, expand=True, fill=True)
self.build_result = None
if self.build_succeeded:
if build_succeeded:
# building is the previous step
icon = gtk.Image()
pixmap_path = hic.ICON_INDI_CONFIRM_FILE
@@ -196,89 +190,45 @@ class ImageDetailsPage (HobPage):
self.box_group_area.pack_start(self.build_result, expand=False, fill=False)
# create the buttons at the bottom first because the buttons are used in apply_button_per_image()
if self.build_succeeded:
if build_succeeded:
self.buttonlist = ["Build new image", "Save as template", "Run image", "Deploy image"]
else: # get to this page from "My images"
self.buttonlist = ["Build new image", "Run image", "Deploy image"]
# Name
self.image_store = []
self.toggled_image = ""
self.image_store.clear()
default_toggled = False
default_image_size = 0
self.num_toggled = 0
i = 0
for image_name in image_names:
image_size = HobPage._size_to_string(os.stat(os.path.join(image_addr, image_name)).st_size)
image_attr = ("run" if (self.test_type_runnable(image_name) and self.test_mach_runnable(image_name)) else \
("deploy" if self.test_deployable(image_name) else ""))
is_toggled = (image_attr != "")
if not self.toggled_image:
if not default_toggled:
default_toggled = (self.test_type_runnable(image_name) and self.test_mach_runnable(image_name)) \
or self.test_deployable(image_name)
if i == (len(image_names) - 1):
is_toggled = True
if is_toggled:
default_toggled = True
self.image_store.set(self.image_store.append(), 0, image_name, 1, image_size, 2, default_toggled)
if default_toggled:
default_image_size = image_size
self.toggled_image = image_name
split_stuff = image_name.split('.')
if "rootfs" in split_stuff:
image_type = image_name[(len(split_stuff[0]) + len(".rootfs") + 1):]
self.create_bottom_buttons(self.buttonlist, image_name)
else:
image_type = image_name[(len(split_stuff[0]) + 1):]
self.image_store.append({'name': image_name,
'type': image_type,
'size': image_size,
'is_toggled': is_toggled,
'action_attr': image_attr,})
self.image_store.set(self.image_store.append(), 0, image_name, 1, image_size, 2, False)
i = i + 1
self.num_toggled += is_toggled
is_runnable = self.create_bottom_buttons(self.buttonlist, self.toggled_image)
# Generated image files info
varlist = ["Name: ", "FileCreated: ", "Directory: "]
vallist = []
vallist.append(image_name.split('.')[0])
vallist.append(', '.join(fileitem['type'] for fileitem in self.image_store))
vallist.append(image_addr)
image_table = HobViewTable(self.__columns__)
image_table.set_model(self.image_store)
image_table.connect("toggled", self.toggled_cb)
view_files_button = HobAltButton("View files")
view_files_button.connect("clicked", self.view_files_clicked_cb, image_addr)
view_files_button.set_tooltip_text("Open the directory containing the image files")
view_log_button = None
if log_file:
view_log_button = HobAltButton("View log")
view_log_button.connect("clicked", self.view_log_clicked_cb, log_file)
view_log_button.set_tooltip_text("Open the building log files")
self.image_detail = self.DetailBox(varlist=varlist, vallist=vallist, button=view_files_button, button2=view_log_button)
self.box_group_area.pack_start(self.image_detail, expand=False, fill=True)
# The default kernel box for the qemu images
self.sel_kernel = ""
self.kernel_detail = None
if 'qemu' in image_name:
self.sel_kernel = self.get_kernel_file_name()
varlist = ["Kernel: "]
vallist = []
vallist.append(self.sel_kernel)
change_kernel_button = HobAltButton("Change")
change_kernel_button.connect("clicked", self.change_kernel_cb)
change_kernel_button.set_tooltip_text("Change qemu kernel file")
self.kernel_detail = self.DetailBox(varlist=varlist, vallist=vallist, button=change_kernel_button)
self.box_group_area.pack_start(self.kernel_detail, expand=False, fill=False)
self.image_detail = self.DetailBox(widget=image_table, button=view_files_button)
self.box_group_area.pack_start(self.image_detail, expand=True, fill=True)
# Machine, Base image and Layers
layer_num_limit = 15
varlist = ["Machine: ", "Base image: ", "Layers: "]
vallist = []
self.setting_detail = None
if self.build_succeeded:
if build_succeeded:
vallist.append(machine)
vallist.append(base_image)
i = 0
@@ -302,14 +252,14 @@ class ImageDetailsPage (HobPage):
edit_config_button.set_tooltip_text("Edit machine, base image and recipes")
edit_config_button.connect("clicked", self.edit_config_button_clicked_cb)
self.setting_detail = self.DetailBox(varlist=varlist, vallist=vallist, button=edit_config_button)
self.box_group_area.pack_start(self.setting_detail, expand=True, fill=True)
self.box_group_area.pack_start(self.setting_detail, expand=False, fill=False)
# Packages included, and Total image size
varlist = ["Packages included: ", "Total image size: "]
vallist = []
vallist.append(pkg_num)
vallist.append(default_image_size)
if self.build_succeeded:
if build_succeeded:
edit_packages_button = HobAltButton("Edit packages")
edit_packages_button.set_tooltip_text("Edit the packages included in your image")
edit_packages_button.connect("clicked", self.edit_packages_button_clicked_cb)
@@ -319,23 +269,12 @@ class ImageDetailsPage (HobPage):
self.box_group_area.pack_start(self.package_detail, expand=False, fill=False)
# pack the buttons at the bottom, at this time they are already created.
if self.build_succeeded:
self.box_group_area.pack_end(self.details_bottom_buttons, expand=False, fill=False)
else: # for "My images" page
self.details_separator = gtk.HSeparator()
self.box_group_area.pack_start(self.details_separator, expand=False, fill=False)
self.box_group_area.pack_start(self.details_bottom_buttons, expand=False, fill=False)
self.box_group_area.pack_end(self.details_bottom_buttons, expand=False, fill=False)
self.show_all()
if self.kernel_detail and (not is_runnable):
self.kernel_detail.hide()
def view_files_clicked_cb(self, button, image_addr):
subprocess.call("xdg-open /%s" % image_addr, shell=True)
def view_log_clicked_cb(self, button, log_file):
if log_file:
os.system("xdg-open /%s" % log_file)
os.system("xdg-open /%s" % image_addr)
def refresh_package_detail_box(self, image_size):
self.package_detail.update_line_widgets("Total image size: ", image_size)
@@ -364,109 +303,43 @@ class ImageDetailsPage (HobPage):
break
return deployable
def get_kernel_file_name(self, kernel_addr=""):
kernel_name = ""
if not kernel_addr:
kernel_addr = self.builder.parameters.image_addr
files = [f for f in os.listdir(kernel_addr) if f[0] <> '.']
for check_file in files:
if check_file.endswith(".bin"):
name_splits = check_file.split(".")[0]
if self.builder.parameters.kernel_image_type in name_splits.split("-"):
kernel_name = check_file
break
return kernel_name
def show_builded_images_dialog(self, widget, primary_action=""):
title = primary_action if primary_action else "Your builded images"
dialog = CrumbsDialog(title, self.builder,
gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT)
dialog.set_border_width(12)
label = gtk.Label()
label.set_use_markup(True)
label.set_alignment(0.0, 0.5)
label.set_markup("<span font_desc='12'>Select the image file you want to %s</span>" % primary_action)
dialog.vbox.pack_start(label, expand=False, fill=False)
# filter created images as action attribution (deploy or run)
action_attr = ""
action_images = []
for fileitem in self.image_store:
action_attr = fileitem['action_attr']
if (action_attr == 'run' and primary_action == "Run image") \
or (action_attr == 'deploy' and primary_action == "Deploy image"):
action_images.append(fileitem)
# pack the corresponding 'runnable' or 'deploy' radio_buttons, if there has no more than one file.
# assume that there does not both have 'deploy' and 'runnable' files in the same building result
# in possible as design.
curr_row = 0
rows = (len(action_images)) if len(action_images) < 10 else 10
table = gtk.Table(rows, 10, True)
table.set_row_spacings(6)
table.set_col_spacing(0, 12)
table.set_col_spacing(5, 12)
sel_parent_btn = None
for fileitem in action_images:
sel_btn = gtk.RadioButton(sel_parent_btn, fileitem['type'])
sel_parent_btn = sel_btn if not sel_parent_btn else sel_parent_btn
sel_btn.set_active(fileitem['is_toggled'])
sel_btn.connect('toggled', self.table_selected_cb, fileitem)
if curr_row < 10:
table.attach(sel_btn, 2, 5, curr_row, curr_row + 1)
else:
table.attach(sel_btn, 7, 10, curr_row - 10, curr_row - 9)
curr_row += 1
dialog.vbox.pack_start(table, expand=False, fill=False, padding = 6)
button = dialog.add_button("Cancel", gtk.RESPONSE_CANCEL)
HobAltButton.style_button(button)
if primary_action:
button = dialog.add_button(primary_action, gtk.RESPONSE_YES)
HobButton.style_button(button)
dialog.show_all()
response = dialog.run()
dialog.destroy()
if response != gtk.RESPONSE_YES:
def toggled_cb(self, table, cell, path, columnid, tree):
model = tree.get_model()
if not model:
return
iter = model.get_iter_first()
while iter:
rowpath = model.get_path(iter)
model[rowpath][columnid] = False
iter = model.iter_next(iter)
for fileitem in self.image_store:
if fileitem['is_toggled']:
if fileitem['action_attr'] == 'run':
self.builder.runqemu_image(fileitem['name'], self.sel_kernel)
elif fileitem['action_attr'] == 'deploy':
self.builder.deploy_image(fileitem['name'])
model[path][columnid] = True
self.refresh_package_detail_box(model[path][1])
def table_selected_cb(self, tbutton, image):
image['is_toggled'] = tbutton.get_active()
if image['is_toggled']:
self.toggled_image = image['name']
image_name = model[path][0]
def change_kernel_cb(self, widget):
kernel_path = self.builder.show_load_kernel_dialog()
if kernel_path and self.kernel_detail:
import os.path
self.sel_kernel = os.path.basename(kernel_path)
markup = self.kernel_detail.format_line("Kernel: ", self.sel_kernel)
label = ((self.kernel_detail.get_children()[0]).get_children()[0]).get_children()[0]
label.set_markup(markup)
# remove
for button_id, button in self.button_ids.items():
button.disconnect(button_id)
self._remove_all_widget()
# repack
self.pack_start(self.details_top_buttons, expand=False, fill=False)
self.pack_start(self.group_align, expand=True, fill=True)
if self.build_result:
self.box_group_area.pack_start(self.build_result, expand=False, fill=False)
self.box_group_area.pack_start(self.image_detail, expand=True, fill=True)
if self.setting_detail:
self.box_group_area.pack_start(self.setting_detail, expand=False, fill=False)
self.box_group_area.pack_start(self.package_detail, expand=False, fill=False)
self.create_bottom_buttons(self.buttonlist, image_name)
self.box_group_area.pack_end(self.details_bottom_buttons, expand=False, fill=False)
self.show_all()
def create_bottom_buttons(self, buttonlist, image_name):
# Create the buttons at the bottom
created = False
packed = False
self.button_ids = {}
is_runnable = False
# create button "Deploy image"
name = "Deploy image"
@@ -501,7 +374,15 @@ class ImageDetailsPage (HobPage):
self.button_ids[button_id] = run_button
self.details_bottom_buttons.pack_end(run_button, expand=False, fill=False)
created = True
is_runnable = True
if not packed:
box = gtk.HBox(False, 6)
box.show()
subbox = gtk.HBox(False, 0)
subbox.set_size_request(205, 49)
subbox.show()
box.add(subbox)
self.details_bottom_buttons.pack_end(box, False, False)
name = "Save as template"
if name in buttonlist:
@@ -510,13 +391,8 @@ class ImageDetailsPage (HobPage):
label = gtk.Label(" or ")
self.details_bottom_buttons.pack_end(label, expand=False, fill=False)
# create button "Save as template"
save_button = HobAltButton("Save as template")
else:
save_button = HobButton("Save as template")
save_button.set_size_request(205, 49)
save_button.set_flags(gtk.CAN_DEFAULT)
packed = True
# create button "Save as template"
save_button = HobAltButton("Save as template")
save_button.set_tooltip_text("Save the image configuration for reuse")
button_id = save_button.connect("clicked", self.save_button_clicked_cb)
self.button_ids[button_id] = save_button
@@ -526,39 +402,34 @@ class ImageDetailsPage (HobPage):
name = "Build new image"
if name in buttonlist:
# create button "Build new image"
if packed:
build_new_button = HobAltButton("Build new image")
else:
build_new_button = HobButton("Build new image")
build_new_button.set_flags(gtk.CAN_DEFAULT)
build_new_button.set_size_request(205, 49)
self.details_bottom_buttons.pack_end(build_new_button, expand=False, fill=False)
build_new_button = HobAltButton("Build new image")
build_new_button.set_tooltip_text("Create a new image from scratch")
button_id = build_new_button.connect("clicked", self.build_new_button_clicked_cb)
self.button_ids[button_id] = build_new_button
self.details_bottom_buttons.pack_start(build_new_button, expand=False, fill=False)
return is_runnable
def _get_selected_image(self):
image_name = ""
iter = self.image_store.get_iter_first()
while iter:
path = self.image_store.get_path(iter)
if self.image_store[path][2]:
image_name = self.image_store[path][0]
break
iter = self.image_store.iter_next(iter)
return image_name
def save_button_clicked_cb(self, button):
self.builder.show_save_template_dialog()
def deploy_button_clicked_cb(self, button):
if self.toggled_image:
if self.num_toggled > 1:
self.set_sensitive(False)
self.show_builded_images_dialog(None, "Deploy image")
self.set_sensitive(True)
else:
self.builder.deploy_image(self.toggled_image)
image_name = self._get_selected_image()
self.builder.deploy_image(image_name)
def run_button_clicked_cb(self, button):
if self.toggled_image:
if self.num_toggled > 1:
self.set_sensitive(False)
self.show_builded_images_dialog(None, "Run image")
self.set_sensitive(True)
else:
self.builder.runqemu_image(self.toggled_image, self.sel_kernel)
image_name = self._get_selected_image()
self.builder.runqemu_image(image_name)
def build_new_button_clicked_cb(self, button):
self.builder.initiate_new_build_async()


@@ -39,7 +39,6 @@ class PackageSelectionPage (HobPage):
'columns' : [{
'col_name' : 'Package name',
'col_id' : PackageListModel.COL_NAME,
'col_t_id' : PackageListModel.COL_FONT,
'col_style': 'text',
'col_min' : 100,
'col_max' : 300,
@@ -47,7 +46,6 @@ class PackageSelectionPage (HobPage):
}, {
'col_name' : 'Brought in by',
'col_id' : PackageListModel.COL_BINB,
'col_t_id' : PackageListModel.COL_FONT,
'col_style': 'binb',
'col_min' : 100,
'col_max' : 350,
@@ -55,7 +53,6 @@ class PackageSelectionPage (HobPage):
}, {
'col_name' : 'Size',
'col_id' : PackageListModel.COL_SIZE,
'col_t_id' : PackageListModel.COL_FONT,
'col_style': 'text',
'col_min' : 100,
'col_max' : 300,
@@ -63,9 +60,7 @@ class PackageSelectionPage (HobPage):
}, {
'col_name' : 'Included',
'col_id' : PackageListModel.COL_INC,
'col_t_id' : PackageListModel.COL_FONT,
'col_style': 'check toggle',
'col_group': 'tree store group',
'col_min' : 100,
'col_max' : 100
}]
@@ -75,7 +70,6 @@ class PackageSelectionPage (HobPage):
'columns' : [{
'col_name' : 'Package name',
'col_id' : PackageListModel.COL_NAME,
'col_t_id' : PackageListModel.COL_FONT,
'col_style': 'text',
'col_min' : 100,
'col_max' : 400,
@@ -83,7 +77,6 @@ class PackageSelectionPage (HobPage):
}, {
'col_name' : 'Size',
'col_id' : PackageListModel.COL_SIZE,
'col_t_id' : PackageListModel.COL_FONT,
'col_style': 'text',
'col_min' : 100,
'col_max' : 500,
@@ -92,18 +85,14 @@ class PackageSelectionPage (HobPage):
'col_name' : 'Included',
'col_id' : PackageListModel.COL_INC,
'col_style': 'check toggle',
'col_group': 'tree store group',
'col_min' : 100,
'col_max' : 100
}]
}
]
(INCLUDED,
ALL) = range(2)
def __init__(self, builder):
super(PackageSelectionPage, self).__init__(builder, "Edit packages")
super(PackageSelectionPage, self).__init__(builder, "Packages")
# set invisiable members
self.recipe_model = self.builder.recipe_model
@@ -112,16 +101,13 @@ class PackageSelectionPage (HobPage):
# create visual elements
self.create_visual_elements()
def included_clicked_cb(self, button):
self.ins.set_current_page(self.INCLUDED)
def create_visual_elements(self):
self.label = gtk.Label("Packages included: 0\nSelected packages size: 0 MB")
self.eventbox = self.add_onto_top_bar(self.label, 73)
self.pack_start(self.eventbox, expand=False, fill=False)
self.pack_start(self.group_align, expand=True, fill=True)
# set visible members
# set visiable members
self.ins = HobNotebook()
self.tables = [] # we need to modify table when the dialog is shown
# append the tab
@@ -131,11 +117,11 @@ class PackageSelectionPage (HobPage):
filter = page['filter']
tab.set_model(self.package_model.tree_model(filter))
tab.connect("toggled", self.table_toggled_cb, page['name'])
tab.connect_group_selection(self.table_selected_cb)
if page['name'] == "Included":
tab.connect("button-release-event", self.button_click_cb)
tab.connect("cell-fadeinout-stopped", self.after_fadeout_checkin_include)
self.ins.append_page(tab, page['name'])
label = gtk.Label(page['name'])
self.ins.append_page(tab, label)
self.tables.append(tab)
self.ins.set_entry("Search packages:")
@@ -146,8 +132,8 @@ class PackageSelectionPage (HobPage):
# add all into the dialog
self.box_group_area.pack_start(self.ins, expand=True, fill=True)
self.button_box = gtk.HBox(False, 6)
self.box_group_area.pack_start(self.button_box, expand=False, fill=False)
button_box = gtk.HBox(False, 6)
self.box_group_area.pack_start(button_box, expand=False, fill=False)
self.build_image_button = HobButton('Build image')
self.build_image_button.set_size_request(205, 49)
@@ -155,11 +141,11 @@ class PackageSelectionPage (HobPage):
self.build_image_button.set_flags(gtk.CAN_DEFAULT)
self.build_image_button.grab_default()
self.build_image_button.connect("clicked", self.build_image_clicked_cb)
self.button_box.pack_end(self.build_image_button, expand=False, fill=False)
button_box.pack_end(self.build_image_button, expand=False, fill=False)
self.back_button = HobAltButton('<< Back')
self.back_button = HobAltButton("<< Back to image configuration")
self.back_button.connect("clicked", self.back_button_clicked_cb)
self.button_box.pack_start(self.back_button, expand=False, fill=False)
button_box.pack_start(self.back_button, expand=False, fill=False)
def button_click_cb(self, widget, event):
path, col = widget.table_tree.get_cursor()
@@ -169,33 +155,11 @@ class PackageSelectionPage (HobPage):
if binb:
self.builder.show_binb_dialog(binb)
def view_log_clicked_cb(self, button, log_file):
if log_file:
os.system("xdg-open /%s" % log_file)
def show_page(self, log_file):
children = self.button_box.get_children() or []
for child in children:
self.button_box.remove(child)
# re-packed the buttons as request, add the 'view log' button if build success
self.button_box.pack_start(self.back_button, expand=False, fill=False)
self.button_box.pack_end(self.build_image_button, expand=False, fill=False)
if log_file:
view_log_button = HobAltButton("View log")
view_log_button.connect("clicked", self.view_log_clicked_cb, log_file)
view_log_button.set_tooltip_text("Open the building log files")
self.button_box.pack_end(view_log_button, expand=False, fill=False)
self.show_all()
def build_image_clicked_cb(self, button):
self.builder.build_image()
def back_button_clicked_cb(self, button):
if self.builder.current_step == self.builder.PACKAGE_GENERATED:
self.builder.show_recipes()
elif self.builder.previous_step == self.builder.IMAGE_GENERATED:
self.builder.show_image_details()
self.builder.show_configuration()
def _expand_all(self):
for tab in self.tables:
@@ -219,7 +183,7 @@ class PackageSelectionPage (HobPage):
image_total_size += (51200 * 1024)
image_total_size_str = HobPage._size_to_string(image_total_size)
self.label.set_label("Packages included: %s\nSelected packages size: %s\nTotal image size: %s" %
self.label.set_text("Packages included: %s\nSelected packages size: %s\nTotal image size: %s" %
(selected_packages_num, selected_packages_size_str, image_total_size_str))
self.ins.show_indicator_icon("Included", selected_packages_num)
@@ -237,7 +201,7 @@ class PackageSelectionPage (HobPage):
self.refresh_selection()
if not self.builder.customized:
self.builder.customized = True
self.builder.configuration.selected_image = self.recipe_model.__custom_image__
self.builder.configuration.selected_image = self.recipe_model.__dummy_image__
self.builder.rcppkglist_populated()
self.builder.window_sensitive(True)
@@ -283,22 +247,3 @@ class PackageSelectionPage (HobPage):
def after_fadeout_checkin_include(self, table, ctrl, cell, tree):
tree.set_model(self.package_model.tree_model(self.pages[0]['filter']))
tree.expand_all()
def foreach_cell_change_font(self, model, path, iter, paths=None):
# Changed the font for a group cells
if path and iter and path[0] == paths[0]:
self.package_model.set(iter, self.package_model.COL_FONT, "bold")
else:
if iter and model.iter_parent(iter) == None:
self.package_model.set(iter, self.package_model.COL_FONT, '11')
else:
self.package_model.set(iter, self.package_model.COL_FONT, '10')
def table_selected_cb(self, selection):
model, paths = selection.get_selected_rows()
if paths:
child_path = self.package_model.convert_vpath_to_path(model, paths[0])
self.package_model.foreach(self.foreach_cell_change_font, child_path)
def set_packages_curr_tab(self, curr_page):
self.ins.set_current_page(curr_page)


@@ -125,17 +125,11 @@ class PersistentTooltip(gtk.Window):
style.fg[gtk.STATE_NORMAL] = gtk.gdk.color_parse(val)
self.label.set_style(style)
break # we only care for the tooltip_fg_color
self.label.set_markup(markup)
self.label.show()
bin.add(self.label)
hbox.pack_end(bin, True, True, 6)
# add the original URL display for user reference
if 'a href' in markup:
hbox.set_tooltip_text(self.get_markup_url(markup))
hbox.show()
self.connect("key-press-event", self._catch_esc_cb)
"""
@@ -171,16 +165,3 @@ class PersistentTooltip(gtk.Window):
def hide(self):
self.shown = False
gtk.Window.hide(self)
"""
Called to get the hyperlink URL from markup text.
"""
def get_markup_url(self, markup):
url = "http:"
if markup and type(markup) == str:
s = markup
if 'http:' in s:
import re
url = re.search('(http:[^,\\ "]+)', s).group(0)
return url


@@ -124,29 +124,23 @@ class RecipeSelectionPage (HobPage):
}]
}
]
(INCLUDED,
ALL,
TASKS) = range(3)
def __init__(self, builder = None):
super(RecipeSelectionPage, self).__init__(builder, "Edit recipes")
super(RecipeSelectionPage, self).__init__(builder, "Recipes")
# set invisible members
# set invisiable members
self.recipe_model = self.builder.recipe_model
# create visual elements
self.create_visual_elements()
def included_clicked_cb(self, button):
self.ins.set_current_page(self.INCLUDED)
def create_visual_elements(self):
self.eventbox = self.add_onto_top_bar(None, 73)
self.label = gtk.Label()
self.eventbox = self.add_onto_top_bar(self.label, 73)
self.pack_start(self.eventbox, expand=False, fill=False)
self.pack_start(self.group_align, expand=True, fill=True)
# set visible members
# set visiable members
self.ins = HobNotebook()
self.tables = [] # we need to modify the table when the dialog is shown
# append the tabs in order
@@ -159,7 +153,10 @@ class RecipeSelectionPage (HobPage):
if page['name'] == "Included":
tab.connect("button-release-event", self.button_click_cb)
tab.connect("cell-fadeinout-stopped", self.after_fadeout_checkin_include)
self.ins.append_page(tab, page['name'], page['tooltip'])
label = gtk.Label(page['name'])
label.set_selectable(False)
label.set_tooltip_text(page['tooltip'])
self.ins.append_page(tab, label)
self.tables.append(tab)
self.ins.set_entry("Search recipes:")
@@ -184,7 +181,7 @@ class RecipeSelectionPage (HobPage):
self.build_packages_button.connect("clicked", self.build_packages_clicked_cb)
button_box.pack_end(self.build_packages_button, expand=False, fill=False)
self.back_button = HobAltButton('<< Back')
self.back_button = HobAltButton("<< Back to image configuration")
self.back_button.connect("clicked", self.back_button_clicked_cb)
button_box.pack_start(self.back_button, expand=False, fill=False)
@@ -205,6 +202,7 @@ class RecipeSelectionPage (HobPage):
def refresh_selection(self):
self.builder.configuration.selected_image = self.recipe_model.get_selected_image()
_, self.builder.configuration.selected_recipes = self.recipe_model.get_selected_recipes()
self.label.set_text("Recipes included: %s" % len(self.builder.configuration.selected_recipes))
self.ins.show_indicator_icon("Included", len(self.builder.configuration.selected_recipes))
def toggle_item_idle_cb(self, path, view_tree, cell, pagename):
@@ -221,7 +219,7 @@ class RecipeSelectionPage (HobPage):
self.refresh_selection()
if not self.builder.customized:
self.builder.customized = True
self.builder.configuration.selected_image = self.recipe_model.__custom_image__
self.builder.configuration.selected_image = self.recipe_model.__dummy_image__
self.builder.rcppkglist_populated()
self.builder.window_sensitive(True)
@@ -265,6 +263,3 @@ class RecipeSelectionPage (HobPage):
def after_fadeout_checkin_include(self, table, ctrl, cell, tree):
tree.set_model(self.recipe_model.tree_model(self.pages[0]['filter']))
def set_recipe_curr_tab(self, curr_page):
self.ins.set_current_page(curr_page)

View File

@@ -76,9 +76,6 @@ class RunningBuild (gobject.GObject):
'build-complete' : (gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE,
()),
'build-aborted' : (gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE,
()),
'task-started' : (gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE,
(gobject.TYPE_PYOBJECT,)),
@@ -88,9 +85,6 @@ class RunningBuild (gobject.GObject):
'no-provider' : (gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE,
(gobject.TYPE_PYOBJECT,)),
'log' : (gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE,
(gobject.TYPE_STRING, gobject.TYPE_PYOBJECT,)),
}
pids_to_task = {}
tasks_to_iter = {}
@@ -99,7 +93,6 @@ class RunningBuild (gobject.GObject):
gobject.GObject.__init__ (self)
self.model = RunningBuildModel()
self.sequential = sequential
self.buildaborted = False
def reset (self):
self.pids_to_task.clear()
@@ -129,8 +122,6 @@ class RunningBuild (gobject.GObject):
parent = self.tasks_to_iter[(package, task)]
if(isinstance(event, logging.LogRecord)):
if event.taskpid == 0 or event.levelno > logging.INFO:
self.emit("log", "handle", event)
# FIXME: this is a hack! More info in Yocto #1433
# http://bugzilla.pokylinux.org/show_bug.cgi?id=1433, temporarily
# mask the error message as it's not informative for the user.
@@ -216,7 +207,6 @@ class RunningBuild (gobject.GObject):
self.tasks_to_iter[(package, task)] = i
elif isinstance(event, bb.build.TaskBase):
self.emit("log", "info", event._message)
current = self.tasks_to_iter[(package, task)]
parent = self.tasks_to_iter[(package, None)]
@@ -284,9 +274,7 @@ class RunningBuild (gobject.GObject):
0))
# Emit the appropriate signal depending on the number of failures
if self.buildaborted:
self.emit ("build-aborted")
elif (failures >= 1):
if (failures >= 1):
self.emit ("build-failed")
else:
self.emit ("build-succeeded")
@@ -298,11 +286,7 @@ class RunningBuild (gobject.GObject):
if pbar:
pbar.set_text(event.msg)
elif isinstance(event, bb.event.DiskFull):
self.buildaborted = True
elif isinstance(event, bb.command.CommandFailed):
self.emit("log", "error", "Command execution failed: %s" % (event.error))
if event.error.startswith("Exited with"):
# If the command fails with an exit code we're done, emit the
# generic signal for the UI to notify the user
@@ -330,24 +314,7 @@ class RunningBuild (gobject.GObject):
elif isinstance(event, bb.event.ParseCompleted) and pbar:
pbar.hide()
#using runqueue events as many as possible to update the progress bar
elif isinstance(event, bb.runqueue.runQueueTaskFailed):
self.emit("log", "error", "Task %s (%s) failed with exit code '%s'" % (event.taskid, event.taskstring, event.exitcode))
elif isinstance(event, bb.runqueue.sceneQueueTaskFailed):
self.emit("log", "warn", "Setscene task %s (%s) failed with exit code '%s' - real task will be run instead" \
% (event.taskid, event.taskstring, event.exitcode))
elif isinstance(event, (bb.runqueue.runQueueTaskStarted, bb.runqueue.sceneQueueTaskStarted)):
if isinstance(event, bb.runqueue.sceneQueueTaskStarted):
self.emit("log", "info", "Running setscene task %d of %d (%s)" % \
(event.stats.completed + event.stats.active + event.stats.failed + 1,
event.stats.total, event.taskstring))
else:
if event.noexec:
tasktype = 'noexec task'
else:
tasktype = 'task'
self.emit("log", "info", "Running %s %s of %s (ID: %s, %s)" % \
(tasktype, event.stats.completed + event.stats.active + event.stats.failed + 1,
event.stats.total, event.taskid, event.taskstring))
message = {}
message["eventname"] = bb.event.getName(event)
num_of_completed = event.stats.completed + event.stats.failed
@@ -356,10 +323,6 @@ class RunningBuild (gobject.GObject):
message["title"] = ""
message["task"] = event.taskstring
self.emit("task-started", message)
elif isinstance(event, bb.event.MultipleProviders):
self.emit("log", "info", "multiple providers are available for %s%s (%s)" \
% (event._is_runtime and "runtime " or "", event._item, ", ".join(event._candidates)))
self.emit("log", "info", "consider defining a PREFERRED_PROVIDER entry to match %s" % (event._item))
elif isinstance(event, bb.event.NoProvider):
msg = ""
if event._runtime:
@@ -374,19 +337,6 @@ class RunningBuild (gobject.GObject):
for reason in event._reasons:
msg += ("%s\n" % reason)
self.emit("no-provider", msg)
self.emit("log", msg)
else:
if not isinstance(event, (bb.event.BuildBase,
bb.event.StampUpdate,
bb.event.ConfigParsed,
bb.event.RecipeParsed,
bb.event.RecipePreFinalise,
bb.runqueue.runQueueEvent,
bb.runqueue.runQueueExitWait,
bb.event.OperationStarted,
bb.event.OperationCompleted,
bb.event.OperationProgress)):
self.emit("log", "error", "Unknown event: %s" % (event.error if hasattr(event, 'error') else 'error'))
return
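For orientation, here is a minimal sketch of how a UI consumer might hook the signals declared above. The handler names are illustrative assumptions, not part of the Hob API; it presumes RunningBuild is importable as shown and that a gobject main loop is running elsewhere.

from bb.ui.crumbs.runningbuild import RunningBuild

def on_build_succeeded(running_build):
    # Illustrative handler for the 'build-succeeded' signal.
    print("Build finished successfully")

def on_task_started(running_build, message):
    # 'task-started' delivers a dict; message["task"] holds the task string.
    print("Started %s" % message["task"])

build = RunningBuild()
build.connect("build-succeeded", on_build_succeeded)
build.connect("task-started", on_task_started)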

View File

@@ -101,19 +101,7 @@ class HobTemplateFile(ConfigFile):
return self.dictionary[var]
else:
return ""
def getVersion(self):
contents = ConfigFile.readFile(self)
pattern = "^\s*(\S+)\s*=\s*(\".*?\")"
for line in contents:
match = re.search(pattern, line)
if match:
if match.group(1) == "VERSION":
return match.group(2).strip('"')
return None
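As a quick illustration of the pattern used by getVersion() above, the following hypothetical snippet parses a 'VERSION = "..."' line (the input line is made up for the example):

import re

pattern = r"^\s*(\S+)\s*=\s*(\".*?\")"   # same expression as above
line = 'VERSION = "1.2"'                 # hypothetical template line
match = re.search(pattern, line)
if match and match.group(1) == "VERSION":
    print(match.group(2).strip('"'))     # prints: 1.2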
def load(self):
contents = ConfigFile.readFile(self)
self.dictionary.clear()
@@ -186,9 +174,6 @@ class TemplateMgr(gobject.GObject):
self.image_bb.save()
self.template_hob.save()
def getVersion(self, path):
return HobTemplateFile(path).getVersion()
def load(self, path):
self.template_hob = HobTemplateFile(path)
self.dictionary = self.template_hob.load()

View File

@@ -22,7 +22,6 @@
# bitbake which will allow more flexibility.
import os
import bb
def which_terminal():
term = bb.utils.which(os.environ["PATH"], "xterm")

View File

@@ -24,7 +24,7 @@ import threading
import xmlrpclib
import bb
import bb.event
from bb.ui.crumbs.progressbar import HobProgressBar
from bb.ui.crumbs.progress import ProgressBar
# Package Model
(COL_PKG_NAME) = (0)
@@ -220,12 +220,8 @@ def main(server, eventHandler):
gtk.gdk.threads_enter()
dep = DepExplorer()
bardialog = gtk.Dialog(parent=dep)
bardialog.set_default_size(400, 50)
pbar = HobProgressBar()
bardialog.vbox.pack_start(pbar)
bardialog.show_all()
bardialog.connect("delete-event", gtk.main_quit)
pbar = ProgressBar(dep)
pbar.connect("delete-event", gtk.main_quit)
gtk.gdk.threads_leave()
progress_total = 0
@@ -242,20 +238,19 @@ def main(server, eventHandler):
if isinstance(event, bb.event.CacheLoadStarted):
progress_total = event.total
gtk.gdk.threads_enter()
bardialog.set_title("Loading Cache")
pbar.update(0)
pbar.set_title("Loading Cache")
pbar.update(0, progress_total)
gtk.gdk.threads_leave()
if isinstance(event, bb.event.CacheLoadProgress):
x = event.current
gtk.gdk.threads_enter()
pbar.update(x * 1.0 / progress_total)
pbar.set_title('')
pbar.update(x, progress_total)
gtk.gdk.threads_leave()
continue
if isinstance(event, bb.event.CacheLoadCompleted):
bardialog.hide()
pbar.hide()
continue
if isinstance(event, bb.event.ParseStarted):
@@ -263,21 +258,19 @@ def main(server, eventHandler):
if progress_total == 0:
continue
gtk.gdk.threads_enter()
pbar.update(0)
bardialog.set_title("Processing recipes")
pbar.set_title("Processing recipes")
pbar.update(0, progress_total)
gtk.gdk.threads_leave()
if isinstance(event, bb.event.ParseProgress):
x = event.current
gtk.gdk.threads_enter()
pbar.update(x * 1.0 / progress_total)
pbar.set_title('')
pbar.update(x, progress_total)
gtk.gdk.threads_leave()
continue
if isinstance(event, bb.event.ParseCompleted):
bardialog.hide()
pbar.hide()
continue
if isinstance(event, bb.event.DepTreeGenerated):

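The cache and parse branches above both reduce the event counter to a fraction of the total before updating the bar. A minimal sketch of that pattern with a plain PyGTK progress bar (gtk.ProgressBar is standard PyGTK; the update() helper here is an assumption for illustration, not the HobProgressBar/ProgressBar API):

import gtk  # PyGTK 2.x, as used throughout this UI

bar = gtk.ProgressBar()

def update(current, total):
    # Clamp to the 0.0-1.0 range gtk.ProgressBar expects.
    if total > 0:
        bar.set_fraction(min(current / float(total), 1.0))

update(42, 200)   # bar now shows 21%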
View File

@@ -30,7 +30,7 @@ try:
pygtk.require('2.0') # to be certain we don't have gtk+ 1.x !?!
gtkver = gtk.gtk_version
pygtkver = gtk.pygtk_version
if gtkver < (2, 20, 0) or pygtkver < (2, 21, 0):
if gtkver < (2, 18, 0) or pygtkver < (2, 16, 0):
sys.exit("%s,\nYou have Gtk+ %s and PyGtk %s." % (requirements,
".".join(map(str, gtkver)),
".".join(map(str, pygtkver))))

View File

@@ -25,11 +25,7 @@ import sys
import xmlrpclib
import logging
import progressbar
import signal
import bb.msg
import fcntl
import struct
import copy
from bb.ui import uihelper
logger = logging.getLogger("BitBake")
@@ -41,21 +37,8 @@ class BBProgress(progressbar.ProgressBar):
widgets = [progressbar.Percentage(), ' ', progressbar.Bar(), ' ',
progressbar.ETA()]
try:
self._resize_default = signal.getsignal(signal.SIGWINCH)
except:
self._resize_default = None
progressbar.ProgressBar.__init__(self, maxval, [self.msg + ": "] + widgets)
def _handle_resize(self, signum, frame):
progressbar.ProgressBar._handle_resize(self, signum, frame)
if self._resize_default:
self._resize_default(signum, frame)
def finish(self):
progressbar.ProgressBar.finish(self)
if self._resize_default:
signal.signal(signal.SIGWINCH, self._resize_default)
class NonInteractiveProgress(object):
fobj = sys.stdout
@@ -87,128 +70,39 @@ def pluralise(singular, plural, qty):
else:
return plural % qty
class InteractConsoleLogFilter(logging.Filter):
def __init__(self, tf, format):
self.tf = tf
self.format = format
def filter(self, record):
if record.levelno == self.format.NOTE and (record.msg.startswith("Running") or record.msg.startswith("recipe ")):
return False
self.tf.clearFooter()
return True
class TerminalFilter(object):
columns = 80
def sigwinch_handle(self, signum, frame):
self.columns = self.getTerminalColumns()
if self._sigwinch_default:
self._sigwinch_default(signum, frame)
def getTerminalColumns(self):
def ioctl_GWINSZ(fd):
try:
cr = struct.unpack('hh', fcntl.ioctl(fd, self.termios.TIOCGWINSZ, '1234'))
except:
return None
return cr
cr = ioctl_GWINSZ(sys.stdout.fileno())
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
try:
cr = (env['LINES'], env['COLUMNS'])
except:
cr = (25, 80)
return cr[1]
def __init__(self, main, helper, console, format):
self.main = main
self.helper = helper
self.cuu = None
self.stdinbackup = None
self.interactive = sys.stdout.isatty()
self.footer_present = False
self.lastpids = []
if not self.interactive:
return
try:
import curses
except ImportError:
sys.exit("FATAL: The knotty ui could not load the required curses python module.")
import termios
self.curses = curses
self.termios = termios
try:
fd = sys.stdin.fileno()
self.stdinbackup = termios.tcgetattr(fd)
new = copy.deepcopy(self.stdinbackup)
new[3] = new[3] & ~termios.ECHO
termios.tcsetattr(fd, termios.TCSADRAIN, new)
curses.setupterm()
self.ed = curses.tigetstr("ed")
if self.ed:
self.cuu = curses.tigetstr("cuu")
try:
self._sigwinch_default = signal.getsignal(signal.SIGWINCH)
signal.signal(signal.SIGWINCH, self.sigwinch_handle)
except:
pass
self.columns = self.getTerminalColumns()
except:
self.cuu = None
console.addFilter(InteractConsoleLogFilter(self, format))
def clearFooter(self):
if self.footer_present:
lines = self.footer_present
sys.stdout.write(self.curses.tparm(self.cuu, lines))
sys.stdout.write(self.curses.tparm(self.ed))
self.footer_present = False
return
def updateFooter(self):
if not self.cuu:
if not main.shutdown or not self.helper.needUpdate:
return
activetasks = self.helper.running_tasks
failedtasks = self.helper.failed_tasks
runningpids = self.helper.running_pids
if self.footer_present and (self.lastpids == runningpids):
return
if self.footer_present:
self.clearFooter()
if not activetasks:
if len(runningpids) == 0:
return
self.helper.getTasks()
tasks = []
for t in runningpids:
tasks.append("%s (pid %s)" % (activetasks[t]["title"], t))
if self.main.shutdown:
content = "Waiting for %s running tasks to finish:" % len(activetasks)
if main.shutdown:
print("Waiting for %s running tasks to finish:" % len(activetasks))
else:
content = "Currently %s running tasks (%s of %s):" % (len(activetasks), self.helper.tasknumber_current, self.helper.tasknumber_total)
print content
lines = 1 + int(len(content) / (self.columns + 1))
print("Currently %s running tasks (%s of %s):" % (len(activetasks), self.helper.tasknumber_current, self.helper.tasknumber_total))
for tasknum, task in enumerate(tasks):
content = "%s: %s" % (tasknum, task)
print content
lines = lines + 1 + int(len(content) / (self.columns + 1))
self.footer_present = lines
self.lastpids = runningpids[:]
print("%s: %s" % (tasknum, task))
def finish(self):
if self.stdinbackup:
fd = sys.stdin.fileno()
self.termios.tcsetattr(fd, self.termios.TCSADRAIN, self.stdinbackup)
return
def main(server, eventHandler, tf = TerminalFilter):
@@ -225,7 +119,6 @@ def main(server, eventHandler, tf = TerminalFilter):
console.setFormatter(format)
logger.addHandler(console)
if consolelogfile:
bb.utils.mkdirhier(os.path.dirname(consolelogfile))
consolelog = logging.FileHandler(consolelogfile)
bb.msg.addDefaultlogFilter(consolelog)
consolelog.setFormatter(format)
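The getTerminalColumns() logic above boils down to a TIOCGWINSZ ioctl with an environment fallback. A self-contained sketch of that technique (standard-library calls only; the 80-column fallback is an assumption):

import fcntl
import os
import struct
import sys
import termios

def terminal_columns():
    # The winsize struct starts with two shorts: rows, then columns.
    try:
        rows, cols = struct.unpack('hh', fcntl.ioctl(sys.stdout.fileno(), termios.TIOCGWINSZ, b'1234'))
        return cols
    except (OSError, IOError):
        return int(os.environ.get('COLUMNS', 80))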

View File

@@ -0,0 +1,109 @@
#
# BitBake (No)TTY UI Implementation (v2)
#
# Handling output to TTYs or files (no TTY)
#
# Copyright (C) 2012 Richard Purdie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from bb.ui import knotty
import logging
import sys
logger = logging.getLogger("BitBake")
class InteractConsoleLogFilter(logging.Filter):
def __init__(self, tf, format):
self.tf = tf
self.format = format
def filter(self, record):
if record.levelno == self.format.NOTE and (record.msg.startswith("Running") or record.msg.startswith("package ")):
return False
self.tf.clearFooter()
return True
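A logging.Filter attached to a handler, as above, simply returns False for records the console should drop. A tiny stand-alone sketch of the same idea (the filter class and logger wiring here are illustrative, not the BitBake ones):

import logging
import sys

class DropRunningNotes(logging.Filter):
    # Suppress routine "Running ..." notes so the task footer stays readable.
    def filter(self, record):
        return not record.getMessage().startswith("Running")

console = logging.StreamHandler(sys.stdout)
console.addFilter(DropRunningNotes())
logging.getLogger("BitBake").addHandler(console)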
class TerminalFilter2(object):
def __init__(self, main, helper, console, format):
self.main = main
self.helper = helper
self.cuu = None
self.stdinbackup = None
self.interactive = sys.stdout.isatty()
self.footer_present = False
self.lastpids = []
if not self.interactive:
return
import curses
import termios
import copy
self.curses = curses
self.termios = termios
try:
fd = sys.stdin.fileno()
self.stdinbackup = termios.tcgetattr(fd)
new = copy.deepcopy(self.stdinbackup)
new[3] = new[3] & ~termios.ECHO
termios.tcsetattr(fd, termios.TCSADRAIN, new)
curses.setupterm()
self.ed = curses.tigetstr("ed")
if self.ed:
self.cuu = curses.tigetstr("cuu")
except:
self.cuu = None
console.addFilter(InteractConsoleLogFilter(self, format))
def clearFooter(self):
if self.footer_present:
lines = self.footer_present
sys.stdout.write(self.curses.tparm(self.cuu, lines))
sys.stdout.write(self.curses.tparm(self.ed))
self.footer_present = False
def updateFooter(self):
if not self.cuu:
return
activetasks = self.helper.running_tasks
failedtasks = self.helper.failed_tasks
runningpids = self.helper.running_pids
if self.footer_present and (self.lastpids == runningpids):
return
if self.footer_present:
self.clearFooter()
if not activetasks:
return
lines = 1
tasks = []
for t in runningpids:
tasks.append("%s (pid %s)" % (activetasks[t]["title"], t))
if self.main.shutdown:
print("Waiting for %s running tasks to finish:" % len(activetasks))
else:
print("Currently %s running tasks (%s of %s):" % (len(activetasks), self.helper.tasknumber_current, self.helper.tasknumber_total))
for tasknum, task in enumerate(tasks):
print("%s: %s" % (tasknum, task))
lines = lines + 1
self.footer_present = lines
self.lastpids = runningpids[:]
def finish(self):
if self.stdinbackup:
fd = sys.stdin.fileno()
self.termios.tcsetattr(fd, self.termios.TCSADRAIN, self.stdinbackup)
def main(server, eventHandler):
bb.ui.knotty.main(server, eventHandler, TerminalFilter2)
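The footer handling in TerminalFilter2 relies on two terminfo capabilities: "cuu" (cursor up N lines) and "ed" (erase to end of display). A minimal, hedged sketch of that mechanism using only the standard curses module (it assumes a real terminal; the bytes handling is written for Python 3, whereas the original code targets Python 2):

import curses
import sys

curses.setupterm()
cuu = curses.tigetstr("cuu")
ed = curses.tigetstr("ed")

def clear_footer(lines):
    # Move the cursor up over the footer, then erase everything below it.
    if cuu and ed:
        sys.stdout.buffer.write(curses.tparm(cuu, lines))
        sys.stdout.buffer.write(curses.tparm(ed))
        sys.stdout.flush()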

View File

@@ -47,13 +47,7 @@
from __future__ import division
import logging
import os, sys, itertools, time, subprocess
try:
import curses
except ImportError:
sys.exit("FATAL: The ncurses ui could not load the required curses python module.")
import os, sys, curses, itertools, time
import bb
import xmlrpclib
from bb import ui
@@ -292,7 +286,7 @@ class NCursesUI:
# bb.error("log data follows (%s)" % logfile)
# number_of_lines = data.getVar("BBINCLUDELOGS_LINES", d)
# if number_of_lines:
# subprocess.call('tail -n%s %s' % (number_of_lines, logfile), shell=True)
# os.system('tail -n%s %s' % (number_of_lines, logfile))
# else:
# f = open(logfile, "r")
# while True:

View File

@@ -48,7 +48,7 @@ class BBUIHelper:
self.running_pids.remove(event.pid)
self.failed_tasks.append( { 'title' : "%s %s" % (event._package, event._task)})
self.needUpdate = True
if isinstance(event, bb.runqueue.runQueueTaskStarted) or isinstance(event, bb.runqueue.sceneQueueTaskStarted):
if isinstance(event, bb.runqueue.runQueueTaskStarted):
self.tasknumber_current = event.stats.completed + event.stats.active + event.stats.failed + 1
self.tasknumber_total = event.stats.total

View File

@@ -26,7 +26,6 @@ import logging
import bb
import bb.msg
import multiprocessing
import fcntl
from commands import getstatusoutput
from contextlib import contextmanager
@@ -109,10 +108,130 @@ def vercmp(ta, tb):
r = vercmp_part(ra, rb)
return r
def vercmp_string(a, b):
ta = split_version(a)
tb = split_version(b)
return vercmp(ta, tb)
_package_weights_ = {"pre":-2, "p":0, "alpha":-4, "beta":-3, "rc":-1} # dicts are unordered
_package_ends_ = ["pre", "p", "alpha", "beta", "rc", "cvs", "bk", "HEAD" ] # so we need ordered list
def relparse(myver):
"""Parses the last elements of a version number into a triplet, that can
later be compared.
"""
number = 0
p1 = 0
p2 = 0
mynewver = myver.split('_')
if len(mynewver) == 2:
# an _package_weights_
number = float(mynewver[0])
match = 0
for x in _package_ends_:
elen = len(x)
if mynewver[1][:elen] == x:
match = 1
p1 = _package_weights_[x]
try:
p2 = float(mynewver[1][elen:])
except:
p2 = 0
break
if not match:
# normal number or number with letter at end
divider = len(myver)-1
if myver[divider:] not in "1234567890":
# letter at end
p1 = ord(myver[divider:])
number = float(myver[0:divider])
else:
number = float(myver)
else:
# normal number or number with letter at end
divider = len(myver)-1
if myver[divider:] not in "1234567890":
#letter at end
p1 = ord(myver[divider:])
number = float(myver[0:divider])
else:
number = float(myver)
return [number, p1, p2]
__vercmp_cache__ = {}
def vercmp_string(val1, val2):
"""This takes two version strings and returns an integer to tell you whether
the versions are the same, val1>val2 or val2>val1.
"""
# quick short-circuit
if val1 == val2:
return 0
valkey = val1 + " " + val2
# cache lookup
try:
return __vercmp_cache__[valkey]
try:
return - __vercmp_cache__[val2 + " " + val1]
except KeyError:
pass
except KeyError:
pass
# consider 1_p2 vc 1.1
# after expansion will become (1_p2,0) vc (1,1)
# then 1_p2 is compared with 1 before 0 is compared with 1
# to solve the bug we need to convert it to (1,0_p2)
# by splitting _prepart part and adding it back _after_expansion
val1_prepart = val2_prepart = ''
if val1.count('_'):
val1, val1_prepart = val1.split('_', 1)
if val2.count('_'):
val2, val2_prepart = val2.split('_', 1)
# replace '-' by '.'
# FIXME: Is it needed? can val1/2 contain '-'?
val1 = val1.split("-")
if len(val1) == 2:
val1[0] = val1[0] + "." + val1[1]
val2 = val2.split("-")
if len(val2) == 2:
val2[0] = val2[0] + "." + val2[1]
val1 = val1[0].split('.')
val2 = val2[0].split('.')
# add back decimal point so that .03 does not become "3" !
for x in xrange(1, len(val1)):
if val1[x][0] == '0' :
val1[x] = '.' + val1[x]
for x in xrange(1, len(val2)):
if val2[x][0] == '0' :
val2[x] = '.' + val2[x]
# extend version numbers
if len(val2) < len(val1):
val2.extend(["0"]*(len(val1)-len(val2)))
elif len(val1) < len(val2):
val1.extend(["0"]*(len(val2)-len(val1)))
# add back _prepart tails
if val1_prepart:
val1[-1] += '_' + val1_prepart
if val2_prepart:
val2[-1] += '_' + val2_prepart
# The above code will extend version numbers out so they
# have the same number of digits.
for x in xrange(0, len(val1)):
cmp1 = relparse(val1[x])
cmp2 = relparse(val2[x])
for y in xrange(0, 3):
myret = cmp1[y] - cmp2[y]
if myret != 0:
__vercmp_cache__[valkey] = myret
return myret
__vercmp_cache__[valkey] = 0
return 0
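For reference, a few hedged spot-checks of the ordering this function implements (negative means the first version is older; the calls assume bb.utils is importable, and the expected signs follow from the relparse() weights above):

from bb.utils import vercmp_string

assert vercmp_string("1.0", "1.0") == 0
assert vercmp_string("1.0", "1.1") < 0      # plain numeric components compare numerically
assert vercmp_string("1.0_rc1", "1.0") < 0  # "rc" carries a negative weight, so rc1 predates the release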
def explode_deps(s):
"""
@@ -216,23 +335,20 @@ def better_compile(text, file, realfile, mode = "exec"):
for line in body:
logger.error(line)
e = bb.BBHandledException(e)
raise e
raise
def better_exec(code, context, text = None, realfile = "<code>"):
def better_exec(code, context, text, realfile = "<code>"):
"""
Similar to better_compile, better_exec will
print the lines that are responsible for the
error.
"""
import bb.parse
if not text:
text = code
if not hasattr(code, "co_filename"):
code = better_compile(code, realfile, realfile)
try:
exec(code, _context, context)
except Exception as e:
except Exception:
(t, value, tb) = sys.exc_info()
if t in [bb.parse.SkipPackage, bb.build.FuncFailed]:
@@ -257,32 +373,22 @@ def better_exec(code, context, text = None, realfile = "<code>"):
logger.error("The code that was being executed was:")
_print_trace(textarray, linefailed)
logger.error("[From file: '%s', lineno: %s, function: %s]", tbextract[0][0], tbextract[0][1], tbextract[0][2])
logger.error("(file: '%s', lineno: %s, function: %s)", tbextract[0][0], tbextract[0][1], tbextract[0][2])
# See if this is a function we constructed and has calls back into other functions in
# "text". If so, try and improve the context of the error by diving down the trace
level = 0
nexttb = tb.tb_next
while nexttb is not None and (level+1) < len(tbextract):
while nexttb is not None:
if tbextract[level][0] == tbextract[level+1][0] and tbextract[level+1][2] == tbextract[level][0]:
_print_trace(textarray, tbextract[level+1][1])
logger.error("[From file: '%s', lineno: %s, function: %s]", tbextract[level+1][0], tbextract[level+1][1], tbextract[level+1][2])
elif "d" in context and tbextract[level+1][2]:
d = context["d"]
functionname = tbextract[level+1][2]
text = d.getVar(functionname, True)
if text:
_print_trace(text.split('\n'), tbextract[level+1][1])
logger.error("[From file: '%s', lineno: %s, function: %s]", tbextract[level+1][0], tbextract[level+1][1], tbextract[level+1][2])
else:
break
logger.error("(file: '%s', lineno: %s, function: %s)", tbextract[level+1][0], tbextract[level+1][1], tbextract[level+1][2])
else:
break
nexttb = tb.tb_next
level = level + 1
e = bb.BBHandledException(e)
raise e
raise
def simple_exec(code, context):
exec(code, _context, context)
@@ -436,6 +542,8 @@ def preserved_envvars():
'BB_PRESERVE_ENV',
'BB_ENV_WHITELIST',
'BB_ENV_EXTRAWHITE',
'LANG',
'_',
]
return v + preserved_envvars_exported() + preserved_envvars_exported_interactive()
@@ -733,8 +841,6 @@ def which(path, item, direction = 0):
for p in paths:
next = os.path.join(p, item)
if os.path.exists(next):
if not os.path.isabs(next):
next = os.path.abspath(next)
return next
return ""
@@ -766,7 +872,3 @@ def contains(variable, checkvalues, truevalue, falsevalue, d):
def cpu_count():
return multiprocessing.cpu_count()
def nonblockingfd(fd):
fcntl.fcntl(fd, fcntl.F_SETFL, fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)

View File

@@ -1,15 +1,12 @@
# This is a single Makefile to handle all generated Yocto Project documents.
# The Makefile needs to live in the documents directory and all figures used
# in any manuals must be .PNG files and live in the individual book's figures
# directory as well as in the figures directory for the mega-manual.
# Note that the figures for the Yocto Project Development Manual
# differ depending on the BRANCH being built.
# directory. Note that the figures for the Yocto Project Development Manual
# differ between the 'master' and 'edison' branches.
#
# The Makefile has these targets:
#
# pdf: generates a PDF version of a manual. Not valid for the Quick Start
# or the mega-manual (single, large HTML file comprised of all
# Yocto Project manuals).
# html: generates an HTML version of a manual.
# tarball: creates a tarball for the doc files.
# validate: validates
@@ -17,20 +14,18 @@
# clean: removes files
#
# The Makefile generates an HTML and PDF version of every document except the
# Yocto Project Quick Start and the single, HTML mega-manual, which is comprised
# of all the individual Yocto Project manuals. These two manuals are in HTML
# form only. The variable DOC indicates the folder name for a given manual. The
# variable VER represents the distro version of the Yocto Release for which the
# manuals are being generated. The variable BRANCH is used to indicate the
# branch (edison or denzil) and is used only when DOC=dev-manual or
# DOC=mega-manual. If you do not specify a BRANCH, the default branch used
# will be for the latest Yocto Project release.
# Yocto Project Quick Start. The Quick Start is in HTML form only. The variable
# DOC is used to indicate the folder name for a given manual. The variable
# VER represents the distro version of the Yocto Release for which the manuals
# are being generated. The variable BRANCH is used to indicate the 'edison'
# branch and is used only when DOC=dev-manual (making the YP Development
# Manual).
#
# To build a manual, you must invoke Makefile with the DOC argument. If you
# are going to publish the manual, then you must invoke Makefile with both the
# DOC and the VER argument. If you are building a particular version of the
# Yocto Project Development Manual or you are building any version of the
# mega-manual, you must use the DOC and BRANCH arguments.
# To build the HTML and PDF versions of the manual you must invoke the Makefile
# with the DOC argument. If you are going to publish the manual, then you
# must invoke the Makefile with both the DOC and the VER argument.
# If you are building the 'edison' version of the YP Development Manual, then
# you must use the DOC and BRANCH arguments.
#
# Examples:
#
@@ -38,45 +33,39 @@
# make DOC=yocto-project-qs
# make pdf DOC=poky-ref-manual
# make DOC=dev-manual BRANCH=edison
# make DOC=mega-manual BRANCH=denzil
#
# The first example generates the HTML and PDF versions of the BSP Guide.
# The second example generates the HTML version only of the Quick Start. Note that
# the Quick Start only has an HTML version available. The third example generates
# both the PDF and HTML versions of the Yocto Project Reference Manual. The
# fourth example generates both the PDF and HTML 'edison' versions of the YP
Development Manual. The last example generates the HTML version of the
# mega-manual and uses the 'denzil' branch when choosing figures for the
# tarball of figures.
# last example generates both the PDF and HTML 'edison' versions of the YP
# Development Manual.
#
# Use the publish target to push the generated manuals to the Yocto Project
# website. All files needed for the manual's HTML form are pushed as well as the
# PDF version (if applicable).
# Examples:
#
# make publish DOC=bsp-guide VER=1.3
# make publish DOC=adt-manual VER=1.3
# make publish DOC=bsp-guide VER=1.2
# make publish DOC=adt-manual VER=1.2
# make publish DOC=dev-manual VER=1.1.1 BRANCH=edison
# make publish DOC=dev-manual VER=1.2
# make publish DOC=mega-manual VER=1.3 BRANCH=denzil
#
# The first example publishes the 1.2 version of both the PDF and HTML versions of
# the BSP Guide. The second example publishes the 1.2 version of both the PDF and
# HTML versions of the ADT Manual. The third example publishes the PDF and HTML
# 'edison' versions of the YP Development Manual. The fourth example publishes
# the PDF and HTML 'master' versions of the YP Development Manual. The last
# example publishes the 1.3 version of the mega-manual (HTML-only) and the
# version generated and published is based on the 'denzil' branch.
# 'edison' versions of the YP Development Manual. Finally, the last example publishes
# the PDF and HTML 'master' versions of the YP Development Manual.
#
ifeq ($(DOC),bsp-guide)
XSLTOPTS = --stringparam html.stylesheet bsp-style.css \
XSLTOPTS = --stringparam html.stylesheet style.css \
--stringparam chapter.autolabel 1 \
--stringparam section.autolabel 1 \
--stringparam section.label.includes.component.label 1 \
--xinclude
ALLPREQ = html pdf tarball
TARFILES = bsp-style.css bsp-guide.html bsp-guide.pdf figures/bsp-title.png
TARFILES = style.css bsp-guide.html bsp-guide.pdf figures/bsp-title.png
MANUALS = $(DOC)/$(DOC).html $(DOC)/$(DOC).pdf
FIGURES = figures
STYLESHEET = $(DOC)/*.css
@@ -84,7 +73,7 @@ STYLESHEET = $(DOC)/*.css
endif
ifeq ($(DOC),dev-manual)
XSLTOPTS = --stringparam html.stylesheet dev-style.css \
XSLTOPTS = --stringparam html.stylesheet style.css \
--stringparam chapter.autolabel 1 \
--stringparam section.autolabel 1 \
--stringparam section.label.includes.component.label 1 \
@@ -93,12 +82,11 @@ ALLPREQ = html pdf tarball
#
# Note that the tarfile might produce the "Cannot stat: No such file or directory" error
# message for .PNG files that are not present when building a particular branch. The
# list of files is all-inclusive for all branches. Note, if you don't provide a BRANCH
# option, it defaults to the latest stuff. This would be appropriate for "master" branch.
# list of files is all-inclusive for all branches.
#
ifeq ($(BRANCH),edison)
TARFILES = dev-style.css dev-manual.html dev-manual.pdf \
TARFILES = style.css dev-manual.html dev-manual.pdf \
figures/app-dev-flow.png figures/bsp-dev-flow.png figures/dev-title.png \
figures/git-workflow.png figures/index-downloads.png figures/kernel-dev-flow.png \
figures/kernel-example-repos-edison.png \
@@ -106,22 +94,13 @@ TARFILES = dev-style.css dev-manual.html dev-manual.pdf \
figures/kernel-overview-3-edison.png \
figures/source-repos.png figures/yp-download.png \
figures/wip.png
else ifeq ($(BRANCH),denzil)
TARFILES = dev-style.css dev-manual.html dev-manual.pdf \
else
TARFILES = style.css dev-manual.html dev-manual.pdf \
figures/app-dev-flow.png figures/bsp-dev-flow.png figures/dev-title.png \
figures/git-workflow.png figures/index-downloads.png figures/kernel-dev-flow.png \
figures/kernel-example-repos-denzil.png \
figures/kernel-example-repos.png \
figures/kernel-overview-1.png figures/kernel-overview-2.png \
figures/kernel-overview-3-denzil.png \
figures/source-repos.png figures/yp-download.png \
figures/wip.png
else
TARFILES = dev-style.css dev-manual.html dev-manual.pdf \
figures/app-dev-flow.png figures/bsp-dev-flow.png figures/dev-title.png \
figures/git-workflow.png figures/index-downloads.png figures/kernel-dev-flow.png \
figures/kernel-example-repos-denzil.png \
figures/kernel-overview-1.png figures/kernel-overview-2.png \
figures/kernel-overview-3-denzil.png \
figures/kernel-overview-3.png \
figures/source-repos.png figures/yp-download.png \
figures/wip.png
endif
@@ -133,79 +112,24 @@ STYLESHEET = $(DOC)/*.css
endif
ifeq ($(DOC),yocto-project-qs)
XSLTOPTS = --stringparam html.stylesheet qs-style.css \
XSLTOPTS = --stringparam html.stylesheet style.css \
--xinclude
ALLPREQ = html tarball
TARFILES = yocto-project-qs.html qs-style.css figures/yocto-environment.png figures/building-an-image.png figures/using-a-pre-built-image.png figures/yocto-project-transp.png
TARFILES = yocto-project-qs.html style.css figures/yocto-environment.png figures/building-an-image.png figures/using-a-pre-built-image.png figures/yocto-project-transp.png
MANUALS = $(DOC)/$(DOC).html
FIGURES = figures
STYLESHEET = $(DOC)/*.css
endif
ifeq ($(DOC),mega-manual)
XSLTOPTS = --stringparam html.stylesheet mega-style.css \
--stringparam chapter.autolabel 1 \
--stringparam section.autolabel 1 \
--stringparam section.label.includes.component.label 1 \
--xinclude
ALLPREQ = html tarball
ifeq ($(BRANCH),edison)
TARFILES = mega-manual.html mega-style.css figures/yocto-environment.png figures/building-an-image.png \
figures/using-a-pre-built-image.png \
figures/poky-title.png \
figures/adt-title.png figures/bsp-title.png \
figures/kernel-title.png figures/kernel-architecture-overview.png \
figures/app-dev-flow.png figures/bsp-dev-flow.png figures/dev-title.png \
figures/git-workflow.png figures/index-downloads.png figures/kernel-dev-flow.png \
figures/kernel-example-repos-edison.png \
figures/kernel-overview-1.png figures/kernel-overview-2.png \
figures/kernel-overview-3-edison.png \
figures/source-repos.png figures/yp-download.png \
figures/wip.png
else ifeq ($(BRANCH),denzil)
TARFILES = mega-manual.html mega-style.css figures/yocto-environment.png figures/building-an-image.png \
figures/using-a-pre-built-image.png \
figures/poky-title.png \
figures/adt-title.png figures/bsp-title.png \
figures/kernel-title.png figures/kernel-architecture-overview.png \
figures/app-dev-flow.png figures/bsp-dev-flow.png figures/dev-title.png \
figures/git-workflow.png figures/index-downloads.png figures/kernel-dev-flow.png \
figures/kernel-example-repos-denzil.png \
figures/kernel-overview-1.png figures/kernel-overview-2.png \
figures/kernel-overview-3-denzil.png \
figures/source-repos.png figures/yp-download.png \
figures/wip.png
else
TARFILES = mega-manual.html mega-style.css figures/yocto-environment.png figures/building-an-image.png \
figures/using-a-pre-built-image.png \
figures/poky-title.png \
figures/adt-title.png figures/bsp-title.png \
figures/kernel-title.png figures/kernel-architecture-overview.png \
figures/app-dev-flow.png figures/bsp-dev-flow.png figures/dev-title.png \
figures/git-workflow.png figures/index-downloads.png figures/kernel-dev-flow.png \
figures/kernel-example-repos-denzil.png \
figures/kernel-overview-1.png figures/kernel-overview-2.png \
figures/kernel-overview-3-denzil.png \
figures/source-repos.png figures/yp-download.png \
figures/wip.png
endif
MANUALS = $(DOC)/$(DOC).html
FIGURES = figures
STYLESHEET = $(DOC)/*.css
endif
ifeq ($(DOC),poky-ref-manual)
XSLTOPTS = --stringparam html.stylesheet ref-style.css \
XSLTOPTS = --stringparam html.stylesheet style.css \
--stringparam chapter.autolabel 1 \
--stringparam appendix.autolabel A \
--stringparam section.autolabel 1 \
--stringparam section.label.includes.component.label 1 \
--xinclude
ALLPREQ = html pdf tarball
TARFILES = poky-ref-manual.html ref-style.css figures/poky-title.png
TARFILES = poky-ref-manual.html style.css figures/poky-title.png
MANUALS = $(DOC)/$(DOC).html $(DOC)/$(DOC).pdf
FIGURES = figures
STYLESHEET = $(DOC)/*.css
@@ -213,28 +137,28 @@ endif
ifeq ($(DOC),adt-manual)
XSLTOPTS = --stringparam html.stylesheet adt-style.css \
XSLTOPTS = --stringparam html.stylesheet style.css \
--stringparam chapter.autolabel 1 \
--stringparam appendix.autolabel A \
--stringparam section.autolabel 1 \
--stringparam section.label.includes.component.label 1 \
--xinclude
ALLPREQ = html pdf tarball
TARFILES = adt-manual.html adt-manual.pdf adt-style.css figures/adt-title.png
TARFILES = adt-manual.html adt-manual.pdf style.css figures/adt-title.png
MANUALS = $(DOC)/$(DOC).html $(DOC)/$(DOC).pdf
FIGURES = figures
STYLESHEET = $(DOC)/*.css
endif
ifeq ($(DOC),kernel-manual)
XSLTOPTS = --stringparam html.stylesheet kernel-style.css \
XSLTOPTS = --stringparam html.stylesheet style.css \
--stringparam chapter.autolabel 1 \
--stringparam appendix.autolabel A \
--stringparam section.autolabel 1 \
--stringparam section.label.includes.component.label 1 \
--xinclude
ALLPREQ = html pdf tarball
TARFILES = kernel-manual.html kernel-manual.pdf kernel-style.css figures/kernel-title.png figures/kernel-architecture-overview.png
TARFILES = kernel-manual.html kernel-manual.pdf style.css figures/kernel-title.png figures/kernel-architecture-overview.png
MANUALS = $(DOC)/$(DOC).html $(DOC)/$(DOC).pdf
FIGURES = figures
STYLESHEET = $(DOC)/*.css
@@ -252,47 +176,17 @@ all: $(ALLPREQ)
pdf:
ifeq ($(DOC),yocto-project-qs)
@echo " "
@echo "ERROR: You cannot generate a yocto-project-qs PDF file."
@echo "ERROR: You cannot generate a PDF file for the Yocto Project Quick Start"
@echo " "
else ifeq ($(DOC),mega-manual)
@echo " "
@echo "ERROR: You cannot generate a mega-manual PDF file."
@echo " "
else
cd $(DOC); ../tools/poky-docbook-to-pdf $(DOC).xml ../template; cd ..
endif
html:
ifeq ($(DOC),mega-manual)
# See http://www.sagehill.net/docbookxsl/HtmlOutput.html
@echo " "
@echo "******** Building "$(DOC)
@echo " "
cd $(DOC); xsltproc $(XSLTOPTS) -o $(DOC).html $(DOC)-customization.xsl $(DOC).xml; cd ..
@echo " "
@echo "******** Using mega-manual.sed to process external links"
@echo " "
cd $(DOC); sed -f ../tools/mega-manual.sed < mega-manual.html > mega-output.html; cd ..
@echo " "
@echo "******** Cleaning up transient file mega-output.html"
@echo " "
cd $(DOC); rm mega-manual.html; mv mega-output.html mega-manual.html; cd ..
else
# See http://www.sagehill.net/docbookxsl/HtmlOutput.html
@echo " "
@echo "******** Building "$(DOC)
@echo " "
cd $(DOC); xsltproc $(XSLTOPTS) -o $(DOC).html $(DOC)-customization.xsl $(DOC).xml; cd ..
endif
tarball: html
@echo " "
@echo "******** Creating Tarball of document files"
@echo " "
cd $(DOC); tar -cvzf $(DOC).tgz $(TARFILES); cd ..
validate:
@@ -300,18 +194,8 @@ validate:
publish:
@if test -f $(DOC)/$(DOC).html; \
then \
echo " "; \
echo "******** Publishing "$(DOC)".html"; \
echo " "; \
scp -r $(MANUALS) $(STYLESHEET) www.yoctoproject.org:/srv/www/www.yoctoproject.org-docs/$(VER)/$(DOC); \
cd $(DOC); scp -r $(FIGURES) www.yoctoproject.org:/srv/www/www.yoctoproject.org-docs/$(VER)/$(DOC); \
else \
echo " "; \
echo $(DOC)".html missing. Generate the file first then try again."; \
echo " "; \
fi
scp -r $(MANUALS) $(STYLESHEET) www.yoctoproject.org:/srv/www/www.yoctoproject.org-docs/$(VER)/$(DOC)
cd $(DOC); scp -r $(FIGURES) www.yoctoproject.org:/srv/www/www.yoctoproject.org-docs/$(VER)/$(DOC)/figures
clean:
rm -f $(MANUALS)

View File

@@ -41,13 +41,8 @@ Folders exist for individual manuals as follows:
* kernel-manual - The Yocto Project Kernel Architecture and Use Manual
* poky-ref-manual - The Yocto Project Reference Manual
* yocto-project-qs - The Yocto Project Quick Start
* mega-manual - The aggregated manual comprised of all YP manuals and guides
Each folder is self-contained regarding content and figures. Note that there
is a sed file needed to process the links of the mega-manual. The sed file
is located in the tools directory. Also note that the figures folder in the
mega-manual directory contains duplicates of all the figures in the YP folders
directories for all YP manuals and guides.
Each folder is self-contained regarding content and figures.
Makefile
========
@@ -76,10 +71,7 @@ Contains various templates, fonts, and some old PNG files.
tools
=====
Contains a tool to convert the DocBook files to PDF format. This folder also
contains the mega-manual.sed file, which is used by Makefile to process
cross-references from within the manual that normally go to an external
manual.
Contains a tool to convert the DocBook files to PDF format.

View File

@@ -8,7 +8,7 @@
<para>
Recall that earlier the manual discussed how to use an existing toolchain
tarball that had been installed into <filename>/opt/poky</filename>,
which is outside of the build directory
which is outside of the Yocto Project build tree
(see the section "<link linkend='using-an-existing-toolchain-tarball'>Using an Existing
Toolchain Tarball)</link>".
And, that sourcing your architecture-specific environment setup script
@@ -21,7 +21,7 @@
for example, <filename>configure.sh</filename> can find pre-generated
test results for tests that need target hardware on which to run.
These conditions allow you to easily use the toolchain outside of the
OpenEmbedded build environment on both autotools-based projects and
Yocto Project build environment on both autotools-based projects and
Makefile-based projects.
</para>

View File

@@ -0,0 +1,736 @@
<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd"
[<!ENTITY % poky SYSTEM "../poky.ent"> %poky; ] >
<chapter id='adt-eclipse'>
<title>Working Within Eclipse</title>
<para>
The Eclipse IDE is a popular development environment and it fully supports
development using the Yocto Project.
When you install and configure the Eclipse Yocto Project Plug-in into
the Eclipse IDE, you maximize your Yocto Project design experience.
Installing and configuring the Plug-in results in an environment that
has extensions specifically designed to let you more easily develop software.
These extensions allow for cross-compilation, deployment, and execution of
your output into a QEMU emulation session.
You can also perform cross-debugging and profiling.
The environment also supports a suite of tools that allows you to perform
remote profiling, tracing, collection of power data, collection of
latency data, and collection of performance data.
</para>
<para>
This section describes how to install and configure the Eclipse IDE
Yocto Plug-in and how to use it to develop your Yocto Project.
</para>
<section id='setting-up-the-eclipse-ide'>
<title>Setting Up the Eclipse IDE</title>
<para>
To develop within the Eclipse IDE, you need to do the following:
<orderedlist>
<listitem><para>Install the optimal version of the Eclipse IDE.</para></listitem>
<listitem><para>Configure the Eclipse IDE.</para></listitem>
<listitem><para>Install the Eclipse Yocto Plug-in.</para></listitem>
<listitem><para>Configure the Eclipse Yocto Plug-in.</para></listitem>
</orderedlist>
</para>
<section id='installing-eclipse-ide'>
<title>Installing the Eclipse IDE</title>
<para>
It is recommended that you have the Indigo 3.7.2 version of the
Eclipse IDE installed on your development system.
If you don't have this version, you can find it at
<ulink url='&ECLIPSE_MAIN_URL;'></ulink>.
From that site, choose the Eclipse Classic version particular to your development
host.
This version contains the Eclipse Platform, the Java Development
Tools (JDT), and the Plug-in Development Environment.
</para>
<para>
Once you have downloaded the tarball, extract it into a clean
directory.
For example, the following commands unpack and install the Eclipse IDE
tarball found in the <filename>Downloads</filename> area
into a clean directory using the default name <filename>eclipse</filename>:
<literallayout class='monospaced'>
$ cd ~
$ tar -xzvf ~/Downloads/eclipse-SDK-3.7.1-linux-gtk-x86_64.tar.gz
</literallayout>
</para>
<para>
One issue exists that you need to be aware of regarding the Java
Virtual machine's garbage collection (GC) process.
The GC process does not clean up the permanent generation
space (PermGen).
This space stores metadata descriptions of classes.
The default value is set too small and it could trigger an
out-of-memory error such as the following:
<literallayout class='monospaced'>
java.lang.OutOfMemoryError: PermGen space
</literallayout>
</para>
<para>
This error causes the application to hang.
</para>
<para>
To fix this issue, you can use the <filename>--vmargs</filename>
option when you start Eclipse to increase the size of the permanent generation space:
<literallayout class='monospaced'>
eclipse --vmargs --XX:PermSize=256M
</literallayout>
</para>
</section>
<section id='configuring-the-eclipse-ide'>
<title>Configuring the Eclipse IDE</title>
<para>
Before installing and configuring the Eclipse Yocto Plug-in, you need to configure
the Eclipse IDE.
Follow these general steps to configure Eclipse:
<orderedlist>
<listitem><para>Start the Eclipse IDE.</para></listitem>
<listitem><para>Make sure you are in your Workbench and select
"Install New Software" from the "Help" pull-down menu.
</para></listitem>
<listitem><para>Select <filename>indigo - &ECLIPSE_INDIGO_URL;</filename>
from the "Work with:" pull-down menu.</para></listitem>
<listitem><para>Expand the box next to <filename>Programming Languages</filename>
and select the <filename>Autotools Support for CDT (incubation)</filename>
and <filename>C/C++ Development Tools</filename> boxes.</para></listitem>
<listitem><para>Expand the box next to "Linux Tools" and select the
"LTTng - Linux Tracing Toolkit(incubation)" boxes.</para></listitem>
<listitem><para>Complete the installation and restart the Eclipse IDE.</para></listitem>
<listitem><para>After the Eclipse IDE restarts and from the Workbench, select
"Install New Software" from the "Help" pull-down menu.</para></listitem>
<listitem><para>Click the
"Available Software Sites" link.</para></listitem>
<listitem><para>Check the box next to
<filename>&ECLIPSE_UPDATES_URL;</filename>
and click "OK".</para></listitem>
<listitem><para>Select <filename>&ECLIPSE_UPDATES_URL;</filename>
from the "Work with:" pull-down menu.</para></listitem>
<listitem><para>Check the box next to <filename>TM and RSE Main Features</filename>.
</para></listitem>
<listitem><para>Expand the box next to <filename>TM and RSE Optional Add-ons</filename>
and select every item except <filename>RSE Unit Tests</filename> and
<filename>RSE WinCE Services (incubation)</filename>.</para></listitem>
<listitem><para>Complete the installation and restart the Eclipse IDE.</para></listitem>
<listitem><para>If necessary, select
"Install New Software" from the "Help" pull-down menu so you can click the
"Available Software Sites" link again.</para></listitem>
<listitem><para>After clicking "Available Software Sites", check the box next to
<filename>http://download.eclipse.org/tools/cdt/releases/indigo</filename>
and click "OK".</para></listitem>
<listitem><para>Select <filename>&ECLIPSE_INDIGO_CDT_URL;</filename>
from the "Work with:" pull-down menu.</para></listitem>
<listitem><para>Check the box next to <filename>CDT Main Features</filename>.
</para></listitem>
<listitem><para>Expand the box next to <filename>CDT Optional Features</filename>
and select <filename>C/C++ Remote Launch</filename> and
<filename>Target Communication Framework (incubation)</filename>.</para></listitem>
<listitem><para>Complete the installation and restart the Eclipse IDE.</para></listitem>
</orderedlist>
</para>
</section>
<section id='installing-the-eclipse-yocto-plug-in'>
<title>Installing or Accessing the Eclipse Yocto Plug-in</title>
<para>
You can install the Eclipse Yocto Plug-in into the Eclipse IDE
one of two ways: use the Yocto Project update site to install the pre-built plug-in,
or build and install the plug-in from the latest source code.
If you don't want to permanently install the plug-in but just want to try it out
within the Eclipse environment, you can import the plug-in project from the
Yocto Project source repositories.
</para>
<section id='new-software'>
<title>Installing the Pre-built Plug-in from the Yocto Project Eclipse Update Site</title>
<para>
To install the Eclipse Yocto Plug-in from the update site,
follow these steps:
<orderedlist>
<listitem><para>Start up the Eclipse IDE.</para></listitem>
<listitem><para>In Eclipse, select "Install New Software" from the "Help" menu.</para></listitem>
<listitem><para>Click "Add..." in the "Work with:" area.</para></listitem>
<listitem><para>Enter
<filename>&ECLIPSE_DL_PLUGIN_URL;</filename>
in the URL field and provide a meaningful name in the "Name" field.</para></listitem>
<listitem><para>Click "OK" to have the entry added to the "Work with:"
drop-down list.</para></listitem>
<listitem><para>Select the entry for the plug-in from the "Work with:" drop-down
list.</para></listitem>
<listitem><para>Check the box next to <filename>Development tools and SDKs for Yocto Linux</filename>.
</para></listitem>
<listitem><para>Complete the remaining software installation steps and
then restart the Eclipse IDE to finish the installation of the plug-in.
</para></listitem>
</orderedlist>
</para>
</section>
<section id='zip-file-method'>
<title>Installing the Plug-in Using the Latest Source Code</title>
<para>
To install the Eclipse Yocto Plug-in from the latest source code, follow these steps:
<orderedlist>
<listitem><para>Open a shell and create a Git repository with:
<literallayout class='monospaced'>
$ git clone git://git.yoctoproject.org/eclipse-poky yocto-eclipse
</literallayout>
For this example, the repository is named
<filename>~/yocto-eclipse</filename>.</para></listitem>
<listitem><para>Locate the <filename>build.sh</filename> script in the
Git repository you created in the previous step.
The script is located in the <filename>scripts</filename> directory.</para></listitem>
<listitem><para>Be sure to set and export the <filename>ECLIPSE_HOME</filename> environment
variable to the top-level directory in which you installed the Indigo
version of Eclipse.
For example, if your Eclipse directory is <filename>$HOME/eclipse</filename>,
use the following:
<literallayout class='monospaced'>
$ export ECLIPSE_HOME=$HOME/eclipse
</literallayout></para></listitem>
<listitem><para>Run the <filename>build.sh</filename> script and provide the
name of the Git branch along with the Yocto Project release you are
using.
Here is an example that uses the <filename>master</filename> Git repository
and the <filename>1.1M4</filename> release:
<literallayout class='monospaced'>
$ scripts/build.sh master 1.1M4
</literallayout>
After running the script, the file
<filename>org.yocto.sdk-&lt;release&gt;-&lt;date&gt;-archive.zip</filename>
is in the current directory.</para></listitem>
<listitem><para>If necessary, start the Eclipse IDE and be sure you are in the
Workbench.</para></listitem>
<listitem><para>Select "Install New Software" from the "Help" pull-down menu.
</para></listitem>
<listitem><para>Click "Add".</para></listitem>
<listitem><para>Provide anything you want in the "Name" field.</para></listitem>
<listitem><para>Click "Archive" and browse to the ZIP file you built
in step four.
This ZIP file should not be "unzipped", and must be the
<filename>*archive.zip</filename> file created by running the
<filename>build.sh</filename> script.</para></listitem>
<listitem><para>Check the box next to the new entry in the installation window and complete
the installation.</para></listitem>
<listitem><para>Restart the Eclipse IDE if necessary.</para></listitem>
</orderedlist>
</para>
<para>
At this point you should be able to configure the Eclipse Yocto Plug-in as described in the
"<link linkend='configuring-the-eclipse-yocto-plug-in'>Configuring the Eclipse Yocto Plug-in</link>"
section.</para>
</section>
<section id='yocto-project-source'>
<title>Importing the Plug-in Project into the Eclipse Environment</title>
<para>
Importing the Eclipse Yocto Plug-in project from the Yocto Project source repositories
is useful when you want to try out the latest plug-in from the tip of the plug-in's
development tree.
It is important to understand when you import the plug-in you are not installing
it into the Eclipse application.
Rather, you are importing the project and just using it.
To import the plug-in project, follow these steps:
<orderedlist>
<listitem><para>Open a shell and create a Git repository with:
<literallayout class='monospaced'>
$ git clone git://git.yoctoproject.org/eclipse-poky yocto-eclipse
</literallayout>
For this example, the repository is named
<filename>~/yocto-eclipse</filename>.</para></listitem>
<listitem><para>In Eclipse, select "Import" from the "File" menu.</para></listitem>
<listitem><para>Expand the "General" box and select "existing projects into workspace"
and then click "Next".</para></listitem>
<listitem><para>Select the root directory and browse to
<filename>~/yocto-eclipse/plugins</filename>.</para></listitem>
<listitem><para>Three plug-ins exist: "org.yocto.bc.ui", "org.yocto.sdk.ide", and
"org.yocto.sdk.remotetools".
Select and import all of them.</para></listitem>
</orderedlist>
</para>
<para>
The left navigation pane in the Eclipse application shows the default projects.
Right-click on one of these projects and run it as an Eclipse application.
This brings up a second instance of Eclipse IDE that has the Yocto Plug-in.
</para>
</section>
</section>
<section id='configuring-the-eclipse-yocto-plug-in'>
<title>Configuring the Eclipse Yocto Plug-in</title>
<para>
Configuring the Eclipse Yocto Plug-in involves setting the Cross
Compiler options and the Target options.
The configurations you choose become the default settings for all projects.
You do have opportunities to change them later when
you configure the project (see the following section).
</para>
<para>
To start, you need to do the following from within the Eclipse IDE:
<itemizedlist>
<listitem><para>Choose <filename>Windows -&gt; Preferences</filename> to display
the <filename>Preferences</filename> Dialog</para></listitem>
<listitem><para>Click <filename>Yocto ADT</filename></para></listitem>
</itemizedlist>
</para>
<section id='configuring-the-cross-compiler-options'>
<title>Configuring the Cross-Compiler Options</title>
<para>
To configure the Cross Compiler Options, you must select the type of toolchain,
point to the toolchain, specify the sysroot location, and select the target architecture.
<itemizedlist>
<listitem><para><emphasis>Selecting the Toolchain Type:</emphasis>
Choose between <filename>Standalone pre-built toolchain</filename>
and <filename>Build system derived toolchain</filename> for Cross
Compiler Options.
<itemizedlist>
<listitem><para><emphasis>
<filename>Standalone Pre-built Toolchain:</filename></emphasis>
Select this mode when you are using a stand-alone cross-toolchain.
For example, suppose you are an application developer and do not
need to build a target image.
Instead, you just want to use an architecture-specific toolchain on an
existing kernel and target root filesystem.
</para></listitem>
<listitem><para><emphasis>
<filename>Build System Derived Toolchain:</filename></emphasis>
Select this mode if the cross-toolchain has been installed and built
as part of the Yocto Project build tree.
When you select <filename>Build system derived toolchain</filename>,
you are using the toolchain bundled
inside the Yocto Project build tree.
</para></listitem>
</itemizedlist>
</para></listitem>
<listitem><para><emphasis>Point to the Toolchain:</emphasis>
If you are using a stand-alone pre-built toolchain, you should be pointing to the
<filename>/opt/poky/1.1</filename> directory.
This is the location for toolchains installed by the ADT Installer or by hand.
Sections "<link linkend='configuring-and-running-the-adt-installer-script'>Configuring
and Running the ADT Installer Script</link>" and
"<link linkend='using-an-existing-toolchain-tarball'>Using a Cross-Toolchain
Tarball</link>" describe two ways to install
a stand-alone cross-toolchain in the
<filename>/opt/poky</filename> directory.
<note>It is possible to install a stand-alone cross-toolchain in a directory
other than <filename>/opt/poky</filename>.
However, doing so is discouraged.</note></para>
<para>If you are using a build system derived toolchain, the path you provide
for the <filename>Toolchain Root Location</filename>
field is the Yocto Project's build directory.
See section "<link linkend='using-the-toolchain-from-within-the-build-tree'>Using
BitBake and the Yocto Project Build Tree</link>" for
information on how to install the toolchain into the Yocto
Project build tree.</para></listitem>
<listitem><para><emphasis>Specify the Sysroot Location:</emphasis>
This location is where the root filesystem for the
target hardware is created on the development system by the ADT Installer.
The QEMU user-space tools, the
NFS boot process, and the cross-toolchain all use the sysroot location.
</para></listitem>
<listitem><para><emphasis>Select the Target Architecture:</emphasis>
The target architecture is the type of hardware you are
going to use or emulate.
Use the pull-down <filename>Target Architecture</filename> menu to make
your selection.
The pull-down menu should have the supported architectures.
If the architecture you need is not listed in the menu, you
will need to build the image.
See the "<ulink url='&YOCTO_DOCS_QS_URL;#building-image'>Building an Image</ulink>" section
of The Yocto Project Quick Start for more information.</para></listitem>
</itemizedlist>
</para>
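<para>
    As an illustrative example only, a configuration that uses a stand-alone
    pre-built toolchain installed in the default location for an emulated ARM
    target could end up with values similar to the following.
    The exact paths and architecture string depend on where you installed the
    toolchain and sysroot and on the target you selected:
    <literallayout class='monospaced'>
     Toolchain Root Location:  /opt/poky/1.1
     Sysroot Location:         /home/user/test-yocto/qemuarm
     Target Architecture:      armv5te-poky-linux-gnueabi
    </literallayout>
</para>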
</section>
<section id='configuring-the-target-options'>
<title>Configuring the Target Options</title>
<para>
You can choose to emulate hardware using the QEMU emulator, or you
can choose to run your image on actual hardware.
<itemizedlist>
<listitem><para><emphasis><filename>QEMU:</filename></emphasis> Select this option if
you will be using the QEMU emulator.
If you are using the emulator, you also need to locate the kernel
and specify any custom options.</para>
<para>If you selected <filename>Build system derived toolchain</filename>,
the target kernel you built will be located in the
Yocto Project build tree in the <filename>tmp/deploy/images</filename> directory.
If you selected <filename>Standalone pre-built toolchain</filename>, the
pre-built image you downloaded is located
in the directory you specified when you downloaded the image.</para>
<para>Most custom options are for advanced QEMU users to further
customize their QEMU instance.
These options are specified between paired angled brackets.
Some options must be specified outside the brackets.
In particular, the options <filename>serial</filename>,
<filename>nographic</filename>, and <filename>kvm</filename> must all
be outside the brackets.
Use the <filename>man qemu</filename> command to get help on all the options
and their use.
The following is an example:
<literallayout class='monospaced'>
serial &lt;-m 256 -full-screen&gt;
</literallayout></para>
<para>
Regardless of the mode, Sysroot is already defined as part of the
Cross Compiler Options configuration in the
<filename>Sysroot Location:</filename> field.</para></listitem>
<listitem><para><emphasis><filename>External HW:</filename></emphasis> Select this option
if you will be using actual hardware.</para></listitem>
</itemizedlist>
</para>
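<para>
    For example, if you built for the <filename>qemux86</filename> machine using a
    build system derived toolchain, the kernel field might point to a file such as
    the one shown below, with custom options supplied in the format described above.
    Both values are illustrative only and depend on your machine, build, and needs:
    <literallayout class='monospaced'>
     &lt;build_directory&gt;/tmp/deploy/images/bzImage-qemux86.bin
     nographic serial &lt;-m 256&gt;
    </literallayout>
</para>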
<para>
Click the <filename>OK</filename> button to save your plug-in configurations.
</para>
</section>
</section>
</section>
<section id='creating-the-project'>
<title>Creating the Project</title>
<para>
You can create two types of projects: Autotools-based or Makefile-based.
This section describes how to create Autotools-based projects from within
the Eclipse IDE.
For information on creating Makefile-based projects in a terminal window, see the section
"<link linkend='using-the-command-line'>Using the Command Line</link>".
</para>
<para>
To create a project based on a Yocto template and then display the source code,
follow these steps:
<orderedlist>
<listitem><para>Select <filename>File -&gt; New -&gt; Project</filename>.</para></listitem>
<listitem><para>Double click <filename>C/C++</filename>.</para></listitem>
<listitem><para>Double click <filename>C Project</filename> to create the project.</para></listitem>
<listitem><para>Expand <filename>Yocto ADT Project</filename>.</para></listitem>
<listitem><para>Select <filename>Hello World ANSI C Autotools Project</filename>.
This is an Autotools-based project based on a Yocto Project template.</para></listitem>
<listitem><para>Put a name in the <filename>Project name:</filename> field.
Do not use hyphens as part of the name.</para></listitem>
<listitem><para>Click <filename>Next</filename>.</para></listitem>
<listitem><para>Add information in the <filename>Author</filename> and
<filename>Copyright notice</filename> fields.</para></listitem>
<listitem><para>Be sure the <filename>License</filename> field is correct.</para></listitem>
<listitem><para>Click <filename>Finish</filename>.</para></listitem>
<listitem><para>If the "open perspective" prompt appears, click "Yes" so that you
in the C/C++ perspective.</para></listitem>
<listitem><para>The left-hand navigation pane shows your project.
You can display your source by double clicking the project's source file.
</para></listitem>
</orderedlist>
</para>
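<para>
    The template produces a conventional Autotools layout.
    The following sketch shows the kind of files a project named "hello" would
    contain; the exact names and contents generated by the template can differ:
    <literallayout class='monospaced'>
     configure.ac      Autoconf input that names the project and generates ./configure
     Makefile.am       Automake input that lists the program and its sources
     src/hello.c       The "Hello World" source file
    </literallayout>
</para>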
</section>
<section id='configuring-the-cross-toolchains'>
<title>Configuring the Cross-Toolchains</title>
<para>
The earlier section, "<link linkend='configuring-the-eclipse-yocto-plug-in'>Configuring
the Eclipse Yocto Plug-in</link>", sets up the default project
configurations.
You can override these settings for a given project by following these steps:
<orderedlist>
<listitem><para>Select <filename>Project -&gt; Change Yocto Project Settings</filename>:
This selection brings up the <filename>Project Yocto Settings</filename> Dialog
and allows you to make changes specific to an individual project.
</para>
<para>By default, the Cross Compiler Options and Target Options for a project
are inherited from settings you provide using the <filename>Preferences</filename>
Dialog as described earlier
in the "<link linkend='configuring-the-eclipse-yocto-plug-in'>Configuring the Eclipse
Yocto Plug-in</link>" section.
The <filename>Project Yocto Settings</filename>
Dialog allows you to override those default settings
for a given project.</para></listitem>
<listitem><para>Make your configurations for the project and click "OK".</para></listitem>
<listitem><para>Select <filename>Project -&gt; Reconfigure Project</filename>:
This selection reconfigures the project by running
<filename>autogen.sh</filename> in the workspace for your project.
The script also runs <filename>libtoolize</filename>, <filename>aclocal</filename>,
<filename>autoconf</filename>, <filename>autoheader</filename>,
<filename>automake -a</filename>, and
<filename>./configure</filename>.
Click on the <filename>Console</filename> tab beneath your source code to
see the results of reconfiguring your project.</para></listitem>
</orderedlist>
</para>
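<para>
    Reconfiguring from within the IDE is roughly equivalent to running the
    following commands yourself from a terminal in the project directory.
    This sequence is shown only as a sketch of what the IDE does for you;
    the exact options <filename>autogen.sh</filename> passes can differ:
    <literallayout class='monospaced'>
     $ libtoolize
     $ aclocal
     $ autoconf
     $ autoheader
     $ automake -a
     $ ./configure
    </literallayout>
</para>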
</section>
<section id='building-the-project'>
<title>Building the Project</title>
<para>
To build the project, select <filename>Project -&gt; Build Project</filename>.
The console should update and you can note the cross-compiler you are using.
</para>
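<para>
    For example, when building for an x86 target with a toolchain installed under
    <filename>/opt/poky</filename>, the compile lines in the console identify the
    cross-compiler by its prefix.
    The line below is illustrative only; the prefix, options, and paths vary with
    your target architecture and installation:
    <literallayout class='monospaced'>
     i586-poky-linux-gcc --sysroot=/opt/poky/1.1/sysroots/i586-poky-linux -O2 -g -o hello hello.o
    </literallayout>
</para>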
</section>
<section id='starting-qemu-in-user-space-nfs-mode'>
<title>Starting QEMU in User Space NFS Mode</title>
<para>
To start the QEMU emulator from within Eclipse, follow these steps:
<orderedlist>
<listitem><para>Expose the <filename>Run -&gt; External Tools</filename> menu.
Your image should appear as a selectable menu item.
</para></listitem>
<listitem><para>Select your image from the menu to launch the
emulator in a new window.</para></listitem>
<listitem><para>If needed, enter your host root password in the shell window at the prompt.
This sets up a <filename>Tap 0</filename> connection needed for running in user-space
NFS mode.</para></listitem>
<listitem><para>Wait for QEMU to launch.</para></listitem>
<listitem><para>Once QEMU launches, you can begin operating within that
environment.
For example, you could determine the IP Address
for the user-space NFS by using the <filename>ifconfig</filename> command.
</para></listitem>
</orderedlist>
</para>
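<para>
    For example, from within the QEMU console you could run the following command
    to find the address to supply later when you create the debug connection.
    With the default user-space NFS networking, the reported address is typically
    something like 192.168.7.2, although your setup might differ:
    <literallayout class='monospaced'>
     # ifconfig eth0
    </literallayout>
</para>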
</section>
<section id='deploying-and-debugging-the-application'>
<title>Deploying and Debugging the Application</title>
<para>
Once the QEMU emulator is running the image, you can deploy your application
from within the Eclipse IDE and use the emulator to perform debugging.
Follow these steps to deploy the application:
<orderedlist>
<listitem><para>Select <filename>Run -&gt; Debug Configurations...</filename></para></listitem>
<listitem><para>In the left area, expand <filename>C/C++ Remote Application</filename>.</para></listitem>
<listitem><para>Locate your project and select it to bring up a new
tabbed view in the <filename>Debug Configurations</filename> Dialog.</para></listitem>
<listitem><para>Enter the absolute path into which you want to deploy
the application.
Use the <filename>Remote Absolute File Path for C/C++ Application:</filename> field.
For example, enter <filename>/usr/bin/&lt;programname&gt;</filename>.</para></listitem>
<listitem><para>Click on the <filename>Debugger</filename> tab to see the cross-tool debugger
you are using.</para></listitem>
<listitem><para>Click on the <filename>Main</filename> tab.</para></listitem>
<listitem><para>Create a new connection to the QEMU instance
by clicking on <filename>new</filename>.</para></listitem>
<listitem><para>Select <filename>TCF</filename>, which means Target Communication
Framework.</para></listitem>
<listitem><para>Click <filename>Next</filename>.</para></listitem>
<listitem><para>Clear out the <filename>host name</filename> field and enter the IP Address
determined earlier.</para></listitem>
<listitem><para>Click <filename>Finish</filename> to close the
<filename>New Connections</filename> Dialog.</para></listitem>
<listitem><para>Use the drop-down menu now in the <filename>Connection</filename> field and pick
the IP Address you entered.</para></listitem>
<listitem><para>Click <filename>Debug</filename> to bring up a login screen
and log in.</para></listitem>
<listitem><para>Accept the debug perspective.</para></listitem>
</orderedlist>
</para>
</section>
<section id='running-user-space-tools'>
<title>Running User-Space Tools</title>
<para>
As mentioned earlier in the manual, several tools exist that enhance
your development experience.
These tools are aids in developing and debugging applications and images.
You can run these user-space tools from within the Eclipse IDE through the
<filename>YoctoTools</filename> menu.
</para>
<para>
Once you pick a tool, you need to configure it for the remote target.
Every tool needs to have the connection configured.
You must select an existing TCF-based RSE connection to the remote target.
If one does not exist, click <filename>New</filename> to create one.
</para>
<para>
Here are some specifics about the remote tools:
<itemizedlist>
<listitem><para><emphasis><filename>OProfile</filename>:</emphasis> Selecting this tool launches
the <filename>oprofile-viewer</filename> on the local host machine, which connects to
the <filename>oprofile-server</filename> running on the remote target.
To use this tool, the <filename>oprofile-viewer</filename> must be installed on the
local host machine and the <filename>oprofile-server</filename> must be installed on
the remote target.
You must compile and install the <filename>oprofile-viewer</filename> from the source code
on your local host machine.
Furthermore, in order to convert the target's sample format data into a form that the
host can use, you must have <filename>oprofile</filename> version 0.9.4 or
greater installed on the host.</para>
<para>You can locate both the viewer and server from
<ulink url='&YOCTO_GIT_URL;/cgit/cgit.cgi/oprofileui/'></ulink>.
<note>The <filename>oprofile-server</filename> is installed by default on
the <filename>core-image-sato-sdk</filename> image.</note></para></listitem>
<listitem><para><emphasis><filename>Lttng-ust</filename>:</emphasis> Selecting this tool runs
<filename>usttrace</filename> on the remote target, transfers the output data back
to the local host machine, and uses the <filename>lttng</filename> Eclipse plug-in to
graphically display the output.
For information on how to use <filename>lttng</filename> to trace an application, see
<ulink url='http://lttng.org/files/ust/manual/ust.html'></ulink>.</para>
<para>For <filename>Application</filename>, you must supply the absolute path name of the
application to be traced by user mode <filename>lttng</filename>.
For example, typing <filename>/path/to/foo</filename> triggers
<filename>usttrace /path/to/foo</filename> on the remote target to trace the
program <filename>/path/to/foo</filename>.</para>
<para><filename>Argument</filename> is passed to <filename>usttrace</filename>
running on the remote target.</para>
<para>Before you use the <filename>lttng-ust</filename> tool, you need to set up
the <filename>lttng</filename> Eclipse plug-in and create an <filename>lttng</filename>
project.
Do the following:
<orderedlist>
<listitem><para>Follow these
<ulink url='http://wiki.eclipse.org/Linux_Tools_Project/LTTng#Downloading_and_installing_the_LTTng_parser_library'>instructions</ulink>
to download and install the <filename>lttng</filename> parser library.
</para></listitem>
<listitem><para>Select <filename>Window -> Open Perspective -> Other</filename>
and then select <filename>LTTng</filename>.</para></listitem>
<listitem><para>Click <filename>OK</filename> to change the Eclipse perspective
into the <filename>LTTng</filename> perspective.</para></listitem>
<listitem><para>Create a new <filename>LTTng</filename> project by selecting
<filename>File -> New -> Project</filename>.</para></listitem>
<listitem><para>Choose <filename>LTTng -> LTTng Project</filename>.</para></listitem>
<listitem><para>Click <filename>YoctoTools -> lttng-ust</filename> to start user mode
<filename>lttng</filename> on the remote target.</para></listitem>
</orderedlist></para>
<para>After the output data has been transferred from the remote target back to the local
host machine, new traces will be imported into the selected <filename>LTTng</filename> project.
Then you can go to the <filename>LTTng</filename> project, right click the imported
trace, and set the trace type as the <filename>LTTng</filename> kernel trace.
Finally, right click the imported trace and select <filename>Open</filename>
to display the data graphically.</para></listitem>
<listitem><para><emphasis><filename>PowerTOP</filename>:</emphasis> Selecting this tool runs
<filename>powertop</filename> on the remote target machine and displays the results in a
new view called <filename>powertop</filename>.</para>
<para><filename>Time to gather data(sec):</filename> is the time passed in seconds before data
is gathered from the remote target for analysis.</para>
<para><filename>show pids in wakeups list:</filename> corresponds to the
<filename>-p</filename> argument
passed to <filename>powertop</filename>.</para></listitem>
<listitem><para><emphasis><filename>LatencyTOP and Perf</filename>:</emphasis>
<filename>latencytop</filename> identifies system latency, while
<filename>perf</filename> monitors the system's
performance counter registers.
Selecting either of these tools causes an RSE terminal view to appear
from which you can run the tools.
Both tools refresh the entire screen to display results while they run.</para></listitem>
</itemizedlist>
</para>
</section>
<section id='customizing-an-image-using-a-bitbake-commander-project-and-hob'>
<title>Customizing an Image Using a BitBake Commander Project and Hob</title>
<para>
Within Eclipse, you can create a Yocto BitBake Commander project,
edit the metadata, and then use
<ulink url='&YOCTO_HOME_URL;/projects/hob'>Hob</ulink> to build a customized
image, all within one IDE.
</para>
<section id='creating-the-yocto-bitbake-commander-project'>
<title>Creating the Yocto BitBake Commander Project</title>
<para>
To create a Yocto BitBake Commander project, follow these steps:
<orderedlist>
<listitem><para>Select <filename>Window -> Open Perspective -> Other</filename>
and then choose <filename>Bitbake Commander</filename>.</para></listitem>
<listitem><para>Click <filename>OK</filename> to change the Eclipse perspective into the
Bitbake Commander perspective.</para></listitem>
<listitem><para>Select <filename>File -> New -> Project</filename> to create a new Yocto
Bitbake Commander project.</para></listitem>
<listitem><para>Choose <filename>Yocto Project Bitbake Commander -> New Yocto Project</filename>
and click <filename>Next</filename>.</para></listitem>
<listitem><para>Enter the Project Name and choose the Project Location.
The Yocto project's metadata files will be put under the directory
<filename>&lt;project_location&gt;/&lt;project_name&gt;</filename>.
If that directory does not exist, you need to check
the "Clone from Yocto Git Repository" box, which executes a
<filename>git clone</filename> command to get the Yocto project's metadata files.
</para></listitem>
<listitem><para>Select <filename>Finish</filename> to create the project.</para></listitem>
</orderedlist>
</para>
</section>
<section id='editing-the-metadata-files'>
<title>Editing the Metadata Files</title>
<para>
After you create the Yocto Bitbake Commander project, you can modify the metadata files
by opening them in the project.
When editing recipe files (<filename>.bb</filename> files), you can view BitBake
variable values and information by hovering the mouse pointer over the variable name and
waiting a few seconds.
</para>
<para>
To edit the metadata, follow these steps:
<orderedlist>
<listitem><para>Select your Yocto Bitbake Commander project.</para></listitem>
<listitem><para>Select <filename>File -> New -> Yocto BitBake Commander -> BitBake Recipe</filename>
to open a new recipe wizard.</para></listitem>
<listitem><para>Point to your source by filling in the "SRC_URL" field.
For example, to add a recipe to the
<ulink url='&YOCTO_DOCS_DEV_URL;#yocto-project-source-files'>Yocto Project Source Files</ulink>,
you could enter the "SRC_URL" as follows:
<literallayout class='monospaced'>
ftp://ftp.gnu.org/gnu/m4/m4-1.4.9.tar.gz
</literallayout></para></listitem>
<listitem><para>Click "Populate" to calculate the archive md5, sha256,
license checksum values and to auto-generate the recipe filename.</para></listitem>
<listitem><para>Fill in the "Description" field.</para></listitem>
<listitem><para>Be sure values for all required fields exist.</para></listitem>
<listitem><para>Click <filename>Finish</filename>.</para></listitem>
</orderedlist>
</para>
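<para>
    For reference, the wizard produces an ordinary BitBake recipe.
    The following is only a rough sketch of the kind of content generated for the
    <filename>m4</filename> example above; the description, license information,
    checksums, and recipe filename are filled in for you by the "Populate" step
    and appear here as placeholders:
    <literallayout class='monospaced'>
     DESCRIPTION = "&lt;your Description field&gt;"
     LICENSE = "&lt;detected license&gt;"
     LIC_FILES_CHKSUM = "file://COPYING;md5=&lt;generated&gt;"

     SRC_URI = "ftp://ftp.gnu.org/gnu/m4/m4-1.4.9.tar.gz"
     SRC_URI[md5sum] = "&lt;generated&gt;"
     SRC_URI[sha256sum] = "&lt;generated&gt;"
    </literallayout>
</para>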
</section>
<section id='buiding-and-customizing-the-image'>
<title>Building and Customizing the Image</title>
<para>
To build and customize the image in Eclipse, follow these steps:
<orderedlist>
<listitem><para>Select your Yocto Bitbake Commander project.</para></listitem>
<listitem><para>Select <filename>Project -> Launch HOB</filename>.</para></listitem>
<listitem><para>Enter the build directory where you want to put your final images.</para></listitem>
<listitem><para>Click <filename>OK</filename> to launch Hob.</para></listitem>
<listitem><para>Use Hob to customize and build your own images.
For information on Hob, see the
<ulink url='&YOCTO_HOME_URL;/projects/hob'>Hob Project Page</ulink> on the
Yocto Project website.</para></listitem>
</orderedlist>
</para>
</section>
</section>
</chapter>
<!--
vim: expandtab tw=80 ts=4
-->

View File

@@ -3,51 +3,46 @@
[<!ENTITY % poky SYSTEM "../poky.ent"> %poky; ] >
<chapter id='adt-intro'>
<title>Introduction</title>
<title>Application Development Toolkit (ADT) User's Guide</title>
<para>
Welcome to the Yocto Project Application Developer's Guide.
This manual provides information that lets you begin developing applications
using the Yocto Project.
Welcome to the Application Development Toolkit Users Guide. This manual provides
information that lets you get going with the ADT to develop projects using the Yocto
Project.
</para>
<para>
The Yocto Project provides an application development environment based on
an Application Development Toolkit (ADT) and the availability of stand-alone
cross-development toolchains and other tools.
This manual describes the ADT and how you can configure and install it,
how to access and use the cross-development toolchains, how to
customize the development packages installation,
how to use command line development for both Autotools-based and Makefile-based projects,
and an introduction to the Eclipse Yocto Plug-in.
</para>
<section id='adt-intro-section'>
<title>The Application Development Toolkit (ADT)</title>
<section id='book-intro'>
<title>Introducing the Application Development Toolkit (ADT)</title>
<para>
Part of the Yocto Project development solution is an Application Development
Toolkit (ADT).
The ADT provides you with a custom-built, cross-development
platform suited for developing a user-targeted product application.
Fundamentally, the ADT consists of an architecture-specific cross-toolchain and
a matching sysroot that are both built by the Yocto Project build system Poky.
The toolchain and sysroot are based on a metadata configuration and extensions,
which allows you to cross-develop on the host machine for the target.
</para>
<para>
Fundamentally, the ADT consists of the following:
<itemizedlist>
<listitem><para>An architecture-specific cross-toolchain and matching
sysroot both built by the OpenEmbedded build system, which uses Poky.
The toolchain and sysroot are based on a metadata configuration and extensions,
which allows you to cross-develop on the host machine for the target hardware.
</para></listitem>
<listitem><para>The Eclipse IDE Yocto Plug-in.</para></listitem>
<listitem><para>The Quick EMUlator (QEMU), which lets you simulate target hardware.
</para></listitem>
<listitem><para>Various user-space tools that greatly enhance your application
development experience.</para></listitem>
</itemizedlist>
Additionally, to provide an effective development platform, the Yocto Project
makes available and suggests other tools you can use with the ADT.
These other tools include the Eclipse IDE Yocto Plug-in, an emulator (QEMU),
and various user-space tools that greatly enhance your development experience.
</para>
<para>
The resulting combination of the architecture-specific cross-toolchain and sysroot
along with these additional tools yields a custom-built, cross-development platform
for a user-targeted product.
</para>
</section>
<section id='adt-components'>
<title>ADT Components</title>
<para>
This section provides a brief description of what comprises the ADT.
</para>
<section id='the-cross-toolchain'>
<title>The Cross-Toolchain</title>
@@ -55,7 +50,7 @@
The cross-toolchain consists of a cross-compiler, cross-linker, and cross-debugger
that are used to develop user-space applications for targeted hardware.
This toolchain is created either by running the ADT Installer script or
through a build directory that is based on your metadata
through a Yocto Project build tree that is based on your metadata
configuration or extension for your targeted device.
The cross-toolchain works with a matching target sysroot.
</para>
@@ -68,38 +63,11 @@
The matching target sysroot contains needed headers and libraries for generating
binaries that run on the target architecture.
The sysroot is based on the target root filesystem image that is built by
the OpenEmbedded build system Poky and uses the same metadata configuration
the Yocto Project's build system Poky and uses the same metadata configuration
used to build the cross-toolchain.
</para>
</section>
<section id='eclipse-overview'>
<title>Eclipse Yocto Plug-in</title>
<para>
The Eclipse IDE is a popular development environment and it fully supports
development using the Yocto Project.
When you install and configure the Eclipse Yocto Project Plug-in into
the Eclipse IDE, you maximize your Yocto Project experience.
Installing and configuring the Plug-in results in an environment that
has extensions specifically designed to let you more easily develop software.
These extensions allow for cross-compilation, deployment, and execution of
your output into a QEMU emulation session.
You can also perform cross-debugging and profiling.
The environment also supports a suite of tools that allows you to perform
remote profiling, tracing, collection of power data, collection of
latency data, and collection of performance data.
</para>
<para>
For information about the application development workflow that uses the Eclipse
IDE and for a detailed example of how to install and configure the Eclipse
Yocto Project Plug-in, see the
"<ulink url='&YOCTO_DOCS_DEV_URL;#adt-eclipse'>Working Within Eclipse</ulink>" section
of the Yocto Project Development Manual.
</para>
</section>
<section id='the-qemu-emulator'>
<title>The QEMU Emulator</title>
@@ -111,8 +79,8 @@
<listitem><para>If you use the ADT Installer script to install ADT, you can
specify whether or not to install QEMU.</para></listitem>
<listitem><para>If you have downloaded a Yocto Project release and unpacked
it to create a source directory and you have sourced
the environment setup script, QEMU is installed and automatically
it to create a Yocto Project file structure and you have sourced
the Yocto Project environment setup script, QEMU is installed and automatically
available.</para></listitem>
<listitem><para>If you have installed the cross-toolchain
tarball and you have sourced the toolchain's setup environment script, QEMU
@@ -152,7 +120,7 @@
<listitem><para><emphasis>SystemTap:</emphasis> A free software infrastructure
that simplifies information gathering about a running Linux system.
This information helps you diagnose performance or functional problems.
SystemTap is not available as a user-space tool through the Eclipse IDE Yocto Plug-in.
SystemTap is not available as a user-space tool through the Yocto Eclipse IDE Plug-in.
See <ulink url='http://sourceware.org/systemtap'></ulink> for more information
on SystemTap.</para></listitem>
<listitem><para><emphasis>Lttng-ust:</emphasis> A User-space Tracer designed to

View File

@@ -46,13 +46,8 @@
</revision>
<revision>
<revnumber>1.2</revnumber>
<date>April 2012</date>
<revremark>Released with the Yocto Project 1.2 Release.</revremark>
</revision>
<revision>
<revnumber>1.3</revnumber>
<date>Sometime in 2012</date>
<revremark>Released with the Yocto Project 1.3 Release.</revremark>
<date>TBD 2012</date>
<revremark>Work in progress for the Yocto Project 1.2 Release.</revremark>
</revision>
</revhistory>
@@ -69,7 +64,8 @@
<note>
Due to production processes, there could be differences between the Yocto Project
documentation bundled in the release tarball and the
<ulink url='&YOCTO_DOCS_ADT_URL;'>Yocto Project Application Developer's Guide</ulink> on
<ulink url='&YOCTO_DOCS_ADT_URL;'>
Application Developer's Toolkit (ADT) User's Guide</ulink> on
the <ulink url='&YOCTO_HOME_URL;'>Yocto Project</ulink> website.
For the latest version of this manual, see the manual on the website.
</note>
@@ -84,6 +80,8 @@
<xi:include href="adt-package.xml"/>
<xi:include href="adt-eclipse.xml"/>
<xi:include href="adt-command.xml"/>
<!-- <index id='index'>

View File

@@ -17,7 +17,7 @@
<title>Package Management Systems</title>
<para>
The OpenEmbedded build system supports the generation of sysroot files using
The Yocto Project supports the generation of sysroot files using
three different Package Management Systems (PMS):
<itemizedlist>
<listitem><para><emphasis>OPKG:</emphasis> A less well known PMS whose use
@@ -28,7 +28,7 @@
<listitem><para><emphasis>RPM:</emphasis> A more widely known PMS intended for GNU/Linux
distributions.
This PMS works with files packaged in an <filename>.rpm</filename> format.
The build system currently installs through this PMS by default.
The Yocto Project currently installs through this PMS by default.
See <ulink url='http://en.wikipedia.org/wiki/RPM_Package_Manager'></ulink>
for more information about RPM.</para></listitem>
<listitem><para><emphasis>Debian:</emphasis> The PMS for Debian-based systems
@@ -45,8 +45,7 @@
<para>
Whichever PMS you are using, you need to be sure that the
<ulink url='&YOCTO_DOCS_REF_URL;#var-PACKAGE_CLASSES'><filename>PACKAGE_CLASSES</filename></ulink>
variable in the <filename>conf/local.conf</filename>
<filename>PACKAGE_CLASSES</filename> variable in the <filename>conf/local.conf</filename>
file is set to reflect that system.
The first value you choose for the variable specifies the package file format for the root
filesystem at sysroot.
@@ -56,8 +55,7 @@
<note>
For build performance information related to the PMS, see
<ulink url='&YOCTO_DOCS_REF_URL;#ref-classes-package'>Packaging - <filename>package*.bbclass</filename></ulink>
in the Yocto Project Reference Manual.
<ulink url='&YOCTO_DOCS_REF_URL;#ref-classes-package'>Packaging - <filename>package*.bbclass</filename></ulink> in The Yocto Project Reference Manual.
</note>
<para>
@@ -77,8 +75,7 @@
</para>
<para>
Next, source the environment setup script found in the
<ulink url='&YOCTO_DOCS_DEV_URL;#source-directory'>source directory</ulink>.
Next, source the environment setup script found in the Yocto Project files.
Follow that by setting up the installation destination to point to your
sysroot as <filename>&lt;sysroot_dir&gt;</filename>.
Finally, have an OPKG configuration file <filename>&lt;conf_file&gt;</filename>

View File

@@ -4,41 +4,25 @@
<chapter id='adt-prepare'>
<title>Preparing for Application Development</title>
<title>Preparing to Use the Application Development Toolkit (ADT)</title>
<para>
In order to develop applications, you need set up your host development system.
Several ways exist that allow you to install cross-development tools, QEMU, the
Eclipse Yocto Plug-in, and other tools.
This chapter describes how to prepare for application development.
In order to use the ADT, you must install it, <filename>source</filename> a script to set up the
environment, and be sure both the kernel and filesystem image specific to the target architecture
exist.
This chapter describes how to be sure you meet the ADT requirements.
</para>
<section id='installing-the-adt'>
<title>Installing the ADT and Toolchains</title>
<title>Installing the ADT</title>
<para>
The following list describes installation methods that set up varying degrees of tool
availability on your system.
Regardless of the installation method you choose,
you must <filename>source</filename> the cross-toolchain
environment setup script before you use a toolchain.
The following list describes how you can install the ADT, which includes the cross-toolchain.
Regardless of the installation you choose, you must <filename>source</filename> the cross-toolchain
environment setup script before you use the toolchain.
See the "<link linkend='setting-up-the-cross-development-environment'>Setting Up the
Cross-Development Environment</link>" section for more information.
</para>
<note>
<para>Avoid mixing installation methods when installing toolchains for different architectures.
For example, avoid using the ADT Installer to install some toolchains and then hand-installing
cross-development toolchains from downloaded tarballs to install toolchains
for different architectures.
Mixing installation methods can result in situations where the ADT Installer becomes
unreliable and might not install the toolchain.</para>
<para>If you must mix installation methods, you might avoid problems by deleting
<filename>/var/lib/opkg</filename>, thus purging the <filename>opkg</filename> package
metadata</para>
</note>
<para>
Cross-Development Environment</link>"
section for more information.
<itemizedlist>
<listitem><para><emphasis>Use the ADT Installer Script:</emphasis>
This method is the recommended way to install the ADT because it
@@ -51,10 +35,9 @@
toolchain tarball and then hand-install the toolchain.
If you use this method, you just get the cross-toolchain and QEMU - you do not
get any of the other mentioned benefits had you run the ADT Installer script.</para></listitem>
<listitem><para><emphasis>Use the Toolchain from within the Build Directory:</emphasis>
If you already have a
<ulink url='&YOCTO_DOCS_DEV_URL;#build-directory'>build directory</ulink>,
you can build the cross-toolchain within the directory.
<listitem><para><emphasis>Use the Toolchain from within a Yocto Project Build Tree:</emphasis>
If you already have a Yocto Project build tree, you can build the cross-toolchain
within tree.
However, like the previous method mentioned, you only get the cross-toolchain and QEMU - you
do not get any of the other benefits without taking separate steps.</para></listitem>
</itemizedlist>
@@ -77,22 +60,22 @@
<ulink url='&YOCTO_DL_URL;/releases'>Index of Releases</ulink>, specifically
at
<ulink url='&YOCTO_ADTINSTALLER_DL_URL;'></ulink>.
Or, you can use BitBake to generate the tarball inside the existing
<ulink url='&YOCTO_DOCS_DEV_URL;#build-directory'>build directory</ulink>.
Or, you can use BitBake to generate the tarball inside the existing Yocto Project
build tree.
</para>
<para>
If you use BitBake to generate the ADT Installer tarball, you must
<filename>source</filename> the environment setup script
<filename>source</filename> the Yocto Project environment setup script
(<filename>oe-init-build-env</filename>) located
in the source directory before running the <filename>bitbake</filename>
in the Yocto Project file structure before running the <filename>bitbake</filename>
command that creates the tarball.
</para>
<para>
The following example commands download the Poky tarball, set up the
<ulink url='&YOCTO_DOCS_DEV_URL;#source-directory'>source directory</ulink>,
set up the environment while also creating the default build directory,
The following example commands download the Yocto Project release tarball, set up the Yocto
Project files structure, set up the environment while also creating the
default Yocto Project build tree,
and run the <filename>bitbake</filename> command that results in the tarball
<filename>~/yocto-project/build/tmp/deploy/sdk/adt_installer.tar.bz2</filename>:
<literallayout class='monospaced'>
@@ -153,7 +136,7 @@
or not to install the emulator QEMU.</para></listitem>
<listitem><para><filename>YOCTOADT_NFS_UTIL</filename>: Indicates whether
or not to install user-mode NFS.
If you plan to use the Eclipse IDE Yocto plug-in against QEMU,
If you plan to use the Yocto Eclipse IDE plug-in against QEMU,
you should install NFS.
<note>To boot QEMU images using our userspace NFS server, you need
to be running <filename>portmap</filename> or <filename>rpcbind</filename>.
@@ -183,10 +166,7 @@
<para>
After you have configured the <filename>adt_installer.conf</filename> file,
run the installer using the following command.
Be sure that you are not trying to use cross-compilation tools.
When you run the installer, the environment must use a
host <filename>gcc</filename>:
run the installer using the following command:
<literallayout class='monospaced'>
$ ./adt_installer
</literallayout>
@@ -196,7 +176,7 @@
The ADT Installer requires the <filename>libtool</filename> package to complete.
If you install the recommended packages as described in
"<ulink url='&YOCTO_DOCS_QS_URL;#packages'>The Packages</ulink>"
section of the Yocto Project Quick Start, then you will have libtool installed.
section of The Yocto Project Quick Start, then you will have libtool installed.
</note>
<para>
@@ -246,17 +226,17 @@
poky-eglibc-x86_64-i586-toolchain-gmae-&DISTRO;.tar.bz2
</literallayout>
<note><para>As an alternative to steps one and two, you can build the toolchain tarball
if you have a <ulink url='&YOCTO_DOCS_DEV_URL;#build-directory'>build directory</ulink>.
if you have a Yocto Project build tree.
If you need GMAE, you should use the <filename>bitbake meta-toolchain-gmae</filename>
command.
The resulting tarball will support such development.
However, if you are not concerned with GMAE,
you can generate the tarball using <filename>bitbake meta-toolchain</filename>.</para>
<para>Use the appropriate <filename>bitbake</filename> command only after you have
sourced the <filename>oe-init-build-env</filename> script located in the source
directory.
sourced the <filename>oe-init-build-env</filename> script located in the Yocto
Project files.
When the <filename>bitbake</filename> command completes, the tarball will
be in <filename>tmp/deploy/sdk</filename> in the build directory.
be in <filename>tmp/deploy/sdk</filename> in the Yocto Project build tree.
</para></note></para></listitem>
<listitem><para>Make sure you are in the root directory with root privileges and then expand
the tarball.
@@ -269,49 +249,46 @@
</section>
<section id='using-the-toolchain-from-within-the-build-tree'>
<title>Using BitBake and the Build Directory</title>
<title>Using BitBake and the Yocto Project Build Tree</title>
<para>
A final way of making the cross-toolchain available is to use BitBake
to generate the toolchain within an existing
<ulink url='&YOCTO_DOCS_DEV_URL;#build-directory'>build directory</ulink>.
This method does not install the toolchain into the
<filename>/opt</filename> directory.
A final way of installing just the cross-toolchain is to use BitBake to build the
toolchain within an existing Yocto Project build tree.
This method does not install the toolchain into the <filename>/opt</filename> directory.
As with the previous method, if you need to install the target sysroot, you must
do that separately as well.
do this separately.
</para>
<para>
Follow these steps to generate the toolchain into the build directory:
Follow these steps to build and install the toolchain into the build tree:
<orderedlist>
<listitem><para>Source the environment setup script
<filename>oe-init-build-env</filename> located in the
<ulink url='&YOCTO_DOCS_DEV_URL;#source-directory'>source directory</ulink>.
</para></listitem>
<filename>oe-init-build-env</filename> located in the Yocto Project
files.</para></listitem>
<listitem><para>At this point, you should be sure that the
<ulink url='&YOCTO_DOCS_REF_URL;#var-MACHINE'><filename>MACHINE</filename></ulink> variable
<filename>MACHINE</filename> variable
in the <filename>local.conf</filename> file found in the
<filename>conf</filename> directory of the build directory
<filename>conf</filename> directory of the Yocto Project build directory
is set for the target architecture.
Comments within the <filename>local.conf</filename> file list the values you
can use for the <filename>MACHINE</filename> variable.
<note>You can populate the build directory with the cross-toolchains for more
<note>You can populate the build tree with the cross-toolchains for more
than a single architecture.
You just need to edit the <filename>MACHINE</filename> variable in the
<filename>local.conf</filename> file and re-run the BitBake
command.</note></para></listitem>
<listitem><para>Run <filename>bitbake meta-ide-support</filename> to complete the
cross-toolchain generation.
<note>If you change out of your working directory after you
cross-toolchain installation.
<note>If change out of your working directory after you
<filename>source</filename> the environment setup script and before you run
the <filename>bitbake</filename> command, the command might not work.
Be sure to run the <filename>bitbake</filename> command immediately
after checking or editing the <filename>local.conf</filename> but without
changing out of your working directory.</note>
Once the <filename>bitbake</filename> command finishes,
the cross-toolchain is generated and populated within the build directory.
the tarball for the cross-toolchain is generated within the Yocto Project build tree.
You will notice environment setup files for the cross-toolchain in the
build directory in the <filename>tmp</filename> directory.
Yocto Project build tree in the <filename>tmp</filename> directory.
Setup script filenames contain the strings <filename>environment-setup</filename>.
</para></listitem>
</orderedlist>
@@ -325,13 +302,11 @@
<para>
Before you can develop using the cross-toolchain, you need to set up the
cross-development environment by sourcing the toolchain's environment setup script.
If you used the ADT Installer or hand-installed cross-toolchain,
If you used the ADT Installer or used an existing ADT tarball to install the ADT,
then you can find this script in the <filename>&YOCTO_ADTPATH_DIR;</filename>
directory.
If you installed the toolchain in the
<ulink url='&YOCTO_DOCS_DEV_URL;#build-directory'>build directory</ulink>,
you can find the environment setup
script for the toolchain in the build directory's <filename>tmp</filename> directory.
If you installed the toolchain in the build tree, you can find the environment setup
script for the toolchain in the Yocto Project build tree's <filename>tmp</filename> directory.
</para>
<para>
@@ -365,21 +340,20 @@
pre-built versions.
You can find examples for both these situations in the
"<ulink url='&YOCTO_DOCS_QS_URL;#test-run'>A Quick Test Run</ulink>" section of
the Yocto Project Quick Start.
The Yocto Project Quick Start.
</para>
<para>
The Yocto Project ships basic kernel and filesystem images for several
The Yocto Project provides basic kernel and filesystem images for several
architectures (<filename>x86</filename>, <filename>x86-64</filename>,
<filename>mips</filename>, <filename>powerpc</filename>, and <filename>arm</filename>)
that you can use unaltered in the QEMU emulator.
These kernel images reside in the release
These kernel images reside in the Yocto Project release
area - <ulink url='&YOCTO_MACHINES_DL_URL;'></ulink>
and are ideal for experimentation using Yocto Project.
For information on the image types you can build using the OpenEmbedded build system,
see the
"<ulink url='&YOCTO_DOCS_REF_URL;#ref-images'>Images</ulink>" chapter in
the Yocto Project Reference Manual.
and are ideal for experimentation within Yocto Project.
For information on the image types you can build using the Yocto Project, see the
"<ulink url='&YOCTO_DOCS_REF_URL;#ref-images'>Reference: Images</ulink>" appendix in
The Yocto Project Reference Manual.
</para>
<para>
@@ -396,10 +370,8 @@
you can do so one of two ways:
<itemizedlist>
<listitem><para>Modify the <filename>conf/local.conf</filename> configuration in
the <ulink url='&YOCTO_DOCS_DEV_URL;#build-directory'>build directory</ulink>
and then rebuild the image.
With this method, you need to modify the
<ulink url='&YOCTO_DOCS_REF_URL;#var-EXTRA_IMAGE_FEATURES'><filename>EXTRA_IMAGE_FEATURES</filename></ulink>
the Yocto Project build directory and then rebuild the image.
With this method, you need to modify the <filename>EXTRA_IMAGE_FEATURES</filename>
variable to have the value of "tools-debug" before rebuilding the image.
Once the image is rebuilt, the <filename>tcf-agent</filename> will be included
in the image and is launched automatically after the boot.</para></listitem>
@@ -407,7 +379,7 @@
To build the agent, follow these steps:
<orderedlist>
<listitem><para>Be sure the ADT is installed as described in the
"<link linkend='installing-the-adt'>Installing the ADT and Toolchains</link>" section.
"<link linkend='installing-the-adt'>Installing the ADT</link>" section.
</para></listitem>
<listitem><para>Set up the cross-development environment as described in the
"<link linkend='setting-up-the-cross-development-environment'>Setting
@@ -420,8 +392,7 @@
</literallayout></para></listitem>
<listitem><para>Modify the <filename>Makefile.inc</filename> file
for the cross-compilation environment by setting the
<filename>OPSYS</filename> and
<ulink url='&YOCTO_DOCS_REF_URL;#var-MACHINE'><filename>MACHINE</filename></ulink>
<filename>OPSYS</filename> and <filename>MACHINE</filename>
variables according to your target.</para></listitem>
<listitem><para>Use the cross-development tools to build the
<filename>tcf-agent</filename>.

Binary file not shown.

Before

Width:  |  Height:  |  Size: 13 KiB

After

Width:  |  Height:  |  Size: 17 KiB

View File

@@ -110,7 +110,7 @@ h5 {
h6 {
margin: 1em 0em 0em 0em;
padding: 1em 0em 0em 0em;
font-size: 110%;
font-size: 80%;
font-weight: bold;
}

View File

@@ -58,13 +58,8 @@
</revision>
<revision>
<revnumber>1.2</revnumber>
<date>April 2012</date>
<revremark>Released with the Yocto Project 1.2 Release.</revremark>
</revision>
<revision>
<revnumber>1.3</revnumber>
<date>Sometime in 2012</date>
<revremark>Released with the Yocto Project 1.3 Release.</revremark>
<date>TBD 2012</date>
<revremark>Work in progress for the Yocto Project 1.2 Release.</revremark>
</revision>
</revhistory>
@@ -81,7 +76,8 @@
<note>
Due to production processes, there could be differences between the Yocto Project
documentation bundled in the release tarball and the
<ulink url='&YOCTO_DOCS_BSP_URL;'>Yocto Project Board Support Package (BSP) Developer's Guide</ulink> on
<ulink url='&YOCTO_DOCS_BSP_URL;'>
Board Support Package (BSP) Developer's Guide</ulink> on
the <ulink url='&YOCTO_HOME_URL;'>Yocto Project</ulink> website.
For the latest version of this manual, see the manual on the website.
</note>

View File

@@ -48,8 +48,8 @@
This root is what you add to the
<ulink url='&YOCTO_DOCS_REF_URL;#var-BBLAYERS'><filename>BBLAYERS</filename></ulink>
variable in the <filename>conf/bblayers.conf</filename> file found in the
<ulink url='&YOCTO_DOCS_DEV_URL;#build-directory'>build directory</ulink>.
Adding the root allows the OpenEmbedded build system to recognize the BSP
<ulink url='&YOCTO_DOCS_DEV_URL;#yocto-project-build-directory'>Yocto Project Build Directory</ulink>.
Adding the root allows the Yocto Project build system to recognize the BSP
definition and from it build an image.
Here is an example:
<literallayout class='monospaced'>
@@ -75,7 +75,7 @@
<para>
Some layers function as a layer to hold other BSP layers.
An example of this type of layer is the <filename>meta-intel</filename> layer.
An example of this type of layers is the <filename>meta-intel</filename> layer.
The <filename>meta-intel</filename> layer contains over 10 individual BSP layers.
</para>
@@ -83,8 +83,8 @@
For more detailed information on layers, see the
"<ulink url='&YOCTO_DOCS_DEV_URL;#understanding-and-creating-layers'>Understanding and Creating Layers</ulink>"
section of the Yocto Project Development Manual.
You can also see the detailed examples in the appendices of the
<ulink url='&YOCTO_DOCS_DEV_URL;'>Yocto Project Development Manual</ulink>.
You can also see the detailed examples in the appendices of
<ulink url='&YOCTO_DOCS_DEV_URL;'>The Yocto Project Development Manual</ulink>.
</para>
</section>
@@ -99,14 +99,13 @@
</para>
<para>
The proposed form does have elements that are specific to the
OpenEmbedded build system.
The proposed form does have elements that are specific to the Yocto Project and
OpenEmbedded build systems.
It is intended that this information can be
used by other build systems besides the OpenEmbedded build system
and that it will be simple
used by other systems besides Yocto Project and OpenEmbedded and that it will be simple
to extract information and convert it to other formats if required.
The OpenEmbedded build system, through its standard layers mechanism, can directly
accept the format described as a layer.
Yocto Project, through its standard layers mechanism, can directly accept the format
described as a layer.
The BSP captures all
the hardware-specific details in one place in a standard format, which is
useful for any person wishing to use the hardware platform regardless of
@@ -122,15 +121,6 @@
are separate components that happen to be combined in certain end products.
</para>
<para>
Before looking at the common form for the file structure inside a BSP Layer,
you should be aware that some requirements do exist in order for a BSP to
be considered compliant with the Yocto Project.
For that list of requirements, see the
"<link linkend='released-bsp-requirements'>Released BSP Requirements</link>"
section.
</para>
<para>
Below is the common form for the file structure inside a BSP Layer.
While you can use this basic form for the standard, realize that the actual structures
@@ -298,10 +288,9 @@
</para>
<para>
The <filename>conf/layer.conf</filename> file identifies the file structure as a
layer, identifies the
contents of the layer, and contains information about how the build
system should use it.
The <filename>conf/layer.conf</filename> file identifies the file structure as a Yocto
Project layer, identifies the
contents of the layer, and contains information about how Yocto Project should use it.
Generally, a standard boilerplate file such as the following works.
In the following example, you would replace "<filename>bsp</filename>" and
"<filename>_bsp</filename>" with the actual name
@@ -314,8 +303,8 @@
BBPATH := "${BBPATH}:${LAYERDIR}"
# We have a recipes directory, add to BBFILES
BBFILES := "${BBFILES} ${LAYERDIR}/recipes-*/*.bb \
${LAYERDIR}/recipes-*/*.bbappend"
BBFILES := "${BBFILES} ${LAYERDIR}/recipes/*/*.bb \
${LAYERDIR}/recipes/*/*.bbappend"
BBFILE_COLLECTIONS += "bsp"
BBFILE_PATTERN_bsp := "^${LAYERDIR}/"
@@ -335,7 +324,7 @@
<para>
This file simply makes BitBake aware of the recipes and configuration directories.
The file must exist so that the OpenEmbedded build system can recognize the BSP.
The file must exist so that the Yocto Project build system can recognize the BSP.
</para>
</section>
@@ -350,7 +339,7 @@
<para>
The machine files bind together all the information contained elsewhere
in the BSP into a format that the build system can understand.
in the BSP into a format that the Yocto Project build system can understand.
If the BSP supports multiple machines, multiple machine configuration files
can be present.
These filenames correspond to the values to which users have set the
@@ -390,8 +379,8 @@
<para>
Tuning files are found in the <filename>meta/conf/machine/include</filename>
directory within the
<ulink url='&YOCTO_DOCS_DEV_URL;#source-directory'>source directory</ulink>.
directory of the
<ulink url='&YOCTO_DOCS_DEV_URL;#yocto-project-files'>Yocto Project Files</ulink>.
Tuning files can also reside in the BSP Layer itself.
For example, the <filename>ia32-base.inc</filename> file resides in the
<filename>meta-intel</filename> BSP Layer in <filename>conf/machine/include</filename>.
@@ -443,7 +432,7 @@
formfactor recipe
<filename>meta/recipes-bsp/formfactor/formfactor_0.0.bb</filename>,
which is found in the
<ulink url='&YOCTO_DOCS_DEV_URL;#source-directory'>source directory</ulink>.
<ulink url='&YOCTO_DOCS_DEV_URL;#yocto-project-files'>Yocto Project Files</ulink>.
</para></note>
</section>
@@ -505,33 +494,33 @@
</para>
<para>
These files append your specific changes to the main kernel recipe you are using.
These files append your specific changes to the kernel you are using.
</para>
<para>
For your BSP, you typically want to use an existing Yocto Project kernel recipe found in the
<ulink url='&YOCTO_DOCS_DEV_URL;#source-directory'>source directory</ulink>
at <filename>meta/recipes-kernel/linux</filename>.
For your BSP, you typically want to use an existing Yocto Project kernel found in the
<ulink url='&YOCTO_DOCS_DEV_URL;#yocto-project-files'>Yocto
Project Files</ulink> at <filename>meta/recipes-kernel/linux</filename>.
You can append your specific changes to the kernel recipe by using a
similarly named append file, which is located in the BSP Layer (e.g.
the <filename>meta-&lt;bsp_name&gt;/recipes-kernel/linux</filename> directory).
</para>
<para>
Suppose you are using the <filename>linux-yocto_3.4.bb</filename> recipe to build
the kernel.
Suppose the BSP uses the <filename>linux-yocto_3.0.bb</filename> kernel,
which is the preferred kernel to use for developing a new BSP using the Yocto Project.
In other words, you have selected the kernel in your
<filename>&lt;bsp_name&gt;.conf</filename> file by adding the following statements:
<literallayout class='monospaced'>
PREFERRED_PROVIDER_virtual/kernel ?= "linux-yocto"
PREFERRED_VERSION_linux-yocto = "3.4%"
PREFERRED_VERSION_linux-yocto = "3.0%"
</literallayout>
You would use the <filename>linux-yocto_3.4.bbappend</filename> file to append
You would use the <filename>linux-yocto_3.0.bbappend</filename> file to append
specific BSP settings to the kernel, thus configuring the kernel for your particular BSP.
</para>
<para>
As an example, look at the existing Crown Bay BSP.
The append file used is:
<literallayout class='monospaced'>
meta-crownbay/recipes-kernel/linux/linux-yocto_3.4.bbappend
meta-crownbay/recipes-kernel/linux/linux-yocto_3.0.bbappend
</literallayout>
The following listing shows the file.
Be aware that the actual commit ID strings in this example listing might be different
@@ -541,87 +530,82 @@
FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"
COMPATIBLE_MACHINE_crownbay = "crownbay"
KMACHINE_crownbay = "crownbay"
KBRANCH_crownbay = "standard/default/crownbay"
KMACHINE_crownbay = "yocto/standard/crownbay"
KERNEL_FEATURES_append_crownbay += " cfg/smp.scc"
COMPATIBLE_MACHINE_crownbay-noemgd = "crownbay-noemgd"
KMACHINE_crownbay-noemgd = "crownbay"
KBRANCH_crownbay-noemgd = "standard/default/crownbay"
KMACHINE_crownbay-noemgd = "yocto/standard/crownbay"
KERNEL_FEATURES_append_crownbay-noemgd += " cfg/smp.scc"
SRCREV_machine_pn-linux-yocto_crownbay ?= "48101e609711fcfe8d5e737a37a5a69f4bd57d9a"
SRCREV_meta_pn-linux-yocto_crownbay ?= "5b4c9dc78b5ae607173cc3ddab9bce1b5f78129b"
SRCREV_machine_pn-linux-yocto_crownbay ?= "63c65842a3a74e4bd3128004ac29b5639f16433f"
SRCREV_meta_pn-linux-yocto_crownbay ?= "59314a3523e360796419d76d78c6f7d8c5ef2593"
SRCREV_machine_pn-linux-yocto_crownbay-noemgd ?= "48101e609711fcfe8d5e737a37a5a69f4bd57d9a"
SRCREV_meta_pn-linux-yocto_crownbay-noemgd ?= "5b4c9dc78b5ae607173cc3ddab9bce1b5f78129b"
SRCREV_machine_pn-linux-yocto_crownbay-noemgd ?= "63c65842a3a74e4bd3128004ac29b5639f16433f"
SRCREV_meta_pn-linux-yocto_crownbay-noemgd ?= "59314a3523e360796419d76d78c6f7d8c5ef2593"
</literallayout>
This append file contains statements used to support the Crown Bay BSP for both
<trademark class='registered'>Intel</trademark> EMGD and the VESA graphics.
The build process, in this case, recognizes and uses only the statements that
apply to the defined machine name - <filename>crownbay</filename> in this case.
So, the applicable statements in the <filename>linux-yocto_3.4.bbappend</filename>
So, the applicable statements in the <filename>linux-yocto_3.0.bbappend</filename>
file are follows:
<literallayout class='monospaced'>
FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"
COMPATIBLE_MACHINE_crownbay = "crownbay"
KMACHINE_crownbay = "crownbay"
KBRANCH_crownbay = "standard/default/crownbay"
KMACHINE_crownbay = "yocto/standard/crownbay"
KERNEL_FEATURES_append_crownbay += " cfg/smp.scc"
SRCREV_machine_pn-linux-yocto_crownbay ?= "48101e609711fcfe8d5e737a37a5a69f4bd57d9a"
SRCREV_meta_pn-linux-yocto_crownbay ?= "5b4c9dc78b5ae607173cc3ddab9bce1b5f78129b"
SRCREV_machine_pn-linux-yocto_crownbay ?= "63c65842a3a74e4bd3128004ac29b5639f16433f"
SRCREV_meta_pn-linux-yocto_crownbay ?= "59314a3523e360796419d76d78c6f7d8c5ef2593"
</literallayout>
The append file defines <filename>crownbay</filename> as the
<ulink url='&YOCTO_DOCS_REF_URL;#var-COMPATIBLE_MACHINE'><filename>COMPATIBLE_MACHINE</filename></ulink>
and uses the
<ulink url='&YOCTO_DOCS_REF_URL;#var-KMACHINE'><filename>KMACHINE</filename></ulink> variable to
ensure the machine name used by the OpenEmbedded build system maps to the
machine name used by the Linux Yocto kernel.
The file also uses the optional
<ulink url='&YOCTO_DOCS_REF_URL;#var-KBRANCH'><filename>KBRANCH</filename></ulink> variable
to ensure the build process uses the <filename>standard/default/crownbay</filename>
kernel branch.
Finally, the append file points to the specific top commits in the
<ulink url='&YOCTO_DOCS_DEV_URL;#source-directory'>source directory</ulink> Git
The append file defines <filename>crownbay</filename> as the compatible machine and
defines the <filename>KMACHINE</filename>.
The file also points to some configuration fragments to use by setting the
<filename>KERNEL_FEATURES</filename> variable.
The location for the configuration fragments is the kernel tree itself in the
<ulink url='&YOCTO_DOCS_DEV_URL;#yocto-project-build-directory'>Yocto Project Build
Directory</ulink> under <filename>linux/meta</filename>.
Finally, the append file points to the specific commits in the
<ulink url='&YOCTO_DOCS_DEV_URL;#yocto-project-files'>Yocto Project Files</ulink> Git
repository and the <filename>meta</filename> Git repository branches to identify the
exact kernel needed to build the Crown Bay BSP.
</para>
<para>
One thing missing in this particular BSP, which you will typically need when
developing a BSP, is the kernel configuration file (<filename>.config</filename>) for your BSP.
When developing a BSP, you probably have a kernel configuration file or a set of kernel
configuration files that, when taken together, define the kernel configuration for your BSP.
You can accomplish this definition by putting the configurations in a file or a set of files
inside a directory located at the same level as your kernel's append file and having the same
name as the kernel's main recipe file.
With all these conditions met, simply reference those files in a
inside a directory located at the same level as your append file and having the same name
as the kernel.
With all these conditions met, simply reference those files in a
<filename>SRC_URI</filename> statement in the append file.
</para>
<para>
For example, suppose you had some configuration options in a file called
<filename>network_configs.cfg</filename>.
You can place that file inside a directory named <filename>/linux-yocto</filename> and then add
a <filename>SRC_URI</filename> statement such as the following to the append file.
When the OpenEmbedded build system builds the kernel, the configuration options are
picked up and applied.
For example, suppose you had a set of configuration options in a file called
<filename>myconfig</filename>.
If you put that file inside a directory named
<filename>/linux-yocto</filename> and then added
a <filename>SRC_URI</filename> statement such as the following to the append file,
those configuration
options will be picked up and applied when the kernel is built.
<literallayout class='monospaced'>
SRC_URI += "file://network_configs.cfg"
SRC_URI += "file://myconfig"
</literallayout>
</para>
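<para>
A configuration fragment itself is just a list of kernel configuration
options, one per line, in the same form they take in a generated
<filename>.config</filename> file.
As a sketch only (the option names below are illustrative and depend on your
hardware), a fragment such as <filename>network_configs.cfg</filename> might contain:
<literallayout class='monospaced'>
     # Example kernel configuration fragment (illustrative options)
     CONFIG_NETDEVICES=y
     CONFIG_E1000E=y
</literallayout>
</para>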
<para>
To group related configurations into multiple files, you perform a similar procedure.
Here is an example that groups separate configurations specifically for Ethernet and graphics
into their own files and adds the configurations
by using a <filename>SRC_URI</filename> statement like the following in your append file:
As mentioned earlier, you can group related configurations into multiple files and
name them all in the <filename>SRC_URI</filename> statement as well.
For example, you could group separate configurations specifically for Ethernet and graphics
into their own files and add those by using a <filename>SRC_URI</filename> statement like the
following in your append file:
<literallayout class='monospaced'>
SRC_URI += "file://myconfig.cfg \
SRC_URI += "file://myconfig \
file://eth.cfg \
file://gfx.cfg"
</literallayout>
</para>
<para>
The <filename>FILESEXTRAPATHS</filename> variable is in boilerplate form in the
previous example in order to make it easy to do that.
@@ -630,253 +614,36 @@
The <filename>FILESEXTRAPATHS</filename> variable enables the build process to
find those configuration files.
</para>
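<para>
For reference, here is a sketch of how these pieces might be laid out on disk
relative to the kernel append file (the file names are illustrative):
<literallayout class='monospaced'>
     meta-&lt;bsp_name&gt;/recipes-kernel/linux/
         linux-yocto_&lt;version&gt;.bbappend
         linux-yocto/
             myconfig.cfg
             eth.cfg
             gfx.cfg
</literallayout>
Because the append file prepends <filename>${THISDIR}/${PN}</filename> to
<filename>FILESEXTRAPATHS</filename>, the build process searches the
<filename>linux-yocto</filename> directory for the files named in the
<filename>SRC_URI</filename> statement.
</para>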
<note>
<para>
Other methods exist to accomplish grouping and defining configuration options.
For example, if you are working with a local clone of the kernel repository,
you could checkout the kernel's <filename>meta</filename> branch, make your changes,
and then push the changes to the local bare clone of the kernel.
The result is that you directly add configuration options to the
<filename>meta</filename> branch for your BSP.
The configuration options will likely end up in that location anyway if the BSP gets
added to the Yocto Project.
For an example showing how to change the BSP configuration, see the
"<ulink url='&YOCTO_DOCS_DEV_URL;#changing-the-bsp-configuration'>Changing the BSP Configuration</ulink>"
section in the Yocto Project Development Manual.
For a better understanding of working with a local clone of the kernel repository
and a local bare clone of the kernel, see the
"<ulink url='&YOCTO_DOCS_DEV_URL;#modifying-the-kernel-source-code'>Modifying the Kernel
Source Code</ulink>" section also in the Yocto Project Development Manual.
</para>
Other methods exist to accomplish grouping and defining configuration options.
For example, if you are working with a local clone of the kernel repository,
you could checkout the kernel's <filename>meta</filename> branch, make your changes,
and then push the changes to the local bare clone of the kernel.
The result is that you directly add configuration options to the Yocto kernel
<filename>meta</filename> branch for your BSP.
The configuration options will likely end up in that location anyway if the BSP gets
added to the Yocto Project.
For an example showing how to change the BSP configuration, see the
"<ulink url='&YOCTO_DOCS_DEV_URL;#changing-the-bsp-configuration'>Changing the BSP Configuration</ulink>"
section in the Yocto Project Development Manual.
For a better understanding of working with a local clone of the kernel repository
and a local bare clone of the kernel, see the
"<ulink url='&YOCTO_DOCS_DEV_URL;#modifying-the-kernel-source-code'>Modifying the Kernel
Source Code</ulink>" section also in the Yocto Project Development Manual.</para>
<para>
In general, however, the Yocto Project maintainers take care of moving the
<filename>SRC_URI</filename>-specified
configuration options to the kernel's <filename>meta</filename> branch.
Not only is it easier for BSP developers to not have to worry about putting those
configurations in the branch, but having the maintainers do it allows them to apply
'global' knowledge about the kinds of common configuration options multiple BSPs in
the tree are typically using.
This allows for promotion of common configurations into common features.
</para>
In general, however, the Yocto Project maintainers take care of moving the
<filename>SRC_URI</filename>-specified
configuration options to the kernel's <filename>meta</filename> branch.
Not only is it easier for BSP developers to not have to worry about putting those
configurations in the branch, but having the maintainers do it allows them to apply
'global' knowledge about the kinds of common configuration options multiple BSPs in
the tree are typically using.
This allows for promotion of common configurations into common features.</para>
</note>
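<para>
If you do use the local clone method described in the previous note, the Git
operations involved are ordinary ones.
A rough sketch follows (the repository directory name and the commit message
are illustrative only):
<literallayout class='monospaced'>
     $ cd linux-yocto-3.0               # your copy of the bare clone
     $ git checkout meta
       [edit or add the configuration fragments for your BSP]
     $ git commit -a -m "meta: add BSP configuration options"
     $ git push origin meta             # push back to the local bare clone
</literallayout>
</para>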
</section>
</section>
<section id='requirements-and-recommendations-for-released-bsps'>
<title>Requirements and Recommendations for Released BSPs</title>
<para>
Certain requirements exist for a released BSP to be considered
compliant with the Yocto Project.
Additionally, a single recommendation also exists.
This section describes the requirements and recommendation for
released BSPs.
</para>
<section id='released-bsp-requirements'>
<title>Released BSP Requirements</title>
<para>
Before looking at BSP requirements, you should consider the following:
<itemizedlist>
<listitem><para>The requirements here assume the BSP layer is a well-formed, "legal"
layer that can be added to the Yocto Project.
For guidelines on creating a layer that meets these base requirements, see the
"<link linkend='bsp-layers'>BSP Layers</link>" and the
"<ulink url='&YOCTO_DOCS_DEV_URL;#understanding-and-creating-layers'>Understanding
and Creating Layers"</ulink> in the Yocto Project Development Manual.</para></listitem>
<listitem><para>The requirements in this section apply regardless of how you
ultimately package a BSP.
You should consult the packaging and distribution guidelines for your
specific release process.
For an example of packaging and distribution requirements, see the
<ulink url='https://wiki.yoctoproject.org/wiki/Third_Party_BSP_Release_Process'>Third
Party BSP Release Process</ulink> wiki page.</para></listitem>
<listitem><para>The requirements for the BSP as it is made available to a developer
are completely independent of the released form of the BSP.
For example, the BSP metadata can be contained within a Git repository
and could have a directory structure completely different from what appears
in the officially released BSP layer.</para></listitem>
<listitem><para>It is not required that specific packages or package
modifications exist in the BSP layer, beyond the requirements for general
compliance with the Yocto Project.
For example, no requirement exists dictating that a specific kernel or
kernel version be used in a given BSP.</para></listitem>
</itemizedlist>
</para>
<para>
Following are the requirements for a released BSP that conforms to the
Yocto Project:
<itemizedlist>
<listitem><para><emphasis>Layer Name:</emphasis>
The BSP must have a layer name that follows the Yocto
Project standards.
For information on BSP layer names, see the
"<link linkend='bsp-layers'>BSP Layers</link>" section.
</para></listitem>
<listitem><para><emphasis>File System Layout:</emphasis>
When possible, use the same directory names in your
BSP layer as listed in the <filename>recipes.txt</filename> file.
In particular, you should place recipes
(<filename>.bb</filename> files) and recipe
modifications (<filename>.bbappend</filename> files) into
<filename>recipes-*</filename> subdirectories by functional area
as outlined in <filename>recipes.txt</filename>.
If you cannot find a category in <filename>recipes.txt</filename>
to fit a particular recipe, you can make up your own
<filename>recipe-*</filename> subdirectory.
You can find <filename>recipes.txt</filename> in the
<filename>meta</filename> directory of the
<ulink url='&YOCTO_DOCS_DEV_URL;#source-directory'>source directory</ulink>,
or in the OpenEmbedded Core Layer
(<filename>openembedded-core</filename>) found at
<ulink url='http://git.openembedded.org/openembedded-core/tree/meta'></ulink>.
</para>
<para>Within any particular <filename>recipes-*</filename> category, the layout
should match what is found in the OpenEmbedded Core
Git repository (<filename>openembedded-core</filename>)
or the source directory (<filename>poky</filename>).
In other words, make sure you place related files in appropriately
related <filename>recipes-*</filename> subdirectories specific to the
recipe's function, or within a subdirectory containing a set of closely-related
recipes.
The recipes themselves should follow the general guidelines
for recipes used in the Yocto Project found in the
<ulink url='https://wiki.yoctoproject.org/wiki/Recipe_%26_Patch_Style_Guide'>Yocto
Recipe and Patch Style Guide</ulink>.</para></listitem>
<listitem><para><emphasis>License File:</emphasis>
You must include a license file in the
<filename>meta-&lt;bsp_name&gt;</filename> directory.
This license covers the BSP metadata as a whole.
You must specify which license to use since there is no
default license if one is not specified.
See the
<ulink url='&YOCTO_GIT_URL;/cgit.cgi/meta-intel/tree/meta-fishriver/COPYING.MIT'><filename>COPYING.MIT</filename></ulink>
file for the Fish River BSP in the <filename>meta-fishriver</filename> BSP layer
as an example.</para></listitem>
<listitem><para><emphasis>README File:</emphasis>
You must include a <filename>README</filename> file in the
<filename>meta-&lt;bsp_name&gt;</filename> directory.
See the
<ulink url='&YOCTO_GIT_URL;/cgit.cgi/meta-intel/tree/meta-fishriver/README'><filename>README</filename></ulink>
file for the Fish River BSP in the <filename>meta-fishriver</filename> BSP layer
as an example.</para>
<para>At a minimum, the <filename>README</filename> file should
contain the following:
<itemizedlist>
<listitem><para>A brief description about the hardware the BSP
targets.</para></listitem>
<listitem><para>A list of all the dependencies
on which the BSP layer depends.
These dependencies are typically a list of required layers needed
to build the BSP.
However, the dependencies should also contain information regarding
any other dependencies the BSP might have.</para></listitem>
<listitem><para>Any required special licensing information.
For example, this includes information on
special variables needed to satisfy a EULA,
or instructions needed to build or distribute
binaries built from the BSP metadata.</para></listitem>
<listitem><para>The name and contact information for the
BSP layer maintainer.
This is the person to whom patches and questions should
be sent.</para></listitem>
<listitem><para>Instructions on how to build the BSP using the BSP
layer.</para></listitem>
<listitem><para>Instructions on how to boot the BSP build from
the BSP layer.</para></listitem>
<listitem><para>Instructions on how to boot the binary images
contained in the <filename>/binary</filename> directory,
if present.</para></listitem>
<listitem><para>Information on any known bugs or issues that users
should know about when either building or booting the BSP
binaries.</para></listitem>
</itemizedlist></para></listitem>
<listitem><para><emphasis>README.sources File:</emphasis>
You must include a <filename>README.sources</filename> in the
<filename>meta-&lt;bsp_name&gt;</filename> directory.
This file specifies exactly where you can find the sources used to
generate the binary images contained in the
<filename>/binary</filename> directory, if present.
See the
<ulink url='&YOCTO_GIT_URL;/cgit.cgi/meta-intel/tree/meta-fishriver/README.sources'><filename>README.sources</filename></ulink>
file for the Fish River BSP in the <filename>meta-fishriver</filename> BSP layer
as an example.</para></listitem>
<listitem><para><emphasis>Layer Configuration File:</emphasis>
You must include a <filename>conf/layer.conf</filename> in the
<filename>meta-&lt;bsp_name&gt;</filename> directory.
This file identifies the <filename>meta-&lt;bsp_name&gt;</filename>
BSP layer as a layer to the build system (a minimal sketch appears after this list).</para></listitem>
<listitem><para><emphasis>Machine Configuration File:</emphasis>
You must include a <filename>conf/machine/&lt;bsp_name&gt;.conf</filename>
in the <filename>meta-&lt;bsp_name&gt;</filename> directory.
This configuration file defines a machine target that can be built
using the BSP layer.
Multiple machine configuration files define variations of machine
configurations that are supported by the BSP.
If a BSP supports multiple machine variations, you need to
adequately describe each variation in the BSP
<filename>README</filename> file.
Do not use multiple machine configuration files to describe disparate
hardware.
Multiple machine configuration files should describe very similar targets.
If you do have very different targets, you should create a separate
BSP.
<note>It is completely possible for a developer to structure the
working repository as a conglomeration of unrelated BSP
files, and to possibly generate specifically targeted 'release' BSPs
from that directory using scripts or some other mechanism.
Such considerations are outside the scope of this document.</note>
</para></listitem>
</itemizedlist>
</para>
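<para>
As an illustration of the layer configuration file requirement, a minimal
<filename>conf/layer.conf</filename> might look like the following sketch
(the collection name <filename>mybsp</filename> is a placeholder and the
priority value can vary):
<literallayout class='monospaced'>
     # We have a conf directory, append to BBPATH
     BBPATH := "${BBPATH}:${LAYERDIR}"

     # We have recipes-* directories, append to BBFILES
     BBFILES := "${BBFILES} ${LAYERDIR}/recipes-*/*/*.bb \
                 ${LAYERDIR}/recipes-*/*/*.bbappend"

     BBFILE_COLLECTIONS += "mybsp"
     BBFILE_PATTERN_mybsp := "^${LAYERDIR}/"
     BBFILE_PRIORITY_mybsp = "6"
</literallayout>
</para>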
</section>
<section id='released-bsp-recommendations'>
<title>Released BSP Recommendations</title>
<para>
Following are recommendations for a released BSP that conforms to the
Yocto Project:
<itemizedlist>
<listitem><para><emphasis>Bootable Images:</emphasis>
BSP releases
can contain one or more bootable images.
Including bootable images allows users to easily try out the BSP
on their own hardware.</para>
<para>In some cases, it might not be convenient to include a
bootable image.
In this case, you might want to make two versions of the
BSP available: one that contains binary images, and one
that does not.
The version that does not contain bootable images avoids
unnecessary download times for users not interested in the images.
</para>
<para>If you need to distribute a BSP and include bootable images or build kernel and
filesystems meant to allow users to boot the BSP for evaluation
purposes, you should put the images and artifacts within a
<filename>binary/</filename> subdirectory located in the
<filename>meta-&lt;bsp_name&gt;</filename> directory.
<note>If you do include a bootable image as part of the BSP and the image
was built by software covered by the GPL or other open source licenses,
it is your responsibility to understand
and meet all licensing requirements, which could include distribution
of source files.</note></para></listitem>
<listitem><para><emphasis>Use a Yocto Linux Kernel:</emphasis>
Kernel recipes in the BSP should be based on a Yocto Linux kernel.
Basing your recipes on these kernels reduces the costs for maintaining
the BSP and increases its scalability.
See the <filename>Yocto Linux Kernel</filename> category in the
<ulink url='&YOCTO_GIT_URL;/cgit.cgi'><filename>Yocto Source Repositories</filename></ulink>
for these kernels.</para></listitem>
</itemizedlist>
</para>
</section>
</section>
<section id='customizing-a-recipe-for-a-bsp'>
<title>Customizing a Recipe for a BSP</title>
@@ -921,7 +688,7 @@
for a component or components.
For these cases, you are required to accept the terms of a commercial or other
type of license that requires some kind of explicit End User License Agreement (EULA).
Once the license is accepted, the OpenEmbedded build system can then build and
Once the license is accepted, the Yocto Project build system can then build and
include the corresponding component in the final BSP image.
If the BSP is available as a pre-built image, you can download the image after
agreeing to the license or EULA.
@@ -964,12 +731,13 @@
</para>
<para>
A couple different methods exist within the OpenEmbedded build system to
satisfy the licensing requirements for an encumbered BSP.
A couple different methods exist within the Yocto
Project build system to satisfy the licensing
requirements for an encumbered BSP.
The following list describes them in order of preference:
<orderedlist>
<listitem><para><emphasis>Use the <filename>LICENSE_FLAGS</filename> variable
to define the recipes that have commercial or other types of
to define the Yocto Project recipes that have commercial or other types of
specially-licensed packages:</emphasis>
For each of those recipes, you can
specify a matching license string in a
@@ -992,7 +760,7 @@
restart the build to continue where it left off.
During the build, the prompt will not appear again
since you have satisfied the requirement.</para>
<para>Once the appropriate license flags are on the white list
<para>Once the appropriate license flags are whitelisted
in the <filename>LICENSE_FLAGS_WHITELIST</filename> variable, you
can build the encumbered image with no change at all
to the normal build process.</para></listitem>
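<para>
As a sketch of this first method (the recipe name
<filename>gst-plugins-ugly</filename> is only an example of a recipe carrying
a commercial flag), the recipe declares the flag and the build configuration
whitelists it:
<literallayout class='monospaced'>
     # In the recipe:
     LICENSE_FLAGS = "commercial"

     # In conf/local.conf:
     LICENSE_FLAGS_WHITELIST = "commercial_gst-plugins-ugly"
</literallayout>
</para>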
@@ -1034,7 +802,7 @@
The Yocto Project includes a couple of tools that enable
you to create a <link linkend='bsp-layers'>BSP layer</link>
from scratch and do basic configuration and maintenance
of the kernel without ever looking at a metadata file.
of the kernel without ever looking at a Yocto Project metadata file.
These tools are <filename>yocto-bsp</filename> and <filename>yocto-kernel</filename>,
respectively.
</para>
@@ -1061,7 +829,8 @@
<para>
Both tools reside in the <filename>scripts/</filename> subdirectory
of the <ulink url='&YOCTO_DOCS_DEV_URL;#source-directory'>source directory</ulink>.
of the <ulink url='&YOCTO_DOCS_DEV_URL;#yocto-project-files'>Yocto Project
Files</ulink>.
Consequently, to use the scripts, you must <filename>source</filename> the
environment just as you would when invoking a build:
<literallayout class='monospaced'>
@@ -1113,7 +882,7 @@
[-i &lt;JSON PROPERTY FILE&gt; | --infile &lt;JSON PROPERTY_FILE&gt;]
This command creates a Yocto BSP based on the specified parameters.
The new BSP will be a new BSP layer contained by default within
The new BSP will be a new Yocto BSP layer contained by default within
the top-level directory specified as 'meta-bsp-name'. The -o option
can be used to place the BSP layer in a directory with a different
name and location.
@@ -1150,7 +919,7 @@
...
NOTE: Once created, you should add your new layer to your
bblayers.conf file in order for it to be subsequently seen and
modified by the yocto-kernel tool.
NOTE for x86- and x86_64-based BSPs: The generated BSP assumes the
@@ -1162,7 +931,7 @@
<para>
Now that you know where these two commands reside and how to access information
on them, you should find it relatively straightforward to discover the commands
necessary to create a BSP and perform basic kernel maintenance on that BSP using
the tools.
The next sections provide a concrete starting point to expand on a few points that
might not be immediately obvious or that could use further explanation.
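<para>
As a hedged example of the simplest invocation (the BSP name and architecture
here are illustrative), creating a new BSP layer can be as brief as:
<literallayout class='monospaced'>
     $ yocto-bsp create mymachine qemux86
</literallayout>
The script then walks you through a series of prompts, as described in the
next section.
</para>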
@@ -1221,7 +990,7 @@
In every other way, this architecture is representative of how creating a BSP for
a 'real' machine would work.
The reason the example uses this architecture is because it is an emulated architecture
and can easily be followed without requiring actual hardware.
</para>
<para>
@@ -1290,7 +1059,7 @@
If you enter 'n', the script prompts you to further enter the kernel
you do want to use (e.g. 3.0, 3.2_preempt-rt, etc.).</para></listitem>
<listitem><para>Next, the script asks whether you would like to have a new
branch created especially for your BSP in the local
<ulink url='&YOCTO_DOCS_DEV_URL;#local-kernel-files'>Linux Yocto Kernel</ulink>
Git repository.
If not, then the script re-uses an existing branch.</para>
@@ -1319,8 +1088,8 @@
<listitem><para>The remainder of the prompts are routine.
Defaults are accepted for each.</para></listitem>
<listitem><para>By default, the script creates the new BSP Layer in the
<ulink url='&YOCTO_DOCS_DEV_URL;#build-directory'>build directory</ulink>.
</para></listitem>
<ulink url='&YOCTO_DOCS_DEV_URL;#yocto-project-build-directory'>Yocto Project
Build Directory</ulink>.</para></listitem>
</orderedlist>
</para>
@@ -1345,7 +1114,8 @@
<title>Managing Kernel Patches and Config Items with yocto-kernel</title>
<para>
Assuming you have created a <link linkend='bsp-layers'>BSP Layer</link> using
Assuming you have created a Yocto Project
<link linkend='bsp-layers'>BSP Layer</link> using
<link linkend='creating-a-new-bsp-layer-using-the-yocto-bsp-script'>
<filename>yocto-bsp</filename></link> and you added it to your
<ulink url='&YOCTO_DOCS_REF_URL;#var-BBLAYERS'><filename>BBLAYERS</filename></ulink>
@@ -1356,7 +1126,7 @@
<para>
The <filename>yocto-kernel</filename> script allows you to add, remove, and list patches
and kernel config settings to a BSP's kernel
and kernel config settings to a Yocto Project BSP's kernel
<filename>.bbappend</filename> file.
All you need to do is use the appropriate sub-command.
Recall that the easiest way to see exactly what sub-commands are available
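<para>
As a hedged illustration of the kinds of sub-commands involved (the BSP name,
configuration option, and patch file below are examples only):
<literallayout class='monospaced'>
     $ yocto-kernel config list mymachine
     $ yocto-kernel config add mymachine CONFIG_PRINTK_TIME=y
     $ yocto-kernel patch add mymachine ~/test.patch
</literallayout>
</para>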

Binary file not shown.


View File

@@ -110,7 +110,7 @@ h5 {
h6 {
margin: 1em 0em 0em 0em;
padding: 1em 0em 0em 0em;
font-size: 110%;
font-size: 80%;
font-weight: bold;
}

View File

@@ -23,20 +23,19 @@
</para>
<section id='getting-local-yocto-project-files-and-bsp-files'>
<title>Getting Local Source Files and BSP Files</title>
<title>Getting Local Yocto Project Files and BSP Files</title>
<para>
You need to have the <link linkend='source-directory'>source directory</link>
available on your host system.
You can set up this directory through tarball extraction or by cloning the
<filename>poky</filename> Git repository.
You need to have the Yocto Project files available on your host system.
You can get files through tarball extraction or by cloning the <filename>poky</filename>
Git repository.
The following paragraphs describe both methods.
For additional information, see the bulleted item
"<link linkend='local-yp-release'>Yocto Project Release</link>".
</para>
<para>
As mentioned, one way to set up the source directory is to use Git to clone the
As mentioned, one way to get the Yocto Project files is to use Git to clone the
<filename>poky</filename> repository.
These commands create a local copy of the Git repository.
By default, the top-level directory of the repository is named <filename>poky</filename>:
@@ -44,10 +43,10 @@
$ git clone git://git.yoctoproject.org/poky
$ cd poky
</literallayout>
Alternatively, you can start with the downloaded Poky "&DISTRO_NAME;" tarball.
These commands unpack the tarball into a source directory structure.
By default, the top-level directory of the source directory is named
<filename>&YOCTO_POKY;</filename>:
Alternatively, you can start with the downloaded Poky "edison" tarball.
These commands unpack the tarball into a Yocto Project File directory structure.
By default, the top-level directory of the file structure is named
<filename>poky-&YOCTO_POKY;</filename>:
<literallayout class='monospaced'>
$ tar xfj &YOCTO_POKY_TARBALL;
$ cd &YOCTO_POKY;
@@ -55,19 +54,20 @@
<note><para>If you're using the tarball method, you can ignore all the following steps that
ask you to carry out Git operations.
You already have the results of those operations
in the form of the &DISTRO_NAME; release tarballs.
in the form of the edison release tarballs.
Consequently, there is nothing left to do other than extract those tarballs into the
proper locations.</para>
<para>Once you expand the released tarball, you have a snapshot of the Git repository
that represents a specific release.
Fundamentally, this is different than having a local copy of the Poky Git repository.
Fundamentally, this is different than having a local copy of the Yocto Project
Git repository.
Given the tarball method, changes you make are building on top of a release.
With the Git repository method you have the ability to track development
and keep changes in revision control.
See the
"<link linkend='repositories-tags-and-branches'>Repositories, Tags, and Branches</link>" section
for more discussion around these differences.</para></note>
</para>
<para>
@@ -133,12 +133,11 @@
<para>
You need to have the base BSP layer on your development system.
Similar to the local <link linkend='source-directory'>source directory</link>,
you can get the BSP
Similar to the local Yocto Project files, you can get the BSP
layer in a couple of different ways:
download the BSP tarball and extract it, or set up a local Git repository that
has the BSP layers.
You should use the same method that you used to set up the source directory earlier.
has the Yocto Project BSP layers.
You should use the same method that you used to get the local Yocto Project files earlier.
See "<link linkend='getting-setup'>Getting Setup</link>" for information on how to get
the BSP files.
</para>
@@ -161,15 +160,15 @@
$ cd meta-intel
</literallayout>
Alternatively, you can start with the downloaded Crown Bay tarball.
You can download the &DISTRO_NAME; version of the BSP tarball from the
You can download the edison version of the BSP tarball from the
<ulink url='&YOCTO_HOME_URL;/download'>Download</ulink> page of the
Yocto Project website.
Here is the specific link for the tarball needed for this example:
<ulink url='&YOCTO_MACHINES_DL_URL;/crownbay-noemgd/crownbay-noemgd-&DISTRO_NAME;-&POKYVERSION;.tar.bz2'></ulink>.
<ulink url='&YOCTO_MACHINES_DL_URL;/crownbay-noemgd/crownbay-noemgd-&DISTRO_NAME;-6.0.0.tar.bz2'></ulink>.
Again, be sure that you are already in the <filename>poky</filename> directory
as described previously before installing the tarball:
<literallayout class='monospaced'>
$ tar xfj crownbay-noemgd-&DISTRO_NAME;-&POKYVERSION;.tar.bz2
$ tar xfj crownbay-noemgd-&DISTRO_NAME;-6.0.0.tar.bz2
$ cd meta-intel
</literallayout>
</para>
@@ -178,7 +177,7 @@
The <filename>meta-intel</filename> directory contains all the metadata
that supports BSP creation.
If you're using the Git method, the following
step will switch to the &DISTRO_NAME; metadata.
step will switch to the edison metadata.
If you're using the tarball method, you already have the correct metadata and can
skip to the next step.
Because <filename>meta-intel</filename> is its own Git repository, you will want
@@ -186,7 +185,6 @@
For this example we are going to use the <filename>&DISTRO_NAME;</filename> branch.
<literallayout class='monospaced'>
$ git checkout -b &DISTRO_NAME; origin/&DISTRO_NAME;
Branch &DISTRO_NAME; set up to track remote branch &DISTRO_NAME; from origin.
Switched to a new branch '&DISTRO_NAME;'
</literallayout>
</para>
@@ -196,8 +194,8 @@
<title>Making a Copy of the Base BSP to Create Your New BSP Layer</title>
<para>
Now that you have set up the source directory and included the base BSP files, you need to
create a new layer for your BSP.
Now that you have the local Yocto Project files and the base BSP files, you need to create a
new layer for your BSP.
To create your BSP layer, you simply copy the <filename>meta-crownbay</filename>
layer to a new layer.
</para>
@@ -207,7 +205,7 @@
The name should follow the BSP layer naming convention, which is
<filename>meta-&lt;name&gt;</filename>.
The following assumes your working directory is <filename>meta-intel</filename>
inside your source directory.
inside the local Yocto Project files.
To start your new layer, just copy the new layer alongside the existing
BSP layers in the <filename>meta-intel</filename> directory:
<literallayout class='monospaced'>
@@ -239,7 +237,7 @@
First, since in this example the new BSP will not support EMGD, we will get rid of the
<filename>crownbay.conf</filename> file and then rename the
<filename>crownbay-noemgd.conf</filename> file to <filename>mymachine.conf</filename>.
Much of what we do in the configuration directory is designed to help the OpenEmbedded
Much of what we do in the configuration directory is designed to help the Yocto Project
build system work with the new layer and to be able to find and use the right software.
The following two commands result in a single machine configuration file named
<filename>mymachine.conf</filename>.
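<para>
For example, a pair of shell commands that accomplishes this from within the
layer's <filename>conf/machine</filename> directory (a sketch) is:
<literallayout class='monospaced'>
     $ rm crownbay.conf
     $ mv crownbay-noemgd.conf mymachine.conf
</literallayout>
</para>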
@@ -263,10 +261,10 @@
<para>
Note that inside the <filename>mymachine.conf</filename> is the
<filename>PREFERRED_VERSION_linux-yocto</filename> statement.
<filename>PREFERRED_PROVIDER_virtual/kernel</filename> statement.
This statement identifies the kernel that the BSP is going to use.
In this case, the BSP is using <filename>linux-yocto</filename>, which is the
current Yocto Project kernel based on the Linux 3.2 release.
current Linux Yocto kernel based on the Linux 3.0 release.
</para>
<para>
@@ -285,8 +283,6 @@
BBFILE_COLLECTIONS += "crownbay"
BBFILE_PATTERN_crownbay := "^${LAYERDIR}/"
BBFILE_PRIORITY_crownbay = "6"
LAYERDEPENDS_crownbay = "intel"
</literallayout>
</para>
@@ -297,8 +293,6 @@
BBFILE_COLLECTIONS += "mymachine"
BBFILE_PATTERN_mymachine := "^${LAYERDIR}/"
BBFILE_PRIORITY_mymachine = "6"
LAYERDEPENDS_mymachine = "intel"
</literallayout>
</para>
</section>
@@ -312,7 +306,7 @@
When you create a BSP, you use these areas for appropriate recipes and append files.
Recipes take the form of <filename>.bb</filename> files, while append files take
the form of <filename>.bbappend</filename> files.
If you want to leverage the existing recipes the OpenEmbedded build system uses
If you want to leverage the existing recipes the Yocto Project build system uses
but change those recipes, you can use <filename>.bbappend</filename> files.
All new recipes and append files for your layer must go in the layers
<filename>recipes-bsp</filename>, <filename>recipes-kernel</filename>,
@@ -365,7 +359,7 @@
Now let's look at changes in <filename>recipes-core</filename>.
The file <filename>task-core-tools.bbappend</filename> in
<filename>recipes-core/tasks</filename> appends the similarly named recipe
located in the <link linkend='source-directory'>source directory</link> at
located in the local <link linkend='yocto-project-files'>Yocto Project Files</link> at
<filename>meta/recipes-core/tasks</filename>.
The append file in our layer right now is Crown Bay-specific and supports
EMGD and non-EMGD.
@@ -395,9 +389,9 @@
Recall that the BSP uses the <filename>linux-yocto</filename> kernel as determined
earlier in the <filename>mymachine.conf</filename>.
The recipe for that kernel is not located in the
BSP layer but rather in the source directory at
BSP layer but rather in the local Yocto Project files at
<filename>meta/recipes-kernel/linux</filename> and is
named <filename>linux-yocto_3.2.bb</filename>.
named <filename>linux-yocto_3.0.bb</filename>.
The <filename>SRCREV_machine</filename> and <filename>SRCREV_meta</filename>
statements point to the exact commits used by the Yocto Project development team
in their source repositories that identify the right kernel for our hardware.
@@ -410,7 +404,7 @@
<para>
However, in the <filename>meta-mymachine</filename> layer in
<filename>recipes-kernel/linux</filename> resides a <filename>.bbappend</filename>
file named <filename>linux-yocto_3.2.bbappend</filename> that
file named <filename>linux-yocto_3.0.bbappend</filename> that
appends information to the recipe of the same name in <filename>meta/recipes-kernel/linux</filename>.
Thus, the <filename>SRCREV</filename> statements in the append file override
the more general statements found in <filename>meta</filename>.
@@ -419,20 +413,17 @@
<para>
The <filename>SRCREV</filename> statements in the append file currently identify
the kernel that supports the Crown Bay BSP with and without EMGD support.
Here are the statements:
<note>The commit ID strings used in this manual might not match the actual commit
ID strings found in the <filename>linux-yocto_3.2.bbappend</filename> file.
For the example, this difference does not matter.</note>
Here are the statements:
<literallayout class='monospaced'>
SRCREV_machine_pn-linux-yocto_crownbay ?= \
"211fc7f4d10ec2b82b424286aabbaff9254b7cbd"
"2247da9131ea7e46ed4766a69bb1353dba22f873"
SRCREV_meta_pn-linux-yocto_crownbay ?= \
"514847185c78c07f52e02750fbe0a03ca3a31d8f"
"d05450e4aef02c1b7137398ab3a9f8f96da74f52"
SRCREV_machine_pn-linux-yocto_crownbay-noemgd ?= \
"211fc7f4d10ec2b82b424286aabbaff9254b7cbd"
"2247da9131ea7e46ed4766a69bb1353dba22f873"
SRCREV_meta_pn-linux-yocto_crownbay-noemgd ?= \
"514847185c78c07f52e02750fbe0a03ca3a31d8f"
"d05450e4aef02c1b7137398ab3a9f8f96da74f52"
</literallayout>
</para>
@@ -447,19 +438,19 @@
</para>
<para>
To fix this situation in <filename>linux-yocto_3.2.bbappend</filename>,
To fix this situation in <filename>linux-yocto_3.0.bbappend</filename>,
we delete the two <filename>SRCREV</filename> statements that support
EMGD (the top pair).
We also change the remaining pair to specify <filename>mymachine</filename>
and insert the commit identifiers to identify the kernel in which we
are interested, which will be based on the <filename>atom-pc-standard</filename>
kernel.
In this case, because we're working with the &DISTRO_NAME; branch of everything, we
In this case, because we're working with the edison branch of everything, we
need to use the <filename>SRCREV</filename> values for the atom-pc branch
that are associated with the &DISTRO_NAME; release.
that are associated with the edison release.
To find those values, we need to find the <filename>SRCREV</filename>
values that &DISTRO_NAME; uses for the atom-pc branch, which we find in the
<filename>poky/meta-yocto/recipes-kernel/linux/linux-yocto_3.2.bbappend</filename>
values that edison uses for the atom-pc branch, which we find in the
<filename>poky/meta-yocto/recipes-kernel/linux/linux-yocto_3.0.bbappend</filename>
file.
</para>
@@ -468,21 +459,23 @@
<filename>SRCREV_machine_atom-pc</filename> variable.
The meta <filename>SRCREV</filename> isn't specified in this file, so it must be
specified in the base kernel recipe in the
<filename>poky/meta/recipes-kernel/linux/linux-yocto_3.2.bb</filename>
file, in the <filename>SRCREV_meta</filename> variable found there.
<filename>poky/meta/recipes-kernel/linux/linux-yocto_3.0.bb</filename>
file, in the <filename>SRCREV_meta variable</filename> found there.
It happens to be the same as the value we already inherited from the
<filename>meta-crownbay</filename> BSP.
Here are the final <filename>SRCREV</filename> statements:
<literallayout class='monospaced'>
SRCREV_machine_pn-linux-yocto_mymachine ?= \
"f29531a41df15d74be5ad47d958e4117ca9e489e"
SRCREV_meta_pn-linux-yocto_mymachine ?= \
"b14a08f5c7b469a5077c10942f4e1aec171faa9d"
SRCREV_machine_pn-linux-yocto_mymachine ?= \
"1e18e44adbe79b846e382370eb29bc4b8cd5a1a0"
SRCREV_meta_pn-linux-yocto_mymachine ?= \
"d05450e4aef02c1b7137398ab3a9f8f96da74f52"
</literallayout>
</para>
<para>
In this example, we're using the <filename>SRCREV</filename> values we
found already captured in the &DISTRO_NAME; release because we're creating a BSP based on
&DISTRO_NAME;.
found already captured in the edison release because we're creating a BSP based on
edison.
If, instead, we had based our BSP on the master branches, we would want to use
the most recent <filename>SRCREV</filename> values taken directly from the kernel repo.
We will not be doing that for this example.
@@ -491,19 +484,19 @@
exact commit strings in the Yocto Project source repositories you need to change
the <filename>SRCREV</filename> statements.
You can find all the <filename>machine</filename> and <filename>meta</filename>
branch points (commits) for the <filename>linux-yocto-3.2</filename> kernel at
<ulink url='&YOCTO_GIT_URL;/cgit/cgit.cgi/linux-yocto-3.2'></ulink>.
branch points (commits) for the <filename>linux-yocto-3.0</filename> kernel at
<ulink url='&YOCTO_GIT_URL;/cgit/cgit.cgi/linux-yocto-3.0'></ulink>.
</para>
<para>
If you need a little more assistance after going to the link, then do the following:
<orderedlist>
<listitem><para>Expand the list of branches by clicking <filename>[…]</filename></para></listitem>
<listitem><para>Click on the <filename>standard/default/common-pc/atom-pc</filename>
<listitem><para>Click on the <filename>yocto/standard/common-pc/atom-pc</filename>
branch</para></listitem>
<listitem><para>Click on the commit column header to view the top commit</para></listitem>
<listitem><para>Copy the commit string for use in the
<filename>linux-yocto_3.2.bbappend</filename> file</para></listitem>
<filename>linux-yocto_3.0.bbappend</filename> file</para></listitem>
</orderedlist>
</para>
@@ -514,36 +507,32 @@
</para>
<para>
Also in the <filename>linux-yocto_3.2.bbappend</filename> file are
<ulink url='&YOCTO_DOCS_REF_URL;#var-COMPATIBLE_MACHINE'><filename>COMPATIBLE_MACHINE</filename></ulink>,
<ulink url='&YOCTO_DOCS_REF_URL;#var-KMACHINE'><filename>KMACHINE</filename></ulink>,
and
<ulink url='&YOCTO_DOCS_REF_URL;#var-KBRANCH'><filename>KBRANCH</filename></ulink> statements.
Also in the <filename>linux-yocto_3.0.bbappend</filename> file are
<filename>COMPATIBLE_MACHINE</filename>, <filename>KMACHINE</filename>,
and <filename>KERNEL_FEATURES</filename> statements.
Two sets of these exist: one set supports EMGD and one set does not.
Because we are not interested in supporting EMGD those three can be deleted.
The remaining three must be changed so that <filename>mymachine</filename> replaces
<filename>crownbay-noemgd</filename> and <filename>crownbay</filename>.
Because we are using the <filename>atom-pc</filename> branch for this new BSP, we can also find
the exact branch we need for the <filename>KMACHINE</filename>
and <filename>KBRANCH</filename> variables in our new BSP from the value
Because we are using the atom-pc branch for this new BSP, we can also find
the exact branch we need for the KMACHINE variable in our new BSP from the value
we find in the
<filename>poky/meta-yocto/recipes-kernel/linux/linux-yocto_3.2.bbappend</filename>
<filename>poky/meta-yocto/recipes-kernel/linux/linux-yocto_3.0.bbappend</filename>
file we looked at in a previous step.
In this case, the values we want are in the <filename>KMACHINE_atom-pc</filename> variable
and the <filename>KBRANCH_atom-pc</filename> variables in that file.
Here is the final <filename>linux-yocto_3.2.bbappend</filename> file after all
In this case, the value we want is in the KMACHINE_atom-pc variable in that file.
Here is the final <filename>linux-yocto_3.0.bbappend</filename> file after all
the edits:
<literallayout class='monospaced'>
FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"
COMPATIBLE_MACHINE_mymachine = "mymachine"
KMACHINE_mymachine = "atom-pc"
KBRANCH_mymachine = "standard/default/common-pc/atom-pc"
KMACHINE_mymachine = "yocto/standard/common-pc/atom-pc"
KERNEL_FEATURES_append_mymachine += " cfg/smp.scc"
SRCREV_machine_pn-linux-yocto_mymachine ?= \
"f29531a41df15d74be5ad47d958e4117ca9e489e"
"1e18e44adbe79b846e382370eb29bc4b8cd5a1a0"
SRCREV_meta_pn-linux-yocto_mymachine ?= \
"b14a08f5c7b469a5077c10942f4e1aec171faa9d"
"d05450e4aef02c1b7137398ab3a9f8f96da74f52"
</literallayout>
</para>
</section>
@@ -578,14 +567,15 @@
<orderedlist>
<listitem><para>Get the environment ready for the build by sourcing the environment
script.
The environment script is in the top-level of the source directory.
The environment script is in the top-level of the local Yocto Project files
directory structure.
The script has the string
<filename>init-build-env</filename> in the files name.
For this example, the following command gets the build environment ready:
<literallayout class='monospaced'>
$ source oe-init-build-env yocto-build
</literallayout>
When you source the script, a build directory is created in the current
working directory.
In our example we were in the <filename>poky</filename> directory.
Thus, entering the previous command created the <filename>yocto-build</filename> directory.
@@ -610,21 +600,21 @@
both the <filename>BB_NUMBER_THREADS</filename> and <filename>PARALLEL_MAKE</filename>
variables to twice the number of cores your system supports (see the sketch after this list).</para></listitem>
<listitem><para>Update the <filename>bblayers.conf</filename> file so that it includes
both the path to your new BSP layer and the path to the
<filename>meta-intel</filename> layer.
In this example, you need to include both these paths as part of the
<filename>BBLAYERS</filename> variable:
the path to your new BSP layer.
In this example you need to include the pathname to <filename>meta-mymachine</filename>.
For this example the
<filename>BBLAYERS</filename> variable in the file would need to include the following path:
<literallayout class='monospaced'>
$HOME/poky/meta-intel
$HOME/poky/meta-intel/meta-mymachine
</literallayout></para></listitem>
</orderedlist>
</para>
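<para>
For the two edits above, the relevant configuration lines might look like the
following sketch for a four-core machine (the thread counts and the default
layer paths depend on your system and release):
<literallayout class='monospaced'>
     # conf/local.conf
     BB_NUMBER_THREADS = "8"
     PARALLEL_MAKE = "-j 8"

     # conf/bblayers.conf
     BBLAYERS ?= " \
       $HOME/poky/meta \
       $HOME/poky/meta-yocto \
       $HOME/poky/meta-intel \
       $HOME/poky/meta-intel/meta-mymachine \
       "
</literallayout>
</para>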
<para>
The
<ulink url='&YOCTO_DOCS_REF_URL;#ref-variables-glos'>Variables Glossary</ulink> chapter in the
Yocto Project Reference Manual has more information on configuration variables.
The appendix
<ulink url='&YOCTO_DOCS_REF_URL;#ref-variables-glos'>
Reference: Variables Glossary</ulink> in the Yocto Project Reference Manual has more information
on configuration variables.
</para>
</section>
@@ -692,12 +682,12 @@
</para>
<para>
For reference, the sato image produced by the previous steps for &DISTRO_NAME;
For reference, the sato image produced by the previous steps for edison
should look like the following in terms of size.
If your sato image is much different from this,
you probably made a mistake in one of the above steps:
<literallayout class='monospaced'>
260538368 2012-04-27 01:44 core-image-sato-mymachine-20120427025051.hddimg
358715392 2011-11-01 19:11 core-image-sato-mymachine-20111101223904.hddimg
</literallayout>
<note>The previous instructions are also present in the README that was copied
from meta-crownbay, which should also be updated to reflect the specifics of your

File diff suppressed because it is too large.

View File

@@ -18,13 +18,12 @@
sources where you can find more detail.
For example, detailed information on Git, repositories and open source in general
can be found in many places.
Another example is how to get set up to use the Yocto Project, which our Yocto Project
Quick Start covers.
Another example is how to get set up to use the Yocto Project, which our Yocto Project Quick Start covers.
</para>
<para>
The Yocto Project Development Manual, however, does provide detailed examples on how to create a
Board Support Package (BSP), change the kernel source code, and reconfigure the kernel.
Board Support Package (BSP), change the kernel source code, and re-configure the kernel.
You can find this information in the appendices of the manual.
</para>
</section>
@@ -45,7 +44,13 @@
applications.</para></listitem>
<listitem><para>An overview and understanding of the emulation environment used with
the Yocto Project (QEMU).</para></listitem>
<listitem><para>An understanding of basic kernel architecture and concepts.</para></listitem>
<!-- <listitem><para>A discussion of target-level analysis techniques, tools, tips,
and tricks.</para></listitem>
<listitem><para>Considerations for deploying your final product.</para></listitem> -->
<listitem><para>An understanding of basic kernel architecture and
concepts.</para></listitem>
<!-- <listitem><para>Information that will help you migrate an existing project to the
Yocto Project development environment.</para></listitem> -->
<listitem><para>Many references to other sources of related information.</para></listitem>
</itemizedlist>
</para>
@@ -59,13 +64,14 @@
<itemizedlist>
<listitem><para>Step-by-step instructions if those instructions exist in other Yocto
Project documentation.
For example, the Yocto Project Development Manual contains detailed
For example, the Application Development Toolkit (ADT) Users Guide contains detailed
instruction on how to obtain and configure the
<trademark class='trade'>Eclipse</trademark> Yocto Plug-in.</para></listitem>
<listitem><para>Reference material.
This type of material resides in an appropriate reference manual.
For example, system variables are documented in the
<ulink url='&YOCTO_DOCS_REF_URL;'>Yocto Project Reference Manual</ulink>.</para></listitem>
<ulink url='&YOCTO_DOCS_REF_URL;'>
Yocto Project Reference Manual</ulink>.</para></listitem>
<listitem><para>Detailed public information that is not specific to the Yocto Project.
For example, exhaustive information on how to use Git is covered better through the
Internet than in this manual.</para></listitem>
@@ -85,29 +91,33 @@
</emphasis> The home page for the Yocto Project provides lots of information on the project
as well as links to software and documentation.</para></listitem>
<listitem><para><emphasis>
<ulink url='&YOCTO_DOCS_QS_URL;'>Yocto Project Quick Start</ulink>:</emphasis> This short document lets you get started
<ulink url='&YOCTO_DOCS_QS_URL;'>
The Yocto Project Quick Start</ulink>:</emphasis> This short document lets you get started
with the Yocto Project quickly and start building an image.</para></listitem>
<listitem><para><emphasis>
<ulink url='&YOCTO_DOCS_REF_URL;'>Yocto Project Reference Manual</ulink>:</emphasis> This manual is a reference
guide to the OpenEmbedded build system known as "Poky."
<ulink url='&YOCTO_DOCS_REF_URL;'>
The Yocto Project Reference Manual</ulink>:</emphasis> This manual is a reference
guide to the Yocto Project build component known as "Poky."
The manual also contains a reference chapter on Board Support Package (BSP)
layout.</para></listitem>
<listitem><para><emphasis>
<ulink url='&YOCTO_DOCS_ADT_URL;'>Yocto Project Application Developer's Guide</ulink>:</emphasis>
This guide provides information that lets you get going with the Application
Development Toolkit (ADT) and stand-alone cross-development toolchains to
<ulink url='&YOCTO_DOCS_ADT_URL;'>
The Yocto Project Application Development Toolkit (ADT) User's Guide</ulink>:</emphasis>
This guide provides information that lets you get going with the ADT to
develop projects using the Yocto Project.</para></listitem>
<listitem><para><emphasis>
<ulink url='&YOCTO_DOCS_BSP_URL;'>Yocto Project Board Support Package (BSP) Developer's Guide</ulink>:</emphasis>
<ulink url='&YOCTO_DOCS_BSP_URL;'>
The Yocto Project Board Support Package (BSP) Developer's Guide</ulink>:</emphasis>
This guide defines the structure for BSP components.
Having a commonly understood structure encourages standardization.</para></listitem>
<listitem><para><emphasis>
<ulink url='&YOCTO_DOCS_KERNEL_URL;'>Yocto Project Kernel Architecture and Use Manual</ulink>:</emphasis>
<ulink url='&YOCTO_DOCS_KERNEL_URL;'>
The Yocto Project Kernel Architecture and Use Manual</ulink>:</emphasis>
This manual describes the architecture of the Yocto Project kernel and provides
some work flow examples.</para></listitem>
<listitem><para><emphasis>
<ulink url='http://www.youtube.com/watch?v=3ZlOu-gLsh0'>
Eclipse IDE Yocto Plug-in</ulink>:</emphasis> A step-by-step instructional video that
Yocto Eclipse Plug-in</ulink>:</emphasis> A step-by-step instructional video that
demonstrates how an application developer uses Yocto Plug-in features within
the Eclipse IDE.</para></listitem>
<listitem><para><emphasis>
@@ -117,17 +127,6 @@
<ulink url='&YOCTO_HOME_URL;/download/yocto/yocto-project-1.1-release-notes-poky-&POKYVERSION;'>
Release Notes</ulink>:</emphasis> Features, updates and known issues for the current
release of the Yocto Project.</para></listitem>
<listitem><para><emphasis>
<ulink url='&YOCTO_HOME_URL;/projects/hob'>
Hob</ulink>:</emphasis> A graphical user interface for BitBake.
Hob's primary goal is to enable a user to perform common tasks more easily.</para></listitem>
<listitem><para><emphasis>
<ulink url='&YOCTO_HOME_URL;/documentation/build-appliance'>
Build Appliance</ulink>:</emphasis> A bootable custom embedded Linux image you can
either build using a non-Linux development system (VMware applications) or download
from the Yocto Project website.
See the <ulink url='&YOCTO_HOME_URL;/documentation/build-appliance'>Build Appliance</ulink>
page for more information.</para></listitem>
<listitem><para><emphasis>
<ulink url='&YOCTO_BUGZILLA_URL;'>Bugzilla</ulink>:</emphasis>
The bug tracking application the Yocto Project uses.
@@ -142,36 +141,36 @@
<listitem><para><ulink url='&YOCTO_LISTS_URL;/listinfo/poky'></ulink> for a
Yocto Project Discussions mailing list about the Poky build system.</para></listitem>
<listitem><para><ulink url='&YOCTO_LISTS_URL;/listinfo/yocto-announce'></ulink>
for a mailing list to receive official Yocto Project announcements for developments and
as well as Yocto Project milestones.</para></listitem>
</itemizedlist></para></listitem>
<listitem><para><emphasis>Internet Relay Chat (IRC):</emphasis>
Two IRC channels on freenode are available
for Yocto Project and Poky discussions: <filename>#yocto</filename> and
<filename>#poky</filename>, respectively.</para></listitem>
<filename>#poky</filename>.</para></listitem>
<listitem><para><emphasis>
<ulink url='&OH_HOME_URL;'>OpenedHand</ulink>:</emphasis>
The company that initially developed the Poky project, which is the basis
for the OpenEmbedded build system used by the Yocto Project.
OpenedHand was acquired by Intel Corporation in 2008.</para></listitem>
The company where the Yocto Project build system Poky was first developed.
OpenedHand has since been acquired by Intel Corporation.</para></listitem>
<listitem><para><emphasis>
<ulink url='http://www.intel.com/'>Intel Corporation</ulink>:</emphasis>
A multinational semiconductor chip manufacturer company whose Software and
Services Group created and supports the Yocto Project.
Intel acquired OpenedHand in 2008.</para></listitem>
The company that acquired OpenedHand in 2008 and continues development on the
Yocto Project.</para></listitem>
<listitem><para><emphasis>
<ulink url='&OE_HOME_URL;'>OpenEmbedded</ulink>:</emphasis>
The build system used by the Yocto Project.
This project is the upstream, generic, embedded distribution from which the Yocto
Project derives its build system (Poky) from and to which it contributes.</para></listitem>
The upstream, generic, embedded distribution the Yocto Project build system (Poky) derives
from and to which it contributes.</para></listitem>
<listitem><para><emphasis>
<ulink url='http://developer.berlios.de/projects/bitbake/'>
BitBake</ulink>:</emphasis> The tool used by the OpenEmbedded build system
to process project metadata.</para></listitem>
BitBake</ulink>:</emphasis> The tool used to process Yocto Project metadata.</para></listitem>
<listitem><para><emphasis>
<ulink url='&BITBAKE_DOCS_URL;'>
<ulink url='http://bitbake.berlios.de/manual/'>
BitBake User Manual</ulink>:</emphasis> A comprehensive guide to the BitBake tool.
</para></listitem>
<listitem><para><emphasis>
<ulink url='http://pimlico-project.org/'>Pimlico</ulink>:</emphasis>
A suite of lightweight Personal Information Management (PIM) applications designed
primarily for handheld and mobile devices.</para></listitem>
<listitem><para><emphasis>
<ulink url='http://wiki.qemu.org/Index.html'>QEMU</ulink>:
</emphasis> An open-source machine emulator and virtualizer.</para></listitem>

View File

@@ -12,6 +12,17 @@
or even altering the source code itself.
This appendix presents simple examples that modify the kernel source code,
change the kernel configuration, and add a kernel source recipe.
<!-- [WRITER'S NOTE: I might want to work in information about applying a local
change to a kernel layer and also pushing a change upstream into the tree]
<orderedlist>
<listitem><para>Iteratively determine and set kernel configurations and make
kernel recipe changes.</para></listitem>
<listitem><para>Apply your configuration changes to your local kernel layer.
</para></listitem>
<listitem><para>Push your configuration and recipe changes upstream into the
Yocto Project source repositories to make them available to the community.
</para></listitem>
</orderedlist> -->
</para>
<section id='modifying-the-kernel-source-code'>
@@ -34,17 +45,18 @@
Briefly, you need the following:
<itemizedlist>
<listitem><para>A local
<link linkend='source-directory'>source directory</link> for the
poky Git repository</para></listitem>
<listitem><para>Local copies of the
<link linkend='yocto-project-files'>Yocto Project Files</link>
Git repository</para></listitem>
<listitem><para>The
<link linkend='poky-extras-repo'><filename>poky-extras</filename></link>
Git repository placed within the source directory.</para></listitem>
Git repository placed within the local Yocto Project files Git
repository</para></listitem>
<listitem><para>A bare clone of the
<link linkend='local-kernel-files'>Yocto Project Kernel</link> upstream Git
<link linkend='local-kernel-files'>Linux Yocto Kernel</link> upstream Git
repository to which you want to push your modifications.
</para></listitem>
<listitem><para>A copy of that bare clone in which you make your source
modifications</para></listitem>
</itemizedlist>
</para>
@@ -59,38 +71,37 @@
</para>
<para>
<imagedata fileref="figures/kernel-example-repos-denzil.png" width="7in" depth="5in"
<imagedata fileref="figures/kernel-example-repos.png" width="7in" depth="5in"
align="center" scale="100" />
</para>
<para>
Here is a brief description of the four areas:
<itemizedlist>
<listitem><para><emphasis>Local Source Directory:</emphasis>
This area contains all the metadata that supports building images
using the OpenEmbedded build system.
In this example, the source directory also
<listitem><para><emphasis>Local Yocto Project Files Git Repository:</emphasis>
This area contains all the metadata that supports building images in the
Yocto Project build environment - the local Yocto Project files.
In this example, the local Yocto Project files Git repository also
contains the build directory, which contains the configuration directory
that lets you control the build.
Also in this example, the source directory contains local copies of the
In this example, the repository also contains the
<filename>poky-extras</filename> Git repository.</para>
<para>See the bulleted item
"<link linkend='local-yp-release'>Yocto Project Release</link>"
for information on how to get these files on your local system.</para></listitem>
<listitem><para><emphasis>Local copies of the<filename>poky-extras</filename>
Git Repository:</emphasis>
for information on how to get these files.</para></listitem>
<listitem><para><emphasis><filename>poky-extras</filename> Git Repository:</emphasis>
This area contains the <filename>meta-kernel-dev</filename> layer,
which is where you make changes that append the kernel build recipes.
You edit <filename>.bbappend</filename> files to locate your
local kernel source files and to identify the kernel being built.
This Git repository is a gathering place for extensions to the Yocto Project
This Git repository is a gathering place for extensions to the Linux Yocto
(or really any) kernel recipes that facilitate the creation and development
of kernel features, BSPs or configurations.</para>
<para>See the bulleted item
"<link linkend='poky-extras-repo'>The
<filename>poky-extras</filename> Git Repository</link>"
for information on how to get these files.</para></listitem>
<listitem><para><emphasis>Bare Clone of the Yocto Project kernel:</emphasis>
<listitem><para><emphasis>Bare Clone of the Linux Yocto kernel:</emphasis>
This bare Git repository tracks the upstream Git repository of the Linux
Yocto kernel source code you are changing.
When you modify the kernel you must work through a bare clone.
@@ -100,15 +111,15 @@
<filename>poky-extras</filename> repository points to the bare clone
so that the build process can locate the locally changed source files.</para>
<para>See the bulleted item
"<link linkend='local-kernel-files'>Yocto Project Kernel</link>"
"<link linkend='local-kernel-files'>Linux Yocto Kernel</link>"
for information on how to set up the bare clone.
</para></listitem>
<listitem><para><emphasis>Copy of the Yocto Project Kernel Bare Clone:</emphasis>
<listitem><para><emphasis>Copy of the Linux Yocto Kernel Bare Clone:</emphasis>
This Git repository contains the actual source files that you modify.
Any changes you make to files in this location need to ultimately be pushed
to the bare clone using the <filename>git push</filename> command.</para>
<para>See the bulleted item
"<link linkend='local-kernel-files'>Yocto Project Kernel</link>"
"<link linkend='local-kernel-files'>Linux Yocto Kernel</link>"
for information on how to set up the bare clone.
<note>Typically, Git workflows follow a scheme where changes made to a local area
are pulled into a Git repository.
@@ -122,65 +133,50 @@
</section>
<section id='setting-up-the-local-yocto-project-files-git-repository'>
<title>Setting Up the Local Source Directory</title>
<title>Setting Up the Local Yocto Project Files Git Repository</title>
<para>
You can set up the source directory through tarball extraction or by
You can get the local Yocto Project files through tarball extraction or by
cloning the <filename>poky</filename> Git repository.
This example uses <filename>poky</filename> as the root directory of the
local source directory.
local Yocto Project files Git repository.
See the bulleted item
"<link linkend='local-yp-release'>Yocto Project Release</link>"
for information on how to get these files.
</para>
<para>
Once you have source directory set up,
Once you have the repository set up,
you have many development branches from which you can work.
From inside the local repository you can see the branch names and the tag names used
in the upstream Git repository by using either of the following commands:
From inside the repository you can see the branch names and the tag names used
in the Git repository using either of the following two commands:
<literallayout class='monospaced'>
$ cd poky
$ git branch -a
$ git tag -l
</literallayout>
This example uses the Yocto Project &DISTRO; Release code named "&DISTRO_NAME;",
which maps to the <filename>&DISTRO_NAME;</filename> branch in the repository.
The following commands create and checkout the local <filename>&DISTRO_NAME;</filename>
This example uses the Yocto Project 1.1 Release code named "edison",
which maps to the <filename>edison</filename> branch in the repository.
The following commands create and checkout the local <filename>edison</filename>
branch:
<literallayout class='monospaced'>
$ git checkout -b &DISTRO_NAME; origin/&DISTRO_NAME;
Branch &DISTRO_NAME; set up to track remote branch &DISTRO_NAME; from origin.
Branch edison set up to track remote branch &DISTRO_NAME; from origin.
Switched to a new branch '&DISTRO_NAME;'
</literallayout>
</para>
</section>
<section id='setting-up-the-poky-extras-git-repository'>
<title>Setting Up the Local poky-extras Git Repository</title>
<title>Setting Up the poky-extras Git Repository</title>
<para>
This example creates a local copy of the <filename>poky-extras</filename> Git
repository inside the <filename>poky</filename> source directory.
See the bulleted item "<link linkend='poky-extras-repo'>The
This example places the <filename>poky-extras</filename> Git repository inside
of <filename>poky</filename>.
See the bulleted item
"<link linkend='poky-extras-repo'>The
<filename>poky-extras</filename> Git Repository</link>"
for information on how to set up a local copy of the
<filename>poky-extras</filename> repository.
</para>
<para>
Because this example uses the Yocto Project &DISTRO; Release code
named "&DISTRO_NAME;", which maps to the <filename>&DISTRO_NAME;</filename>
branch in the repository, you need to be sure you are using that
branch for <filename>poky-extra</filename>.
The following commands create and checkout the local
branch you are using for the <filename>&DISTRO_NAME;</filename>
branch:
<literallayout class='monospaced'>
$ git checkout -b &DISTRO_NAME; origin/&DISTRO_NAME;
Branch &DISTRO_NAME; set up to track remote branch &DISTRO_NAME; from origin.
Switched to a new branch '&DISTRO_NAME;'
</literallayout>
for information on how to get the <filename>poky-extras</filename> repository.
</para>
</section>
@@ -188,11 +184,11 @@
<title>Setting Up the Bare Clone and its Copy</title>
<para>
This example modifies the <filename>linux-yocto-3.2</filename> kernel.
This example modifies the <filename>linux-yocto-3.0-1.1.x</filename> kernel.
Thus, you need to create a bare clone of that kernel and then make a copy of the
bare clone.
See the bulleted item
"<link linkend='local-kernel-files'>Yocto Project Kernel</link>"
"<link linkend='local-kernel-files'>Linux Yocto Kernel</link>"
for information on how to do that.
</para>
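<para>
As a rough sketch only (the repository URL and directory names here are assumptions
based on this example rather than part of the referenced instructions), the bare clone
and its copy might be created as follows:
<literallayout class='monospaced'>
$ cd ~
$ git clone &dash;&dash;bare git://git.yoctoproject.org/linux-yocto-3.2 linux-yocto-3.2.git
$ git clone linux-yocto-3.2.git my-linux-yocto-3.2-work
</literallayout>
</para>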
@@ -200,16 +196,15 @@
The bare clone exists for the kernel build tools and simply as the receiving end
of <filename>git push</filename>
commands after you make edits and commits inside the copy of the clone.
The copy (<filename>my-linux-yocto-3.2-work</filename> in this example) has to have
The copy (<filename>my-linux-yocto-3.0-1.1.x-work</filename> in this example) has to have
a local branch created and checked out for your work.
This example uses <filename>common-pc-base</filename> as the local branch.
The following commands create and checkout the branch:
<literallayout class='monospaced'>
$ cd ~/my-linux-yocto-3.2-work
$ git checkout -b common-pc-base origin/standard/default/common-pc/base
Checking out files: 100% (532/532), done.
$ cd ~/my-linux-yocto-3.0-1.1.x-work
$ git checkout -b common-pc-base origin/yocto/standard/common-pc/base
Branch common-pc-base set up to track remote branch
standard/default/common-pc/base from origin.
yocto/standard/common-pc/base from origin.
Switched to a new branch 'common-pc-base'
</literallayout>
</para>
@@ -301,7 +296,7 @@
<para>
The file you change in this example is named <filename>calibrate.c</filename>
and is located in the <filename>my-linux-yocto-3.2-work</filename> Git repository
and is located in the <filename>my-linux-yocto-3.0-1.1.x-work</filename> Git repository
(the copy of the bare clone) in <filename>init</filename>.
This example simply inserts several <filename>printk</filename> statements
at the beginning of the <filename>calibrate_delay</filename> function.
@@ -312,11 +307,10 @@
<literallayout class='monospaced'>
void __cpuinit calibrate_delay(void)
{
unsigned long lpj;
static bool printed;
int this_cpu = smp_processor_id();
unsigned long lpj;
static bool printed;
if (per_cpu(cpu_loops_per_jiffy, this_cpu)) {
if (preset_lpj) {
.
.
.
@@ -325,21 +319,19 @@
<para>
Here is the altered code showing five new <filename>printk</filename> statements
near the top of the function:
just after initializing <filename>lps_precision</filename>:
<literallayout class='monospaced'>
void __cpuinit calibrate_delay(void)
{
unsigned long lpj;
static bool printed;
int this_cpu = smp_processor_id();
unsigned long lpj;
static bool printed;
printk("*************************************\n");
printk("* *\n");
printk("* HELLO YOCTO KERNEL *\n");
printk("* *\n");
printk("*************************************\n");
if (per_cpu(cpu_loops_per_jiffy, this_cpu)) {
if (preset_lpj) {
.
.
.
@@ -358,14 +350,14 @@
<para>
Once the source code has been modified, you need to use Git to push the changes to
the bare clone.
If you do not push the changes, then the OpenEmbedded build system will not pick
If you do not push the changes, then the Yocto Project build system will not pick
up the changed source files.
</para>
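<para>
If you have not yet committed your edits inside the copy of the clone, a commit
sequence along the following lines records them first (the commit message is
only an example):
<literallayout class='monospaced'>
$ cd ~/my-linux-yocto-3.2-work
$ git add init/calibrate.c
$ git commit -m "calibrate.c: add example printk statements"
</literallayout>
</para>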
<para>
The following command pushes the changes to the bare clone:
<literallayout class='monospaced'>
$ git push origin common-pc-base:standard/default/common-pc/base
$ git push origin common-pc-base:yocto/standard/common-pc/base
</literallayout>
</para>
</section>
@@ -375,7 +367,7 @@
<para>
At this point, the source has been changed and pushed.
The example now defines some variables used by the OpenEmbedded build system
The example now defines some variables used by the Yocto Project build system
to locate your kernel source.
You essentially need to identify where to find the kernel recipe and the changed source code.
You also need to be sure some basic configurations are in place that identify the
@@ -424,35 +416,37 @@
"
</literallayout></para></listitem>
<listitem><para><emphasis>Identify Your Source Files:</emphasis> In the
<filename>linux-yocto_3.2.bbappend</filename> file located in the
<filename>linux-yocto_3.0.bbappend</filename> file located in the
<filename>poky-extras/meta-kernel-dev/recipes-kernel/linux</filename>
directory, you need to identify the location of the
local source code, which in this example is the bare clone named
<filename>linux-yocto-3.2.git</filename>.
<filename>linux-yocto-3.0-1.1.x.git</filename>.
To do this, set the <filename>KSRC_linux_yocto</filename> variable to point to your
local <filename>linux-yocto-3.2.git</filename> Git repository by adding the
local <filename>linux-yocto-3.0-1.1.x.git</filename> Git repository by adding the
following statement.
Be sure to substitute your user information in the statement:
<literallayout class='monospaced'>
KSRC_linux_yocto_3_2 ?= "/home/scottrif/linux-yocto-3.2.git"
KSRC_linux_yocto ?= /home/scottrif/linux-yocto-3.0-1.1.x.git
</literallayout></para></listitem>
<listitem><para><emphasis>Specify the Kernel Machine:</emphasis> Also in the
<filename>linux-yocto_3.0.bbappend</filename> file, you need to specify
the kernel machine with the following statement:
<literallayout class='monospaced'>
KMACHINE_qemux86 = "yocto/standard/common-pc/base"
</literallayout></para></listitem>
</itemizedlist>
</para>
<note>
<para>Before attempting to build the modified kernel, there is one more set of changes you
Before attempting to build the modified kernel, there is one more set of changes you
need to make in the <filename>meta-kernel-dev</filename> layer.
Because all the kernel <filename>.bbappend</filename> files are parsed during the
build process regardless of whether you are using them or not, you should either
comment out the <filename>COMPATIBLE_MACHINE</filename> statements in all
unused <filename>.bbappend</filename> files, or simply remove (or rename) all the files
unused <filename>.bbappend</filename> files.
Alternatively, you can simply remove all the files
except the one you are using for the build
(i.e. <filename>linux-yocto_3.2.bbappend</filename> in this example).</para>
<para>If you do not make one of these two adjustments, your machine will be compatible
with all the kernel recipes in the <filename>meta-kernel-dev</filename> layer.
When your machine is compatible with all the kernel recipes, the build attempts
to build all kernels in the layer.
You could end up with build errors blocking your work.</para>
(i.e. <filename>linux-yocto_3.0.bbappend</filename> in this example).
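<para>As a sketch only (the exact assignment varies from file to file), commenting out
such a statement simply means prefixing it with a <filename>#</filename> character:
<literallayout class='monospaced'>
#COMPATIBLE_MACHINE_qemux86 = "qemux86"
</literallayout></para>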
</note>
</section>
@@ -477,7 +471,7 @@
$ bitbake -c cleanall linux-yocto
</literallayout></para>
<para><note>Never remove any files by hand from the <filename>tmp/deploy</filename>
directory inside the build directory.
directory inside the local Yocto Project files build directory.
Always use the BitBake <filename>cleanall</filename> task to clear
out previous builds.</note></para></listitem>
<listitem><para>Next, build the kernel image using this command:
@@ -511,8 +505,8 @@
<title>Changing the Kernel Configuration</title>
<para>
This example changes the default behavior, which is "on", of the Symmetric
Multi-processing Support (<filename>CONFIG_SMP</filename>) to "off".
This example changes the default behavior, which is "off", of the Symmetric
Multi-processing Support (<filename>CONFIG_SMP</filename>) to "on".
It is a simple example that demonstrates how to reconfigure the kernel.
</para>
@@ -522,42 +516,42 @@
<para>
If you took the time to work through the example that modifies the kernel source code
in "<link linkend='modifying-the-kernel-source-code'>Modifying the Kernel Source
Code</link>" you should already have the source directory set up on your
Code</link>" you should already have the Yocto Project files set up on your
host machine.
If this is the case, go to the next section, which is titled
If this is the case, go to the next section titled
"<link linkend='examining-the-default-config-smp-behavior'>Examining the Default
<filename>CONFIG_SMP</filename> Behavior</link>", and continue with the
<filename>CONFIG_SMP</filename> Behavior</link>" and continue with the
example.
</para>
<para>
If you don't have the source directory established on your system,
If you don't have the Yocto Project files established on your system,
you can get them through tarball extraction or by
cloning the <filename>poky</filename> Git repository.
This example uses <filename>poky</filename> as the root directory of the
<link linkend='source-directory'>source directory</link>.
local Yocto Project files Git repository.
See the bulleted item
"<link linkend='local-yp-release'>Yocto Project Release</link>"
for information on how to get these files.
</para>
<para>
Once you have the local copy of the repository set up,
Once you have the repository set up,
you have many development branches from which you can work.
From inside the repository you can see the branch names and the tag names used
in the upstream Git repository using either of the following commands:
in the Git repository using either of the following two commands:
<literallayout class='monospaced'>
$ cd poky
$ git branch -a
$ git tag -l
</literallayout>
This example uses the Yocto Project &DISTRO; Release code named "&DISTRO_NAME;",
This example uses the Yocto Project 1.1.1 Release code named "&DISTRO_NAME;",
which maps to the <filename>&DISTRO_NAME;</filename> branch in the repository.
The following commands create and checkout the local <filename>&DISTRO_NAME;</filename>
branch:
<literallayout class='monospaced'>
$ git checkout -b &DISTRO_NAME; origin/&DISTRO_NAME;
Branch &DISTRO_NAME; set up to track remote branch &DISTRO_NAME; from origin.
Branch edison set up to track remote branch &DISTRO_NAME; from origin.
Switched to a new branch '&DISTRO_NAME;'
</literallayout>
</para>
@@ -577,9 +571,9 @@
If your host development system supports multi-core and multi-thread capabilities,
you can uncomment these statements and set the variables to significantly shorten
the full build time.
As a guideline, set both the <filename>BB_NUMBER_THREADS</filename> and the
<filename>PARALLEL_MAKE</filename> variables to twice the number
of cores your machine supports.
As a guideline, set <filename>BB_NUMBER_THREADS</filename> to twice the number
of cores your machine supports and set <filename>PARALLEL_MAKE</filename> to one and
a half times the number of cores your machine supports.
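For example, assuming a host with four cores and the twice-the-number-of-cores
guideline, the uncommented statements in <filename>local.conf</filename> might look
like this (the values shown are only an illustration):
<literallayout class='monospaced'>
BB_NUMBER_THREADS = "8"
PARALLEL_MAKE = "-j 8"
</literallayout>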
</note>
The following two commands <filename>source</filename> the build environment setup script
and build the default <filename>qemux86</filename> image.
@@ -618,11 +612,11 @@
<title>Examining the Default&nbsp;&nbsp;<filename>CONFIG_SMP</filename> Behavior</title>
<para>
By default, <filename>CONFIG_SMP</filename> supports multiple processor machines.
By default, <filename>CONFIG_SMP</filename> supports single processor machines.
To see this default setting from within the QEMU emulator, boot your image using
the emulator as follows:
<literallayout class='monospaced'>
$ runqemu qemux86 qemuparams="-smp 4"
$ runqemu qemux86 qemuparams="-smp 2"
</literallayout>
</para>
@@ -630,25 +624,11 @@
Login to the machine using <filename>root</filename> with no password.
After logging in, enter the following command to see how many processors are
being supported in the emulator.
The emulator reports support for the number of processors you specified using
the <filename>-smp</filename> option, four in this case:
The emulator reports support for a single processor:
<literallayout class='monospaced'>
# cat /proc/cpuinfo | grep processor
processor : 0
processor : 1
processor : 2
processor : 3
#
</literallayout>
To check the setting for <filename>CONFIG_SMP</filename>, you can use the
following command:
<literallayout class='monospaced'>
zcat /proc/config.gz | grep CONFIG_SMP
</literallayout>
The console returns the following showing that multi-processor machine support
is set:
<literallayout class='monospaced'>
CONFIG_SMP=y
</literallayout>
Logout of the emulator using the <filename>exit</filename> command and
then close it down.
@@ -663,7 +643,7 @@
to set kernel configurations.
You need to run <filename>menuconfig</filename> inside the Yocto BitBake environment.
Thus, the environment must be set up using the <filename>oe-init-build-env</filename>
script found in the build directory.
script found in the Yocto Project files Git repository build directory.
If you have not sourced this script do so with the following commands:
<literallayout class='monospaced'>
$ cd ~/poky
@@ -674,14 +654,12 @@
<para>
After setting up the environment to run <filename>menuconfig</filename>, you are ready
to use the tool to interactively change the kernel configuration.
In this example, we are basing our changes on the <filename>linux-yocto-3.2</filename>
In this example, we are basing our changes on the <filename>linux-yocto-3.0-1.1.x</filename>
kernel.
The OpenEmbedded build system recognizes this kernel as
The Yocto Project build environment recognizes this kernel as
<filename>linux-yocto</filename>.
Thus, the following commands from the shell in which you previously sourced the
environment initialization script cleans the shared state cache and the
<ulink url='&YOCTO_DOCS_REF_URL;#var-WORKDIR'><filename>WORKDIR</filename></ulink>
directory and then builds and launches <filename>menuconfig</filename>:
Thus, the following command from the shell in which you previously sourced the
environment initialization script launches <filename>menuconfig</filename>:
<literallayout class='monospaced'>
$ bitbake linux-yocto -c menuconfig
</literallayout>
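The cleaning step mentioned above is not shown in the listing here; one plausible
way to perform it with a standard BitBake task (an assumption, not a quote from the
manual) is:
<literallayout class='monospaced'>
$ bitbake linux-yocto -c cleansstate
</literallayout>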
@@ -693,33 +671,31 @@
You can find it at <filename>Processor Type and Features</filename>.
The configuration selection is
<filename>Symmetric Multi-processing Support</filename>.
After using the arrow keys to highlight this selection, press "n" to turn it off.
After using the arrow keys to highlight this selection, press "y" to select it.
Then, exit out and save your selections.
</para>
<para>
Once you save the selection, the <filename>.config</filename> configuration file
is updated.
This is the file that the build system uses to configure the Yocto Project kernel
This is the file that the build system uses to configure the Linux Yocto kernel
when it is built.
You can find and examine this file in the build directory.
This example uses the following:
You can find and examine this file in the Yocto Project files Git repository in
the build directory.
This example uses the following.
Note that this example directory is artificially split and many of the characters
in the actual filename are omitted in order to make it more
readable:
<literallayout class='monospaced'>
~/poky/build/tmp/work/qemux86-poky-linux/linux-yocto-3.2.11+git1+84f...
...656ed30-r1/linux-qemux86-standard-build
~/poky/build/tmp/work/qemux86-poky-linux/linux-yocto-2.6.37+git1+84f...
...r20/linux-qemux86-standard-build
</literallayout>
<note>
The previous example directory is artificially split and many of the characters
in the actual filename are omitted in order to make it more readable.
Also, depending on the kernel you are using, the exact pathname might differ
slightly.
</note>
</para>
<para>
Within the <filename>.config</filename> file, you can see the following setting:
<literallayout class='monospaced'>
# CONFIG_SMP is not set
CONFIG_SMP=y
</literallayout>
</para>
@@ -736,7 +712,7 @@
<note>
Be sure to make a copy of the <filename>.config</filename> and don't just
rename it.
The build system needs an existing <filename>.config</filename>
The Yocto Project build system needs an existing <filename>.config</filename>
from which to work.
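For example, a copy command along these lines keeps the working
<filename>.config</filename> in place (the destination path is just an illustration):
<literallayout class='monospaced'>
$ cp .config ~/saved-configs/config.smp-disabled
</literallayout>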
</note>
</para>
@@ -747,16 +723,17 @@
<para>
At this point, you are ready to recompile your kernel image with
the new setting in effect using the BitBake command below:
the new setting in effect using the BitBake commands below:
<literallayout class='monospaced'>
$ bitbake linux-yocto -c compile -f
$ bitbake linux-yocto
</literallayout>
</para>
<para>
Now run the QEMU emulator and pass it the same multi-processor option as before:
Now run the QEMU emulator:
<literallayout class='monospaced'>
$ runqemu qemux86 qemuparams="-smp 4"
$ runqemu qemux86 qemuparams="-smp 2"
</literallayout>
</para>
@@ -766,22 +743,13 @@
<literallayout class='monospaced'>
# cat /proc/cpuinfo | grep processor
processor : 0
processor : 1
#
</literallayout>
</para>
<para>
From the output, you can see that the kernel no longer supports multi-processor systems.
The output indicates support for a single processor. You can verify the
<filename>CONFIG_SMP</filename> setting by using this command:
<literallayout class='monospaced'>
zcat /proc/config.gz | grep CONFIG_SMP
</literallayout>
The console returns the following output:
<literallayout class='monospaced'>
# CONFIG_SMP is not set
</literallayout>
You have successfully reconfigured the kernel.
From the output, you can see that you have successfully reconfigured the kernel.
</para>
</section>
</section>
@@ -799,8 +767,352 @@
width="2in" depth="3in" align="center" scalefit="1" />
</para>
</section>
<!-- <section id='is-vfat-supported'>
<title>Is VFAT Supported?</title>
<para>
<literallayout class='monospaced'>
I entered runqemu qemux86 and it fires up the emulator and uses the
image and filesystem in the build area created in the previous section.
Then I copied over a pre-created and formatted 5.2MB VFAT file named vfat.img.
I did this with scp vfat.img root@192.168.7.2:
The file is in the root directory.
I had to do this because the mkfs.vfat vfat.img command does not work.
mkfs is not recognized in the qemu terminal session.
when I try mount -o loop -t vfat vfat.img mnt/ I get the error
mount: can't set up loop device: No space left on device.
This error is because the loop module is not currently in the kernel image.
However, this module is available in the
build area in the tarball modules-2.6.37.6-yocto-standard+-20-qemux86.tgz.
You can add this to the kernel image by adding the
IMAGE_INSTALL += " kernel-module-loop" statement at the top of the local.conf
file in the build area and then rebuilding the kernel using bitbake.
It should just build whatever is necessary and not go through an entire build again.
The <filename>menuconfig</filename> tool provides an interactive method with which
to set kernel configurations.
In order to use <filename>menuconfig</filename> from within the BitBake environment
you need to source an environment setup script.
This script is located in the local Yocto Project file structure and is called
<filename>oe-init-build-env</filename>.
</para>
<para>
The following command sets up the environment:
<literallayout class='monospaced'>
$ cd ~/poky
$ source oe-init-build-env
$ runqemu qemux86
Continuing with the following parameters:
KERNEL: [/home/scottrif/poky/build/tmp/deploy/images/bzImage-qemux86.bin]
ROOTFS: [/home/scottrif/poky/build/tmp/deploy/images/core-image-sato-qemux86.ext3]
FSTYPE: [ext3]
Setting up tap interface under sudo
Acquiring lockfile for tap0...
WARNING: distccd not present, no distcc support loaded.
Running qemu...
/home/scottrif/poky/build/tmp/sysroots/x86_64-linux/usr/bin/qemu
-kernel /home/scottrif/poky/build/tmp/deploy/images/bzImage-qemux86.bin
-net nic,vlan=0 -net tap,vlan=0,ifname=tap0,script=no,downscript=no
-hda /home/scottrif/poky/build/tmp/deploy/images/core-image-sato-qemux86.ext3
-show-cursor -usb -usbdevice wacom-tablet -vga vmware -enable-gl -no-reboot
-m 128 &dash;&dash;append "vga=0 root=/dev/hda rw mem=128M ip=192.168.7.2::192.168.7.1:255.255.255.0 oprofile.timer=1 "
Enabling opengl
vmsvga_value_write: guest runs Linux.
</literallayout>
</para>
</section>
<section id='prepare-to-use-menuconfig'>
<title>Prepare to use <filename>menuconfig</filename></title>
<para>
[WRITER'S NOTE: Stuff from here down are crib notes]
</para>
<para>
Once menuconfig fires up you see all kinds of categories that you can interactively
investigate.
If they have an "M" in it then the feature is "modularized".
I guess that means that it needs to be manually linked in when the
kernel is booted??? (Not sure).
If they have an "*" then the feature is automatically part of the kernel.
</para>
<para>
So the tmp/work/ area was created in poky and there is a .config file in there and
a .config.old file.
The old one must have been created when I exited from menuconfig after poking around
a bit.
Nope - appears to just be created automatically.
</para>
<para>
A good practice is to first determine what configurations you have for the kernel.
You can see the results by looking in the .config file in the build/tmp/work/qemux86-poky-linux area
of the local YP files.
There is a directory named linux-yocto-2.6.37* in the directory.
In that directory is a directory named linux-qemux86-standard-build.
In that directory you will find a file named .config that is the configuration file
for the kernel that will be used when you build the kernel.
You can open that file up and examine it.
If you do a search for "VFAT" you will see that that particular configuration is not
enabled for the kernel.
This means that you cannot print a VFAT text file, or for that matter, even mount one
from the image if you were to build it at this point.
</para>
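<para>
For instance (a sketch only; the output shown is what you would expect when the
option is disabled), you could check the setting directly from the kernel build
directory:
<literallayout class='monospaced'>
$ grep VFAT .config
# CONFIG_VFAT_FS is not set
</literallayout>
</para>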
<para>
You can prove the point by actually trying it at this point.
Here are the commands:
<literallayout class='monospaced'>
$ mkdir ~/vfat-test
$ cd ~/vfat-test
$ dd if=/dev/zero of=vfat.img bs=1024 count=5000 [creates a 5MB disk image]
5+0 records in
5+0 records out
5242880 bytes (5.2 MB) copied, 0.00798912 s, 656 MB/s
$ ls -lah [lists the contents of the new image. l=long, a=all, h=human readable]
total 5.1M
drwxr-xr-x 2 srifenbark scottrif 4.0K 2011-08-01 08:18 .
drwxr-xr-x 66 srifenbark scottrif 4.0K 2011-08-01 08:14 ..
-rw-r&dash;&dash;r&dash;&dash; 1 srifenbark scottrif 5.0M 2011-08-01 08:18 vfat.img
$ mkfs.vfat vfat.img [formats the disk image]
mkfs.vfat 3.0.7 (24 Dec 2009)
$ mkdir mnt                        [creates a mount point for the disk image]
$ sudo su [gives you root privilege]
# mount -o loop vfat.img mnt [mounts it as a loop device]
# ls mnt [shows nothing in mnt]
# mount [lists the mounted filesystems - note/dev/loop0]
/dev/sda1 on / type ext4 (rw,errors=remount-ro)
proc on /proc type proc (rw,noexec,nosuid,nodev)
none on /sys type sysfs (rw,noexec,nosuid,nodev)
none on /sys/fs/fuse/connections type fusectl (rw)
none on /sys/kernel/debug type debugfs (rw)
none on /sys/kernel/security type securityfs (rw)
none on /dev type devtmpfs (rw,mode=0755)
none on /dev/pts type devpts (rw,noexec,nosuid,gid=5,mode=0620)
none on /dev/shm type tmpfs (rw,nosuid,nodev)
none on /var/run type tmpfs (rw,nosuid,mode=0755)
none on /var/lock type tmpfs (rw,noexec,nosuid,nodev)
none on /lib/init/rw type tmpfs (rw,nosuid,mode=0755)
binfmt_misc on /proc/sys/fs/binfmt_misc type binfmt_misc (rw,noexec,nosuid,nodev)
gvfs-fuse-daemon on /home/scottrif/.gvfs type fuse.gvfs-fuse-daemon (rw,nosuid,nodev,user=srifenbark)
/dev/loop0 on /home/scottrif/vfat-test/mnt type vfat (rw)
# echo "hello world" > mnt/hello.txt [creates a text file in the mounted VFAT system]
# ls mnt [verifies the file is there]
hello.txt
# cat mnt/hello.txt [displays the contents of the file created]
hello world
# umount mnt [unmounts the system and destroys the loop]
# exit [gets out of privileged user mode]
exit
$ lsmod [this stuff Darren did to show me ]
Module Size Used by [the status of modules in the regular linux kernel]
nls_iso8859_1 4633 0
nls_cp437 6351 0
vfat 10866 0
fat 55350 1 vfat
snd_hda_codec_atihdmi 3023 1
binfmt_misc 7960 1
snd_hda_codec_realtek 279008 1
ppdev 6375 0
snd_hda_intel 25805 2
fbcon 39270 71
tileblit 2487 1 fbcon
font 8053 1 fbcon
bitblit 5811 1 fbcon
snd_hda_codec 85759 3 snd_hda_codec_atihdmi,snd_hda_codec_realtek,snd_hda_intel
softcursor 1565 1 bitblit
snd_seq_dummy 1782 0
snd_hwdep 6924 1 snd_hda_codec
vga16fb 12757 0
snd_pcm_oss 41394 0
snd_mixer_oss 16299 1 snd_pcm_oss
snd_pcm 87946 3 snd_hda_intel,snd_hda_codec,snd_pcm_oss
vgastate 9857 1 vga16fb
snd_seq_oss 31191 0
snd_seq_midi 5829 0
snd_rawmidi 23420 1 snd_seq_midi
radeon 744506 3
snd_seq_midi_event 7267 2 snd_seq_oss,snd_seq_midi
ttm 61007 1 radeon
snd_seq 57481 6 snd_seq_dummy,snd_seq_oss,snd_seq_midi,snd_seq_midi_event
drm_kms_helper 30742 1 radeon
snd_timer 23649 2 snd_pcm,snd_seq
snd_seq_device 6888 5 snd_seq_dummy,snd_seq_oss,snd_seq_midi,snd_rawmidi,snd_seq
usb_storage 50377 0
snd 71283 16 \
snd_hda_codec_realtek,snd_hda_intel,snd_hda_codec, \
snd_hwdep,snd_pcm_oss,snd_mixer_oss,snd_pcm, \
snd_seq_oss,snd_rawmidi,snd_seq,snd_timer,snd_seq_device
soundcore 8052 1 snd
psmouse 65040 0
drm 198886 5 radeon,ttm,drm_kms_helper
i2c_algo_bit 6024 1 radeon
serio_raw 4918 0
snd_page_alloc 8500 2 snd_hda_intel,snd_pcm
dell_wmi 2177 0
dcdbas 6886 0
lp 9336 0
parport 37160 2 ppdev,lp
usbhid 41116 0
ohci1394 30260 0
hid 83888 1 usbhid
ieee1394 94771 1 ohci1394
tg3 122382 0
</literallayout>
</para>
</section>
</section> -->
</appendix>
<!--
EXTRA STUFF I MIGHT NEED BUT NOT SURE RIGHT NOW.
In the standard layer structure you have several areas that you need to examine or
modify.
For this example the layer contains four areas:
<itemizedlist>
<listitem><para><emphasis><filename>conf</filename></emphasis> - Contains the
<filename>layer.conf</filename> that identifies the location of the recipe files.
</para></listitem>
<listitem><para><emphasis><filename>images</filename></emphasis> - Contains the
image recipe file.
This recipe includes the base image you will be using and specifies other
packages the image might need.</para></listitem>
<listitem><para><emphasis><filename>recipes-bsp</filename></emphasis> - Contains
recipes specific to the hardware for which you are developing the kernel.
</para></listitem>
<listitem><para><emphasis><filename>recipes-kernel</filename></emphasis> - Contains the
"append" files that add information to the main recipe kernel.
</para></listitem>
</itemizedlist>
</para>
<para>
Let's take a look at the <filename>layer.conf</filename> in the
<filename>conf</filename> directory first.
This configuration file enables the Yocto Project build system to locate and
use the information in your new layer.
</para>
<para>
The variable <filename>BBPATH</filename> needs to include the path to your layer
as follows:
<literallayout class='monospaced'>
BBPATH := "${BBPATH}:${LAYERDIR}"
</literallayout>
And, the variable <filename>BBFILES</filename> needs to be modified to include your
recipe and append files:
<literallayout class='monospaced'>
BBFILES := "${BBFILES} ${LAYERDIR}/images/*.bb \
${LAYERDIR}/images/*.bbappend \
${LAYERDIR}/recipes-*/*/*.bb \
${LAYERDIR}/recipes-*/*/*.bbappend"
</literallayout>
Finally, you need to be sure to use your layer name in these variables at the
end of the file:
<literallayout class='monospaced'>
BBFILE_COLLECTIONS += "elc"
BBFILE_PATTERN_elc := "^${LAYERDIR}/"
BBFILE_PRIORITY_elc = "9"
</literallayout>
</para>
<para>
The <filename>images</filename> directory contains an append file that helps
further define the image.
In our example, the base image is <filename>core-image-minimal</filename>.
The image does, however, need some additional modules that we are using
for this example.
These modules support the amixer functionality.
Here is the append file:
<literallayout class='monospaced'>
require recipes-core/images/poky-image-minimal.bb
IMAGE_INSTALL += "dropbear alsa-utils-aplay alsa-utils-alsamixer"
IMAGE_INSTALL_append_qemux86 += " kernel-module-snd-ens1370 \
kernel-module-snd-rawmidi kernel-module-loop kernel-module-nls-cp437 \
kernel-module-nls-iso8859-1 qemux86-audio alsa-utils-amixer"
LICENSE = "MIT"
</literallayout>
</para>
<para>
While the focus of this example is not on the BSP, it is worth mentioning that the
<filename>recipes-bsp</filename> directory has the recipes and append files for
features that the hardware requires.
In this example, there is a script and a recipe to support the
<filename>amixer</filename> functionality in QEMU.
It is beyond the scope of this manual to go too deeply into the script.
Suffice it to say that the script tests for the presence of the mixer, sets up
default mixer values, enables the mixer, unmutes master and then
sets the volume to 100.
</para>
<para>
The recipe <filename>qemux86-audio.bb</filename> installs and runs the
<filename>amixer</filename> when the system boots.
Here is the recipe:
<literallayout class='monospaced'>
SUMMARY = "Provide a basic init script to enable audio"
DESCRIPTION = "Set the volume and unmute the Front mixer setting during boot."
SECTION = "base"
LICENSE = "MIT"
LIC_FILES_CHKSUM = "file://${POKYBASE}/LICENSE;md5=3f40d7994397109285ec7b81fdeb3b58"
PR = "r4"
inherit update-rc.d
RDEPENDS = "alsa-utils-amixer"
SRC_URI = "file://qemux86-audio"
INITSCRIPT_NAME = "qemux86-audio"
INITSCRIPT_PARAMS = "defaults 90"
do_install() {
install -d ${D}${sysconfdir} \
${D}${sysconfdir}/init.d
install -m 0755 ${WORKDIR}/qemux86-audio ${D}${sysconfdir}/init.d
cat ${WORKDIR}/${INITSCRIPT_NAME} | \
sed -e 's,/etc,${sysconfdir},g' \
-e 's,/usr/sbin,${sbindir},g' \
-e 's,/var,${localstatedir},g' \
-e 's,/usr/bin,${bindir},g' \
-e 's,/usr,${prefix},g' > ${D}${sysconfdir}/init.d/${INITSCRIPT_NAME}
chmod 755 ${D}${sysconfdir}/init.d/${INITSCRIPT_NAME}
}
</literallayout>
</para>
<para>
The last area to look at is <filename>recipes-kernel</filename>.
This area holds configuration fragments and kernel append files.
The append file must have the same name as the kernel recipe, which is
<filename>linux-yocto-2.6.37</filename> in this example.
The file can contain <filename>SRC_URI</filename> statements to point to configuration
fragments you might have in the layer.
The file can also contain <filename>KERNEL_FEATURES</filename> statements that specify
included kernel configurations that ship with the Yocto Project.
</para>
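<para>
A minimal sketch of such an append file (a hypothetical
<filename>linux-yocto_2.6.37.bbappend</filename>; the fragment and feature names are
assumptions and not files that necessarily exist in this layer) might look like this:
<literallayout class='monospaced'>
FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"
SRC_URI += "file://enable-sound.cfg"
KERNEL_FEATURES_append = " cfg/sound.scc"
</literallayout>
</para>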
-->
<!--
vim: expandtab tw=80 ts=4
-->

File diff suppressed because it is too large


@@ -23,7 +23,7 @@
Open source philosophy is characterized by software development directed by peer production
and collaboration through an active community of developers.
Contrast this to the more standard centralized development models used by commercial software
companies where a finite set of developers produces a product for sale using a defined set
companies where a finite set of developers produce a product for sale using a defined set
of procedures that ultimately result in an end product whose architecture and source material
are closed to the public.
</para>
@@ -55,7 +55,7 @@
</section>
<section id="usingpoky-changes-collaborate">
<title>Using the Yocto Project in a Team Environment</title>
<title>Using The Yocto Project in a Team Environment</title>
<para>
It might not be immediately clear how you can use the Yocto Project in a team environment,
@@ -97,20 +97,19 @@
<para>
Most teams have many pieces of software undergoing active development at any given time.
You can derive large benefits by putting these pieces under the control of a source
control system that is compatible (i.e. Git or Subversion (SVN)) with the OpenEmbedded
build system that the Yocto Project uses.
control system that is compatible with the Yocto Project (i.e. Git or Subversion (SVN)).
You can then set the autobuilder to pull the latest revisions of the packages
and test the latest commits by the builds.
This practice quickly highlights issues.
The build system easily supports testing configurations that use both a
The Yocto Project easily supports testing configurations that use both a
stable known good revision and a floating revision.
The build system can also take just the changes from specific source control branches.
The Yocto Project can also take just the changes from specific source control branches.
This capability allows you to track and test specific changes.
</para>
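<para>
As a rough illustration of these two modes (the recipe name is hypothetical), a
<filename>local.conf</filename> entry can select either a floating or a fixed
revision for a given recipe:
<literallayout class='monospaced'>
# Track the tip of the configured branch (floating revision):
SRCREV_pn-example-app = "${AUTOREV}"

# To pin the build to a stable known good revision instead, replace
# AUTOREV with the full commit ID of that revision.
</literallayout>
</para>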
<para>
Perhaps the hardest part of setting this up is defining the software project or
the metadata policies that surround the different source control systems.
the Yocto Project metadata policies that surround the different source control systems.
Of course circumstances will be different in each case.
However, this situation reveals one of the Yocto Project's advantages -
the system itself does not
@@ -130,7 +129,7 @@
From the interface, you can click on any particular item in the "Name" column and
see the URL at the bottom of the page that you need to set up a Git repository for
that particular item.
Having a local Git repository of the source directory (poky) allows you to
Having a local Git repository of the Yocto Project files allows you to
make changes, contribute to the history, and ultimately enhance the Yocto Project's
tools, Board Support Packages, and so forth.
</para>
@@ -148,8 +147,8 @@
<ulink url='&YOCTO_HOME_URL;/download'>download page</ulink> and get a
tarball of the release.
You can also go to this site to download any supported BSP tarballs.
Unpacking the tarball gives you a hierarchical source directory that lets you develop
using the Yocto Project.
Unpacking the tarball gives you a hierarchical directory structure of Yocto Project
files that lets you develop using the Yocto Project.
</para>
<para>
@@ -158,22 +157,22 @@
</para>
<para>
In summary, here is where you can get the project files needed for development:
In summary, here is where you can get the Yocto Project files needed for development:
<itemizedlist>
<listitem><para id='source-repositories'><emphasis><ulink url='&YOCTO_GIT_URL;/cgit/cgit.cgi'>Source Repositories:</ulink></emphasis>
This area contains IDE Plugins, Matchbox, Poky, Poky Support, Tools, Yocto Linux Kernel, and Yocto
Metadata Layers.
You can create local copies of Git repositories for each of these areas.</para>
You can create Git repositories for each of these areas.</para>
<para>
<imagedata fileref="figures/source-repos.png" align="center" width="6in" depth="4in" />
</para></listitem>
<listitem><para><anchor id='index-downloads' /><emphasis><ulink url='&YOCTO_DL_URL;/releases/'>Index of /releases:</ulink></emphasis>
This area contains index releases such as
the <trademark class='trade'>Eclipse</trademark>
Yocto Plug-in, miscellaneous support, poky, pseudo, cross-development toolchains,
Yocto Plug-in, miscellaneous support, Poky, pseudo, cross-development toolchains,
and all released versions of Yocto Project in the form of images or tarballs.
Downloading and extracting these files does not produce a local copy of the
Git repository but rather a snapshot of a particular release or image.</para>
Downloading and extracting these files does not produce a Git repository but rather
a snapshot of a particular release or image.</para>
<para>
<imagedata fileref="figures/index-downloads.png" align="center" width="6in" depth="4in" />
</para></listitem>
@@ -200,7 +199,7 @@
<listitem><para><emphasis>Append Files:</emphasis> Files that append build information to
a recipe file.
Append files are known as BitBake append files and <filename>.bbappend</filename> files.
The OpenEmbedded build system expects every append file to have a corresponding and
The Yocto Project build system expects every append file to have a corresponding and
underlying recipe (<filename>.bb</filename>) file.
Furthermore, the append file and the underlying recipe must have the same root filename.
The filenames can differ only in the file type suffix used (e.g.
@@ -212,25 +211,140 @@
"<link linkend='changing-recipes-kernel'>Changing <filename>recipes-kernel</filename></link>"
sections.</para></listitem>
<listitem><para><emphasis>BitBake:</emphasis> The task executor and scheduler used by
the OpenEmbedded build system to build images.
For more information on BitBake, see the <ulink url='&BITBAKE_DOCS_URL;'>
the Yocto Project to build images.
For more information on BitBake, see the <ulink url='http://bitbake.berlios.de/manual/'>
BitBake documentation</ulink>.</para></listitem>
<listitem><para><emphasis>Classes:</emphasis> Files that provide for logic encapsulation
and inheritance allowing commonly used patterns to be defined once and easily used
in multiple recipes.
Class files end with the <filename>.bbclass</filename> filename extension.
</para></listitem>
<listitem><para><emphasis>Configuration File:</emphasis> Configuration information in various
<filename>.conf</filename> files provides global definitions of variables.
The <filename>conf/local.conf</filename> configuration file in the
<link linkend='yocto-project-build-directory'>Yocto Project Build Directory</link>
contains user-defined variables that affect each build.
The <filename>meta-yocto/conf/distro/poky.conf</filename> configuration file
defines Yocto distro configuration
variables used only when building with this policy.
Machine configuration files, which
are located throughout the Yocto Project file structure, define
variables for specific hardware and are only used when building for that target
(e.g. the <filename>machine/beagleboard.conf</filename> configuration file defines
variables for the Texas Instruments ARM Cortex-A8 development board).
Configuration files end with a <filename>.conf</filename> filename extension.
</para></listitem>
<listitem><para><emphasis>Cross-Development Toolchain:</emphasis>
A collection of software development
tools and utilities that allow you to develop software for targeted architectures.
This toolchain contains cross-compilers, linkers, and debuggers that are specific to
an architecture.
You can use the Yocto Project to build cross-development toolchains in tarball form that when
unpacked contain the development tools you need to cross-compile and test your software.
The Yocto Project ships with images that contain toolchains for supported architectures
as well.
Sometimes this toolchain is referred to as the meta-toolchain.</para></listitem>
<listitem><para><emphasis>Image:</emphasis> An image is the result produced when
BitBake processes a given collection of recipes and related metadata.
Images are the binary output that runs on specific hardware and for specific
use cases.
For a list of the supported image types that the Yocto Project provides, see the
"<ulink url='&YOCTO_DOCS_REF_URL;#ref-images'>Reference: Images</ulink>"
appendix in The Yocto Project Reference Manual.</para></listitem>
<listitem><para id='layer'><emphasis>Layer:</emphasis> A collection of recipes representing the core,
a BSP, or an application stack.
For a discussion on BSP Layers, see the
"<ulink url='&YOCTO_DOCS_BSP_URL;#bsp-layers'>BSP Layers</ulink>"
section in the Yocto Project Board Support Packages (BSP) Developer's Guide.</para></listitem>
<listitem><para id='metadata'><emphasis>Metadata:</emphasis> The files that BitBake parses when
building an image.
Metadata includes recipes, classes, and configuration files.</para></listitem>
<listitem><para><emphasis>OE-Core:</emphasis> A core set of metadata originating
with OpenEmbedded (OE) that is shared between OE and the Yocto Project.
This metadata is found in the <filename>meta</filename> directory of the Yocto Project
files.</para></listitem>
<listitem><para><emphasis>Package:</emphasis> The packaged output from a baked recipe.
A package is generally the compiled binaries produced from the recipe's sources.
You bake something by running it through BitBake.</para></listitem>
<listitem><para><emphasis>Poky:</emphasis> The build tool that the Yocto Project
uses to create images.</para></listitem>
<listitem><para><emphasis>Recipe:</emphasis> A set of instructions for building packages.
A recipe describes where you get source code and which patches to apply.
Recipes describe dependencies for libraries or for other recipes, and they
also contain configuration and compilation options.
Recipes contain the logical unit of execution, the software/images to build, and
use the <filename>.bb</filename> file extension.</para></listitem>
<listitem><para><emphasis>Tasks:</emphasis> Arbitrary groups of software Recipes.
You simply use Tasks to hold recipes that, when built, usually accomplish a single task.
For example, a task could contain the recipes for a company's proprietary or value-add software.
Or, the task could contain the recipes that enable graphics.
A task is really just another recipe.
Because task files are recipes, they end with the <filename>.bb</filename> filename
extension.</para></listitem>
<listitem><para><emphasis>Upstream:</emphasis> A reference to source code or repositories
that are not local to the development system but located in a master area that is controlled
by the maintainer of the source code.
For example, in order for a developer to work on a particular piece of code, they need to
first get a copy of it from an "upstream" source.</para></listitem>
<listitem>
<para id='build-directory'><emphasis>Build Directory:</emphasis>
This term refers to the area used by the OpenEmbedded build system for builds.
The area is created when you <filename>source</filename> the setup
environment script that is found in the source directory
<para id='yocto-project-files'><emphasis>Yocto Project Files:</emphasis>
This term refers to the directory structure created as a result of either downloading
and unpacking a Yocto Project release tarball or setting up a Git repository
by cloning <filename>git://git.yoctoproject.org/poky</filename>.
Sometimes the term "the Yocto Project Files structure" is used as well.</para>
<para>The Yocto Project Files contain BitBake, Documentation, metadata and
other files that all support the development environment.
Consequently, you must have the Yocto Project Files in place on your development
system in order to do any development using the Yocto Project.</para>
<para>The name of the top-level directory of the Yocto Project Files structure
is derived from the Yocto Project release tarball.
For example, downloading and unpacking <filename>&YOCTO_POKY_TARBALL;</filename>
results in a Yocto Project file structure whose Yocto Project source directory is named
<filename>&YOCTO_POKY;</filename>.
If you create a Git repository, then you can name the repository anything you like.
Throughout much of the documentation, the name of the Git repository is used as the
name for the local folder.
So, for example, cloning the <filename>poky</filename> Git repository results in a
local Git repository also named <filename>poky</filename>.</para>
<para>It is important to understand the differences between Yocto Project Files created
by unpacking a release tarball as compared to cloning
<filename>git://git.yoctoproject.org/poky</filename>.
When you unpack a tarball, you have an exact copy of the files based on the time of
release - a fixed release point.
Any changes you make to your local Yocto Project Files are on top of the release.
On the other hand, when you clone the Yocto Project Git repository, you have an
active development repository.
In this case, any local changes you make to the Yocto Project can be later applied
to active development branches of the upstream Yocto Project Git repository.</para>
<para>Finally, if you want to track a set of local changes while starting from the same point
as a release tarball, you can create a local Git branch that
reflects the exact copy of the files at the time of their release.
You do this using Git tags that are part of the repository.</para>
<para>For more information on concepts around Git repositories, branches, and tags,
see the
"<link linkend='repositories-tags-and-branches'>Repositories, Tags, and Branches</link>"
section.</para></listitem>
<listitem>
<para id='yocto-project-build-directory'><emphasis>Yocto Project Build Directory:</emphasis>
This term refers to the area used by the Yocto Project for builds.
The area is created when you <filename>source</filename> the Yocto Project setup
environment script that is found in the Yocto Project files area
(i.e. <filename>oe-init-build-env</filename>).
The <ulink url='&YOCTO_DOCS_REF_URL;#var-TOPDIR'><filename>TOPDIR</filename></ulink>
variable points to the build directory.</para>
The <filename>TOPDIR</filename> variable points to the build directory.</para>
<para>You have a lot of flexibility when creating the build directory.
<para>You have a lot of flexibility when creating the Yocto Project Build Directory.
Following are some examples that show how to create the directory:
<itemizedlist>
<listitem><para>Create the build directory in your current working directory
and name it <filename>build</filename>.
This is the default behavior.
<literallayout class='monospaced'>
$ cd ~/poky
$ source oe-init-build-env
</literallayout></para></listitem>
<listitem><para>Provide a directory path and specifically name the build
@@ -249,140 +363,6 @@
</literallayout></para></listitem>
</itemizedlist>
</para></listitem>
<listitem><para><emphasis>Build System:</emphasis> In the context of the Yocto Project
this term refers to the OpenEmbedded build system used by the project.
This build system is based on the project known as "Poky."
For some historical information about Poky, see the
<link linkend='poky'>poky</link> term further along in this section.
</para></listitem>
<listitem><para><emphasis>Classes:</emphasis> Files that provide for logic encapsulation
and inheritance allowing commonly used patterns to be defined once and easily used
in multiple recipes.
Class files end with the <filename>.bbclass</filename> filename extension.
</para></listitem>
<listitem><para><emphasis>Configuration File:</emphasis> Configuration information in various
<filename>.conf</filename> files provides global definitions of variables.
The <filename>conf/local.conf</filename> configuration file in the
<link linkend='build-directory'>build directory</link>
contains user-defined variables that affect each build.
The <filename>meta-yocto/conf/distro/poky.conf</filename> configuration file
defines Yocto distro configuration
variables used only when building with this policy.
Machine configuration files, which
are located throughout the
<link linkend='source-directory'>source directory</link>, define
variables for specific hardware and are only used when building for that target
(e.g. the <filename>machine/beagleboard.conf</filename> configuration file defines
variables for the Texas Instruments ARM Cortex-A8 development board).
Configuration files end with a <filename>.conf</filename> filename extension.
</para></listitem>
<listitem><para><emphasis>Cross-Development Toolchain:</emphasis>
A collection of software development
tools and utilities that allow you to develop software for targeted architectures.
This toolchain contains cross-compilers, linkers, and debuggers that are specific to
an architecture.
You can use the OpenEmbedded build system to build cross-development toolchains in tarball
form that, when
unpacked, contain the development tools you need to cross-compile and test your software.
The Yocto Project ships with images that contain toolchains for supported architectures
as well.
Sometimes this toolchain is referred to as the meta-toolchain.</para></listitem>
<listitem><para><emphasis>Image:</emphasis> An image is the result produced when
BitBake processes a given collection of recipes and related metadata.
Images are the binary output that run on specific hardware and for specific
use cases.
For a list of the supported image types that the Yocto Project provides, see the
"<ulink url='&YOCTO_DOCS_REF_URL;#ref-images'>Images</ulink>"
chapter in the Yocto Project Reference Manual.</para></listitem>
<listitem><para id='layer'><emphasis>Layer:</emphasis> A collection of recipes representing the core,
a BSP, or an application stack.
For a discussion on BSP Layers, see the
"<ulink url='&YOCTO_DOCS_BSP_URL;#bsp-layers'>BSP Layers</ulink>"
section in the Yocto Project Board Support Packages (BSP) Developer's Guide.</para></listitem>
<listitem><para id='metadata'><emphasis>Metadata:</emphasis> The files that BitBake parses when
building an image.
Metadata includes recipes, classes, and configuration files.</para></listitem>
<listitem><para><emphasis>OE-Core:</emphasis> A core set of metadata originating
with OpenEmbedded (OE) that is shared between OE and the Yocto Project.
This metadata is found in the <filename>meta</filename> directory of the source
directory.</para></listitem>
<listitem><para><emphasis>Package:</emphasis> The packaged output from a baked recipe.
A package is generally the compiled binaries produced from the recipe's sources.
You bake something by running it through BitBake.</para></listitem>
<listitem><para id='poky'><emphasis>Poky:</emphasis> The term "poky" can mean several things.
In its most general sense, it is an open-source project that was initially developed
by OpenedHand. With OpenedHand, poky was developed from the existing OpenEmbedded
build system, becoming a build system for embedded images.
After Intel Corporation acquired OpenedHand, the project poky became the basis for
the Yocto Project's build system.
Within the Yocto Project source repositories, poky exists as a separate Git repository
that can be cloned to yield a local copy on the host system.
Thus, "poky" can refer to the local copy of the source directory used to develop within
the Yocto Project.</para></listitem>
<listitem><para><emphasis>Recipe:</emphasis> A set of instructions for building packages.
A recipe describes where you get source code and which patches to apply.
Recipes describe dependencies for libraries or for other recipes, and they
also contain configuration and compilation options.
Recipes contain the logical unit of execution, the software/images to build, and
use the <filename>.bb</filename> file extension.</para></listitem>
<listitem>
<para id='source-directory'><emphasis>Source Directory:</emphasis>
This term refers to the directory structure created as a result of either downloading
and unpacking a Yocto Project release tarball or creating a local copy of
<filename>poky</filename> Git repository <filename>git://git.yoctoproject.org/poky</filename>.
Sometimes you might hear the term "poky directory" used to refer to this
directory structure.</para>
<para>The source directory contains BitBake, Documentation, metadata and
other files that all support the Yocto Project.
Consequently, you must have the source directory in place on your development
system in order to do any development using the Yocto Project.</para>
<para>For tarball expansion, the name of the top-level directory of the source directory
is derived from the Yocto Project release tarball.
For example, downloading and unpacking <filename>&YOCTO_POKY_TARBALL;</filename>
results in a source directory whose top-level folder is named
<filename>&YOCTO_POKY;</filename>.
If you create a local copy of the Git repository, then you can name the repository
anything you like.
Throughout much of the documentation, <filename>poky</filename> is used as the name of
the top-level folder of the local copy of the poky Git repository.
So, for example, cloning the <filename>poky</filename> Git repository results in a
local Git repository whose top-level folder is also named <filename>poky</filename>.</para>
<para>It is important to understand the differences between the source directory created
by unpacking a released tarball as compared to cloning
<filename>git://git.yoctoproject.org/poky</filename>.
When you unpack a tarball, you have an exact copy of the files based on the time of
release - a fixed release point.
Any changes you make to your local files in the source directory are on top of the release.
On the other hand, when you clone the <filename>poky</filename> Git repository, you have an
active development repository.
In this case, any local changes you make to the source directory can be later applied
to active development branches of the upstream <filename>poky</filename> Git
repository.</para>
<para>Finally, if you want to track a set of local changes while starting from the same point
as a release tarball, you can create a local Git branch that
reflects the exact copy of the files at the time of their release.
You do this using Git tags that are part of the repository.</para>
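                    <para>For example, the following sketch (the tag and branch names are placeholders)
                    lists the release tags in the repository and then creates and checks out a local
                    branch based on one of them:
                    <literallayout class='monospaced'>
     $ cd ~/poky
     $ git tag                                        # list the release tags in the repository
     $ git checkout -b &lt;local-branch-name&gt; &lt;release-tag&gt;
                    </literallayout></para>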
<para>For more information on concepts around Git repositories, branches, and tags,
see the
"<link linkend='repositories-tags-and-branches'>Repositories, Tags, and Branches</link>"
section.</para></listitem>
<listitem><para><emphasis>Tasks:</emphasis> Arbitrary groups of software Recipes.
You simply use Tasks to hold recipes that, when built, usually accomplish a single task.
For example, a task could contain the recipes for a company's proprietary or value-add software.
Or, the task could contain the recipes that enable graphics.
A task is really just another recipe.
Because task files are recipes, they end with the <filename>.bb</filename> filename
extension.</para></listitem>
<listitem><para><emphasis>Upstream:</emphasis> A reference to source code or repositories
that are not local to the development system but located in a master area that is controlled
by the maintainer of the source code.
For example, in order for a developer to work on a particular piece of code, they need to
first get a copy of it from an "upstream" source.</para></listitem>
</itemizedlist>
</para>
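            <para>To illustrate the "Recipe" term above, here is a minimal, hypothetical
            <filename>.bb</filename> recipe sketch.
            The package name, source URL, and license checksum are placeholders only and are
            not taken from any real layer:
            <literallayout class='monospaced'>
     DESCRIPTION = "Hypothetical example application"
     LICENSE = "MIT"
     LIC_FILES_CHKSUM = "file://COPYING;md5=&lt;checksum&gt;"

     SRC_URI = "http://www.example.com/hello-${PV}.tar.gz"

     do_compile() {
         oe_runmake
     }

     do_install() {
         install -d ${D}${bindir}
         install -m 0755 hello ${D}${bindir}
     }
            </literallayout></para>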
</section>
@@ -422,7 +402,7 @@
<filename>meta/files/common-licenses</filename>.
Once the build completes, the list of all licenses found and used during that build are
kept in the
<link linkend='build-directory'>build directory</link> at
<link linkend='yocto-project-build-directory'>Yocto Project Build Directory</link> at
<filename>tmp/deploy/images/licenses</filename>.
</para>
@@ -514,8 +494,7 @@
Git uses "branches" to organize different development efforts.
For example, the <filename>poky</filename> repository has
<filename>laverne</filename>, <filename>bernard</filename>,
<filename>edison</filename>, <filename>denzil</filename> and
<filename>master</filename> branches among
<filename>edison</filename>, and <filename>master</filename> branches among
others.
You can see all the branches by going to
<ulink url='&YOCTO_GIT_URL;/cgit.cgi/poky/'></ulink> and
@@ -546,12 +525,12 @@
$ cd ~
$ git clone git://git.yoctoproject.org/poky
$ cd poky
$ git checkout -b &DISTRO_NAME; origin/&DISTRO_NAME;
$ git checkout &DISTRO_NAME; -b &DISTRO_NAME;
</literallayout>
In this example, the name of the top-level directory of your local Yocto Project
Files Git repository is <filename>poky</filename>,
and the name of the local working area (or local branch) you have created and checked
out is <filename>&DISTRO_NAME;</filename>.
Files Git repository is <filename>poky</filename>.
And, the name of the local working area (or local branch) you have created and checked
out is named <filename>&DISTRO_NAME;</filename>.
The files in your repository now reflect the same files that are in the
<filename>&DISTRO_NAME;</filename> development branch of the Yocto Project's
<filename>poky</filename> repository.
@@ -598,14 +577,14 @@
$ cd ~
$ git clone git://git.yoctoproject.org/poky
$ cd poky
$ git checkout -b my-&DISTRO_NAME;-&POKYVERSION; &DISTRO_NAME;-&POKYVERSION;
$ git checkout &DISTRO_NAME;-&POKYVERSION; -b &DISTRO_NAME;-&POKYVERSION;
</literallayout>
In this example, the name of the top-level directory of your local Yocto Project
Files Git repository is <filename>poky</filename>.
And, the name of the local branch you have created and checked out is
<filename>my-&DISTRO_NAME;-&POKYVERSION;</filename>.
<filename>&DISTRO_NAME;-&POKYVERSION;</filename>.
The files in your repository now exactly match the Yocto Project &DISTRO;
Release tag (<filename>&DISTRO_NAME;-&POKYVERSION;</filename>).
Release tag (&DISTRO_NAME;).
It is important to understand that when you create and checkout a local
working branch based on a tag, your environment matches a specific point
in time and not a development branch.
@@ -874,53 +853,54 @@
<listitem><para>Submit the bug by clicking the "Submit Bug" button.</para></listitem>
</orderedlist>
</para>
<note>
Bugs in the Yocto Project Bugzilla follow a naming convention:
<filename>[YOCTO #&lt;number&gt;]</filename>, where <filename>&lt;number&gt;</filename> is the
assigned defect ID used in Bugzilla.
So, for example, a valid way to refer to a defect would be <filename>[YOCTO #1011]</filename>.
This convention becomes important if you are submitting patches against the Yocto Project
code itself.
</note>
</section>
<section id='how-to-submit-a-change'>
<title>How to Submit a Change</title>
<para>
Contributions to the Yocto Project and OpenEmbedded are very welcome.
Because the system is extremely configurable and flexible, we recognize that developers
Contributions to the Yocto Project are very welcome.
Because the Yocto Project is extremely configurable and flexible, we recognize that developers
will want to extend, configure or optimize it for their specific uses.
You should send patches to the appropriate mailing list so that they
can be reviewed and merged by the appropriate maintainer.
For a list of the Yocto Project and related mailing lists, see the
You should send patches to the appropriate Yocto Project mailing list to get them
in front of the Yocto Project Maintainer.
For a list of the Yocto Project mailing lists, see the
"<ulink url='&YOCTO_DOCS_REF_URL;#resources-mailinglist'>Mailing lists</ulink>" section in
the Yocto Project Reference Manual.
The Yocto Project Reference Manual.
</para>
<para>
The following is some guidance on which mailing list to use for what type of change:
The following is some guidance on which mailing list to use for what type of defect:
<itemizedlist>
<listitem><para>For changes to the core metadata, send your patch to the
<ulink url='&OE_LISTS_URL;/listinfo/openembedded-core'>openembedded-core</ulink> mailing list.
For example, a change to anything under the <filename>meta</filename> or
<filename>scripts</filename> directories
should be sent to this mailing list.</para></listitem>
<listitem><para>For changes to BitBake (anything under the <filename>bitbake</filename>
directory), send your patch to the
<ulink url='&OE_LISTS_URL;/listinfo/bitbake-devel'>bitbake-devel</ulink> mailing list.</para></listitem>
<listitem><para>For changes to <filename>meta-yocto</filename>, send your patch to the
<ulink url='&YOCTO_LISTS_URL;/listinfo/poky'>poky</ulink> mailing list.</para></listitem>
<listitem><para>For changes to other layers hosted on yoctoproject.org (unless the
layer's documentation specifies otherwise), tools, and Yocto Project
documentation, use the
<ulink url='&YOCTO_LISTS_URL;/listinfo/yocto'>yocto</ulink> mailing list.</para></listitem>
<listitem><para>For additional recipes that do not fit into the core metadata,
you should determine which layer the recipe should go into and submit the
change in the manner recommended by the documentation (e.g. README) supplied
with the layer. If in doubt, please ask on the
<ulink url='&YOCTO_LISTS_URL;/listinfo/yocto'>yocto</ulink> or
<ulink url='&OE_LISTS_URL;/listinfo/openembedded-devel'>openembedded-devel</ulink>
mailing lists.</para></listitem>
<listitem><para>For defects against the Yocto Project build system Poky, send
your patch to the
<ulink url='&YOCTO_LISTS_URL;/listinfo/poky'></ulink> mailing list.
This mailing list corresponds to issues that are not specific to the Yocto Project but
are part of the OE-core.
For example, a defect against anything in the <filename>meta</filename> layer
or the BitBake Manual could be sent to this mailing list.</para></listitem>
<listitem><para>For defects against Yocto-specific layers, tools, and Yocto Project
documentation use the
<ulink url='&YOCTO_LISTS_URL;/listinfo/yocto'></ulink> mailing list.
This mailing list corresponds to Yocto-specific areas such as
<filename>meta-yocto</filename>, <filename>meta-intel</filename>,
<filename>linux-yocto</filename>, and <filename>documentation</filename>.</para></listitem>
</itemizedlist>
</para>
<para>
When you send a patch, be sure to include a "Signed-off-by:"
line in the same style as required by the Linux kernel.
Adding this line signifies that you, the submitter, have agreed to the Developer's Certificate of Origin 1.1
Adding this line signifies the developer has agreed to the Developer's Certificate of Origin 1.1
as follows:
<literallayout class='monospaced'>
Developer's Certificate of Origin 1.1
@@ -949,52 +929,52 @@
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
</literallayout>
A Poky contributions tree (<filename>poky-contrib</filename>,
<filename>git://git.yoctoproject.org/poky-contrib.git</filename>)
exists for contributors to stage contributions.
If people desire such access, please ask on the mailing list.
Usually, the Yocto Project team will grant access to anyone with a proven track
record of good patches.
</para>
<para>
In a collaborative environment, it is necessary to have some sort of standard
or method through which you submit changes.
Otherwise, things could get quite chaotic.
One general practice to follow is to make small, controlled changes.
Keeping changes small and isolated aids review, makes merging/rebasing easier
and keeps the change history clean when anyone needs to refer to it in future.
One general practice to follow is to make small, controlled changes to the
Yocto Project.
Keeping changes small and isolated lets you best keep pace with future Yocto Project changes.
</para>
<para>
When you make a commit, you must follow certain standards established by the
OpenEmbedded and Yocto Project development teams.
For each commit, you must provide a single-line summary of the change and you
should almost always provide a more detailed description of what you did (i.e.
the body of the commit message).
When you create a commit, you must follow certain standards established by the
Yocto Project development team.
For each commit, you must provide a single-line summary of the change and you
almost always provide a more detailed description of what you did (i.e. the body
of the commit).
The only exceptions for not providing a detailed description would be if your
change is a simple, self-explanatory change that needs no description.
Here are the guidelines for composing a commit message:
Here are the Yocto Project commit message guidelines:
<itemizedlist>
<listitem><para>Provide a single-line, short summary of the change.
This summary is typically viewable in the "shortlist" of changes.
This summary is typically viewable by source control systems.
Thus, providing something short and descriptive that gives the reader
a summary of the change is useful when viewing a list of many commits.
This should be prefixed by the recipe name (if changing a recipe), or
else the short form path to the file being changed.
</para></listitem>
<listitem><para>For the body of the commit message, provide detailed information
that describes what you changed, why you made the change, and the approach
you used. It may also be helpful if you mention how you tested the change.
you used.
Provide as much detail as you can in the body of the commit message.
</para></listitem>
<listitem><para>If the change addresses a specific bug or issue that is
associated with a bug-tracking ID, include a reference to that ID in
your detailed description.
For example, the Yocto Project uses a specific convention for bug
references - any commit that addresses a specific bug should include the
bug ID in the description (typically at the beginning) as follows:
associated with a bug-tracking ID, prefix your detailed description
with the bug or issue ID.
For example, the Yocto Project tracks bugs using a bug-naming convention.
Any commits that address a bug must start with the bug ID in the description
as follows:
<literallayout class='monospaced'>
[YOCTO #&lt;bug-id&gt;]
&lt;detailed description of change&gt;
YOCTO #&lt;bug-id&gt;: &lt;Detailed description of commit&gt;
</literallayout></para></listitem>
Where &lt;bug-id&gt; is replaced with the specific bug ID from the
Yocto Project Bugzilla instance.
</itemizedlist>
</para>
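            <para>
                As an illustration only, a commit message that follows these guidelines might look
                like the following sketch; the recipe name, bug number, author, and described change
                are all hypothetical:
                <literallayout class='monospaced'>
     foo: fix build failure against newer zlib

     [YOCTO #1234]

     The do_compile task failed when a newer zlib was present because the
     recipe passed an obsolete configure option.
     Drop the option and rely on the default detection instead.

     Signed-off-by: Maria Developer &lt;maria@example.com&gt;
                </literallayout>
            </para>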
@@ -1005,22 +985,21 @@
</para>
<para>
Following are general instructions for both pushing changes upstream and for submitting
changes as patches.
Following are general instructions for both pushing changes upstream and for submitting changes as patches.
</para>
<section id='pushing-a-change-upstream'>
<title>Using Scripts to Push a Change Upstream and Request a Pull</title>
<title>Pushing a Change Upstream and Requesting a Pull</title>
<para>
The basic flow for pushing a change to an upstream "contrib" Git repository is as follows:
<itemizedlist>
<listitem><para>Make your changes in your local Git repository.</para></listitem>
<listitem><para>Stage your changes by using the <filename>git add</filename>
command on each file you changed.</para></listitem>
<listitem><para>Stage your commit (or change) by using the <filename>git add</filename>
command.</para></listitem>
<listitem><para>Commit the change by using the <filename>git commit</filename>
command and push it to the "contrib" repository.
Be sure to provide a commit message that follows the project's commit message standards
Be sure to provide a commit message that follows the project's commit standards
as described earlier.</para></listitem>
<listitem><para>Notify the maintainer that you have pushed a change by making a pull
request.
@@ -1030,16 +1009,11 @@
<filename>send-pull-request</filename>.
You can find these scripts in the <filename>scripts</filename> directory of the
Yocto Project file structure.</para>
<para>Using these scripts correctly formats the requests without introducing any
whitespace or HTML formatting.
The maintainer that receives your patches needs to be able to save and apply them
directly from your emails.
Using these scripts is the preferred method for sending patches.</para>
<para>For help on using these scripts, simply provide the
<filename>-h</filename> argument as follows:
<filename>--help</filename> argument as follows:
<literallayout class='monospaced'>
$ ~/poky/scripts/create-pull-request -h
$ ~/poky/scripts/send-pull-request -h
$ ~/poky/scripts/create-pull-request --help
$ ~/poky/scripts/send-pull-request --help
</literallayout></para></listitem>
</itemizedlist>
</para>
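                <para>
                    The following sketch ties these steps together.
                    The file, branch, and remote names are placeholders; check each script's
                    <filename>-h</filename> output for the options it actually accepts:
                    <literallayout class='monospaced'>
     $ git add meta/recipes-example/example/example_0.1.bb
     $ git commit --signoff
     $ git push &lt;contrib-remote&gt; &lt;local-branch&gt;:&lt;contrib-branch&gt;
     $ ~/poky/scripts/create-pull-request -h     # review the options, then generate and send the pull request
                    </literallayout>
                </para>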
@@ -1051,32 +1025,16 @@
</section>
<section id='submitting-a-patch'>
<title>Using Email to Submit a Patch</title>
<title>Submitting a Patch Through Email</title>
<para>
You can submit patches without using the <filename>create-pull-request</filename> and
<filename>send-pull-request</filename> scripts described in the previous section.
Keep in mind, the preferred method is to use the scripts, however.
</para>
<para>
Depending on the components changed, you need to submit the email to a specific
mailing list.
For some guidance on which mailing list to use, see the list in the
"<link linkend='how-to-submit-a-change'>How to Submit a Change</link>" section
earlier in this manual.
For a description of the available mailing lists, see
"<ulink url='&YOCTO_DOCS_REF_URL;#resources-mailinglist'>Mailing Lists</ulink>"
section in the Yocto Project Reference Manual.
</para>
<para>
Here is the general procedure on how to submit a patch through email without using the
scripts:
If you have just a few changes, you can commit them and then submit them as an
email to the maintainer.
Here is a general procedure:
<itemizedlist>
<listitem><para>Make your changes in your local Git repository.</para></listitem>
<listitem><para>Stage your changes by using the <filename>git add</filename>
command on each file you changed.</para></listitem>
<listitem><para>Stage your commit (or change) by using the <filename>git add</filename>
command.</para></listitem>
<listitem><para>Commit the change by using the
<filename>git commit --signoff</filename> command.
Using the <filename>--signoff</filename> option identifies you as the person
@@ -1109,10 +1067,7 @@
the series of patches.
For information on the <filename>git format-patch</filename> command,
see <filename>GIT_FORMAT_PATCH(1)</filename> displayed using the
<filename>man git-format-patch</filename> command.</para>
<note>If you are or will be a frequent contributor to the Yocto Project
or to OpenEmbedded, you might consider requesting a contrib area and the
necessary associated rights.</note></listitem>
<filename>man git-format-patch</filename> command.</para></listitem>
<listitem><para>Import the files into your mail client by using the
<filename>git send-email</filename> command.
<note>In order to use <filename>git send-email</filename>, you must have the

View File

@@ -9,8 +9,9 @@
<para>
This chapter introduces the Yocto Project and gives you an idea of what you need to get started.
You can find enough information to set up your development host and build or use images for
hardware supported by the Yocto Project by reading the
<ulink url='&YOCTO_DOCS_QS_URL;'>Yocto Project Quick Start</ulink>.
hardware supported by the Yocto Project by reading
<ulink url='&YOCTO_DOCS_QS_URL;'>
The Yocto Project Quick Start</ulink>.
</para>
<para>
@@ -23,16 +24,14 @@
<para>
The Yocto Project is an open-source collaboration project focused on embedded Linux development.
The project currently provides a build system, which is
referred to as the OpenEmbedded build system in the Yocto Project documentation.
The Yocto Project provides various ancillary tools suitable for the embedded developer
and also features the Sato reference User Interface, which is optimized for
The project currently provides a build system, which is sometimes referred to as "Poky",
and provides various ancillary tools suitable for the embedded developer.
The Yocto Project also features the Sato reference User Interface, which is optimized for
stylus driven, low-resolution screens.
</para>
<para>
You can use the OpenEmbedded build system, which uses
<ulink url='&BITBAKE_DOCS_URL;'>BitBake</ulink>, to develop complete Linux
You can use the Yocto Project, which uses the BitBake build tool, to develop complete Linux
images and associated user-space applications for architectures based on ARM, MIPS, PowerPC,
x86 and x86-64.
While the Yocto Project does not provide a strict testing framework,
@@ -53,50 +52,56 @@
<listitem><para><emphasis>Host System:</emphasis> You should have a reasonably current
Linux-based host system.
You will have the best results with a recent release of Fedora,
OpenSUSE, Ubuntu, or CentOS as these releases are frequently tested against the Yocto Project
OpenSUSE, or Ubuntu as these releases are frequently tested against the Yocto Project
and officially supported.
You should also have about 100 gigabytes of free disk space for building images.
</para></listitem>
<listitem><para><emphasis>Packages:</emphasis> The OpenEmbedded build system
requires certain packages exist on your development system (e.g. Python 2.6 or 2.7).
<listitem><para><emphasis>Packages:</emphasis> The Yocto Project requires certain packages
exist on your development system (e.g. Python 2.6 or 2.7).
See "<ulink url='&YOCTO_DOCS_QS_URL;#packages'>The Packages</ulink>"
section in the Yocto Project Quick Start for the exact package
section in the Yocto Project Quick start for the exact package
requirements and the installation commands to install them
for the supported distributions.</para></listitem>
<listitem id='local-yp-release'><para><emphasis>Yocto Project Release:</emphasis>
You need a release of the Yocto Project.
You can set up a local <link linkend='source-directory'>source directory</link> in
one of two ways, depending on whether you
are going to contribute back into the Yocto Project or not.
You can get set up with local
<link linkend='yocto-project-files'>Yocto Project Files</link> one of two ways
depending on whether you
are going to be contributing back into the Yocto Project source repository or not.
<note>
Regardless of the method you use, this manual refers to the resulting local
hierarchical set of files as the "source directory."
Regardless of the method you use, this manual refers to the resulting
hierarchical set of files as the "Yocto Project Files" or the "Yocto Project File
Structure."
</note>
<itemizedlist>
<listitem><para><emphasis>Tarball Extraction:</emphasis> If you are not going to contribute
back into the Yocto Project, you can simply download a Yocto Project release you want
back into the Yocto Project, you can simply download the Yocto Project release you want
from the websites <ulink url='&YOCTO_HOME_URL;/download'>download page</ulink>.
Once you have the tarball, just extract it into a directory of your choice.</para>
<para>For example, the following command extracts the Yocto Project &DISTRO;
release tarball
into the current working directory and sets up the local source directory
with a top-level folder named <filename>&YOCTO_POKY;</filename>:
into the current working directory and sets up the Yocto Project file structure
with a top-level directory named <filename>&YOCTO_POKY;</filename>:
<literallayout class='monospaced'>
$ tar xfj &YOCTO_POKY_TARBALL;
</literallayout></para>
<para>This method does not produce a local Git repository.
Instead, you simply end up with a snapshot of the release.</para></listitem>
<para>This method does not produce a Git repository.
Instead, you simply end up with a local snapshot of the
Yocto Project files that are based on the particular release in the
tarball.</para></listitem>
<listitem><para><emphasis>Git Repository Method:</emphasis> If you are going to be contributing
back into the Yocto Project or you simply want to keep up
with the latest developments, you should use Git commands to set up a local
Git repository of the upstream <filename>poky</filename> source repository.
Doing so creates a repository with a complete history of changes and allows
Git repository of the Yocto Project Files.
Doing so creates a Git repository with a complete history of changes and allows
you to easily submit your changes upstream to the project.
Because you cloned the repository, you have access to all the Yocto Project development
branches and tag names used in the upstream repository.</para>
<para>The following transcript shows how to clone the <filename>poky</filename>
<para>The following transcript shows how to clone the Yocto Project Files'
Git repository into the current working directory.
<note>You can view the Yocto Project Source Repositories at
<note>The name of the Yocto Project Files Git repository in the Yocto Project Files
Source Repositories is <filename>poky</filename>.
You can view the Yocto Project Source Repositories at
<ulink url='&YOCTO_GIT_URL;/cgit.cgi'></ulink></note>
The command creates the local repository in a directory named <filename>poky</filename>.
For information on Git used within the Yocto Project, see the
@@ -104,52 +109,52 @@
<literallayout class='monospaced'>
$ git clone git://git.yoctoproject.org/poky
Initialized empty Git repository in /home/scottrif/poky/.git/
remote: Counting objects: 141863, done.
remote: Compressing objects: 100% (38624/38624), done.
remote: Total 141863 (delta 99661), reused 141816 (delta 99614)
Receiving objects: 100% (141863/141863), 76.64 MiB | 126 KiB/s, done.
Resolving deltas: 100% (99661/99661), done.
remote: Counting objects: 116882, done.
remote: Compressing objects: 100% (35987/35987), done.
remote: Total 116882 (delta 80651), reused 113045 (delta 77578)
Receiving objects: 100% (116882/116882), 72.13 MiB | 2.68 MiB/s, done.
Resolving deltas: 100% (80651/80651), done.
</literallayout></para>
<para>For another example of how to set up your own local Git repositories, see this
<ulink url='&YOCTO_WIKI_URL;/wiki/Transcript:_from_git_checkout_to_meta-intel_BSP'>
wiki page</ulink>, which describes how to create both <filename>poky</filename>
and <filename>meta-intel</filename> Git repositories.</para></listitem>
</itemizedlist></para></listitem>
<listitem id='local-kernel-files'><para><emphasis>Yocto Project Kernel:</emphasis>
If you are going to be making modifications to a supported Yocto Project kernel, you
<listitem id='local-kernel-files'><para><emphasis>Linux Yocto Kernel:</emphasis>
If you are going to be making modifications to a supported Linux Yocto kernel, you
need to establish local copies of the source.
You can find Git repositories of supported Yocto Project Kernels organized under
"Yocto Project Linux Kernel" in the Yocto Project Source Repositories at
You can find Git repositories of supported Linux Yocto Kernels organized under
"Yocto Linux Kernel" in the Yocto Project Source Repositories at
<ulink url='&YOCTO_GIT_URL;/cgit.cgi'></ulink>.</para>
<para>This setup involves creating a bare clone of the Yocto Project kernel and then
<para>This setup involves creating a bare clone of the Linux Yocto kernel and then
copying that cloned repository.
You can create the bare clone and the copy of the bare clone anywhere you like.
For simplicity, it is recommended that you create these structures outside of the
source directory (usually <filename>poky</filename>).</para>
Yocto Project Files Git repository.</para>
<para>As an example, the following transcript shows how to create the bare clone
of the <filename>linux-yocto-3.2</filename> kernel and then create a copy of
of the <filename>linux-yocto-3.0-1.1.x</filename> kernel and then create a copy of
that clone.
<note>When you have a local Yocto Project kernel Git repository, you can
<note>When you have a local Linux Yocto kernel Git repository, you can
reference that repository rather than the upstream Git repository as
part of the <filename>clone</filename> command.
Doing so can speed up the process.</note></para>
<para>In the following example, the bare clone is named
<filename>linux-yocto-3.2.git</filename>, while the
copy is named <filename>my-linux-yocto-3.2-work</filename>:
<filename>linux-yocto-3.0-1.1.x.git</filename>, while the
copy is named <filename>my-linux-yocto-3.0-1.1.x-work</filename>:
<literallayout class='monospaced'>
$ git clone --bare git://git.yoctoproject.org/linux-yocto-3.2 linux-yocto-3.2.git
Initialized empty Git repository in /home/scottrif/linux-yocto-3.2.git/
remote: Counting objects: 2468027, done.
remote: Compressing objects: 100% (392255/392255), done.
remote: Total 2468027 (delta 2071693), reused 2448773 (delta 2052498)
Receiving objects: 100% (2468027/2468027), 530.46 MiB | 129 KiB/s, done.
Resolving deltas: 100% (2071693/2071693), done.
$ git clone --bare git://git.yoctoproject.org/linux-yocto-3.0-1.1.x linux-yocto-3.0-1.1.x.git
Initialized empty Git repository in /home/scottrif/linux-yocto-3.0-1.1.x.git/
remote: Counting objects: 2259181, done.
remote: Compressing objects: 100% (373259/373259), done.
remote: Total 2259181 (delta 1892638), reused 2231556 (delta 1865300)
Receiving objects: 100% (2259181/2259181), 482.44 MiB | 580 KiB/s, done.
Resolving deltas: 100% (1892638/1892638), done.
</literallayout></para>
<para>Now create a clone of the bare clone just created:
<literallayout class='monospaced'>
$ git clone linux-yocto-3.2.git my-linux-yocto-3.2-work
Initialized empty Git repository in /home/scottrif/my-linux-yocto-3.2-work/.git/
Checking out files: 100% (37619/37619), done.
$ git clone linux-yocto-3.0-1.1.x.git my-linux-yocto-3.0-1.1.x-work
Initialized empty Git repository in /home/scottrif/my-linux-yocto-3.0-1.1.x/.git/
Checking out files: 100% (36898/36898), done.
</literallayout></para></listitem>
<listitem id='poky-extras-repo'><para><emphasis>
The <filename>poky-extras</filename> Git Repository</emphasis>:
@@ -160,22 +165,24 @@
edit to point to your locally modified kernel source files and to build the kernel
image.
Pointing to these local files is much more efficient than requiring a download of the
kernel's source files from upstream each time you make changes to the kernel.</para>
source files from upstream each time you make changes to the kernel.</para>
<para>You can find the <filename>poky-extras</filename> Git Repository in the
"Yocto Metadata Layers" area of the Yocto Project Source Repositories at
<ulink url='&YOCTO_GIT_URL;/cgit.cgi'></ulink>.
It is good practice to create this Git repository inside the source directory.</para>
It is good practice to create this Git repository inside the Yocto Project
files Git repository.</para>
<para>Following is an example that creates the <filename>poky-extras</filename> Git
repository inside the source directory, which is named <filename>poky</filename>
in this case:
repository inside the Yocto Project files Git repository, which is named
<filename>poky</filename> in this case:
<literallayout class='monospaced'>
$ cd ~/poky
$ git clone git://git.yoctoproject.org/poky-extras poky-extras
Initialized empty Git repository in /home/scottrif/poky/poky-extras/.git/
remote: Counting objects: 618, done.
remote: Compressing objects: 100% (558/558), done.
remote: Total 618 (delta 192), reused 307 (delta 39)
Receiving objects: 100% (618/618), 526.26 KiB | 111 KiB/s, done.
Resolving deltas: 100% (192/192), done.
remote: Counting objects: 561, done.
remote: Compressing objects: 100% (501/501), done.
remote: Total 561 (delta 159), reused 306 (delta 39)
Receiving objects: 100% (561/561), 519.96 KiB | 479 KiB/s, done.
Resolving deltas: 100% (159/159), done.
</literallayout></para></listitem>
<listitem><para id='supported-board-support-packages-(bsps)'><emphasis>Supported Board
Support Packages (BSPs):</emphasis>
@@ -187,7 +194,7 @@
layer.
You can get set up for BSP development one of two ways: tarball extraction or
with a local Git repository.
It is a good idea to use the same method that you used to set up the source directory.
It is a good idea to use the same method used to set up the Yocto Project Files.
Regardless of the method you use, the Yocto Project uses the following BSP layer
naming scheme:
<literallayout class='monospaced'>
@@ -213,24 +220,25 @@
Again, this method just produces a snapshot of the BSP layer in the form
of a hierarchical directory structure.</para></listitem>
<listitem><para><emphasis>Git Repository Method:</emphasis> If you are working
with a local Git repository for your source directory, you should also use this method
with a Yocto Project Files Git repository, you should also use this method
to set up the <filename>meta-intel</filename> Git repository.
You can locate the <filename>meta-intel</filename> Git repository in the
"Yocto Metadata Layers" area of the Yocto Project Source Repositories at
<ulink url='&YOCTO_GIT_URL;/cgit.cgi'></ulink>.</para>
<para>Typically, you set up the <filename>meta-intel</filename> Git repository inside
the source directory.
the Yocto Project Files Git repository.
For example, the following transcript shows the steps to clone the
<filename>meta-intel</filename>
Git repository inside the local <filename>poky</filename> Git repository.
Git repository inside the <filename>poky</filename> Git repository.
<literallayout class='monospaced'>
$ cd poky
$ git clone git://git.yoctoproject.org/meta-intel.git
Initialized empty Git repository in /home/scottrif/poky/meta-intel/.git/
remote: Counting objects: 3380, done.
remote: Compressing objects: 100% (2750/2750), done.
remote: Total 3380 (delta 1689), reused 227 (delta 113)
Receiving objects: 100% (3380/3380), 1.77 MiB | 128 KiB/s, done.
Resolving deltas: 100% (1689/1689), done.
remote: Counting objects: 1325, done.
remote: Compressing objects: 100% (1078/1078), done.
remote: Total 1325 (delta 546), reused 85 (delta 27)
Receiving objects: 100% (1325/1325), 1.56 MiB | 330 KiB/s, done.
Resolving deltas: 100% (546/546), done.
</literallayout></para>
<para>The same
<ulink url='&YOCTO_WIKI_URL;/wiki/Transcript:_from_git_checkout_to_meta-intel_BSP'>
@@ -241,8 +249,9 @@
applications using the Eclipse Integrated Development Environment (IDE),
you will need this plug-in.
See the
"<link linkend='setting-up-the-eclipse-ide'>Setting up the Eclipse IDE</link>"
section for more information.</para></listitem>
"<ulink url='&YOCTO_DOCS_ADT_URL;#setting-up-the-eclipse-ide'>Setting up the Eclipse IDE</ulink>"
section in the Yocto Application Development Toolkit (ADT)
Users Guide for more information.</para></listitem>
</itemizedlist>
</para>
</section>
@@ -260,13 +269,13 @@
<para>
The build process is as follows:
<orderedlist>
<listitem><para>Make sure you have set up the source directory described in the
<listitem><para>Make sure you have the Yocto Project files as described in the
previous section.</para></listitem>
<listitem><para>Initialize the build environment by sourcing a build environment
script.</para></listitem>
<listitem><para>Optionally ensure the <filename>conf/local.conf</filename> configuration file,
<listitem><para>Optionally ensure the <filename>/conf/local.conf</filename> configuration file,
which is found in the
<link linkend='build-directory'>build directory</link>,
<link linkend='yocto-project-build-directory'>Yocto Project Build Directory</link>,
is set up how you want it.
This file defines many aspects of the build environment including
the target machine architecture through the
@@ -289,93 +298,20 @@
<title>Using Pre-Built Binaries and QEMU</title>
<para>
Another option you have to get started is to use pre-built binaries.
The Yocto Project provides many types of binaries with each release.
See the <ulink url='&YOCTO_DOCS_REF_URL;#ref-images'>Images</ulink>
chapter in the Yocto Project Reference Manual
for descriptions of the types of binaries that ship with a Yocto Project
release.
Another option you have to get started is to use pre-built binaries.
This scenario is ideal for developing software applications to run on your target hardware.
To do this, you need to install the stand-alone Yocto Project cross-toolchain tarball and
then download the pre-built kernel that you will boot in the QEMU emulator.
Next, you must download and extract the target root filesystem for your target
machine's architecture.
Finally, you set up the environment to emulate the hardware and then start the QEMU emulator.
</para>
<para>
Using a pre-built binary is ideal for developing software applications to run on your
target hardware.
To do this, you need to be able to access the appropriate cross-toolchain tarball for
the architecture on which you are developing.
If you are using an SDK type image, the image ships with the complete toolchain native to
the architecture.
If you are not using an SDK type image, you need to separately download and
install the stand-alone Yocto Project cross-toolchain tarball.
</para>
<para>
Regardless of the type of image you are using, you need to download the pre-built kernel
that you will boot in the QEMU emulator and then download and extract the target root
filesystem for your target machine's architecture.
You can get architecture-specific binaries and filesystem from
<ulink url='&YOCTO_MACHINES_DL_URL;'>machines</ulink>.
You can get stand-alone toolchains from
<ulink url='&YOCTO_TOOLCHAIN_DL_URL;'>toolchains</ulink>.
Once you have all your files, you set up the environment to emulate the hardware
by sourcing an environment setup script.
Finally, you start the QEMU emulator.
You can find details on all these steps in the
"<ulink url='&YOCTO_DOCS_QS_URL;#using-pre-built'>Using Pre-Built Binaries and QEMU</ulink>"
section of the Yocto Project Quick Start.
</para>
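        <para>
            Assuming placeholder names for the cross-toolchain tarball, kernel image, and root
            filesystem (actual file names vary by release and architecture), the overall flow
            looks roughly like the following sketch:
            <literallayout class='monospaced'>
     $ tar xjf &lt;cross-toolchain-tarball&gt;.tar.bz2 -C /     # the tarball typically unpacks under /opt/poky
     $ source /opt/poky/&lt;version&gt;/environment-setup-&lt;arch&gt;-poky-linux
     $ runqemu qemux86 &lt;kernel-image&gt; &lt;root-filesystem-image&gt;
            </literallayout>
        </para>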
<para>
Using QEMU to emulate your hardware can result in speed issues
depending on the target and host architecture mix.
For example, using the <filename>qemux86</filename> image in the emulator
on an Intel-based 32-bit (x86) host machine is fast because the target and
host architectures match.
On the other hand, using the <filename>qemuarm</filename> image on the same Intel-based
host can be slower.
But, you still achieve faithful emulation of ARM-specific issues.
</para>
<para>
To speed things up, the QEMU images support using <filename>distcc</filename>
to call a cross-compiler outside the emulated system.
If you used <filename>runqemu</filename> to start QEMU, and the
<filename>distccd</filename> application is present on the host system, any
BitBake cross-compiling toolchain available from the build system is automatically
used from within QEMU simply by calling <filename>distcc</filename>.
You can accomplish this by defining the cross-compiler variable
(e.g. <filename>export CC="distcc"</filename>).
Alternatively, if you are using a suitable SDK image or the appropriate
stand-alone toolchain is present in <filename>/opt/poky</filename>,
the toolchain is also automatically used.
</para>
<note>
Several mechanisms exist that let you connect to the system running on the
QEMU emulator:
<itemizedlist>
<listitem><para>QEMU provides a framebuffer interface that makes standard
consoles available.</para></listitem>
<listitem><para>Generally, headless embedded devices have a serial port.
If so, you can configure the operating system of the running image
to use that port to run a console.
The connection uses standard IP networking.</para></listitem>
<listitem><para>SSH servers exist in some QEMU images.
The <filename>core-image-sato</filename> QEMU image has a Dropbear secure
shell (ssh) server that runs with the root password disabled.
The <filename>core-image-basic</filename> and <filename>core-image-lsb</filename> QEMU images
have OpenSSH instead of Dropbear.
Including these SSH servers allows you to use standard <filename>ssh</filename> and
<filename>scp</filename> commands.
The <filename>core-image-minimal</filename> QEMU image, however, contains no ssh
server.</para></listitem>
<listitem><para>You can use a provided, user-space NFS server to boot the QEMU session
using a local copy of the root filesystem on the host.
In order to make this connection, you must extract a root filesystem tarball by using the
<filename>runqemu-extract-sdk</filename> command.
After running the command, you must then point the <filename>runqemu</filename>
script to the extracted directory instead of a root filesystem image file.</para></listitem>
</itemizedlist>
</note>
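        <para>
            A rough sketch of the NFS approach described in the last item of the note above
            (the image and directory names are placeholders):
            <literallayout class='monospaced'>
     $ runqemu-extract-sdk &lt;root-filesystem-tarball&gt; &lt;extract-dir&gt;
     $ runqemu qemux86 &lt;kernel-image&gt; &lt;extract-dir&gt;
            </literallayout>
        </para>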
</section>
</chapter>
<!--

View File

@@ -36,13 +36,8 @@
</revision>
<revision>
<revnumber>1.2</revnumber>
<date>April 2012</date>
<revremark>Released with the Yocto Project 1.2 Release.</revremark>
</revision>
<revision>
<revnumber>1.3</revnumber>
<date>Sometime in 2012</date>
<revremark>Released with the Yocto Project 1.3 Release.</revremark>
<date>TBD 2012</date>
<revremark>Work in progress for the Yocto Project 1.2 Release.</revremark>
</revision>
</revhistory>
@@ -61,8 +56,9 @@
<note>
Due to production processes, there could be differences between the Yocto Project
documentation bundled in the release tarball and the
<ulink url='&YOCTO_DOCS_DEV_URL;'>Yocto Project Development Manual</ulink> on
documentation bundled in the release tarball and
<ulink url='&YOCTO_DOCS_DEV_URL;'>
The Yocto Project Development Manual</ulink> on
the <ulink url='&YOCTO_HOME_URL;'>Yocto Project</ulink> website.
For the latest version of this manual, see the manual on the website.
</note>

Binary image files changed (one modified, two removed, two added); content not shown.

View File

@@ -110,7 +110,7 @@ h5 {
h6 {
margin: 1em 0em 0em 0em;
padding: 1em 0em 0em 0em;
font-size: 110%;
font-size: 80%;
font-weight: bold;
}

Binary image file changed (14 KiB before and after); content not shown.

View File

@@ -9,10 +9,10 @@
<section id='concepts-org'>
<title>Introduction</title>
<para>
This chapter provides conceptual information about the kernel:
This chapter provides conceptual information about the Yocto Project kernel:
<itemizedlist>
<listitem><para>Kernel Goals</para></listitem>
<listitem><para>Kernel Development and Maintenance Overview</para></listitem>
<listitem><para>Yocto Project Kernel Development and Maintenance Overview</para></listitem>
<listitem><para>Kernel Architecture</para></listitem>
<listitem><para>Kernel Tools</para></listitem>
</itemizedlist>
@@ -24,9 +24,9 @@
<para>
The complexity of embedded kernel design has increased dramatically.
Whether it is managing multiple implementations of a particular feature or tuning and
optimizing board specific features, both flexibility and maintainability are key concerns.
The Linux kernels available through the Yocto Project are presented with the embedded
developer's needs in mind and have evolved to assist in these key concerns.
optimizing board specific features, flexibility and maintainability are key concerns.
The Yocto Project Linux kernel is presented with the embedded
developer's needs in mind and has evolved to assist in these key concerns.
For example, prior methods such as applying hundreds of patches to an extracted
tarball have been replaced with proven techniques that allow easy inspection,
bisection and analysis of changes.
@@ -34,7 +34,7 @@
collaboration with the thousands of upstream development projects.
</para>
<para>
With all these considerations in mind, the Yocto Project's kernel and development team
With all these considerations in mind, the Yocto Project kernel and development team
strives to attain these goals:
<itemizedlist>
<listitem><para>Allow the end user to leverage community best practices to seamlessly
@@ -45,7 +45,7 @@
management techniques.</para></listitem>
<listitem><para>Deliver the most up-to-date kernel possible while still ensuring that
the baseline kernel is the most stable official release.</para></listitem>
<listitem><para>Include major technological features as part of the Yocto Project's
<listitem><para>Include major technological features as part of Yocto Project's
upward revision strategy.</para></listitem>
<listitem><para>Present a kernel Git repository that, similar to the upstream
<filename>kernel.org</filename> tree,
@@ -63,12 +63,12 @@
<section id='kernel-big-picture'>
<title>Yocto Project Kernel Development and Maintenance Overview</title>
<para>
Kernels available through the Yocto Project, like other kernels, are based off the Linux
kernel releases from <ulink url='http://www.kernel.org'></ulink>.
The Yocto Project kernel, like other kernels, is based off the Linux kernel release
from <ulink url='http://www.kernel.org'></ulink>.
At the beginning of a major development cycle, the Yocto Project team
chooses its kernel based on factors such as release timing, the anticipated release
timing of final upstream <filename>kernel.org</filename> versions, and Yocto Project
feature requirements.
chooses its Yocto Project kernel
based on factors like release timing, the anticipated release timing of final
upstream <filename>kernel.org</filename> versions, and Yocto Project feature requirements.
Typically, the kernel chosen is in the
final stages of development by the community.
In other words, the kernel is in the release
@@ -80,21 +80,21 @@
<para>
This balance allows the team to deliver the most up-to-date kernel
as possible, while still ensuring that the team has a stable official release for
the baseline Linux kernel version.
the baseline kernel version.
</para>
<para>
The ultimate source for kernels available through the Yocto Project are released kernels
The ultimate source for the Yocto Project kernel is a released kernel
from <filename>kernel.org</filename>.
In addition to a foundational kernel from <filename>kernel.org</filename>, the
kernels available contain a mix of important new mainline
In addition to a foundational kernel from <filename>kernel.org</filename>, the released
Yocto Project kernel contains a mix of important new mainline
developments, non-mainline developments (when there is no alternative),
Board Support Package (BSP) developments,
and custom features.
These additions result in a commercially released Yocto Project Linux kernel that caters
These additions result in a commercially released Yocto Project kernel that caters
to specific embedded designer needs for targeted hardware.
</para>
<para>
Once a kernel is officially released, the Yocto Project team goes into
Once a Yocto Project kernel is officially released, the Yocto Project team goes into
their next development cycle, or upward revision (uprev) cycle, while still
continuing maintenance on the released kernel.
It is important to note that the most sustainable and stable way
@@ -127,8 +127,8 @@
These policies result in both a stable and a cutting
edge kernel that mixes forward ports of existing features and significant and critical
new functionality.
Forward porting functionality in the kernels available through the Yocto Project kernel
can be thought of as a "micro uprev."
Forward porting functionality in the Yocto Project kernel can be thought of as a
"micro uprev."
The many “micro uprevs” produce a kernel version with a mix of
important new mainline, non-mainline, BSP developments and feature integrations.
This kernel gives insight into new features and allows focused
@@ -142,8 +142,7 @@
<section id='kernel-architecture'>
<title>Kernel Architecture</title>
<para>
This section describes the architecture of the kernels available through the
Yocto Project and provides information
This section describes the architecture of the Yocto Project kernel and provides information
on the mechanisms used to achieve that architecture.
</para>
@@ -157,7 +156,7 @@
upstream <filename>kernel.org</filename>.
</para>
<para>
You can think of a Yocto Project kernel as consisting of a baseline Linux kernel with
You can think of the Yocto Project kernel as consisting of a baseline kernel with
added features logically structured on top of the baseline.
The features are tagged and organized by way of a branching strategy implemented by the
source code manager (SCM) Git.
@@ -255,7 +254,7 @@
In other words, the divisions of the kernel are transparent and are not relevant
to the developer on a day-to-day basis.
From the developer's perspective, this path is the "master" branch.
The developer does not need to be aware of the existence of any other branches at all.
The developer does not need not be aware of the existence of any other branches at all.
Of course, there is value in the existence of these branches
in the tree, should a person decide to explore them.
For example, a comparison between two BSPs at either the commit level or at the line-by-line
@@ -293,7 +292,7 @@
"<ulink url='&YOCTO_DOCS_DEV_URL;#git'>Git</ulink>"
section in the Yocto Project Development Manual.
These referenced sections overview Git and describe a minimal set of
commands that allows you to be functional using Git.
commands that allow you to be functional using Git.
<note>
You can use as much, or as little, of what Git has to offer to accomplish what
you need for your project.
@@ -306,9 +305,9 @@
<section id='kernel-configuration'>
<title>Kernel Configuration</title>
<para>
Kernel configuration, along with kernel features, defines how a kernel
image is built for the Yocto Project.
Through configuration settings, you can customize a Yocto Project kernel to be
Kernel configuration, along with kernel features, defines how a Linux Yocto
kernel image is built.
Through configuration settings, you can customize a Linux Yocto kernel to be
specific to particular hardware.
For example, you can specify sound support or networking support.
This section describes basic concepts behind Kernel configuration within the
@@ -317,9 +316,9 @@
</para>
<para>
Conceptually, configuration of a Yocto Project kernel occurs similarly to that needed for any
Conceptually, Linux Yocto kernel configuration occurs similarly to that needed for any
Linux kernel.
The build process for a Yocto Project kernel uses a <filename>.config</filename> file, which
The Linux Yocto kernel build process uses a <filename>.config</filename> file, which
is created through the Linux Kernel Configuration (LKC) tool.
You can directly set various configurations in the
<filename>.config</filename> file by using the <filename>menuconfig</filename>
@@ -345,7 +344,7 @@
You can see how <filename>menuconfig</filename> is used to change a simple
kernel configuration in the
"<ulink url='&YOCTO_DOCS_DEV_URL;#changing-the-config-smp-configuration-using-menuconfig'>Changing the&nbsp;&nbsp;<filename>CONFIG_SMP</filename> Configuration Using&nbsp;&nbsp;<filename>menuconfig</filename></ulink>"
section of the Yocto Project Development Manual.
section of The Yocto Project Development Manual.
For general information on <filename>menuconfig</filename>, see
<ulink url='http://en.wikipedia.org/wiki/Menuconfig'></ulink>.
</para></listitem>
@@ -353,7 +352,7 @@
list of kernel options just as they would appear syntactically in the
<filename>.config</filename> file.
Configuration fragments are typically logical groupings and are assembled
by the OpenEmbedded build system to produce input used by the LKC
by the Yocto Project build system to produce input used by the LKC
that ultimately generates the <filename>.config</filename> file.</para>
<para>The
<filename><ulink url='&YOCTO_DOCS_REF_URL;#var-KERNEL_FEATURES'>KERNEL_FEATURES</ulink></filename>
@@ -385,6 +384,20 @@
with the <filename>kernel.org</filename> history and development.</para></listitem>
</itemizedlist>
</para>
<!--<para>
WRITER NOTE: Put this in for post 1.1 if possible:
The tools that construct a kernel tree will be discussed later in this
document. The following tools form the foundation of the Yocto Project
kernel toolkit:
<itemizedlist>
<listitem><para>git : distributed revision control system created by Linus Torvalds</para></listitem>
<listitem><para>guilt: quilt on top of git</para></listitem>
<listitem><para>*cfg : kernel configuration management and classification</para></listitem>
<listitem><para>kgit*: Yocto Project kernel tree creation and management tools</para></listitem>
<listitem><para>scc : series &amp; configuration compiler</para></listitem>
</itemizedlist>
</para> -->
</section>
</chapter>
<!--

View File

@@ -6,29 +6,29 @@
<title>Yocto Project Kernel Architecture and Use Manual</title>
<section id='kernel-intro-section'>
<section id='book-intro'>
<title>Introduction</title>
<para>
The Yocto Project presents kernels as a fully patched, history-clean Git
repositories.
Each repository represents selected features, board support,
The Yocto Project presents the kernel as a fully patched, history-clean Git
repository.
The Git tree represents the selected features, board support,
and configurations extensively tested by the Yocto Project.
Yocto Project kernels allow the end user to leverage community
The Yocto Project kernel allows the end user to leverage community
best practices to seamlessly manage the development, build and debug cycles.
</para>
<para>
This manual describes Yocto Project kernels by providing information
on history, organization, benefits, and use.
This manual describes the Yocto Project kernel by providing information
on its history, organization, benefits, and use.
The manual consists of two sections:
<itemizedlist>
<listitem><para><emphasis>Concepts:</emphasis> Describes concepts behind a kernel.
You will understand how a kernel is organized and why it is organized in
the way it is. You will understand the benefits of a kernel's organization
<listitem><para><emphasis>Concepts:</emphasis> Describes concepts behind the kernel.
You will understand how the kernel is organized and why it is organized in
the way it is. You will understand the benefits of the kernel's organization
and the mechanisms used to work with the kernel and how to apply it in your
design process.</para></listitem>
<listitem><para><emphasis>Using a Kernel:</emphasis> Describes best practices
<listitem><para><emphasis>Using the Kernel:</emphasis> Describes best practices
and "how-to" information
that lets you put a kernel to practical use.
that lets you put the kernel to practical use.
Some examples are how to examine changes in a branch and how to
save kernel modifications.</para></listitem>
</itemizedlist>
@@ -40,14 +40,14 @@
<listitem><para>The Linux Foundation's guide for kernel development
process - <ulink url='http://ldn.linuxfoundation.org/book/1-a-guide-kernel-development-process'></ulink></para></listitem>
<!-- <listitem><para><ulink url='http://userweb.kernel.org/~akpm/stuff/tpp.txt'></ulink></para></listitem> -->
<listitem><para>A fairly encompassing guide on Linux kernel development -
<listitem><para>A fairly emcompassing guide on Linux kernel development -
<ulink url='http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6.git;a=blob_plain;f=Documentation/HOWTO;hb=HEAD'></ulink></para></listitem>
</itemizedlist>
</para>
<para>
For more discussion on the Yocto Project kernel, you can see these sections
in the Yocto Project Development Manual:
in <ulink url='&YOCTO_DOCS_DEV_URL;'>The Yocto Project Development Manual</ulink>:
<itemizedlist>
<listitem><para>
"<ulink url='&YOCTO_DOCS_DEV_URL;#kernel-overview'>Kernel Overview</ulink>"</para></listitem>

View File

@@ -10,7 +10,7 @@
<section id='actions-org'>
<title>Introduction</title>
<para>
This chapter describes how to accomplish tasks involving a kernel's tree structure.
This chapter describes how to accomplish tasks involving the kernel's tree structure.
The information is designed to help the developer that wants to modify the Yocto
Project kernel and contribute changes upstream to the Yocto Project.
The information covers the following:
@@ -25,44 +25,42 @@
<section id='tree-construction'>
<title>Tree Construction</title>
<para>
This section describes construction of the Yocto Project kernel source repositories
This section describes construction of the Yocto Project kernel repositories
as accomplished by the Yocto Project team to create kernel repositories.
These kernel repositories are found under the heading "Yocto Linux Kernel" at
These kernel repositories are found at
<ulink url='&YOCTO_GIT_URL;/cgit.cgi'>&YOCTO_GIT_URL;/cgit.cgi</ulink>
and can be shipped as part of a Yocto Project release.
The team creates these repositories by
compiling and executing the set of feature descriptions for every BSP/feature
in the product.
Those feature descriptions list all necessary patches,
configuration, branching, tagging and feature divisions found in a kernel.
configuration, branching, tagging and feature divisions found in the kernel.
Thus, the Yocto Project kernel repository (or tree) is built.
</para>
<para>
The existence of this tree allows you to access and clone a particular
Yocto Project kernel repository and use it to build images based on their configurations
Linux Yocto kernel repository and use it to build images based on their configurations
and features.
</para>
<para>
You can find the files used to describe all the valid features and BSPs
in the Yocto Project kernel in any clone of the Yocto Project kernel source repository
Git tree.
in the Yocto Project kernel in any clone of the Linux Yocto kernel source repository Git tree.
For example, the following command clones the Yocto Project baseline kernel that
branched off of <filename>kernel.org</filename> version 3.4:
branched off of <filename>kernel.org</filename> version 3.0:
<literallayout class='monospaced'>
$ git clone git://git.yoctoproject.org/linux-yocto-3.4
$ git clone git://git.yoctoproject.org/linux-yocto-3.0
</literallayout>
For another example of how to set up a local Git repository of the Yocto Project
For another example of how to set up a local Git repository of the Linux Yocto
kernel files, see the
"<ulink url='&YOCTO_DOCS_DEV_URL;#local-kernel-files'>Yocto Project Kernel</ulink>" bulleted
item in the Yocto Project Development Manual.
"<ulink url='&YOCTO_DOCS_DEV_URL;#local-kernel-files'>Linux Yocto Kernel</ulink>" bulleted item in The Yocto Project Development Manual.
</para>
<para>
Once you have cloned the kernel Git repository on your local machine, you can
switch to the <filename>meta</filename> branch within the repository.
Here is an example that assumes the local Git repository for the kernel is in
a top-level directory named <filename>linux-yocto-3.4</filename>:
a top-level directory named <filename>linux-yocto-3.0</filename>:
<literallayout class='monospaced'>
$ cd ~/linux-yocto-3.4
$ cd ~/linux-yocto-3.0
$ git checkout -b meta origin/meta
</literallayout>
Once you have checked out and switched to the <filename>meta</filename> branch,
@@ -87,7 +85,7 @@
</para>
<para>
The following steps describe what happens when the Yocto Project Team constructs
the Yocto Project kernel source Git repository (or tree) found at
the Yocto Linux kernel source Git repository (or tree) found at
<ulink url='&YOCTO_GIT_URL;/cgit.cgi'></ulink> given the
introduction of a new top-level kernel feature or BSP.
These are the actions that effectively create the tree
@@ -114,9 +112,8 @@
of actions, or into an existing equivalent script that is already part of the
shipped kernel.</para></listitem>
<listitem><para>Extra features are appended to the top-level feature description.
These features can come from the
<ulink url='&YOCTO_DOCS_REF_URL;#var-KERNEL_FEATURES'><filename>KERNEL_FEATURES</filename></ulink>
variable in recipes.</para></listitem>
These features can come from the <filename>KERNEL_FEATURES</filename> variable in
recipes.</para></listitem>
<listitem><para>Each extra feature is located, compiled and appended to the script
as described in step three.</para></listitem>
<listitem><para>The script is executed to produce a series of <filename>meta-*</filename>
@@ -133,7 +130,7 @@
</para>
<para>
The kernel tree is now ready for developer consumption to be locally cloned,
configured, and built into a Yocto Project kernel specific to some target hardware.
configured, and built into a Linux Yocto kernel specific to some target hardware.
<note><para>The generated <filename>meta-*</filename> directories add to the kernel
as shipped with the Yocto Project release.
Any add-ons and configuration data are applied to the end of an existing branch.
@@ -152,7 +149,7 @@
<section id='build-strategy'>
<title>Build Strategy</title>
<para>
Once a local Git repository of the Yocto Project kernel exists on a development system,
Once a local Git repository of the Linux Yocto kernel exists on a development system,
you can consider the compilation phase of kernel development - building a kernel image.
Some prerequisites exist that are validated by the build process before compilation
starts:
@@ -169,7 +166,7 @@
</itemizedlist>
<para>
The OpenEmbedded build system makes sure these conditions exist before attempting compilation.
The Yocto Project makes sure these conditions exist before attempting compilation.
            Other means, however, do exist, such as bootstrapping a BSP; see
            the "<link linkend='workflow-examples'>Workflow Examples</link>" section.
</para>
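        <para>
            For example, once these prerequisites are satisfied, a kernel compile can be
            started with BitBake.
            The following is a minimal sketch; the <filename>qemux86</filename> machine and
            the <filename>virtual/kernel</filename> target are illustrative and depend on
            your particular configuration:
            <literallayout class='monospaced'>
     # from an initialized build directory with MACHINE ?= "qemux86" in conf/local.conf
     $ bitbake -c compile virtual/kernel

     # or run the kernel recipe through all of its tasks
     $ bitbake virtual/kernel
            </literallayout>
        </para>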
@@ -212,7 +209,7 @@
</para>
<para>
This behavior means that all the generated files for a particular machine or BSP are now in
What this means, is that all the generated files for a particular machine or BSP are now in
the build tree directory.
The files include the final <filename>.config</filename> file, all the <filename>.o</filename>
files, the <filename>.a</filename> files, and so forth.
@@ -225,7 +222,7 @@
<title>Workflow Examples</title>
<para>
As previously noted, the Yocto Project kernel has built-in Git integration.
As previously noted, the Yocto Project kernel has built in Git integration.
However, these utilities are not the only way to work with the kernel repository.
The Yocto Project has not made changes to Git or to other tools that
would invalidate alternate workflows.
@@ -241,7 +238,7 @@
<ulink url='http://git-scm.com/documentation'></ulink>.
You can find a simple overview of using Git with the Yocto Project in the
"<ulink url='&YOCTO_DOCS_DEV_URL;#git'>Git</ulink>"
section of the Yocto Project Development Manual.
section of The Yocto Project Development Manual.
</para>
<section id='change-inspection-kernel-changes-commits'>
@@ -311,35 +308,32 @@
<title>Show a Particular Feature or Branch Change</title>
<para>
Developers use tags in the Yocto Project kernel tree to divide changes for significant
features or branches.
Once you know a particular tag, you can use Git commands
to show changes associated with the tag and find the branches that contain
the feature.
Significant features or branches are tagged in the Yocto Project tree to divide
changes.
Remember to first determine (or add) the tag of interest.
<note>
Because BSP branch, <filename>kernel.org</filename>, and feature tags are all
present, there could be many tags.
</note>
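                For instance, a quick way to determine the tag of interest is to list the
                tags in your clone (a minimal sketch; the pattern shown is only an example):
                <literallayout class='monospaced'>
     $ git tag -l
     $ git tag -l '*systemtap*'
                </literallayout>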
The <filename>git show &lt;tag&gt;</filename> command shows changes that are tagged by
a feature.
Here is an example that shows changes tagged by the <filename>systemtap</filename>
feature:
<literallayout class='monospaced'>
$ git show systemtap
</literallayout>
You can use the <filename>git branch --contains &lt;tag&gt;</filename> command
to show the branches that contain a particular feature.
This command shows the branches that contain the <filename>systemtap</filename>
feature:
<literallayout class='monospaced'>
$ git branch --contains systemtap
# show the changes tagged by a feature
&gt; git show &lt;tag&gt;
&gt; eg: git show yaffs2
# determine which branches contain a feature
&gt; git branch --contains &lt;tag&gt;
# show the changes in a kernel type
&gt; git whatchanged yocto/base..&lt;kernel type&gt;
&gt; eg: git whatchanged yocto/base..yocto/standard/base
</literallayout>
</para>
<para>
You can use many other comparisons to isolate BSP and kernel changes.
You can use many other comparisons to isolate BSP changes.
For example, you can compare against <filename>kernel.org</filename> tags
such as the <filename>v3.4</filename> tag.
(e.g. v2.6.27.18, etc), or
you can compare against subsystems (e.g. <filename>git whatchanged mm</filename>).
</para>
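            <para>
                Here is a sketch of such comparisons; the tag and subsystem path are
                illustrative and depend on what exists in your clone:
                <literallayout class='monospaced'>
     # changes since a kernel.org tag
     $ git whatchanged v3.4..HEAD

     # changes limited to a subsystem directory
     $ git whatchanged v3.4..HEAD -- mm/
                </literallayout>
            </para>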
</section>
</section>
@@ -369,10 +363,10 @@
The Yocto Project provides scripts that help you work in a collaborative development
environment.
For information on these scripts, see the
"<ulink url='&YOCTO_DOCS_DEV_URL;#pushing-a-change-upstream'>Using Scripts to Push a Change
Upstream and Request a Pull</ulink>" and
"<ulink url='&YOCTO_DOCS_DEV_URL;#submitting-a-patch'>Using Email to Submit a Patch</ulink>"
sections in the Yocto Project Development Manual.
"<ulink url='&YOCTO_DOCS_DEV_URL;#pushing-a-change-upstream'>Pushing a Change
Upstream and Requesting a Pull</ulink>" and
"<ulink url='&YOCTO_DOCS_DEV_URL;#submitting-a-patch'>Submitting a Patch Through
Email</ulink>" sections in The Yocto Project Development Manual.
</para>
<para>
@@ -420,10 +414,10 @@
# bulk export of ALL modifications without separation or division
# of the changes
$ git add .
$ git commit -s -a -m &lt;msg&gt;
&gt; git add .
     &gt; git commit -s -a -m &lt;commit message&gt;
or
$ git commit -s -a # and interact with $EDITOR
&gt; git commit -s -a # and interact with $EDITOR
</literallayout>
</para>
@@ -460,15 +454,15 @@
<literallayout class='monospaced'>
# edit a file
$ vi &lt;path&gt;/file
     &gt; vi &lt;path&gt;/file
# stage the change
$ git add &lt;path&gt;/file
     &gt; git add &lt;path&gt;/file
# commit the change
$ git commit -s
&gt; git commit -s
# remove a file
$ git rm &lt;path&gt;/file
     &gt; git rm &lt;path&gt;/file
# commit the change
$ git commit -s
&gt; git commit -s
... etc.
</literallayout>
@@ -495,25 +489,25 @@
associated with development by using the following commands:
<literallayout class='monospaced'>
     $ git add &lt;path&gt;/file
     $ git commit --amend
     $ git rebase or git rebase -i
     &gt; git add &lt;path&gt;/file
     &gt; git commit --amend
     &gt; git rebase or git rebase -i
</literallayout>
</para>
<para>
Again, assuming that the changes have not been pushed upstream, and that
no pending works-in-progress exist (use <filename>git status</filename> to check), then
no pending works-in-progress exists (use <filename>git status</filename> to check), then
you can revert (undo) commits by using the following commands:
<literallayout class='monospaced'>
# remove the commit, update working tree and remove all
# traces of the change
$ git reset --hard HEAD^
&gt; git reset --hard HEAD^
# remove the commit, but leave the files changed and staged for re-commit
$ git reset --soft HEAD^
&gt; git reset --soft HEAD^
# remove the commit, leave file change, but not staged for commit
$ git reset --mixed HEAD^
&gt; git reset --mixed HEAD^
</literallayout>
</para>
@@ -526,7 +520,7 @@
"permanent" and you should not modify them.
If the commits need to be changed, you can incrementally do so with new commits.
These practices follow standard Git workflow and the <filename>kernel.org</filename> best
practices, which is recommended.
practices, which Yocto Project recommends.
<note>
It is recommended to tag or branch before adding changes to a Yocto Project
BSP or before creating a new one.
@@ -541,7 +535,7 @@
<para>
This section describes how you can extract committed changes from a working directory
by exporting them as patches.
Once the changes have been extracted, you can use the patches for upstream submission,
Once extracted, you can use the patches for upstream submission,
place them in a Yocto Project template for automatic kernel patching,
or apply them in many other common uses.
</para>
@@ -561,7 +555,7 @@
# began. It can also be the parent branch if a branch was created
# before development began.
$ git format-patch -o &lt;dir&gt; &lt;first commit&gt;..&lt;last commit&gt;
&gt; git format-patch -o &lt;dir&gt; &lt;first commit&gt;..&lt;last commit&gt;
</literallayout>
</para>
@@ -571,14 +565,14 @@
# Identify commits of interest.
# If the tree was tagged before development
$ git format-patch -o &lt;save dir&gt; &lt;tag&gt;
&gt; git format-patch -o &lt;save dir&gt; &lt;tag&gt;
# If no tags are available
$ git format-patch -o &lt;save dir&gt; HEAD^ # last commit
$ git format-patch -o &lt;save dir&gt; HEAD^^ # last 2 commits
$ git whatchanged # identify last commit
$ git format-patch -o &lt;save dir&gt; &lt;commit id&gt;
$ git format-patch -o &lt;save dir&gt; &lt;rev-list&gt;
&gt; git format-patch -o &lt;save dir&gt; HEAD^ # last commit
&gt; git format-patch -o &lt;save dir&gt; HEAD^^ # last 2 commits
&gt; git whatchanged # identify last commit
&gt; git format-patch -o &lt;save dir&gt; &lt;commit id&gt;
&gt; git format-patch -o &lt;save dir&gt; &lt;rev-list&gt;
</literallayout>
</para>
</section>
@@ -589,14 +583,14 @@
<para>
This section describes how you can export changes from a working directory
by pushing the changes into a master repository or by making a pull request.
Once you have pushed the changes to the master repository, you can then
Once you have pushed the changes in the master repository, you can then
pull those same changes into a new kernel build at a later time.
</para>
<para>
Use this command form to push the changes:
<literallayout class='monospaced'>
$ git push ssh://&lt;master_server&gt;/&lt;path_to_repo&gt;
&gt; git push ssh://&lt;master_server&gt;/&lt;path_to_repo&gt;
&lt;local_branch&gt;:&lt;remote_branch&gt;
</literallayout>
</para>
@@ -604,15 +598,15 @@
<para>
For example, the following command pushes the changes from your local branch
<filename>yocto/standard/common-pc/base</filename> to the remote branch with the same name
in the master repository <filename>//git.mycompany.com/pub/git/kernel-3.4</filename>.
in the master repository <filename>//git.mycompany.com/pub/git/kernel-3.0</filename>.
<literallayout class='monospaced'>
$ git push ssh://git.mycompany.com/pub/git/kernel-3.4 \
&gt; git push ssh://git.mycompany.com/pub/git/kernel-3.0 \
yocto/standard/common-pc/base:yocto/standard/common-pc/base
</literallayout>
</para>
<para>
A pull request entails using the <filename>git request-pull</filename> command to compose
A pull request entails using <filename>git request-pull</filename> to compose
an email to the
maintainer requesting that a branch be pulled into the master repository, see
<ulink url='http://github.com/guides/pull-requests'></ulink> for an example.
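                The general form of the command is shown below; the start commit, server,
                path, and branch names are illustrative:
                <literallayout class='monospaced'>
     $ git request-pull &lt;start commit&gt; ssh://&lt;master_server&gt;/&lt;path_to_repo&gt; &lt;branch&gt;
                </literallayout>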
@@ -651,7 +645,7 @@
Consequently, be sure that the headers for each commit have the required information.
For information on how to follow the Yocto Project commit message standards, see the
"<ulink url='&YOCTO_DOCS_DEV_URL;#how-to-submit-a-change'>How to Submit a
Change</ulink>" section in the Yocto Project Development Manual.
Change</ulink>" section in The Yocto Project Development Manual.
</para>
<para>
@@ -674,8 +668,8 @@
The following is an example of dumping patches for external submission:
<literallayout class='monospaced'>
# dump the last 4 commits
$ git format-patch --thread -n -o ~/rr/ HEAD^^^^
$ git send-email --compose --subject '[RFC 0/N] &lt;patch series summary&gt;' \
&gt; git format-patch --thread -n -o ~/rr/ HEAD^^^^
&gt; git send-email --compose --subject '[RFC 0/N] &lt;patch series summary&gt;' \
--to foo@yoctoproject.org --to bar@yoctoproject.org \
--cc list@yoctoproject.org ~/rr
# the editor is invoked for the 0/N patch, and when complete the entire
@@ -694,7 +688,7 @@
However, if the patches are manually applied to a secondary tree and then
that tree is checked into the SCM, you can lose change information such as
commit logs.
This process is not recommended.
The Yocto Project does not recommend this process.
</para>
<para>
@@ -711,14 +705,14 @@
<para>
This section describes kernel development in an SCM other than Git,
which is not the same as exporting changes to another SCM described earlier.
For this scenario, you use the OpenEmbedded build system to
For this scenario, you use the Yocto Project build system to
develop the kernel in a different SCM.
The following must be true for you to accomplish this:
<itemizedlist>
<listitem><para>The delivered Yocto Project kernel must be exported into the second
SCM.</para></listitem>
<listitem><para>Development must be exported from that secondary SCM into a
format that can be used by the OpenEmbedded build system.</para></listitem>
format that can be used by the Yocto Project build system.</para></listitem>
</itemizedlist>
</para>
@@ -742,9 +736,9 @@
import the <filename>yocto/standard/common-pc/base</filename>
kernel into a secondary SCM:
<literallayout class='monospaced'>
$ git checkout yocto/standard/common-pc/base
$ cd .. ; echo linux/.git &gt; .cvsignore
$ cvs import -m "initial import" linux MY_COMPANY start
&gt; git checkout yocto/standard/common-pc/base
&gt; cd .. ; echo linux/.git &gt; .cvsignore
&gt; cvs import -m "initial import" linux MY_COMPANY start
</literallayout>
</para>
@@ -756,11 +750,11 @@
The following commands illustrate how you can condense and merge two BSPs into a
second SCM:
<literallayout class='monospaced'>
$ git checkout yocto/standard/common-pc/base
$ git merge yocto/standard/common-pc-64/base
&gt; git checkout yocto/standard/common-pc/base
&gt; git merge yocto/standard/common-pc-64/base
# resolve any conflicts and commit them
$ cd .. ; echo linux/.git &gt; .cvsignore
$ cvs import -m "initial import" linux MY_COMPANY start
&gt; cd .. ; echo linux/.git &gt; .cvsignore
&gt; cvs import -m "initial import" linux MY_COMPANY start
</literallayout>
</para>
</section>
@@ -794,10 +788,9 @@
<para>
The basic steps you need to follow are:
<orderedlist>
<listitem><para><emphasis>Make sure you have set up a local source directory:</emphasis>
You must create a local <ulink url='&YOCTO_DOCS_DEV_URL;#source-directory'>source
directory</ulink> by either creating a Git repository (recommended) or
extracting a Yocto Project release tarball.</para></listitem>
<listitem><para><emphasis>Make sure you have the Yocto Project source tree available:</emphasis>
You should either create a Yocto Project Git repository (recommended), or
you should get the Yocto Project release tarball and extract it.</para></listitem>
<listitem><para><emphasis>Choose an existing BSP available with the Yocto Project:</emphasis>
Try to map your board features as closely to the features of a BSP that is
already supported and exists in the Yocto Project.
@@ -807,12 +800,12 @@
on the Yocto Project's Download page at
<ulink url='&YOCTO_HOME_URL;/download'></ulink>.</para></listitem>
<listitem><para><emphasis>Be sure you have the Base BSP:</emphasis>
You need to either have a local Git repository of the base BSP set up or
have downloaded and extracted the files from a release BSP tarball.
You need to either have the Yocto Project Git repository set up or download
the tarball of the base BSP.
Either method gives you access to the BSP source files.</para></listitem>
<listitem><para><emphasis>Make a copy of the existing BSP, thus isolating your new
BSP work:</emphasis>
Copying the existing BSP file structure gives you a new area in which to work.</para></listitem>
Copying the existing BSP structure gives you a new area in which to work.</para></listitem>
<listitem><para><emphasis>Make configuration and recipe changes to your new BSP:</emphasis>
Configuration changes involve the files in the BSP's <filename>conf</filename>
directory.
@@ -828,7 +821,7 @@
changes to the <filename>local.conf</filename> and <filename>bblayers.conf</filename>
files.</para></listitem>
<listitem><para><emphasis>Build the image:</emphasis>
The OpenEmbedded build system uses BitBake to create the image.
The Yocto Project uses the BitBake tool to create the image.
You need to decide on the type of image you are going to build (e.g. minimal, base,
core, sato, and so forth) and then start the build using the <filename>bitbake</filename>
command.</para></listitem>
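                    For example, assuming your new BSP layer provides a machine named
                    <filename>mymachine</filename> (an illustrative name), a minimal image
                    build might look like the following sketch:
                    <literallayout class='monospaced'>
     $ source oe-init-build-env
     # set MACHINE = "mymachine" in conf/local.conf, then
     $ bitbake core-image-minimal
                    </literallayout>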
@@ -844,7 +837,7 @@
string, this simply means that modifications in the source
directory have not been committed.
<literallayout class='monospaced'>
$ git status
&gt; git status
</literallayout>
</para>
@@ -858,8 +851,8 @@
<para>
To brute force pickup and commit all such pending changes, enter the following:
<literallayout class='monospaced'>
$ git add .
$ git commit -s -a -m "getting rid of -dirty"
&gt; git add .
&gt; git commit -s -a -m "getting rid of -dirty"
</literallayout>
</para>

View File

@@ -51,13 +51,8 @@
</revision>
<revision>
<revnumber>1.2</revnumber>
<date>April 2012</date>
<revremark>Released with the Yocto Project 1.2 Release.</revremark>
</revision>
<revision>
<revnumber>1.3</revnumber>
<date>Sometime in 2012</date>
<revremark>Released with the Yocto Project 1.3 Release.</revremark>
<date>TBD 2012</date>
<revremark>Work in progress for the Yocto Project 1.2 Release.</revremark>
</revision>
</revhistory>
@@ -73,8 +68,9 @@
</para>
<note>
Due to production processes, there could be differences between the Yocto Project
documentation bundled in the release tarball and the
<ulink url='&YOCTO_DOCS_KERNEL_URL;'>Yocto Project Kernel Architecture and Use Manual</ulink> on
documentation bundled in the release tarball and
<ulink url='&YOCTO_DOCS_KERNEL_URL;'>
The Yocto Project Kernel Architecture and Use Manual</ulink> on
the <ulink url='&YOCTO_HOME_URL;'>Yocto Project</ulink> website.
For the latest version of this manual, see the manual on the website.
</note>

View File

@@ -110,7 +110,7 @@ h5 {
h6 {
margin: 1em 0em 0em 0em;
padding: 1em 0em 0em 0em;
font-size: 110%;
font-size: 80%;
font-weight: bold;
}

Some files were not shown because too many files have changed in this diff.