Compare commits

..

1 Commits

Author SHA1 Message Date
Joshua Lock
a62982ffb6 ui/crumbs/tasklistmodel: fix saving recipes
After switching to dynamically finding the relative path for the recipe
file it's no longer necessary to append .bb when inserting the require line into the
saved recipe.

Fixes [YOCTO #1247]

(Bitbake rev: 2d05ce4f527daa905ed64485029ebeb2b349daa6)

Signed-off-by: Joshua Lock <josh@linux.intel.com>
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
2011-07-21 15:54:49 -07:00
540 changed files with 7657 additions and 9771 deletions

12
.gitignore vendored
View File

@@ -1,11 +1,11 @@
*.pyc
*.pyo
build*/conf/local.conf
build*/conf/bblayers.conf
build*/downloads
build*/tmp/
build*/sstate-cache
build*/pyshtables.py
build/conf/local.conf
build/conf/bblayers.conf
build/downloads
build/tmp/
build/sstate-cache
build/pyshtables.py
pstage/
scripts/oe-git-proxy-socks
sources/

View File

@@ -239,57 +239,30 @@ software development of network attached storage (NAS) and digital media server
applications. The MPC8315E-RDB features the PowerQUICC II Pro processor, which
includes a built-in security accelerator.
(Note: you may find it easier to order MPC8315E-RDBA; this appears to be the
same board in an enclosure with accessories. In any case it is fully
compatible with the instructions given here.)
Setup instructions
------------------
You will need the following:
* NFS root setup on your workstation
* TFTP server installed on your workstation
* Null modem cable connected from your workstation to the first serial port
on the board
* Ethernet connected to the first ethernet port on the board
* nfs root setup on your workstation
* tftp server installed on your workstation
--- Preparation ---
Load the kernel and boot it as follows:
Note: if you have altered your board's ethernet MAC address(es) from the
defaults, or you need to do so because you want multiple boards on the same
network, then you will need to change the values in the dts file (patch
linux/arch/powerpc/boot/dts/mpc8315erdb.dts within the kernel source). If
you have left them at the factory default then you shouldn't need to do
anything here.
1. Get the kernel (uImage.mpc8315erdb) and dtb (mpc8315erdb.dtb) files from
the Poky build tmp/deploy directory, and make them available on your tftp
server.
--- Booting from NFS root ---
2. Set up the environment in U-Boot:
Load the kernel and dtb (device tree blob), and boot the system as follows:
=>setenv ipaddr <board ip>
=>setenv serverip <tftp server ip>
=>setenv bootargs root=/dev/nfs rw nfsroot=<nfsroot ip>:<rootfs path> ip=<board ip>:<server ip>:<gateway ip>:255.255.255.0:mpc8315e:eth0:off console=ttyS0,115200
1. Get the kernel (uImage-mpc8315e-rdb.bin) and dtb (uImage-mpc8315e-rdb.dtb)
files from the Poky build tmp/deploy directory, and make them available on
your TFTP server.
3. Download kernel and dtb to boot kernel.
2. Connect the board's first serial port to your workstation and then start up
your favourite serial terminal so that you will be able to interact with
the serial console. If you don't have a favourite, picocom is suggested:
$ picocom /dev/ttyUSB0 -b 115200
3. Power up or reset the board and press a key on the terminal when prompted
to get to the U-Boot command line
4. Set up the environment in U-Boot:
=> setenv ipaddr <board ip>
=> setenv serverip <tftp server ip>
=> setenv bootargs root=/dev/nfs rw nfsroot=<nfsroot ip>:<rootfs path> ip=<board ip>:<server ip>:<gateway ip>:255.255.255.0:mpc8315e:eth0:off console=ttyS0,115200
5. Download the kernel and dtb, and boot:
=> tftp 800000 uImage-mpc8315e-rdb.bin
=> tftp 780000 uImage-mpc8315e-rdb.dtb
=> bootm 800000 - 780000
=>tftp 800000 uImage.mpc8315erdb
=>tftp 780000 mpc8315erdb.dtb
=>bootm 800000 - 780000
Ubiquiti Networks RouterStation Pro (routerstationpro)

View File

@@ -40,7 +40,7 @@ from bb import cooker
from bb import ui
from bb import server
__version__ = "1.13.3"
__version__ = "1.13.2"
logger = logging.getLogger("BitBake")
@@ -102,7 +102,7 @@ It expects that BBFILES is defined, which is a space separated list of files to
be executed. BBFILES does support wildcards.
Default BBFILES are the .bb files in the current directory.""")
parser.add_option("-b", "--buildfile", help = "execute the task against this .bb file, rather than a package from BBFILES. Does not handle any dependencies.",
parser.add_option("-b", "--buildfile", help = "execute the task against this .bb file, rather than a package from BBFILES.",
action = "store", dest = "buildfile", default = None)
parser.add_option("-k", "--continue", help = "continue as much as possible after an error. While the target that failed, and those that depend on it, cannot be remade, the other dependencies of these targets can be processed all the same.",
@@ -172,8 +172,8 @@ Default BBFILES are the .bb files in the current directory.""")
ui_main = get_ui(configuration)
# Server type can be xmlrpc, process or none currently, if nothing is specified,
# the default server is process
# Server type could be xmlrpc or none currently, if nothing is specified,
# default server would be none
if configuration.servertype:
server_type = configuration.servertype
else:
@@ -184,7 +184,7 @@ Default BBFILES are the .bb files in the current directory.""")
server = getattr(module, server_type)
except AttributeError:
sys.exit("FATAL: Invalid server type '%s' specified.\n"
"Valid interfaces: xmlrpc, process [default], none." % servertype)
"Valid interfaces: xmlrpc, process, none [default]." % servertype)
# Save a logfile for cooker into the current working directory. When the
# server is daemonized this logfile will be truncated.
@@ -197,9 +197,6 @@ Default BBFILES are the .bb files in the current directory.""")
handler = bb.event.LogHandler()
logger.addHandler(handler)
# Before we start modifying the environment we should take a pristine
# copy for possible later use
initialenv = os.environ.copy()
# Clear away any spurious environment variables. But don't wipe the
# environment totally. This is necessary to ensure the correct operation
# of the UIs (e.g. for DISPLAY, etc.)
@@ -210,7 +207,7 @@ Default BBFILES are the .bb files in the current directory.""")
server.initServer()
idle = server.getServerIdleCB()
cooker = bb.cooker.BBCooker(configuration, idle, initialenv)
cooker = bb.cooker.BBCooker(configuration, idle)
cooker.parseCommandLine()
server.addcooker(cooker)

View File

@@ -8,7 +8,7 @@
import cmd
import logging
import os
import os.path
import sys
bindir = os.path.dirname(__file__)
@@ -41,15 +41,14 @@ def main(args):
class Commands(cmd.Cmd):
def __init__(self):
cmd.Cmd.__init__(self)
initialenv = os.environ.copy()
self.returncode = 0
self.config = Config(parse_only=True)
self.cooker = bb.cooker.BBCooker(self.config,
self.register_idle_function,
initialenv)
self.register_idle_function)
self.config_data = self.cooker.configuration.data
bb.providers.logger.setLevel(logging.ERROR)
self.cooker_data = None
self.prepare_cooker()
def register_idle_function(self, function, data):
pass
@@ -72,24 +71,10 @@ class Commands(cmd.Cmd):
self.cooker_data = self.cooker.status
self.cooker_data.appends = self.cooker.appendlist
def check_prepare_cooker(self):
if not self.cooker_data:
self.prepare_cooker()
def do_show_layers(self, args):
"""show_layers: shows current configured layers"""
self.check_prepare_cooker()
logger.info(str(self.config_data.getVar('BBLAYERS', True)))
def do_show_overlayed(self, args):
"""show_overlayed: list overlayed recipes (where there is a recipe in another
layer that has a higher layer priority)
syntax: show_overlayed
Highest priority recipes are listed with the recipes they overlay as subitems.
"""
self.check_prepare_cooker()
if self.cooker.overlayed:
logger.info('Overlayed recipes:')
for f in self.cooker.overlayed.iterkeys():
@@ -100,32 +85,15 @@ Highest priority recipes are listed with the recipes they overlay as subitems.
logger.info('No overlayed recipes found')
def do_flatten(self, args):
"""flatten: flattens layer configuration into a separate output directory.
syntax: flatten <outputdir>
Takes the current layer configuration and builds a "flattened" directory
containing the contents of all layers, with any overlayed recipes removed
and bbappends appended to the corresponding recipes. Note that some manual
cleanup may still be necessary afterwards, in particular:
* where non-recipe files (such as patches) are overwritten (the flatten
command will show a warning for these)
* where anything beyond the normal layer setup has been added to
layer.conf (only the lowest priority layer's layer.conf is used)
* overridden/appended items from bbappends will need to be tidied up
"""
arglist = args.split()
if len(arglist) != 1:
logger.error('Please specify an output directory')
self.do_help('flatten')
logger.error('syntax: flatten <outputdir>')
return
if os.path.exists(arglist[0]) and os.listdir(arglist[0]):
logger.error('Directory %s exists and is non-empty, please clear it out first' % arglist[0])
return
self.check_prepare_cooker()
layers = (self.config_data.getVar('BBLAYERS', True) or "").split()
for layer in layers:
overlayed = []
@@ -175,13 +143,6 @@ cleanup may still be necessary afterwards, in particular:
recipefile.writelines(appendfile.readlines())
def do_show_appends(self, args):
"""show_appends: List bbappend files and recipe files they apply to
syntax: show_appends
Recipes are listed with the bbappends that apply to them as subitems.
"""
self.check_prepare_cooker()
if not self.cooker_data.appends:
logger.info('No append files found')
return
@@ -241,6 +202,9 @@ Recipes are listed with the bbappends that apply to them as subitems.
notappended.append(basename)
return appended, notappended
def do_EOF(self, line):
return True
class Config(object):
def __init__(self, **options):

View File

@@ -21,7 +21,7 @@
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
__version__ = "1.13.3"
__version__ = "1.13.2"
import sys
if sys.version_info < (2, 6, 0):

View File

@@ -43,7 +43,7 @@ except ImportError:
logger.info("Importing cPickle failed. "
"Falling back to a very slow implementation.")
__cache_version__ = "142"
__cache_version__ = "141"
def getCacheFile(path, filename):
return os.path.join(path, filename)
@@ -99,7 +99,6 @@ class CoreRecipeInfo(RecipeInfoCommon):
self.skipreason = self.getvar('__SKIPPED', metadata)
if self.skipreason:
self.pn = self.getvar('PN', metadata) or bb.parse.BBHandler.vars_from_file(filename,metadata)[0]
self.skipped = True
self.provides = self.depvar('PROVIDES', metadata)
self.rprovides = self.depvar('RPROVIDES', metadata)
@@ -372,9 +371,8 @@ class Cache(object):
fn = virtualfn
cls = ""
if virtualfn.startswith('virtual:'):
elems = virtualfn.split(':')
cls = ":".join(elems[1:-1])
fn = elems[-1]
cls = virtualfn.split(':', 2)[1]
fn = virtualfn.replace('virtual:' + cls + ':', '')
return (fn, cls)
@staticmethod
@@ -543,7 +541,7 @@ class Cache(object):
# If any one of the variants is not present, mark as invalid for all
if invalid:
for cls in info_array[0].variants:
for cls in info.variants:
virtualfn = self.realfn2virtual(fn, cls)
if virtualfn in self.clean:
logger.debug(2, "Cache: Removing %s from cache", virtualfn)

View File

@@ -311,14 +311,6 @@ class CommandsAsync:
command.finishAsyncCommand()
parseFiles.needcache = True
def reparseFiles(self, command, params):
"""
Reparse .bb files
"""
command.cooker.reparseFiles()
command.finishAsyncCommand()
reparseFiles.needcache = True
def compareRevisions(self, command, params):
"""
Parse the .bb files

View File

@@ -59,13 +59,11 @@ class state:
class SkippedPackage:
def __init__(self, info = None, reason = None):
self.pn = None
self.skipreason = None
self.provides = None
self.rprovides = None
if info:
self.pn = info.pn
self.skipreason = info.skipreason
self.provides = info.provides
self.rprovides = info.rprovides
@@ -80,7 +78,7 @@ class BBCooker:
Manages one bitbake build run
"""
def __init__(self, configuration, server_registration_cb, savedenv={}):
def __init__(self, configuration, server_registration_cb):
self.status = None
self.appendlist = {}
self.skiplist = {}
@@ -89,14 +87,6 @@ class BBCooker:
self.configuration = configuration
# Keep a datastore of the initial environment variables and their
# values from when BitBake was launched to enable child processes
# to use environment variables which have been cleaned from the
# BitBake processes env
self.savedenv = bb.data.init()
for k in savedenv:
self.savedenv.setVar(k, savedenv[k])
self.caches_array = []
# Currently, only Image Creator hob ui needs extra cache.
# So, we save Extra Cache class name and container file
@@ -132,8 +122,21 @@ class BBCooker:
logger.critical("Unable to import extra RecipeInfo '%s' from '%s': %s" % (cache_name, module_name, exc))
sys.exit("FATAL: Failed to import extra cache class '%s'." % cache_name)
self.configuration.data = None
self.loadConfigurationData()
self.configuration.data = bb.data.init()
if not self.server_registration_cb:
bb.data.setVar("BB_WORKERCONTEXT", "1", self.configuration.data)
bb.data.inheritFromOS(self.configuration.data)
try:
self.parseConfigurationFiles(self.configuration.prefile,
self.configuration.postfile)
except SyntaxError:
sys.exit(1)
except Exception:
logger.exception("Error parsing configuration files")
sys.exit(1)
if not self.configuration.cmd:
self.configuration.cmd = bb.data.getVar("BB_DEFAULT_TASK", self.configuration.data, True) or "build"
@@ -163,26 +166,6 @@ class BBCooker:
self.parser = None
def loadConfigurationData(self):
self.configuration.data = bb.data.init()
if not self.server_registration_cb:
bb.data.setVar("BB_WORKERCONTEXT", "1", self.configuration.data)
bb.data.inheritFromOS(self.configuration.data, self.savedenv)
try:
self.parseConfigurationFiles(self.configuration.prefile,
self.configuration.postfile)
except SyntaxError:
sys.exit(1)
except Exception:
logger.exception("Error parsing configuration files")
sys.exit(1)
if not self.configuration.cmd:
self.configuration.cmd = bb.data.getVar("BB_DEFAULT_TASK", self.configuration.data, True) or "build"
def parseConfiguration(self):
@@ -319,6 +302,7 @@ class BBCooker:
"""
# Need files parsed
self.updateCache()
# If we are told to do the None task then query the default task
if (task == None):
task = self.configuration.cmd
@@ -330,7 +314,7 @@ class BBCooker:
bb.data.expandKeys(localdata)
# We set abort to False here to prevent unbuildable targets raising
# an exception when we're just generating data
taskdata = bb.taskdata.TaskData(False, skiplist=self.skiplist)
taskdata = bb.taskdata.TaskData(False)
runlist = []
for k in pkgs_to_build:
@@ -338,17 +322,19 @@ class BBCooker:
runlist.append([k, "do_%s" % task])
taskdata.add_unresolved(localdata, self.status)
return runlist, taskdata
def generateTaskDepTreeData(self, pkgs_to_build, task):
"""
Create a dependency graph of pkgs_to_build including reverse dependency
information.
"""
runlist, taskdata = self.prepareTreeData(pkgs_to_build, task)
rq = bb.runqueue.RunQueue(self, self.configuration.data, self.status, taskdata, runlist)
rq.rqdata.prepare()
return taskdata, rq
def generateDepTreeData(self, pkgs_to_build, task, more_meta=False):
"""
Create a dependency tree of pkgs_to_build, returning the data.
When more_meta is set to True include summary, license and group
information in the returned tree.
"""
taskdata, rq = self.prepareTreeData(pkgs_to_build, task)
seen_fnids = []
depend_tree = {}
depend_tree["depends"] = {}
@@ -365,10 +351,18 @@ class BBCooker:
fn = taskdata.fn_index[fnid]
pn = self.status.pkg_fn[fn]
version = "%s:%s-%s" % self.status.pkg_pepvpr[fn]
if more_meta:
summary = self.status.summary[fn]
lic = self.status.license[fn]
section = self.status.section[fn]
if pn not in depend_tree["pn"]:
depend_tree["pn"][pn] = {}
depend_tree["pn"][pn]["filename"] = fn
depend_tree["pn"][pn]["version"] = version
if more_meta:
depend_tree["pn"][pn]["summary"] = summary
depend_tree["pn"][pn]["license"] = lic
depend_tree["pn"][pn]["section"] = section
for dep in rq.rqdata.runq_depends[task]:
depfn = taskdata.fn_index[rq.rqdata.runq_fnid[dep]]
deppn = self.status.pkg_fn[depfn]
@@ -412,74 +406,13 @@ class BBCooker:
return depend_tree
def generatePkgDepTreeData(self, pkgs_to_build, task):
"""
Create a dependency tree of pkgs_to_build, returning the data.
"""
_, taskdata = self.prepareTreeData(pkgs_to_build, task)
tasks_fnid = []
if len(taskdata.tasks_name) != 0:
for task in xrange(len(taskdata.tasks_name)):
tasks_fnid.append(taskdata.tasks_fnid[task])
seen_fnids = []
depend_tree = {}
depend_tree["depends"] = {}
depend_tree["pn"] = {}
depend_tree["rdepends-pn"] = {}
depend_tree["packages"] = {}
depend_tree["rdepends-pkg"] = {}
for task in xrange(len(tasks_fnid)):
fnid = tasks_fnid[task]
fn = taskdata.fn_index[fnid]
pn = self.status.pkg_fn[fn]
version = "%s:%s-%s" % self.status.pkg_pepvpr[fn]
summary = self.status.summary[fn]
lic = self.status.license[fn]
section = self.status.section[fn]
if pn not in depend_tree["pn"]:
depend_tree["pn"][pn] = {}
depend_tree["pn"][pn]["filename"] = fn
depend_tree["pn"][pn]["version"] = version
depend_tree["pn"][pn]["summary"] = summary
depend_tree["pn"][pn]["license"] = lic
depend_tree["pn"][pn]["section"] = section
if fnid not in seen_fnids:
seen_fnids.append(fnid)
packages = []
depend_tree["depends"][pn] = []
for dep in taskdata.depids[fnid]:
depend_tree["depends"][pn].append(taskdata.build_names_index[dep])
depend_tree["rdepends-pn"][pn] = []
for rdep in taskdata.rdepids[fnid]:
depend_tree["rdepends-pn"][pn].append(taskdata.run_names_index[rdep])
rdepends = self.status.rundeps[fn]
for package in rdepends:
depend_tree["rdepends-pkg"][package] = []
for rdepend in rdepends[package]:
depend_tree["rdepends-pkg"][package].append(rdepend)
packages.append(package)
for package in packages:
if package not in depend_tree["packages"]:
depend_tree["packages"][package] = {}
depend_tree["packages"][package]["pn"] = pn
depend_tree["packages"][package]["filename"] = fn
depend_tree["packages"][package]["version"] = version
return depend_tree
def generateDepTreeEvent(self, pkgs_to_build, task):
"""
Create a task dependency graph of pkgs_to_build.
Generate an event with the result
"""
depgraph = self.generateTaskDepTreeData(pkgs_to_build, task)
depgraph = self.generateDepTreeData(pkgs_to_build, task)
bb.event.fire(bb.event.DepTreeGenerated(depgraph), self.configuration.data)
def generateDotGraphFiles(self, pkgs_to_build, task):
@@ -488,7 +421,7 @@ class BBCooker:
Save the result to a set of .dot files.
"""
depgraph = self.generateTaskDepTreeData(pkgs_to_build, task)
depgraph = self.generateDepTreeData(pkgs_to_build, task)
# Prints a flattened form of package-depends below where subpackages of a package are merged into the main pn
depends_file = file('pn-depends.dot', 'w' )
@@ -578,7 +511,7 @@ class BBCooker:
bb.data.expandKeys(localdata)
# Handle PREFERRED_PROVIDERS
for p in (bb.data.getVar('PREFERRED_PROVIDERS', localdata, True) or "").split():
for p in (bb.data.getVar('PREFERRED_PROVIDERS', localdata, 1) or "").split():
try:
(providee, provider) = p.split(':')
except:
@@ -695,7 +628,7 @@ class BBCooker:
pkgs = pkgs + extra_pkgs
# generate a dependency tree for all our packages
tree = self.generatePkgDepTreeData(pkgs, 'build')
tree = self.generateDepTreeData(pkgs, 'build', more_meta=True)
bb.event.fire(bb.event.TargetsTreeGenerated(tree), self.configuration.data)
def buildWorldTargetList(self):
@@ -926,10 +859,6 @@ class BBCooker:
Build the file matching regexp buildfile
"""
# Too many people use -b because they think it's how you normally
# specify a target to be built, so show a warning
bb.warn("Buildfile specified, dependencies will not be handled. If this is not what you want, do not use -b / --buildfile.")
# Parse the configuration here. We need to do it explicitly here since
# buildFile() doesn't use the cache
self.parseConfiguration()
@@ -1062,7 +991,7 @@ class BBCooker:
bb.data.update_data(localdata)
bb.data.expandKeys(localdata)
taskdata = bb.taskdata.TaskData(self.configuration.abort, skiplist=self.skiplist)
taskdata = bb.taskdata.TaskData(self.configuration.abort)
runlist = []
for k in targets:
@@ -1074,8 +1003,8 @@ class BBCooker:
self.server_registration_cb(buildTargetsIdle, rq)
def updateCache(self, force=False):
if self.state == state.running and not force:
def updateCache(self):
if self.state == state.running:
return
if self.state in (state.shutdown, state.stop):
@@ -1085,8 +1014,6 @@ class BBCooker:
if self.state != state.parsing:
self.parseConfiguration ()
if self.status:
del self.status
self.status = bb.cache.CacheData(self.caches_array)
ignore = bb.data.getVar("ASSUME_PROVIDED", self.configuration.data, 1) or ""
@@ -1160,7 +1087,7 @@ class BBCooker:
collectlog.debug(1, "collecting .bb files")
files = (data.getVar( "BBFILES", self.configuration.data, True) or "").split()
files = (data.getVar( "BBFILES", self.configuration.data, 1 ) or "").split()
data.setVar("BBFILES", " ".join(files), self.configuration.data)
# Sort files by priority
@@ -1217,8 +1144,7 @@ class BBCooker:
base = os.path.basename(f).replace('.bbappend', '.bb')
if not base in self.appendlist:
self.appendlist[base] = []
if f not in self.appendlist[base]:
self.appendlist[base].append(f)
self.appendlist[base].append(f)
# Find overlayed recipes
# bbfiles will be in priority order which makes this easy
@@ -1259,10 +1185,6 @@ class BBCooker:
def stop(self):
self.state = state.stop
def reparseFiles(self):
self.loadConfigurationData()
self.updateCache(force=True)
def server_main(cooker, func, *args):
cooker.pre_serve()

View File

@@ -159,12 +159,12 @@ def expandKeys(alterdata, readdata = None):
ekey = todolist[key]
renameVar(key, ekey, alterdata)
def inheritFromOS(d, savedenv):
"""Inherit variables from the initial environment."""
def inheritFromOS(d):
"""Inherit variables from the environment."""
exportlist = bb.utils.preserved_envvars_exported()
for s in savedenv.keys():
for s in os.environ.keys():
try:
setVar(s, getVar(s, savedenv, True), d)
setVar(s, os.environ[s], d)
if s in exportlist:
setVarFlag(s, "export", True, d)
except TypeError:

View File

@@ -102,7 +102,8 @@ def print_ui_queue():
console = logging.StreamHandler(sys.stdout)
console.setFormatter(BBLogFormatter("%(levelname)s: %(message)s"))
logger.handlers = [console]
for event in ui_queue:
while ui_queue:
event = ui_queue.pop()
if isinstance(event, logging.LogRecord):
logger.handle(event)
@@ -287,12 +288,11 @@ class BuildCompleted(BuildBase):
class NoProvider(Event):
"""No Provider for an Event"""
def __init__(self, item, runtime=False, dependees=None, reasons=[]):
def __init__(self, item, runtime=False, dependees=None):
Event.__init__(self)
self._item = item
self._runtime = runtime
self._dependees = dependees
self._reasons = reasons
def getItem(self):
return self._item

View File

@@ -427,19 +427,17 @@ def multi_finalize(fn, d):
extended = d.getVar("BBCLASSEXTEND", True) or ""
if extended:
# the following is to support bbextends with arguments, for e.g. multilib
# an example is as follows:
# the following is to support bbextends with argument, for e.g. multilib
# an example is as follows:
# BBCLASSEXTEND = "multilib:lib32"
# it will create foo-lib32, inheriting multilib.bbclass and set
# BBEXTENDCURR to "multilib" and BBEXTENDVARIANT to "lib32"
# CURRENTEXTEND to "lib32"
extendedmap = {}
variantmap = {}
for ext in extended.split():
eext = ext.split(':', 2)
eext = ext.split(':')
if len(eext) > 1:
extendedmap[ext] = eext[0]
variantmap[ext] = eext[1]
extendedmap[eext[1]] = eext[0]
else:
extendedmap[ext] = ext
@@ -447,7 +445,7 @@ def multi_finalize(fn, d):
def extendfunc(name, d):
if name != extendedmap[name]:
d.setVar("BBEXTENDCURR", extendedmap[name])
d.setVar("BBEXTENDVARIANT", variantmap[name])
d.setVar("BBEXTENDVARIANT", name)
else:
d.setVar("PN", "%s-%s" % (pn, name))
bb.parse.BBHandler.inherit([extendedmap[name]], d)

View File

@@ -96,7 +96,7 @@ def handle(fn, data, include):
s = s.rstrip()
if s[0] == '#': continue # skip comments
while s[-1] == '\\':
s2 = f.readline().strip()
s2 = f.readline()[:-1].strip()
lineno = lineno + 1
s = s[:-1] + s2
feeder(lineno, s, fn, statements)

View File

@@ -124,18 +124,6 @@ def findPreferredProvider(pn, cfgData, dataCache, pkg_pn = None, item = None):
itemstr = " (for item %s)" % item
if preferred_file is None:
logger.info("preferred version %s of %s not available%s", pv_str, pn, itemstr)
available_vers = []
for file_set in pkg_pn:
for f in file_set:
pe, pv, pr = dataCache.pkg_pepvpr[f]
ver_str = pv
if pe:
ver_str = "%s:%s" % (pe, ver_str)
if not ver_str in available_vers:
available_vers.append(ver_str)
if available_vers:
available_vers.sort()
logger.info("versions of %s available: %s", pn, ' '.join(available_vers))
else:
logger.debug(1, "selecting %s as PREFERRED_VERSION %s of package %s%s", preferred_file, pv_str, pn, itemstr)

View File

@@ -41,7 +41,7 @@ class TaskData:
"""
BitBake Task Data implementation
"""
def __init__(self, abort = True, tryaltconfigs = False, skiplist = None):
def __init__(self, abort = True, tryaltconfigs = False):
self.build_names_index = []
self.run_names_index = []
self.fn_index = []
@@ -70,8 +70,6 @@ class TaskData:
self.abort = abort
self.tryaltconfigs = tryaltconfigs
self.skiplist = skiplist
def getbuild_id(self, name):
"""
Return an ID number for the build target name.
@@ -350,22 +348,6 @@ class TaskData:
dependees.append(self.fn_index[fnid])
return dependees
def get_reasons(self, item, runtime=False):
"""
Get the reason(s) for an item not being provided, if any
"""
reasons = []
if self.skiplist:
for fn in self.skiplist:
skipitem = self.skiplist[fn]
if skipitem.pn == item:
reasons.append("%s was skipped: %s" % (skipitem.pn, skipitem.skipreason))
elif runtime and item in skipitem.rprovides:
reasons.append("%s RPROVIDES %s but was skipped: %s" % (skipitem.pn, item, skipitem.skipreason))
elif not runtime and item in skipitem.provides:
reasons.append("%s PROVIDES %s but was skipped: %s" % (skipitem.pn, item, skipitem.skipreason))
return reasons
def add_provider(self, cfgData, dataCache, item):
try:
self.add_provider_internal(cfgData, dataCache, item)
@@ -387,7 +369,7 @@ class TaskData:
return
if not item in dataCache.providers:
bb.event.fire(bb.event.NoProvider(item, dependees=self.get_dependees_str(item), reasons=self.get_reasons(item)), cfgData)
bb.event.fire(bb.event.NoProvider(item, dependees=self.get_rdependees_str(item)), cfgData)
raise bb.providers.NoProvider(item)
if self.have_build_target(item):
@@ -399,7 +381,7 @@ class TaskData:
eligible = [p for p in eligible if not self.getfn_id(p) in self.failed_fnids]
if not eligible:
bb.event.fire(bb.event.NoProvider(item, dependees=self.get_dependees_str(item), reasons=["No eligible PROVIDERs exist for '%s'" % item]), cfgData)
bb.event.fire(bb.event.NoProvider(item, dependees=self.get_dependees_str(item)), cfgData)
raise bb.providers.NoProvider(item)
if len(eligible) > 1 and foundUnique == False:
@@ -436,14 +418,14 @@ class TaskData:
all_p = bb.providers.getRuntimeProviders(dataCache, item)
if not all_p:
bb.event.fire(bb.event.NoProvider(item, runtime=True, dependees=self.get_rdependees_str(item), reasons=self.get_reasons(item, True)), cfgData)
bb.event.fire(bb.event.NoProvider(item, runtime=True, dependees=self.get_rdependees_str(item)), cfgData)
raise bb.providers.NoRProvider(item)
eligible, numberPreferred = bb.providers.filterProvidersRunTime(all_p, item, cfgData, dataCache)
eligible = [p for p in eligible if not self.getfn_id(p) in self.failed_fnids]
if not eligible:
bb.event.fire(bb.event.NoProvider(item, runtime=True, dependees=self.get_rdependees_str(item), reasons=["No eligible RPROVIDERs exist for '%s'" % item]), cfgData)
bb.event.fire(bb.event.NoProvider(item, runtime=True, dependees=self.get_rdependees_str(item)), cfgData)
raise bb.providers.NoRProvider(item)
if len(eligible) > 1 and numberPreferred == 0:

View File

@@ -84,39 +84,18 @@ class Configurator(gobject.GObject):
pmake = getString('PARALLEL_MAKE')
if pmake and pmake != self.config.get('PARALLEL_MAKE', ''):
self.config['PARALLEL_MAKE'] = pmake
pclass = getString('PACKAGE_CLASSES')
if pclass and pclass != self.config.get('PACKAGE_CLASSES', ''):
self.config['PACKAGE_CLASSES'] = pclass
fstypes = getString('IMAGE_FSTYPES')
if fstypes and fstypes != self.config.get('IMAGE_FSTYPES', ''):
self.config['IMAGE_FSTYPES'] = fstypes
# Values which aren't always set in the conf must be explicitly
# loaded as empty values for save to work
incompat = getString('INCOMPATIBLE_LICENSE')
if incompat and incompat != self.config.get('INCOMPATIBLE_LICENSE', ''):
self.config['INCOMPATIBLE_LICENSE'] = incompat
else:
self.config['INCOMPATIBLE_LICENSE'] = ""
# Non-standard, namespaces, variables for GUI preferences
toolchain = getString('HOB_BUILD_TOOLCHAIN')
if toolchain and toolchain != self.config.get('HOB_BUILD_TOOLCHAIN', ''):
self.config['HOB_BUILD_TOOLCHAIN'] = toolchain
header = getString('HOB_BUILD_TOOLCHAIN_HEADERS')
if header and header != self.config.get('HOB_BUILD_TOOLCHAIN_HEADERS', ''):
self.config['HOB_BUILD_TOOLCHAIN_HEADERS'] = header
pclass = getString('PACKAGE_CLASSES')
if pclass and pclass != self.config.get('PACKAGE_CLASSES', ''):
self.config['PACKAGE_CLASSES'] = pclass
self.orig_config = copy.deepcopy(self.config)
def setLocalConfVar(self, var, val):
self.config[var] = val
def getLocalConfVar(self, var):
if var in self.config:
return self.config[var]
else:
return ""
self.config[var] = val
def _loadLayerConf(self, path):
self.bblayers = path
@@ -160,9 +139,7 @@ class Configurator(gobject.GObject):
name = self._getLayerName(layerpath)
if name not in self.enabled_layers:
self.addLayer(name, layerpath)
return name, layerpath
else:
return None, None
return name, layerpath
def addLayer(self, name, path):
self.enabled_layers[name] = path
@@ -251,7 +228,7 @@ class Configurator(gobject.GObject):
cnt = cnt + 1
if not replaced:
new_config_lines.append("%s = \"%s\"\n" % (var, changed_values[var]))
new_config_lines.append("%s = \"%s\"" % (var, changed_values[var]))
# Add the modified variables
config_lines.extend(new_config_lines)
@@ -263,24 +240,6 @@ class Configurator(gobject.GObject):
del self.orig_config
self.orig_config = copy.deepcopy(self.config)
def insertTempBBPath(self, bbpath, bbfiles):
# Create a backup of the local.conf
bkup = "%s~" % self.local
os.rename(self.local, bkup)
# read the original conf into a list
with open(bkup, 'r') as config:
config_lines = config.readlines()
if bbpath:
config_lines.append("BBPATH := \"${BBPATH}:%s\"\n" % bbpath)
if bbfiles:
config_lines.append("BBFILES := \"${BBFILES} %s\"\n" % bbfiles)
# Write the updated lines list object to the local.conf
with open(self.local, "w") as n:
n.write("".join(config_lines))
def writeLayerConf(self):
# If we've not added/removed new layers don't write
if not self._isLayerConfDirty():

View File

@@ -52,19 +52,20 @@ class HobHandler(gobject.GObject):
"error" : (gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE,
(gobject.TYPE_STRING,)),
"build-complete" : (gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE,
()),
"reload-triggered" : (gobject.SIGNAL_RUN_LAST,
gobject.TYPE_NONE,
(gobject.TYPE_STRING,
gobject.TYPE_STRING)),
}
(CFG_PATH_LOCAL, CFG_PATH_HOB, CFG_PATH_LAYERS, CFG_FILES_DISTRO, CFG_FILES_MACH, CFG_FILES_SDK, FILES_MATCH_CLASS, GENERATE_TGTS, REPARSE_FILES, BUILD_IMAGE) = range(10)
def __init__(self, taskmodel, server):
gobject.GObject.__init__(self)
self.current_command = None
self.building = None
self.gplv3_excluded = False
self.build_toolchain = False
self.build_toolchain_headers = False
self.generating = False
@@ -73,55 +74,30 @@ class HobHandler(gobject.GObject):
self.model = taskmodel
self.server = server
self.image_output_types = self.server.runCommand(["getVariable", "IMAGE_FSTYPES"]).split(" ")
self.command_map = {
"findConfigFilePathLocal" : ("findConfigFilePath", ["hob.local.conf"], "findConfigFilePathHobLocal"),
"findConfigFilePathHobLocal" : ("findConfigFilePath", ["bblayers.conf"], "findConfigFilePathLayers"),
"findConfigFilePathLayers" : ("findConfigFiles", ["DISTRO"], "findConfigFilesDistro"),
"findConfigFilesDistro" : ("findConfigFiles", ["MACHINE"], "findConfigFilesMachine"),
"findConfigFilesMachine" : ("findConfigFiles", ["MACHINE-SDK"], "findConfigFilesSdkMachine"),
"findConfigFilesSdkMachine" : ("findFilesMatchingInDir", ["rootfs_", "classes"], "findFilesMatchingPackage"),
"findFilesMatchingPackage" : ("generateTargetsTree", ["classes/image.bbclass"], None),
"generateTargetsTree" : (None, [], None),
}
def run_next_command(self):
if self.current_command and not self.generating:
self.emit("generating-data")
self.generating = True
if self.current_command == self.CFG_PATH_LOCAL:
self.current_command = self.CFG_PATH_HOB
self.server.runCommand(["findConfigFilePath", "hob.local.conf"])
elif self.current_command == self.CFG_PATH_HOB:
self.current_command = self.CFG_PATH_LAYERS
self.server.runCommand(["findConfigFilePath", "bblayers.conf"])
elif self.current_command == self.CFG_PATH_LAYERS:
self.current_command = self.CFG_FILES_DISTRO
self.server.runCommand(["findConfigFiles", "DISTRO"])
elif self.current_command == self.CFG_FILES_DISTRO:
self.current_command = self.CFG_FILES_MACH
self.server.runCommand(["findConfigFiles", "MACHINE"])
elif self.current_command == self.CFG_FILES_MACH:
self.current_command = self.CFG_FILES_SDK
self.server.runCommand(["findConfigFiles", "MACHINE-SDK"])
elif self.current_command == self.CFG_FILES_SDK:
self.current_command = self.FILES_MATCH_CLASS
self.server.runCommand(["findFilesMatchingInDir", "rootfs_", "classes"])
elif self.current_command == self.FILES_MATCH_CLASS:
self.current_command = self.GENERATE_TGTS
self.server.runCommand(["generateTargetsTree", "classes/image.bbclass"])
elif self.current_command == self.GENERATE_TGTS:
if self.generating:
self.emit("data-generated")
self.generating = False
self.current_command = None
elif self.current_command == self.REPARSE_FILES:
if self.build_queue:
self.current_command = self.BUILD_IMAGE
else:
self.current_command = self.CFG_PATH_LAYERS
self.server.runCommand(["reparseFiles"])
elif self.current_command == self.BUILD_IMAGE:
self.building = "image"
if self.generating:
self.emit("data-generated")
self.generating = False
bbpath = self.server.runCommand(["getVariable", "BBPATH"])
bbfiles = self.server.runCommand(["getVariable", "BBFILES"])
self.server.runCommand(["buildTargets", self.build_queue, "build"])
self.build_queue = []
self.current_command = None
# FIXME: this is ugly and I *will* replace it
if self.current_command:
if not self.generating:
self.emit("generating-data")
self.generating = True
next_cmd = self.command_map[self.current_command]
command = next_cmd[0]
argument = next_cmd[1]
self.current_command = next_cmd[2]
args = [command]
args.extend(argument)
self.server.runCommand(args)
def handle_event(self, event, running_build, pbar):
if not event:
@@ -131,6 +107,8 @@ class HobHandler(gobject.GObject):
if self.building:
running_build.handle_event(event)
elif isinstance(event, bb.event.TargetsTreeGenerated):
self.emit("data-generated")
self.generating = False
if event._model:
self.model.populate(event._model)
elif isinstance(event, bb.event.ConfigFilesFound):
@@ -207,7 +185,8 @@ class HobHandler(gobject.GObject):
img = self.model.selected_image
selected_packages, _ = self.model.get_selected_packages()
self.emit("reload-triggered", img, " ".join(selected_packages))
self.current_command = self.REPARSE_FILES
self.server.runCommand(["reparseFiles"])
self.current_command = "findConfigFilePathLayers"
self.run_next_command()
def set_bbthreads(self, threads):
@@ -217,48 +196,27 @@ class HobHandler(gobject.GObject):
pmake = "-j %s" % threads
self.server.runCommand(["setVariable", "BB_NUMBER_THREADS", pmake])
def build_image(self, image, image_path, configurator):
def run_build(self, tgts):
self.building = "image"
targets = []
targets.append(image)
targets.append(tgts)
if self.build_toolchain and self.build_toolchain_headers:
targets.append("meta-toolchain-sdk")
targets = ["meta-toolchain-sdk"] + targets
elif self.build_toolchain:
targets.append("meta-toolchain")
self.build_queue = targets
bbpath_ok = False
bbpath = self.server.runCommand(["getVariable", "BBPATH"])
if image_path in bbpath.split(":"):
bbpath_ok = True
bbfiles_ok = False
bbfiles = self.server.runCommand(["getVariable", "BBFILES"]).split(" ")
for files in bbfiles:
import re
pattern = "%s/\*.bb" % image_path
if re.match(pattern, files):
bbfiles_ok = True
if not bbpath_ok:
nbbp = image_path
else:
nbbp = None
if not bbfiles_ok:
nbbf = "%s/*.bb" % image_path
else:
nbbf = None
if not bbfiles_ok or not bbpath_ok:
configurator.insertTempBBPath(nbbp, nbbf)
self.current_command = self.REPARSE_FILES
self.run_next_command()
targets = ["meta-toolchain"] + targets
self.server.runCommand(["buildTargets", targets, "build"])
def build_packages(self, pkgs):
    """Ask the server to build the supplied list of packages.

    If a toolchain build was queued alongside an image build, fold the
    'meta-toolchain' target into this package build instead of leaving
    it pending in the build queue.
    """
    self.building = "packages"
    if 'meta-toolchain' in self.build_queue:
        self.build_queue.remove('meta-toolchain')
        # Bug fix: the original used pkgs.extend('meta-toolchain'),
        # which extends a list with a *string* and therefore appends one
        # entry per character ('m', 'e', 't', 'a', ...).  append() adds
        # the single target name, which is what was intended.
        pkgs.append('meta-toolchain')
    self.server.runCommand(["buildTargets", pkgs, "build"])
def build_file(self, image):
    """Kick off a build of a single recipe file via the server."""
    # Mark an image build as in progress so event handling treats
    # subsequent build events accordingly.
    self.building = "image"
    command = ["buildFile", image, "build"]
    self.server.runCommand(command)
def cancel_build(self, force=False):
if force:
# Force the cooker to stop as quickly as possible
@@ -268,8 +226,13 @@ class HobHandler(gobject.GObject):
# leave the workdir in a usable state
self.server.runCommand(["stateShutdown"])
def set_incompatible_license(self, incompatible):
    """Set the INCOMPATIBLE_LICENSE variable on the server."""
    command = ["setVariable", "INCOMPATIBLE_LICENSE", incompatible]
    self.server.runCommand(command)
def toggle_gplv3(self, excluded):
    """Enable or disable exclusion of GPLv3 packages.

    Only talks to the server when the requested state differs from the
    current one, setting INCOMPATIBLE_LICENSE to "GPLv3" (excluded) or
    "" (allowed).
    """
    if self.gplv3_excluded == excluded:
        return  # nothing to do, state unchanged
    self.gplv3_excluded = excluded
    value = "GPLv3" if excluded else ""
    self.server.runCommand(["setVariable", "INCOMPATIBLE_LICENSE", value])
def toggle_toolchain(self, enabled):
if self.build_toolchain != enabled:
@@ -295,23 +258,8 @@ class HobHandler(gobject.GObject):
self.building = None
self.emit("build-complete")
def set_fstypes(self, fstypes):
    """Set IMAGE_FSTYPES on the server to the supplied string."""
    command = ["setVariable", "IMAGE_FSTYPES", fstypes]
    self.server.runCommand(command)
def add_image_output_type(self, output_type):
    """Add an image output type to the selection.

    Duplicates are ignored.  The full selection is pushed to the server
    as a space-separated IMAGE_FSTYPES string, which is also returned.
    """
    known = self.image_output_types
    if output_type not in known:
        known.append(output_type)
    fstypes = " ".join(known)
    self.set_fstypes(fstypes)
    return fstypes
def remove_image_output_type(self, output_type):
    """Remove an image output type from the selection.

    Removing an unknown type is a no-op.  The remaining selection is
    pushed to the server as IMAGE_FSTYPES and returned.
    """
    known = self.image_output_types
    if output_type in known:
        # remove() drops the first occurrence, matching the original
        # index()/pop() pair.
        known.remove(output_type)
    fstypes = " ".join(known)
    self.set_fstypes(fstypes)
    return fstypes
def set_image_output_type(self, output_type):
    """Set IMAGE_FSTYPES on the server to a single output type."""
    command = ["setVariable", "IMAGE_FSTYPES", output_type]
    self.server.runCommand(command)
def get_image_deploy_dir(self):
    """Return the server's DEPLOY_DIR_IMAGE value (where built images land)."""
    query = ["getVariable", "DEPLOY_DIR_IMAGE"]
    return self.server.runCommand(query)

View File

@@ -19,7 +19,6 @@
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import gtk
import glib
from bb.ui.crumbs.configurator import Configurator
class HobPrefs(gtk.Dialog):
@@ -30,15 +29,11 @@ class HobPrefs(gtk.Dialog):
if model:
model.clear()
def output_type_toggled_cb(self, check, handler):
    """Checkbox handler: add or remove an image output type.

    The checkbox label is the output type name; the handler returns the
    resulting IMAGE_FSTYPES string, which is persisted to local.conf.
    """
    output_type = check.get_label()
    if check.get_active():
        self.selected_image_types = handler.add_image_output_type(output_type)
    else:
        self.selected_image_types = handler.remove_image_output_type(output_type)
    # Persist the updated selection to the user's local.conf.
    self.configurator.setLocalConfVar('IMAGE_FSTYPES', "%s" % self.selected_image_types)
def output_type_changed_cb(self, combo, handler):
    """Combo handler: switch the image output type when it changes."""
    new_type = combo.get_active_text()
    if new_type == self.curr_output_type:
        return  # no change, avoid a redundant server round trip
    self.curr_output_type = new_type
    handler.set_image_output_type(new_type)
def sdk_machine_combo_changed_cb(self, combo, handler):
sdk_mach = combo.get_active_text()
@@ -93,7 +88,6 @@ class HobPrefs(gtk.Dialog):
self.curr_package_format = package_format
self.configurator.setLocalConfVar('PACKAGE_CLASSES', 'package_%s' % package_format)
handler.set_package_format(package_format)
self.reload_required = True
def update_package_formats(self, handler, formats):
active = 0
@@ -113,20 +107,12 @@ class HobPrefs(gtk.Dialog):
def include_gplv3_cb(self, toggle):
excluded = toggle.get_active()
orig_incompatible = self.configurator.getLocalConfVar('INCOMPATIBLE_LICENSE')
new_incompatible = ""
self.handler.toggle_gplv3(excluded)
if excluded:
if not orig_incompatible:
new_incompatible = "GPLv3"
elif not orig_incompatible.find('GPLv3'):
new_incompatible = "%s GPLv3" % orig_incompatible
self.configurator.setLocalConfVar('INCOMPATIBLE_LICENSE', 'GPLv3')
else:
new_incompatible = orig_incompatible.replace('GPLv3', '')
if new_incompatible != orig_incompatible:
self.handler.set_incompatible_license(new_incompatible)
self.configurator.setLocalConfVar('INCOMPATIBLE_LICENSE', new_incompatible)
self.reload_required = True
self.configurator.setLocalConfVar('INCOMPATIBLE_LICENSE', '')
self.reload_required = True
def change_bb_threads_cb(self, spinner):
val = spinner.get_value_as_int()
@@ -140,19 +126,11 @@ class HobPrefs(gtk.Dialog):
def toggle_toolchain_cb(self, check):
    """Checkbox handler: toggle building an external toolchain.

    Notifies the build handler and persists the '0'/'1' flag to
    local.conf as HOB_BUILD_TOOLCHAIN.
    """
    enabled = check.get_active()
    flag = '1' if enabled else '0'
    self.handler.toggle_toolchain(enabled)
    self.configurator.setLocalConfVar('HOB_BUILD_TOOLCHAIN', flag)
def toggle_headers_cb(self, check):
    """Checkbox handler: toggle including development headers in the toolchain.

    Notifies the build handler and persists the '0'/'1' flag to
    local.conf as HOB_BUILD_TOOLCHAIN_HEADERS.
    """
    enabled = check.get_active()
    flag = '1' if enabled else '0'
    self.handler.toggle_toolchain_headers(enabled)
    self.configurator.setLocalConfVar('HOB_BUILD_TOOLCHAIN_HEADERS', flag)
def set_parent_window(self, parent):
    """Make *parent* the transient parent of this dialog (centres it and
    keeps it above the main window)."""
    self.set_transient_for(parent)
@@ -165,8 +143,7 @@ class HobPrefs(gtk.Dialog):
glib.idle_add(self.handler.reload_data)
def __init__(self, configurator, handler, curr_sdk_mach, curr_distro, pclass,
cpu_cnt, pmake, bbthread, selected_image_types, all_image_types,
gplv3disabled, build_toolchain, build_toolchain_headers):
cpu_cnt, pmake, bbthread, image_types):
"""
"""
gtk.Dialog.__init__(self, "Preferences", None,
@@ -184,14 +161,10 @@ class HobPrefs(gtk.Dialog):
self.curr_sdk_mach = curr_sdk_mach
self.curr_distro = curr_distro
self.curr_package_format = pclass
self.curr_output_type = None
self.cpu_cnt = cpu_cnt
self.pmake = pmake
self.bbthread = bbthread
self.selected_image_types = selected_image_types.split(" ")
self.gplv3disabled = gplv3disabled
self.build_toolchain = build_toolchain
self.build_toolchain_headers = build_toolchain_headers
self.reload_required = False
self.distro_handler_id = None
self.sdk_machine_handler_id = None
@@ -226,7 +199,6 @@ class HobPrefs(gtk.Dialog):
check = gtk.CheckButton("Exclude GPLv3 packages")
check.set_tooltip_text("Check this box to prevent GPLv3 packages from being included in your image")
check.show()
check.set_active(self.gplv3disabled)
check.connect("toggled", self.include_gplv3_cb)
hbox.pack_start(check, expand=False, fill=False, padding=6)
hbox = gtk.HBox(False, 12)
@@ -237,29 +209,22 @@ class HobPrefs(gtk.Dialog):
label.show()
hbox.pack_start(label, expand=False, fill=False, padding=6)
self.package_combo = gtk.combo_box_new_text()
self.package_combo.set_tooltip_text("""The package format is that used in creation
of the root filesystem and also dictates the package manager used in your image""")
self.package_combo.set_tooltip_text("Select the package format you would like to use in your image")
self.package_combo.show()
hbox.pack_start(self.package_combo, expand=False, fill=False, padding=6)
if all_image_types:
# Image output type selector
label = gtk.Label("Image output types:")
label.show()
hbox.pack_start(label, expand=False, fill=False, padding=6)
chk_cnt = 3
for it in all_image_types.split(" "):
chk_cnt = chk_cnt + 1
if chk_cnt % 6 == 0:
hbox = gtk.HBox(False, 12)
hbox.show()
pbox.pack_start(hbox, expand=False, fill=False, padding=6)
chk = gtk.CheckButton(it)
if it in self.selected_image_types:
chk.set_active(True)
chk.set_tooltip_text("Build an %s image" % it)
chk.connect("toggled", self.output_type_toggled_cb, handler)
chk.show()
hbox.pack_start(chk, expand=False, fill=False, padding=3)
# Image output type selector
label = gtk.Label("Image output type:")
label.show()
hbox.pack_start(label, expand=False, fill=False, padding=6)
output_combo = gtk.combo_box_new_text()
if image_types:
for it in image_types.split(" "):
output_combo.append_text(it)
output_combo.connect("changed", self.output_type_changed_cb, handler)
else:
output_combo.set_sensitive(False)
output_combo.show()
hbox.pack_start(output_combo)
# BitBake
label = gtk.Label()
label.set_markup("<b>BitBake</b>")
@@ -277,12 +242,7 @@ class HobPrefs(gtk.Dialog):
pbox.pack_start(hbox, expand=False, fill=False, padding=6)
label = gtk.Label("BitBake threads:")
label.show()
# NOTE: may be a good idea in future to intelligently cap the maximum
# values but we need more data to make an educated decision, for now
# set a high maximum as a value for upper bounds is required by the
# gtk.Adjustment
spin_max = 30 # seems like a high enough arbitrary number
#spin_max = self.cpu_cnt * 3
spin_max = 9 #self.cpu_cnt * 3
hbox.pack_start(label, expand=False, fill=False, padding=6)
bbadj = gtk.Adjustment(value=self.bbthread, lower=1, upper=spin_max, step_incr=1)
bbspinner = gtk.SpinButton(adjustment=bbadj, climb_rate=1, digits=0)
@@ -314,7 +274,6 @@ class HobPrefs(gtk.Dialog):
pbox.pack_start(hbox, expand=False, fill=False, padding=6)
toolcheck = gtk.CheckButton("Build external development toolchain with image")
toolcheck.show()
toolcheck.set_active(self.build_toolchain)
toolcheck.connect("toggled", self.toggle_toolchain_cb)
hbox.pack_start(toolcheck, expand=False, fill=False, padding=6)
hbox = gtk.HBox(False, 12)
@@ -329,7 +288,6 @@ class HobPrefs(gtk.Dialog):
hbox.pack_start(self.sdk_machine_combo, expand=False, fill=False, padding=6)
headerscheck = gtk.CheckButton("Include development headers with toolchain")
headerscheck.show()
headerscheck.set_active(self.build_toolchain_headers)
headerscheck.connect("toggled", self.toggle_headers_cb)
hbox.pack_start(headerscheck, expand=False, fill=False, padding=6)
self.connect("response", self.prefs_response_cb)

View File

@@ -132,6 +132,5 @@ class LayerEditor(gtk.Dialog):
# FIXME: verify we've actually got a layer conf?
if path.endswith(".conf"):
name, layerpath = self.configurator.addLayerConf(path)
if name:
self.newly_added[name] = layerpath
self.layer_store.append([name, layerpath, True])
self.newly_added[name] = layerpath
self.layer_store.append([name, layerpath, True])

View File

@@ -60,19 +60,9 @@ require %s
IMAGE_INSTALL += "%s"
"""
meta_path = model.find_image_path(self.base_image)
empty_template = """
# Recipe generated by the HOB
inherit core-image
IMAGE_INSTALL = "%s"
"""
if self.base_image and not self.base_image == "empty":
meta_path = model.find_image_path(self.base_image)
recipe = template % (meta_path, self.userpkgs)
else:
recipe = empty_template % self.allpkgs
recipe = template % (meta_path, self.userpkgs)
if os.path.exists(writepath):
os.rename(writepath, "%s~" % writepath)
@@ -195,9 +185,6 @@ class TaskListModel(gtk.ListStore):
if model.get_value(it, self.COL_TYPE) != 'package':
return False
else:
name = model.get_value(it, self.COL_NAME)
if name.count('-native') or name.count('cross'):
return False
return True
"""
@@ -275,13 +262,13 @@ class TaskListModel(gtk.ListStore):
it = self.images.iter_next(it)
# Mark all of the additional packages for inclusion
packages = rep.userpkgs.split(" ")
packages = rep.packages.split(" ")
it = self.get_iter_first()
while it:
path = self.get_path(it)
name = self[path][self.COL_NAME]
if name in packages:
self.include_item(path, binb="User Selected")
self.include_item(path)
packages.remove(name)
it = self.iter_next(it)
@@ -306,15 +293,11 @@ class TaskListModel(gtk.ListStore):
self[path][self.COL_INC] = False
"""
Recursively called to mark the item at opath and any package which
depends on it for removal.
NOTE: This method dumbly removes user selected packages and since we don't
do significant reverse dependency tracking it's easier and simpler to save
the items marked as user selected and re-add them once the removal sweep is
complete.
recursively called to mark the item at opath and any package which
depends on it for removal
"""
def mark(self, opath):
usersel = {}
removals = []
it = self.get_iter_first()
name = self[opath][self.COL_NAME]
@@ -323,40 +306,20 @@ class TaskListModel(gtk.ListStore):
# Remove all dependent packages, update binb
while it:
path = self.get_path(it)
it = self.iter_next(it)
inc = self[path][self.COL_INC]
deps = self[path][self.COL_DEPS]
binb = self[path][self.COL_BINB]
itype = self[path][self.COL_TYPE]
iname = self[path][self.COL_NAME]
# We ignore anything that isn't a package
if not itype == "package":
continue
# If the user added this item and it's not the item we're removing
# we should keep it and its dependencies, the easiest way to do so
# is to save its name and re-mark it for inclusion once dependency
# processing is complete
if binb == "User Selected":
usersel[iname] = self[path][self.COL_IMG]
# FIXME: need to ensure partial name matching doesn't happen
if inc and deps.count(name):
# found a dependency, remove it
self.mark(path)
if inc and binb.count(name):
bib = self.find_alt_dependency(name)
self[path][self.COL_BINB] = bib
# Re-add any removed user selected items
for u in usersel:
npath = self.find_path_for_item(u)
self.include_item(item_path=npath,
binb="User Selected",
image_contents=usersel[u])
it = self.iter_next(it)
"""
Remove items from contents if the have an empty COL_BINB (brought in by)
caused by all packages they are a dependency of being removed.
@@ -388,21 +351,22 @@ class TaskListModel(gtk.ListStore):
"""
Find the name of an item in the image contents which depends on the item
name.
Returns either an item name (str) or None
at contents_path returns either an item name (str) or None
NOTE:
contents_path must be a path in the self.contents gtk.TreeModel
"""
def find_alt_dependency(self, name):
it = self.contents.get_iter_first()
it = self.get_iter_first()
while it:
# iterate all items in the contents model
path = self.contents.get_path(it)
deps = self.contents[path][self.COL_DEPS]
itname = self.contents[path][self.COL_NAME]
inc = self.contents[path][self.COL_INC]
# iterate all items in the model
path = self.get_path(it)
deps = self[path][self.COL_DEPS]
itname = self[path][self.COL_NAME]
inc = self[path][self.COL_INC]
if itname != name and inc and deps.count(name) > 0:
# if this item depends on the item, return this items name
return itname
it = self.contents.iter_next(it)
it = self.iter_next(it)
return ""
"""
@@ -513,23 +477,9 @@ class TaskListModel(gtk.ListStore):
it = self.contents.iter_next(it)
return userpkgs, allpkgs
def image_contents_removed(self):
it = self.get_iter_first()
while it:
sel = self.get_value(it, self.COL_INC)
img = self.get_value(it, self.COL_IMG)
if img and not sel:
return True
it = self.iter_next(it)
return False
def get_build_rep(self):
userpkgs, allpkgs = self.get_selected_packages()
# If base image contents have been removed start from an empty rootfs
if not self.selected_image or self.image_contents_removed():
image = "empty"
else:
image = self.selected_image
image = self.selected_image
return BuildRep(" ".join(userpkgs), " ".join(allpkgs), image)
@@ -538,18 +488,10 @@ class TaskListModel(gtk.ListStore):
it = self.contents.get_iter_first()
while it:
name = self.contents.get_value(it, self.COL_NAME)
itype = self.contents.get_value(it, self.COL_TYPE)
deps = self.contents.get_value(it, self.COL_DEPS)
if self.contents.get_value(it, self.COL_DEPS).count(pn) != 0:
revdeps.append(self.contents.get_value(it, self.COL_NAME))
it = self.contents.iter_next(it)
if not itype == 'package':
continue
if deps.count(pn) != 0:
revdeps.append(name)
if pn in revdeps:
revdeps.remove(pn)
return revdeps

View File

@@ -46,7 +46,6 @@ class MainWindow (gtk.Window):
self.files_to_clean = []
self.selected_image = None
self.selected_packages = None
self.stopping = False
self.model = taskmodel
self.model.connect("tasklist-populated", self.update_model)
@@ -57,7 +56,6 @@ class MainWindow (gtk.Window):
self.layers = layers
self.save_path = None
self.dirty = False
self.build_succeeded = False
self.connect("delete-event", self.destroy_window)
self.set_title("Image Creator")
@@ -66,9 +64,10 @@ class MainWindow (gtk.Window):
self.build = RunningBuild()
self.build.connect("build-failed", self.running_build_failed_cb)
self.build.connect("build-succeeded", self.running_build_succeeded_cb)
self.build.connect("build-complete", self.handler.build_complete_cb)
self.build.connect("build-started", self.build_started_cb)
self.build.connect("build-complete", self.build_complete_cb)
self.handler.connect("build-complete", self.build_complete_cb)
vbox = gtk.VBox(False, 0)
vbox.set_border_width(0)
@@ -115,17 +114,11 @@ class MainWindow (gtk.Window):
def scroll_tv_cb(self, model, path, it, view):
    """Row-inserted handler: keep the newest row visible by scrolling the
    tree view to it."""
    view.scroll_to_cell(path)
def running_build_succeeded_cb(self, running_build):
    """Record that the current build finished successfully; read later by
    the build-complete handling."""
    self.build_succeeded = True
def running_build_failed_cb(self, running_build):
    """Record that the current build failed."""
    self.build_succeeded = False
    # FIXME: handle this
    print("Build failed")
def image_changed_string_cb(self, model, new_image):
# disconnect the image combo's signal handler
if self.image_combo_id:
self.image_combo.disconnect(self.image_combo_id)
self.image_combo_id = None
cnt = 0
it = self.model.images.get_iter_first()
while it:
@@ -135,8 +128,6 @@ class MainWindow (gtk.Window):
break
it = self.model.images.iter_next(it)
cnt = cnt + 1
# Reconnect the signal handler
self.image_combo_id = self.image_combo.connect("changed", self.image_changed_cb)
def image_changed_cb(self, combo):
model = self.image_combo.get_model()
@@ -161,10 +152,6 @@ class MainWindow (gtk.Window):
def data_generated(self, handler):
self.generating = False
self.image_combo.set_model(self.model.images_model())
# Without this the image combo is incorrectly sized on first load of the GUI
self.image_combo.set_active(0)
self.image_combo.set_active(-1)
if not self.image_combo_id:
self.image_combo_id = self.image_combo.connect("changed", self.image_changed_cb)
self.enable_widgets()
@@ -295,7 +282,6 @@ class MainWindow (gtk.Window):
dialog.destroy()
if response == gtk.RESPONSE_OK:
self.reset_build()
self.search.set_text("")
return
def reset_build(self):
@@ -375,36 +361,26 @@ class MainWindow (gtk.Window):
self.dirty = False
def bake_clicked_cb(self, button):
build_image = True
rep = self.model.get_build_rep()
if not rep.base_image:
lbl = "<b>Build empty image or only packages?</b>\nA base image"
lbl = lbl + " has not been selected.\n\'Empty image' will build"
lbl = lbl + " an image with only the selected packages as its"
lbl = lbl + " contents.\n'Packages Only' will build only the"
lbl = lbl + " selected packages, no image will be created"
lbl = "<b>Build only packages?</b>\n\nAn image has not been selected, so only the selected packages will be built."
dialog = CrumbsDialog(self, lbl, gtk.STOCK_DIALOG_WARNING)
dialog.add_button(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL)
dialog.add_button("Empty Image", gtk.RESPONSE_OK)
dialog.add_button("Packages Only", gtk.RESPONSE_YES)
dialog.add_button("Build", gtk.RESPONSE_YES)
response = dialog.run()
dialog.destroy()
if response == gtk.RESPONSE_CANCEL:
return
elif response == gtk.RESPONSE_YES:
build_image = False
elif response == gtk.RESPONSE_OK:
rep.base_image = "empty"
if build_image:
import tempfile, datetime
image_name = "hob-%s-variant-%s" % (rep.base_image, datetime.date.today().isoformat())
image_file = "%s.bb" % (image_name)
image_dir = os.path.join(tempfile.gettempdir(), 'hob-images')
bb.utils.mkdirhier(image_dir)
recipepath = os.path.join(image_dir, image_file)
else:
# TODO: show a confirmation dialog ?
if not self.save_path:
import tempfile, datetime
image_name = "hob-%s-variant-%s.bb" % (rep.base_image, datetime.date.today().isoformat())
image_dir = os.path.join(tempfile.gettempdir(), 'hob-images')
bb.utils.mkdirhier(image_dir)
recipepath = os.path.join(image_dir, image_name)
else:
recipepath = self.save_path
rep.writeRecipe(recipepath, self.model)
# In the case where we saved the file for the purpose of building
@@ -413,10 +389,9 @@ class MainWindow (gtk.Window):
if not self.save_path:
self.files_to_clean.append(recipepath)
self.handler.build_image(image_name, image_dir, self.configurator)
else:
self.handler.build_packages(rep.allpkgs.split(" "))
self.handler.queue_image_recipe_path(recipepath)
self.handler.build_packages(rep.allpkgs.split(" "))
self.nb.set_current_page(1)
def back_button_clicked_cb(self, button):
@@ -427,20 +402,19 @@ class MainWindow (gtk.Window):
self.nb.set_current_page(0)
def build_complete_cb(self, running_build):
self.stopping = False
self.back.connect("clicked", self.back_button_clicked_cb)
self.back.set_sensitive(True)
self.cancel.set_sensitive(False)
for f in self.files_to_clean:
os.remove(f)
lbl = "<b>Build completed</b>\n\nClick 'Edit Image' to start another build or 'View Messages' to view the messages output during the build."
if self.handler.building == "image" and self.build_succeeded:
lbl = "<b>Build completed</b>\n\nClick 'Edit Image' to start another build or 'View Log' to view the build log."
if self.handler.building == "image":
deploy = self.handler.get_image_deploy_dir()
lbl = lbl + "\n<a href=\"file://%s\" title=\"%s\">Browse folder of built images</a>." % (deploy, deploy)
dialog = CrumbsDialog(self, lbl)
dialog.add_button("View Messages", gtk.RESPONSE_CANCEL)
dialog.add_button("View Log", gtk.RESPONSE_CANCEL)
dialog.add_button("Edit Image", gtk.RESPONSE_OK)
response = dialog.run()
dialog.destroy()
@@ -602,12 +576,12 @@ class MainWindow (gtk.Window):
hb = gtk.HBox(False, 0)
hb.show()
self.search = gtk.Entry()
self.search.set_icon_from_stock(gtk.ENTRY_ICON_SECONDARY, "gtk-clear")
self.search.connect("icon-release", self.search_entry_clear_cb)
self.search.show()
self.pkgsaz_tree.set_search_entry(self.search)
hb.pack_end(self.search, False, False, 0)
search = gtk.Entry()
search.set_icon_from_stock(gtk.ENTRY_ICON_SECONDARY, "gtk-clear")
search.connect("icon-release", self.search_entry_clear_cb)
search.show()
self.pkgsaz_tree.set_search_entry(search)
hb.pack_end(search, False, False, 0)
label = gtk.Label("Search packages:")
label.show()
hb.pack_end(label, False, False, 6)
@@ -628,7 +602,7 @@ class MainWindow (gtk.Window):
self.tasks_tree.set_search_column(0)
self.tasks_tree.get_selection().set_mode(gtk.SELECTION_SINGLE)
col = gtk.TreeViewColumn('Package Collection')
col = gtk.TreeViewColumn('Package')
col.set_min_width(430)
col1 = gtk.TreeViewColumn('Description')
col1.set_min_width(430)
@@ -676,32 +650,13 @@ class MainWindow (gtk.Window):
return vbox
def cancel_build(self, button):
if self.stopping:
lbl = "<b>Force Stop build?</b>\nYou've already selected Stop once,"
lbl = lbl + " would you like to 'Force Stop' the build?\n\n"
lbl = lbl + "This will stop the build as quickly as possible but may"
lbl = lbl + " well leave your build directory in an unusable state"
lbl = lbl + " that requires manual steps to fix.\n"
dialog = CrumbsDialog(self, lbl, gtk.STOCK_DIALOG_WARNING)
dialog.add_button(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL)
dialog.add_button("Force Stop", gtk.RESPONSE_YES)
else:
lbl = "<b>Stop build?</b>\n\nAre you sure you want to stop this"
lbl = lbl + " build?\n\n'Force Stop' will stop the build as quickly as"
lbl = lbl + " possible but may well leave your build directory in an"
lbl = lbl + " unusable state that requires manual steps to fix.\n\n"
lbl = lbl + "'Stop' will stop the build as soon as all in"
lbl = lbl + " progress build tasks are finished. However if a"
lbl = lbl + " lengthy compilation phase is in progress this may take"
lbl = lbl + " some time."
dialog = CrumbsDialog(self, lbl, gtk.STOCK_DIALOG_WARNING)
dialog.add_button(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL)
dialog.add_button("Stop", gtk.RESPONSE_OK)
dialog.add_button("Force Stop", gtk.RESPONSE_YES)
lbl = "<b>Stop build?</b>\n\nAre you sure you want to stop this build?"
dialog = CrumbsDialog(self, lbl, gtk.STOCK_DIALOG_WARNING)
dialog.add_button(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL)
dialog.add_button("Stop", gtk.RESPONSE_OK)
dialog.add_button("Force Stop", gtk.RESPONSE_YES)
response = dialog.run()
dialog.destroy()
if response != gtk.RESPONSE_CANCEL:
self.stopping = True
if response == gtk.RESPONSE_OK:
self.handler.cancel_build()
elif response == gtk.RESPONSE_YES:
@@ -921,10 +876,9 @@ def main (server, eventHandler):
# The PARALLEL_MAKE variable will be of the format: "-j 3" and we only
# want a number for the spinner, so strip everything from the variable
# up to and including the space
pmake = int(pmake.lstrip("-j "))
pmake = int(pmake[pmake.find(" ")+1:])
selected_image_types = server.runCommand(["getVariable", "IMAGE_FSTYPES"])
all_image_types = server.runCommand(["getVariable", "IMAGE_TYPES"])
image_types = server.runCommand(["getVariable", "IMAGE_TYPES"])
pclasses = server.runCommand(["getVariable", "PACKAGE_CLASSES"]).split(" ")
# NOTE: we're only supporting one value for PACKAGE_CLASSES being set
@@ -932,17 +886,8 @@ def main (server, eventHandler):
# PACKAGE_CLASSES and that's the package manager used for the rootfs
pkg, sep, pclass = pclasses[0].rpartition("_")
incompatible = server.runCommand(["getVariable", "INCOMPATIBLE_LICENSE"])
gplv3disabled = False
if incompatible and incompatible.lower().find("gplv3"):
gplv3disabled = True
build_toolchain = bool(server.runCommand(["getVariable", "HOB_BUILD_TOOLCHAIN"]))
build_headers = bool(server.runCommand(["getVariable", "HOB_BUILD_TOOLCHAIN_HEADERS"]))
prefs = HobPrefs(configurator, handler, sdk_mach, distro, pclass, cpu_cnt,
pmake, bbthread, selected_image_types, all_image_types,
gplv3disabled, build_toolchain, build_headers)
pmake, bbthread, image_types)
layers = LayerEditor(configurator, None)
window = MainWindow(taskmodel, handler, configurator, prefs, layers, mach)
prefs.set_parent_window(window)
@@ -961,7 +906,7 @@ def main (server, eventHandler):
try:
# kick the while thing off
handler.current_command = handler.CFG_PATH_LOCAL
handler.current_command = "findConfigFilePathLocal"
server.runCommand(["findConfigFilePath", "local.conf"])
except xmlrpclib.Fault:
print("XMLRPC Fault getting commandline:\n %s" % x)

View File

@@ -208,9 +208,6 @@ def main(server, eventHandler):
logger.error("Nothing %sPROVIDES '%s' (but %s %sDEPENDS on or otherwise requires it)", r, event._item, ", ".join(event._dependees), r)
else:
logger.error("Nothing %sPROVIDES '%s'", r, event._item)
if event._reasons:
for reason in event._reasons:
logger.error("%s", reason)
continue
if isinstance(event, bb.runqueue.runQueueTaskStarted):

View File

@@ -856,16 +856,3 @@ def to_boolean(string, default=None):
return False
else:
raise ValueError("Invalid value for to_boolean: %s" % string)
def contains(variable, checkvalues, truevalue, falsevalue, d):
    """Check whether a datastore variable contains all of the given values.

    Arguments:
        variable    -- name of the variable to inspect
        checkvalues -- a whitespace-separated string of values, or any
                       iterable of value strings
        truevalue   -- returned when every check value is present
        falsevalue  -- returned when the variable is unset/empty or any
                       check value is missing
        d           -- the data store (must provide getVar)
    """
    val = d.getVar(variable, True)
    if not val:
        return falsevalue
    val = set(val.split())
    # Bug fix: `basestring` only exists on Python 2 and raises NameError
    # on Python 3; `str` covers string inputs on both code paths here.
    if isinstance(checkvalues, str):
        checkvalues = set(checkvalues.split())
    else:
        checkvalues = set(checkvalues)
    if checkvalues.issubset(val):
        return truevalue
    return falsevalue

View File

@@ -52,20 +52,6 @@ STYLESHEET = $(DOC)/*.css
endif
ifeq ($(DOC),dev-manual)
XSLTOPTS = --stringparam html.stylesheet style.css \
--stringparam chapter.autolabel 1 \
--stringparam section.autolabel 1 \
--stringparam section.label.includes.component.label 1 \
--xinclude
ALLPREQ = html pdf tarball
TARFILES = style.css dev-manual.html dev-manual.pdf figures/dev-title.png
MANUALS = $(DOC)/$(DOC).html $(DOC)/$(DOC).pdf
FIGURES = figures
STYLESHEET = $(DOC)/*.css
endif
ifeq ($(DOC),yocto-project-qs)
XSLTOPTS = --stringparam html.stylesheet style.css \
--xinclude

View File

@@ -18,7 +18,7 @@
<listitem><para><emphasis>Yocto Project Source Tree:</emphasis>
This term refers to the directory structure created as a result of downloading
and unpacking a Yocto Project release tarball.
The Yocto Project source tree contains BitBake, Documentation, Meta-data and
The Yocto Project source tree contains Bitbake, Documentation, Meta-data and
other files.
The name of the top-level directory of the Yocto Project source tree
is derived from the Yocto Project release tarball.
@@ -37,9 +37,9 @@
<literallayout class='monospaced'>
$ source poky-bernard-5.0.1/poky-init-build-env $HOME/mybuilds/YP-5.0.1
</literallayout>
If you don't specifically name the build directory then BitBake creates it
If you don't specifically name the build directory then Bitbake creates it
in the current directory and uses the name <filename>build</filename>.
Also, if you supply an existing directory then BitBake uses that
Also, if you supply an existing directory then Bitbake uses that
directory as the Yocto Project build directory and populates the build tree
beneath it.</para></listitem>
</itemizedlist>
@@ -90,19 +90,19 @@
The ADT Installer is contained in the ADT Installer tarball.
You can download the tarball into any directory from
<ulink url='http://autobuilder.yoctoproject.org/downloads/yocto-1.0/adt-installer/'></ulink>.
Or, you can use BitBake to generate the tarball inside the existing Yocto Project build tree.
Or, you can use Bitbake to generate the tarball inside the existing Yocto Project build tree.
</para>
<para>
If you use BitBake to generate the ADT Installer tarball, you must
If you use Bitbake to generate the ADT Installer tarball, you must
source the Yocto Project environment setup script located in the Yocto Project
source directory before running the BitBake command that creates the tarball.
source directory before running the Bitbake command that creates the tarball.
</para>
<para>
The following example commands download the Yocto Project release tarball, create the Yocto
Project source tree, set up the environment while also creating the Yocto Project build tree,
and finally run the BitBake command that results in the tarball
and finally run the Bitbake command that results in the tarball
<filename>~/yocto-project/build/tmp/deploy/sdk/adt_installer.tar.bz2</filename>:
<literallayout class='monospaced'>
$ cd ~
@@ -267,9 +267,9 @@
</section>
<section id='using-the-toolchain-from-within-the-build-tree'>
<title>Using BitBake and the Yocto Project Build Tree</title>
<title>Using Bitbake and the Yocto Project Build Tree</title>
<para>
A final way of installing just the cross-toolchain is to use BitBake within an existing
A final way of installing just the cross-toolchain is to use Bitbake within an existing
Yocto Project build tree.
Follow these steps:
<orderedlist>
@@ -291,10 +291,10 @@
<listitem><para>Run <filename>bitbake meta-ide-support</filename> to complete the
cross-toolchain installation.
<note>If you change your working directory after you source the environment
setup script and before you run the BitBake command the command will not work.
Be sure to run the BitBake command immediately after checking or editing the
setup script and before you run the Bitbake command the command will not work.
Be sure to run the Bitbake command immediately after checking or editing the
<filename>local.conf</filename> but without changing your working directory.</note>
Once BitBake finishes, the cross-toolchain is installed.
Once Bitbake finishes, the cross-toolchain is installed.
You will notice environment setup files for the cross-toolchain in the
Yocto Project build tree in the <filename>tmp</filename> directory.
Setup script filenames contain the strings <filename>environment-setup</filename>.
@@ -312,7 +312,7 @@
If you used the ADT Installer or used an existing ADT tarball to install the ADT,
then you can find this script in the <filename>/opt/poky/$SDKVERSION</filename>
directory.
If you used BitBake and the Yocto Project Build Tree to install the cross-toolchain
If you used Bitbake and the Yocto Project Build Tree to install the cross-toolchain
then you can find the environment setup scripts in in the Yocto Project build tree
in the <filename>tmp</filename> directory.
</para>

View File

@@ -27,20 +27,13 @@
of software support of hardware.
</para>
<note><para>
The information here does not provide an example of how to create a BSP.
For information on how to create a BSP, see the Yocto Project Development Manual or the
<ulink url='https://wiki.yoctoproject.org/wiki/Transcript:_creating_one_generic_Atom_BSP_from_another'></ulink>
wiki page.
</para></note>
<para>
The proposed format does have elements that are specific to the Yocto Project and
The proposed format does have elements that are specific to the Poky and
OpenEmbedded build systems.
It is intended that this information can be
used by other systems besides Yocto Project and OpenEmbedded and that it will be simple
used by other systems besides Poky and OpenEmbedded and that it will be simple
to extract information and convert it to other formats if required.
Yocto Project, through its standard layers mechanism, can directly accept the format
Poky, through its standard layers mechanism, can directly accept the format
described as a layer.
The BSP captures all
the hardware-specific details in one place in a standard format, which is
@@ -93,7 +86,7 @@
</literallayout>
For more detailed information on layers, see the
<ulink url='http://www.yoctoproject.org/docs/poky-ref-manual/poky-ref-manual.html#usingpoky-changes-layers'>
BitBake Layers</ulink> section of the Yocto Project Reference Manual.
BitBake Layers</ulink> section of the Poky Reference Manual.
</para>
<para>
@@ -211,8 +204,8 @@ meta-&lt;bsp_name&gt;/conf/layer.conf
</programlisting>
<para>
This file identifies the structure as a Yocto Project layer, identifies the
contents of the layer, and contains information about how Yocto Project should use it.
This file identifies the structure as a Poky layer, identifies the
contents of the layer, and contains information about how Poky should use it.
Generally, a standard boilerplate file such as the following works.
In the following example you would replace "bsp" and "_bsp" with the actual name
of the BSP (i.e. &lt;bsp_name&gt; from the example template).
@@ -235,7 +228,7 @@ BBFILE_PRIORITY_bsp = "5"
<para>
This file simply makes BitBake aware of the recipes and configuration directories.
This file must exist so that the Yocto Project build system can recognize the BSP.
This file must exist so that Poky can recognize the BSP.
</para>
</section>
@@ -247,7 +240,7 @@ meta-&lt;bsp_name&gt;/conf/machine/*.conf
<para>
The machine files bind together all the information contained elsewhere
in the BSP into a format that the Yocto Project build system can understand.
in the BSP into a format that Poky can understand.
If the BSP supports multiple machines, multiple machine configuration files
can be present.
These filenames correspond to the values to which users have set the MACHINE variable.
@@ -285,10 +278,10 @@ TARGET_CC_ARCH = "-m32 -march=core2 -msse3 -mtune=generic -mfpmath=sse"
<para>
The tune file would be included by the machine definition and can be
contained in the BSP or referenced from one of the standard core set of
files included with the Yocto Project.
files included with Poky itself.
</para>
<para>
Both the base package architecture file and the tune file are optional for a BSP layer.
Both the base package architecture file and the tune file are optional for a Poky BSP layer.
</para>
</section>
@@ -350,8 +343,8 @@ meta-&lt;bsp_name&gt;/recipes-kernel/linux/linux-yocto_git.bbappend
This file appends your specific changes to the kernel you are using.
</para>
<para>
For your BSP you typically want to use an existing Yocto Project kernel found in the
Yocto Project repository at <filename class='directory'>meta/recipes-kernel/linux</filename>.
For your BSP you typically want to use an existing Poky kernel found in the
Poky repository at <filename class='directory'>meta/recipes-kernel/kernel</filename>.
You can append your specific changes to the kernel recipe by using an append file,
which is located in the
<filename class='directory'>meta-&lt;bsp_name&gt;/recipes-kernel/linux</filename>
@@ -382,7 +375,7 @@ KMACHINE_crownbay = "yocto/standard/crownbay"
</programlisting>
This append file adds "crownbay" as a compatible machine,
and additionally sets a Yocto Kernel-specific variable that identifies the name of the
BSP branch to use in the Git repository to find configuration information.
BSP branch to use in the GIT repository to find configuration information.
</para>
<para>
One thing missing in this particular BSP, which you will typically need when
@@ -549,7 +542,7 @@ FILESEXTRAPATHS := "${THISDIR}/${PN}"
upon the user a requirement to accept the terms of a
'click-through' license.
Once the license is accepted the
Yocto Project build system can then build and include the
Poky build system can then build and include the
corresponding component in the final BSP image.
Some affected components might be essential to the normal
functioning of the system and have no 'free' replacement
@@ -581,7 +574,7 @@ FILESEXTRAPATHS := "${THISDIR}/${PN}"
</para>
<para>
Several methods exist within the Yocto Project build system to satisfy the licensing
Several methods exist within the Poky build system to satisfy the licensing
requirements for an encumbered BSP.
The following list describes them in preferential order:
</para>
@@ -651,7 +644,7 @@ FILESEXTRAPATHS := "${THISDIR}/${PN}"
These prompts usually take the form of instructions
needed to manually fetch the encumbered package(s)
and md5 sums into the required directory
(e.g. the <filename>yocto/build/downloads</filename>).
(e.g. the <filename>poky/build/downloads</filename>).
Once the manual package fetch has been
completed, restart the build to continue where
it left off.

View File

@@ -1,779 +0,0 @@
<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
<chapter id='dev-manual-cases'>
<title>Development Cases</title>
<para>
For the purposes of this manual we are going to focus on two common development cases or groupings:
System Development and User Application Development.
System Development covers Board Support Package (BSP) development and kernel image modification.
User Application Development covers development of applications that you intend to run on some
target hardware.
</para>
<para>
[WRITERS NOTE: What is undetermined at this point is how much of the entire development process
we include in this particular chapter.
In other words, do we cover debugging and emulation steps here on a case-specific basis?
Or, do we capture that information in the appropriate subsequent chapter by case?]
</para>
<section id='system-development'>
<title>System Development</title>
<para>
System development involves modification or creation of an image that you want to run on
a specific hardware target.
Usually when you want to create an image that runs on embedded hardware the image does
not require the same amount of features that a full-fledged Linux distribution provides.
Thus, you can create a much smaller image that is designed to just use the hardware
features for your particular hardware.
</para>
<para>
To help you understand how system development works in the Yocto Project, this section
covers two types of image development: BSP creation and kernel modification.
</para>
<section id='developing-a-board-support-package-bsp'>
<title>Developing a Board Support Package (BSP)</title>
<para>
A BSP is a package of recipes that when applied while building an image results in
an image you can run on a particular board.
Thus, the package, when compiled into the new image, supports the operation of the board.
</para>
<para>
Packages consist of recipes.
Recipes are sets of instructions for building a package.
The recipes describe where to get source code and what patches to apply.
Recipes also describe dependencies for libraries or for other recipes.
They also contain configuration and compilation options.
Recipes are logical units of execution.
</para>
<para>
Here are the basic steps involved in creating a BSP:
<orderedlist>
<listitem><para>Be sure you are set up to use Yocto Project (see
<xref linkend='dev-manual-start'>Getting Started with the Yocto Project</xref>).</para></listitem>
<listitem><para>Choose a BSP available with Yocto Project that most closely represents
your hardware.</para></listitem>
<listitem><para>Get set up with a base BSP.</para></listitem>
<listitem><para>Make a copy of the existing BSP and isolate your work by creating a layer
for your recipes.</para></listitem>
<listitem><para>Make configuration and recipe changes to your new BSP layer.</para></listitem>
<listitem><para>Prepare for the build.</para></listitem>
<listitem><para>Select and configure the kernel. (WRITER'S NOTE: Not sure on this step).</para></listitem>
<listitem><para>Identify the machine branch in the Git repository.</para></listitem>
<listitem><para>Build the image.</para></listitem>
</orderedlist>
You can view a video presentation of the BSP creation process
<ulink url='http://free-electrons.com/blog/elc-2011-videos'>here</ulink>.
You can also find supplemental information in the
<ulink url='http://yoctoproject.org/docs/1.1/bsp-guide/bsp-guide.html'>
Board Support Package (BSP) Development Guide</ulink>.
Finally, there is wiki page write up of the example located
<ulink url='https://wiki.yoctoproject.org/wiki/Transcript:_creating_one_generic_Atom_BSP_from_another'>
here</ulink> you might find helpful.
</para>
<section id='setting-up-yocto-project'>
<title>Setting Up Yocto Project</title>
<para>
For general host development system preparation such as package requirements and
operating system requirements, see
<xref linkend='dev-manual-start'>Getting Started with the Yocto Project</xref>)Chapter 2 of
this manual or the
<ulink url='http://www.yoctoproject.org/docs/1.1/yocto-project-qs/yocto-project-qs.html'>
Yocto Project Quick Start</ulink>.
</para>
<para>
You need to have the Yocto Project source tree available on your host system.
You can get that through tarball extraction or by initializing and checking out the
Yocto Project Git repository.
Typically, checking out the Git repository is the method to use.
This allows you to maintain a complete history of changes and facilitates you
contributing back to the Yocto Project.
However, if you just want the source you can download the Yocto Project Release
tarball from the
<ulink url='http://yoctoproject.org/download'>download page</ulink>.
If you download the tarball you can extract it into any directory you want using the
tar command.
For example, the following commands extract the 1.0.1 release tarball into
<filename>/usr/local/yocto</filename> with the Yocto Project source directory as
<filename>poky.bernard.5.0.1.tar.bz2</filename>:
<literallayout class='monospaced'>
/usr/local/yocto$ tar xfj poky.bernard.5.0.1.tar.bz2
</literallayout>
</para>
<para>
The following transcript shows how to initialize a Git repository and checkout the
Yocto Project source tree:
<literallayout class='monospaced'>
/usr/local/yocto$ git init
Initialized empty Git repository in /usr/local/yocto/.git
/usr/local/yocto$ git remote add poky git://git.yoctoproject.org/poky.git
/usr/local/yocto$ git remote update
Fetching poky
remote: Counting objects: 106111, done.
remote: Compressing objects: 100% (36106/36106), done.
remote: Total 106111 (delta 72275), reused 99193 (delta 66808)
Receiving objects: 100% (106111/106111), 69.51 MiB | 518 KiB/s, done.
Resolving deltas: 100% (72275/72275), done.
From git://git.yoctoproject.org/poky
* [new branch] 1.1_M1 -> poky/1.1_M1
* [new branch] 1.1_M2 -> poky/1.1_M2
* [new branch] bernard -> poky/bernard
* [new branch] blinky -> poky/blinky
* [new branch] clyde -> poky/clyde
* [new branch] elroy -> poky/elroy
* [new branch] green -> poky/green
* [new branch] laverne -> poky/laverne
* [new branch] master -> poky/master
* [new branch] pinky -> poky/pinky
* [new branch] purple -> poky/purple
* [new tag] 1.1_M1.final -> 1.1_M1.final
* [new tag] 1.1_M2.rc1 -> 1.1_M2.rc1
* [new tag] bernard-5.0.1 -> bernard-5.0.1
* [new tag] pinky-3.1.2 -> pinky-3.1.2
From git://git.yoctoproject.org/poky
* [new tag] 1.1_M1.rc1 -> 1.1_M1.rc1
* [new tag] 1.1_M1.rc2 -> 1.1_M1.rc2
* [new tag] bernard-1.0rc1 -> bernard-1.0rc1
* [new tag] bernard-5.0 -> bernard-5.0
* [new tag] bernard-5.0-alpha -> bernard-5.0-alpha
* [new tag] bernard-5.0rc1 -> bernard-5.0rc1
* [new tag] bernard-5.0rc2 -> bernard-5.0rc2
* [new tag] laverne-4.0 -> laverne-4.0
* [new tag] laverne-4.0.1 -> laverne-4.0.1
* [new tag] m4 -> m4
* [new tag] purple-3.2 -> purple-3.2
* [new tag] purple-3.2.1 -> purple-3.2.1
</literallayout>
</para>
<para>
Once you have the repository set up, you have many development branches from which
you can work.
For this example we are going to use the Yocto Project 1.0.1 Release,
which maps to the <filename>Bernard 5.0.1</filename> tag in Git.
<literallayout class='monospaced'>
/usr/local/yocto$ git checkout -b Bernard-5.0.1 bernard-5.0.1
Switched to a new branch 'bernard-5.0.1'
</literallayout>
</para>
</section>
<section id='choosing-a-base-bsp'>
<title>Choosing a Base BSP</title>
<para>
The Yocto Project ships with several BSPs that support various hardware.
It is best to base your new BSP on an existing BSP rather than create all the
recipes and configuration files from scratch.
While it is possible to create everything from scratch, basing your new BSP
on something that is close is much easier.
Or, at a minimum, it gives you some structure with which to start.
</para>
<para>
At this point you need to understand your target hardware well enough to determine which
existing BSP most closely matches it.
                Things to consider are your hardware's on-board features such as CPU type and graphics support.
You should look at the README files for supported BSPs to get an idea of which one
you could use.
A generic Atom-based BSP to consider is the Crown Bay with no Intel® Embedded Media
Graphics Driver (EMGD) support.
That is the BSP that this example is going to use.
</para>
<para>
To see the supported BSPs, go to the Yocto Project
<ulink url='http://www.yoctoproject.org/download'>download page</ulink> and click on “BSP Downloads.”
</para>
</section>
<section id='getting-your-base-bsp'>
<title>Getting Your Base BSP</title>
<para>
You need to have the base BSP layer on your development system.
Like the Yocto Project source tree you can get the BSP layer one of two ways:
download the tarball and extract it, or initialize a Git repository and check out the BSP.
You should use the same method that you used for the Yocto Project source tree.
</para>
<para>
If you are using tarball extraction then simply download the tarball for the base
BSP you chose in the previous step and then extract it into any directory
you choose using the tar command.
Upon extraction, the BSP source directory (layer) will be named
<filename>meta-&lt;BSP_name&gt;</filename>.
The following command extracts the Crown Bay BSP into a directory named
<filename>meta-crownbay</filename>:
<literallayout class='monospaced'>
/usr/local$ tar xjf crownbay-noemgd-bernard-5.0.1.tar.bz2
</literallayout>
</para>
<para>
If you initialized a Yocto Project Git repository then you need to do the same for the
BSP, which is located in the meta-intel Git repository.
The meta-intel repository contains all the metadata that supports BSP creation.
</para>
<para>
The following transcript shows the steps to create and set up the meta-intel Git
repository inside the Yocto Project Git repository:
<literallayout class='monospaced'>
/usr/local/yocto$ mkdir meta-intel
/usr/local/yocto$ cd meta-intel
/usr/local/yocto/meta-intel$ git init
Initialized empty Git repository in /usr/local/yocto/meta-intel/.git/
/usr/local/yocto/meta-intel$ git remote add meta-intel \ git://git.yoctoproject.org/meta-intel.git
/usr/local/yocto/meta-intel$ git remote update
Fetching meta-intel
remote: Counting objects: 1240, done.
remote: Compressing objects: 100% (1008/1008), done.
remote: Total 1240 (delta 513), reused 85 (delta 27)
Receiving objects: 100% (1240/1240), 1.55 MiB | 510 KiB/s, done.
Resolving deltas: 100% (513/513), done.
From git://git.yoctoproject.org/meta-intel
* [new branch] 1.1_M1 -> meta-intel/1.1_M1
* [new branch] 1.1_M2 -> meta-intel/1.1_M2
* [new branch] bernard -> meta-intel/bernard
* [new branch] dvhart/n450 -> meta-intel/dvhart/n450
* [new branch] laverne -> meta-intel/laverne
* [new branch] master -> meta-intel/master
</literallayout>
</para>
<para>
Once you have the repository set up, you have many development branches from
which you can work.
For this example we are going to use Bernard 5.0.
<literallayout class='monospaced'>
/usr/local/yocto/meta-intel$ git checkout -b Bernard-5.0.1 meta-intel/bernard
Branch Bernard-5.0.1 set up to track remote branch bernard from meta-intel.
Switched to a new branch 'bernard-5.0.1'
</literallayout>
</para>
</section>
<section id='making-a-copy-of-the-base bsp-to-create-your-new-bsp-layer'>
<title>Making a Copy of the Base BSP to Create Your New BSP Layer</title>
<para>
Now that you have the Yocto Project and base BSP source you need to create a
new layer for your BSP.
</para>
<para>
Layers are ideal for isolating and storing work for a given piece of hardware.
A layer is really just a location or area in which you place the recipes for your BSP.
In fact, a BSP is, in itself, a special type of layer.
Consider an application as another example that illustrates a layer.
Suppose you are creating an application that has library or other dependencies in
order for it to compile and run.
The layer, in this case, would be where all the recipes that define those dependencies
are kept. The key point for a layer is that it is an isolated area that contains
all the relevant information for the project that the Yocto Project build system knows about.
</para>
<note>
The Yocto Project supports four BSPs that are part of the
Yocto Project release: <filename>atom-pc</filename>, <filename>beagleboard</filename>,
<filename>mpc8315e</filename>, and <filename>routerstationpro</filename>.
The recipes and configurations for these four BSPs are located and dispersed
within <filename>meta</filename>, which can be found in the Yocto Project source directory.
Consequently, they are not totally isolated in the spirit of layers unless you think
of <filename>meta</filename> as a layer itself.
On the other hand, the Yocto Project has isolated BSP layers within
<filename>meta-intel</filename> for the Crown Bay, Emenlow, Jasper Forest, N450, and
Sugar Bay.
[WRITER'S NOTE: <filename>meta-yocto</filename>, <filename>meta</filename>, and
<filename>meta-intel</filename> need some explanation.
Not sure about the relationship of meta-yocto as compared to meta-intel.]
</note>
<para>
When you set up a layer for a new BSP you should follow a standard layout.
This layout is described in
<ulink url='http://www.yoctoproject.org/docs/1.1/bsp-guide/bsp-guide.html#bsp-filelayout'>
Example Filesystem Layout</ulink> section of the Board Support Package (BSP) Development
Guide.
In the standard layout you will notice a suggested hierarchy for BSP kernel recipes,
graphics recipes, and configuration information.
You can see the standard layout for the Crown Bay BSP in this example by examining the
directory structure of <filename>meta-crownbay</filename>.
</para>
<para>
To create your BSP layer you simply copy the <filename>meta-crownbay</filename>
layer to a new layer.
For this example the new layer is named <filename>meta-mymachine</filename>.
The name must follow the BSP layer naming convention, which is
<filename>meta-&lt;name&gt;</filename>.
The following example assumes a meta-intel Git repository.
If you downloaded and expanded a Crown Bay tarball then you simply copy the resulting
<filename>meta-crownbay</filename> directory structure to a location of your choice:
<literallayout class='monospaced'>
/usr/local/yocto/meta-intel$ cp -a meta-crownbay/ meta-mymachine
</literallayout>
</para>
</section>
<section id='making-changes-to-your-bsp'>
<title>Making Changes to Your BSP</title>
<para>
Right now you have two identical BSP layers with different names:
<filename>meta-crownbay</filename> and <filename>meta-mymachine</filename>.
You need to change your configurations so that they work for your new BSP and
your particular hardware.
                    We will look first at the configurations, which are all done in the layer's
                    <filename>conf</filename> directory.
</para>
<para>
First, since in this example the new BSP will not support EMGD we will get rid of the
<filename>crownbay.conf</filename> file and then rename the
<filename>crownbay-noemgd.conf</filename> file to <filename>mymachine.conf</filename>.
Much of what we do in the configuration directory is designed to help the Yocto Project
build system work with the new layer and to be able to find and use the right software.
The following two commands result in a single machine configuration file named
<filename>mymachine.conf</filename>.
<literallayout class='monospaced'>
/usr/local/yocto/meta-intel$ rm meta-mymachine/conf/machine/crownbay.conf
/usr/local/yocto/meta-intel$ mv meta-mymachine/conf/machine/crownbay-noemgd.conf \
meta-mymachine/conf/machine/mymachine.conf
</literallayout>
</para>
<para>
The next step makes changes to <filename>mymachine.conf</filename> itself.
The only changes needed for this example are changes to the comment lines and to the
Source Revision (<filename>SRCREV</filename>) lines at the bottom of the file.
</para>
<para>
For the comments the string <filename>crownbay-noemgd</filename> needs to be changed to
<filename>mymachine</filename>.
</para>
<para>
To understand how to complete the changes to the <filename>SRCREV</filename>
statements we need to know which kernel we are using.
The <filename>PREFERRED_PROVIDER_virtual/kernel</filename> statement in the file specifies
the kernel we are going to use.
We are going to use <filename>linux-yocto-stable</filename>.
The <filename>SRCREV</filename> statement pairs point to the exact machine branch
(commit) and <filename>meta</filename> branch in the Git repository.
Right now the <filename>SRCREV</filename> variables are as follows in
<filename>mymachine.conf</filename>:
<literallayout class='monospaced'>
SRCREV_machine_pn-linux-yocto_crownbay-noemgd ?= \ "56fe215d3f1a2cc3a5a26482ac9809ba44495695"
SRCREV_meta_pn-linux-yocto_crownbay-noemgd ?= \ "e1f85a470934a0cf6abde5d95533e74501822c6b"
SRCREV_machine_pn-linux-yocto-stable_crownbay-noemgd ?= \ "56fe215d3f1a2cc3a5a26482ac9809ba44495695"
SRCREV_meta_pn-linux-yocto-stable_crownbay-noemgd ?= \ "e1f85a470934a0cf6abde5d95533e74501822c6b"
</literallayout>
</para>
<para>
You will notice that there are two pairs of <filename>SRCREV</filename> statements.
                    The first pair points to a current development kernel, which we don't care about
in this example.
The bottom pair points to the stable kernel that we will use:
<filename>linux-yocto-stable</filename>.
At this point though, the unique commit strings all are still associated with
Crown Bay.
So the next changes we make to the configuration file gets rid of the pair that points
to the development kernel and provides new commit strings that points to the
<filename>atom-pc-standard</filename>, which we are choosing for the initial build of this BSP.
Here are the final <filename>SRCREV</filename> statements:
<literallayout class='monospaced'>
SRCREV_machine_pn-linux-yocto-stable_mymachine ?= \ "72ca49ab08b8eb475cec82a10049503602325791"
SRCREV_meta_pn-linux-yocto-stable_mymachine ?= \ "ec26387cb168e9e0976999b528b5a9dd62e3157a"
</literallayout>
</para>
<para>
                    If you are familiar with Git repositories you probably won't have trouble locating the
exact commit strings you need to change the <filename>SRCREV</filename> statements.
You can find all the <filename>machine</filename> and <filename>meta</filename>
branch points (commits) for the <filename>linux-yocto-2.6.34</filename> kernel
<ulink url='http://git.yoctoproject.org/cgit/cgit.cgi/linux-yocto-2.6.34'>here</ulink>.
</para>
<para>
If you need a little more assistance after going to the link then do the following:
<orderedlist>
<listitem><para>Expand the list of branches by clicking <filename>[…]</filename></para></listitem>
<listitem><para>Click on the <filename>atom-pc-standard</filename> branch</para></listitem>
<listitem><para>Click on the commit column header to view the top commit</para></listitem>
<listitem><para>Copy the commit string for use in the <filename>mymachine.conf</filename>
file</para></listitem>
</orderedlist>
</para>
<para>
For the <filename>SRCREV</filename> statement that points to the <filename>meta</filename>
branch use the same procedure except expand the <filename>wrs_meta</filename>
branch in step 2 above.
</para>
<para>
The next configuration file in the new BSP layer we need to edit is <filename>layer.conf</filename>.
This file identifies build information needed for the new layer.
You can see the
<ulink url='http://www.yoctoproject.org/docs/1.1/bsp-guide/bsp-guide.html#bsp-filelayout-layer'>
Layer Configuration File</ulink> section in the Board Support Packages (BSP) Development Guide
for more information on this configuration file.
Basically, we are removing statements that support EMGD and changing the ones that support no EMGD.
</para>
<para>
First, remove these statements from the file:
<literallayout class='monospaced'>
BBFILE_COLLECTIONS_crownbayd += "crownbay"
BBFILE_PATTERN_crownbay := "^${LAYERDIR}/"
BBFILE_PRIORITY_crownbay = "6"
</literallayout>
</para>
<para>
This leaves three similar statements that we care about:
<literallayout class='monospaced'>
BBFILE_COLLECTIONS_crownbay-noemgd += "crownbay-noemgd"
BBFILE_PATTERN_crownbay-noemgd := "^${LAYERDIR}/"
BBFILE_PRIORITY_crownbay-noemgd = "6"
</literallayout>
</para>
<para>
Simply substitute the machine string name <filename>crownbay-noemgd</filename>
with the new machine name <filename>mymachine</filename> to get the following:
<literallayout class='monospaced'>
BBFILE_COLLECTIONS_mymachine += "mymachine"
BBFILE_PATTERN_mymachine := "^${LAYERDIR}/"
BBFILE_PRIORITY_mymachine = "6"
</literallayout>
</para>
<para>
Now we will take a look at the recipes in your new layer.
The standard BSP structure has areas for BSP, graphics, and kernel recipes.
When you create a BSP you use these areas for appropriate recipes and append files.
Recipes take the form of <filename>.bb</filename> files.
If you want to leverage off of existing recipes elsewhere in the Yocto Project
source tree but change them you can use <filename>.bbappend</filename> files.
                    All new recipes and append files for your layer go in the layer's
                    <filename>recipes-bsp</filename>, <filename>recipes-kernel</filename>, and
                    <filename>recipes-graphics</filename> directories.
</para>
<para>
For this example we are not adding any new BSP recipes.
And, we only need to remove the formfactor we do not want and change the name of
the remaining one that supports no EMGD.
                    These commands take care of the new layer's BSP recipes:
<literallayout class='monospaced'>
/usr/local/yocto/meta-intel$ rm -rf \
meta-mymachine/recipes-bsp/formfactor/formfactor/crownbay
/usr/local/yocto/meta-intel$ mv \
meta-mymachine/recipes-bsp/formfactor/formfactor/crownbay-noemgd/ \
meta-mymachine/recipes-bsp/formfactor/formfactor/mymachine
</literallayout>
</para>
<para>
For this example we want to remove anything that supports EMGD.
The following command cleans up the <filename>recipes-graphics</filename> directory:
<literallayout class='monospaced'>
     /usr/local/yocto/meta-intel$ rm -rf \
meta-mymachine/recipes-graphics/xorg-xserver/xserver-xf86-emgd*
</literallayout>
</para>
<para>
At this point the <filename>recipes-graphics</filename> directory just has files that
support Video Electronics Standards Association (VESA) graphics modes.
However, we still need to rename a directory in the layer.
This command applies the final change to the <filename>recipes-graphics</filename> directory:
<literallayout class='monospaced'>
/usr/local/yocto/meta-intel$ mv \
meta-mymachine/recipes-graphics/xorg-xserver/xserver-xf86-config/crownbay-noemgd \
meta-mymachine/recipes-graphics/xorg-xserver/xserver-xf86-config/mymachine
</literallayout>
</para>
<para>
Finally, let us look at the <filename>recipes-kernel</filename> directory in the example.
The only file we are concerned with for the example is
<filename>linux-yocto-stable_git.bbappend</filename>.
The other files all support the EMGD feature of Crown Bay.
These commands clean up the directory:
<literallayout class='monospaced'>
     /usr/local/yocto/meta-intel$ rm -rf meta-mymachine/recipes-kernel/linux/linux-yocto
     /usr/local/yocto/meta-intel$ rm -rf \
     meta-mymachine/recipes-kernel/linux/linux-yocto-stable
/usr/local/yocto/meta-intel$ rm \
meta-mymachine/recipes-kernel/linux/linux-yocto_git.bbappend
</literallayout>
</para>
<para>
The <filename>linux-yocto-stable_git.bbappend</filename> file appends a Yocto Project
recipe having the same name.
The changes we need to make are to remove the statements that support EMGD
and change the remaining Crown Bay strings to be <filename>mymachine</filename>.
We also do not need to include the pointer to the EMGD licenses patch at the end of
the file.
Here is the original file:
<literallayout class='monospaced'>
     FILESEXTRAPATHS := "${THISDIR}/${PN}"
     COMPATIBLE_MACHINE_crownbay = "crownbay"
     KMACHINE_CROWNBAY = "CROWNBAY"
     COMPATIBLE_MACHINE_crownbay-noemgd = "crownbay-noemgd"
     KMACHINE_crownbay-noemgd = "crownbay"
     SRC_URI += file://0001-crownbay-update-a-handful-of-EMGD-licenses.patch
</literallayout>
</para>
<para>
After editing the file it looks like this:
<literallayout class='monospaced'>
     FILESEXTRAPATHS := "${THISDIR}/${PN}"
     COMPATIBLE_MACHINE_mymachine = "mymachine"
     KMACHINE_mymachine = "mymachine"
</literallayout>
</para>
<para>
In summary, the edits to the layers recipe files result in removal of any files and
statements that do not support your targeted hardware in addition to the inclusion
of any new recipes you might need.
In this example, it was simply a matter of ridding the new layer <filename>meta-machine</filename>
of any code that supported the EMGD features.
We did not introduce any new recipes to the layer.
</para>
<para>
Finally, it is also important to update the layers <filename>README</filename>
file so that the information in it reflects your BSP.
</para>
</section>
<section id='preparing-for-the-build'>
<title>Preparing for the Build</title>
<para>
Once you have made all the changes to your BSP layer there remains a few things
you need to do for the Yocto Project build system in order for it to create your image.
You need to get the build environment ready by sourcing an environment setup script
and you need to be sure two key configuration files are configured appropriately.
</para>
<para>
The entire process for building an image is overviewed in the
<ulink url='http://www.yoctoproject.org/docs/1.1/yocto-project-qs/yocto-project-qs.html#building-image'>
Building an Image</ulink> section of the Yocto Project Quick Start.
You might want to reference this information.
The remainder of this section will apply to our example of the <filename>meta-mymachine</filename> layer.
</para>
<para>
To get ready to build your new layer you need to do the following:
<orderedlist>
<listitem><para>Get the environment ready for the build by sourcing the environment
script.
                The environment script is in the Yocto Project source directory and has the string
                <filename>init-build-env</filename> in the file's name.
For this example, the following command gets the build environment ready:
<literallayout class='monospaced'>
/usr/local/yocto$ source oe-init-build-env yocto-build
</literallayout>
When you source the script a build directory is created in the current
working directory.
In our example we were in the Yocto Project source directory.
Thus, entering the previous command created the <filename>yocto-build</filename> directory.
                If you do not provide a name for the build directory it defaults to <filename>build</filename>.
The build directory contains a <filename>conf</filename> directory that contains
two configuration files you will need to check: <filename>bblayers.conf</filename>
and <filename>local.conf</filename>.</para></listitem>
<listitem><para>Check and edit the resulting <filename>local.conf</filename> file.
This file minimally identifies the machine for which to build the image by
configuring the <filename>MACHINE</filename> variable.
For this example you must set the variable to mymachine as follows:
<literallayout class='monospaced'>
     MACHINE ??= "mymachine"
</literallayout>
You should also be sure any other variables in which you are interested are set.
Some variables to consider are <filename>BB_NUMBER_THREADS</filename>
and <filename>PARALLEL_MAKE</filename>, both of which can greatly reduce your build time
                if you are using a multi-threaded development system (e.g. values of
                <filename>8</filename> and <filename>-j 6</filename>, respectively, are optimal
                for a development machine that has four available cores).</para></listitem>
<listitem><para>Update the <filename>bblayers.conf</filename> file so that it includes
the path to your new BSP layer.
In this example you need to include the pathname to <filename>meta-mymachine</filename>.
For example, if you created a Yocto Project Git repository named
<filename>yocto</filename> in <filename>/usr/local</filename> then the
<filename>BBLAYERS</filename> variable in the file would need to include the following path:
<literallayout class='monospaced'>
/usr/local/yocto/meta-intel/meta-mymachine
</literallayout></para></listitem>
</orderedlist>
</para>
<para>
The appendix
<ulink url='http://www.yoctoproject.org/docs/1.1/poky-ref-manual/poky-ref-manual.html#ref-variables-glos'>
Reference: Variables Glossary</ulink> in the Yocto Project Reference Manual has more information
on configuration variables.
</para>
</section>
<section id='building-the-image'>
<title>Building the Image</title>
<para>
The Yocto Project uses the BitBake tool to build images based on the type of image
you want to create.
You can find more information on BitBake
<ulink url='http://bitbake.berlios.de/manual/'>here</ulink>.
</para>
<para>
The build process supports several types of images to satisfy different needs.
When you issue the BitBake command you provide a “top-level” recipe that essentially
starts the process off of building the type of image you want.
</para>
<para>
You can find these recipes in the <filename>meta/recipes-core/images</filename> and
<filename>meta/recipes-sato/images</filename> directories of the Yocto Project source
tree or Git repository.
Although the recipe names are somewhat explanatory, here is a list that describes them:
<itemizedlist>
<listitem><para><emphasis>Base</emphasis> A foundational basic image without support
for X that can be reasonably used for customization.</para></listitem>
<listitem><para><emphasis>Core</emphasis> A foundational basic image with support for
X that can be reasonably used for customization.</para></listitem>
                <listitem><para><emphasis>Direct Disk</emphasis> An image that you can copy directly to
                the disk of the target device.</para></listitem>
<listitem><para><emphasis>Live</emphasis> An image you can run from a USB device or from
a CD without having to first install something.</para></listitem>
<listitem><para><emphasis>Minimal</emphasis> A small image without a GUI.
This image is not much more than a kernel with a shell.</para></listitem>
<listitem><para><emphasis>Minimal Development</emphasis> A Minimal image suitable for
development work.</para></listitem>
<listitem><para><emphasis>Minimal Direct Disk</emphasis> A Minimal Direct Disk image.</para></listitem>
<listitem><para><emphasis>Minimal RAM-based Initial Root Filesystem</emphasis> A minimal image
that has the <filename>initramfs</filename> as part of the kernel, which allows the
system to find the first “init” program more efficiently.</para></listitem>
<listitem><para><emphasis>Minimal Live</emphasis> A Minimal Live image.</para></listitem>
<listitem><para><emphasis>Minimal MTD Utilities</emphasis> A minimal image that has support
for the MTD utilities, which let the user interact with the MTD subsystem in
the kernel to perform operations on flash devices.</para></listitem>
<listitem><para><emphasis>Sato</emphasis> An image with Sato support, a mobile environment
and visual style that works well with mobile devices.</para></listitem>
<listitem><para><emphasis>Sato Development</emphasis> A Sato image suitable for
development work.</para></listitem>
<listitem><para><emphasis>Sato Direct Disk</emphasis> A Sato Direct Disk image.</para></listitem>
<listitem><para><emphasis>Sato Live</emphasis> A Sato Live image.</para></listitem>
<listitem><para><emphasis>Sato SDK</emphasis> A Sato image that includes the Yocto Project
toolchain and development libraries.</para></listitem>
<listitem><para><emphasis>Sato SDK Direct Disk</emphasis> A Sato SDK Direct
Disk image.</para></listitem>
<listitem><para><emphasis>Sato SDK Live</emphasis> A Sato SDK Live image.</para></listitem>
</itemizedlist>
</para>
<para>
The remainder of this section applies to our example of the <filename>meta-mymachine</filename> layer.
</para>
<para>
To build the image for our <filename>meta-mymachine</filename> BSP enter the following command
from the same shell from which you ran the setup script.
You should run the <filename>bitbake</filename> command without any intervening shell commands.
For example, moving your working directory around could cause problems.
Here is the command for this example:
<literallayout class='monospaced'>
     /usr/local/yocto/yocto-build$ bitbake -k poky-image-sato-live
</literallayout>
</para>
<para>
This command requests an image that has Sato support and can be run from a USB device or
from a CD without having to first install anything.
The build process takes significant time and includes thousands of tasks, which are reported
at the console.
If the build results in any type of error you should check for misspellings in the
files you changed or problems with your host development environment such as missing packages.
</para>
</section>
</section>
<section id='modifying-a-kernel'>
<title>Modifying a Kernel</title>
<para>
[WRITER'S NOTE: This section is a second example that focuses on just modifying the kernel.
I don't have any information on this yet.
</para>
<para>
Here are some points to consider though:
<itemizedlist>
<listitem><para>Reference Darren's presentation
<ulink url='http://events.linuxfoundation.org/events/embedded-linux-conference/hart'>
here</ulink></para></listitem>
<listitem><para>Reference <xref linkend='dev-manual-start'>Getting Started with the Yocto Project</xref>
section to get set up at minimum.</para></listitem>
<listitem><para>Are there extra steps I need specific to kernel development to get started?</para></listitem>
<listitem><para>What do I do to get set up?
Is it a matter of just installing YP and having some pieces together?
What are the pieces?</para></listitem>
<listitem><para>Where do I get the base kernel to start with?</para></listitem>
<listitem><para>Do I install the appropriate toolchain?</para></listitem>
<listitem><para>What kernel git repository do I use?</para></listitem>
<listitem><para>What is the conversion script?
What does it do?</para></listitem>
<listitem><para>What do I have to do to integrate the kernel layer?</para></listitem>
<listitem><para>What do I use to integrate the kernel layer?
HOB?
Do I just Bitbake it?</para></listitem>
<listitem><para>Using the System Image Creator.]</para></listitem>
</itemizedlist>
</para>
</section>
</section>
<section id='user-application-development'>
<title>User Application Development</title>
<para>
[WRITER'S NOTE: This section is the second major development case - developing an application.
Here are points to consider:
<itemizedlist>
<listitem><para>User-space Application Development scenario overview.</para></listitem>
<listitem><para>Using the Yocto Eclipse Plug-in.</para></listitem>
<listitem><para>Back-door support.</para></listitem>
<listitem><para>I feel there is more to this area than we have captured during our two review meetings.]</para></listitem>
</itemizedlist>
</para>
</section>
</chapter>
<!--
vim: expandtab tw=80 ts=4
-->

View File

@@ -1,8 +0,0 @@
<?xml version='1.0'?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" xmlns="http://www.w3.org/1999/xhtml" xmlns:fo="http://www.w3.org/1999/XSL/Format" version="1.0">
<xsl:import href="http://docbook.sourceforge.net/release/xsl/current/xhtml/docbook.xsl" />
<!-- <xsl:param name="generate.toc" select="'article nop'"></xsl:param> -->
</xsl:stylesheet>

View File

@@ -1,128 +0,0 @@
<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
<chapter id='dev-manual-intro'>
<title>The Yocto Project Development Manual</title>
<para>
WRITER NOTE: The goal of this manual is to provide an over-arching development guide for using the Yocto Project.
The intent is to give the reader the “big picture” around development.
Much of the information in the manual will be detailed in other manuals.
For example, detailed information on Git, repositories and open-source in general can be found in many places.
Another example is getting set up to use the Yocto Project, which our Yocto Project Quick Start covers.
However, this manual needs to at least address it.
One might ask “What becomes of the Poky Reference Manual?”
This manual, over time, needs to develop into a pure reference manual where all procedural information
eventually ends up in an appropriate guide.
A good example of information perfect for the Poky Reference Manual is the appendix on variable
definitions (glossary).
</para>
<section id='intro'>
<title>Introduction</title>
<para>
Welcome to the Yocto Project Development Guide!
This guide provides an over-arching view of the development process within the Yocto Project.
This guide is just that a guide.
It helps you understand the bigger picture involving development using the Yocto Project.
</para>
</section>
<section id='what-this-manual-provides'>
<title>What this Manual Provides</title>
<para>
The following list describes what you can get from this guide:
<itemizedlist>
<listitem><para>A general idea of and references to information that lets you get set
up to develop using the Yocto Project.</para></listitem>
<listitem><para>Information to help developers that are new to the open source environment
and to the distributed revision control system Git, which the Yocto Project
uses.</para></listitem>
<listitem><para>An understanding of common end-to-end development models.</para></listitem>
<listitem><para>Development case overviews for both system development and user-space
applications.</para></listitem>
<listitem><para>An overview and understanding of the emulation environment used with
the Yocto Project (QEMU).</para></listitem>
<listitem><para>A discussion of target-level analysis techniques, tools, tips,
and tricks.</para></listitem>
<listitem><para>Considerations for deploying your final product.</para></listitem>
<listitem><para>An understanding of basic kernel architecture and
concepts.</para></listitem>
<listitem><para>Information that will help you migrate an existing project to the
Yocto Project development environment.</para></listitem>
</itemizedlist>
</para>
</section>
<section id='what-this-manual-does-not-provide'>
<title>What this Manual Does Not Provide</title>
<para>
This manual will not give you the following:
<itemizedlist>
<listitem><para>Step-by-step instructions when these instructions exist in other Yocto
Project documentation.
                For example, the Application Development Toolkit (ADT) User's Guide contains detailed
                instructions on how to obtain and configure the Eclipse Yocto Plug-in.</para></listitem>
<listitem><para>Reference material.
This type of material resides in an appropriate reference manual.
For example, system variables are documented in the Poky Reference Manual.</para></listitem>
<listitem><para>Detailed public information that is not specific to the Yocto Project.
For example, exhaustive information on how to use Git is better covered in the public
domain than in this manual.</para></listitem>
</itemizedlist>
</para>
</section>
<section id='other-information'>
<title>Other Information</title>
<para>
Because this manual presents overview information for many different topics, you will
need to supplement it with other information.
The following list presents other sources of information you might find helpful:
<itemizedlist>
<listitem><para>The <ulink url='http://www.yoctoproject.org'>Yocto Project Website</ulink> - The
home page for the Yocto Project
provides lots of information on the project as well as links to software
and documentation.</para></listitem>
<listitem><para>The <ulink url='http://www.yoctoproject.org/docs/1.1/yocto-project-qs/yocto-project-qs.html'>
Yocto Project Quick Start</ulink> - This short document lets you get started
with the Yocto Project quickly and start building an image.</para></listitem>
<listitem><para>The <ulink url='http://www.yoctoproject.org/docs/1.1/poky-ref-manual/poky-ref-manual.html'>
Yocto Project Reference Manual</ulink> - This manual is the complete reference
guide to the Yocto Project build component.
The manual also contains a reference chapter on Board Support Package (BSP)
layout.</para></listitem>
<listitem><para><ulink url='http://www.yoctoproject.org/docs/1.1/adt-manual/adt-manual.html'>
Application Development Toolkit (ADT) User's Guide</ulink> - This guide provides
information that lets you get going with the ADT to develop projects using the
Yocto Project.</para></listitem>
<listitem><para><ulink url='http://www.yoctoproject.org/docs/1.1/bsp-guide/bsp-guide.html'>
Board Support Package (BSP) Developer's Guide</ulink> - This guide defines the
structure for BSP components.
Having a commonly understood structure encourages standardization.</para></listitem>
<listitem><para><ulink url='http://www.yoctoproject.org/docs/1.1/kernel-manual/kernel-manual.html'>
Yocto Project Kernel Architecture and Use Manual</ulink> - This manual
describes the architecture of the Yocto Project kernel and provides some work flow
examples.</para></listitem>
<listitem><para><ulink url='http://www.youtube.com/watch?v=3ZlOu-gLsh0'>
Yocto Eclipse Plug-in</ulink> - A step-by-step instructional video that
demonstrates how an application developer uses Yocto Plug-in features within
the Eclipse IDE.</para></listitem>
<listitem><para><ulink url='http://wiki.yoctoproject.org/wiki/FAQ'>FAQ</ulink> - A
list of commonly asked questions and their answers.</para></listitem>
<listitem><para><ulink url='http://www.yoctoproject.org/download/yocto/yocto-project-1.0-release-notes-poky-5.0'>
Release Notes</ulink> - Features, updates and known issues for the current
release of the Yocto Project.</para></listitem>
</itemizedlist>
</para>
</section>
</chapter>
<!--
vim: expandtab tw=80 ts=4
-->

View File

@@ -1,45 +0,0 @@
<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
<chapter id='dev-manual-model'>
<title>Common Development Models</title>
<para>
[WRITERS NOTE: This chapter presents common development models within the Yocto Project.
Reading this chapter will give the user a feel for the overall development process.
The chapter will follow the framework for the manual.
The team decided to present a single development model and not to try and represent all the
various possibilities that might exist.
The chapter will include an over-arching diagram that shows a simple, most-common development model.
The diagram will consist of boxes that represent high-level areas of the development process.
For example, a box for “Setting Up” will be in the model.
A box for “Debugging” will exist.
The diagram needs to account for the two use-cases we are going to showcase
(system development and application development)].
</para>
<section id='place-holder-section-one'>
<title>Place-Holder Section One</title>
<para>
Text needed here.
</para>
</section>
<section id='place-holder-section-two'>
<title>Place-Holder Section Two</title>
<para>
Text needed here.
</para>
</section>
</chapter>
<!--
vim: expandtab tw=80 ts=4
-->

View File

@@ -1,539 +0,0 @@
<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
<chapter id='dev-manual-newbie'>
<title>Working with Open Source Code</title>
<para>
This chapter presents information for users new or unfamiliar with working in an open source environment.
Working in this type of environment is different than working in a closed, proprietary environment.
The chapter also presents information specific to the Yocto Project environment.
It specifically addresses licensing issues, code repositories, the open-source distributed version control
system Git, and best practices within Yocto Project.
</para>
<note><para>
If you are a seasoned open source developer and are familiar with Git, you might just be interested
in the Yocto Project specific information in this chapter.
</para></note>
<section id='open-source-philosophy'>
<title>Open Source Philosophy</title>
<para>
Open source philosophy is characterized by software development directed by peer production,
bartering, and collaboration through a concerned community of developers.
Contrast this to the more standard centralized development models used by commercial software
companies where a finite set of developers produce a product for sale using a defined set
of procedures that ultimately result in an end-product whose architecture and source material
are closed to the public.
</para>
<para>
Open source projects conceptually have differing concurrent agendas, approaches, and production.
These facets of the development process can come from anyone in the public (community) that has a
stake in the software project.
The open source environment contains new copyright, licensing, domain, and consumer issues
that differ from the more traditional development environment.
In an open source environment the end-product, source material, and documentation are
all available to the public at no cost.
</para>
<para>
A benchmark example of an open source project is the Linux Kernel, which was initially conceived
and created by Finnish computer science student Linus Torvalds in 1991.
Conversely, a good example of a non-open source project is the Windows family of operating
systems developed by Microsoft Corporation.
</para>
<para>
Wikipedia has a good historical description of the Open Source Philosophy
<ulink url='http://en.wikipedia.org/wiki/Open_source'>here</ulink>.
</para>
<para>
You can also find helpful information on how to participate in the Linux Community
<ulink url='http://ldn.linuxfoundation.org/book/how-participate-linux-community'>here</ulink>.
</para>
</section>
<section id='yocto-project-repositories'>
<title>Yocto Project Repositories</title>
<para>
The Yocto Project team maintains complete source repositories that allow further development
of Yocto Project, its tools, Board Support Packages, and so forth.
As a developer who uses Yocto Project, however, you need only to be able to access your
kernel or application source code and any layers (modifications) on which you might be working.
</para>
<para>
        For any supported release of Yocto Project you can go to the Yocto Project website's
        <ulink url='http://www.yoctoproject.org/download'>download page</ulink> and get a
        <filename>.bz2</filename> tarball of the release.
You can also go to this site to download any supported BSP tarballs.
</para>
<para>
After obtaining the code, you can unpack the tarballs and have a working Git repository
from which you can develop.
Or, you can take steps to create local repositories of Yocto Project source code and metadata on
your development system.
See the information
<ulink url='https://wiki.yoctoproject.org/wiki/Transcript:_from_git_checkout_to_meta-intel_BSP'>here</ulink>
for information on how to set up these local Git repositories.
</para>
<note><para>
Should you be interested in locations of complete Yocto Project development code, there are
two areas where this code is maintained:
<itemizedlist>
<listitem><para><emphasis><ulink url='http://git.yoctoproject.org/cgit/cgit.cgi'>Source Repositories:</ulink></emphasis>
This area contains IDE Plugins, Matchbox, Poky, Poky Support, Tools, Yocto Linux Kernel, and Yocto
Metadata Layers.</para></listitem>
<listitem><para><emphasis><ulink url='http://autobuilder.yoctoproject.org/downloads/'>Index of /downloads:</ulink></emphasis>
This area contains an index of the Eclipse-plugin, miscellaneous support, poky, pseudo, and
all released versions of Yocto Project.
[WRITER NOTE: link will be http://downloads.yoctoproject.org.]</para></listitem>
</itemizedlist>
</para></note>
</section>
<section id='licensing'>
<title>Licensing</title>
<para>
Because open source projects are open to the public they have different licensing structures in place.
License evolution for both Open Source and Free Software has an interesting history.
If you are interested in the history you can find basic information here:
<itemizedlist>
<listitem><para><ulink url='http://en.wikipedia.org/wiki/Open-source_license'>Open source license history</ulink>
</para></listitem>
<listitem><para><ulink url='http://en.wikipedia.org/wiki/Free_software_license'>Free software license
history</ulink></para></listitem>
</itemizedlist>
</para>
<para>
In general, Yocto Project is broadly licensed under the Massachusetts Institute of Technology
(MIT) License.
MIT licensing permits the reuse of software within proprietary software as long as the
license is distributed with that software.
MIT is also compatible with the GNU General Public License (GPL).
Patches to the Yocto Project follow the up-stream licensing scheme.
</para>
<para>
You can find information on the MIT License <ulink url='http://en.wikipedia.org/wiki/MIT_License'>here</ulink>.
You can find information on the GNU GPL <ulink url='http://en.wikipedia.org/wiki/GPL'>here</ulink>.
</para>
<para>
When you build an image using Yocto Project the build process uses a known list of licenses to
ensure compliance.
Once the build completes the list of all licenses found and used during the build are
kept in the resulting build directory at
<filename>&lt;build_directory&gt;/tmp/deploy/images/licenses</filename>.
If a module requires a license that is not in the base list then the build process
generates a warning during the build.
It is up to the developer to resolve potential licensing issues.
</para>
<para>
The base list of licenses used by the build process is a combination of the Software Package
Data Exchange (SPDX) list and the Open Source Initiative (OSI) projects.
<ulink url='http://spdx.org'>SPDX Group</ulink> is a working group of the Linux Foundation
that maintains a specification
for a standard format for communicating the components, licenses, and copyrights
associated with a software package.
<ulink url='http://opensource.org'>OSI</ulink> is a corporation dedicated to the Open Source
Definition and the effort for reviewing
and approving licenses that are OSD-conformant.
</para>
<para>
You can find a list of the combined SPDX and OSI licenses that the Yocto Project uses
<ulink url='http://git.yoctoproject.org/cgit/cgit.cgi/poky/tree/meta/files/common-licenses'>here</ulink>.
The wiki page discusses the license infrastructure used by the Yocto Project.
</para>
</section>
<section id='git'>
<title>Git</title>
<para>
The Yocto Project uses Git, which is a free, open source distributed version control.
Git supports distributed development, non-linear development, can handle large projects,
cryptographic authentication of history, and toolkit design.
It is best that you know how to work with Git if you are going to use Yocto Project for development.
</para>
<para>
Git has an extensive set of commands that lets you manage and collaborate changes over the life
of a project.
Conveniently though, you can manage with a small set of basic operations and workflows
once you understand the basic philosophy behind Git.
You do not have to be an expert in Git to be functional.
A good place to look for instruction on a minimal set of Git commands is
<ulink url='http://git-scm.com/documentation'>here</ulink>.
If you need to download Git you can do so
<ulink url='http://git-scm.com/download'>here</ulink>.
</para>
<para>
Git works by using branching techniques that track content change (not files)
within a project (e.g. a new feature or updated documentation).
Creating a tree-like structure based on project divergence allows for excellent historical
information over the life of a project.
This methodology also allows for an environment in which you can do lots of
experimentation on your project as you develop changes or new features.
For example, you can create a “branch”, experiment with some feature, and then
if you like the feature you incorporate the branch into the tree.
If you dont, you cut the branch off by deleting it.
</para>
<para>
If you dont know much about Git it is strongly suggested that you educate
yourself by visiting the links previously mentioned.
</para>
<para>
The following list briefly describes some basic Git operations as a way to get started.
As with any set of commands, this list (in most cases) simply shows the base command and
omits the many arguments they support.
See the Git documentation for complete descriptions and strategies on how to use these commands:
<itemizedlist>
<listitem><para><emphasis><filename>git init</filename></emphasis> Initializes an empty Git repository.
You cannot use Git commands unless you have a <filename>.git</filename> repository.</para></listitem>
<listitem><para><emphasis><filename>git clone</filename></emphasis> Creates a clone of a repository.
            During collaboration this command allows you to create a local repository that is on
            equal footing with a fellow developer's repository.</para></listitem>
<listitem><para><emphasis><filename>git add</filename></emphasis> Adds updated file contents to the index that
Git uses to track changes.
All files that have changed must be added before they can be committed.</para></listitem>
<listitem><para><emphasis><filename>git commit</filename></emphasis> Creates a “commit” that documents
the changes you made.
Commits are used for historical purposes, for determining if a maintainer of a project
            will allow the change, and for ultimately pushing the change from your local Git repository
            into the project's upstream (or master) repository.</para></listitem>
<listitem><para><emphasis><filename>git status</filename></emphasis> Reports any modified files that
possibly need added and committed.</para></listitem>
<listitem><para><emphasis><filename>git checkout &lt;branch-name&gt;</filename></emphasis> - Changes
your working branch. This command is analogous to “cd”.</para></listitem>
<listitem><para><emphasis><filename>git checkout b &lt;working-branch&gt;</filename></emphasis> - Creates
a working branch on your local machine where you can isolate work.
It is a good idea to use local branches when adding specific features or changes.
This way if you dont like what you have done you can easily get rid of the work.</para></listitem>
<listitem><para><emphasis><filename>git branch</filename></emphasis> Reports existing branches and
tells you which branch in which you are currently working.</para></listitem>
<listitem><para><emphasis><filename>git pull</filename></emphasis> Retrieves information from an upstream Git
repository and places it in your local Git repository.
You use this command to make sure you are synchronized with the upstream repository
that the projects maintainer uses to pull changes into the master repository.</para></listitem>
<listitem><para><emphasis><filename>git push</filename></emphasis> Sends all your local changes you
have committed to an upstream Git repository.
The maintainer of the project draws from these repositories when adding your changes to the
projects master repository.</para></listitem>
<listitem><para><emphasis><filename>git merge</filename></emphasis> Combines or adds changes from one
local branch of your repository with another branch.
When you create a local Git repository the default branch is named “master”.
A typical workflow is to create a temporary branch for isolated work, make and commit your
changes, switch to the master branch, merge the changes in the temporary branch with the
master branch, and then delete the temporary branch.</para></listitem>
<listitem><para><emphasis><filename>git cherry-pick</filename></emphasis> Choose and apply specific
commits from one branch into another branch.
There are times when you might not be able to merge all the changes in one branch with
another but need to pick out certain ones.</para></listitem>
<listitem><para><emphasis><filename>gitk</filename></emphasis> Provides a GUI view of the branches
and changes in your local Git repository.
This command is a good way to see where things have diverged in your local repository.</para></listitem>
<listitem><para><emphasis><filename>git log</filename></emphasis> Reports a history of your changes to the
repository.</para></listitem>
</itemizedlist>
</para>
</section>
<section id='workflows'>
<title>Workflows</title>
<para>
This section provides some overview on workflows using Git.
In particular, the information covers basic practices that describe roles and actions in a
collaborative development environment.
Again, if you are familiar with this type of development environment you might want to just skip the section.
</para>
<para>
Following are the definitions for some terms used in the Yocto Project.
[WRITER NOTE: I need to move this list of definitions somewhere useful.]
<itemizedlist>
<listitem><para><emphasis>Image</emphasis> - An image is a collection of recipes created with
Bitbake (baked). Images run on specific hardware and use cases.</para></listitem>
<listitem><para><emphasis>Recipe</emphasis> - A set of instructions for building packages.
A recipe describes where you get the source and which patches to apply.
Recipes describe dependencies for libraries or for other recipes and they
also contain configuration and compilation options.
Recipes also let you install customizations.
Recipes contain the logical unit of execution, the software/images to build and
use the <filename>.bb</filename> file extension.</para></listitem>
<listitem><para><emphasis>BitBake</emphasis> - The task executor and scheduler used by Yocto Project
to build images.
For more information on BitBake, see the <ulink url='http://bitbake.berlios.de/manual/'>
BitBake documentation</ulink>.</para></listitem>
<listitem><para><emphasis>Package</emphasis> - A collection of baked recipes.
You bake something by running it through Bitbake.</para></listitem>
<listitem><para><emphasis>Layer</emphasis> - A logical collection of recipes representing the core,
a BSP, or an application stack.</para></listitem>
<listitem><para><emphasis>Metadata</emphasis> - Information for a build that is generally
architecture-independent.
This information includes Task definitions in recipes, classes, and configuration
information.</para></listitem>
<listitem><para><emphasis>Configuration File</emphasis>: Configuration information in the
<filename>.conf</filename> files provides global definition of variables.
The <filename>build/conf/local.conf</filename> configuration file defines local user-defined variables.
The <filename>distro/poky.conf</filename> configuration file defines Yocto distro configuration
variables.
The <filename>machine/beagleboard.conf</filename> configuration file defines machine-specific variables.
Configuration files end with a <filename>.conf</filename> filename extension.</para></listitem>
<listitem><para><emphasis>Classes</emphasis> - Files that encapsulate and inherit logic.
Class files end with the <filename>.bbclass</filename> filename extension.</para></listitem>
<listitem><para><emphasis>Tasks</emphasis> - Arbitrary groups of software used to contain Recipes.
You simply use Tasks to hold recipes that, when built, usually accomplish a single task.
For example, a task could contain the recipes for a companys proprietary or value-add software.
Or the task could contain the recipes that enable graphics.
A task is really just another recipe.
Because task files are recipes, they end with the <filename>.bb</filename> filename
extension.</para></listitem>
<listitem><para><emphasis>Common OE-Core</emphasis> - A core set of metadata originating
with OpenEmbedded (OE) that is shared between OE and the Yocto Project.</para></listitem>
</itemizedlist>
</para>
<para>
A master Git repository exists that contains the project.
Usually a key individual is responsible for this repository.
It is the “upstream” repository where the final builds of the project occur.
The maintainer is responsible for allowing changes in from other developers and for
organizing the branch structure of the repository to reflect release strategies and so forth.
</para>
<para>
The maintainer of the project also owns a contribution repository usually known as a “contrib” area.
The contrib area temporarily holds changes to the project that have been submitted or committed
by the development team.
The maintainer determines if the changes are qualified to be moved into the master repository.
</para>
<para>
Developers create and maintain cloned repositories of the upstream master repository.
These repositories are local to their development platforms and are used to develop changes.
When a developer is satisfied with a particular feature or change they “push” the changes
up to the contrib repository.
Developers are responsible for keeping their local repository up-to-date with the master
repository.
They are also responsible for straightening out any conflicts that might arise within files
that are being worked on simultaneously by more than one person.
All this work is done locally on the developers machine before anything is pushed upstream
and examined at the maintainers level.
</para>
<para>
A somewhat formal method exists by which developers commit changes and push them into the
contrib area and subsequently request that the maintainer include them into the master repository.
This process is called “submitting a patch” or “submitting a change.”
</para>
<para>
To summarize the environment: we have a single point of entry for changes into the projects
master repository, which is controlled by the projects maintainer.
And, we have a set of developers who independently develop, test, and submit changes
upstream for the maintainer to examine.
The maintainer then chooses which changes are going to become permanently a part of the project.
</para>
<para>
[WRITER NOTE: Would like a figure here for Git workflow]
</para>
<para>
While each development environment is unique, there are some best practices or methods
that help development run smoothly.
The following list describes some of these practices.
For more detailed information about these strategies see
<ulink url='http://www.kernel.org/pub/software/scm/git/docs/gitworkflows.html'>Git Workflows</ulink>.
<itemizedlist>
<listitem><para><emphasis>Small Changes</emphasis> - It is best to keep your changes you commit
small as compared to bundling many disparate changes into a single commit.
This practice not only keeps things manageable but also allows the maintainer
to more easily include or refuse changes.</para></listitem>
<listitem><para><emphasis>Use Branches Liberally</emphasis> - It is very easy to create, use, and
delete local branches in your working Git repository.
You can name these branches anything you like.
It is helpful to give them names associated with the particular feature or change
on which you are working.
Once you are done with a feature or change you simply discard the branch.</para></listitem>
<listitem><para><emphasis>Merge Changes</emphasis> - The Git merge command allows you to take the
changes from one branch and fold them into another branch.
This process is especially helpful when more than a single developer might be working
on different parts of the same feature.
Merging changes also automatically identifies any collisions or “conflicts”
that might happen resulting from the same lines of code being altered by two different
developers.</para></listitem>
<listitem><para><emphasis>Manage Branches</emphasis> - Because branches are easy to use, you should
use a system where branches indicate varying levels of code readiness.
For example, you can have a “work” branch to develop in, a “test” branch where the code or
change is tested, a “stage” branch where changes are ready to be committed, and so forth.
As your project develops, you can merge code across the branches to reflect ever-increasing
stable states of the development.</para></listitem>
<listitem><para><emphasis>Use Push and Pull</emphasis> - The push-pull workflow is based on the
concept of developers “pushing” local commits upstream to the remote repository, which is
usually a contribution repository.
It is also based on the developers “pulling” known states of the project down into their
local development repositories.
This workflow easily allows you to pull changes submitted by other developers from the
upstream repository into your work area ensuring that you have the most recent software
on which to develop.</para></listitem>
<listitem><para><emphasis>Patch Workflow</emphasis> - This workflow allows you to notify the
maintainer through an email that you have a change (or patch) you would like considered
for the master repository.
To send this type of change you format the patch and then send the email using the Git commands
<filename>git format-patch</filename> and <filename>git send-email</filename>.
You can find information on how to submit later in this chapter.</para></listitem>
</itemizedlist>
</para>
</section>
<section id='tracking-bugs'>
<title>Tracking Bugs</title>
<para>
The Yocto Project uses Bugzilla to track bugs.
This bug-tracking application works well for group development because it tracks bugs and code
changes, can be used to communicate changes and problems with developers, can be used to
submit and review patches, and can be used to manage quality assurance.
You can find a good overview of Bugzilla <ulink url='http://www.bugzilla.org/about/'>here</ulink>.
</para>
<para>
Sometimes it is helpful to submit, investigate, or track a bug against the Yocto Project itself.
While normally this is a process relevant only to Yocto Project developers, you can find information
for Bugzilla configuration and bug tracking procedures specific to the Yocto Project
<ulink url='https://wiki.yoctoproject.org/wiki/Bugzilla_Configuration_and_Bug_Tracking'>here</ulink>.
</para>
<para>
The Yocto Project uses its own version of the Bugzilla application.
You can find the home page <ulink url='http://bugzilla.yoctoproject.org'>here</ulink>.
You need to use this implementation of Bugzilla when logging a defect against anything released
by the Yocto Project team.
</para>
<para>
Here are some things to remember when dealing with bugs against the Yocto Project:
<itemizedlist>
<listitem><para>The Yocto Project follows a bug-naming convention:
<filename>[YOCTO &lt;number&gt;]</filename>, where <filename>&lt;number&gt;</filename> is the
assigned defect ID used in Bugzilla.
So, for example, a valid way to refer to a defect when creating a commit comment
would be <filename>[YOCTO 1011]</filename>.
This convention becomes important if you are submitting patches against the Yocto Project
code itself (see the next section “How to Submit a Change”).</para></listitem>
<listitem><para>Defects for Yocto Project fall into one of four classifications: Yocto Projects,
Infrastructure, Poky, and Yocto Metadata Layers.</para></listitem>
</itemizedlist>
</para>
</section>
<section id='how-to-submit-a-change'>
<title>How to Submit a Change</title>
<para>
During the development process it is necessary to submit your changes to the maintainer
of the project.
Furthermore, in a collaborative environment it is necessary to have some sort of standard
or method through which you submit changes.
Otherwise, things would get quite chaotic.
</para>
<para>
Sometimes you might find it necessary to submit a change or patch to the Yocto Project.
If so, you must follow certain procedures.
In particular, the headers in patches and the commit messages must follow a certain standard.
The general process is the same as described earlier in this section.
For complete details on how to create proper commit messages and patch headers see
[WRITER NOTE: I need the link to Mark's wiki page here that describes the process.]
</para>
<para>
Following are general instructions for both pushing changes upstream and for submitting changes as patches.
</para>
<section id='pushing-a-change-upstream'>
<title>Pushing a Change Upstream</title>
<para>
The basic flow for pushing a change to an upstream contrib repository is as follows:
<itemizedlist>
<listitem><para>Make your changes in your local repository.</para></listitem>
<listitem><para>Stage your commit (or change) by using the <filename>git add</filename>
command.</para></listitem>
<listitem><para>Commit the change by using the <filename>git commit</filename>
command and push it to an upstream contrib repository.
Be sure to provide a commit message that follows the projects commit standards.</para></listitem>
<listitem><para>Notify the maintainer that you have pushed a change.</para></listitem>
</itemizedlist>
You can find detailed information on how to push a change upstream
<ulink url='http://www.kernel.org/pub/software/scm/git/docs/user-manual.html#Developing-With-git'>
here</ulink>.
</para>
</section>
<section id='submitting-a-patch'>
<title>Submitting a Patch</title>
<para>
If you have just a few changes, you can commit them and then submit them as an email to the maintainer.
Here is the general procedure:
<itemizedlist>
<listitem><para>Make your changes in your local repository.</para></listitem>
<listitem><para>Stage your commit (or change) by using the <filename>git add</filename>
command.</para></listitem>
<listitem><para>Commit the change by using the <filename>git commit</filename> command.
Be sure to provide a commit message that follows the projects commit standards.</para></listitem>
<listitem><para>Format the commit by using the <filename>git-format-patch</filename>
command.
This step produces a numbered series of files in the current directory, one for
each commit.</para></listitem>
<listitem><para>Import the files into your mail client by using the
<filename>git-send-email</filename> command.</para></listitem>
<listitem><para>Send the email by hand to the maintainer.</para></listitem>
</itemizedlist>
Be aware that there could be protocols and standards that you need to follow for your particular
project.
You can find detailed information on the general process
<ulink url='http://www.kernel.org/pub/software/scm/git/docs/user-manual.html#sharing-development'>
here</ulink>.
</para>
</section>
</section>
</chapter>
<!--
vim: expandtab tw=80 ts=4
-->

View File

@@ -1,151 +0,0 @@
<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
<chapter id='dev-manual-start'>
<title>Getting Started with the Yocto Project</title>
<para>
This chapter introduces the Yocto Project and gives you an idea of what you need to get started.
You can find enough information to set your development host up and build or use images for
hardware supported by the Yocto Project by reading the
<ulink url='http://www.yoctoproject.org/docs/yocto-quick-start/yocto-project-qs.html'>
Yocto Project Quick Start</ulink> located on the <ulink url='http://www.yoctoproject.org'>
Yocto Project website</ulink>.
</para>
<para>
The remainder of this chapter summarizes what is in the Yocto Project Quick Start and provides
some higher level concepts you might want to consider.
</para>
<section id='introducing-the-yocto-project'>
<title>Introducing the Yocto Project</title>
<para>
The Yocto Project is an open-source collaboration project focused on embedded Linux developers.
The project provides a recent Linux kernel along with a set of system commands, libraries,
and system components suitable for the embedded developer.
The Yocto Project also features the Sato reference User Interface should you be dealing with
devices with restricted screens.
</para>
<para>
You can use the Yocto Project, which uses the BitBake build tool, to develop complete Linux
images and user-space applications for architectures based on ARM, MIPS, PowerPC, x86 and x86-64.
You can perform target-level testing and debugging as well as test in a hardware emulated environment.
And, if you are an Eclipse user, you can install an Eclipse Yocto Plug-in to allow you to
develop within that familiar environment.
</para>
</section>
<section id='getting-setup'>
<title>Getting Setup</title>
<para>
Here is what you need to get set up to use the Yocto Project:
<itemizedlist>
<listitem><para><emphasis>Host System:</emphasis> You need a recent release of Fedora,
OpenSUSE, Debian, or Ubuntu.
You should have a reasonably current Linux-based host system.
You should also have about 100 gigabytes of free disk space if you plan on building
images.</para></listitem>
<listitem><para><emphasis>Packages:</emphasis> Depending on your host system (Debian-based or RPM-based),
you need certain packages.
See the <ulink url='http://www.yoctoproject.org/docs/yocto-quick-start/yocto-project-qs.html#packages'>
The Packages</ulink> section in the Yocto Project Quick start for the exact package
requirements.</para></listitem>
<listitem><para><emphasis>Yocto Project Release:</emphasis> You need a release of the Yocto Project.
You can get set up for this one of two ways depending on whether you are going to be contributing
back into the Yocto Project source repository or not.
<itemizedlist>
<listitem><para><emphasis>Tarball Extraction:</emphasis> If you are not going to contribute
back into the Yocto Project you can simply download the Yocto Project release you want
from the websites <ulink url='http://yoctoproject.org/download'>download page</ulink>.
Once you have the tarball, just extract it into a directory of your choice.
If you are interested in supported Board Support Packages (BSPs) you can also download
these release tarballs from the same site and locate them in a directory of your
choice.</para></listitem>
<listitem><para><emphasis>Git Method:</emphasis> If you are going to be contributing
back into the Yocto Project you should probably use Git commands to set up a local
Git repository of the Yocto Project.
Doing so creates a history of changes you might make and allows you to easily submit
changes upstream to the project.
For an example of how to set up your own local Git repository of Yocto Project,
see this
<ulink url='https://wiki.yoctoproject.org/wiki/Transcript:_from_git_checkout_to_meta-intel_BSP'>
wiki page</ulink>, which covers checking out the Yocto sources.</para></listitem>
</itemizedlist></para></listitem>
<listitem><para><emphasis>Supported Board Support Packages (BSPs):</emphasis> The same considerations
exist for BSPs.
You can get set up for BSP development one of two ways:
<itemizedlist>
<listitem><para><emphasis>Tarball Extraction:</emphasis> You can download any released
BSP tarball from the same
<ulink url='http://yoctoproject.org/download'>download site</ulink>.
Once you have the tarball just extract it into a directory of your choice.</para></listitem>
<listitem><para><emphasis>Git Method:</emphasis> For an example of how to integrate
the metadata for BSPs into your local Yocto Project Git repository see this
<ulink url='https://wiki.yoctoproject.org/wiki/Transcript:_from_git_checkout_to_meta-intel_BSP'>
wiki page</ulink>, which covers how to check out the meta-intel repository.</para></listitem>
</itemizedlist></para></listitem>
<listitem><para><emphasis>Eclipse Yocto Plug-in:</emphasis> If you are developing using the
Eclipse Integrated Development Environment (IDE) you will need this plug-in.
See the
<ulink url='http://www.yoctoproject.org/docs/adt-manual/adt-manual.html#setting-up-the-eclipse-ide'>
Setting up the Eclipse IDE</ulink> section in the Yocto Application Development Toolkit (ADT)
Users Guide for more information.</para></listitem>
</itemizedlist>
</para>
</section>
<section id='building-images'>
<title>Building Images</title>
<para>
The build process creates an entire Linux distribution, including the toolchain, from source.
For more information on this topic, see the
<ulink url='http://www.yoctoproject.org/docs/yocto-quick-start/yocto-project-qs.html#building-image'>
Building an Image</ulink> section in the Yocto Project Quick Start.
</para>
<para>
The build process is as follows:
<orderedlist>
<listitem><para>Make sure you have the Yocto Project files as described in the
previous section.</para></listitem>
<listitem><para>Initialize the build environment by sourcing a build environment
script.</para></listitem>
<listitem><para>Make sure the <filename>conf/local.conf</filename> configuration file is set
up how you want it.
This file defines the target machine architecture and other build configurations.</para></listitem>
<listitem><para>Build the image using the BitBake command.
If you want information on Bitbake, see the user manual at
<ulink url='http://docs.openembedded.org/bitbake/html'></ulink>.</para></listitem>
<listitem><para>Optionally, you can run the image in the QEMU emulator.</para></listitem>
</orderedlist>
</para>
</section>
<section id='using-pre-built-binaries-and-qemu'>
<title>Using Pre-Built Binaries and QEMU</title>
<para>
Another option you have to get started is to use a pre-built binary.
This scenario is ideal for developing software applications to run on your target hardware.
To do this you need to install the stand-alone Yocto toolchain tarball and then download the
pre-built kernel that you will boot using the QEMU emulator.
Next, you must download the filesystem for your target machines architecture.
Finally, you set up the environment to emulate the hardware then start the emulator.
</para>
<para>
You can find details on all these steps in the
<ulink url='http://www.yoctoproject.org/docs/yocto-quick-start/yocto-project-qs.html#using-pre-built'>
Using Pre-Built Binaries and QEMU</ulink> section in the Yocto Project Quick Start.
</para>
</section>
</chapter>
<!--
vim: expandtab tw=80 ts=4
-->

View File

@@ -1,71 +0,0 @@
<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd">
<book id='dev-manual' lang='en'
xmlns:xi="http://www.w3.org/2003/XInclude"
xmlns="http://docbook.org/ns/docbook"
>
<bookinfo>
<mediaobject>
<imageobject>
<imagedata fileref='figures/dev-title.png'
format='SVG'
align='left' scalefit='1' width='100%'/>
</imageobject>
</mediaobject>
<title></title>
<authorgroup>
<author>
<firstname>Scott</firstname> <surname>Rifenbark</surname>
<affiliation>
<orgname>Intel Corporation</orgname>
</affiliation>
<email>scott.m.rifenbark@intel.com</email>
</author>
</authorgroup>
<revhistory>
<revision>
<revnumber>1.1</revnumber>
<date>TBD 2011</date>
<revremark>This revision is the initial document draft and corresponds with
the Yocto Project 1.1 Release.</revremark>
</revision>
</revhistory>
<copyright>
<year>2010-2011</year>
<holder>Linux Foundation</holder>
</copyright>
<legalnotice>
<para>
Permission is granted to copy, distribute and/or modify this document under
the terms of the <ulink type="http" url="http://creativecommons.org/licenses/by-sa/2.0/uk/">Creative Commons Attribution-Share Alike 2.0 UK: England &amp; Wales</ulink> as published by Creative Commons.
</para>
</legalnotice>
</bookinfo>
<xi:include href="dev-manual-intro.xml"/>
<xi:include href="dev-manual-start.xml"/>
<xi:include href="dev-manual-newbie.xml"/>
<xi:include href="dev-manual-model.xml"/>
<xi:include href="dev-manual-cases.xml"/>
<!-- <index id='index'>
<title>Index</title>
</index>
-->
</book>
<!--
vim: expandtab tw=80 ts=4
-->

Binary file not shown.

Before

Width:  |  Height:  |  Size: 12 KiB

View File

@@ -1,968 +0,0 @@
/*
Generic XHTML / DocBook XHTML CSS Stylesheet.
Browser wrangling and typographic design by
Oyvind Kolas / pippin@gimp.org
Customised for Poky by
Matthew Allum / mallum@o-hand.com
Thanks to:
Liam R. E. Quin
William Skaggs
Jakub Steiner
Structure
---------
The stylesheet is divided into the following sections:
Positioning
Margins, paddings, width, font-size, clearing.
Decorations
Borders, style
Colors
Colors
Graphics
Graphical backgrounds
Nasty IE tweaks
Workarounds needed to make it work in internet explorer,
currently makes the stylesheet non validating, but up until
this point it is validating.
Mozilla extensions
Transparency for footer
Rounded corners on boxes
*/
/*************** /
/ Positioning /
/ ***************/
body {
font-family: Verdana, Sans, sans-serif;
min-width: 640px;
width: 80%;
margin: 0em auto;
padding: 2em 5em 5em 5em;
color: #333;
}
.reviewer {
color: red;
}
h1,h2,h3,h4,h5,h6,h7 {
font-family: Arial, Sans;
color: #00557D;
clear: both;
}
h1 {
font-size: 2em;
text-align: left;
padding: 0em 0em 0em 0em;
margin: 2em 0em 0em 0em;
}
h2.subtitle {
margin: 0.10em 0em 3.0em 0em;
padding: 0em 0em 0em 0em;
font-size: 1.8em;
padding-left: 20%;
font-weight: normal;
font-style: italic;
}
h2 {
margin: 2em 0em 0.66em 0em;
padding: 0.5em 0em 0em 0em;
font-size: 1.5em;
font-weight: bold;
}
h3.subtitle {
margin: 0em 0em 1em 0em;
padding: 0em 0em 0em 0em;
font-size: 142.14%;
text-align: right;
}
h3 {
margin: 1em 0em 0.5em 0em;
padding: 1em 0em 0em 0em;
font-size: 140%;
font-weight: bold;
}
h4 {
margin: 1em 0em 0.5em 0em;
padding: 1em 0em 0em 0em;
font-size: 120%;
font-weight: bold;
}
h5 {
margin: 1em 0em 0.5em 0em;
padding: 1em 0em 0em 0em;
font-size: 110%;
font-weight: bold;
}
h6 {
margin: 1em 0em 0em 0em;
padding: 1em 0em 0em 0em;
font-size: 80%;
font-weight: bold;
}
.authorgroup {
background-color: transparent;
background-repeat: no-repeat;
padding-top: 256px;
background-image: url("figures/dev-title.png");
background-position: left top;
margin-top: -256px;
padding-right: 50px;
margin-left: 0px;
text-align: right;
width: 740px;
}
h3.author {
margin: 0em 0em 0em 0em;
padding: 0em 0em 0em 0em;
font-weight: normal;
font-size: 100%;
color: #333;
clear: both;
}
.author tt.email {
font-size: 66%;
}
.titlepage hr {
width: 0em;
clear: both;
}
.revhistory {
padding-top: 2em;
clear: both;
}
.toc,
.list-of-tables,
.list-of-examples,
.list-of-figures {
padding: 1.33em 0em 2.5em 0em;
color: #00557D;
}
.toc p,
.list-of-tables p,
.list-of-figures p,
.list-of-examples p {
padding: 0em 0em 0em 0em;
padding: 0em 0em 0.3em;
margin: 1.5em 0em 0em 0em;
}
.toc p b,
.list-of-tables p b,
.list-of-figures p b,
.list-of-examples p b{
font-size: 100.0%;
font-weight: bold;
}
.toc dl,
.list-of-tables dl,
.list-of-figures dl,
.list-of-examples dl {
margin: 0em 0em 0.5em 0em;
padding: 0em 0em 0em 0em;
}
.toc dt {
margin: 0em 0em 0em 0em;
padding: 0em 0em 0em 0em;
}
.toc dd {
margin: 0em 0em 0em 2.6em;
padding: 0em 0em 0em 0em;
}
div.glossary dl,
div.variablelist dl {
}
.glossary dl dt,
.variablelist dl dt,
.variablelist dl dt span.term {
font-weight: normal;
width: 20em;
text-align: right;
}
.variablelist dl dt {
margin-top: 0.5em;
}
.glossary dl dd,
.variablelist dl dd {
margin-top: -1em;
margin-left: 25.5em;
}
.glossary dd p,
.variablelist dd p {
margin-top: 0em;
margin-bottom: 1em;
}
div.calloutlist table td {
padding: 0em 0em 0em 0em;
margin: 0em 0em 0em 0em;
}
div.calloutlist table td p {
margin-top: 0em;
margin-bottom: 1em;
}
div p.copyright {
text-align: left;
}
div.legalnotice p.legalnotice-title {
margin-bottom: 0em;
}
p {
line-height: 1.5em;
margin-top: 0em;
}
dl {
padding-top: 0em;
}
hr {
border: solid 1px;
}
.mediaobject,
.mediaobjectco {
text-align: center;
}
img {
border: none;
}
ul {
padding: 0em 0em 0em 1.5em;
}
ul li {
padding: 0em 0em 0em 0em;
}
ul li p {
text-align: left;
}
table {
width :100%;
}
th {
padding: 0.25em;
text-align: left;
font-weight: normal;
vertical-align: top;
}
td {
padding: 0.25em;
vertical-align: top;
}
p a[id] {
margin: 0px;
padding: 0px;
display: inline;
background-image: none;
}
a {
text-decoration: underline;
color: #444;
}
pre {
overflow: auto;
}
a:hover {
text-decoration: underline;
/*font-weight: bold;*/
}
div.informalfigure,
div.informalexample,
div.informaltable,
div.figure,
div.table,
div.example {
margin: 1em 0em;
padding: 1em;
page-break-inside: avoid;
}
div.informalfigure p.title b,
div.informalexample p.title b,
div.informaltable p.title b,
div.figure p.title b,
div.example p.title b,
div.table p.title b{
padding-top: 0em;
margin-top: 0em;
font-size: 100%;
font-weight: normal;
}
.mediaobject .caption,
.mediaobject .caption p {
text-align: center;
font-size: 80%;
padding-top: 0.5em;
padding-bottom: 0.5em;
}
.epigraph {
padding-left: 55%;
margin-bottom: 1em;
}
.epigraph p {
text-align: left;
}
.epigraph .quote {
font-style: italic;
}
.epigraph .attribution {
font-style: normal;
text-align: right;
}
span.application {
font-style: italic;
}
.programlisting {
font-family: monospace;
font-size: 80%;
white-space: pre;
margin: 1.33em 0em;
padding: 1.33em;
}
.tip,
.warning,
.caution,
.note {
margin-top: 1em;
margin-bottom: 1em;
}
/* force full width of table within div */
.tip table,
.warning table,
.caution table,
.note table {
border: none;
width: 100%;
}
.tip table th,
.warning table th,
.caution table th,
.note table th {
padding: 0.8em 0.0em 0.0em 0.0em;
margin : 0em 0em 0em 0em;
}
.tip p,
.warning p,
.caution p,
.note p {
margin-top: 0.5em;
margin-bottom: 0.5em;
padding-right: 1em;
text-align: left;
}
.acronym {
text-transform: uppercase;
}
b.keycap,
.keycap {
padding: 0.09em 0.3em;
margin: 0em;
}
.itemizedlist li {
clear: none;
}
.filename {
font-size: medium;
font-family: Courier, monospace;
}
div.navheader, div.heading{
position: absolute;
left: 0em;
top: 0em;
width: 100%;
background-color: #cdf;
width: 100%;
}
div.navfooter, div.footing{
position: fixed;
left: 0em;
bottom: 0em;
background-color: #eee;
width: 100%;
}
div.navheader td,
div.navfooter td {
font-size: 66%;
}
div.navheader table th {
/*font-family: Georgia, Times, serif;*/
/*font-size: x-large;*/
font-size: 80%;
}
div.navheader table {
border-left: 0em;
border-right: 0em;
border-top: 0em;
width: 100%;
}
div.navfooter table {
border-left: 0em;
border-right: 0em;
border-bottom: 0em;
width: 100%;
}
div.navheader table td a,
div.navfooter table td a {
color: #777;
text-decoration: none;
}
/* normal text in the footer */
div.navfooter table td {
color: black;
}
div.navheader table td a:visited,
div.navfooter table td a:visited {
color: #444;
}
/* links in header and footer */
div.navheader table td a:hover,
div.navfooter table td a:hover {
text-decoration: underline;
background-color: transparent;
color: #33a;
}
div.navheader hr,
div.navfooter hr {
display: none;
}
.qandaset tr.question td p {
margin: 0em 0em 1em 0em;
padding: 0em 0em 0em 0em;
}
.qandaset tr.answer td p {
margin: 0em 0em 1em 0em;
padding: 0em 0em 0em 0em;
}
.answer td {
padding-bottom: 1.5em;
}
.emphasis {
font-weight: bold;
}
/************* /
/ decorations /
/ *************/
/* Borders and box decorations only. Border colors are assigned separately
   in the "colors" section below, so most borders here specify width/style
   without a color. Empty rules are kept as placeholders for theming. */
.titlepage {
}
.part .title {
}
.subtitle {
border: none;
}
/*
h1 {
border: none;
}
h2 {
border-top: solid 0.2em;
border-bottom: solid 0.06em;
}
h3 {
border-top: 0em;
border-bottom: solid 0.06em;
}
h4 {
border: 0em;
border-bottom: solid 0.06em;
}
h5 {
border: 0em;
}
*/
.programlisting {
border: solid 1px;
}
/* Figures, tables and examples all share the same 1px frame. */
div.figure,
div.table,
div.informalfigure,
div.informaltable,
div.informalexample,
div.example {
border: 1px solid;
}
.tip,
.warning,
.caution,
.note {
border: 1px solid;
}
.tip table th,
.warning table th,
.caution table th,
.note table th {
border-bottom: 1px solid;
}
/* Rule line above each question in a Q&A set. */
.question td {
border-top: 1px solid black;
}
.answer {
}
b.keycap,
.keycap {
border: 1px solid;
}
div.navheader, div.heading{
border-bottom: 1px solid;
}
div.navfooter, div.footing{
border-top: 1px solid;
}
/********* /
/ colors /
/ *********/
/* Color assignments for the elements decorated above. */
body {
color: #333;
background: white;
}
a {
background: transparent;
}
a:hover {
background-color: #dedede;
}
/* NOTE: h7/h8 are not standard HTML elements; these selectors simply match
   nothing and are harmless. */
h1,
h2,
h3,
h4,
h5,
h6,
h7,
h8 {
background-color: transparent;
}
hr {
border-color: #aaa;
}
.tip, .warning, .caution, .note {
border-color: #aaa;
}
.tip table th,
.warning table th,
.caution table th,
.note table th {
border-bottom-color: #aaa;
}
/* Admonition background tints (overridden for .tip/.note by the later
   rules at the end of this stylesheet, which win by source order). */
.warning {
background-color: #fea;
}
.caution {
background-color: #fea;
}
.tip {
background-color: #eff;
}
.note {
background-color: #dfc;
}
/* Glossary and variable-list terms share a dark teal accent. */
.glossary dl dt,
.variablelist dl dt,
.variablelist dl dt span.term {
color: #044;
}
div.figure,
div.table,
div.example,
div.informalfigure,
div.informaltable,
div.informalexample {
border-color: #aaa;
}
pre.programlisting {
color: black;
background-color: #fff;
border-color: #aaa;
border-width: 2px;
}
/* GUI references (menus, labels, menu items) get a subtle grey chip. */
.guimenu,
.guilabel,
.guimenuitem {
background-color: #eee;
}
b.keycap,
.keycap {
background-color: #eee;
border-color: #999;
}
div.navheader {
border-color: black;
}
div.navfooter {
border-color: black;
}
/*********** /
/ graphics /
/ ***********/
/* Background-image theming. The textured backgrounds below are disabled
   (commented out) and kept from the upstream stylesheet for reference. */
/*
body {
background-image: url("images/body_bg.jpg");
background-attachment: fixed;
}
.navheader,
.note,
.tip {
background-image: url("images/note_bg.jpg");
background-attachment: fixed;
}
.warning,
.caution {
background-image: url("images/warning_bg.jpg");
background-attachment: fixed;
}
.figure,
.informalfigure,
.example,
.informalexample,
.table,
.informaltable {
background-image: url("images/figure_bg.jpg");
background-attachment: fixed;
}
*/
h1,
h2,
h3,
h4,
h5,
h6,
h7{
}
/*
Example of how to stick an image as part of the title.
div.article .titlepage .title
{
background-image: url("figures/white-on-black.png");
background-position: center;
background-repeat: repeat-x;
}
*/
div.preface .titlepage .title,
div.colophon .title,
div.chapter .titlepage .title,
div.article .titlepage .title
{
}
div.section div.section .titlepage .title,
div.sect2 .titlepage .title {
background: none;
}
/* Image replacement: the document's main title text is pushed off-screen
   (text-indent: -9000px) and replaced by the Yocto Project logo image. */
h1.title {
background-color: transparent;
background-image: url("figures/yocto-project-bw.png");
background-repeat: no-repeat;
height: 256px;
text-indent: -9000px;
overflow:hidden;
}
/* The subtitle is fully hidden (display: none makes the indent/width
   declarations redundant but harmless). */
h2.subtitle {
background-color: transparent;
text-indent: -9000px;
overflow:hidden;
width: 0px;
display: none;
}
/*************************************** /
/ pippin.gimp.org specific alterations /
/ ***************************************/
/* The three sections below are entirely disabled (commented out) and kept
   from the upstream stylesheet for reference only. */
/*
div.heading, div.navheader {
color: #777;
font-size: 80%;
padding: 0;
margin: 0;
text-align: left;
position: absolute;
top: 0px;
left: 0px;
width: 100%;
height: 50px;
background: url('/gfx/heading_bg.png') transparent;
background-repeat: repeat-x;
background-attachment: fixed;
border: none;
}
div.heading a {
color: #444;
}
div.footing, div.navfooter {
border: none;
color: #ddd;
font-size: 80%;
text-align:right;
width: 100%;
padding-top: 10px;
position: absolute;
bottom: 0px;
left: 0px;
background: url('/gfx/footing_bg.png') transparent;
}
*/
/****************** /
/ nasty ie tweaks /
/ ******************/
/*
div.heading, div.navheader {
width:expression(document.body.clientWidth + "px");
}
div.footing, div.navfooter {
width:expression(document.body.clientWidth + "px");
margin-left:expression("-5em");
}
body {
padding:expression("4em 5em 0em 5em");
}
*/
/**************************************** /
/ mozilla vendor specific css extensions /
/ ****************************************/
/*
div.navfooter, div.footing{
-moz-opacity: 0.8em;
}
div.figure,
div.table,
div.informalfigure,
div.informaltable,
div.informalexample,
div.example,
.tip,
.warning,
.caution,
.note {
-moz-border-radius: 0.5em;
}
b.keycap,
.keycap {
-moz-border-radius: 0.3em;
}
*/
/* Hide tables nested inside table cells — presumably generated navigation
   sub-tables in the DocBook output; confirm against the generated markup
   before changing. */
table tr td table tr td {
display: none;
}
hr {
display: none;
}
table {
border: 0em;
}
/* Right-floated photo with a thin framed border. */
.photo {
float: right;
margin-left: 1.5em;
margin-bottom: 1.5em;
margin-top: 0em;
max-width: 17em;
border: 1px solid gray;
padding: 3px;
background: white;
}
/* NOTE(review): class name is misspelled ("seperator") but must match the
   class used in the generated HTML, so it is left unchanged here. */
.seperator {
padding-top: 2em;
clear: both;
}
#validators {
margin-top: 5em;
text-align: right;
color: #777;
}
/* Print stylesheet: smaller base font; .noprint elements are suppressed. */
@media print {
body {
font-size: 8pt;
}
.noprint {
display: none;
}
}
/* Dark theme for tip/note boxes. These rules appear after (and therefore
   override, by source order at equal specificity) the light #eff/#dfc
   backgrounds assigned to .tip/.note in the colors section above. */
.tip,
.note {
background: #666666;
color: #fff;
padding: 20px;
margin: 20px;
}
.tip h3,
.note h3 {
padding: 0em;
margin: 0em;
font-size: 2em;
font-weight: bold;
color: #fff;
}
/* Links inside the dark boxes stay white and underlined for contrast. */
.tip a,
.note a {
color: #fff;
text-decoration: underline;
}

View File

@@ -46,11 +46,11 @@
the baseline kernel is the most stable official release.</para></listitem>
<listitem><para>Include major technological features as part of Yocto Project's up-rev
strategy.</para></listitem>
<listitem><para>Present a Git tree, that just like the upstream kernel.org tree, has a
<listitem><para>Present a git tree, that just like the upstream kernel.org tree, has a
clear and continuous history.</para></listitem>
<listitem><para>Deliver a key set of supported kernel types, where each type is tailored
to a specific use case (i.e. networking, consumer, devices, and so forth).</para></listitem>
<listitem><para>Employ a Git branching strategy that from a customer's point of view
to a specific use case (i.g. networking, consumer, devices, and so forth).</para></listitem>
<listitem><para>Employ a git branching strategy that from a customer's point of view
results in a linear path from the baseline kernel.org, through a select group of features and
ends with their BSP-specific commits.</para></listitem>
</itemizedlist>
@@ -170,7 +170,7 @@
You can think of the Yocto Project kernel as consisting of a baseline kernel with
added features logically structured on top of the baseline.
The features are tagged and organized by way of a branching strategy implemented by the
source code manager (SCM) Git.
source code manager (SCM) git.
The result is that the user has the ability to see the added features and
the commits that make up those features.
In addition to being able to see added features, the user can also view the history of what
@@ -279,20 +279,15 @@
</section>
<section id='source-code-manager-git'>
<title>Source Code Manager - Git</title>
<title>Source Code Manager - git</title>
<para>
The Source Code Manager (SCM) is Git and it is the obvious mechanism for meeting the
The Source Code Manager (SCM) is git and it is the obvious mechanism for meeting the
previously mentioned goals.
Not only is it the SCM for kernel.org but Git continues to grow in popularity and
Not only is it the SCM for kernel.org but git continues to grow in popularity and
supports many different work flows, front-ends and management techniques.
</para>
<para>
You can find documentation on Git at <ulink url='http://git-scm.com/documentation'></ulink>.
Also, the Yocto Project Development manual has an introduction to Git and describes a
minimal set of commands that allow you to be functional with Git.
</para>
<note><para>
It should be noted that you can use as much, or as little, of what Git has to offer
It should be noted that you can use as much, or as little, of what git has to offer
as is appropriate to your project.
</para></note>
</section>
@@ -301,22 +296,21 @@
<section id='kernel-tools'>
<title>Kernel Tools</title>
<para>
Since most standard workflows involve moving forward with an existing tree by
continuing to add and alter the underlying baseline, the tools that manage
the Yocto Project's kernel construction are largely hidden from the developer to
present a simplified view of the kernel for ease of use.
</para>
<para>
The fundamental properties of the tools that manage and construct the
Yocto Project kernel are:
<itemizedlist>
<listitem><para>Group patches into named, reusable features.</para></listitem>
<listitem><para>Allow top down control of included features.</para></listitem>
<listitem><para>Bind kernel configuration to kernel patches and features.</para></listitem>
<listitem><para>Present a seamless Git repository that blends Yocto Project value
with the kernel.org history and development.</para></listitem>
</itemizedlist>
</para>
Since most standard workflows involve moving forward with an existing tree by
continuing to add and alter the underlying baseline, the tools that manage
Yocto Project's kernel construction are largely hidden from the developer to
present a simplified view of the kernel for ease of use.
</para>
<para>
The fundamental properties of the tools that manage and construct the
kernel are:
<itemizedlist>
<listitem><para>the ability to group patches into named, reusable features</para></listitem>
<listitem><para>to allow top down control of included features</para></listitem>
<listitem><para>the binding of kernel configuration to kernel patches/features</para></listitem>
<listitem><para>the presentation of a seamless git repository that blends Yocto Project value with the kernel.org history and development</para></listitem>
</itemizedlist>
</para>
<!--<para>
The tools that construct a kernel tree will be discussed later in this
document. The following tools form the foundation of the Yocto Project

View File

@@ -8,9 +8,9 @@
<section id='book-intro'>
<title>Introduction</title>
<para>
The Yocto Project presents the kernel as a fully patched, history-clean Git
The Yocto Project presents the kernel as a fully patched, history-clean git
repository.
The Git tree represents the selected features, board support,
The git tree represents the selected features, board support,
and configurations extensively tested by Yocto Project.
The Yocto Project kernel allows the end user to leverage community
best practices to seamlessly manage the development, build and debug cycles.

View File

@@ -10,8 +10,6 @@
<title>Introduction</title>
<para>
This chapter describes how to accomplish tasks involving the kernel's tree structure.
This information is designed to help the developer that wants to modify the Yocto Project kernel
and contribute changes upstream to the Yocto Project.
The information covers the following:
<itemizedlist>
<listitem><para>Tree construction</para></listitem>
@@ -40,31 +38,19 @@
in the product.
Those feature descriptions list all necessary patches,
configuration, branching, tagging and feature divisions found in the kernel.
Thus, the Yocto Project kernel repository (or tree) is built.
The existence of this tree allows you to build images based on your configurations
and features.
</para>
<para>
You can find the files used to describe all the valid features and BSPs in the Yocto Project
kernel in any clone of the kernel Git tree.
For example, the following command clones the Yocto Project baseline kernel that
branched off of linux.org version 2.6.37:
<literallayout class='monospaced'>
$ git clone http://git.yoctoproject.org/cgit/cgit.cgi/linux-yocto-2.6.37
</literallayout>
After you switch to the <filename>meta</filename> branch within the repository
you can see a snapshot of all the kernel configuration and feature descriptions that are
used to build the kernel repository.
These descriptions are in the form of <filename>.scc</filename> files.
</para>
<para>
kernel in any clone of the kernel git tree.
The directory <filename>meta/cfg/kernel-cache/</filename> is a snapshot of all the kernel
configuration and feature descriptions (.scc) used to build the kernel repository.
You should realize, however, that browsing the snapshot of feature
descriptions and patches is not an effective way to determine what is in a
particular kernel branch.
Instead, you should use Git directly to discover the changes
Instead, you should use git directly to discover the changes
in a branch.
Using Git is an efficient and flexible way to inspect changes to the kernel.
For examples showing how to use Git to inspect kernel commits, see the following sections
Using git is a efficient and flexible way to inspect changes to the kernel.
For examples showing how to use git to inspect kernel commits, see the following sections
in this chapter.
</para>
<note><para>
@@ -74,56 +60,46 @@
and development.
</para></note>
<para>
The following steps describe what happens during tree construction given the introduction
of a new top-level kernel feature or BSP.
These are the actions that effectively create the tree that includes the new feature, patch,
or BSP:
<orderedlist>
<listitem><para>A top-level kernel feature is passed to the kernel build subsystem.
Normally, this is a BSP for a particular kernel type.</para></listitem>
The general flow for constructing a project-specific kernel tree is as follows:
<orderedlist>
<listitem><para>A top-level kernel feature is passed to the kernel build subsystem.
Normally, this is a BSP for a particular kernel type.</para></listitem>
<listitem><para>The file that describes the top-level feature is located by searching
these system directories:
<listitem><para>The file that describes the top-level feature is located by searching
these system directories:</para>
<itemizedlist>
<listitem><para>The in-tree kernel-cache directories, which are located
in <filename>meta/cfg/kernel-cache</filename></para></listitem>
<itemizedlist>
<listitem><para>The in-tree kernel-cache directories</para></listitem>
<!-- <listitem><para>kernel-*-cache directories in layers</para></listitem> -->
<listitem><para>Areas pointed to by <filename>SRC_URI</filename> statements
found in recipes</para></listitem>
<listitem><para>Recipe SRC_URIs</para></listitem>
<!-- <listitem><para>configured and default templates</para></listitem> -->
</itemizedlist>
</itemizedlist>
For a typical build, the target of the search is a
feature description in an <filename>.scc</filename> file
whose name follows this format:
<literallayout class='monospaced'>
&lt;bsp_name&gt;-&lt;kernel_type&gt;.scc
</literallayout>
</para></listitem>
<para>For a typical build a feature description of the format:
&lt;bsp name&gt;-&lt;kernel type&gt;.scc is the target of the search.
</para></listitem>
<listitem><para>Once located, the feature description is either compiled into a simple script
of actions, or into an existing equivalent script that is already part of the
shipped kernel.</para></listitem>
<listitem><para>Once located, the feature description is either compiled into a simple script
of actions, or an existing equivalent script that was part of the
shipped kernel is located.</para></listitem>
<listitem><para>Extra features are appended to the top-level feature description.
These features can come from the <filename>KERNEL_FEATURES</filename> variable in
recipes.</para></listitem>
<listitem><para>Extra features are appended to the top-level feature description.
These features can come from the KERNEL_FEATURES variable in recipes.</para></listitem>
<listitem><para>Each extra feature is located, compiled and appended to the script
as described in step three.</para></listitem>
<listitem><para>Each extra feature is located, compiled and appended to the script from
step #3</para></listitem>
<listitem><para>The script is executed to produce a meta-series.
The meta-series is a description of all the branches, tags, patches and configurations that
need to be applied to the base Git repository to completely create the
source (build) branch for the new BSP or feature.</para></listitem>
<listitem><para>The script is executed, and a meta-series is produced.
The meta-series is a description of all the branches, tags, patches and configuration that
needs to be applied to the base git repository to completely create the
BSP source (build) branch.</para></listitem>
<listitem><para>The base repository is cloned, and the actions
listed in the meta-series are applied to the tree.</para></listitem>
<listitem><para>The base repository is cloned, and the actions
listed in the meta-series are applied to the tree.</para></listitem>
<listitem><para>The Git repository is left with the desired branch checked out and any
required branching, patching and tagging has been performed.</para></listitem>
</orderedlist>
<listitem><para>The git repository is left with the desired branch checked out and any
required branching, patching and tagging has been performed.</para></listitem>
</orderedlist>
</para>
<para>
@@ -137,7 +113,7 @@
official Yocto Project kernel repositories is the combination of all
supported boards and configurations.</para>
<para>This technique is flexible and allows for seamless blending of an immutable
<para>This technique is flexible and allows the seamless blending of an immutable
history with additional deployment specific patches.
Any additions to the kernel become an integrated part of the branches.
</para></note>
@@ -161,7 +137,7 @@ A summary of end user tree construction activities follow:
<itemizedlist>
<listitem><para>compile and link a full top-down kernel description from feature descriptions</para></listitem>
<listitem><para>execute the complete description to generate a meta-series</para></listitem>
<listitem><para>interpret the meta-series to create a customized Git repository for the
<listitem><para>interpret the meta-series to create a customized git repository for the
board</para></listitem>
<listitem><para>migrate configuration fragments and configure the kernel</para></listitem>
<listitem><para>checkout the BSP branch and build</para></listitem>
@@ -177,7 +153,7 @@ A summary of end user tree construction activities follow:
</para>
<itemizedlist>
<listitem><para>There must be a kernel Git repository indicated in the SRC_URI.</para></listitem>
<listitem><para>There must be a kernel git repository indicated in the SRC_URI.</para></listitem>
<listitem><para>There must be a BSP build branch - &lt;bsp name&gt;-&lt;kernel type&gt; in 0.9 or
&lt;kernel type&gt;/&lt;bsp name&gt; in 1.0.</para></listitem>
</itemizedlist>
@@ -192,14 +168,12 @@ A summary of end user tree construction activities follow:
<para>
Before building a kernel it is configured by processing all of the
configuration "fragments" specified by feature descriptions in the <filename>scc</filename>
files.
configuration "fragments" specified by the scc feature descriptions.
As the features are compiled, associated kernel configuration fragments are noted
and recorded in the meta-series in their compilation order.
The fragments are migrated, pre-processed and passed to the Linux Kernel
Configuration subsystem (<filename>lkc</filename>) as raw input in the form
of a <filename>.config</filename> file.
The <filename>lkc</filename> uses its own internal dependency constraints to do the final
Configuration subsystem (lkc) as raw input in the form of a <filename>.config</filename> file.
The lkc uses its own internal dependency constraints to do the final
processing of that information and generates the final <filename>.config</filename> file
that is used during compilation.
</para>
@@ -210,7 +184,7 @@ A summary of end user tree construction activities follow:
</para>
<para>The other thing that you will first see once you configure a kernel is that
it will generate a build tree that is separate from your Git source tree.
it will generate a build tree that is separate from your git source tree.
This build tree has the name using the following form:
<literallayout class='monospaced'>
linux-&lt;BSPname&gt;-&lt;kerntype&gt;-build
@@ -227,7 +201,7 @@ A summary of end user tree construction activities follow:
The files include the final <filename>.config</filename>, all the <filename>.o</filename>
files, the <filename>.a</filename> files, and so forth.
Since each BSP has its own separate build directory in its own separate branch
of the Git tree you can easily switch between different BSP builds.
of the git tree you can easily switch between different BSP builds.
</para>
</section>
@@ -246,7 +220,7 @@ to be used or not. The 2.0 release already made use of some stateful
construction of series files, but since the delivery mechanism was unchanged
(tar + patches + series files), most people were not aware of anything really
different. The 3.0 release continues with this stateful construction of
series files, but since the delivery mechanism is changed (Git + branches) it
series files, but since the delivery mechanism is changed (git + branches) it
now is more apparent to people.
</para>
<para>
@@ -255,7 +229,7 @@ compiler". Its role is to combine feature descriptions into a format that can
be used to generate a meta-series. A meta series contains all the required
information to construct a complete set of branches that are required to
build a desired board and feature set. The meta series is interpreted by the
kgit tools to create a Git repository that could be built.
kgit tools to create a git repository that could be built.
</para>
<para>
To illustrate how scc works, a feature description must first be understood.
@@ -272,7 +246,7 @@ Each feature description can use any of the following valid scc commands:
<listitem><para>shell constructs: bash conditionals and other utilities can be used in a feature
description. During compilation, the working directory is the feature
description itself, so any command that is "raw shell" and not from the
list of supported commands, can not directly modify a Git repository.</para></listitem>
list of supported commands, can not directly modify a git repository.</para></listitem>
<listitem><para>patch &lt;relative path&gt;/&lt;patch name&gt;: outputs a patch to be included in a feature's patch set. Only the name of
the patch is supplied, the path is calculated from the currently set
@@ -323,9 +297,9 @@ Each feature description can use any of the following valid scc commands:
include is processed, so is normally only used by a new top level feature
to modify the order of features in something it is including.</para></listitem>
<listitem><para>git &lt;command&gt;: Issues any Git command during tree construction. Note: this command is
<listitem><para>git &lt;command&gt;: Issues any git command during tree construction. Note: this command is
not validated/sanitized so care must be taken to not damage the
tree. This can be used to script branching, tagging, pulls or other Git
tree. This can be used to script branching, tagging, pulls or other git
operations.</para></listitem>
<listitem><para>dir &lt;directory&gt;: changes the working directory for "patch" directives. This can be used to
@@ -375,17 +349,17 @@ kgit-meta is the actual application of feature description(s) to a kernel repo.
In other words, it is responsible for interpreting the meta series generated
from a scc compiled script. As a result, kgit-meta is coupled to the set of
commands permitted in a .scc feature description (listed in the scc section).
kgit-meta understands both the meta series format and how to use Git and
guilt to modify a base Git repository. It processes a meta-series line by
kgit-meta understands both the meta series format and how to use git and
guilt to modify a base git repository. It processes a meta-series line by
line, branching, tagging, patching and tracking changes that are made to the
base Git repository.
base git repository.
</para>
<para>
Once kgit-meta has processed a meta-series, it leaves the repository with the
last branch checked out, and creates the necessary guilt infrastructure to
inspect the tree, or add to it via using guilt. As was previously mentioned,
guilt is not required, but is provided as a convenience. Other utilities such
as quilt, stgit, Git or others can also be used to manipulate the Git
as quilt, stgit, git or others can also be used to manipulate the git
repository.
</para>
</section> -->
@@ -394,12 +368,12 @@ repository.
<title>Workflow Examples</title>
<para>
As previously noted, the Yocto Project kernel has built in Git integration.
As previously noted, the Yocto Project kernel has built in git integration.
However, these utilities are not the only way to work with the kernel repository.
Yocto Project has not made changes to Git or to other tools that
Yocto Project has not made changes to git or to other tools that
would invalidate alternate workflows.
Additionally, the way the kernel repository is constructed results in using
only core Git functionality thus allowing any number of tools or front ends to use the
only core git functionality thus allowing any number of tools or front ends to use the
resulting tree.
</para>
@@ -428,7 +402,7 @@ repository.
<para>
A more efficient way to determine what has changed in the kernel is to use
Git and inspect or search the kernel tree.
git and inspect or search the kernel tree.
This method gives you a full view of not only the source code modifications,
but also provides the reasons for the changes.
</para>
@@ -437,8 +411,8 @@ repository.
<title>What Changed in a BSP?</title>
<para>
Following are a few examples that show how to use Git to examine changes.
Note that because the Yocto Project Git repository does not break existing Git
Following are a few examples that show how to use git to examine changes.
Note that because the Yocto Project git repository does not break existing git
functionality and because there exists many permutations of these types of
commands there are many more methods to discover changes.
</para>
@@ -501,7 +475,7 @@ repository.
<para>
You can use many other comparisons to isolate BSP changes.
For example, you can compare against kernel.org tags (e.g. v2.6.27.18, etc), or
you can compare against subsystems (e.g. <filename>git whatchanged mm</filename>).
you can compare against subsystems (e.g. git whatchanged mm).
</para>
</section>
</section>
@@ -516,9 +490,9 @@ repository.
</para>
<para>
Since the Yocto Project kernel source tree is backed by Git, this activity is
Since the Yocto Project kernel source tree is backed by git, this activity is
much easier as compared to with previous releases.
Because Git tracks file modifications, additions and deletions, it is easy
Because git tracks file modifications, additions and deletions, it is easy
to modify the code and later realize that the changes should be saved.
It is also easy to determine what has changed.
This method also provides many tools to commit, undo and export those modifications.
@@ -531,7 +505,7 @@ repository.
<itemizedlist>
<listitem><para>Bulk storage</para></listitem>
<listitem><para>Internal sharing either through patches or by using Git</para></listitem>
<listitem><para>Internal sharing either through patches or by using git</para></listitem>
<listitem><para>External submissions</para></listitem>
<listitem><para>Exporting for integration into another SCM</para></listitem>
</itemizedlist>
@@ -579,7 +553,7 @@ repository.
<para>
The previous operations capture all the local changes in the project source
tree in a single Git commit.
tree in a single git commit.
And, that commit is also stored in the project's source tree.
</para>
@@ -599,12 +573,12 @@ repository.
The examples in this section assume that changes have been incrementally committed
to the tree during development and now need to be exported. The sections that follow
describe how you can export your changes internally through either patches or by
using Git commands.
using git commands.
</para>
<para>
During development the following commands are of interest.
For full Git documentation, refer to the Git man pages or to an online resource such
For full git documentation, refer to the git man pages or to an online resource such
as <ulink url='http://github.com'></ulink>.
<literallayout class='monospaced'>
@@ -643,15 +617,15 @@ repository.
associated with development by using the following commands:
<literallayout class='monospaced'>
&gt; Git add &gt;path&lt;/file
&gt; Git commit --amend
&gt; Git rebase or Git rebase -i
&gt; git add &gt;path&lt;/file
&gt; git commit --amend
&gt; git rebase or git rebase -i
</literallayout>
</para>
<para>
Again, assuming that the changes have not been pushed upstream, and that
no pending works-in-progress exist (use <filename>git status</filename> to check) then
no pending works-in-progress exist (use "git status" to check) then
you can revert (undo) commits by using the following commands:
<literallayout class='monospaced'>
@@ -666,13 +640,13 @@ repository.
</para>
<para>
You can create branches, "cherry-pick" changes or perform any number of Git
You can create branches, "cherry-pick" changes or perform any number of git
operations until the commits are in good order for pushing upstream
or for pull requests.
After a push or pull, commits are normally considered
"permanent" and you should not modify them.
If they need to be changed you can incrementally do so with new commits.
These practices follow the standard Git workflow and the kernel.org best
These practices follow the standard "git" workflow and the kernel.org best
practices, which Yocto Project recommends.
</para>
@@ -741,7 +715,7 @@ repository.
</section>
<section id='export-internally-via-git'>
<title>Exporting Changes Internally by Using Git</title>
<title>Exporting Changes Internally by Using git</title>
<para>
This section describes how you can export changes from a working directory
@@ -753,8 +727,7 @@ repository.
<para>
Use this command form to push the changes:
<literallayout class='monospaced'>
&gt; git push ssh://&lt;master_server&gt;/&lt;path_to_repo&gt;
&lt;local_branch&gt;:&lt;remote_branch&gt;
git push ssh://&lt;master server&gt;/&lt;path to repo&gt; &lt;local branch&gt;:&lt;remote branch&gt;
</literallayout>
</para>
@@ -763,26 +736,25 @@ repository.
<filename>yocto/standard/common-pc/base</filename> to the remote branch with the same name
in the master repository <filename>//git.mycompany.com/pub/git/kernel-2.6.37</filename>.
<literallayout class='monospaced'>
&gt; git push ssh://git.mycompany.com/pub/git/kernel-2.6.37 \
yocto/standard/common-pc/base:yocto/standard/common-pc/base
&gt; push ssh://git.mycompany.com/pub/git/kernel-2.6.37 yocto/standard/common-pc/base:yocto/standard/common-pc/base
</literallayout>
</para>
<para>
A pull request entails using <filename>git request-pull</filename> to compose an email to the
A pull request entails using "git request-pull" to compose an email to the
maintainer requesting that a branch be pulled into the master repository, see
<ulink url='http://github.com/guides/pull-requests'></ulink> for an example.
</para>
<note><para>
Other commands such as <filename>git stash</filename> or branching can also be used to save
Other commands such as 'git stash' or branching can also be used to save
changes, but are not covered in this document.
</para></note>
<!--<para>
See the section "importing from another SCM" for how a Git push to the
See the section "importing from another SCM" for how a git push to the
default_kernel, can be used to automatically update the builds of all users
of a central Git repository.
of a central git repository.
</para>-->
</section>
</section>
@@ -813,7 +785,7 @@ repository.
The messages used to commit changes are a large part of these standards.
Consequently, be sure that the headers for each commit have the required information.
If the initial commits were not properly documented or do not meet those standards,
you can re-base by using the <filename>git rebase -i</filename> command to manipulate the commits and
you can re-base by using the "git rebase -i" command to manipulate the commits and
get them into the required format.
Other techniques such as branching and cherry-picking commits are also viable options.
</para>
@@ -821,7 +793,7 @@ repository.
<para>
Once you complete the commits, you can generate the email that sends the patches
to the maintainer(s) or lists that review and integrate changes.
The command <filename>git send-email</filename> is commonly used to ensure that patches are properly
The command "git send-email" is commonly used to ensure that patches are properly
formatted for easy application and avoid mailer-induced patch damage.
</para>
@@ -853,7 +825,7 @@ repository.
</para>
<para>
Many SCMs can directly import Git commits, or can translate Git patches so that
Many SCMs can directly import git commits, or can translate git patches so that
information is not lost.
Those facilities are SCM-dependent and you should use them whenever possible.
</para>
@@ -882,7 +854,7 @@ repository.
<para>
Depending on the SCM it might be possible to export the entire Yocto Project
kernel Git repository, branches and all, into a new environment.
kernel git repository, branches and all, into a new environment.
This method is preferred because it has the most flexibility and potential to maintain
the meta data associated with each commit.
</para>
@@ -928,14 +900,14 @@ repository.
automatically apply them to the kernel during patching.
</para>
<!--<para>
If changes are imported directly into Git, they must be propagated to the
If changes are imported directly into git, they must be propagated to the
wrll-linux-2.6.27/git/default_kernel bare clone of each individual build
to be present when the kernel is checked out.
</para>
<para>
The following example illustrates one variant of this workflow:
<literallayout class='monospaced'>
# on master Git repository
# on master git repository
&gt; cd linux-2.6.27
&gt; git tag -d common_pc-standard-mark
&gt; git pull ssh://&lt;foo&gt;@&lt;bar&gt;/pub/git/kernel-2.6.27 common_pc-standard:common_pc-standard
@@ -956,7 +928,7 @@ The following example illustrates one variant of this workflow:
<!-- <section id='bsp-template-migration-from-2'>
<title>BSP: Template Migration from 2.0</title>
<para>
The move to a Git-backed kernel build system in 3.0 introduced a small new
The move to a git-backed kernel build system in 3.0 introduced a small new
requirement for any BSP that is not integrated into the GA release of the
product: branching information.
</para>
@@ -1034,60 +1006,204 @@ That's it. Configure and build.
<title>Creating a BSP Based on an Existing Similar BSP</title>
<para>
This section overviews the process of creating a BSP based on an
existing similar BSP.
The information is introductory in nature and does not provide step-by-step examples.
For detailed information on how to create a BSP given an existing similar BSP
see the Yocto Project Development Manual [NEED LINK] or the
<ulink url='https://wiki.yoctoproject.org/wiki/Transcript:_creating_one_generic_Atom_BSP_from_another'></ulink>
wiki page.
</para>
This section provides an example for creating a BSP
that is based on an existing, and hopefully, similar
one. It assumes you will be using a local kernel
repository and will be pointing the kernel recipe at
that. Follow these steps and keep in mind your
particular situation and differences:
<para>
The basic steps you need to follow are:
<orderedlist>
<listitem><para>Make sure you have the Yocto Project source tree available.
You should either create a Yocto Project Git repository (recommended), or
you should get the Yocto Project release tarball and extract it.</para></listitem>
<listitem><para>Choose an existing BSP available with the Yocto Project.
Try to map your board features as closely to the features of a BSP that is
already supported and exists in the Yocto Project.
Starting with something as close as possible to your board makes developing
your BSP easier.
You can find all the BSPs that are supported and ship with the Yocto Project
on the Yocto Project's Download page at
<ulink url='http://www.yoctoproject.org/download'></ulink>.</para></listitem>
<listitem><para>Be sure you have the Base BSP.
You need to either have the Yocto Project Git repository set up or download
the tarball of the base BSP.
Either method gives you access to the BSP source files.</para></listitem>
<listitem><para>Make a copy of the existing BSP, thus isolating your new BSP work.
Copying the existing BSP structure gives you a new area in which to work.</para></listitem>
<listitem><para>Make configuration and recipe changes to your new BSP.
Configuration changes involve the files in the BSP's <filename>conf</filename>
directory.
Changes include creating a machine-specific configuration file and editing the
<filename>layer.conf</filename> file.
The configuration changes identify the kernel you will be using.
Recipe changes include removing, modifying, or adding new recipe files that
instruct the build process on what features to include in the image.</para></listitem>
<listitem><para>Prepare for the build.
Before you actually initiate the build you need to set up the build environment
by sourcing the environment initialization script.
After setting up the environment you need to make some build configuration
changes to the <filename>local.conf</filename> and <filename>bblayers.conf</filename>
files.</para></listitem>
<listitem><para>Build the image.
The Yocto Project uses the BitBake tool to create the image.
You need to decide on the type of image you are going to build (e.g. minimal, base,
core, sato, and so forth) and then start the build using the <filename>bitbake</filename>
command.</para></listitem>
</orderedlist>
<orderedlist>
<listitem><para>
Identify a machine configuration file that matches your machine.
</para>
<para>
You can start with something in <filename>meta/conf/machine</filename> - <filename>
meta/conf/machine/atom-pc.conf</filename> for example. Or, you can start with a machine
configuration from any of the BSP layers in the meta-intel repository at
<ulink url='http://git.yoctoproject.org/cgit/cgit.cgi/meta-intel/'></ulink>, such as
<filename>meta-intel/meta-emenlow/conf/machine/emenlow.conf</filename>.
</para>
<para>
The main difference between the two is that "emenlow" is in its own layer.
It is in its own layer because it needs extra machine-specific packages such as its
own video driver and other supporting packages.
The "atom-pc" is simpler and does not need any special packages - everything it needs can
be specified in the configuration file.
The "atom-pc" machine also supports all of Asus eee901, Acer Aspire One, Toshiba NB305,
and the Intel&reg; Embedded Development Board 1-N450 with no changes.
</para>
<para>
If you want to make minor changes to support a slightly different machine, you can
create a new configuration file for it and add it alongside the others.
You might consider keeping the common information separate and including it.
</para>
<para>
Similarly, you can also use multiple configuration files for different machines even
if you do it as a separate layer like meta-emenlow.
</para>
<para>
As an example consider this:
<itemizedlist>
<listitem><para>Copy meta-emenlow to meta-mymachine</para></listitem>
<listitem><para>Fix or remove anything you do not need.
For this example the only thing left was the kernel directory with a
<filename>linux-yocto_git.bbappend</filename>
file
and <filename>meta-mymachine/conf/machine/mymachine.conf</filename>
(linux-yocto is the kernel listed in
<filename>meta-emenlow/conf/machine/emenlow.conf</filename>)</para></listitem>.
<listitem><para>Add a new entry in the <filename>build/conf/bblayers.conf</filename>
so the new layer can be found by BitBake.</para></listitem>
</itemizedlist>
</para></listitem>
<listitem><para>
Create a machine branch for your machine.
</para>
<para>
For the kernel to compile successfully, you need to create a branch in the Git repository
specifically named for your machine.
To create this branch, first create a bare clone of the Yocto Project Git repository.
Next, create a local clone of that:
<literallayout class='monospaced'>
$ git clone --bare git://git.yoctoproject.org/linux-yocto-2.6.37.git
linux-yocto-2.6.37.git
$ git clone linux-yocto-2.6.37.git linux-yocto-2.6.37
</literallayout>
</para>
<para>
Now create a branch in the local clone and push it to the bare clone:
<literallayout class='monospaced'>
$ git checkout -b yocto/standard/mymachine origin/yocto/standard/base
$ git push origin yocto/standard/mymachine:yocto/standard/mymachine
</literallayout>
</para></listitem>
<listitem><para>
In a layer, create a <filename>linux-yocto_git.bbappend</filename>
file with the following:
</para>
<para>
<literallayout class='monospaced'>
FILESEXTRAPATHS := "${THISDIR}/${PN}"
COMPATIBLE_MACHINE_mymachine = "mymachine"
# It is often nice to have a local clone of the kernel repository, to
# allow patches to be staged, branches created, and so forth. Modify
# KSRC to point to your local clone as appropriate.
KSRC ?= /path/to/your/bare/clone/for/example/linux-yocto-2.6.37.git
# KMACHINE is the branch to be built, or alternatively
# KBRANCH can be directly set.
# KBRANCH is set to KMACHINE in the main linux-yocto_git.bb
# KBRANCH ?= "${LINUX_KERNEL_TYPE}/${KMACHINE}"
KMACHINE_mymachine = "yocto/standard/mymachine"
SRC_URI = "git://${KSRC};nocheckout=1;branch=${KBRANCH},meta;name=machine,meta"
</literallayout>
</para>
<para>
After doing that, select the machine in <filename>build/conf/local.conf</filename>:
<literallayout class='monospaced'>
#
MACHINE ?= "mymachine"
#
</literallayout>
</para>
<para>
You should now be able to build and boot an image with the new kernel:
<literallayout class='monospaced'>
$ bitbake core-image-sato-live
</literallayout>
</para></listitem>
<listitem><para>
Modify the kernel configuration for your machine.
</para>
<para>
Of course, that will give you a kernel with the default configuration file, which is probably
not what you want.
If you just want to set some kernel configuration options, you can do that by
putting them in a file.
For example, inserting the following into some <filename>.cfg</filename> file:
<literallayout class='monospaced'>
CONFIG_NETDEV_1000=y
CONFIG_E1000E=y
</literallayout>
</para>
<para>
And, another <filename>.cfg</filename> file would contain:
<literallayout class='monospaced'>
CONFIG_LOG_BUF_SHIFT=18
</literallayout>
These config fragments could then be picked up and
applied to the kernel .config by appending them to the kernel SRC_URI:
<literallayout class='monospaced'>
SRC_URI_append_mymachine = " file://some.cfg \
file://other.cfg \
"
</literallayout>
</para>
<para>
You could also add these directly to the git repository <filename>meta</filename>
branch as well.
However, the former method is a simple starting point.
</para></listitem>
<listitem><para>
If you're also adding patches to the kernel, you can do the same thing.
Put your patches in the SRC_URI as well (plus <filename>.cfg</filename> for their kernel
configuration options if needed).
</para>
<para>
Practically speaking, to generate the patches, you'd go to the source in the build tree:
<literallayout class='monospaced'>
build/tmp/work/mymachine-poky-linux/linux-yocto-2.6.37+git0+d1cd5c80ee97e81e130be8c3de3965b770f320d6_0+
0431115c9d720fee5bb105f6a7411efb4f851d26-r13/linux
</literallayout>
</para>
<para>
Then, modify the code there, using quilt to save the changes, and recompile until
it works:
<literallayout class='monospaced'>
$ bitbake -c compile -f linux-yocto
</literallayout>
</para></listitem>
<listitem><para>
Once you have the final patch from quilt, copy it to the
SRC_URI location.
The patch is applied the next time you do a clean build.
Of course, since you have a branch for the BSP in git, it would be better to put it there instead.
For example, in this case, commit the patch to the "yocto/standard/mymachine" branch, and during the
next build it is applied from there.
</para></listitem>
</orderedlist>
</para>
</section>
<!--
<section id='bsp-creating-bsp-without-a-local-kernel-repo'>
<title>Creating a BSP Based on an Existing Similar BSP Without a Local Kernel Repository</title>
@@ -1120,8 +1236,7 @@ That's it. Configure and build.
</section>
<section id='bsp-creating-a-new-bsp'>
<!-- <section id='bsp-creating-a-new-bsp'>
<title>BSP: Creating a New BSP</title>
<para>
Although it is obvious that the structure of a new BSP uses the migrated
@@ -1314,7 +1429,7 @@ In this technique the .scc file in the board template is slightly different
<para>
The previous examples created the board templates and configured a build
before beginning work on a new BSP. It is also possible for advanced users to
simply treat the Yocto Project Git repository as an upstream source and begin
simply treat the Yocto Project git repository as an upstream source and begin
BSP development directly on the repository. This is the closest match to how
the kernel community at large would operate.
</para>
@@ -1564,7 +1679,7 @@ Or you can do this:
</para>
<para>
For details on conflict resolution and patch application, see the
Git manual, or other suitable online references.
git manual, or other suitable online references.
<literallayout class='monospaced'>
&gt; git am &lt;mbox&gt;
# conflict
@@ -1692,8 +1807,8 @@ Other guilt operations of interest are:
</literallayout>
</para>
<note><para>
Guilt only uses Git commands and Git plumbing to perform its operations,
anything that guilt does can also be done using Git directly. It is provided
Guilt only uses git commands and git plumbing to perform its operations,
anything that guilt does can also be done using git directly. It is provided
as a convenience utility, but is not required and the developer can use whatever
tools or workflow they wish.
</para></note>
@@ -1702,7 +1817,7 @@ The following builds from the above instructions to show how guilt can be
used to assist in getting your BSP kernel patches ready. You should follow
the above instructions up to and including 'make linux.config'. In this
example I will create a new commit (patch) from scratch and import another
fictitious patch from some external public Git tree (i.e., a commit with full
fictitious patch from some external public git tree (i.e., a commit with full
message, signoff etc.). Please ensure you have host-cross/bin in your path.
<literallayout class='monospaced'>
%> cd linux
@@ -1720,7 +1835,7 @@ message, signoff etc.). Please ensure you have host-cross/bin in your path.
Here are a few notes about the above:
<itemizedlist>
<listitem><para>guilt-header -e &dash;&dash; this will open editing of the patch header in
EDITOR. As with a Git commit the first line is the short log and
EDITOR. As with a git commit the first line is the short log and
should be just that short and concise message about the commit. Follow
the short log with lines of text that will be the long description but
note: do not put a blank line after the short log. As usual you will
@@ -1734,7 +1849,7 @@ Here are a few notes about the above:
review comment in the first patch (first_one.patch in the case of this
example) it is very easy to use guilt to pop the other patches off
allowing you to make the necessary changes without having to use more
inventive Git type strategies.</para></listitem>
inventive git type strategies.</para></listitem>
</itemizedlist>
</para>
</section>
@@ -1839,7 +1954,7 @@ This section shows an example of transforms:
</para>
<para>
You can use the Git command above to report modified, removed, or added files.
You can use the git command above to report modified, removed, or added files.
You should commit those changes to the tree regardless of whether they will be saved,
exported, or used.
Once you commit the changes you need to rebuild the kernel.
@@ -1866,7 +1981,7 @@ This section shows an example of transforms:
<orderedlist>
<listitem><para>Create a custom kernel layer.</para></listitem>
<listitem><para>Create a Git repository of the transition kernel.</para></listitem>
<listitem><para>Create a git repository of the transition kernel.</para></listitem>
</orderedlist>
</para>
@@ -1908,12 +2023,12 @@ patches. If a custom BSP is being used, this is not required.
</section> -->
<!-- <section id='git-repo-of-the-transition-kernel'>
<title>Git Repo of the Transition Kernel</title>
<title>git Repo of the Transition Kernel</title>
<para>
The kernel build system requires a base kernel repository to
seed the build process. This repository must be found in the
same layer as the build infrastructure (i.e wrll-linux-2.6.27)
in the <filename>.git</filename> subdir, with the name 'default_kernel'
in the 'git' subdir, with the name 'default_kernel'
</para>
<para>Since Yocto Project Linux ships with a default_kernel
(the validated Yocto Project kernel) in the wrll-linux-2.6.27
@@ -1922,15 +2037,15 @@ transition kernel.
</para>
<para>If the Yocto Project install cannot be directly modified
with the new default kernel, then the path to the transition
kernel layer's <filename>.git</filename> subdir must be passed to the build
kernel layer's 'git' subdir must be passed to the build
process via:
<programlisting>
linux_GIT_BASE=&lt;absolute path to layer&gt;/git
</programlisting>
</para>
<para>
If the transition kernel has not been delivered via Git,
then a Git repo should be created, and bare cloned into
If the transition kernel has not been delivered via git,
then a git repo should be created, and bare cloned into
place. Creating this repository is as simple as:
<literallayout class='monospaced'>
&gt; tar zxvf temp_kernel.tgz
@@ -2003,7 +2118,7 @@ To build the kernel:
</para>
<para>
If this is to build without some user intervention (passing of the
GIT_BASE), you must do the clone into the <filename>wrll-linux-2.6.27/.git</filename> directory.
GIT_BASE), you must do the clone into the wrll-linux-2.6.27/git directory.
</para>
<note><para>Unless you define valid "hardware.kcf" and "non-hardware.kcf" some
non fatal warnings will be seen. They can be fixed by populating these
@@ -2053,7 +2168,7 @@ options.
<listitem><para>Building a 'dirty' image.</para></listitem>
<listitem><para>Temporarily using a different base kernel.</para></listitem>
<listitem><para>Creating a custom kernel layer.</para></listitem>
<listitem><para>Creating the Git repository of the transition kernel.</para></listitem>
<listitem><para>Creating the git repository of the transition kernel.</para></listitem>
</itemizedlist> -->

View File

@@ -675,7 +675,7 @@ BBFILE_PRIORITY_emenlow = "6"
These functions allow generation of dependency data between functions and
variables allowing moves to be made towards generating checksums and allowing
use of the dependency information in other parts of BitBake.
use of the dependency information in other parts of bitbake.
Signed-off-by: Richard Purdie richard.purdie@linuxfoundation.org
</literallayout>

View File

@@ -251,7 +251,7 @@ PREFERRED_PROVIDER_virtual/kernel = "linux-rp"
<title>BitBake Command Line</title>
<para>
Following is the BitBake manpage:
Following is the bitbake manpage:
</para>
<screen>

View File

@@ -102,8 +102,8 @@
<para>
Another important Yocto Project feature is the Sato reference User Interface.
This optional GNOME mobile-based UI, which is intended for devices with
restricted screen sizes, sits neatly on top of a device using the
GNOME Mobile Stack and provides a well-defined user experience.
resolution but restricted size screens, sits neatly on top of a device using the
GNOME Mobile Stack providing a well-defined user experience.
Implemented in its own layer, it makes it clear to developers how they can implement
their own UIs on top of Yocto Linux.
</para>
@@ -119,7 +119,7 @@
<itemizedlist>
<listitem>
<para>A host system running a supported Linux distribution (i.e. recent releases of
Fedora, openSUSE, Debian, and Ubuntu).
Fedora, OpenSUSE, Debian, and Ubuntu).
<note>
For notes about using the Yocto Project on development systems that use
older Linux distributions see
@@ -145,7 +145,7 @@
<itemizedlist>
<listitem><para>Ubuntu</para></listitem>
<listitem><para>Fedora</para></listitem>
<listitem><para>openSUSE</para></listitem>
<listitem><para>OpenSuse</para></listitem>
</itemizedlist>
</para>
<para>
@@ -180,7 +180,7 @@
<note><para>
If you are using a Fedora version prior to version 15 you will need to take some
extra steps to enable <filename>sudo</filename>.
See <ulink url='https://fedoraproject.org/wiki/Configuring_Sudo'></ulink> for details.
See <ulink url='https://fedoraproject.org/wiki/Configureing_Sudo'></ulink> for details.
</para></note>
<para>
@@ -196,7 +196,7 @@
</literallayout>
<para>
The packages you need for an RPM-based host like Fedora and openSUSE,
The packages you need for an RPM-based host like Fedora and OpenSUSE,
respectively, are as follows:
</para>
@@ -213,9 +213,9 @@
</literallayout>
<literallayout class='monospaced'>
$ sudo zypper install python gcc gcc-c++ libtool \
subversion git chrpath automake \
help2man diffstat texinfo mercurial wget
$ sudo zypper install python gcc gcc-c++ libtool
$ subversion git chrpath automake
$ help2man diffstat texinfo mercurial wget
</literallayout>
</section>
@@ -332,13 +332,13 @@
</para>
<para>
Continue with the following command to build an OS image for the target, which is
<filename>core-image-sato</filename> in this example.
<filename>poky-image-sato</filename> in this example.
For information on the <filename>&dash;k</filename> option use the
<filename>bitbake &dash;&dash;help</filename> command or see
<ulink url='http://www.yoctoproject.org/docs/poky-ref-manual/poky-ref-manual.html#usingpoky-components-bitbake'>
BitBake</ulink> section in the Poky Reference Manual.
<literallayout class='monospaced'>
$ bitbake -k core-image-sato
$ bitbake -k poky-image-sato
</literallayout>
<note><para>
BitBake requires Python 2.6 or 2.7. For more information on this requirement,
@@ -425,7 +425,7 @@
</para>
<literallayout class='monospaced'>
yocto-eglibc&lt;<emphasis>host_system</emphasis>&gt;-&lt;<emphasis>arch</emphasis>&gt;-toolchain-gmae-&lt;<emphasis>release</emphasis>&gt;.tar.bz2
yocto-eglibc&lt;<emphasis>host_system</emphasis>&gt;-&lt;<emphasis>arch</emphasis>&gt;-toolchain-sdk-&lt;<emphasis>release</emphasis>&gt;.tar.bz2
Where:
&lt;<emphasis>host_system</emphasis>&gt; is a string representing your development system:
@@ -443,11 +443,11 @@
</para>
<literallayout class='monospaced'>
yocto-eglibc-x86_64-i686-toolchain-gmae-1.0.tar.bz2
yocto-eglibc-x86_64-i686-toolchain-sdk-1.0.tar.bz2
</literallayout>
<para>
The toolchain tarballs are self-contained and must be installed into <filename>/opt/poky</filename>.
The toolchain tarballs are self-contained and should be installed into <filename>/opt/poky</filename>.
The following commands show how you install the toolchain tarball given a 64-bit development host system
and a 32-bit target architecture.
</para>
@@ -455,7 +455,7 @@
<para>
<literallayout class='monospaced'>
$ cd /
$ sudo tar -xvjf yocto-eglibc-x86_64-i686-toolchain-gmae-1.0.tar.bz2
$ sudo tar -xvjf yocto-eglibc-x86_64-i686-toolchain-sdk-1.0.tar.bz2
</literallayout>
</para>
</section>
@@ -471,19 +471,15 @@
</para>
<para>
Most kernel files have one of the following forms:
Most kernel files have the following form:
</para>
<literallayout class='monospaced'>
*zImage-&lt;<emphasis>kernel-rev</emphasis>&gt;-qemu&lt;<emphasis>arch</emphasis>&gt;*.bin
vmlinux-&lt;<emphasis>kernel-rev</emphasis>&gt;-&lt;<emphasis>arch</emphasis>&gt;*.bin
*zImage*qemu&lt;<emphasis>arch</emphasis>&gt;*.bin
Where:
&lt;<emphasis>arch</emphasis>&gt; is a string representing the target architecture:
x86, x86-64, ppc, mips, or arm.
&lt;<emphasis>kernel-rev</emphasis>&gt; is the base Linux kernel revision
(e.g. 2.6.37).
</literallayout>
</section>
@@ -497,7 +493,7 @@
</para>
<literallayout class='monospaced'>
yocto-image-&lt;<emphasis>profile</emphasis>&gt;-qemu&lt;<emphasis>arch</emphasis>&gt;.rootfs.ext3.bz2
yocto-image-&lt;<emphasis>profile</emphasis>&gt;-qemu&lt;<emphasis>arch</emphasis>&gt;.rootfs.ext3
yocto-image-&lt;<emphasis>profile</emphasis>&gt;-qemu&lt;<emphasis>arch</emphasis>&gt;.rootfs.tar.bz2
Where:
@@ -546,15 +542,13 @@
<para>
Continuing with the example, the following two commands setup the emulation
environment and launch QEMU.
This example assumes the root filesystem tarball has been downloaded and expanded, and
that the kernel and filesystem are for a 32-bit target architecture.
environment and launch QEMU.
The kernel and filesystem are for a 32-bit target architecture.
</para>
<literallayout class='monospaced'>
$ source /opt/poky/1.0/environment-setup-i686-poky-linux
$ poky-qemu qemux86 bzImage-2.6.37-qemux86-1.0.bin \
yocto-image-sato-qemux86-1.0.rootfs.ext3
$ source /opt/poky/environment-setup-i686-poky-linux
$ poky-qemu qemux86 zImage-2.6.34-qemux86-1.0.bin yocto-image-sdk-qemux86-1.0.rootfs.ext3
</literallayout>
<para>

View File

@@ -1,6 +1,6 @@
DESCRIPTION = "FarSight is an audio/video conferencing framework specifically designed for Instant Messengers."
HOMEPAGE = "http://farsight.sf.net"
SRC_URI = "http://farsight.freedesktop.org/releases/farsight2/${BPN}-${PV}.tar.gz"
SRC_URI = "http://farsight.freedesktop.org/releases/farsight2/${P}.tar.gz"
LICENSE = "GPLv2.1"
DEPENDS = "libnice glib-2.0 libxml2 zlib dbus gstreamer gst-plugins-base"

View File

@@ -5,6 +5,6 @@ LICENSE = "LGPL"
DEPENDS = "glib-2.0 gnutls libcheck"
PR = "r2"
SRC_URI = "http://ftp.imendio.com/pub/imendio/${BPN}/src/${BPN}-${PV}.tar.bz2"
SRC_URI = "http://ftp.imendio.com/pub/imendio/${PN}/src/${PN}-${PV}.tar.bz2"
inherit autotools pkgconfig

View File

@@ -2,7 +2,7 @@ DEPENDS = "libopensync (>= 0.36)"
DESCRIPTION ?= "OpenSync plugin"
SRC_URI = "http://opensync.org/download/releases/${PV}/${BPN}-${PV}.tar.bz2"
SRC_URI = "http://opensync.org/download/releases/${PV}/${P}.tar.bz2"
inherit cmake

View File

@@ -5,7 +5,7 @@ HOMEPAGE = "http://telepathy.freedesktop.org/wiki/"
DEPENDS = "glib-2.0 dbus telepathy-glib farsight2"
LICENSE = "LGPLv2"
SRC_URI = "http://telepathy.freedesktop.org/releases/telepathy-farsight/${BPN}-${PV}.tar.gz \
SRC_URI = "http://telepathy.freedesktop.org/releases/telepathy-farsight/${P}.tar.gz \
"
inherit autotools

View File

@@ -5,7 +5,7 @@ DEPENDS = "glib-2.0 dbus loudmouth telepathy-glib dbus-glib"
LICENSE = "LGPL"
# gabble.manager needs to get regenerated every release, so please don't copy it over blindly
SRC_URI = "http://telepathy.freedesktop.org/releases/telepathy-gabble/${BPN}-${PV}.tar.gz \
SRC_URI = "http://telepathy.freedesktop.org/releases/telepathy-gabble/${P}.tar.gz \
file://gabble.manager"
inherit autotools pkgconfig

View File

@@ -7,7 +7,7 @@ LICENSE = "GPLv2"
DEPENDS = "libxml2 sed-native expat"
SRC_URI = "${SOURCEFORGE_MIRROR}/wbxmllib/${BPN}-${PV}.tar.gz \
SRC_URI = "${SOURCEFORGE_MIRROR}/wbxmllib/${P}.tar.gz \
file://no-doc-install.patch;patch=1"
inherit autotools pkgconfig

View File

@@ -4,7 +4,7 @@ DEPENDS = "gtk+"
DESCRIPTION = "gcalctool is a powerful calculator"
PR = "r2"
SRC_URI = "http://download.gnome.org/sources/${BPN}/5.7/${BPN}-${PV}.tar.gz \
SRC_URI = "http://download.gnome.org/sources/${PN}/5.7/${PN}-${PV}.tar.gz \
file://makefile-fix.diff;patch=1\
file://fix-includedir.patch;patch=1"

View File

@@ -4,7 +4,7 @@ DEPENDS = "gtk+ gnome-doc-utils"
DESCRIPTION = "gcalctool is a powerful calculator"
PR = "r0"
SRC_URI = "http://download.gnome.org/sources/${BPN}/5.8/${BPN}-${PV}.tar.gz \
SRC_URI = "http://download.gnome.org/sources/${PN}/5.8/${PN}-${PV}.tar.gz \
file://fix-includedir.patch;patch=1"
inherit autotools pkgconfig

View File

@@ -6,7 +6,7 @@ PR = "r2"
inherit autotools
SRC_URI = "http://burtonini.com/temp/${BPN}-${PV}.tar.gz \
SRC_URI = "http://burtonini.com/temp/${PN}-${PV}.tar.gz \
file://gtkstylus.sh"
do_install_append() {

View File

@@ -9,6 +9,6 @@ SRC_URI = "${SOURCEFORGE_MIRROR}/wvware/wv-${PV}.tar.gz \
inherit autotools pkgconfig
S = "${WORKDIR}/${BPN}-${PV}"
S = "${WORKDIR}/${PN}-${PV}"
EXTRA_OECONF = ""

View File

@@ -3,7 +3,7 @@ LICENSE = "GPL"
DEPENDS = "libxml2 glib-2.0 gtk+ loudmouth libglade"
PR = "r4"
SRC_URI = "http://jabberstudio.2nw.net/${BPN}/${BPN}-${PV}.tar.gz \
SRC_URI = "http://jabberstudio.2nw.net/${PN}/${PN}-${PV}.tar.gz \
file://fix-configure.patch;patch=1 \
file://fix-desktop-file.patch;patch=0 \
file://gcc4.patch;patch=1"

View File

@@ -7,7 +7,7 @@ PV = "0.3+git${SRCPV}"
LIC_FILES_CHKSUM = "file://configure.ac;endline=7;md5=3c4e087662e37f10e469425f3a0ad225"
SRC_URI = "git://git.yoctoproject.org/${BPN};protocol=git"
SRC_URI = "git://git.yoctoproject.org/${PN};protocol=git"
S = "${WORKDIR}/git"
inherit autotools pkgconfig

View File

@@ -2,7 +2,7 @@ DESCRIPTION = "Poppler is a PDF rendering library based on the xpdf-3.0 code bas
LICENSE = "Adobe"
PR = "r0"
SRC_URI = "http://poppler.freedesktop.org/${BPN}-${PV}.tar.gz"
SRC_URI = "http://poppler.freedesktop.org/${PN}-${PV}.tar.gz"
do_compile() {
}

View File

@@ -3,7 +3,7 @@ DEPENDS = "fontconfig jpeg zlib gtk+ cairo"
LICENSE = "GPL"
PR = "r1"
SRC_URI = "http://poppler.freedesktop.org/${BPN}-${PV}.tar.gz"
SRC_URI = "http://poppler.freedesktop.org/${PN}-${PV}.tar.gz"
inherit autotools pkgconfig

View File

@@ -0,0 +1,15 @@
#
# Copyright (C) 2010 Intel Corporation.
#
require recipes-core/images/core-image-directdisk.inc
DESCRIPTION = "Bootable Minimal Real-Time Direct Disk Image"
ROOTFS = "${DEPLOY_DIR_IMAGE}/core-image-minimal-rt-${MACHINE}.ext3"
LICENSE = "MIT"
do_bootdirectdisk[depends] += "core-image-minimal-rt:do_rootfs"

View File

@@ -0,0 +1,15 @@
#
# Copyright (C) 2010 Intel Corporation.
#
DESCRIPTION = "Bootable Live Minimal Real-Time Linux Image"
require recipes-core/images/core-image-live.inc
LABELS += "boot install"
ROOTFS = "${DEPLOY_DIR_IMAGE}/core-image-minimal-rt-${MACHINE}.ext3"
LICENSE = "MIT"
do_bootimg[depends] += "core-image-minimal-rt:do_rootfs"

View File

@@ -12,8 +12,6 @@ LOCALCONF_VERSION = "1"
DISTRO_FEATURES_append = " largefile"
PREFERRED_VERSION_linux-yocto ?= "2.6.37+git%"
SDK_NAME = "${DISTRO}-${TCLIBC}-${SDK_ARCH}-${TARGET_ARCH}"
SDKPATH = "/opt/${DISTRO}/${SDK_VERSION}"

View File

@@ -2,10 +2,10 @@
CONF_VERSION = "1"
# Uncomment and change to cache the files Poky downloads in an alternative
# location, default is ${TOPDIR}/downloads
# location, default it ${TOPDIR}/downloads
#DL_DIR ?= "${TOPDIR}/downloads"
# Uncomment and change to cache Poky's built staging output in an alternative
# location, default is ${TOPDIR}/sstate-cache
# location, default ${TOPDIR}/sstate-cache
#SSTATE_DIR ?= "${TOPDIR}/sstate-cache"
# Uncomment and set to allow bitbake to execute multiple tasks at once.
@@ -37,8 +37,6 @@ DISTRO ?= "poky"
# For bleeding edge / experimental / unstable package versions
# DISTRO ?= "poky-bleeding"
# BBMASK is a regular expression that can be used to tell BitBake to ignore
# certain recipes.
BBMASK = ""
# EXTRA_IMAGE_FEATURES allows extra packages to be added to the generated images
@@ -144,17 +142,22 @@ PACKAGE_DEBUG_SPLIT_STYLE = '.debug'
# Uncomment this if you want BitBake to emit the log if a build fails.
BBINCLUDELOGS = "yes"
# Set this if you wish to make pkgconfig libraries from your system available
# for native builds. Combined with extra ASSUME_PROVIDEDs this can allow
# native builds of applications like oprofileui-native (unsupported feature).
#EXTRA_NATIVE_PKGCONFIG_PATH = ":/usr/lib/pkgconfig"
#ASSUME_PROVIDED += "gtk+-native libglade-native"
ENABLE_BINARY_LOCALE_GENERATION = "1"
# The architecture to build SDK items for, by setting this you can build SDK
# packages for architectures other than the host i.e. building i686 packages
# on an x86_64 host.
# Supported values are i686 and x86_64
#SDKMACHINE ?= "i686"
# The build system can check data caches for prebuilt data objects before
# it builds the data itself. This can be a filesystem directory, or a remote url
# such as http or ftp. These would contain the sstate-cache results from previous
# builds (possibly from other machines). This variable works like fetcher
# MIRRORS/PREMIRRORS and points to the cache locations to check for the shared objects.
# Poky can try and fetch packaged-staging packages from a http, https or ftp
# mirror. Set this variable to the root of a pstage directory on a server.
#SSTATE_MIRRORS ?= "\
#file://.* http://someserver.tld/share/sstate/ \n \
#file://.* file:///some/local/dir/sstate/"
@@ -176,12 +179,6 @@ BBINCLUDELOGS = "yes"
#case, which will take much time.
#TEST_SERIALIZE = "1"
# ENABLE_BINARY_LOCALE_GENERATION controls the generation of binary locale
# packages at build time using qemu-native. Disabling it (by setting it to 0)
# will save some build time at the expense of breaking i18n on devices with
# less than 128MB RAM.
ENABLE_BINARY_LOCALE_GENERATION = "1"
# Set GLIBC_GENERATE_LOCALES to the locales you wish to generate should you not
# wish to perform the time-consuming step of generating all LIBC locales.
# NOTE: If removing en_US.UTF-8 you will also need to uncomment, and set
@@ -192,10 +189,10 @@ ENABLE_BINARY_LOCALE_GENERATION = "1"
#IMAGE_LINGUAS ?= "en-gb"
#LIMIT_BUILT_LOCALES ?= "POSIX en_GB"
# This value is currently used by pseudo to determine if the recipe should
# This value is currently used by PSEUDO to determine if the recipe should
# build both the 32-bit and 64-bit wrapper libraries on a 64-bit build system.
#
# Pseudo will attempt to determine if a 32-bit wrapper is necessary, but
# PSEUDO will attempt to determine if a 32-bit wrapper is necessary, but
# it doesn't always guess properly. If you have 32-bit executables on
# your 64-bit build system, you likely want to set this to "0",
# otherwise you could end up with incorrect file attributes on the
@@ -205,26 +202,15 @@ ENABLE_BINARY_LOCALE_GENERATION = "1"
# out if that is desired
NO32LIBS = "1"
# If you do not use (or have installed) xterm you will need to
# If you do not use (or have installed) gnome-terminal you will need to
# uncomment these variables and set them to the terminal you wish to use
# when resolving patches which cannot be applied
# Supported shell prefixes for *_TERMCMD and *_TERMCMDRUN ARE:
# GNOME, SCREEN, XTERM and KONSOLE
# Note: currently, Konsole support only works for KDE 3.x due to the way
# newer Konsole versions behave
#TERMCMD = "${XTERM_TERMCMD}"
#TERMCMDRUN = "${XTERM_TERMCMDRUN}"
# Alternatively, if you prefer you can disable patch resolution:
#PATCHRESOLVE = "noop"
#TERMCMD = "${KONSOLE_TERMCMD}"
#TERMCMDRUN = "${KONSOLE_TERMCMDRUN}"
# The network based PR service host and port
#PRSERV_HOST = "localhost"
#PRSERV_PORT = "8585"
# Uncomment this if your host distribution provides the help2man tool.
#ASSUME_PROVIDED += "help2man-native"
# Uncomment the following lines to enable multilib builds
#require conf/multilib.conf
#MULTILIBS = "multilib:lib32"
#DEFAULTTUNE_virtclass-multilib-lib32 = "x86"

View File

@@ -3,6 +3,8 @@
#@DESCRIPTION: Machine configuration for Intel Atom based PCs. Currently supported machines are the Asus eee901, Acer Aspire One, Toshiba NB305, and Intel BlackSand development board.
TARGET_ARCH = "i586"
include conf/machine/include/tune-atom.inc
MACHINE_FEATURES = "kernel26 screen keyboard pci usbhost ext2 ext3 x86 wifi \
@@ -28,7 +30,7 @@ XSERVER ?= "xserver-xf86-dri-lite \
MACHINE_EXTRA_RRECOMMENDS = "kernel-modules eee-acpi-scripts"
IMAGE_FSTYPES ?= "ext3 cpio.gz live"
IMAGE_FSTYPES ?= "ext3 cpio.gz"
APPEND += "usbcore.autosuspend=1"

View File

@@ -1,6 +1,7 @@
#@TYPE: Machine
#@NAME: Beagleboard machine
#@DESCRIPTION: Machine configuration for the http://beagleboard.org/ board
TARGET_ARCH = "arm"
PREFERRED_PROVIDER_virtual/xserver = "xserver-xf86-lite"
XSERVER = "xserver-xf86-lite \

View File

@@ -1,7 +1,8 @@
#@TYPE: Machine
#@DESCRIPTION: Machine configuration for running
TARGET_FPU = ""
TARGET_ARCH = "powerpc"
TARGET_FPU = "spe"
require conf/machine/include/tune-ppc603e.inc
@@ -16,6 +17,7 @@ PREFERRED_PROVIDER_virtual/kernel = "linux-yocto"
PREFERRED_PROVIDER_virtual/xserver = "xserver-kdrive"
XSERVER = "xserver-kdrive-fbdev"
UBOOT_ENTRYPOINT = "0x00000000"
KERNEL_DEVICETREE = "${S}/arch/powerpc/boot/dts/mpc8315erdb.dts"

View File

@@ -2,6 +2,8 @@
#@NAME: mti_malta32_be MIPS
#@DESCRIPTION: mti_malta32_be
TARGET_ARCH = "mips"
require conf/machine/include/tune-mips32.inc
MACHINE_FEATURES = "kernel26 screen keyboard pci usbhost ext2 ext3 \

View File

@@ -3,11 +3,11 @@ KMACHINE_routerstationpro = "yocto/standard/routerstationpro"
KMACHINE_mpc8315e-rdb = "yocto/standard/fsl-mpc8315e-rdb"
KMACHINE_beagleboard = "yocto/standard/beagleboard"
SRCREV_machine_emenlow = "398d5adac19cb411cd80753e177769f6a666a7e7"
SRCREV_machine_atom-pc = "fce17f046d3756045e4dfb49221d1cf60fcae329"
SRCREV_machine_routerstationpro = "8f84c1aec0907766ab6d6ac79fcc3b7b9ce79b70"
SRCREV_machine_mpc8315e-rdb = "bda049366dad5fc3c7ba229cd4633992581e7a1f"
SRCREV_machine_beagleboard = "3ddb22772862a8223640fa97580569924f51bddc"
SRCREV_machine_emenlow = "cc5662b9bec39205074c13f51ac4caba4af0afe7"
SRCREV_machine_atom-pc = "687233649bbe0ec4ef26c2db4e369fecb1237f6f"
SRCREV_machine_routerstationpro = "6214197a40b8fcb97dfad5b386d64384ce302b81"
SRCREV_machine_mpc8315e-rdb = "e79b560f5bb709448d81e51609c0ce72253310fc"
SRCREV_machine_beagleboard = "83544c00cd60f5842683d4b89a16a832271b599e"
COMPATIBLE_MACHINE_mpc8315e-rdb = "mpc8315e-rdb"
COMPATIBLE_MACHINE_routerstationpro = "routerstationpro"

View File

@@ -1,15 +0,0 @@
KMACHINE_atom-pc = "yocto/standard/common-pc/atom-pc"
KMACHINE_routerstationpro = "yocto/standard/routerstationpro"
KMACHINE_mpc8315e-rdb = "yocto/standard/fsl-mpc8315e-rdb"
KMACHINE_beagleboard = "yocto/standard/beagleboard"
SRCREV_machine_emenlow = "374340f311d7b035451f45e29bab5e1b8c058ce8"
SRCREV_machine_atom-pc = "fdd23ed909594ba6c32c894ee6536cf823b0377f"
SRCREV_machine_routerstationpro = "fd6d538850a28413e3c4867b905c56039773df3a"
SRCREV_machine_mpc8315e-rdb = "5d3c0b06071abd663c7790df141060ae462660a9"
SRCREV_machine_beagleboard = "32631d785b0b907e5c14435ef7531ed041ab8c64"
# COMPATIBLE_MACHINE_mpc8315e-rdb = "mpc8315e-rdb"
# COMPATIBLE_MACHINE_routerstationpro = "routerstationpro"
# COMPATIBLE_MACHINE_beagleboard = "beagleboard"
# COMPATIBLE_MACHINE_atom-pc = "atom-pc"

View File

@@ -2,6 +2,7 @@
# This class is used for architecture independent recipes/data files (usally scripts)
#
BASE_PACKAGE_ARCH = "all"
PACKAGE_ARCH = "all"
# No need for virtual/libc or a cross compiler

View File

@@ -5,11 +5,11 @@ def autotools_dep_prepend(d):
pn = bb.data.getVar('PN', d, 1)
deps = ''
if pn in ['autoconf-native', 'automake-native', 'help2man-native']:
if pn in ['autoconf-native', 'automake-native']:
return deps
deps += 'autoconf-native automake-native help2man-native '
deps += 'autoconf-native automake-native '
if not pn in ['libtool', 'libtool-native'] and not pn.endswith("libtool-cross"):
if not pn in ['libtool', 'libtool-native', 'libtool-cross']:
deps += 'libtool-native '
if not bb.data.inherits_class('native', d) \
and not bb.data.inherits_class('cross', d) \
@@ -111,13 +111,8 @@ autotools_do_configure() {
if [ -d ${STAGING_DATADIR_NATIVE}/aclocal-$AUTOV ]; then
acpaths="$acpaths -I${STAGING_DATADIR_NATIVE}/aclocal-$AUTOV"
fi
# The aclocal directory could get modified by other processes
# uninstalling data from the sysroot. See Yocto #861 for details.
# We avoid this by taking a copy here and then files cannot disappear.
if [ -d ${STAGING_DATADIR}/aclocal ]; then
mkdir -p ${B}/aclocal-copy/
cp ${STAGING_DATADIR}/aclocal/* ${B}/aclocal-copy/
acpaths="$acpaths -I ${B}/aclocal-copy/"
acpaths="$acpaths -I ${STAGING_DATADIR}/aclocal"
fi
# autoreconf is too shy to overwrite aclocal.m4 if it doesn't look
# like it was auto-generated. Work around this by blowing it away

View File

@@ -133,13 +133,6 @@ def generate_git_config(e):
f.write(proxy_command)
f.close
def pkgarch_mapping(d):
# Compatibility mappings of TUNE_PKGARCH (opt in)
if d.getVar("PKGARCHCOMPAT_ARMV7A", True):
if d.getVar("TUNE_PKGARCH", True) == "armv7a-vfp-neon":
d.setVar("TUNE_PKGARCH", "armv7a")
addhandler base_eventhandler
python base_eventhandler() {
from bb import note, error, data
@@ -173,7 +166,7 @@ python base_eventhandler() {
if name.startswith("BuildStarted"):
bb.data.setVar( 'BB_VERSION', bb.__version__, e.data )
statusvars = ['BB_VERSION', 'TARGET_ARCH', 'TARGET_OS', 'MACHINE', 'DISTRO', 'DISTRO_VERSION','TUNE_FEATURES', 'TARGET_FPU']
statusvars = ['BB_VERSION', 'TARGET_ARCH', 'TARGET_OS', 'MACHINE', 'DISTRO', 'DISTRO_VERSION','TARGET_FPU']
statuslines = ["%-17s = \"%s\"" % (i, bb.data.getVar(i, e.data, 1) or '') for i in statusvars]
layers = (data.getVar("BBLAYERS", e.data, 1) or "").split()
@@ -210,7 +203,6 @@ python base_eventhandler() {
if name == "ConfigParsed":
generate_git_config(e)
pkgarch_mapping(e.data)
if not data in e.__dict__:
return
@@ -362,12 +354,6 @@ python () {
depends = depends + " xz-native:do_populate_sysroot"
bb.data.setVarFlag('do_unpack', 'depends', depends, d)
# unzip-native should already be staged before unpacking ZIP recipes
if ".zip" in srcuri:
depends = bb.data.getVarFlag('do_unpack', 'depends', d) or ""
depends = depends + " unzip-native:do_populate_sysroot"
bb.data.setVarFlag('do_unpack', 'depends', depends, d)
# 'multimachine' handling
mach_arch = bb.data.getVar('MACHINE_ARCH', d, 1)
pkg_arch = bb.data.getVar('PACKAGE_ARCH', d, 1)
@@ -407,10 +393,20 @@ python () {
# if multiple differences are present?
# Look through PACKAGE_ARCHS for the priority order?
if pkgarch and pkgarch == mach_arch:
bb.data.setVar('PACKAGE_ARCH', "${MACHINE_ARCH}", d)
bb.data.setVar('PACAKGE_ARCH', "${MACHINE_ARCH}", d)
bb.warn("Recipe %s is marked as only being architecture specific but seems to have machine specific packages?! The recipe may as well mark itself as machine specific directly." % d.getVar("PN", True))
}
def check_gcc3(data):
gcc3_versions = 'gcc-3.4.6 gcc-3.4.7 gcc-3.4 gcc34 gcc-3.4.4 gcc-3.3 gcc33 gcc-3.3.6 gcc-3.2 gcc32'
for gcc3 in gcc3_versions.split():
if check_app_exists(gcc3, data):
return gcc3
return False
addtask cleansstate after do_clean
python do_cleansstate() {
sstate_clean_cachefiles(d)

View File

@@ -0,0 +1,20 @@
inherit base package rpm_core
SPECFILE="${RPMBUILDPATH}/SPECS/${PN}.spec"
base_srpm_do_unpack() {
test -e ${SRPMFILE} || die "Source rpm \"${SRPMFILE}\"does not exist"
if ! test -e ${SPECFILE}; then
${RPM} -i ${SRPMFILE}
fi
test -e ${SPECFILE} || die "Spec file \"${SPECFILE}\" does not exist"
${RPMBUILD} -bp ${SPECFILE}
}
base_srpm_do_compile() {
${RPMBUILD} -bc ${SPECFILE}
}
base_srpm_do_install() {
${RPMBUILD} -bi ${SPECFILE}
}

21
meta/classes/ccdv.bbclass Normal file
View File

@@ -0,0 +1,21 @@
python () {
if bb.data.getVar('PN', d, 1) in ['ccdv-native']:
if not bb.data.getVar('INHIBIT_DEFAULT_DEPS', d, 1):
bb.data.setVar("DEPENDS", '%s %s' % ("ccdv-native", bb.data.getVar("DEPENDS", d, 1) or ""), d)
bb.data.setVar("CC", '%s %s' % ("ccdv", bb.data.getVar("CC", d, 1) or ""), d)
bb.data.setVar("BUILD_CC", '%s %s' % ("ccdv", bb.data.getVar("BUILD_CC", d, 1) or ""), d)
bb.data.setVar("CCLD", '%s %s' % ("ccdv", bb.data.getVar("CCLD", d, 1) or ""), d)
}
def quiet_libtool(bb,d):
deps = (bb.data.getVar('DEPENDS', d, 1) or "").split()
if 'libtool-cross' in deps:
return "'LIBTOOL=${STAGING_BINDIR_NATIVE}/${HOST_SYS}-libtool --silent'"
elif 'libtool-native' in deps:
return "'LIBTOOL=${B}/${HOST_SYS}-libtool --silent'"
else:
return ""
CCDV = "ccdv"
EXTRA_OEMAKE_append = " ${@quiet_libtool(bb,d)}"
MAKE += "-s"

View File

@@ -57,7 +57,7 @@ set( CMAKE_INSTALL_RPATH ${OECMAKE_RPATH} )
set( CMAKE_MODULE_PATH ${STAGING_DATADIR}/cmake/Modules/ )
# add for non /usr/lib libdir, e.g. /usr/lib64
set( CMAKE_LIBRARY_PATH ${libdir} )
LIST(APPEND CMAKE_SYSTEM_LIBRARY_PATH ${libdir})
EOF
}

View File

@@ -35,9 +35,6 @@ cpan_do_compile () {
cpan_do_install () {
oe_runmake DESTDIR="${D}" install_vendor
for PERLSCRIPT in `grep -rIEl '#!${bindir}/perl-native.*/perl' ${D}`; do
sed -i -e 's|^#!${bindir}/perl-native.*/perl|#!/usr/bin/env nativeperl|' $PERLSCRIPT
done
}
EXPORT_FUNCTIONS do_configure do_compile do_install

View File

@@ -9,12 +9,15 @@
# or indirectly via dependency. No need to be in 'world'.
EXCLUDE_FROM_WORLD = "1"
STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${SDK_ARCH}${SDK_VENDOR}-${SDK_OS}:${STAGING_DIR_NATIVE}${bindir_native}/${TUNE_PKGARCH}${TARGET_VENDOR}-${TARGET_OS}"
STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${SDK_ARCH}${SDK_VENDOR}-${SDK_OS}:${STAGING_DIR_NATIVE}${bindir_native}/${OLD_BASE_PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
#
# Update BASE_PACKAGE_ARCH and PACKAGE_ARCHS
#
PACKAGE_ARCH = "${SDK_ARCH}-nativesdk"
OLD_PACKAGE_ARCH := "${PACKAGE_ARCH}"
OLD_MULTIMACH_TARGET_SYS := "${MULTIMACH_TARGET_SYS}"
OLD_BASE_PACKAGE_ARCH := ${BASE_PACKAGE_ARCH}
BASE_PACKAGE_ARCH = "${SDK_ARCH}-nativesdk"
python () {
archs = bb.data.getVar('PACKAGE_ARCHS', d, True).split()
sdkarchs = []
@@ -63,12 +66,12 @@ target_exec_prefix := "${exec_prefix}"
base_prefix = "${SDKPATHNATIVE}"
prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
exec_prefix = "${SDKPATHNATIVE}${prefix_nativesdk}"
bindir = "${exec_prefix}/bin/${TUNE_PKGARCH}${TARGET_VENDOR}-${TARGET_OS}"
bindir = "${exec_prefix}/bin/${OLD_PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
sbindir = "${bindir}"
base_bindir = "${bindir}"
base_sbindir = "${bindir}"
libdir = "${exec_prefix}/lib/${TUNE_PKGARCH}${TARGET_VENDOR}-${TARGET_OS}"
libexecdir = "${exec_prefix}/libexec/${TUNE_PKGARCH}${TARGET_VENDOR}-${TARGET_OS}"
libdir = "${exec_prefix}/lib/${OLD_PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
libexecdir = "${exec_prefix}/libexec/${OLD_PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
FILES_${PN} = "${prefix}"
FILES_${PN}-dbg += "${prefix}/.debug \

View File

@@ -4,6 +4,14 @@ inherit relocatable
# no need for them to be a direct target of 'world'
EXCLUDE_FROM_WORLD = "1"
# Save PACKAGE_ARCH before changing HOST_ARCH
OLD_PACKAGE_ARCH := "${PACKAGE_ARCH}"
PACKAGE_ARCH = "${OLD_PACKAGE_ARCH}"
# Also save BASE_PACKAGE_ARCH since HOST_ARCH can influence it
OLD_BASE_PACKAGE_ARCH := "${BASE_PACKAGE_ARCH}"
BASE_PACKAGE_ARCH = "${OLD_BASE_PACKAGE_ARCH}"
BASEPKG_HOST_SYS = "${HOST_ARCH}${HOST_VENDOR}-${HOST_OS}"
PACKAGES = ""
HOST_ARCH = "${BUILD_ARCH}"
@@ -12,10 +20,10 @@ HOST_OS = "${BUILD_OS}"
HOST_PREFIX = "${BUILD_PREFIX}"
HOST_CC_ARCH = "${BUILD_CC_ARCH}"
STAGING_DIR_HOST = "${STAGING_DIR}/${HOST_ARCH}${HOST_VENDOR}-${HOST_OS}"
STAGING_DIR_HOST = "${STAGING_DIR}/${BASEPKG_HOST_SYS}"
export PKG_CONFIG_DIR = "${STAGING_DIR}/${TUNE_PKGARCH}${TARGET_VENDOR}-${TARGET_OS}${libdir}/pkgconfig"
export PKG_CONFIG_SYSROOT_DIR = "${STAGING_DIR}/${TUNE_PKGARCH}${TARGET_VENDOR}-${TARGET_OS}"
export PKG_CONFIG_DIR = "${STAGING_DIR}/${BASE_PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}${libdir}/pkgconfig"
export PKG_CONFIG_SYSROOT_DIR = "${STAGING_DIR}/${BASE_PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
CPPFLAGS = "${BUILD_CPPFLAGS}"
CFLAGS = "${BUILD_CFLAGS}"
@@ -30,12 +38,11 @@ DEPENDS_GETTEXT = "gettext-native"
# Path mangling needed by the cross packaging
# Note that we use := here to ensure that libdir and includedir are
# target paths.
target_base_prefix := "${base_prefix}"
target_libdir := "${libdir}"
target_includedir := "${includedir}"
target_base_libdir := "${base_libdir}"
target_prefix := "${prefix}"
target_exec_prefix := "${exec_prefix}"
target_base_libdir = "${target_base_prefix}/${baselib}"
target_libdir = "${target_exec_prefix}/${baselib}"
target_includedir := "${includedir}"
# Overrides for paths
CROSS_TARGET_SYS_DIR = "${MULTIMACH_TARGET_SYS}"
@@ -54,3 +61,4 @@ do_populate_sysroot[sstate-inputdirs] = "${SYSROOT_DESTDIR}/${STAGING_DIR_NATIVE
do_install () {
oe_runmake 'DESTDIR=${D}' install
}

View File

@@ -1,8 +1,8 @@
inherit cross
PACKAGE_ARCH = "${SDK_ARCH}"
BASE_PACKAGE_ARCH = "${SDK_ARCH}"
PACKAGE_ARCH = "${BASE_PACKAGE_ARCH}"
STAGING_DIR_TARGET = "${STAGING_DIR}/${SDK_ARCH}-nativesdk${SDK_VENDOR}-${SDK_OS}"
STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${TARGET_ARCH}${TARGET_VENDOR}-${TARGET_OS}"
TARGET_ARCH = "${SDK_ARCH}"
TARGET_VENDOR = "${SDK_VENDOR}"

View File

@@ -0,0 +1,5 @@
# gcc-3.4 blows up in gtktext with -frename-registers on arm-linux
python () {
cflags = (bb.data.getVar('CFLAGS', d, 1) or '').replace('-frename-registers', '')
bb.data.setVar('CFLAGS', cflags, d)
}

View File

@@ -7,7 +7,7 @@ def gettext_dependencies(d):
def gettext_oeconf(d):
# Remove the NLS bits if USE_NLS is no.
if d.getVar('USE_NLS', True) == 'no' and not oe.utils.inherits(d, 'native', 'nativesdk', 'cross', 'cross-canadian'):
if d.getVar('USE_NLS', True) == 'no':
return '--disable-nls'
return "--enable-nls"

View File

@@ -1,13 +0,0 @@
AUTO_SYSLINUXCFG = "1"
INITRD = "${DEPLOY_DIR_IMAGE}/core-image-minimal-initramfs-${MACHINE}.cpio.gz"
APPEND += "root=/dev/ram0 "
TIMEOUT = "10"
LABELS += "boot install"
ROOTFS = "${DEPLOY_DIR_IMAGE}/${IMAGE_BASENAME}-${MACHINE}.ext3"
do_bootimg[depends] += "core-image-minimal-initramfs:do_rootfs"
do_bootimg[depends] += "${IMAGE_BASENAME}:do_rootfs"
inherit bootimg

View File

@@ -5,15 +5,13 @@ inherit imagetest-${IMAGETEST}
LICENSE = "MIT"
PACKAGES = ""
MULTILIB_IMAGE_INSTALL ?= ""
RDEPENDS += "${IMAGE_INSTALL} ${LINGUAS_INSTALL} ${MULTILIB_IMAGE_INSTALL}"
RDEPENDS += "${IMAGE_INSTALL} ${LINGUAS_INSTALL}"
INHIBIT_DEFAULT_DEPS = "1"
# "export IMAGE_BASENAME" not supported at this time
IMAGE_BASENAME[export] = "1"
export PACKAGE_INSTALL ?= "${IMAGE_INSTALL}"
export MULTILIB_PACKAGE_INSTALL ?= "${MULTILIB_IMAGE_INSTALL}"
PACKAGE_INSTALL_ATTEMPTONLY ?= ""
# Images are generally built explicitly, do not need to be part of world.
@@ -31,9 +29,6 @@ LDCONFIGDEPEND_libc-uclibc = ""
do_rootfs[depends] += "makedevs-native:do_populate_sysroot virtual/fakeroot-native:do_populate_sysroot ${LDCONFIGDEPEND}"
do_rootfs[depends] += "virtual/update-alternatives-native:do_populate_sysroot update-rc.d-native:do_populate_sysroot"
IMAGE_TYPE = ${@base_contains("IMAGE_FSTYPES", "live", "live", "empty", d)}
inherit image-${IMAGE_TYPE}
python () {
deps = bb.data.getVarFlag('do_rootfs', 'depends', d) or ""
for type in (bb.data.getVar('IMAGE_FSTYPES', d, True) or "").split():
@@ -93,7 +88,6 @@ do_rootfs[umask] = 022
fakeroot do_rootfs () {
#set -x
rm -rf ${IMAGE_ROOTFS}
rm -rf ${MULTILIB_TEMP_ROOTFS}
mkdir -p ${IMAGE_ROOTFS}
mkdir -p ${DEPLOY_DIR_IMAGE}
@@ -169,55 +163,6 @@ log_check() {
done
}
MULTILIBRE_ALLOW_REP =. "${base_bindir}|${base_sbindir}|${bindir}|${sbindir}|${libexecdir}|"
MULTILIBRE_FORCE_SAME =. "${sysconfdir}|${datadir}|"
MULTILIB_CHECK_FILE = "${WORKDIR}/multilib_check.py"
MULTILIB_TEMP_ROOTFS = "${WORKDIR}/multilib"
multilib_generate_python_file() {
cat >${MULTILIB_CHECK_FILE} <<EOF
import sys, os, os.path
import re,filecmp
allow_rep=re.compile(re.sub("\|$","","${MULTILIBRE_ALLOW_REP}"))
force_same=re.compile(re.sub("\|$","","${MULTILIBRE_FORCE_SAME}"))
error_promt="Multilib check error:"
files={}
dirs=raw_input()
for dir in dirs.split():
for root, subfolers, subfiles in os.walk(dir):
for file in subfiles:
item=os.path.join(root,file)
key=str(os.path.join("/",os.path.relpath(item,dir)))
valid=True;
if files.has_key(key):
#check whether files are the same
if force_same.match(key):
if not filecmp.cmp(files[key],item):
valid=False
print("%s %s is not the same as %s\n" % (error_promt, item, files[key]))
sys.exit(1)
#check whether the file is allow to replace
elif allow_rep.match(key):
valid=True
else:
valid=False
print("%s duplicated files %s %s not allowed\n" % (error_promt, item, files[key]))
sys.exit(1)
#pass the check, add to list
if valid:
files[key]=item
EOF
}
multilib_sanity_check() {
multilib_generate_python_file
echo $@ | python ${MULTILIB_CHECK_FILE}
}
# set '*' as the rootpassword so the images
# can decide if they want it or not

View File

@@ -1,18 +1,7 @@
def get_imagecmds(d):
cmds = "\n"
old_overrides = bb.data.getVar('OVERRIDES', d, 0)
types = bb.data.getVar('IMAGE_FSTYPES', d, True).split()
# Live images will be processed via inheriting bbclass and
# does not get processed here.
# live images also depend on ext3 so ensure its present
if "live" in types:
if "ext3" not in types:
types.append("ext3")
types.remove("live")
for type in types:
for type in bb.data.getVar('IMAGE_FSTYPES', d, True).split():
localdata = bb.data.createCopy(d)
localdata.setVar('OVERRIDES', '%s:%s' % (type, old_overrides))
bb.data.update_data(localdata)
@@ -114,4 +103,4 @@ IMAGE_DEPENDS_ubi = "mtd-utils-native"
IMAGE_DEPENDS_ubifs = "mtd-utils-native"
# This variable is available to request which values are suitable for IMAGE_FSTYPES
IMAGE_TYPES = "jffs2 cramfs ext2 ext2.gz ext3 ext3.gz live squashfs squashfs-lzma ubi tar tar.gz tar.bz2 tar.xz cpio cpio.gz cpio.xz cpio.lzma"
IMAGE_TYPES = "jffs2 cramfs ext2 ext2.gz ext3 ext3.gz squashfs squashfs-lzma ubi"

View File

@@ -38,7 +38,6 @@ def package_qa_get_machine_dict():
"arm" : (40, 97, 0, True, 32),
"armeb": (40, 97, 0, False, 32),
"powerpc": (20, 0, 0, False, 32),
"powerpc64": (21, 0, 0, False, 64),
"i386": ( 3, 0, 0, True, 32),
"i486": ( 3, 0, 0, True, 32),
"i586": ( 3, 0, 0, True, 32),
@@ -143,7 +142,7 @@ def package_qa_check_rpath(file,name, d, elf, messages):
messages.append("package %s contains bad RPATH %s in file %s" % (name, line, file))
QAPATHTEST[useless-rpaths] = "package_qa_check_useless_rpaths"
def package_qa_check_useless_rpaths(file, name, d, elf, messages):
def package_qa_check_useless_rpaths(file,name, d, elf, messages):
"""
Check for RPATHs that are useless but not dangerous
"""
@@ -165,7 +164,7 @@ def package_qa_check_useless_rpaths(file, name, d, elf, messages):
if rpath == libdir or rpath == base_libdir:
# The dynamic linker searches both these places anyway. There is no point in
# looking there again.
messages.append("%s: %s contains probably-redundant RPATH %s" % (name, package_qa_clean_path(file, d), rpath))
messages.append("dynamic section contains probably-redundant RPATH %s" % rpath)
QAPATHTEST[dev-so] = "package_qa_check_dev"
def package_qa_check_dev(path, name, d, elf, messages):
@@ -571,13 +570,12 @@ Rerun configure task after fixing this. The path was '%s'""" % root)
cnf = bb.data.getVar('EXTRA_OECONF', d, True) or ""
if "gettext" not in bb.data.getVar('P', d, True) and "gcc-runtime" not in bb.data.getVar('P', d, True) and "--disable-nls" not in cnf:
ml = d.getVar("MLPREFIX", True) or ""
if bb.data.inherits_class('native', d) or bb.data.inherits_class('cross', d) or bb.data.inherits_class('crosssdk', d) or bb.data.inherits_class('nativesdk', d):
gt = "gettext-native"
elif bb.data.inherits_class('cross-canadian', d):
gt = "gettext-nativesdk"
else:
gt = "virtual/" + ml + "gettext"
gt = "virtual/gettext"
deps = bb.utils.explode_deps(bb.data.getVar('DEPENDS', d, True) or "")
if gt not in deps:
for config in configs:

View File

@@ -6,7 +6,7 @@
valid_archs = "alpha cris ia64 \
i386 x86 \
m68knommu m68k ppc powerpc powerpc64 ppc64 \
m68knommu m68k ppc powerpc ppc64 \
sparc sparc64 \
arm arm26 \
m32r mips \
@@ -24,7 +24,6 @@ def map_kernel_arch(a, d):
elif re.match('arm26$', a): return 'arm26'
elif re.match('armeb$', a): return 'arm'
elif re.match('mipsel$', a): return 'mips'
elif re.match('p(pc|owerpc)(|64)', a): return 'powerpc'
elif re.match('sh(3|4)$', a): return 'sh'
elif re.match('bfin', a): return 'blackfin'
elif re.match('microblazeel', a): return 'microblaze'
@@ -37,7 +36,7 @@ export ARCH = "${@map_kernel_arch(bb.data.getVar('TARGET_ARCH', d, 1), d)}"
def map_uboot_arch(a, d):
import re
if re.match('p(pc|owerpc)(|64)', a): return 'ppc'
if re.match('powerpc$', a): return 'ppc'
elif re.match('i.86$', a): return 'x86'
return a

View File

@@ -83,7 +83,6 @@ do_kernel_checkout[dirs] = "${S}"
addtask kernel_checkout before do_patch after do_unpack
do_kernel_configme[dirs] = "${CCACHE_DIR} ${S} ${B}"
do_kernel_configme() {
echo "[INFO] doing kernel configme"
@@ -119,6 +118,12 @@ python do_kernel_configcheck() {
bb.plain( "%s" % result )
}
# overrides the base kernel_do_configure, since we don't want all the
# defconfig processing in there
kernel_do_configure() {
yes '' | oe_runmake oldconfig
}
# Ensure that the branches (BSP and meta) are on the locatios specified by
# their SRCREV values. If they are NOT on the right commits, the branches

View File

@@ -19,7 +19,7 @@ python __anonymous () {
image = bb.data.getVar('INITRAMFS_IMAGE', d, True)
if image:
bb.data.setVar('INITRAMFS_TASK', '${INITRAMFS_IMAGE}:do_rootfs', d)
bb.data.setVar('INITRAMFS_TASK', '${INITRAMFS_IMAGE}:do_rootfs', d)
}
inherit kernel-arch deploy
@@ -181,10 +181,10 @@ sysroot_stage_all_append() {
kernel_do_configure() {
# Copy defconfig to .config if .config does not exist. This allows
# recipes to manage the .config themselves in do_configure_prepend().
if [ -f "${WORKDIR}/defconfig" ] && [ ! -f "${B}/.config" ]; then
cp "${WORKDIR}/defconfig" "${B}/.config"
if [ -f "${WORKDIR}/defconfig" ] && [ ! -f "${S}/.config" ]; then
cp "${WORKDIR}/defconfig" "${S}/.config"
fi
yes '' | oe_runmake oldconfig
yes '' | oe_runmake oldconfig
if [ ! -z "${INITRAMFS_IMAGE}" ]; then
for img in cpio.gz cpio.lzo cpio.lzma cpio.xz; do

View File

@@ -191,7 +191,7 @@ python package_do_split_gconvs () {
do_split_packages(d, locales_dir, file_regex='(.*)', output_pattern=bpn+'-localedata-%s', \
description='locale definition for %s', hook=calc_locale_deps, extra_depends='')
bb.data.setVar('PACKAGES', bb.data.getVar('PACKAGES', d) + ' ' + bb.data.getVar('MLPREFIX', d) + bpn + '-gconv', d)
bb.data.setVar('PACKAGES', bb.data.getVar('PACKAGES', d) + ' ' + bpn + '-gconv', d)
use_bin = bb.data.getVar("GLIBC_INTERNAL_USE_BINARY_LOCALE", d, 1)
@@ -266,7 +266,6 @@ python package_do_split_gconvs () {
locale_arch_options = { \
"arm": " --uint32-align=4 --little-endian ", \
"powerpc": " --uint32-align=4 --big-endian ", \
"powerpc64": " --uint32-align=4 --big-endian ", \
"mips": " --uint32-align=4 --big-endian ", \
"mipsel": " --uint32-align=4 --little-endian ", \
"i586": " --uint32-align=4 --little-endian ", \

View File

@@ -0,0 +1,53 @@
SECTION = "x11/utils"
DEPENDS += "gnu-config-native virtual/libintl xt libxi \
zip-native gtk+"
LICENSE = "MPL NPL"
SRC_URI += "file://mozconfig"
inherit gettext pkgconfig
EXTRA_OECONF = "--target=${TARGET_SYS} --host=${BUILD_SYS} \
--build=${BUILD_SYS} --prefix=${prefix}"
EXTRA_OEMAKE = "'HOST_LIBIDL_LIBS=${HOST_LIBIDL_LIBS}' \
'HOST_LIBIDL_CFLAGS=${HOST_LIBIDL_CFLAGS}'"
SELECTED_OPTIMIZATION = "-Os -fsigned-char -fno-strict-aliasing"
export CROSS_COMPILE = "1"
export MOZCONFIG = "${WORKDIR}/mozconfig"
export MOZ_OBJDIR = "${S}"
export CONFIGURE_ARGS = "${EXTRA_OECONF}"
export HOST_LIBIDL_CFLAGS = "`${HOST_LIBIDL_CONFIG} --cflags`"
export HOST_LIBIDL_LIBS = "`${HOST_LIBIDL_CONFIG} --libs`"
export HOST_LIBIDL_CONFIG = "PKG_CONFIG_SYSROOT_DIR="" PKG_CONFIG_PATH=${STAGING_LIBDIR_NATIVE}/pkgconfig pkg-config libIDL-2.0"
export HOST_CC = "${BUILD_CC}"
export HOST_CXX = "${BUILD_CXX}"
export HOST_CFLAGS = "${BUILD_CFLAGS}"
export HOST_CXXFLAGS = "${BUILD_CXXFLAGS}"
export HOST_LDFLAGS = "${BUILD_LDFLAGS}"
export HOST_RANLIB = "${BUILD_RANLIB}"
export HOST_AR = "${BUILD_AR}"
mozilla_do_configure() {
(
set -e
for cg in `find ${S} -name config.guess`; do
install -m 0755 \
${STAGING_DATADIR_NATIVE}/gnu-config/config.guess \
${STAGING_DATADIR_NATIVE}/gnu-config/config.sub \
`dirname $cg`/
done
)
oe_runmake -f client.mk ${MOZ_OBJDIR}/Makefile \
${MOZ_OBJDIR}/config.status
}
mozilla_do_compile() {
oe_runmake -f client.mk build_all
}
mozilla_do_install() {
oe_runmake DESTDIR="${D}" destdir="${D}" install
}
EXPORT_FUNCTIONS do_configure do_compile do_install

View File

@@ -1,89 +0,0 @@
python multilib_virtclass_handler () {
if not isinstance(e, bb.event.RecipePreFinalise):
return
cls = e.data.getVar("BBEXTENDCURR", True)
variant = e.data.getVar("BBEXTENDVARIANT", True)
if cls != "multilib" or not variant:
return
override = ":virtclass-multilib-" + variant
e.data.setVar("MLPREFIX", variant + "-")
e.data.setVar("PN", variant + "-" + e.data.getVar("PN", False))
e.data.setVar("SHLIBSDIR_virtclass-multilib-" + variant ,e.data.getVar("SHLIBSDIR", False) + "/" + variant)
e.data.setVar("TARGET_VENDOR_virtclass-multilib-" + variant, e.data.getVar("TARGET_VENDOR", False) + "ml" + variant)
e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override)
}
addhandler multilib_virtclass_handler
STAGINGCC_prepend = "${BBEXTENDVARIANT}-"
python __anonymous () {
variant = d.getVar("BBEXTENDVARIANT", True)
def extend_name(name):
if name.startswith("virtual/"):
subs = name.split("/", 1)[1]
if not subs.startswith(variant):
return "virtual/" + variant + "-" + subs
return name
if not name.startswith(variant):
return variant + "-" + name
return name
def map_dependencies(varname, d, suffix = ""):
if suffix:
varname = varname + "_" + suffix
deps = d.getVar(varname, True)
if not deps:
return
deps = bb.utils.explode_deps(deps)
newdeps = []
for dep in deps:
if dep.endswith(("-native", "-native-runtime")):
newdeps.append(dep)
else:
newdeps.append(extend_name(dep))
d.setVar(varname, " ".join(newdeps))
def map_variable(varname, d):
var = d.getVar(varname, True)
if not var:
return
var = var.split()
newvar = []
for v in var:
newvar.append(extend_name(v))
d.setVar(varname, " ".join(newvar))
pkgs = []
pkgrename = {}
for pkg in (d.getVar("PACKAGES", True) or "").split():
if pkg.startswith(variant):
pkgs.append(pkg)
continue
pkgrename[pkg] = extend_name(pkg)
pkgs.append(pkgrename[pkg])
if pkgrename:
d.setVar("PACKAGES", " ".join(pkgs))
for pkg in pkgrename:
for subs in ["FILES", "RDEPENDS", "RRECOMMENDS", "SUMMARY", "DESCRIPTION", "RSUGGESTS", "RPROVIDES", "RCONFLICTS", "PKG", "ALLOW_EMPTY"]:
d.renameVar("%s_%s" % (subs, pkg), "%s_%s" % (subs, pkgrename[pkg]))
map_dependencies("DEPENDS", d)
for pkg in (d.getVar("PACKAGES", True).split() + [""]):
map_dependencies("RDEPENDS", d, pkg)
map_dependencies("RRECOMMENDS", d, pkg)
map_dependencies("RSUGGESTS", d, pkg)
map_dependencies("RPROVIDES", d, pkg)
map_dependencies("RREPLACES", d, pkg)
map_dependencies("RCONFLICTS", d, pkg)
map_dependencies("PKG", d, pkg)
map_variable("PROVIDES", d)
map_variable("PACKAGES_DYNAMIC", d)
map_variable("PACKAGE_INSTALL", d)
}

View File

@@ -1,29 +0,0 @@
inherit siteinfo
# If applicable on the architecture, this routine will rename the header and add
# a unique identifier to the name for the ABI/bitsize that is being used. A wrapper will
# be generated for the architecture that knows how to call all of the ABI variants for that
# given architecture.
#
# TODO: mips64 n32 is not yet recognized in this code
# when that is identified the name of the wrapped item should be "n32" and appropriately
# determined int he if coding...
#
oe_multilib_header() {
# Do nothing on ARM, only one ABI is supported at once
if echo ${TARGET_ARCH} | grep -q arm; then
return
fi
for each_header in "$@" ; do
if [ ! -f "${D}/${includedir}/$each_header" ]; then
bberror "oe_multilib_header: Unable to find header $each_header."
continue
fi
stem=$(echo $each_header | sed 's#\.h$##')
ident=${SITEINFO_BITS}
# if mips64/n32 set ident to n32
mv ${D}/${includedir}/$each_header ${D}/${includedir}/${stem}-${ident}.h
sed -e "s#ENTER_HEADER_FILENAME_HERE#${stem}#g" ${COREBASE}/scripts/multilib_header_wrapper.h > ${D}/${includedir}/$each_header
done
}

View File

@@ -15,6 +15,10 @@ PACKAGE_ARCH = "${BUILD_ARCH}"
OECMAKE_RPATH = "${libdir}"
OECMAKE_RPATH_virtclass-native = "${libdir}"
BASE_PACKAGE_ARCH = "${BUILD_ARCH}"
BASEPKG_HOST_SYS = "${BUILD_ARCH}${BUILD_VENDOR}-${BUILD_OS}"
BASEPKG_TARGET_SYS = "${BUILD_ARCH}${BUILD_VENDOR}-${BUILD_OS}"
# When this class has packaging enabled, setting
# RPROVIDES becomes unnecessary.
RPROVIDES = "${PN}"

View File

@@ -5,9 +5,10 @@ EXCLUDE_FROM_WORLD = "1"
STAGING_BINDIR_TOOLCHAIN = "${STAGING_DIR_NATIVE}${bindir_native}/${SDK_ARCH}${SDK_VENDOR}-${SDK_OS}"
#
# Update PACKAGE_ARCH and PACKAGE_ARCHS
# Update BASE_PACKAGE_ARCH and PACKAGE_ARCHS
#
PACKAGE_ARCH = "${SDK_ARCH}-nativesdk"
OLD_PACKAGE_ARCH := ${BASE_PACKAGE_ARCH}
BASE_PACKAGE_ARCH = "${SDK_ARCH}-nativesdk"
python () {
archs = bb.data.getVar('PACKAGE_ARCHS', d, True).split()
sdkarchs = []
@@ -16,8 +17,10 @@ python () {
bb.data.setVar('PACKAGE_ARCHS', " ".join(sdkarchs), d)
}
STAGING_DIR_HOST = "${STAGING_DIR}/${MULTIMACH_HOST_SYS}"
STAGING_DIR_TARGET = "${STAGING_DIR}/${MULTIMACH_TARGET_SYS}"
#STAGING_DIR_HOST = "${STAGING_DIR}/${HOST_SYS}-nativesdk"
#STAGING_DIR_TARGET = "${STAGING_DIR}/${BASEPKG_TARGET_SYS}-nativesdk"
STAGING_DIR_HOST = "${STAGING_DIR}/${BASEPKG_HOST_SYS}"
STAGING_DIR_TARGET = "${STAGING_DIR}/${BASEPKG_TARGET_SYS}"
HOST_ARCH = "${SDK_ARCH}"
HOST_VENDOR = "${SDK_VENDOR}"

View File

@@ -0,0 +1,19 @@
HOMEPAGE = "http://www.openmoko.org"
LICENSE ?= "GPL"
OPENMOKO_RELEASE ?= "OM-2007"
OPENMOKO_MIRROR ?= "svn://svn.openmoko.org/trunk"
def openmoko_base_get_subdir(d):
openmoko, section = bb.data.getVar('SECTION', d, 1).split("/")
if section == 'base' or section == 'libs': return ""
elif section in 'apps tools pim'.split(): return "applications"
elif section == "panel-plugin": return "panel-plugins"
elif section == "inputmethods": return "inputmethods"
else: return section
SUBDIR = "${@openmoko_base_get_subdir(d)}"
SRC_URI := "${OPENMOKO_MIRROR}/src/target/${OPENMOKO_RELEASE}/${SUBDIR};module=${BPN};proto=http"
S = "${WORKDIR}/${PN}"
FILES_${PN} += "${datadir}/icons"

View File

@@ -0,0 +1,6 @@
SECTION = "openmoko/panel-plugin"
DEPENDS += "matchbox-panel-2 libmokopanelui2"
inherit openmoko2
FILES_${PN} = "${libdir}/matchbox-panel/lib*.so* ${datadir}"

View File

@@ -0,0 +1,3 @@
inherit openmoko-base autotools pkgconfig
DEPENDS_prepend = "${@["openmoko-libs ", ""][(bb.data.getVar('PN', d, 1) == 'openmoko-libs')]}"

View File

@@ -0,0 +1,31 @@
inherit autotools pkgconfig
HOMEPAGE = "http://www.openmoko.org"
OPENMOKO_RELEASE ?= "OM-2007.2"
OPENMOKO_MIRROR ?= "svn://svn.openmoko.org/trunk"
def openmoko_two_get_license(d):
openmoko, section = bb.data.getVar('SECTION', d, 1).split("/")
return "LGPL GPL".split()[section != "libs"]
def openmoko_two_get_subdir(d):
openmoko, section = bb.data.getVar('SECTION', d, 1).split("/")
if section == 'base': return ""
elif section == 'libs': return "libraries"
elif section in 'apps tools pim'.split(): return "applications"
elif section == "panel-plugin": return "panel-plugins"
elif section == "inputmethods": return "inputmethods"
elif section == "daemons": return "daemons"
elif section == "misc": return "misc"
else: return section
LICENSE = "${@openmoko_two_get_license(d)}"
SUBDIR = "${@openmoko_two_get_subdir(d)}"
SRC_URI := "${OPENMOKO_MIRROR}/src/target/${OPENMOKO_RELEASE}/${SUBDIR};module=${BPN};proto=http"
S = "${WORKDIR}/${PN}"
FILES_${PN} += "${datadir}/icons"
SVNREV = "r${SRCREV}"
#SVNREV = "${SRCDATE}"

View File

@@ -70,20 +70,6 @@ def do_split_packages(d, root, file_regex, output_pattern, description, postinst
given package, usually plugins or modules.
"""
ml = d.getVar("MLPREFIX", True)
if ml:
if not output_pattern.startswith(ml):
output_pattern = ml + output_pattern
newdeps = []
for dep in (extra_depends or "").split():
if dep.startswith(ml):
newdeps.append(dep)
else:
newdeps.append(ml + dep)
if newdeps:
extra_depends = " ".join(newdeps)
dvar = bb.data.getVar('PKGD', d, True)
packages = bb.data.getVar('PACKAGES', d, True).split()
@@ -182,17 +168,6 @@ python () {
bb.data.setVarFlag('do_package', 'deptask', " ".join(deps), d)
else:
d.setVar("PACKAGERDEPTASK", "")
multilib_archs = []
multilibs= d.getVar('MULTILIBS', True) or ""
if multilibs:
for ext in multilibs.split():
eext = ext.split(':')
if len(eext) > 1:
if eext[0] == 'multilib':
multilib_archs.append('ml' + eext[1])
d.setVar("MULTILIB_ARCHS", ' '.join(multilib_archs))
}
def splitfile(file, debugfile, debugsrcdir, d):
@@ -497,7 +472,7 @@ python fixup_perms () {
else:
return int(mode,8)
# Note uid/gid -1 has special significance in os.lchown
# Note uid/gid -1 has special significance in os.chown
def _procuid(self, uid):
if uid is None or uid == "-":
return -1
@@ -539,14 +514,14 @@ python fixup_perms () {
# Fix the permission, owner and group of path
def fix_perms(path, mode, uid, gid, dir):
if mode and not os.path.islink(path):
if mode:
#bb.note("Fixup Perms: chmod 0%o %s" % (mode, dir))
os.chmod(path, mode)
# -1 is a special value that means don't change the uid/gid
# if they are BOTH -1, don't bother to lchown
# if they are BOTH -1, don't bother to chown
if not (uid == -1 and gid == -1):
#bb.note("Fixup Perms: lchown %d:%d %s" % (uid, gid, dir))
os.lchown(path, uid, gid)
#bb.note("Fixup Perms: chown %d:%d %s" % (uid, gid, dir))
os.chown(path, uid, gid)
# Return a list of configuration files based on either the default
# files/fs-perms.txt or the contents of FILESYSTEM_PERMS_TABLES

View File

@@ -78,10 +78,14 @@ package_update_index_deb () {
return
fi
for arch in ${PACKAGE_ARCHS} ${SDK_PACKAGE_ARCHS}; do
for arch in ${PACKAGE_ARCHS}; do
sdkarch=`echo $arch | sed -e 's/${HOST_ARCH}/${SDK_ARCH}/'`
if [ -e ${DEPLOY_DIR_DEB}/$arch ]; then
debarchs="$debarchs $arch"
fi
if [ -e ${DEPLOY_DIR_DEB}/$sdkarch-nativesdk ]; then
debarchs="$debarchs $sdkarch-nativesdk"
fi
done
for arch in $debarchs; do
@@ -112,7 +116,7 @@ package_install_internal_deb () {
local archs="${INSTALL_ARCHS_DEB}"
local package_to_install="${INSTALL_PACKAGES_NORMAL_DEB}"
local package_attemptonly="${INSTALL_PACKAGES_ATTEMPTONLY_DEB}"
local package_linguas="${INSTALL_PACKAGES_LINGUAS_DEB}"
local package_lingusa="${INSTALL_PACKAGES_LINGUAS_DEB}"
local task="${INSTALL_TASK_DEB}"
rm -f ${STAGING_ETCDIR_NATIVE}/apt/sources.list.rev
@@ -151,12 +155,12 @@ package_install_internal_deb () {
# Uclibc builds don't provide this stuff..
if [ x${TARGET_OS} = "xlinux" ] || [ x${TARGET_OS} = "xlinux-gnueabi" ] ; then
if [ ! -z "${package_linguas}" ]; then
if [ ! -z "${package_lingusa}" ]; then
apt-get install glibc-localedata-i18n --force-yes --allow-unauthenticated
if [ $? -ne 0 ]; then
exit 1
fi
for i in ${package_linguas}; do
for i in ${package_lingusa}; do
apt-get install $i --force-yes --allow-unauthenticated
if [ $? -ne 0 ]; then
exit 1

View File

@@ -61,28 +61,6 @@ python package_ipk_install () {
raise bb.build.FuncFailed
}
package_tryout_install_multilib_ipk() {
#try install multilib
multilib_tryout_dirs=""
for arch in ${MULTILIB_ARCHS}; do
local target_rootfs="${MULTILIB_TEMP_ROOTFS}/${arch}"
local ipkg_args="-f ${INSTALL_CONF_IPK} -o ${target_rootfs} --force_overwrite"
local selected_pkg=""
#strip the "ml" from package_arch
local pkgarch_prefix="${arch:2}-"
for pkg in "${INSTALL_PACKAGES_MULTILIB_IPK}"; do
if [ ${pkg:0:${#pkgarch_prefix}} == ${pkgarch_prefix} ]; then
selected_pkg="${selected_pkg} ${pkg}"
fi
done
if [ ! -z "${selected_pkg}" ]; then
mkdir -p ${target_rootfs}/${opkglibdir}
opkg-cl ${ipkg_args} update
opkg-cl ${ipkg_args} install ${selected_pkg}
multilib_tryout_dirs="${multilib_tryout_dirs} ${target_rootfs}"
fi
done
}
#
# install a bunch of packages using opkg
# the following shell variables needs to be set before calling this func:
@@ -99,8 +77,7 @@ package_install_internal_ipk() {
local conffile="${INSTALL_CONF_IPK}"
local package_to_install="${INSTALL_PACKAGES_NORMAL_IPK}"
local package_attemptonly="${INSTALL_PACKAGES_ATTEMPTONLY_IPK}"
local package_linguas="${INSTALL_PACKAGES_LINGUAS_IPK}"
local package_multilib="${INSTALL_PACKAGES_MULTILIB_IPK}"
local package_lingusa="${INSTALL_PACKAGES_LINGUAS_IPK}"
local task="${INSTALL_TASK_IPK}"
mkdir -p ${target_rootfs}${localstatedir}/lib/opkg/
@@ -111,8 +88,8 @@ package_install_internal_ipk() {
# Uclibc builds don't provide this stuff...
if [ x${TARGET_OS} = "xlinux" ] || [ x${TARGET_OS} = "xlinux-gnueabi" ] ; then
if [ ! -z "${package_linguas}" ]; then
for i in ${package_linguas}; do
if [ ! -z "${package_lingusa}" ]; then
for i in ${package_lingusa}; do
opkg-cl ${ipkg_args} install $i
done
fi
@@ -125,16 +102,6 @@ package_install_internal_ipk() {
if [ ! -z "${package_attemptonly}" ]; then
opkg-cl ${ipkg_args} install ${package_attemptonly} > "${WORKDIR}/temp/log.do_${task}_attemptonly.${PID}" || true
fi
package_tryout_install_multilib_ipk
if [ ! -z "${MULTILIB_CHECK_FILE}" ]; then
#sanity check
multilib_sanity_check ${target_rootfs} ${multilib_tryout_dirs} || exit 1
fi
if [ ! -z "${package_multilib}" ]; then
opkg-cl ${ipkg_args} install ${package_multilib}
fi
}
ipk_log_check() {
@@ -163,7 +130,7 @@ ipk_log_check() {
package_update_index_ipk () {
set -x
ipkgarchs="${PACKAGE_ARCHS} ${SDK_PACKAGE_ARCHS}"
ipkgarchs="${PACKAGE_ARCHS}"
if [ ! -z "${DEPLOY_KEEP_PACKAGES}" ]; then
return
@@ -171,12 +138,8 @@ package_update_index_ipk () {
packagedirs="${DEPLOY_DIR_IPK}"
for arch in $ipkgarchs; do
packagedirs="$packagedirs ${DEPLOY_DIR_IPK}/$arch"
done
multilib_archs="${MULTILIB_ARCHS}"
for arch in $multilib_archs; do
packagedirs="$packagedirs ${DEPLOY_DIR_IPK}/$arch"
sdkarch=`echo $arch | sed -e 's/${HOST_ARCH}/${SDK_ARCH}/'`
packagedirs="$packagedirs ${DEPLOY_DIR_IPK}/$arch ${DEPLOY_DIR_IPK}/$sdkarch-nativesdk"
done
for pkgdir in $packagedirs; do
@@ -194,48 +157,35 @@ package_update_index_ipk () {
#
package_generate_ipkg_conf () {
package_generate_archlist
echo "src oe file:${DEPLOY_DIR_IPK}" >> ${IPKGCONF_SDK}
ipkgarchs="${SDK_PACKAGE_ARCHS}"
for arch in $ipkgarchs; do
if [ -e ${DEPLOY_DIR_IPK}/$arch/Packages ] ; then
echo "src oe-$arch file:${DEPLOY_DIR_IPK}/$arch" >> ${IPKGCONF_SDK}
fi
done
echo "src oe file:${DEPLOY_DIR_IPK}" >> ${IPKGCONF_TARGET}
echo "src oe file:${DEPLOY_DIR_IPK}" >> ${IPKGCONF_SDK}
ipkgarchs="${PACKAGE_ARCHS}"
for arch in $ipkgarchs; do
if [ -e ${DEPLOY_DIR_IPK}/$arch/Packages ] ; then
echo "src oe-$arch file:${DEPLOY_DIR_IPK}/$arch" >> ${IPKGCONF_TARGET}
fi
done
multilib_archs="${MULTILIB_ARCHS}"
for arch in $multilib_archs; do
if [ -e ${DEPLOY_DIR_IPK}/$arch/Packages ] ; then
echo "src oe-$arch file:${DEPLOY_DIR_IPK}/$arch" >> ${IPKGCONF_TARGET}
sdkarch=`echo $arch | sed -e 's/${HOST_ARCH}/${SDK_ARCH}/'`
extension=-nativesdk
if [ "$sdkarch" = "all" -o "$sdkarch" = "any" -o "$sdkarch" = "noarch" ]; then
extension=""
fi
if [ -e ${DEPLOY_DIR_IPK}/$sdkarch$extension/Packages ] ; then
echo "src oe-$sdkarch$extension file:${DEPLOY_DIR_IPK}/$sdkarch$extension" >> ${IPKGCONF_SDK}
fi
done
}
package_generate_archlist () {
ipkgarchs="${SDK_PACKAGE_ARCHS}"
priority=1
for arch in $ipkgarchs; do
echo "arch $arch $priority" >> ${IPKGCONF_SDK}
priority=$(expr $priority + 5)
done
ipkgarchs="${PACKAGE_ARCHS}"
priority=1
for arch in $ipkgarchs; do
sdkarch=`echo $arch | sed -e 's/${HOST_ARCH}/${SDK_ARCH}/'`
echo "arch $arch $priority" >> ${IPKGCONF_TARGET}
priority=$(expr $priority + 5)
done
multilib_archs="${MULTILIB_ARCHS}"
for arch in $multilib_archs; do
echo "arch $arch $priority" >> ${IPKGCONF_TARGET}
extension=-nativesdk
if [ "$sdkarch" = "all" -o "$sdkarch" = "any" -o "$sdkarch" = "noarch" ]; then
extension=""
fi
echo "arch $sdkarch$extension $priority" >> ${IPKGCONF_SDK}
priority=$(expr $priority + 5)
done
}

View File

@@ -21,52 +21,67 @@ RPMCONF_HOST_BASE = "${DEPLOY_DIR_RPM}/solvedb-sdk"
# Update the Packages depsolver db in ${DEPLOY_DIR_RPM}
#
package_update_index_rpm () {
rpmarchs="${PACKAGE_ARCHS}"
if [ ! -z "${DEPLOY_KEEP_PACKAGES}" ]; then
return
fi
# Update target packages
base_archs="${PACKAGE_ARCHS}"
ml_archs="${MULTILIB_PACKAGE_ARCHS}"
package_update_index_rpm_common "${RPMCONF_TARGET_BASE}" base_archs ml_archs
packagedirs=""
packagedirs_sdk=""
for arch in $rpmarchs ; do
sdkarch=`echo $arch | sed -e 's/${HOST_ARCH}/${SDK_ARCH}/'`
extension="-nativesdk"
if [ "$sdkarch" = "all" -o "$sdkarch" = "any" -o "$sdkarch" = "noarch" ]; then
extension=""
fi
packagedirs="${DEPLOY_DIR_RPM}/$arch $packagedirs"
packagedirs_sdk="${DEPLOY_DIR_RPM}/$sdkarch$extension $packagedirs_sdk"
# Update SDK packages
base_archs="${SDK_PACKAGE_ARCHS}"
package_update_index_rpm_common "${RPMCONF_HOST_BASE}" base_archs
}
rm -rf ${DEPLOY_DIR_RPM}/$arch/solvedb
rm -rf ${DEPLOY_DIR_RPM}/$sdkarch$extension/solvedb
done
package_update_index_rpm_common () {
rpmconf_base="$1"
shift
for archvar in "$@"; do
eval archs=\${${archvar}}
packagedirs=""
for arch in $archs; do
packagedirs="${DEPLOY_DIR_RPM}/$arch $packagedirs"
rm -rf ${DEPLOY_DIR_RPM}/$arch/solvedb
done
cat /dev/null > ${rpmconf_base}-${archvar}.conf
for pkgdir in $packagedirs; do
if [ -e $pkgdir/ ]; then
echo "Generating solve db for $pkgdir..."
echo $pkgdir/solvedb >> ${rpmconf_base}-${archvar}.conf
if [ -d $pkgdir/solvedb ]; then
# We've already processed this and it's a duplicate
continue
fi
mkdir -p $pkgdir/solvedb
echo "# Dynamically generated solve manifest" >> $pkgdir/solvedb/manifest
find $pkgdir -maxdepth 1 -type f >> $pkgdir/solvedb/manifest
${RPM} -i --replacepkgs --replacefiles --oldpackage \
-D "_dbpath $pkgdir/solvedb" --justdb \
--noaid --nodeps --noorder --noscripts --notriggers --noparentdirs --nolinktos --stats \
--ignoresize --nosignature --nodigest \
-D "__dbi_txn create nofsync" \
$pkgdir/solvedb/manifest
cat /dev/null > ${RPMCONF_TARGET_BASE}.conf
for pkgdir in $packagedirs; do
if [ -e $pkgdir/ ]; then
echo "Generating solve db for $pkgdir..."
echo $pkgdir/solvedb >> ${RPMCONF_TARGET_BASE}.conf
if [ -d $pkgdir/solvedb ]; then
# We've already processed this and it's a duplicate
continue
fi
done
mkdir -p $pkgdir/solvedb
echo "# Dynamically generated solve manifest" >> $pkgdir/solvedb/manifest
find $pkgdir -maxdepth 1 -type f >> $pkgdir/solvedb/manifest
${RPM} -i --replacepkgs --replacefiles --oldpackage \
-D "_dbpath $pkgdir/solvedb" --justdb \
--noaid --nodeps --noorder --noscripts --notriggers --noparentdirs --nolinktos --stats \
--ignoresize --nosignature --nodigest \
-D "__dbi_txn create nofsync" \
$pkgdir/solvedb/manifest
fi
done
cat /dev/null > ${RPMCONF_HOST_BASE}.conf
for pkgdir in $packagedirs_sdk; do
if [ -e $pkgdir/ ]; then
echo "Generating solve db for $pkgdir..."
echo $pkgdir/solvedb >> ${RPMCONF_HOST_BASE}.conf
if [ -d $pkgdir/solvedb ]; then
# We've already processed this and it's a duplicate
continue
fi
mkdir -p $pkgdir/solvedb
echo "# Dynamically generated solve manifest" >> $pkgdir/solvedb/manifest
find $pkgdir -maxdepth 1 -type f >> $pkgdir/solvedb/manifest
${RPM} -i --replacepkgs --replacefiles --oldpackage \
-D "_dbpath $pkgdir/solvedb" --justdb \
--noaid --nodeps --noorder --noscripts --notriggers --noparentdirs --nolinktos --stats \
--ignoresize --nosignature --nodigest \
-D "__dbi_txn create nofsync" \
$pkgdir/solvedb/manifest
fi
done
}
@@ -75,38 +90,27 @@ package_update_index_rpm_common () {
# generated depsolver db's...
#
package_generate_rpm_conf () {
# Update target packages
package_generate_rpm_conf_common "${RPMCONF_TARGET_BASE}" base_archs ml_archs
# Update SDK packages
package_generate_rpm_conf_common "${RPMCONF_HOST_BASE}" base_archs
}
package_generate_rpm_conf_common() {
rpmconf_base="$1"
shift
printf "_solve_dbpath " > ${rpmconf_base}.macro
o_colon=false
for archvar in "$@"; do
printf "_solve_dbpath " > ${rpmconf_base}-${archvar}.macro
colon=false
for each in `cat ${rpmconf_base}-${archvar}.conf` ; do
if [ "$o_colon" == true ]; then
printf ":" >> ${rpmconf_base}.macro
fi
if [ "$colon" == true ]; then
printf ":" >> ${rpmconf_base}-${archvar}.macro
fi
printf "%s" $each >> ${rpmconf_base}.macro
o_colon=true
printf "%s" $each >> ${rpmconf_base}-${archvar}.macro
colon=true
done
printf "\n" >> ${rpmconf_base}-${archvar}.macro
printf "_solve_dbpath " > ${RPMCONF_TARGET_BASE}.macro
colon=false
for each in `cat ${RPMCONF_TARGET_BASE}.conf` ; do
if [ "$colon" == true ]; then
printf ":" >> ${RPMCONF_TARGET_BASE}.macro
fi
printf "%s" $each >> ${RPMCONF_TARGET_BASE}.macro
colon=true
done
printf "\n" >> ${rpmconf_base}.macro
printf "\n" >> ${RPMCONF_TARGET_BASE}.macro
printf "_solve_dbpath " > ${RPMCONF_HOST_BASE}.macro
colon=false
for each in `cat ${RPMCONF_HOST_BASE}.conf` ; do
if [ "$colon" == true ]; then
printf ":" >> ${RPMCONF_HOST_BASE}.macro
fi
printf "%s" $each >> ${RPMCONF_HOST_BASE}.macro
colon=true
done
printf "\n" >> ${RPMCONF_HOST_BASE}.macro
}
rpm_log_check() {
@@ -135,11 +139,11 @@ rpm_log_check() {
# resolve_pacakge <pkgname> <solvdb conffile>
#
resolve_package_rpm () {
local conffile="$1"
shift
local pkg="$1"
local conffile="$2"
local pkg_name=""
for solve in `cat ${conffile}`; do
pkg_name=$(${RPM} -D "_dbpath $solve" -D "__dbi_txn create nofsync" -q --yaml $@ | grep -i 'Packageorigin' | cut -d : -f 2)
pkg_name=$(${RPM} -D "_dbpath $solve" -D "__dbi_txn create nofsync" -q --yaml $pkg | grep -i 'Packageorigin' | cut -d : -f 2)
if [ -n "$pkg_name" ]; then
break;
fi
@@ -168,16 +172,16 @@ package_install_internal_rpm () {
local confbase="${INSTALL_CONFBASE_RPM}"
local package_to_install="${INSTALL_PACKAGES_NORMAL_RPM}"
local package_attemptonly="${INSTALL_PACKAGES_ATTEMPTONLY_RPM}"
local package_linguas="${INSTALL_PACKAGES_LINGUAS_RPM}"
local package_lingusa="${INSTALL_PACKAGES_LINGUAS_RPM}"
local providename="${INSTALL_PROVIDENAME_RPM}"
local task="${INSTALL_TASK_RPM}"
# Setup base system configuration
mkdir -p ${target_rootfs}/etc/rpm/
echo "${platform}${TARGET_VENDOR}-${TARGET_OS}" > ${target_rootfs}/etc/rpm/platform
echo "${platform}-poky-linux-gnu" > ${target_rootfs}/etc/rpm/platform
if [ ! -z "$platform_extra" ]; then
for pt in $platform_extra ; do
echo "$pt-.*-${TARGET_OS}" >> ${target_rootfs}/etc/rpm/platform
echo "$pt-.*-linux.*" >> ${target_rootfs}/etc/rpm/platform
done
fi
@@ -185,7 +189,7 @@ package_install_internal_rpm () {
mkdir -p ${target_rootfs}/etc/rpm/sysinfo
echo "/" >${target_rootfs}/etc/rpm/sysinfo/Dirnames
if [ ! -z "$providename" ]; then
cat /dev/null > ${target_rootfs}/etc/rpm/sysinfo/Providename
>>${target_rootfs}/etc/rpm/sysinfo/Providename
for provide in $providename ; do
echo $provide >> ${target_rootfs}/etc/rpm/sysinfo/Providename
done
@@ -197,22 +201,15 @@ package_install_internal_rpm () {
# Uclibc builds don't provide this stuff...
if [ x${TARGET_OS} = "xlinux" ] || [ x${TARGET_OS} = "xlinux-gnueabi" ] ; then
if [ ! -z "${package_linguas}" ]; then
for pkg in ${package_linguas}; do
if [ ! -z "${package_lingusa}" ]; then
for pkg in ${package_lingusa}; do
echo "Processing $pkg..."
archvar=base_archs
ml_pkg=$(echo ${pkg} | sed "s,^${MLPREFIX}\(.*\),\1,")
if [ "${ml_pkg}" != "${pkg}" ]; then
archvar=ml_archs
fi
pkg_name=$(resolve_package_rpm ${confbase}-${archvar}.conf ${ml_pkg})
pkg_name=$(resolve_package_rpm $pkg ${confbase}.conf)
if [ -z "$pkg_name" ]; then
echo "Unable to find package $pkg ($ml_pkg)!"
echo "Unable to find package $pkg!"
exit 1
fi
echo $pkg_name >> ${target_rootfs}/install/install.manifest
echo $pkg_name >> ${IMAGE_ROOTFS}/install/install.manifest
done
fi
fi
@@ -220,16 +217,9 @@ package_install_internal_rpm () {
if [ ! -z "${package_to_install}" ]; then
for pkg in ${package_to_install} ; do
echo "Processing $pkg..."
archvar=base_archs
ml_pkg=$(echo ${pkg} | sed "s,^${MLPREFIX}\(.*\),\1,")
if [ "${ml_pkg}" != "${pkg}" ]; then
archvar=ml_archs
fi
pkg_name=$(resolve_package_rpm ${confbase}-${archvar}.conf ${ml_pkg})
pkg_name=$(resolve_package_rpm $pkg ${confbase}.conf)
if [ -z "$pkg_name" ]; then
echo "Unable to find package $pkg ($ml_pkg)!"
echo "Unable to find package $pkg!"
exit 1
fi
echo $pkg_name >> ${target_rootfs}/install/install.manifest
@@ -277,7 +267,7 @@ package_install_internal_rpm () {
# Dump the full set of recommends...
${RPM} --predefine "_rpmds_sysinfo_path ${target_rootfs}/etc/rpm/sysinfo" \
--predefine "_rpmrc_platform_path ${target_rootfs}/etc/rpm/platform" \
-D "_dbpath ${target_rootfs}/install" -D "`cat ${confbase}.macro`" \
-D "_dbpath ${IMAGE_ROOTFS}/install" -D "`cat ${confbase}.macro`" \
-D "__dbi_txn create nofsync private" \
-qa --qf "[%{RECOMMENDS}\n]" | sort -u > ${target_rootfs}/install/recommend
# Did we add more to the list?
@@ -328,21 +318,6 @@ python write_specfile () {
import textwrap
import oe.packagedata
# We need a simple way to remove the MLPREFIX from the package name,
# and dependency information...
def strip_multilib(name, d):
multilibs = d.getVar('MULTILIBS', True) or ""
for ext in multilibs.split():
eext = ext.split(':')
if len(eext) > 1 and eext[0] == 'multilib' and name and name.find(eext[1] + '-') == 0:
name = (eext[1] + '-').join(name.split(eext[1] + '-', 1)[1:])
return name
# ml = bb.data.getVar("MLPREFIX", d, True)
# if ml and name and len(ml) != 0 and name.find(ml) == 0:
# return ml.join(name.split(ml, 1)[1:])
# return name
# In RPM, dependencies are of the format: pkg <>= Epoch:Version-Release
# This format is similar to OE, however there are restrictions on the
# characters that can be in a field. In the Version field, "-"
@@ -371,7 +346,7 @@ python write_specfile () {
pv = subd['PKGV']
reppv = pv.replace('-', '+')
ver = ver.replace(pv, reppv)
newdeps_dict[strip_multilib(dep, d)] = ver
newdeps_dict[dep] = ver
depends = bb.utils.join_deps(newdeps_dict)
bb.data.setVar(varname, depends.strip(), d)
@@ -419,7 +394,7 @@ python write_specfile () {
return
# Construct the SPEC file...
srcname = strip_multilib(bb.data.getVar('PN', d, True), d)
srcname = bb.data.getVar('PN', d, True)
srcsummary = (bb.data.getVar('SUMMARY', d, True) or bb.data.getVar('DESCRIPTION', d, True) or ".")
srcversion = bb.data.getVar('PKGV', d, True).replace('-', '+')
srcrelease = bb.data.getVar('PKGR', d, True)
@@ -430,7 +405,7 @@ python write_specfile () {
srchomepage = bb.data.getVar('HOMEPAGE', d, True)
srcdescription = bb.data.getVar('DESCRIPTION', d, True) or "."
srcdepends = strip_multilib(bb.data.getVar('DEPENDS', d, True), d)
srcdepends = bb.data.getVar('DEPENDS', d, True)
srcrdepends = []
srcrrecommends = []
srcrsuggests = []
@@ -473,7 +448,7 @@ python write_specfile () {
conffiles = (bb.data.getVar('CONFFILES', localdata, True) or "").split()
splitname = strip_multilib(pkgname, d)
splitname = pkgname
splitsummary = (bb.data.getVar('SUMMARY', localdata, True) or bb.data.getVar('DESCRIPTION', localdata, True) or ".")
splitversion = (bb.data.getVar('PKGV', localdata, True) or "").replace('-', '+')
@@ -501,10 +476,6 @@ python write_specfile () {
splitrconflicts = bb.data.getVar('RCONFLICTS', localdata, True) or ""
splitrobsoletes = []
# For now we need to manually supplement RPROVIDES with any update-alternatives links
if pkg == d.getVar("PN", True):
splitrprovides = splitrprovides + " " + (d.getVar('ALTERNATIVE_LINK', True) or '') + " " + (d.getVar('ALTERNATIVE_LINKS', True) or '')
# Gather special src/first package data
if srcname == splitname:
srcrdepends = splitrdepends
@@ -728,14 +699,6 @@ python write_specfile () {
python do_package_rpm () {
import os
# We need a simple way to remove the MLPREFIX from the package name,
# and dependency information...
def strip_multilib(name, d):
ml = bb.data.getVar("MLPREFIX", d, True)
if ml and name and len(ml) != 0 and name.find(ml) == 0:
return ml.join(name.split(ml, 1)[1:])
return name
workdir = bb.data.getVar('WORKDIR', d, True)
outdir = bb.data.getVar('DEPLOY_DIR_IPK', d, True)
tmpdir = bb.data.getVar('TMPDIR', d, True)
@@ -751,7 +714,7 @@ python do_package_rpm () {
return
# Construct the spec file...
srcname = strip_multilib(bb.data.getVar('PN', d, True), d)
srcname = bb.data.getVar('PN', d, True)
outspecfile = workdir + "/" + srcname + ".spec"
bb.data.setVar('OUTSPECFILE', outspecfile, d)
bb.build.exec_func('write_specfile', d)

View File

@@ -0,0 +1,7 @@
# Now that BitBake/OpenEmbedded uses Quilt by default, you can simply add an
# inherit patcher
# to one of your config files to let BB/OE use patcher again.
PATCHCLEANCMD = "patcher -B"
PATCHCMD = "patcher -R -p '%s' -n '%s' -i '%s'"
PATCH_DEPENDS = "${@["patcher-native", ""][(bb.data.getVar('PN', d, 1) == 'patcher-native')]}"

View File

@@ -22,15 +22,6 @@ fakeroot do_populate_sdk() {
rm -rf ${SDK_OUTPUT}
mkdir -p ${SDK_OUTPUT}
# populate_sdk_<image> is required to construct two images:
# SDK_ARCH-nativesdk - contains the cross compiler and associated tooling
# target - contains a target rootfs configured for the SDK usage
#
# the output of populate_sdk_<image> should end up in ${SDK_OUTPUT} it is made
# up of:
# ${SDK_OUTPUT}/<sdk_arch-nativesdk pkgs>
# ${SDK_OUTPUT}/${SDKTARGETSYSROOT}/<target pkgs>
populate_sdk_${IMAGE_PKGTYPE}
# Don't ship any libGL in the SDK

View File

@@ -11,38 +11,41 @@ populate_sdk_post_deb () {
fakeroot populate_sdk_deb () {
# update index
package_update_index_deb
## install target ##
# This needs to work in the same way as rootfs_deb.bbclass
export INSTALL_TASK_DEB="populate_sdk"
export INSTALL_PACKAGES_LINGUAS_DEB=""
export INSTALL_PACKAGES_ATTEMPTONLY_DEB=""
#install target
echo "Installing TARGET packages"
mkdir -p ${IMAGE_ROOTFS}/var/dpkg/alternatives
export INSTALL_ROOTFS_DEB="${SDK_OUTPUT}/${SDKTARGETSYSROOT}"
export INSTALL_BASEARCH_DEB="${DPKG_ARCH}"
export INSTALL_ARCHS_DEB="${PACKAGE_ARCHS}"
export INSTALL_PACKAGES_NORMAL_DEB="${TOOLCHAIN_TARGET_TASK}"
export INSTALL_PACKAGES_ATTEMPTONLY_DEB=""
export PACKAGES_LINGUAS_DEB=""
export INSTALL_TASK_DEB="populate_sdk-target"
package_install_internal_deb
populate_sdk_post_deb ${INSTALL_ROOTFS_DEB}
populate_sdk_log_check populate_sdk
## install nativesdk ##
echo "Installing NATIVESDK packages"
#install host
echo "Installing HOST packages"
export INSTALL_ROOTFS_DEB="${SDK_OUTPUT}"
export INSTALL_BASEARCH_DEB="${SDK_ARCH}"
export INSTALL_ARCHS_DEB="${SDK_PACKAGE_ARCHS}"
export INSTALL_PACKAGES_NORMAL_DEB="${TOOLCHAIN_HOST_TASK}"
export INSTALL_PACKAGES_ATTEMPTONLY_DEB=""
export PACKAGES_LINGUAS_DEB=""
export INSTALL_TASK_DEB="populate_sdk-nativesdk"
INSTALL_ARCHS_DEB=""
for arch in ${PACKAGE_ARCHS}; do
sdkarch=`echo $arch | sed -e 's/${HOST_ARCH}/${SDK_ARCH}/'`
extension="-nativesdk"
if [ "$sdkarch" = "all" -o "$sdkarch" = "any" -o "$sdkarch" = "noarch" ]; then
extension=""
fi
if [ -e ${DEPLOY_DIR_DEB}/$sdkarch$extension ]; then
INSTALL_ARCHS_DEB="$INSTALL_ARCHS_DEB $sdkarch$extension"
fi
done
export INSTALL_ARCHS_DEB
package_install_internal_deb
populate_sdk_post_deb ${SDK_OUTPUT}/${SDKPATHNATIVE}

Some files were not shown because too many files have changed in this diff Show More