Compare commits
121 commits
1.5_M5.rc1 ... yocto-1.5_
| Author | SHA1 | Date |
|---|---|---|
| | e624d737b7 | |
| | 1db33e3c4d | |
| | 442be3ef60 | |
| | ac191eb964 | |
| | da470776f9 | |
| | 37cb3e3aa4 | |
| | 9a4169a465 | |
| | 14fee32867 | |
| | eae5f4bdac | |
| | ab9e266ab9 | |
| | 015cb13a67 | |
| | 7303b67d33 | |
| | 8dd7ab3a78 | |
| | a71761e6e6 | |
| | 44c65db250 | |
| | bb939628ec | |
| | 3091150590 | |
| | 242ad61580 | |
| | 5eae2d57b4 | |
| | 9470f687d9 | |
| | bf413add98 | |
| | e3cacf17e8 | |
| | 7d6094489c | |
| | 0af5853de2 | |
| | 63ae37f924 | |
| | 85a5a2c3b2 | |
| | c78c1f9c92 | |
| | 238e9b54e2 | |
| | 2ac9925ceb | |
| | b925c752c0 | |
| | 9592f311b0 | |
| | 1b7f829de7 | |
| | 42ef04b364 | |
| | 06078af4ca | |
| | 1bf8d83d5a | |
| | 641a80d760 | |
| | 912e8d5672 | |
| | d96c512db1 | |
| | d15652d78c | |
| | 9a32eca022 | |
| | d7e7b991ce | |
| | d6ac67f9af | |
| | 385bd4410d | |
| | a61519f3fb | |
| | 0902850e97 | |
| | 66c9c01b2b | |
| | 36b4fcde7a | |
| | f78db82e1a | |
| | 43f1867e32 | |
| | c971868360 | |
| | 43c670accc | |
| | 602bb695cf | |
| | eb4854f903 | |
| | a828c89822 | |
| | 1181e69119 | |
| | 5fca4d286e | |
| | 366bd119bd | |
| | f8ccabac55 | |
| | 97f1c03d98 | |
| | e909857be0 | |
| | 707da95b4a | |
| | a6974f2a70 | |
| | 1875fb796f | |
| | 99875e2e1a | |
| | 019dafd930 | |
| | 576a19ed6c | |
| | 5fb63f685c | |
| | 27bb9d0a90 | |
| | 34e875e7ec | |
| | 58324d8c09 | |
| | 3e31a50b66 | |
| | babbf7a46a | |
| | 214bb6828e | |
| | 8ebe7be3d9 | |
| | af5b3f3510 | |
| | 6670be71f7 | |
| | ee7ccda0ec | |
| | 0c11a7740b | |
| | 4137f9a996 | |
| | 493e8b46fd | |
| | 643252f889 | |
| | 0acde33c75 | |
| | ca1b5ddb86 | |
| | 926b60f6e4 | |
| | ef7e3882a9 | |
| | 0519d1ae13 | |
| | 7663a52061 | |
| | 2174a51ee8 | |
| | fe1258d478 | |
| | a392877e57 | |
| | dd36930f3f | |
| | 89ca97371d | |
| | 3c4f2a6118 | |
| | c375134c6a | |
| | 775fbab597 | |
| | 91b9202de9 | |
| | 743106f392 | |
| | de62377415 | |
| | 0ca3c5f540 | |
| | a266619317 | |
| | cc2626727c | |
| | 2fe0213997 | |
| | 973fd9b7b1 | |
| | fdc1ad2936 | |
| | cc265bf535 | |
| | 7b70da93bc | |
| | b359e9a981 | |
| | 78e209d346 | |
| | 6a18edd8e2 | |
| | 661c27d2c7 | |
| | 8cb2038c70 | |
| | ddb29c561c | |
| | 27b499841a | |
| | 4cf514fb34 | |
| | 6a0c6eac99 | |
| | 69daf50cde | |
| | d3a849fdb4 | |
| | 4265649931 | |
| | db678a124d | |
| | bd76847d86 | |
| | 01db559abd | |
@@ -69,9 +69,10 @@ class FuncFailed(Exception):
class TaskBase(event.Event):
    """Base class for task events"""

    def __init__(self, t, d ):
    def __init__(self, t, logfile, d):
        self._task = t
        self._package = d.getVar("PF", True)
        self.logfile = logfile
        event.Event.__init__(self)
        self._message = "recipe %s: task %s: %s" % (d.getVar("PF", True), t, self.getDisplayName())

@@ -96,16 +97,11 @@ class TaskFailed(TaskBase):
    """Task execution failed"""

    def __init__(self, task, logfile, metadata, errprinted = False):
        self.logfile = logfile
        self.errprinted = errprinted
        super(TaskFailed, self).__init__(task, metadata)
        super(TaskFailed, self).__init__(task, logfile, metadata)

class TaskFailedSilent(TaskBase):
    """Task execution failed (silently)"""
    def __init__(self, task, logfile, metadata):
        self.logfile = logfile
        super(TaskFailedSilent, self).__init__(task, metadata)

    def getDisplayName(self):
        # Don't need to tell the user it was silent
        return "Failed"

@@ -113,7 +109,7 @@ class TaskFailedSilent(TaskBase):
class TaskInvalid(TaskBase):

    def __init__(self, task, metadata):
        super(TaskInvalid, self).__init__(task, metadata)
        super(TaskInvalid, self).__init__(task, None, metadata)
        self._message = "No such task '%s'" % task

@@ -416,7 +412,7 @@ def _exec_task(fn, task, d, quieterr):
    localdata.setVar('BB_LOGFILE', logfn)
    localdata.setVar('BB_RUNTASK', task)

    event.fire(TaskStarted(task, localdata), localdata)
    event.fire(TaskStarted(task, logfn, localdata), localdata)
    try:
        for func in (prefuncs or '').split():
            exec_func(func, localdata)

@@ -453,7 +449,7 @@ def _exec_task(fn, task, d, quieterr):
            logger.debug(2, "Zero size logfn %s, removing", logfn)
            bb.utils.remove(logfn)
            bb.utils.remove(loglink)
    event.fire(TaskSucceeded(task, localdata), localdata)
    event.fire(TaskSucceeded(task, logfn, localdata), localdata)

    if not localdata.getVarFlag(task, 'nostamp') and not localdata.getVarFlag(task, 'selfstamp'):
        make_stamp(task, localdata)
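The hunks above change the task event constructors so that every `TaskBase`-derived event carries the path of its task log. A minimal standalone sketch of the resulting pattern (plain Python with illustrative class names, not the real `bb.event` machinery):

```python
class TaskEvent:
    """Stand-in for a BitBake task event that now carries its logfile path."""
    def __init__(self, task, logfile, recipe):
        self.task = task
        self.logfile = logfile
        self.recipe = recipe

class TaskFailedEvent(TaskEvent):
    pass

def on_event(event):
    # A UI handler can point the user straight at the log of the failing task.
    if isinstance(event, TaskFailedEvent):
        print("Task %s of %s failed, see %s" % (event.task, event.recipe, event.logfile))

on_event(TaskFailedEvent("do_compile", "/tmp/log.do_compile.1234", "busybox-1.21.1-r0"))
```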
@@ -117,14 +117,14 @@ class Command:
            return False

    def finishAsyncCommand(self, msg=None, code=None):
        if msg:
        if msg or msg == "":
            bb.event.fire(CommandFailed(msg), self.cooker.event_data)
        elif code:
            bb.event.fire(CommandExit(code), self.cooker.event_data)
        else:
            bb.event.fire(CommandCompleted(), self.cooker.event_data)
        self.currentAsyncCommand = None

        self.cooker.finishcommand()

class CommandsSync:
    """

@@ -137,13 +137,13 @@ class CommandsSync:
        """
        Trigger cooker 'shutdown' mode
        """
        command.cooker.shutdown()
        command.cooker.shutdown(False)

    def stateStop(self, command, params):
    def stateForceShutdown(self, command, params):
        """
        Stop the cooker
        """
        command.cooker.stop()
        command.cooker.shutdown(True)

    def getVariable(self, command, params):
        """
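The command-layer change collapses the old "shutdown" and "stop" paths into a single `shutdown()` call with a force flag, exposed to clients as `stateForceShutdown`. A hedged standalone sketch of that mapping (mock class, not the real BBCooker; the name of the graceful command is not visible in this excerpt):

```python
class FakeCooker:
    """Stand-in for BBCooker after the change: one shutdown() with a 'force'
    flag replaces the old separate stop() method."""
    def __init__(self):
        self.state = "running"

    def shutdown(self, force=False):
        self.state = "forceshutdown" if force else "shutdown"

def handle_command(cooker, command):
    # "stateForceShutdown" is the command shown in the diff; any graceful
    # shutdown request maps to shutdown(False).
    if command == "stateForceShutdown":
        cooker.shutdown(True)
    else:
        cooker.shutdown(False)

cooker = FakeCooker()
handle_command(cooker, "stateForceShutdown")
print(cooker.state)  # -> forceshutdown
```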
@@ -61,7 +61,7 @@ class CollectionError(bb.BBHandledException):
    """

class state:
    initial, parsing, running, shutdown, stop = range(5)
    initial, parsing, running, shutdown, forceshutdown, stopped = range(6)


class SkippedPackage:

@@ -93,22 +93,6 @@ class BBCooker:

        self.configuration = configuration

        self.caches_array = []

        caches_name_array = ['bb.cache:CoreRecipeInfo'] + configuration.extra_caches

        # At least CoreRecipeInfo will be loaded, so caches_array will never be empty!
        # This is the entry point, no further check needed!
        for var in caches_name_array:
            try:
                module_name, cache_name = var.split(':')
                module = __import__(module_name, fromlist=(cache_name,))
                self.caches_array.append(getattr(module, cache_name))
            except ImportError as exc:
                logger.critical("Unable to import extra RecipeInfo '%s' from '%s': %s" % (cache_name, module_name, exc))
                sys.exit("FATAL: Failed to import extra cache class '%s'." % cache_name)

        self.data = None
        self.loadConfigurationData()

        # Take a lock so only one copy of bitbake can run against a given build

@@ -118,13 +102,6 @@ class BBCooker:
        if not self.lock:
            bb.fatal("Only one copy of bitbake should be run against a build directory")

        #
        # Special updated configuration we use for firing events
        #
        self.event_data = bb.data.createCopy(self.data)
        bb.data.update_data(self.event_data)
        bb.parse.init_parser(self.event_data)

        # TOSTOP must not be set or our children will hang when they output
        fd = sys.stdout.fileno()
        if os.isatty(fd):

@@ -141,11 +118,24 @@ class BBCooker:
        self.parser = None

    def initConfigurationData(self):
        worker = False
        if not self.configuration.server_register_idlecallback:
            worker = True

        self.databuilder = bb.cookerdata.CookerDataBuilder(self.configuration, worker)
        self.state = state.initial

        self.caches_array = []
        caches_name_array = ['bb.cache:CoreRecipeInfo'] + self.configuration.extra_caches

        # At least CoreRecipeInfo will be loaded, so caches_array will never be empty!
        # This is the entry point, no further check needed!
        for var in caches_name_array:
            try:
                module_name, cache_name = var.split(':')
                module = __import__(module_name, fromlist=(cache_name,))
                self.caches_array.append(getattr(module, cache_name))
            except ImportError as exc:
                logger.critical("Unable to import extra RecipeInfo '%s' from '%s': %s" % (cache_name, module_name, exc))
                sys.exit("FATAL: Failed to import extra cache class '%s'." % cache_name)

        self.databuilder = bb.cookerdata.CookerDataBuilder(self.configuration, False)
        self.data = self.databuilder.data

    def enableDataTracking(self):

@@ -162,6 +152,13 @@ class BBCooker:
        self.data = self.databuilder.data
        self.data_hash = self.databuilder.data_hash

        #
        # Special updated configuration we use for firing events
        #
        self.event_data = bb.data.createCopy(self.data)
        bb.data.update_data(self.event_data)
        bb.parse.init_parser(self.event_data)

    def modifyConfigurationVar(self, var, val, default_file, op):
        if op == "append":
            self.appendConfigurationVar(var, val, default_file)

@@ -348,13 +345,7 @@ class BBCooker:
        if pkgs_to_build[0] in set(ignore.split()):
            bb.fatal("%s is in ASSUME_PROVIDED" % pkgs_to_build[0])

        localdata = data.createCopy(self.data)
        bb.data.update_data(localdata)
        bb.data.expandKeys(localdata)

        taskdata = bb.taskdata.TaskData(self.configuration.abort)
        taskdata.add_provider(localdata, self.recipecache, pkgs_to_build[0])
        taskdata.add_unresolved(localdata, self.recipecache)
        taskdata, runlist, pkgs_to_build = self.buildTaskData(pkgs_to_build, None, self.configuration.abort)

        targetid = taskdata.getbuild_id(pkgs_to_build[0])
        fnid = taskdata.build_targets[targetid][0]
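The cooker's state set gains `forceshutdown` and `stopped` and drops `stop`. A small runnable sketch of the `range()` idiom used here and of the membership check that the later hunks switch to:

```python
class state:
    # Same idiom as the cooker: assign consecutive integers to the named states.
    initial, parsing, running, shutdown, forceshutdown, stopped = range(6)

def should_stop_parsing(current):
    # Matches the updated checks later in the diff, which test for either
    # shutdown flavour instead of the removed 'stop' state.
    return current in (state.shutdown, state.forceshutdown)

print(should_stop_parsing(state.forceshutdown))  # True
print(should_stop_parsing(state.running))        # False
```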
@@ -386,34 +377,44 @@ class BBCooker:
        if data.getVarFlag( e, 'python', envdata ):
            logger.plain("\npython %s () {\n%s}\n", e, data.getVar(e, envdata, 1))

    def prepareTreeData(self, pkgs_to_build, task):

    def buildTaskData(self, pkgs_to_build, task, abort):
        """
        Prepare a runqueue and taskdata object for iteration over pkgs_to_build
        """
        bb.event.fire(bb.event.TreeDataPreparationStarted(), self.data)

        # If we are told to do the None task then query the default task
        if (task == None):
        # A task of None means use the default task
        if task is None:
            task = self.configuration.cmd

        pkgs_to_build = self.checkPackages(pkgs_to_build)
        fulltargetlist = self.checkPackages(pkgs_to_build)

        localdata = data.createCopy(self.data)
        bb.data.update_data(localdata)
        bb.data.expandKeys(localdata)
        taskdata = bb.taskdata.TaskData(abort, skiplist=self.skiplist)

        current = 0
        runlist = []
        for k in fulltargetlist:
            taskdata.add_provider(localdata, self.recipecache, k)
            current += 1
            runlist.append([k, "do_%s" % task])
            bb.event.fire(bb.event.TreeDataPreparationProgress(current, len(fulltargetlist)), self.data)
        taskdata.add_unresolved(localdata, self.recipecache)
        bb.event.fire(bb.event.TreeDataPreparationCompleted(len(fulltargetlist)), self.data)
        return taskdata, runlist, fulltargetlist

    def prepareTreeData(self, pkgs_to_build, task):
        """
        Prepare a runqueue and taskdata object for iteration over pkgs_to_build
        """

        # We set abort to False here to prevent unbuildable targets raising
        # an exception when we're just generating data
        taskdata = bb.taskdata.TaskData(False, skiplist=self.skiplist)
        taskdata, runlist, pkgs_to_build = self.buildTaskData(pkgs_to_build, task, False)

        runlist = []
        current = 0
        for k in pkgs_to_build:
            taskdata.add_provider(localdata, self.recipecache, k)
            runlist.append([k, "do_%s" % task])
            current += 1
            bb.event.fire(bb.event.TreeDataPreparationProgress(current, len(pkgs_to_build)), self.data)
        taskdata.add_unresolved(localdata, self.recipecache)
        bb.event.fire(bb.event.TreeDataPreparationCompleted(len(pkgs_to_build)), self.data)
        return runlist, taskdata

    ######## WARNING : this function requires cache_extra to be enabled ########
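The refactor factors the target/task preparation into `buildTaskData()`, which returns the taskdata, a run list, and the full target list. A hedged standalone sketch of the run-list shape it builds (illustrative helper, not BitBake API; progress events are only noted in a comment):

```python
def build_runlist(targets, task=None, default_task="build"):
    """Sketch of the runlist produced by buildTaskData():
    one [target, "do_<task>"] pair per requested target."""
    if task is None:  # a task of None means use the default task
        task = default_task
    runlist = []
    for current, target in enumerate(targets, 1):
        runlist.append([target, "do_%s" % task])
        # The real code fires TreeDataPreparationProgress(current, len(targets)) here.
    return runlist

print(build_runlist(["core-image-minimal", "quilt-native"]))
# [['core-image-minimal', 'do_build'], ['quilt-native', 'do_build']]
```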
@@ -1047,7 +1048,7 @@ class BBCooker:

        def buildFileIdle(server, rq, abort):

            if abort or self.state == state.stop:
            if abort or self.state == state.forceshutdown:
                rq.finish_runqueue(True)
            elif self.state == state.shutdown:
                rq.finish_runqueue(False)

@@ -1076,15 +1077,8 @@ class BBCooker:
        Attempt to build the targets specified
        """

        # If we are told to do the NULL task then query the default task
        if (task == None):
            task = self.configuration.cmd

        universe = ('universe' in targets)
        targets = self.checkPackages(targets)

        def buildTargetsIdle(server, rq, abort):
            if abort or self.state == state.stop:
            if abort or self.state == state.forceshutdown:
                rq.finish_runqueue(True)
            elif self.state == state.shutdown:
                rq.finish_runqueue(False)

@@ -1108,23 +1102,13 @@ class BBCooker:

        self.buildSetVars()

        taskdata, runlist, fulltargetlist = self.buildTaskData(targets, task, self.configuration.abort)

        buildname = self.data.getVar("BUILDNAME")
        bb.event.fire(bb.event.BuildStarted(buildname, targets), self.data)

        localdata = data.createCopy(self.data)
        bb.data.update_data(localdata)
        bb.data.expandKeys(localdata)

        taskdata = bb.taskdata.TaskData(self.configuration.abort, skiplist=self.skiplist)

        runlist = []
        for k in targets:
            taskdata.add_provider(localdata, self.recipecache, k)
            runlist.append([k, "do_%s" % task])
        taskdata.add_unresolved(localdata, self.recipecache)
        bb.event.fire(bb.event.BuildStarted(buildname, fulltargetlist), self.data)

        rq = bb.runqueue.RunQueue(self, self.data, self.recipecache, taskdata, runlist)
        if universe:
        if 'universe' in targets:
            rq.rqdata.warn_multi_bb = True

        self.configuration.server_register_idlecallback(buildTargetsIdle, rq)

@@ -1173,9 +1157,9 @@ class BBCooker:
        if self.state == state.running:
            return

        if self.state in (state.shutdown, state.stop):
        if self.state in (state.shutdown, state.forceshutdown):
            self.parser.shutdown(clean=False, force = True)
            sys.exit(1)
            raise bb.BBHandledException()

        if self.state != state.parsing:
            self.parseConfiguration ()

@@ -1197,7 +1181,7 @@ class BBCooker:
        if not self.parser.parse_next():
            collectlog.debug(1, "parsing complete")
            if self.parser.error:
                sys.exit(1)
                raise bb.BBHandledException()
            self.show_appends_with_no_recipes()
            self.handlePrefProviders()
            self.recipecache.bbfile_priority = self.collection.collection_priorities(self.recipecache.pkg_fn)

@@ -1243,18 +1227,19 @@ class BBCooker:
        prserv.serv.auto_shutdown(self.data)
        bb.event.fire(CookerExit(), self.event_data)

    def shutdown(self):
        self.state = state.shutdown
    def shutdown(self, force = False):
        if force:
            self.state = state.forceshutdown
        else:
            self.state = state.shutdown

    def stop(self):
        self.state = state.stop
    def finishcommand(self):
        self.state = state.initial

    def initialize(self):
        self.state = state.initial
        self.initConfigurationData()

    def reset(self):
        self.state = state.initial
        self.loadConfigurationData()

def server_main(cooker, func, *args):
@@ -1482,7 +1467,7 @@ class Feeder(multiprocessing.Process):
            continue

class Parser(multiprocessing.Process):
    def __init__(self, jobs, results, quit, init):
    def __init__(self, jobs, results, quit, init, profile):
        self.jobs = jobs
        self.results = results
        self.quit = quit

@@ -1490,8 +1475,28 @@ class Parser(multiprocessing.Process):
        multiprocessing.Process.__init__(self)
        self.context = bb.utils.get_context().copy()
        self.handlers = bb.event.get_class_handlers().copy()
        self.profile = profile

    def run(self):

        if not self.profile:
            self.realrun()
            return

        try:
            import cProfile as profile
        except:
            import profile
        prof = profile.Profile()
        try:
            profile.Profile.runcall(prof, self.realrun)
        finally:
            logfile = "profile-parse-%s.log" % multiprocessing.current_process().name
            prof.dump_stats(logfile)
            bb.utils.process_profilelog(logfile)
            print("Raw profiling information saved to %s and processed statistics to %s.processed" % (logfile, logfile))

    def realrun(self):
        if self.init:
            self.init()

@@ -1592,7 +1597,7 @@ class CookerParser(object):
        self.feeder = Feeder(self.willparse, self.jobs, self.feeder_quit)
        self.feeder.start()
        for i in range(0, self.num_processes):
            parser = Parser(self.jobs, self.result_queue, self.parser_quit, init)
            parser = Parser(self.jobs, self.result_queue, self.parser_quit, init, self.cooker.configuration.profile)
            parser.start()
            self.processes.append(parser)
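The parser processes can now profile themselves with `cProfile` when a profile option is set. A self-contained sketch of the same wrap-the-work pattern (the workload function and log name here are placeholders; BitBake post-processes the dump with `bb.utils.process_profilelog()`, for which `pstats` is used below as a stand-in):

```python
import cProfile
import pstats

def parse_files():
    # Placeholder for the real Parser.realrun() workload.
    return sum(i * i for i in range(100000))

def run(profile=False, logfile="profile-parse-demo.log"):
    """Mimics Parser.run(): only wrap the work in a profiler when asked to."""
    if not profile:
        return parse_files()
    prof = cProfile.Profile()
    try:
        return prof.runcall(parse_files)
    finally:
        prof.dump_stats(logfile)
        # Produce a readable summary from the raw stats dump.
        pstats.Stats(logfile).sort_stats("cumulative").print_stats(5)

run(profile=True)
```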
@@ -59,7 +59,7 @@ def init():
def init_db(parent = None):
    """Return a new object representing the Bitbake data,
    optionally based on an existing object"""
    if parent:
    if parent is not None:
        return parent.createCopy()
    else:
        return _dict_type()

@@ -148,7 +148,7 @@ def expandKeys(alterdata, readdata = None):
        readdata = alterdata

    todolist = {}
    for key in keys(alterdata):
    for key in alterdata:
        if not '${' in key:
            continue

@@ -285,20 +285,24 @@ def update_data(d):
    """Performs final steps upon the datastore, including application of overrides"""
    d.finalize(parent = True)

def build_dependencies(key, keys, shelldeps, vardepvals, d):
def build_dependencies(key, keys, shelldeps, varflagsexcl, d):
    deps = set()
    vardeps = d.getVarFlag(key, "vardeps", True)
    try:
        if key[-1] == ']':
            vf = key[:-1].split('[')
            value = d.getVarFlag(vf[0], vf[1], False)
        else:
            value = d.getVar(key, False)
            parser = d.expandWithRefs(value, key)
            deps |= parser.references
            deps = deps | (keys & parser.execs)
            return deps, value
        varflags = d.getVarFlags(key, ["vardeps", "vardepvalue", "vardepsexclude"]) or {}
        vardeps = varflags.get("vardeps")
        value = d.getVar(key, False)

        if key in vardepvals:
            value = d.getVarFlag(key, "vardepvalue", True)
        elif d.getVarFlag(key, "func"):
            if d.getVarFlag(key, "python"):
        if "vardepvalue" in varflags:
            value = varflags.get("vardepvalue")
        elif varflags.get("func"):
            if varflags.get("python"):
                parsedvar = d.expandWithRefs(value, key)
                parser = bb.codeparser.PythonParser(key, logger)
                if parsedvar.value and "\t" in parsedvar.value:

@@ -320,19 +324,16 @@ def build_dependencies(key, keys, shelldeps, vardepvals, d):
        deps = deps | (keys & parser.execs)

        # Add varflags, assuming an exclusion list is set
        varflagsexcl = d.getVar('BB_SIGNATURE_EXCLUDE_FLAGS', True)
        if varflagsexcl:
            varfdeps = []
            varflags = d.getVarFlags(key)
            if varflags:
                for f in varflags:
                    if f not in varflagsexcl:
                        varfdeps.append('%s[%s]' % (key, f))
            for f in varflags:
                if f not in varflagsexcl:
                    varfdeps.append('%s[%s]' % (key, f))
            if varfdeps:
                deps |= set(varfdeps)

        deps |= set((vardeps or "").split())
        deps -= set((d.getVarFlag(key, "vardepsexclude", True) or "").split())
        deps -= set(varflags.get("vardepsexclude", "").split())
    except Exception as e:
        raise bb.data_smart.ExpansionError(key, None, e)
    return deps, value
@@ -341,16 +342,16 @@ def build_dependencies(key, keys, shelldeps, vardepvals, d):

def generate_dependencies(d):

    keys = set(key for key in d.keys() if not key.startswith("__"))
    shelldeps = set(key for key in keys if d.getVarFlag(key, "export") and not d.getVarFlag(key, "unexport"))
    vardepvals = set(key for key in keys if d.getVarFlag(key, "vardepvalue"))
    keys = set(key for key in d if not key.startswith("__"))
    shelldeps = set(key for key in d.getVar("__exportlist", False) if d.getVarFlag(key, "export") and not d.getVarFlag(key, "unexport"))
    varflagsexcl = d.getVar('BB_SIGNATURE_EXCLUDE_FLAGS', True)

    deps = {}
    values = {}

    tasklist = d.getVar('__BBTASKS') or []
    for task in tasklist:
        deps[task], values[task] = build_dependencies(task, keys, shelldeps, vardepvals, d)
        deps[task], values[task] = build_dependencies(task, keys, shelldeps, varflagsexcl, d)
        newdeps = deps[task]
        seen = set()
        while newdeps:

@@ -359,7 +360,7 @@ def generate_dependencies(d):
            newdeps = set()
            for dep in nextdeps:
                if dep not in deps:
                    deps[dep], values[dep] = build_dependencies(dep, keys, shelldeps, vardepvals, d)
                    deps[dep], values[dep] = build_dependencies(dep, keys, shelldeps, varflagsexcl, d)
                    newdeps |= deps[dep]
            newdeps -= seen
    #print "For %s: %s" % (task, str(deps[task]))
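`generate_dependencies()` walks each task's variable references transitively until no new names appear. A hedged standalone sketch of that closure loop, using a plain dictionary in place of the datastore (names are illustrative):

```python
def closure(task, deps):
    """Keep pulling in the dependencies of newly discovered variables
    until nothing new appears, mirroring the loop in generate_dependencies()."""
    seen = set()
    newdeps = set(deps.get(task, ()))
    while newdeps:
        nextdeps = newdeps
        seen |= nextdeps
        newdeps = set()
        for dep in nextdeps:
            newdeps |= set(deps.get(dep, ()))
        newdeps -= seen
    return seen

deps = {
    "do_compile": {"CC", "CFLAGS"},
    "CC": {"TARGET_PREFIX"},
    "CFLAGS": set(),
    "TARGET_PREFIX": set(),
}
print(sorted(closure("do_compile", deps)))  # ['CC', 'CFLAGS', 'TARGET_PREFIX']
```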
@@ -40,7 +40,7 @@ logger = logging.getLogger("BitBake.Data")

__setvar_keyword__ = ["_append", "_prepend", "_remove"]
__setvar_regexp__ = re.compile('(?P<base>.*?)(?P<keyword>_append|_prepend|_remove)(_(?P<add>.*))?$')
__expand_var_regexp__ = re.compile(r"\${[^{}]+}")
__expand_var_regexp__ = re.compile(r"\${[^{}@]+}")
__expand_python_regexp__ = re.compile(r"\${@.+?}")

def infer_caller_details(loginfo, parent = False, varval = True):

@@ -94,9 +94,14 @@ class VariableParse:
            if self.varname and key:
                if self.varname == key:
                    raise Exception("variable %s references itself!" % self.varname)
            if key in self.d.expand_cache:
                varparse = self.d.expand_cache[key]
                self.references |= varparse.references
                self.execs |= varparse.execs
                return varparse.value
            var = self.d.getVar(key, True)
            self.references.add(key)
            if var is not None:
                self.references.add(key)
                return var
            else:
                return match.group()

@@ -573,10 +578,17 @@ class DataSmart(MutableMapping):
        if flag == "defaultval" and '_' in var:
            self._setvar_update_overrides(var)

        if flag == "unexport" or flag == "export":
            if not "__exportlist" in self.dict:
                self._makeShadowCopy("__exportlist")
            if not "_content" in self.dict["__exportlist"]:
                self.dict["__exportlist"]["_content"] = set()
            self.dict["__exportlist"]["_content"].add(var)

    def getVarFlag(self, var, flag, expand=False, noweakdefault=False):
        local_var = self._findVar(var)
        value = None
        if local_var:
        if local_var is not None:
            if flag in local_var:
                value = copy.copy(local_var[flag])
            elif flag == "_content" and "defaultval" in local_var and not noweakdefault:

@@ -586,8 +598,10 @@ class DataSmart(MutableMapping):
            cachename = None
            if flag == "_content":
                cachename = var
            else:
                cachename = var + "[" + flag + "]"
            value = self.expand(value, cachename)
        if value and flag == "_content" and local_var and "_removeactive" in local_var:
        if value is not None and flag == "_content" and local_var is not None and "_removeactive" in local_var:
            filtered = filter(lambda v: v not in local_var["_removeactive"],
                              value.split(" "))
            value = " ".join(filtered)

@@ -635,16 +649,17 @@ class DataSmart(MutableMapping):
            self.varhistory.record(**loginfo)
            self.dict[var][i] = flags[i]

    def getVarFlags(self, var):
    def getVarFlags(self, var, expand = False, internalflags=False):
        local_var = self._findVar(var)
        flags = {}

        if local_var:
            for i in local_var:
                if i.startswith("_"):
                if i.startswith("_") and not internalflags:
                    continue
                flags[i] = local_var[i]

                if expand and i in expand:
                    flags[i] = self.expand(flags[i], var + "[" + i + "]")
        if len(flags) == 0:
            return None
        return flags

@@ -750,13 +765,16 @@ class DataSmart(MutableMapping):
        for key in keys:
            if key in config_whitelist:
                continue

            value = d.getVar(key, False) or ""
            data.update({key:value})

            varflags = d.getVarFlags(key)
            varflags = d.getVarFlags(key, internalflags = True)
            if not varflags:
                continue
            for f in varflags:
                if f == "_content":
                    continue
                data.update({'%s[%s]' % (key, f):varflags[f]})

        for key in ["__BBTASKS", "__BBANONFUNCS", "__BBHANDLERS"]:
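`getVarFlags()` gains `expand` and `internalflags` parameters: internal flags (leading underscore) stay hidden unless explicitly requested, and named flags can be expanded on the way out. A dictionary-based sketch of that filtering, assuming a plain nested-dict flag store rather than the real `DataSmart` class:

```python
def get_var_flags(flag_store, var, expand=(), internalflags=False):
    """Sketch of the extended getVarFlags() behaviour: hide '_'-prefixed flags
    unless internalflags is set; 'expand' names the flags that the real
    datastore would expand with self.expand()."""
    flags = {}
    for name, value in flag_store.get(var, {}).items():
        if name.startswith("_") and not internalflags:
            continue
        flags[name] = value
        # The real code expands value as var + "[" + name + "]" when name is in expand.
    return flags or None

store = {"FOO": {"_content": "bar", "vardeps": "BAZ", "export": "1"}}
print(get_var_flags(store, "FOO"))                      # {'vardeps': 'BAZ', 'export': '1'}
print(get_var_flags(store, "FOO", internalflags=True))  # also includes '_content'
```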
@@ -1325,9 +1325,10 @@ class RunQueueExecuteTasks(RunQueueExecute):
        if self.rqdata.taskData.abort:
            self.rq.state = runQueueCleanUp

    def task_skip(self, task):
    def task_skip(self, task, reason):
        self.runq_running[task] = 1
        self.runq_buildable[task] = 1
        bb.event.fire(runQueueTaskSkipped(task, self.stats, self.rq, reason), self.cfgData)
        self.task_completeoutright(task)
        self.stats.taskCompleted()
        self.stats.taskSkipped()

@@ -1352,13 +1353,13 @@ class RunQueueExecuteTasks(RunQueueExecute):
        if task in self.rq.scenequeue_covered:
            logger.debug(2, "Setscene covered task %s (%s)", task,
                         self.rqdata.get_user_idstring(task))
            self.task_skip(task)
            self.task_skip(task, "covered")
            return True

        if self.rq.check_stamp_task(task, taskname, cache=self.stampcache):
            logger.debug(2, "Stamp current task %s (%s)", task,
                         self.rqdata.get_user_idstring(task))
            self.task_skip(task)
            self.task_skip(task, "existing")
            return True

        taskdep = self.rqdata.dataCache.task_deps[fn]

@@ -1834,6 +1835,14 @@ class sceneQueueTaskCompleted(sceneQueueEvent):
    Event notifing a setscene task completed
    """

class runQueueTaskSkipped(runQueueEvent):
    """
    Event notifing a task was skipped
    """
    def __init__(self, task, stats, rq, reason):
        runQueueEvent.__init__(self, task, stats, rq)
        self.reason = reason

class runQueuePipe():
    """
    Abstraction for a pipe between a worker thread and the server
@@ -113,7 +113,7 @@ class ProcessServer(Process, BaseImplServer):
        self.event_queue.close()
        bb.event.unregister_UIHhandler(self.event_handle.value)
        self.command_channel.close()
        self.cooker.stop()
        self.cooker.shutdown(True)
        self.idle_commands(.1)

    def idle_commands(self, delay, fds = []):

@@ -169,51 +169,6 @@ class BitBakeXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
        self.end_headers()
        self.wfile.write(response)

class BitBakeUIEventServer(threading.Thread):
    class EventAdapter():
        """
        Adapter to wrap our event queue since the caller (bb.event) expects to
        call a send() method, but our actual queue only has put()
        """
        def __init__(self, notify):
            self.queue = []
            self.notify = notify
            self.qlock = threading.Lock()

        def send(self, event):
            self.qlock.acquire()
            self.queue.append(event)
            self.qlock.release()
            self.notify.set()

        def get(self):
            self.qlock.acquire()
            if len(self.queue) == 0:
                self.qlock.release()
                return None
            e = self.queue.pop(0)
            if len(self.queue) == 0:
                self.notify.clear()
            self.qlock.release()
            return e

    def __init__(self, connection):
        self.connection = connection
        self.notify = threading.Event()
        self.event = BitBakeUIEventServer.EventAdapter(self.notify)
        self.quit = False
        threading.Thread.__init__(self)

    def terminateServer(self):
        self.quit = True

    def run(self):
        while not self.quit:
            self.notify.wait(0.1)
            evt = self.event.get()
            if evt:
                self.connection.event.sendpickle(pickle.dumps(evt))


class XMLRPCProxyServer(BaseImplServer):
    """ not a real working server, but a stub for a proxy server connection
@@ -91,8 +91,7 @@ class SignatureGeneratorBasic(SignatureGenerator):
        basehash = {}

        for task in tasklist:
            data = d.getVar(task, False)
            lookupcache[task] = data
            data = lookupcache[task]

            if data is None:
                bb.error("Task %s from %s seems to be empty?!" % (task, fn))

@@ -115,16 +114,8 @@ class SignatureGeneratorBasic(SignatureGenerator):
            alldeps = sorted(seen)
            for dep in alldeps:
                data = data + dep
                if dep in lookupcache:
                    var = lookupcache[dep]
                elif dep[-1] == ']':
                    vf = dep[:-1].split('[')
                    var = d.getVarFlag(vf[0], vf[1], False)
                    lookupcache[dep] = var
                else:
                    var = d.getVar(dep, False)
                    lookupcache[dep] = var
                if var:
                var = lookupcache[dep]
                if var is not None:
                    data = data + str(var)
            self.basehash[fn + "." + task] = hashlib.md5(data).hexdigest()
            taskdeps[task] = alldeps
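The signature generator builds a task's base hash by concatenating the task code with each dependency name and value and hashing the result. A hedged standalone sketch of that computation (the `.encode()` call is added only so the sketch runs on Python 3; the real code operates on Python 2 strings):

```python
import hashlib

def task_basehash(task_code, dep_values):
    """Concatenate the task code with each sorted dependency name and its
    value (skipping unset ones), then hash with MD5, mirroring the hunk above."""
    data = task_code
    for dep in sorted(dep_values):
        data = data + dep
        var = dep_values[dep]
        if var is not None:
            data = data + str(var)
    return hashlib.md5(data.encode("utf-8")).hexdigest()

print(task_basehash("do_compile() { oe_runmake; }", {"CC": "gcc", "CFLAGS": None}))
```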
@@ -440,12 +440,12 @@ class HobHandler(gobject.GObject):
        self.building = False

    def cancel_parse(self):
        self.runCommand(["stateStop"])
        self.runCommand(["stateForceShutdown"])

    def cancel_build(self, force=False):
        if force:
            # Force the cooker to stop as quickly as possible
            self.runCommand(["stateStop"])
            self.runCommand(["stateForceShutdown"])
        else:
            # Wait for tasks to complete before shutting down, this helps
            # leave the workdir in a usable state

@@ -314,7 +314,7 @@ def main(server, eventHandler, params):
                break
            if shutdown == 1:
                print("\nSecond Keyboard Interrupt, stopping...\n")
                _, error = server.runCommand(["stateStop"])
                _, error = server.runCommand(["stateForceShutdown"])
                if error:
                    print('Unable to cleanly stop: %s' % error)
            if shutdown == 0:

@@ -117,5 +117,5 @@ def main (server, eventHandler, params):
    except KeyboardInterrupt:
        pass
    finally:
        server.runCommand(["stateStop"])
        server.runCommand(["stateForceShutdown"])

@@ -405,8 +405,9 @@ def main(server, eventHandler, params, tf = TerminalFilter):

            if isinstance(event, bb.command.CommandFailed):
                return_value = event.exitcode
                errors = errors + 1
                logger.error("Command execution failed: %s", event.error)
                if event.error:
                    errors = errors + 1
                    logger.error("Command execution failed: %s", event.error)
                main.shutdown = 2
                continue
            if isinstance(event, bb.command.CommandExit):

@@ -499,7 +500,7 @@ def main(server, eventHandler, params, tf = TerminalFilter):
                main.shutdown = 2
            if not params.observe_only and main.shutdown == 1:
                print("\nSecond Keyboard Interrupt, stopping...\n")
                _, error = server.runCommand(["stateStop"])
                _, error = server.runCommand(["stateForceShutdown"])
                if error:
                    logger.error("Unable to cleanly stop: %s" % error)
            if not params.observe_only and main.shutdown == 0:

@@ -520,7 +521,7 @@ def main(server, eventHandler, params, tf = TerminalFilter):
    if warnings:
        summary += pluralise("\nSummary: There was %s WARNING message shown.",
                             "\nSummary: There were %s WARNING messages shown.", warnings)
    if return_value:
    if return_value and errors:
        summary += pluralise("\nSummary: There was %s ERROR message shown, returning a non-zero exit code.",
                             "\nSummary: There were %s ERROR messages shown, returning a non-zero exit code.", errors)
    if summary:

@@ -350,7 +350,7 @@ class NCursesUI:
                exitflag = True
            if shutdown == 1:
                mw.appendText("Second Keyboard Interrupt, stopping...\n")
                _, error = server.runCommand(["stateStop"])
                _, error = server.runCommand(["stateForceShutdown"])
                if error:
                    print("Unable to cleanly stop: %s" % error)
            if shutdown == 0:
||||
|
||||
@@ -91,7 +91,9 @@
|
||||
<para>
|
||||
If you use BitBake to generate the ADT Installer tarball, you must
|
||||
<filename>source</filename> the environment setup script
|
||||
(<ulink url='&YOCTO_DOCS_REF_URL;#structure-core-script'><filename>&OE_INIT_FILE;</filename></ulink>)
|
||||
(<ulink url='&YOCTO_DOCS_REF_URL;#structure-core-script'><filename>&OE_INIT_FILE;</filename></ulink>
|
||||
or
|
||||
<ulink url='&YOCTO_DOCS_REF_URL;#structure-memres-core-script'><filename>oe-init-build-env-memres</filename></ulink>)
|
||||
located in the Source Directory before running the
|
||||
BitBake command that creates the tarball.
|
||||
</para>
|
||||
@@ -549,8 +551,12 @@
|
||||
|
||||
<para>
|
||||
Remember, before using any <filename>bitbake</filename> command, you
|
||||
must source the <filename>&OE_INIT_PATH;</filename> script located in
|
||||
the Source Directory and you must make sure your
|
||||
must source the build environment setup script
|
||||
(i.e.
|
||||
<ulink url='&YOCTO_DOCS_REF_URL;#structure-core-script'><filename>&OE_INIT_FILE;</filename></ulink>
|
||||
or
|
||||
<ulink url='&YOCTO_DOCS_REF_URL;#structure-memres-core-script'><filename>oe-init-build-env-memres</filename></ulink>)
|
||||
located in the Source Directory and you must make sure your
|
||||
<filename>conf/local.conf</filename> variables are correct.
|
||||
In particular, you need to be sure the
|
||||
<ulink url='&YOCTO_DOCS_REF_URL;#var-MACHINE'><filename>MACHINE</filename></ulink>
|
||||
|
||||
@@ -1812,6 +1812,8 @@
|
||||
environment, you must build the tool using BitBake.
|
||||
Thus, the environment must be set up using the
|
||||
<ulink url='&YOCTO_DOCS_REF_URL;#structure-core-script'><filename>&OE_INIT_FILE;</filename></ulink>
|
||||
or
|
||||
<ulink url='&YOCTO_DOCS_REF_URL;#structure-memres-core-script'><filename>oe-init-build-env-memres</filename></ulink>
|
||||
script found in the
|
||||
<link linkend='build-directory'>Build Directory</link>.
|
||||
The following commands build and invoke <filename>menuconfig</filename> assuming the
|
||||
@@ -2924,6 +2926,8 @@
|
||||
<para>
|
||||
This section describes a few tasks that involve packages:
|
||||
<itemizedlist>
|
||||
<listitem><para>Excluding packages from an image
|
||||
</para></listitem>
|
||||
<listitem><para>Incrementing a package revision number
|
||||
</para></listitem>
|
||||
<listitem><para>Handling a package name alias
|
||||
@@ -2938,6 +2942,50 @@
|
||||
</itemizedlist>
|
||||
</para>
|
||||
|
||||
<section id='excluding-packages-from-an-image'>
|
||||
<title>Excluding Packages from an Image</title>
|
||||
|
||||
<para>
|
||||
You might find it necessary to prevent specific packages
|
||||
from being installed into an image.
|
||||
If so, you can use several variables to direct the build
|
||||
system to essentially ignore installing recommended packages
|
||||
or to not install a package at all.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
The following list introduces variables you can use to
|
||||
prevent packages from being installed into your image.
|
||||
Each of these variables only works with IPK and RPM
|
||||
package types.
|
||||
Support for Debian packages does not exist.
|
||||
Also, you can use these variables from your
|
||||
<filename>local.conf</filename> file or attach them to a
|
||||
specific image recipe by using a recipe name override.
|
||||
For more detail on the variables, see the descriptions in the
|
||||
Yocto Project Reference Manual's glossary chapter.
|
||||
<itemizedlist>
|
||||
<listitem><para><ulink url='&YOCTO_DOCS_REF_URL;#var-BAD_RECOMMENDATIONS'><filename>BAD_RECOMMENDATIONS</filename></ulink>:
|
||||
Use this variable to specify "recommended-only"
|
||||
packages that you do not want installed.
|
||||
</para></listitem>
|
||||
<listitem><para><ulink url='&YOCTO_DOCS_REF_URL;#var-NO_RECOMMENDATIONS'><filename>NO_RECOMMENDATIONS</filename></ulink>:
|
||||
Use this variable to prevent all "recommended-only"
|
||||
packages from being installed.
|
||||
</para></listitem>
|
||||
<listitem><para><ulink url='&YOCTO_DOCS_REF_URL;#var-PACKAGE_EXCLUDE'><filename>PACKAGE_EXCLUDE</filename></ulink>:
|
||||
Use this variable to prevent specific packages from
|
||||
being installed regardless of whether they are
|
||||
"recommended-only" or not.
|
||||
You need to realize that the build process could
|
||||
fail with an error when you
|
||||
prevent the installation of a package whose presence
|
||||
is required by an installed package.
|
||||
</para></listitem>
|
||||
</itemizedlist>
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section id='incrementing-a-package-revision-number'>
|
||||
<title>Incrementing a Package Revision Number</title>
|
||||
|
||||
@@ -4980,15 +5028,15 @@
|
||||
BBFILES ?= ""
|
||||
|
||||
BBLAYERS ?= " \
|
||||
##COREBASE##/meta \
|
||||
##COREBASE##/meta-yocto \
|
||||
##COREBASE##/meta-yocto-bsp \
|
||||
##COREBASE##/meta-mylayer \
|
||||
##OEROOT##/meta \
|
||||
##OEROOT##/meta-yocto \
|
||||
##OEROOT##/meta-yocto-bsp \
|
||||
##OEROOT##/meta-mylayer \
|
||||
"
|
||||
|
||||
BBLAYERS_NON_REMOVABLE ?= " \
|
||||
##COREBASE##/meta \
|
||||
##COREBASE##/meta-yocto \
|
||||
##OEROOT##/meta \
|
||||
##OEROOT##/meta-yocto \
|
||||
"
|
||||
</literallayout>
|
||||
Creating and providing an archive of the
|
||||
|
||||
@@ -156,7 +156,7 @@
|
||||
"<ulink url='&YOCTO_DOCS_BSP_URL;#bsp-layers'>BSP Layers</ulink>" section in the
|
||||
Yocto Project Board Support Package (BSP) Developer's Guide.</para>
|
||||
<note>Four BSPs exist that are part of the
|
||||
Yocto Project release: <filename>atom-pc</filename>, <filename>beagleboard</filename>,
|
||||
Yocto Project release: <filename>genericx86</filename>, <filename>beagleboard</filename>,
|
||||
<filename>mpc8315e</filename>, and <filename>routerstationpro</filename>.
|
||||
The recipes and configurations for these four BSPs are located and dispersed
|
||||
within the <link linkend='source-directory'>Source Directory</link>.
|
||||
|
||||
@@ -509,7 +509,9 @@
|
||||
This term refers to the area used by the OpenEmbedded build system for builds.
|
||||
The area is created when you <filename>source</filename> the setup
|
||||
environment script that is found in the Source Directory
|
||||
(i.e. <ulink url='&YOCTO_DOCS_REF_URL;#structure-core-script'><filename>&OE_INIT_FILE;</filename></ulink>).
|
||||
(i.e. <ulink url='&YOCTO_DOCS_REF_URL;#structure-core-script'><filename>&OE_INIT_FILE;</filename></ulink>
|
||||
or
|
||||
<ulink url='&YOCTO_DOCS_REF_URL;#structure-memres-core-script'><filename>oe-init-build-env-memres</filename></ulink>).
|
||||
The <ulink url='&YOCTO_DOCS_REF_URL;#var-TOPDIR'><filename>TOPDIR</filename></ulink>
|
||||
variable points to the Build Directory.</para>
|
||||
|
||||
|
||||
|
[Binary changes: 10 documentation figure images updated (before/after sizes only; no filenames captured).]

documentation/ref-manual/closer-look.xml (new file, 1055 lines)
@@ -682,9 +682,11 @@
|
||||
<para>
|
||||
Yes - you can easily do this.
|
||||
When you use BitBake to build an image, all the build output
|
||||
goes into the directory created when you source the
|
||||
goes into the directory created when you run the
|
||||
build environment setup script (i.e.
|
||||
<link linkend='structure-core-script'><filename>&OE_INIT_FILE;</filename></link>
|
||||
setup script.
|
||||
or
|
||||
<link linkend='structure-memres-core-script'><filename>oe-init-build-env-memres</filename></link>).
|
||||
By default, this <ulink url='&YOCTO_DOCS_DEV_URL;#build-directory'>Build Directory</ulink>
|
||||
is named <filename>build</filename> but can be named
|
||||
anything you want.
|
||||
|
||||
|
@@ -230,6 +230,30 @@
    </para>
</section>
</section>

<section id='1.3-linux-kernel-naming'>
    <title>Linux Kernel Naming</title>

    <para>
        The naming scheme for kernel output binaries has been changed to
        now include
        <link linkend='var-PE'><filename>PE</filename></link> as part of the
        filename:
        <literallayout class='monospaced'>
     KERNEL_IMAGE_BASE_NAME ?= "${KERNEL_IMAGETYPE}-${PE}-${PV}-${PR}-${MACHINE}-${DATETIME}"
        </literallayout>
    </para>

    <para>
        Because the <filename>PE</filename> variable is not set by default,
        these binary files could result with names that include two dash
        characters.
        Here is an example:
        <literallayout class='monospaced'>
     bzImage--3.10.9+git0+cd502a8814_7144bcc4b8-r0-qemux86-64-20130830085431.bin
        </literallayout>
    </para>
</section>
</section>

<section id='moving-to-the-yocto-project-1.4-release'>
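The manual text above explains why an unset `PE` yields a double dash in the kernel image name. A small illustrative Python sketch of the same substitution (the helper is hypothetical; the real assembly is done by BitBake variable expansion, and the `.bin` suffix is appended elsewhere):

```python
def kernel_image_base_name(imagetype, pe, pv, pr, machine, datetime):
    # Mirrors KERNEL_IMAGE_BASE_NAME ?= "${KERNEL_IMAGETYPE}-${PE}-${PV}-${PR}-${MACHINE}-${DATETIME}"
    return "%s-%s-%s-%s-%s-%s" % (imagetype, pe, pv, pr, machine, datetime)

# PE is empty by default, so two dashes appear back to back in the file name.
print(kernel_image_base_name("bzImage", "", "3.10.9+git0+cd502a8814_7144bcc4b8",
                              "r0", "qemux86-64", "20130830085431"))
# bzImage--3.10.9+git0+cd502a8814_7144bcc4b8-r0-qemux86-64-20130830085431
```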
||||
@@ -322,10 +322,10 @@
|
||||
$ bitbake --help
|
||||
Usage: bitbake [options] [package ...]
|
||||
|
||||
Executes the specified task (default is 'build') for a given set of BitBake files.
|
||||
It expects that BBFILES is defined, which is a space separated list of files to
|
||||
be executed. BBFILES does support wildcards.
|
||||
Default BBFILES are the .bb files in the current directory.
|
||||
Executes the specified task (default is 'build') for a given set of BitBake files.
|
||||
It expects that BBFILES is defined, which is a space separated list of files to
|
||||
be executed. BBFILES does support wildcards.
|
||||
Default BBFILES are the .bb files in the current directory.
|
||||
|
||||
Options:
|
||||
--version show program's version number and exit
|
||||
@@ -348,6 +348,10 @@ Options:
|
||||
what you are doing). Depending on the base.bbclass a
|
||||
listtasks tasks is defined and will show available
|
||||
tasks
|
||||
-C INVALIDATE_STAMP, --clear-stamp=INVALIDATE_STAMP
|
||||
Invalidate the stamp for the specified cmd such as
|
||||
'compile' and run the default task for the specified
|
||||
target(s)
|
||||
-r PREFILE, --read=PREFILE
|
||||
read the specified file before bitbake.conf
|
||||
-R POSTFILE, --postread=POSTFILE
|
||||
@@ -360,11 +364,12 @@ Options:
|
||||
don't execute, just dump out the signature
|
||||
construction information
|
||||
-p, --parse-only quit after parsing the BB files (developers only)
|
||||
-s, --show-versions show current and preferred versions of all packages
|
||||
-s, --show-versions show current and preferred versions of all recipes
|
||||
-e, --environment show the global or per-package environment (this is
|
||||
what used to be bbread)
|
||||
-g, --graphviz emit the dependency trees of the specified packages in
|
||||
the dot syntax
|
||||
the dot syntax, and the pn-buildlist to show the build
|
||||
list
|
||||
-I EXTRA_ASSUME_PROVIDED, --ignore-deps=EXTRA_ASSUME_PROVIDED
|
||||
Assume these dependencies don't exist and are already
|
||||
provided (equivalent to ASSUME_PROVIDED). Useful to
|
||||
@@ -374,9 +379,17 @@ Options:
|
||||
-P, --profile profile the command and print a report
|
||||
-u UI, --ui=UI userinterface to use
|
||||
-t SERVERTYPE, --servertype=SERVERTYPE
|
||||
Choose which server to use, none, process or xmlrpc
|
||||
Choose which server to use, process or xmlrpc
|
||||
--revisions-changed Set the exit code depending on whether upstream
|
||||
floating revisions have changed or not
|
||||
--server-only Run bitbake without UI, the frontend can connect with
|
||||
bitbake server itself
|
||||
-B BIND, --bind=BIND The name/address for the bitbake server to bind to
|
||||
--no-setscene Do not run any setscene tasks, forces builds
|
||||
--remote-server=REMOTE_SERVER
|
||||
Connect to the specified server
|
||||
-m, --kill-server Terminate the remote server
|
||||
--observe-only Connect to a server as an observing-only client
|
||||
</screen>
|
||||
</section>
|
||||
|
||||
|
||||
@@ -99,6 +99,8 @@
|
||||
|
||||
<xi:include href="usingpoky.xml"/>
|
||||
|
||||
<xi:include href="closer-look.xml"/>
|
||||
|
||||
<xi:include href="technical-details.xml"/>
|
||||
|
||||
<xi:include href="migration.xml"/>
|
||||
|
||||
@@ -74,7 +74,11 @@
|
||||
the source tree is combined with the output.
|
||||
The <ulink url='&YOCTO_DOCS_DEV_URL;#build-directory'>Build Directory</ulink>
|
||||
is created initially when you <filename>source</filename>
|
||||
the OpenEmbedded build environment setup script <filename>&OE_INIT_FILE;</filename>.
|
||||
the OpenEmbedded build environment setup script
|
||||
(i.e.
|
||||
<link linkend='structure-core-script'><filename>&OE_INIT_FILE;</filename></link>
|
||||
or
|
||||
<link linkend='structure-memres-core-script'><filename>oe-init-build-env-memres</filename></link>).
|
||||
</para>
|
||||
|
||||
<para>
|
||||
@@ -85,8 +89,10 @@
|
||||
the setup script.
|
||||
For information on separating output from your local
|
||||
Source Directory files, see the
|
||||
"<link linkend='structure-core-script'><filename>&OE_INIT_FILE;</filename></link>"
|
||||
section.
|
||||
"<link linkend='structure-core-script'><filename>&OE_INIT_FILE;</filename></link>
|
||||
and
|
||||
"<link linkend='structure-memres-core-script'><filename>oe-init-build-env-memres</filename></link>"
|
||||
sections.
|
||||
</para>
|
||||
</section>
|
||||
|
||||
@@ -143,7 +149,7 @@
|
||||
which is a Yocto Project build user interface.
|
||||
For more information on the Hob, see the
|
||||
<ulink url='&YOCTO_HOME_URL;/tools-resources/projects/hob'>Hob Project</ulink>
|
||||
webpage.
|
||||
web page.
|
||||
</para>
|
||||
</section>
|
||||
|
||||
@@ -162,7 +168,9 @@
|
||||
This directory contains various integration scripts that implement
|
||||
extra functionality in the Yocto Project environment (e.g. QEMU scripts).
|
||||
The <link linkend="structure-core-script"><filename>&OE_INIT_FILE;</filename></link>
|
||||
script appends this directory to the shell's
|
||||
and
|
||||
<link linkend='structure-memres-core-script'><filename>oe-init-build-env-memres</filename></link>
|
||||
scripts append this directory to the shell's
|
||||
<filename>PATH</filename> environment variable.
|
||||
</para>
|
||||
|
||||
@@ -177,11 +185,19 @@
|
||||
<title><filename>&OE_INIT_FILE;</filename></title>
|
||||
|
||||
<para>
|
||||
This script sets up the OpenEmbedded build environment.
|
||||
This script is one of two scripts that set up the OpenEmbedded build
|
||||
environment.
|
||||
For information on the other script, see the
|
||||
"<link linkend='structure-memres-core-script'><filename>oe-init-build-env-memres</filename></link>"
|
||||
section.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Running this script with the <filename>source</filename> command in
|
||||
a shell makes changes to <filename>PATH</filename> and sets other
|
||||
core BitBake variables based on the current working directory.
|
||||
You need to run this script before running BitBake commands.
|
||||
You need to run an environment setup script before running BitBake
|
||||
commands.
|
||||
The script uses other scripts within the
|
||||
<filename>scripts</filename> directory to do the bulk of the work.
|
||||
</para>
|
||||
@@ -191,7 +207,7 @@
|
||||
<ulink url='&YOCTO_DOCS_DEV_URL;#build-directory'>Build Directory</ulink>
|
||||
argument creates the <filename>build</filename> directory.
|
||||
If you provide a Build Directory argument when you
|
||||
<filename>source</filename> the script, you direct OpenEmbedded
|
||||
<filename>source</filename> the script, you direct the OpenEmbedded
|
||||
build system to create a Build Directory of your choice.
|
||||
For example, the following command creates a Build Directory named
|
||||
<filename>mybuilds</filename> that is outside of the
|
||||
@@ -211,6 +227,81 @@
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section id='structure-memres-core-script'>
|
||||
<title><filename>oe-init-build-env-memres</filename></title>
|
||||
|
||||
<para>
|
||||
This script is one of two scripts that set up the OpenEmbedded build
|
||||
environment.
|
||||
Setting up the environment with this script uses a
|
||||
memory-resident BitBake.
|
||||
For information on the other setup script, see the
|
||||
"<link linkend='structure-core-script'><filename>&OE_INIT_FILE;</filename></link>"
|
||||
section.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Memory-resident BitBake resides in memory until you specifically
|
||||
remove it using the following BitBake command:
|
||||
<literallayout class='monospaced'>
|
||||
$ bitbake -m
|
||||
</literallayout>
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Running this script with the <filename>source</filename> command in
|
||||
a shell makes changes to <filename>PATH</filename> and sets other
|
||||
core BitBake variables based on the current working directory.
|
||||
One of these variables is the
|
||||
<link linkend='var-BBSERVER'><filename>BBSERVER</filename></link>
|
||||
variable, which allows the OpenEmbedded build system to locate
|
||||
the server that is running BitBake.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
You need to run an environment setup script before running BitBake
|
||||
commands.
|
||||
Following is the script syntax:
|
||||
<literallayout class='monospaced'>
|
||||
$ source oe-init-build-env-memres <port_number> <build_dir>
|
||||
</literallayout>
|
||||
The script uses other scripts within the
|
||||
<filename>scripts</filename> directory to do the bulk of the work.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
If you do not provide a port number with the script, the default
|
||||
port "12345" is used.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
By default, running this script without a
|
||||
<ulink url='&YOCTO_DOCS_DEV_URL;#build-directory'>Build Directory</ulink>
|
||||
argument creates the <filename>build</filename> directory.
|
||||
If you provide a Build Directory argument when you
|
||||
<filename>source</filename> the script, you direct the OpenEmbedded
|
||||
build system to create a Build Directory of your choice.
|
||||
For example, the following command uses the default port number
|
||||
"12345" and creates a Build Directory named
|
||||
<filename>mybuilds</filename> that is outside of the
|
||||
<ulink url='&YOCTO_DOCS_DEV_URL;#source-directory'>Source Directory</ulink>:
|
||||
<literallayout class='monospaced'>
|
||||
$ source oe-init-build-env-memres ~/mybuilds
|
||||
</literallayout>
|
||||
<note>
|
||||
The OpenEmbedded build system does not support file or
|
||||
directory names that contain spaces.
|
||||
If you attempt to run the
|
||||
<filename>oe-init-build-env-memres</filename> script
|
||||
from a Source Directory that contains spaces in either the
|
||||
filenames or directory names, the script returns an error
|
||||
indicating no such file or directory.
|
||||
Be sure to use a Source Directory free of names containing
|
||||
spaces.
|
||||
</note>
|
||||
</para>
|
||||
</section>
|
||||
|
||||
<section id='structure-basic-top-level'>
|
||||
<title><filename>LICENSE, README, and README.hardware</filename></title>
|
||||
|
||||
@@ -243,47 +334,123 @@
<title><filename>build/conf/local.conf</filename></title>

<para>
This configuration file contains all the local user configurations
for your build environment.
The <filename>local.conf</filename> file contains documentation on
the various configuration options.
Any variable set here overrides any variable set elsewhere within
the environment unless that variable is hard-coded within a file
(e.g. by using '=' instead of '?=').
Some variables are hard-coded for various reasons but these
variables are relatively rare.
</para>

<para>
Edit this file to set the
<filename><link linkend='var-MACHINE'>MACHINE</link></filename>
for which you want to build, which package types you wish to use
(<link linkend='var-PACKAGE_CLASSES'><filename>PACKAGE_CLASSES</filename></link>),
the location from which you want to download files
(<filename><link linkend='var-DL_DIR'>DL_DIR</link></filename>),
and how you want your host machine to use resources
(<link linkend='var-BB_NUMBER_THREADS'><filename>BB_NUMBER_THREADS</filename></link>
and
<link linkend='var-PARALLEL_MAKE'><filename>PARALLEL_MAKE</filename></link>).
</para>

<para>
If <filename>local.conf</filename> is not present when you
start the build, the OpenEmbedded build system creates it from
<filename>local.conf.sample</filename> when
you <filename>source</filename> the top-level build environment
setup script (i.e.
<link linkend='structure-core-script'><filename>&OE_INIT_FILE;</filename></link>
or
<link linkend='structure-memres-core-script'><filename>oe-init-build-env-memres</filename></link>).
</para>

<para>
The source <filename>local.conf.sample</filename> file used
depends on the <filename>$TEMPLATECONF</filename> script variable,
which defaults to <filename>/meta-yocto/conf</filename>
when you are building from the Yocto Project development
environment and defaults to <filename>/meta/conf</filename> when
you are building from the OpenEmbedded Core environment.
Because the script variable points to the source of the
<filename>local.conf.sample</filename> file, this implies that
you can configure your build environment from any layer by setting
the variable in the top-level build environment setup script as
follows:
<literallayout class='monospaced'>
TEMPLATECONF=<your_layer>/conf
</literallayout>
Once the build process gets the sample file, it uses
<filename>sed</filename> to substitute final
<filename>${</filename><link linkend='var-OEROOT'><filename>OEROOT</filename></link><filename>}</filename>
values for all <filename>##OEROOT##</filename> values.
<note>
You can see how the <filename>TEMPLATECONF</filename> variable
is used by looking at the
<filename>/scripts/oe-setup-builddir</filename> script in the
<ulink url='&YOCTO_DOCS_DEV_URL;#source-directory'>Source Directory</ulink>.
You can find the Yocto Project version of the
<filename>local.conf.sample</filename> file in the
<filename>/meta-yocto/conf</filename> directory.
</note>
</para>
</section>

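<para>
As a rough illustration of the substitution step described above (a
simplified sketch, not the actual <filename>oe-setup-builddir</filename>
code), the setup effectively runs something like:
<literallayout class='monospaced'>
sed -e "s|##OEROOT##|$OEROOT|g" \
    "$TEMPLATECONF/local.conf.sample" > conf/local.conf
</literallayout>
</para>
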
<section id='structure-build-conf-bblayers.conf'>
<title><filename>build/conf/bblayers.conf</filename></title>

<para>
This configuration file defines
<ulink url='&YOCTO_DOCS_DEV_URL;#understanding-and-creating-layers'>layers</ulink>,
which are directory trees, traversed (or walked) by BitBake.
</para>

<para>
The <filename>bblayers.conf</filename> file uses the
<link linkend='var-BBLAYERS'><filename>BBLAYERS</filename></link> variable to
list the layers BitBake tries to find.
The file uses the
<link linkend='var-BBLAYERS_NON_REMOVABLE'><filename>BBLAYERS_NON_REMOVABLE</filename></link>
variable to list layers that must not be removed.
If <filename>bblayers.conf</filename> is not present when you
start the build, the OpenEmbedded build system creates it from
<filename>bblayers.conf.sample</filename> when
you <filename>source</filename> the top-level build environment
setup script (i.e.
<link linkend='structure-core-script'><filename>&OE_INIT_FILE;</filename></link>
or
<link linkend='structure-memres-core-script'><filename>oe-init-build-env-memres</filename></link>).
</para>

<para>
The source <filename>bblayers.conf.sample</filename> file used
depends on the <filename>$TEMPLATECONF</filename> script variable,
which defaults to <filename>/meta-yocto/conf</filename>
when you are building from the Yocto Project development
environment and defaults to <filename>/meta/conf</filename> when
you are building from the OpenEmbedded Core environment.
Because the script variable points to the source of the
<filename>bblayers.conf.sample</filename> file, this implies that
you can base your build from any layer by setting the variable in
the top-level build environment setup script as follows:
<literallayout class='monospaced'>
TEMPLATECONF=<your_layer>/conf
</literallayout>
Once the build process gets the sample file, it uses
<filename>sed</filename> to substitute final
<filename>${</filename><link linkend='var-OEROOT'><filename>OEROOT</filename></link><filename>}</filename>
values for all <filename>##OEROOT##</filename> values.
<note>
You can see how the <filename>TEMPLATECONF</filename> variable
is used by looking at the
<filename>/scripts/oe-setup-builddir</filename> script in the
<ulink url='&YOCTO_DOCS_DEV_URL;#source-directory'>Source Directory</ulink>.
You can find the Yocto Project version of the
<filename>bblayers.conf.sample</filename> file in the
<filename>/meta-yocto/conf</filename> directory.
</note>
</para>
</section>

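<para>
For reference, a minimal <filename>bblayers.conf</filename> generated this
way might look like the following (the layer paths are placeholders for
wherever your layers actually live):
<literallayout class='monospaced'>
BBLAYERS ?= " \
  /path/to/poky/meta \
  /path/to/poky/meta-yocto \
  /path/to/poky/meta-yocto-bsp \
  "
BBLAYERS_NON_REMOVABLE ?= " \
  /path/to/poky/meta \
  /path/to/poky/meta-yocto \
  "
</literallayout>
</para>
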
@@ -252,9 +252,47 @@
<glossentry id='var-BAD_RECOMMENDATIONS'><glossterm>BAD_RECOMMENDATIONS</glossterm>
<glossdef>
<para>
Lists "recommended-only" packages to not install.
Recommended-only packages are packages installed only
through the
<link linkend='var-RRECOMMENDS'><filename>RRECOMMENDS</filename></link>
variable.
You can prevent any of these "recommended" packages from
being installed by listing them with the
<filename>BAD_RECOMMENDATIONS</filename> variable:
<literallayout class='monospaced'>
BAD_RECOMMENDATIONS = "<package_name> <package_name> <package_name> ..."
</literallayout>
You can set this variable globally in your
<filename>local.conf</filename> file or you can attach it to
a specific image recipe by using the recipe name override:
<literallayout class='monospaced'>
BAD_RECOMMENDATIONS_pn-<target_image> = "<package_name>"
</literallayout>
</para>

<para>
It is important to realize that if you choose to not install
packages using this variable and some other packages are
dependent on them (i.e. listed in a recipe's
<link linkend='var-RDEPENDS'><filename>RDEPENDS</filename></link>
variable), the OpenEmbedded build system ignores your
request and will install the packages to avoid dependency
errors.
</para>

<para>
Support for this variable exists only when using the
IPK and RPM packaging backends.
Support does not exist for DEB.
</para>

<para>
See the
<link linkend='var-NO_RECOMMENDATIONS'><filename>NO_RECOMMENDATIONS</filename></link>
and the
<link linkend='var-PACKAGE_EXCLUDE'><filename>PACKAGE_EXCLUDE</filename></link>
variables for related information.
</para>
</glossdef>
</glossentry>
@@ -455,6 +493,14 @@
</glossdef>
</glossentry>

<glossentry id='var-BB_NUMBER_THREADS'><glossterm>BB_NUMBER_THREADS</glossterm>
<glossdef>
<para>The maximum number of tasks BitBake should run in parallel at any one time.
If your host development system supports multiple cores, a good rule of thumb
is to set this variable to twice the number of cores.</para>
</glossdef>
</glossentry>

<glossentry id='var-BBCLASSEXTEND'><glossterm>BBCLASSEXTEND</glossterm>
<glossdef>
<para>
@@ -479,74 +525,6 @@
</glossdef>
</glossentry>

<glossentry id='var-BBFILE_COLLECTIONS'><glossterm>BBFILE_COLLECTIONS</glossterm>
<glossdef>
<para>Lists the names of configured layers.
@@ -606,13 +584,6 @@
</glossdef>
</glossentry>

<glossentry id='var-BBINCLUDELOGS'><glossterm>BBINCLUDELOGS</glossterm>
<glossdef>
<para>Variable that controls how BitBake displays logs on build failure.</para>
@@ -672,6 +643,95 @@ Core layer for images cannot be removed
</glossdef>
</glossentry>

<glossentry id='var-BBMASK'><glossterm>BBMASK</glossterm>
<glossdef>
<para>
Prevents BitBake from processing recipes and recipe
append files.
Use the <filename>BBMASK</filename> variable from within the
<filename>conf/local.conf</filename> file found
in the
<ulink url='&YOCTO_DOCS_DEV_URL;#build-directory'>Build Directory</ulink>.
</para>

<para>
You can use the <filename>BBMASK</filename> variable
to "hide" these <filename>.bb</filename> and
<filename>.bbappend</filename> files.
BitBake ignores any recipe or recipe append files that
match the expression.
It is as if BitBake does not see them at all.
Consequently, matching files are not parsed or otherwise
used by BitBake.</para>
<para>
The value you provide is passed to Python's regular
expression compiler.
The expression is compared against the full paths to
the files.
For complete syntax information, see Python's
documentation at
<ulink url='http://docs.python.org/release/2.3/lib/re-syntax.html'></ulink>.
</para>

<para>
The following example uses a complete regular expression
to tell BitBake to ignore all recipe and recipe append
files in the <filename>/meta-ti/recipes-misc/</filename>
directory:
<literallayout class='monospaced'>
BBMASK = "/meta-ti/recipes-misc/"
</literallayout>
If you want to mask out multiple directories or recipes,
use the vertical bar to separate the regular expression
fragments.
This next example masks out multiple directories and
individual recipes:
<literallayout class='monospaced'>
BBMASK = "meta-ti/recipes-misc/|meta-ti/recipes-ti/packagegroup/"
BBMASK .= "|.*meta-oe/recipes-support/"
BBMASK .= "|.*openldap"
BBMASK .= "|.*opencv"
BBMASK .= "|.*lzma"
</literallayout>
Notice how the vertical bar is used to append the fragments.
<note>
When specifying a directory name, use the trailing
slash character to ensure you match just that directory
name.
</note>
</para>
</glossdef>
</glossentry>

<glossentry id='var-BBPATH'><glossterm>BBPATH</glossterm>
<glossdef>
<para>Used by BitBake to locate <filename>.bbclass</filename> and configuration files.
This variable is analogous to the <filename>PATH</filename> variable.</para>
</glossdef>
</glossentry>

<glossentry id='var-BBSERVER'><glossterm>BBSERVER</glossterm>
<glossdef>
<para>
Points to the server that runs memory-resident BitBake.
This variable is set by the
<link linkend='structure-memres-core-script'><filename>oe-init-build-env-memres</filename></link>
setup script and should not be hand-edited.
The variable is only used when you employ memory-resident
BitBake.
The setup script exports the value as follows:
<literallayout class='monospaced'>
export BBSERVER=localhost:$port
</literallayout>
For more information on how the
<filename>BBSERVER</filename> is used, see the
<filename>oe-init-build-env-memres</filename> script, which
is located in the
<ulink url='&YOCTO_DOCS_DEV_URL;#source-directory'>Source Directory</ulink>.
</para>
</glossdef>
</glossentry>
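<para>
For example, after sourcing the memory-resident setup script you can
confirm the exported value from your shell (the port number shown here is
purely illustrative):
<literallayout class='monospaced'>
$ echo $BBSERVER
localhost:12345
</literallayout>
</para>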

<glossentry id='var-BINCONFIG_GLOB'><glossterm>BINCONFIG_GLOB</glossterm>
<glossdef>
<para>
@@ -724,13 +784,16 @@ Core layer for images cannot be removed

<glossentry id='var-BUILDDIR'><glossterm>BUILDDIR</glossterm>
<glossdef>
<para>
Points to the location of the
<ulink url='&YOCTO_DOCS_DEV_URL;#build-directory'>Build Directory</ulink>.
You can define this directory indirectly through the
<link linkend='structure-core-script'><filename>&OE_INIT_FILE;</filename></link>
and
<link linkend='structure-memres-core-script'><filename>oe-init-build-env-memres</filename></link>
scripts by passing in a Build Directory path when you run
the scripts.
If you run the scripts and do not provide a Build Directory
path, the <filename>BUILDDIR</filename> defaults to
<filename>build</filename> in the current directory.
</para>
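<para>
For example, assuming the setup scripts live in your Source Directory, the
following selects (and, if necessary, creates) a Build Directory named
<filename>~/builds/qemux86</filename>:
<literallayout class='monospaced'>
$ source &OE_INIT_FILE; ~/builds/qemux86
</literallayout>
</para>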
@@ -968,6 +1031,28 @@ Core layer for images cannot be removed
have been built and have their contents in the appropriate
sysroots before the recipe's configure task is executed.
</para>

<para>
Consider this simple example for two recipes named "a" and
"b" that produce similarly named packages.
In this example, the <filename>DEPENDS</filename>
statement appears in the "a" recipe:
<literallayout class='monospaced'>
DEPENDS = "b"
</literallayout>
Here, the dependency is such that the
<filename>do_configure</filename> task for recipe "a"
depends on the <filename>do_populate_sysroot</filename>
task of recipe "b".
This means anything that recipe "b" puts into sysroot
is available when recipe "a" is configuring itself.
</para>

<para>
For information on runtime dependencies, see the
<link linkend='var-RDEPENDS'><filename>RDEPENDS</filename></link>
variable.
</para>
</glossdef>
</glossentry>

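<para>
To make the "a" and "b" example above a little more concrete, a
hypothetical recipe <filename>a_1.0.bb</filename> (every field here is
illustrative only and not taken from any real layer) could declare the
build-time dependency like this:
<literallayout class='monospaced'>
SUMMARY = "Example application that links against library b"
LICENSE = "MIT"

DEPENDS = "b"

inherit autotools
</literallayout>
</para>
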
@@ -1145,37 +1230,40 @@ Core layer for images cannot be removed
<glossentry id='var-DL_DIR'><glossterm>DL_DIR</glossterm>
<glossdef>
<para>
The central download directory used by the build process to
store downloads.
You can set this directory by defining the
<filename>DL_DIR</filename> variable in the
<filename>/conf/local.conf</filename> file.
This directory is self-maintaining and you should not have
to touch it.
By default, the directory is <filename>downloads</filename>
in the
<ulink url='&YOCTO_DOCS_DEV_URL;#build-directory'>Build Directory</ulink>.
<literallayout class='monospaced'>
#DL_DIR ?= "${TOPDIR}/downloads"
</literallayout>
To specify a different download directory, simply remove
the comment from the line and provide your directory.
</para>

<para>
During a first build, the system downloads many different
source code tarballs from various upstream projects.
Downloading can take a while, particularly if your network
connection is slow.
Tarballs are all stored in the directory defined by
<filename>DL_DIR</filename> and the build system looks there
first to find source tarballs.
<note>
When wiping and rebuilding, you can preserve this
directory to speed up this part of subsequent builds.
</note>
</para>

<para>
You can safely share this directory between multiple builds
on the same development machine.
For additional information on how the build process gets
source files when working behind a firewall or proxy server,
see this specific question in the
@@ -2784,7 +2872,7 @@ recipes-graphics/xorg-font/font-alias_1.0.3.bb:PR = "${INC_PR}.3"
MACHINE ?= "qemuppc"
MACHINE ?= "qemux86"
MACHINE ?= "qemux86-64"
MACHINE ?= "atom-pc"
MACHINE ?= "genericx86"
MACHINE ?= "beagleboard"
MACHINE ?= "mpc8315e-rdb"
MACHINE ?= "routerstationpro"
@@ -3136,6 +3224,59 @@ recipes-graphics/xorg-font/font-alias_1.0.3.bb:PR = "${INC_PR}.3"
</glossdef>
</glossentry>

<glossentry id='var-NO_RECOMMENDATIONS'><glossterm>NO_RECOMMENDATIONS</glossterm>
<glossdef>
<para>
Prevents installation of all "recommended-only" packages.
Recommended-only packages are packages installed only
through the
<link linkend='var-RRECOMMENDS'><filename>RRECOMMENDS</filename></link>
variable.
Setting the <filename>NO_RECOMMENDATIONS</filename> variable
to "1" turns this feature on:
<literallayout class='monospaced'>
NO_RECOMMENDATIONS = "1"
</literallayout>
You can set this variable globally in your
<filename>local.conf</filename> file or you can attach it to
a specific image recipe by using the recipe name override:
<literallayout class='monospaced'>
NO_RECOMMENDATIONS_pn-<target_image> = "1"
</literallayout>
</para>

<para>
It is important to realize that if you choose to not install
packages using this variable and some other packages are
dependent on them (i.e. listed in a recipe's
<link linkend='var-RDEPENDS'><filename>RDEPENDS</filename></link>
variable), the OpenEmbedded build system ignores your
request and will install the packages to avoid dependency
errors.
<note>
Some recommended packages might be required for certain
system functionality, such as kernel modules.
It is up to you to add any such packages back by using the
<link linkend='var-IMAGE_INSTALL'><filename>IMAGE_INSTALL</filename></link>
variable.
</note>
</para>

<para>
Support for this variable exists only when using the
IPK and RPM packaging backends.
Support does not exist for DEB.
</para>

<para>
See the
<link linkend='var-BAD_RECOMMENDATIONS'><filename>BAD_RECOMMENDATIONS</filename></link>
and the
<link linkend='var-PACKAGE_EXCLUDE'><filename>PACKAGE_EXCLUDE</filename></link>
variables for related information.
</para>
</glossdef>
</glossentry>
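<para>
As a minimal illustration of the note above (the module name is just a
placeholder), you could disable all recommended-only packages globally and
then explicitly add back a kernel module your image still needs:
<literallayout class='monospaced'>
NO_RECOMMENDATIONS = "1"
IMAGE_INSTALL_append = " kernel-module-<module_name>"
</literallayout>
</para>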
</glossdiv>

<glossdiv id='var-glossary-o'><title>O</title>
@@ -3215,6 +3356,28 @@ recipes-graphics/xorg-font/font-alias_1.0.3.bb:PR = "${INC_PR}.3"
</glossdef>
</glossentry>

<glossentry id='var-OEROOT'><glossterm>OEROOT</glossterm>
<glossdef>
<para>
The directory from which the top-level build environment
setup script is sourced.
The Yocto Project makes two top-level build environment
setup scripts available:
<link linkend='structure-core-script'><filename>&OE_INIT_FILE;</filename></link>
and
<link linkend='structure-memres-core-script'><filename>oe-init-build-env-memres</filename></link>.
When you run one of these scripts, the
<filename>OEROOT</filename> variable resolves to the
directory that holds the script.
</para>

<para>
For additional information on how this variable is used,
see the initialization scripts.
</para>
</glossdef>
</glossentry>

<glossentry id='var-OLDEST_KERNEL'><glossterm>OLDEST_KERNEL</glossterm>
<glossdef>
<para>
@@ -3296,6 +3459,51 @@ recipes-graphics/xorg-font/font-alias_1.0.3.bb:PR = "${INC_PR}.3"
</glossdef>
</glossentry>

<glossentry id='var-PACKAGE_EXCLUDE'><glossterm>PACKAGE_EXCLUDE</glossterm>
<glossdef>
<para>
Lists packages that should not be installed into an image.
For example:
<literallayout class='monospaced'>
PACKAGE_EXCLUDE = "<package_name> <package_name> <package_name> ..."
</literallayout>
You can set this variable globally in your
<filename>local.conf</filename> file or you can attach it to
a specific image recipe by using the recipe name override:
<literallayout class='monospaced'>
PACKAGE_EXCLUDE_pn-<target_image> = "<package_name>"
</literallayout>
</para>

<para>
If you choose to not install
a package using this variable and some other package is
dependent on it (i.e. listed in a recipe's
<link linkend='var-RDEPENDS'><filename>RDEPENDS</filename></link>
variable), the OpenEmbedded build system generates a fatal
installation error.
Because the build system halts the process with a fatal
error, you can use the variable with an iterative
development process to remove specific components from a
system.
</para>

<para>
Support for this variable exists only when using the
IPK and RPM packaging backends.
Support does not exist for DEB.
</para>

<para>
See the
<link linkend='var-NO_RECOMMENDATIONS'><filename>NO_RECOMMENDATIONS</filename></link>
and the
<link linkend='var-BAD_RECOMMENDATIONS'><filename>BAD_RECOMMENDATIONS</filename></link>
variables for related information.
</para>
</glossdef>
</glossentry>

<glossentry id='var-PACKAGE_EXTRA_ARCHS'><glossterm>PACKAGE_EXTRA_ARCHS</glossterm>
<glossdef>
<para>Specifies the list of architectures compatible with the device CPU.
@@ -3464,6 +3672,18 @@ recipes-graphics/xorg-font/font-alias_1.0.3.bb:PR = "${INC_PR}.3"
</glossdef>
</glossentry>

<glossentry id='var-PE'><glossterm>PE</glossterm>
<glossdef>
<para>
The epoch of the recipe.
By default, this variable is unset.
The field is used to make upgrades possible when the
versioning scheme changes in some backwards incompatible
way.
</para>
</glossdef>
</glossentry>

<glossentry id='var-PF'><glossterm>PF</glossterm>
<glossdef>
<para>Specifies the recipe or package name and includes all version and revision
@@ -3575,6 +3795,46 @@ recipes-graphics/xorg-font/font-alias_1.0.3.bb:PR = "${INC_PR}.3"
</glossdef>
</glossentry>

<glossentry id='var-PREFERRED_PROVIDER'><glossterm>PREFERRED_PROVIDER</glossterm>
<glossdef>
<para>
If multiple recipes provide an item, this variable
determines which recipe should be given preference.
You should always suffix the variable with the name of the
provided item, and you should set it to the
<link linkend='var-PN'><filename>PN</filename></link>
of the recipe to which you want to give precedence.
Here is an example:
<literallayout class='monospaced'>
PREFERRED_PROVIDER_virtual/xserver = "xserver-xf86"
</literallayout>
</para>
</glossdef>
</glossentry>

<glossentry id='var-PREFERRED_VERSION'><glossterm>PREFERRED_VERSION</glossterm>
<glossdef>
<para>
If there are multiple versions of recipes available, this
variable determines which recipe should be given preference.
You must always suffix the variable with the
<link linkend='var-PN'><filename>PN</filename></link>
you want to select, and you should set the variable to the
<link linkend='var-PV'><filename>PV</filename></link>
to which you want to give precedence.
You can use the "<filename>%</filename>" character as a
wildcard to match any number of characters, which can be
useful when specifying versions that contain long revision
numbers that could potentially change.
Here are two examples:
<literallayout class='monospaced'>
PREFERRED_VERSION_python = "2.6.6"
PREFERRED_VERSION_linux-yocto = "3.0+git%"
</literallayout>
</para>
</glossdef>
</glossentry>

<glossentry id='var-PREMIRRORS'><glossterm>PREMIRRORS</glossterm>
<glossdef>
<para>
@@ -3672,57 +3932,6 @@ recipes-graphics/xorg-font/font-alias_1.0.3.bb:PR = "${INC_PR}.3"
</glossdef>
</glossentry>

</glossdiv>

<!-- <glossdiv id='var-glossary-q'><title>Q</title>-->
@@ -3749,13 +3958,36 @@ recipes-graphics/xorg-font/font-alias_1.0.3.bb:PR = "${INC_PR}.3"
<glossentry id='var-RDEPENDS'><glossterm>RDEPENDS</glossterm>
<glossdef>
<para>
Lists a package's runtime dependencies (i.e. other packages)
that must be installed in order for the built package to run
correctly.
If a package in this list cannot be found during the build,
you will get a build error.
</para>

<para>
When you use the <filename>RDEPENDS</filename> variable
in a recipe, you are essentially stating that the recipe's
<filename>do_build</filename> task depends on the existence
of a specific package.
Consider this simple example for two recipes named "a" and
"b" that produce similarly named packages.
In this example, the <filename>RDEPENDS</filename>
statement appears in the "a" recipe:
<literallayout class='monospaced'>
RDEPENDS_${PN} = "b"
</literallayout>
Here, the dependency is such that the
<filename>do_build</filename> task for recipe "a" depends
on the <filename>do_package_write</filename> task
of recipe "b".
This means the package file for "b" must be available when
the output for recipe "a" has been completely built.
More importantly, package "a" will be marked as depending
on package "b" in a manner that is understood by the
package manager in use (i.e. rpm, opkg, or dpkg).
</para>

<para>
The names of the packages you list within
<filename>RDEPENDS</filename> must be the names of other
@@ -3798,26 +4030,32 @@ recipes-graphics/xorg-font/font-alias_1.0.3.bb:PR = "${INC_PR}.3"

<para>
In many cases you do not need to explicitly add
runtime dependencies using
<filename>RDEPENDS</filename> since some automatic
handling occurs:
<itemizedlist>
<listitem><para><emphasis><filename>shlibdeps</filename></emphasis>: If
a runtime package contains a shared library
(<filename>.so</filename>), the build
processes the library in order to determine other
libraries to which it is dynamically linked.
The build process adds these libraries to
<filename>RDEPENDS</filename> when creating the runtime
package.</para></listitem>
<listitem><para><emphasis><filename>pcdeps</filename></emphasis>: If
the package ships a <filename>pkg-config</filename>
information file, the build process uses this file
to add items to the <filename>RDEPENDS</filename>
variable to create the runtime packages.
</para></listitem>
</itemizedlist>
</para>

<para>
For information on build-time dependencies, see the
<link linkend='var-DEPENDS'><filename>DEPENDS</filename></link>
variable.
</para>
</glossdef>
</glossentry>

@@ -3878,27 +4116,43 @@ recipes-graphics/xorg-font/font-alias_1.0.3.bb:PR = "${INC_PR}.3"
<glossentry id='var-RRECOMMENDS'><glossterm>RRECOMMENDS</glossterm>
<glossdef>
<para>
A list of packages that extends the usability of a package
being built.
The package being built does not depend on this list of
packages in order to successfully build, but needs them for
the extended usability.
To specify runtime dependencies for packages, see the
<filename><link linkend='var-RDEPENDS'>RDEPENDS</link></filename>
variable.
</para>

<para>
The OpenEmbedded build process automatically installs the
list of packages as part of the built package.
However, you can remove these packages later if you want.
If, during the build, a package from the
<filename>RRECOMMENDS</filename> list cannot be
found, the build process continues without an error.
</para>

<para>
You can also prevent packages in the list from being
installed by using several variables.
See the
<link linkend='var-BAD_RECOMMENDATIONS'><filename>BAD_RECOMMENDATIONS</filename></link>,
<link linkend='var-NO_RECOMMENDATIONS'><filename>NO_RECOMMENDATIONS</filename></link>,
and
<link linkend='var-PACKAGE_EXCLUDE'><filename>PACKAGE_EXCLUDE</filename></link>
variables for more information.
</para>

<para>
Because the <filename>RRECOMMENDS</filename> variable
applies to packages being built, you should always attach
an override to the variable to specify the particular
package whose usability is being extended.
For example, suppose you are building a development package
that is extended to support wireless functionality.
In this case, you would use the following:
<literallayout class='monospaced'>
RRECOMMENDS_${PN}-dev += "<wireless_package_name>"
@@ -3906,8 +4160,9 @@ recipes-graphics/xorg-font/font-alias_1.0.3.bb:PR = "${INC_PR}.3"
In the example, the package name
(<filename>${<link linkend='var-PN'>PN</link>}-dev</filename>)
must appear as it would in the
<filename><link linkend='var-PACKAGES'>PACKAGES</link></filename>
namespace before any renaming of the output package by
classes such as <filename>debian.bbclass</filename>.
</para>
</glossdef>
</glossentry>

@@ -27,8 +27,13 @@
<title>Build Overview</title>

<para>
The first thing you need to do is set up the OpenEmbedded build
environment by sourcing an environment setup script
(i.e.
<link linkend='structure-core-script'><filename>&OE_INIT_FILE;</filename></link>
or
<link linkend='structure-memres-core-script'><filename>oe-init-build-env-memres</filename></link>).
Here is an example:
<literallayout class='monospaced'>
$ source &OE_INIT_FILE; [<build_dir>]
</literallayout>

@@ -11,6 +11,21 @@
<holder>Linux Foundation</holder>
</copyright>

<legalnotice>
<para>
Permission is granted to copy, distribute and/or modify this document under
the terms of the <ulink type="http" url="http://creativecommons.org/licenses/by-sa/2.0/uk/">Creative Commons Attribution-Share Alike 2.0 UK: England & Wales</ulink> as published by Creative Commons.
</para>
<note>
Due to production processes, there could be differences between the Yocto Project
documentation bundled in the release tarball and the
<ulink url='&YOCTO_DOCS_QS_URL;'>Yocto Project Quick Start</ulink> on
the <ulink url='&YOCTO_HOME_URL;'>Yocto Project</ulink> website.
For the latest version of this manual, see the manual on the website.
</note>
</legalnotice>


<abstract>
<imagedata fileref="figures/yocto-project-transp.png"
width="6in" depth="1in"
@@ -449,7 +464,12 @@
to the Build Directory.
Later, when the build completes, the Build Directory contains all the files
created during the build.
<note>
For information on running a memory-resident BitBake, see
the
<ulink url='&YOCTO_DOCS_REF_URL;#structure-memres-core-script'><filename>oe-init-build-env-memres</filename></ulink>
setup script.
</note></para></listitem>
</itemizedlist>
<para>
Take some time to examine your <filename>local.conf</filename> file

@@ -11,6 +11,6 @@ BBFILE_PRIORITY_yocto = "5"

# This should only be incremented on significant changes that will
# cause compatibility issues with other layers
LAYERVERSION_yocto = "1"
LAYERVERSION_yocto = "2"

LAYERDEPENDS_yocto = "core"

@@ -2,6 +2,10 @@
# This class is used for architecture independent recipes/data files (usually scripts)
#

# Expand STAGING_DIR_HOST since for cross-canadian/native/nativesdk, this will
# point elsewhere after these changes.
STAGING_DIR_HOST := "${STAGING_DIR_HOST}"

PACKAGE_ARCH = "all"

python () {

@@ -108,10 +108,16 @@ CONFIGURESTAMPFILE = "${WORKDIR}/configure.sstate"

autotools_preconfigure() {
    if [ -n "${CONFIGURESTAMPFILE}" -a -e "${CONFIGURESTAMPFILE}" ]; then
        if [ "`cat ${CONFIGURESTAMPFILE}`" != "${BB_TASKHASH}" ]; then
            if [ "${S}" != "${B}" ]; then
                echo "Previously configured separate build directory detected, cleaning ${B}"
                rm -rf ${B}
                mkdir ${B}
            else
                # At least remove the .la files since automake won't automatically
                # regenerate them even if CFLAGS/LDFLAGS are different
                cd ${S}; find ${S} -name \*.la -delete
            fi
        fi
    fi
}

@@ -31,6 +31,29 @@ EXCLUDE_FROM_WORLD = "1"
|
||||
BOOTDD_VOLUME_ID ?= "boot"
|
||||
BOOTDD_EXTRA_SPACE ?= "16384"
|
||||
|
||||
EFI = "${@base_contains("MACHINE_FEATURES", "efi", "1", "0", d)}"
|
||||
EFI_CLASS = "${@base_contains("MACHINE_FEATURES", "efi", "grub-efi", "", d)}"
|
||||
|
||||
# Include legacy boot if MACHINE_FEATURES includes "pcbios" or if it does not
|
||||
# contain "efi". This way legacy is supported by default if neither is
|
||||
# specified, maintaining the original behavior.
|
||||
def pcbios(d):
|
||||
pcbios = base_contains("MACHINE_FEATURES", "pcbios", "1", "0", d)
|
||||
if pcbios == "0":
|
||||
pcbios = base_contains("MACHINE_FEATURES", "efi", "0", "1", d)
|
||||
return pcbios
|
||||
|
||||
def pcbios_class(d):
|
||||
if d.getVar("PCBIOS", True) == "1":
|
||||
return "syslinux"
|
||||
return ""
|
||||
|
||||
PCBIOS = "${@pcbios(d)}"
|
||||
PCBIOS_CLASS = "${@pcbios_class(d)}"
|
||||
|
||||
inherit ${PCBIOS_CLASS}
|
||||
inherit ${EFI_CLASS}
|
||||
|
||||
# Get the build_syslinux_cfg() function from the syslinux class
|
||||
|
||||
AUTO_SYSLINUXCFG = "1"
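# Worked illustration of the PCBIOS/EFI selection logic above (assumed
# MACHINE_FEATURES values, shown only to clarify the helper functions):
#   MACHINE_FEATURES = "pcbios"      ->  PCBIOS = "1", EFI = "0"  (syslinux only)
#   MACHINE_FEATURES = "efi"         ->  PCBIOS = "0", EFI = "1"  (grub-efi only)
#   MACHINE_FEATURES = "pcbios efi"  ->  PCBIOS = "1", EFI = "1"  (both)
#   MACHINE_FEATURES = ""            ->  PCBIOS = "1", EFI = "0"  (legacy default)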
|
||||
@@ -38,17 +61,32 @@ DISK_SIGNATURE ?= "${DISK_SIGNATURE_GENERATED}"
|
||||
SYSLINUX_ROOT ?= "root=/dev/sda2"
|
||||
SYSLINUX_TIMEOUT ?= "10"
|
||||
|
||||
inherit syslinux
|
||||
|
||||
populate() {
|
||||
DEST=$1
|
||||
install -d ${DEST}
|
||||
|
||||
# Install bzImage, initrd, and rootfs.img in DEST for all loaders to use.
|
||||
install -m 0644 ${STAGING_KERNEL_DIR}/bzImage ${DEST}/vmlinuz
|
||||
|
||||
if [ -n "${INITRD}" ] && [ -s "${INITRD}" ]; then
|
||||
install -m 0644 ${INITRD} ${DEST}/initrd
|
||||
fi
|
||||
|
||||
}
|
||||
|
||||
build_boot_dd() {
|
||||
HDDDIR="${S}/hdd/boot"
|
||||
HDDIMG="${S}/hdd.image"
|
||||
IMAGE=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hdddirect
|
||||
|
||||
install -d $HDDDIR
|
||||
install -m 0644 ${STAGING_KERNEL_DIR}/bzImage $HDDDIR/vmlinuz
|
||||
install -m 0644 ${S}/syslinux.cfg $HDDDIR/syslinux.cfg
|
||||
install -m 444 ${STAGING_DATADIR}/syslinux/ldlinux.sys $HDDDIR/ldlinux.sys
|
||||
populate ${HDDDIR}
|
||||
|
||||
if [ "${PCBIOS}" = "1" ]; then
|
||||
syslinux_hddimg_populate
|
||||
fi
|
||||
if [ "${EFI}" = "1" ]; then
|
||||
grubefi_hddimg_populate
|
||||
fi
|
||||
|
||||
BLOCKS=`du -bks $HDDDIR | cut -f 1`
|
||||
BLOCKS=`expr $BLOCKS + ${BOOTDD_EXTRA_SPACE}`
|
||||
@@ -62,7 +100,9 @@ build_boot_dd() {
|
||||
mkdosfs -n ${BOOTDD_VOLUME_ID} -S 512 -C $HDDIMG $BLOCKS
|
||||
mcopy -i $HDDIMG -s $HDDDIR/* ::/
|
||||
|
||||
syslinux $HDDIMG
|
||||
if [ "${PCBIOS}" = "1" ]; then
|
||||
syslinux_hdddirect_install $HDDIMG
|
||||
fi
|
||||
chmod 644 $HDDIMG
|
||||
|
||||
ROOTFSBLOCKS=`du -Lbks ${ROOTFS} | cut -f 1`
|
||||
@@ -85,9 +125,11 @@ build_boot_dd() {
|
||||
dd of=$IMAGE bs=1 seek=440 conv=notrunc
|
||||
|
||||
OFFSET=`expr $END2 / 512`
|
||||
dd if=${STAGING_DATADIR}/syslinux/mbr.bin of=$IMAGE conv=notrunc
|
||||
if [ "${PCBIOS}" = "1" ]; then
|
||||
dd if=${STAGING_DATADIR}/syslinux/mbr.bin of=$IMAGE conv=notrunc
|
||||
fi
|
||||
dd if=$HDDIMG of=$IMAGE conv=notrunc seek=1 bs=512
|
||||
dd if=${ROOTFS} of=$IMAGE conv=notrunc seek=$OFFSET bs=512
|
||||
dd if=${ROOTFS} of=$IMAGE conv=notrunc seek=$OFFSET bs=512
|
||||
|
||||
cd ${DEPLOY_DIR_IMAGE}
|
||||
rm -f ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.hdddirect
|
||||
@@ -96,7 +138,10 @@ build_boot_dd() {
|
||||
|
||||
python do_bootdirectdisk() {
|
||||
validate_disk_signature(d)
|
||||
bb.build.exec_func('build_syslinux_cfg', d)
|
||||
if d.getVar("PCBIOS", True) == "1":
|
||||
bb.build.exec_func('build_syslinux_cfg', d)
|
||||
if d.getVar("EFI", True) == "1":
|
||||
bb.build.exec_func('build_grub_cfg', d)
|
||||
bb.build.exec_func('build_boot_dd', d)
|
||||
}
|
||||
|
||||
|
||||
@@ -111,6 +111,63 @@ build_iso() {
|
||||
ln -s ${IMAGE_NAME}.iso ${DEPLOY_DIR_IMAGE}/${IMAGE_LINK_NAME}.iso
|
||||
}
|
||||
|
||||
build_fat_img() {
|
||||
FATSOURCEDIR=$1
|
||||
FATIMG=$2
|
||||
|
||||
# Calculate the size required for the final image including the
|
||||
# data and filesystem overhead.
|
||||
# Sectors: 512 bytes
|
||||
# Blocks: 1024 bytes
|
||||
|
||||
# Determine the sector count just for the data
|
||||
SECTORS=$(expr $(du --apparent-size -ks ${FATSOURCEDIR} | cut -f 1) \* 2)
|
||||
|
||||
# Account for the filesystem overhead. This includes directory
|
||||
# entries in the clusters as well as the FAT itself.
|
||||
# Assumptions:
|
||||
# FAT32 (12 or 16 may be selected by mkdosfs, but the extra
|
||||
# padding will be minimal on those smaller images and not
|
||||
# worth the logic here to calculate the smaller FAT sizes)
|
||||
# < 16 entries per directory
|
||||
# 8.3 filenames only
|
||||
|
||||
# 32 bytes per dir entry
|
||||
DIR_BYTES=$(expr $(find ${FATSOURCEDIR} | tail -n +2 | wc -l) \* 32)
|
||||
# 32 bytes for every end-of-directory dir entry
|
||||
DIR_BYTES=$(expr $DIR_BYTES + $(expr $(find ${FATSOURCEDIR} -type d | tail -n +2 | wc -l) \* 32))
|
||||
# 4 bytes per FAT entry per sector of data
|
||||
FAT_BYTES=$(expr $SECTORS \* 4)
|
||||
# 4 bytes per FAT entry per end-of-cluster list
|
||||
FAT_BYTES=$(expr $FAT_BYTES + $(expr $(find ${FATSOURCEDIR} -type d | tail -n +2 | wc -l) \* 4))
|
||||
|
||||
# Use a ceiling function to determine FS overhead in sectors
|
||||
DIR_SECTORS=$(expr $(expr $DIR_BYTES + 511) / 512)
|
||||
# There are two FATs on the image
|
||||
FAT_SECTORS=$(expr $(expr $(expr $FAT_BYTES + 511) / 512) \* 2)
|
||||
SECTORS=$(expr $SECTORS + $(expr $DIR_SECTORS + $FAT_SECTORS))
|
||||
|
||||
# Determine the final size in blocks accounting for some padding
|
||||
BLOCKS=$(expr $(expr $SECTORS / 2) + ${BOOTIMG_EXTRA_SPACE})
|
||||
|
||||
# Ensure total sectors is an integral number of sectors per
|
||||
# track or mcopy will complain. Sectors are 512 bytes, and we
|
||||
# generate images with 32 sectors per track. This calculation is
|
||||
# done in blocks, thus the mod by 16 instead of 32.
|
||||
BLOCKS=$(expr $BLOCKS + $(expr 16 - $(expr $BLOCKS % 16)))
|
||||
|
||||
# mkdosfs will sometimes use FAT16 when it is not appropriate,
|
||||
# resulting in a boot failure from SYSLINUX. Use FAT32 for
|
||||
# images larger than 512MB, otherwise let mkdosfs decide.
|
||||
if [ $(expr $BLOCKS / 1024) -gt 512 ]; then
|
||||
FATSIZE="-F 32"
|
||||
fi
|
||||
|
||||
mkdosfs ${FATSIZE} -n ${BOOTIMG_VOLUME_ID} -S 512 -C ${FATIMG} ${BLOCKS}
|
||||
# Copy FATSOURCEDIR recursively into the image file directly
|
||||
mcopy -i ${FATIMG} -s ${FATSOURCEDIR}/* ::/
|
||||
}
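# Worked example of the sizing arithmetic in build_fat_img() above (all
# numbers are assumed, purely to illustrate the calculation):
#   payload of 10000 KiB                 ->  SECTORS     = 20000
#   200 files + 10 subdirs (210 entries) ->  DIR_BYTES   = 210*32 + 10*32 = 7040
#                                        ->  DIR_SECTORS = (7040+511)/512 = 14
#   FAT_BYTES = 20000*4 + 10*4 = 80040   ->  FAT_SECTORS = 2*((80040+511)/512) = 314
#   SECTORS   = 20000 + 14 + 314 = 20328 ->  BLOCKS = 20328/2 + BOOTIMG_EXTRA_SPACE,
#               then rounded up to a multiple of 16 so mcopy does not complain
# Typical call, as used by the HDD image code later in this class:
#   build_fat_img ${HDDDIR} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hddimg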
|
||||
|
||||
build_hddimg() {
|
||||
# Create an HDD image
|
||||
if [ "${NOHDD}" != "1" ] ; then
|
||||
@@ -123,58 +180,7 @@ build_hddimg() {
|
||||
grubefi_hddimg_populate
|
||||
fi
|
||||
|
||||
# Calculate the size required for the final image including the
|
||||
# data and filesystem overhead.
|
||||
# Sectors: 512 bytes
|
||||
# Blocks: 1024 bytes
|
||||
|
||||
# Determine the sector count just for the data
|
||||
SECTORS=$(expr $(du --apparent-size -ks ${HDDDIR} | cut -f 1) \* 2)
|
||||
|
||||
# Account for the filesystem overhead. This includes directory
|
||||
# entries in the clusters as well as the FAT itself.
|
||||
# Assumptions:
|
||||
# FAT32 (12 or 16 may be selected by mkdosfs, but the extra
|
||||
# padding will be minimal on those smaller images and not
|
||||
# worth the logic here to calculate the smaller FAT sizes)
|
||||
# < 16 entries per directory
|
||||
# 8.3 filenames only
|
||||
|
||||
# 32 bytes per dir entry
|
||||
DIR_BYTES=$(expr $(find ${HDDDIR} | tail -n +2 | wc -l) \* 32)
|
||||
# 32 bytes for every end-of-directory dir entry
|
||||
DIR_BYTES=$(expr $DIR_BYTES + $(expr $(find ${HDDDIR} -type d | tail -n +2 | wc -l) \* 32))
|
||||
# 4 bytes per FAT entry per sector of data
|
||||
FAT_BYTES=$(expr $SECTORS \* 4)
|
||||
# 4 bytes per FAT entry per end-of-cluster list
|
||||
FAT_BYTES=$(expr $FAT_BYTES + $(expr $(find ${HDDDIR} -type d | tail -n +2 | wc -l) \* 4))
|
||||
|
||||
# Use a ceiling function to determine FS overhead in sectors
|
||||
DIR_SECTORS=$(expr $(expr $DIR_BYTES + 511) / 512)
|
||||
# There are two FATs on the image
|
||||
FAT_SECTORS=$(expr $(expr $(expr $FAT_BYTES + 511) / 512) \* 2)
|
||||
SECTORS=$(expr $SECTORS + $(expr $DIR_SECTORS + $FAT_SECTORS))
|
||||
|
||||
# Determine the final size in blocks accounting for some padding
|
||||
BLOCKS=$(expr $(expr $SECTORS / 2) + ${BOOTIMG_EXTRA_SPACE})
|
||||
|
||||
# Ensure total sectors is an integral number of sectors per
|
||||
# track or mcopy will complain. Sectors are 512 bytes, and we
|
||||
# generate images with 32 sectors per track. This calculation is
|
||||
# done in blocks, thus the mod by 16 instead of 32.
|
||||
BLOCKS=$(expr $BLOCKS + $(expr 16 - $(expr $BLOCKS % 16)))
|
||||
|
||||
# mkdosfs will sometimes use FAT16 when it is not appropriate,
|
||||
# resulting in a boot failure from SYSLINUX. Use FAT32 for
|
||||
# images larger than 512MB, otherwise let mkdosfs decide.
|
||||
if [ $(expr $BLOCKS / 1024) -gt 512 ]; then
|
||||
FATSIZE="-F 32"
|
||||
fi
|
||||
|
||||
IMG=${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hddimg
|
||||
mkdosfs ${FATSIZE} -n ${BOOTIMG_VOLUME_ID} -S 512 -C ${IMG} ${BLOCKS}
|
||||
# Copy HDDDIR recursively into the image file directly
|
||||
mcopy -i ${IMG} -s ${HDDDIR}/* ::/
|
||||
build_fat_img ${HDDDIR} ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hddimg
|
||||
|
||||
if [ "${PCBIOS}" = "1" ]; then
|
||||
syslinux_hddimg_install
|
||||
|
||||
@@ -340,7 +340,7 @@ buildhistory_get_installed() {
|
||||
cat $pkgcache | while read pkg pkgfile pkgarch
|
||||
do
|
||||
for vendor in ${TARGET_VENDOR} ${MULTILIB_VENDORS} ; do
|
||||
size=`oe-pkgdata-util read-value ${TMPDIR}/pkgdata $vendor-${TARGET_OS} "PKGSIZE" ${pkg}_${pkgarch}`
|
||||
size=`oe-pkgdata-util read-value ${PKGDATA_DIR} "PKGSIZE" ${pkg}_${pkgarch}`
|
||||
if [ "$size" != "" ] ; then
|
||||
echo "$size $pkg" >> $1/installed-package-sizes.tmp
|
||||
fi
|
||||
|
||||
@@ -31,7 +31,6 @@ STAGING_DIR_HOST = "${STAGING_DIR}/${HOST_ARCH}-${SDKPKGSUFFIX}${HOST_VENDOR}-${
|
||||
TOOLCHAIN_OPTIONS = " --sysroot=${STAGING_DIR}/${HOST_ARCH}-${SDKPKGSUFFIX}${HOST_VENDOR}-${HOST_OS}"
|
||||
|
||||
PATH_append = ":${TMPDIR}/sysroots/${HOST_ARCH}/${bindir_cross}"
|
||||
PKGDATA_DIR = "${TMPDIR}/pkgdata/${HOST_ARCH}-${SDKPKGSUFFIX}${HOST_VENDOR}-${HOST_OS}"
|
||||
PKGHIST_DIR = "${TMPDIR}/pkghistory/${HOST_ARCH}-${SDKPKGSUFFIX}${HOST_VENDOR}-${HOST_OS}/"
|
||||
|
||||
HOST_ARCH = "${SDK_ARCH}"
|
||||
@@ -89,10 +88,6 @@ FILES_${PN}-dbg += "${prefix}/.debug \
|
||||
export PKG_CONFIG_DIR = "${STAGING_DIR_HOST}${layout_libdir}/pkgconfig"
|
||||
export PKG_CONFIG_SYSROOT_DIR = "${STAGING_DIR_HOST}"
|
||||
|
||||
# Cross-canadian packages need to pull in nativesdk dynamic libs
|
||||
SHLIBSDIRS = "${TMPDIR}/pkgdata/${HOST_ARCH}-${SDKPKGSUFFIX}${HOST_VENDOR}-${HOST_OS}/shlibs/ ${TMPDIR}/pkgdata/all-${HOST_VENDOR}-${HOST_OS}/shlibs/"
|
||||
SHLIBSDIR = "${TMPDIR}/pkgdata/${HOST_ARCH}-${SDKPKGSUFFIX}${HOST_VENDOR}-${HOST_OS}/shlibs/"
|
||||
|
||||
do_populate_sysroot[stamp-extra-info] = ""
|
||||
|
||||
USE_NLS = "${SDKUSE_NLS}"
|
||||
|
||||
@@ -484,7 +484,7 @@ rootfs_install_complementary() {
|
||||
# Use the magic script to do all the work for us :)
|
||||
: > ${WORKDIR}/complementary_pkgs.txt
|
||||
for vendor in '${TARGET_VENDOR}' ${MULTILIB_VENDORS} ; do
|
||||
oe-pkgdata-util glob ${TMPDIR}/pkgdata $vendor-${TARGET_OS} ${WORKDIR}/installed_pkgs.txt "$GLOBS" >> ${WORKDIR}/complementary_pkgs.txt
|
||||
oe-pkgdata-util glob ${PKGDATA_DIR} ${WORKDIR}/installed_pkgs.txt "$GLOBS" >> ${WORKDIR}/complementary_pkgs.txt
|
||||
done
|
||||
|
||||
# Install the packages, if any
|
||||
|
||||
@@ -28,8 +28,7 @@ license_create_manifest() {
|
||||
fi
|
||||
touch ${LICENSE_MANIFEST}
|
||||
for pkg in ${INSTALLED_PKGS}; do
|
||||
# not the best way to do this but licenses are not arch dependant iirc
|
||||
filename=`ls ${TMPDIR}/pkgdata/*/runtime-reverse/${pkg}| head -1`
|
||||
filename=`ls ${PKGDATA_DIR}/runtime-reverse/${pkg}| head -1`
|
||||
pkged_pn="$(sed -n 's/^PN: //p' ${filename})"
|
||||
|
||||
# check to see if the package name exists in the manifest. if so, bail.
|
||||
|
||||
@@ -47,7 +47,6 @@ python multilib_virtclass_handler () {
|
||||
|
||||
e.data.setVar("MLPREFIX", variant + "-")
|
||||
e.data.setVar("PN", variant + "-" + e.data.getVar("PN", False))
|
||||
e.data.setVar("SHLIBSDIR_virtclass-multilib-" + variant ,e.data.getVar("SHLIBSDIR", False) + "/" + variant)
|
||||
e.data.setVar("OVERRIDES", e.data.getVar("OVERRIDES", False) + override)
|
||||
|
||||
# Expand the WHITELISTs with multilib prefix
|
||||
|
||||
@@ -1304,17 +1304,8 @@ python package_do_filedeps() {
|
||||
d.setVar("FILERPROVIDESFLIST_" + pkg, " ".join(provides_files[pkg]))
|
||||
}
|
||||
|
||||
def getshlibsdirs(d):
|
||||
dirs = []
|
||||
triplets = (d.getVar("PKGTRIPLETS") or "").split()
|
||||
for t in triplets:
|
||||
dirs.append("${TMPDIR}/pkgdata/" + t + "/shlibs/")
|
||||
return " ".join(dirs)
|
||||
getshlibsdirs[vardepsexclude] = "PKGTRIPLETS"
|
||||
|
||||
SHLIBSDIRS = "${@getshlibsdirs(d)}"
|
||||
SHLIBSDIR = "${TMPDIR}/pkgdata/${PACKAGE_ARCH}${TARGET_VENDOR}-${TARGET_OS}/shlibs"
|
||||
SHLIBSWORKDIR = "${PKGDESTWORK}/shlibs"
|
||||
SHLIBSDIRS = "${PKGDATA_DIR}/${MLPREFIX}shlibs"
|
||||
SHLIBSWORKDIR = "${PKGDESTWORK}/${MLPREFIX}shlibs"
|
||||
|
||||
python package_do_shlibs() {
|
||||
import re, pipes
|
||||
@@ -1953,6 +1944,7 @@ do_packagedata[sstate-name] = "packagedata"
do_packagedata[sstate-inputdirs] = "${PKGDESTWORK}"
do_packagedata[sstate-outputdirs] = "${PKGDATA_DIR}"
do_packagedata[sstate-lockfile-shared] = "${PACKAGELOCK}"
do_packagedata[stamp-extra-info] = "${MACHINE}"

python do_packagedata_setscene () {
    sstate_setscene(d)

@@ -109,7 +109,7 @@ translate_smart_to_oe() {
            fi
            # Workaround for bug 3565
            # Simply look to see if we know of a package with that name, if not try again!
            filename=`ls ${TMPDIR}/pkgdata/*/runtime-reverse/$new_pkg 2>/dev/null | head -n 1`
            filename=`ls ${PKGDATA_DIR}/runtime-reverse/$new_pkg 2>/dev/null | head -n 1`
            if [ -n "$filename" ] ; then
                found=1
                break

@@ -1,27 +1,20 @@
python packageinfo_handler () {
    import oe.packagedata
    pkginfolist = []
    tmpdir = e.data.getVar('TMPDIR', True)
    target_vendor = e.data.getVar('TARGET_VENDOR', True)
    target_os = e.data.getVar('TARGET_OS', True)
    package_archs = e.data.getVar('PACKAGE_ARCHS', True)
    packaging = e.data.getVar('PACKAGE_CLASSES', True).split()[0].split('_')[1]
    deploy_dir = e.data.getVar('DEPLOY_DIR', True) + '/' + packaging

    for arch in package_archs.split():
        pkgdata_dir = tmpdir + '/pkgdata/' + arch + target_vendor + '-' + target_os + '/runtime/'
        if os.path.exists(pkgdata_dir):
            for root, dirs, files in os.walk(pkgdata_dir):
                for pkgname in files:
                    if pkgname.endswith('.packaged'):
                        pkgname = pkgname[:-9]
                        pkgdatafile = root + pkgname
                        try:
                            sdata = oe.packagedata.read_pkgdatafile(pkgdatafile)
                            sdata['PKG'] = pkgname
                            pkginfolist.append(sdata)
                        except Exception as e:
                            bb.warn("Failed to read pkgdata file %s: %s: %s" % (pkgdatafile, e.__class__, str(e)))

    pkgdata_dir = e.data.getVar("PKGDATA_DIR", True) + '/runtime/'
    if os.path.exists(pkgdata_dir):
        for root, dirs, files in os.walk(pkgdata_dir):
            for pkgname in files:
                if pkgname.endswith('.packaged'):
                    pkgname = pkgname[:-9]
                    pkgdatafile = root + pkgname
                    try:
                        sdata = oe.packagedata.read_pkgdatafile(pkgdatafile)
                        sdata['PKG'] = pkgname
                        pkginfolist.append(sdata)
                    except Exception as e:
                        bb.warn("Failed to read pkgdata file %s: %s: %s" % (pkgdatafile, e.__class__, str(e)))
    bb.event.fire(bb.event.PackageInfo(pkginfolist), e.data)
}
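The reworked handler above walks ${PKGDATA_DIR}/runtime, strips the .packaged suffix to recover each package name, reads the pkgdata file and fires a PackageInfo event. For reference, a standalone sketch of that per-file parsing step outside BitBake (the simple "KEY: value" line format is an assumption based on how read_pkgdatafile() is used here):

```python
# Standalone sketch (not oe.packagedata.read_pkgdatafile itself):
# parse one pkgdata runtime file into a dict of variables, assuming
# plain "KEY: value" lines.
import sys

def read_pkgdata(path):
    data = {}
    with open(path) as f:
        for line in f:
            if ': ' in line:
                key, value = line.split(': ', 1)
                data[key.strip()] = value.strip()
    return data

if __name__ == "__main__":
    for key, value in sorted(read_pkgdata(sys.argv[1]).items()):
        print("%s = %s" % (key, value))
```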
@@ -41,7 +41,7 @@ populate_sdk_rpm () {
    export INSTALL_ROOTFS_RPM="${SDK_OUTPUT}/${SDKTARGETSYSROOT}"
    export INSTALL_PLATFORM_RPM="$(echo ${TARGET_ARCH} | tr - _)${TARGET_VENDOR}-${TARGET_OS}"
    export INSTALL_PACKAGES_RPM="${TOOLCHAIN_TARGET_TASK}"
    export INSTALL_PACKAGES_ATTEMPTONLY_RPM="$(echo '${TOOLCHAIN_TARGET_TASK_ATTEMPTONLY}' | tr -d ' ')"
    export INSTALL_PACKAGES_ATTEMPTONLY_RPM="$(echo '${TOOLCHAIN_TARGET_TASK_ATTEMPTONLY}' | sed 's/ *$//g')"
    export INSTALL_PACKAGES_LINGUAS_RPM=""
    # We don't need any of these runtime items for the SDK, so
    # just make the system assume they exist.

@@ -98,7 +98,7 @@ populate_sdk_rpm () {
    export INSTALL_ROOTFS_RPM="${SDK_OUTPUT}"
    export INSTALL_PLATFORM_RPM="$(echo ${TARGET_ARCH} | tr - _)${SDK_VENDOR}-${SDK_OS}"
    export INSTALL_PACKAGES_RPM="${TOOLCHAIN_HOST_TASK}"
    export INSTALL_PACKAGES_ATTEMPTONLY_RPM="$(echo '${TOOLCHAIN_HOST_TASK_ATTEMPTONLY}' | tr -d ' ')"
    export INSTALL_PACKAGES_ATTEMPTONLY_RPM="$(echo '${TOOLCHAIN_HOST_TASK_ATTEMPTONLY}' | sed 's/ *$//g')"
    export INSTALL_PACKAGES_LINGUAS_RPM=""
    export INSTALL_PROVIDENAME_RPM="/bin/sh /bin/bash /usr/bin/env /usr/bin/perl pkgconfig libGL.so()(64bit) libGL.so"
    export INSTALL_TASK_RPM="populate_sdk_rpm-nativesdk"

@@ -566,7 +566,7 @@ sstate_create_package () {
    TFILE=`mktemp ${SSTATE_PKG}.XXXXXXXX`
    # Need to handle empty directories
    if [ "$(ls -A)" ]; then
        tar -czf $TFILE *
        tar --ignore-failed-read -czf $TFILE *
    else
        tar -cz --file=$TFILE --files-from=/dev/null
    fi

@@ -64,6 +64,11 @@ syslinux_hddimg_install() {
    syslinux ${DEPLOY_DIR_IMAGE}/${IMAGE_NAME}.hddimg
}

syslinux_hdddirect_install() {
    DEST=$1
    syslinux $DEST
}

python build_syslinux_cfg () {
    import copy
    import sys

@@ -122,6 +122,7 @@ def testimage_main(d):

    qemu = QemuRunner(machine, rootfs)
    qemu.tmpdir = d.getVar("TMPDIR", True)
    qemu.deploy_dir_image = d.getVar("DEPLOY_DIR_IMAGE", True)
    qemu.display = d.getVar("BB_ORIGENV", False).getVar("DISPLAY", True)
    qemu.logfile = os.path.join(testdir, "qemu_boot_log.%s" % d.getVar('DATETIME', True))
    try:
@@ -293,11 +293,9 @@ END
}

def check_app_exists(app, d):
    from bb import which, data

    app = data.expand(app, d)
    path = data.getVar('PATH', d, 1)
    return bool(which(path, app))
    app = d.expand(app)
    path = d.getVar('PATH', d, True)
    return bool(bb.utils.which(path, app))

def explode_deps(s):
    return bb.utils.explode_deps(s)

@@ -253,7 +253,7 @@ DEPCHAIN_POST = "-dev -dbg"
DEPENDS = ""
RDEPENDS = ""
PROVIDES = ""
PROVIDES_prepend = "${P} ${PF} ${PN} "
PROVIDES_prepend = "${PN} "
RPROVIDES = ""

MULTI_PROVIDER_WHITELIST = "virtual/libintl virtual/libintl-native virtual/nativesdk-libintl virtual/xserver virtual/update-alternatives-native virtual/update-alternatives"

@@ -379,10 +379,10 @@ DEPLOY_DIR_TAR = "${DEPLOY_DIR}/tar"
DEPLOY_DIR_IPK = "${DEPLOY_DIR}/ipk"
DEPLOY_DIR_RPM = "${DEPLOY_DIR}/rpm"
DEPLOY_DIR_DEB = "${DEPLOY_DIR}/deb"
DEPLOY_DIR_IMAGE ?= "${DEPLOY_DIR}/images"
DEPLOY_DIR_IMAGE ?= "${DEPLOY_DIR}/images/${MACHINE}"
DEPLOY_DIR_TOOLS = "${DEPLOY_DIR}/tools"

PKGDATA_DIR = "${TMPDIR}/pkgdata/${MULTIMACH_TARGET_SYS}"
PKGDATA_DIR = "${STAGING_DIR_HOST}/pkgdata"

##################################################################
# SDK variables.

@@ -9,6 +9,7 @@ PREFERRED_PROVIDER_virtual/egl ?= "mesa"
PREFERRED_PROVIDER_virtual/libgl ?= "mesa"
PREFERRED_PROVIDER_virtual/libgles1 ?= "mesa"
PREFERRED_PROVIDER_virtual/libgles2 ?= "mesa"
PREFERRED_PROVIDER_virtual/mesa ?= "mesa"
PREFERRED_PROVIDER_virtual/update-alternatives ?= "opkg"
PREFERRED_PROVIDER_virtual/update-alternatives-native ?= "opkg-native"
PREFERRED_PROVIDER_virtual/libx11 ?= "libx11"

@@ -28,6 +28,7 @@ SECURITY_CFLAGS_pn-libgcc = "${SECURITY_NO_PIE_CFLAGS}"
SECURITY_CFLAGS_pn-libglu = "${SECURITY_NO_PIE_CFLAGS}"
SECURITY_CFLAGS_pn-libpcre = "${SECURITY_NO_PIE_CFLAGS}"
SECURITY_CFLAGS_pn-mesa = "${SECURITY_NO_PIE_CFLAGS}"
SECURITY_CFLAGS_pn-mesa-gl = "${SECURITY_NO_PIE_CFLAGS}"
SECURITY_CFLAGS_pn-opensp = "${SECURITY_NO_PIE_CFLAGS}"
SECURITY_CFLAGS_pn-ppp = "${SECURITY_NO_PIE_CFLAGS}"
SECURITY_CFLAGS_pn-python = "${SECURITY_NO_PIE_CFLAGS}"

@@ -34,9 +34,8 @@ B_pn-autoconf-native = "${SEPB}"
B_pn-autogen-native = "${SEPB}"
B_pn-automake = "${SEPB}"
B_pn-automake-native = "${SEPB}"
#configure: error: *** xmltoman was not found or was disabled, it is required to build the manpages as they have not been pre-built, install xmltoman, pass --disable-manpages or dont pass --disable-xmltoman
#B_pn-avahi = "${SEPB}"
#B_pn-avahi-ui = "${SEPB}"
B_pn-avahi = "${SEPB}"
B_pn-avahi-ui = "${SEPB}"
B_pn-babeltrace = "${SEPB}"
B_pn-base-passwd = "${SEPB}"
#B_pn-bash = "${SEPB}"

@@ -442,6 +441,7 @@ B_pn-menu-cache = "${SEPB}"
B_pn-mesa-demos = "${SEPB}"
# src/mapi/mapi/stub.c:51:39: error: 'MAPI_TABLE_NUM_DYNAMIC' undeclared here (not in a function)
B_pn-mesa = "${SEPB}"
B_pn-mesa-gl = "${SEPB}"
B_pn-minicom = "${SEPB}"
# Not automake, no external tree support
#B_pn-mkelfimage = "${SEPB}"

@@ -9,7 +9,7 @@ BBFILE_PRIORITY_core = "5"

# This should only be incremented on significant changes that will
# cause compatibility issues with other layers
LAYERVERSION_core = "2"
LAYERVERSION_core = "3"

# Set a variable to get to the top of the metadata location
COREBASE = '${@os.path.normpath("${LAYERDIR}/../")}'

@@ -8,6 +8,7 @@ MULTILIBS ??= "multilib:lib32"

STAGING_DIR_HOST = "${STAGING_DIR}/${MLPREFIX}${MACHINE}"
STAGING_DIR_TARGET = "${STAGING_DIR}/${MLPREFIX}${MACHINE}"
PKGDATA_DIR = "${STAGING_DIR}/${MACHINE}/pkgdata"

INHERIT += "multilib_global"
@@ -23,21 +23,7 @@ def read_pkgdatafile(fn):

    return pkgdata

def all_pkgdatadirs(d):
    dirs = []
    triplets = (d.getVar("PKGMLTRIPLETS") or "").split()
    for t in triplets:
        dirs.append(t + "/runtime/")
    return dirs

def get_subpkgedata_fn(pkg, d):
    dirs = all_pkgdatadirs(d)

    pkgdata = d.expand('${TMPDIR}/pkgdata/')
    for dir in dirs:
        fn = pkgdata + dir + pkg
        if os.path.exists(fn):
            return fn
    return d.expand('${PKGDATA_DIR}/runtime/%s' % pkg)

def has_subpkgdata(pkg, d):

@@ -70,29 +56,24 @@ def read_subpkgdata_dict(pkg, d):
def _pkgmap(d):
    """Return a dictionary mapping package to recipe name."""

    target_os = d.getVar("TARGET_OS", True)
    target_vendor = d.getVar("TARGET_VENDOR", True)
    basedir = os.path.dirname(d.getVar("PKGDATA_DIR", True))

    dirs = ("%s%s-%s" % (arch, target_vendor, target_os)
            for arch in d.getVar("PACKAGE_ARCHS", True).split())
    pkgdatadir = d.getVar("PKGDATA_DIR", True)

    pkgmap = {}
    for pkgdatadir in (os.path.join(basedir, sys) for sys in dirs):
        try:
            files = os.listdir(pkgdatadir)
        except OSError:
            bb.warn("No files in %s?" % pkgdatadir)
            files = []

    for pn in filter(lambda f: not os.path.isdir(os.path.join(pkgdatadir, f)), files):
        try:
            files = os.listdir(pkgdatadir)
            pkgdata = read_pkgdatafile(os.path.join(pkgdatadir, pn))
        except OSError:
            continue

        for pn in filter(lambda f: not os.path.isdir(os.path.join(pkgdatadir, f)), files):
            try:
                pkgdata = read_pkgdatafile(os.path.join(pkgdatadir, pn))
            except OSError:
                continue

        packages = pkgdata.get("PACKAGES") or ""
        for pkg in packages.split():
            pkgmap[pkg] = pn
            packages = pkgdata.get("PACKAGES") or ""
            for pkg in packages.split():
                pkgmap[pkg] = pn

    return pkgmap
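With _pkgmap() now reading a single PKGDATA_DIR, resolving a runtime package back to the recipe that built it stays a one-dictionary lookup. A hedged sketch of calling code inside a BitBake python function (the cached pkgmap(d) wrapper around _pkgmap() is assumed and not shown in this hunk):

```python
# Hedged sketch: map runtime package names back to the recipe (PN) that
# produced them, using the table built by _pkgmap() above. Assumes a
# BitBake datastore "d" and an oe.packagedata.pkgmap(d) wrapper.
import oe.packagedata

def recipes_for_packages(pkgs, d):
    pkgmap = oe.packagedata.pkgmap(d)
    return dict((pkg, pkgmap.get(pkg, "")) for pkg in pkgs)
```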
@@ -9,7 +9,7 @@ class PingTest(oeRuntimeTest):
    def test_ping(self):
        output = ''
        status = None
        endtime = time.time() + 30
        endtime = time.time() + 60
        while status != 0 and time.time() < endtime:
            proc = subprocess.Popen("ping -c 1 %s" % oeRuntimeTest.tc.qemu.ip, shell=True, stdout=subprocess.PIPE)
            output += proc.communicate()[0]

@@ -15,7 +15,7 @@ class SmartTest(oeRuntimeTest):
    @skipUnlessPassed('test_smart_help')
    def smart(self, command, expected = 0):
        command = 'smart %s' % command
        status, output = self.target.run(command, 500)
        status, output = self.target.run(command, 1500)
        message = os.linesep.join([command, output])
        self.assertEqual(status, expected, message)
        self.assertFalse("Cannot allocate memory" in output, message)

@@ -16,7 +16,7 @@ import bb

class QemuRunner:

    def __init__(self, machine, rootfs, display = None, tmpdir = None, logfile = None, boottime = 400, runqemutime = 60):
    def __init__(self, machine, rootfs, display = None, tmpdir = None, deploy_dir_image = None, logfile = None, boottime = 400, runqemutime = 60):
        # Popen object
        self.runqemu = None

@@ -28,6 +28,7 @@ class QemuRunner:

        self.display = display
        self.tmpdir = tmpdir
        self.deploy_dir_image = deploy_dir_image
        self.logfile = logfile
        self.boottime = boottime
        self.runqemutime = runqemutime

@@ -71,6 +72,11 @@ class QemuRunner:
            return False
        else:
            os.environ["OE_TMPDIR"] = self.tmpdir
        if not os.path.exists(self.deploy_dir_image):
            bb.error("Invalid DEPLOY_DIR_IMAGE path %s" % self.deploy_dir_image)
            return False
        else:
            os.environ["DEPLOY_DIR_IMAGE"] = self.deploy_dir_image

        self.qemuparams = 'bootparams="console=tty1 console=ttyS0,115200n8" qemuparams="-serial tcp:127.0.0.1:%s"' % self.serverport
        if qemuparams:
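The QemuRunner constructor gains a deploy_dir_image argument, and its start path now validates that directory and exports DEPLOY_DIR_IMAGE before launching runqemu, mirroring what testimage_main sets on the instance. A minimal usage sketch with placeholder paths (not taken from any recipe):

```python
# Minimal usage sketch with placeholder paths: configure QemuRunner the
# way testimage_main does after this change, then boot and stop the image.
from oeqa.utils.qemurunner import QemuRunner

qemu = QemuRunner(machine="qemux86",
                  rootfs="/build/tmp/deploy/images/qemux86/core-image-sato-qemux86.ext3")
qemu.tmpdir = "/build/tmp"
qemu.deploy_dir_image = "/build/tmp/deploy/images/qemux86"
qemu.display = ":0"  # testimage_main takes this from BB_ORIGENV
qemu.logfile = "/tmp/qemu_boot_log"

if qemu.start():
    print("qemu is up at %s" % qemu.ip)
    qemu.stop()
```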
meta/recipes-bsp/apmd/apmd-3.2.2-14/apmd.service (new file, 7 lines)
@@ -0,0 +1,7 @@
[Unit]
Description=Advanced Power Management daemon
After=remote-fs.target

[Service]
EnvironmentFile=-@SYSCONFDIR@/default/apmd
ExecStart=@SBINDIR@/apmd -P @SYSCONFDIR@/apm/apmd_proxy $APMD

@@ -17,7 +17,8 @@ SRC_URI = "${DEBIAN_MIRROR}/main/a/apmd/apmd_3.2.2.orig.tar.gz;name=tarball \
    file://init \
    file://default \
    file://apmd_proxy \
    file://apmd_proxy.conf"
    file://apmd_proxy.conf \
    file://apmd.service"

SRC_URI[tarball.md5sum] = "b1e6309e8331e0f4e6efd311c2d97fa8"
SRC_URI[tarball.sha256sum] = "7f7d9f60b7766b852881d40b8ff91d8e39fccb0d1d913102a5c75a2dbb52332d"

@@ -27,11 +28,14 @@ SRC_URI[patch.sha256sum] = "7905ff96be93d725544d0040e425c42f9c05580db3c272f11cff

S = "${WORKDIR}/apmd-3.2.2.orig"

inherit update-rc.d
inherit update-rc.d systemd

INITSCRIPT_NAME = "apmd"
INITSCRIPT_PARAMS = "defaults"

SYSTEMD_SERVICE_${PN} = "apmd.service"
SYSTEMD_AUTO_ENABLE = "disable"

do_compile() {
    # apmd doesn't use whole autotools. Just libtool for installation
    oe_runmake "LIBTOOL=${STAGING_BINDIR_CROSS}/${HOST_SYS}-libtool" apm apmd

@@ -63,6 +67,11 @@ do_install() {

    cat ${WORKDIR}/init | sed -e 's,/usr/sbin,${sbindir},g; s,/etc,${sysconfdir},g;' > ${D}${sysconfdir}/init.d/apmd
    chmod 755 ${D}${sysconfdir}/init.d/apmd

    install -d ${D}${systemd_unitdir}/system
    install -m 0644 ${WORKDIR}/apmd.service ${D}${systemd_unitdir}/system/
    sed -i -e 's,@SYSCONFDIR@,${sysconfdir},g' \
        -e 's,@SBINDIR@,${sbindir},g' ${D}${systemd_unitdir}/system/apmd.service
}

PACKAGES =+ "libapm libapm-dev libapm-staticdev apm"
meta/recipes-bsp/u-boot/u-boot-fw-utils_2013.07.bb (new file, 31 lines)
@@ -0,0 +1,31 @@
DESCRIPTION = "U-boot bootloader fw_printenv/setenv utils"
LICENSE = "GPLv2+"
LIC_FILES_CHKSUM = "file://COPYING;md5=1707d6db1d42237583f50183a5651ecb"
SECTION = "bootloader"
DEPENDS = "mtd-utils"

# This revision corresponds to the tag "v2013.07"
# We use the revision in order to avoid having to fetch it from the
# repo during parse
SRCREV = "62c175fbb8a0f9a926c88294ea9f7e88eb898f6c"

PV = "v2013.07+git${SRCPV}"

SRC_URI = "git://git.denx.de/u-boot.git;branch=master;protocol=git"

S = "${WORKDIR}/git"

EXTRA_OEMAKE = 'HOSTCC="${CC}" HOSTSTRIP="true"'

do_compile () {
    oe_runmake ${UBOOT_MACHINE}
    oe_runmake env
}

do_install () {
    install -d ${D}${base_sbindir}
    install -m 755 ${S}/tools/env/fw_printenv ${D}${base_sbindir}/fw_printenv
    install -m 755 ${S}/tools/env/fw_printenv ${D}${base_sbindir}/fw_setenv
}

PACKAGE_ARCH = "${MACHINE_ARCH}"

@@ -24,6 +24,7 @@ SRC_URI = "http://avahi.org/download/avahi-${PV}.tar.gz \
    file://initscript.patch \
    file://avahi_fix_install_issue.patch \
    file://fix_for_automake_1.12.x.patch \
    file://out-of-tree.patch \
    "

USERADD_PACKAGES = "avahi-daemon avahi-autoipd"
meta/recipes-connectivity/avahi/files/out-of-tree.patch (new file, 32 lines)
@@ -0,0 +1,32 @@
Upstream-Status: Pending
Signed-off-by: Ross Burton <ross.burton@intel.com>

From a62dc95d75691ea4aefa86d8bbe54c62afd78ff6 Mon Sep 17 00:00:00 2001
From: Ross Burton <ross.burton@intel.com>
Date: Tue, 17 Sep 2013 12:27:36 +0100
Subject: [PATCH] build-sys: fix out-of-tree builds without xmltoman

If manpages are enabled but xmltoman isn't present, out-of-tree builds fail
because it checks inside the build directory for the pre-generated manpages.

Fix this by using $srcdir when looking for files inside the source directory.
---
 configure.ac | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/configure.ac b/configure.ac
index 9debce2..047c7ae 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1021,7 +1021,7 @@ if test x$manpages = xyes ; then
 fi

 if test x$have_xmltoman = xno -o x$xmltoman = xno; then
-   if ! test -e man/avahi-daemon.8 ; then
+   if ! test -e $srcdir/man/avahi-daemon.8 ; then
        AC_MSG_ERROR([*** xmltoman was not found or was disabled, it is required to build the manpages as they have not been pre-built, install xmltoman, pass --disable-manpages or dont pass --disable-xmltoman])
        exit 1
    fi
--
1.7.10.4
@@ -30,16 +30,23 @@ do_start() {
    EXTRA_PARAM=""
    if test $nfsroot -eq 1 ; then
        NET_DEVS=`cat /proc/net/dev | sed -ne 's/^\([a-zA-Z0-9 ]*\):.*$/\1/p'`
        NET_ADDR=`cat /proc/cmdline | sed -ne 's/^.*ip=\([^ ]*\):.*$/\1/p'`
        NET_ADDR=`cat /proc/cmdline | sed -ne 's/^.*ip=\([^ :]*\).*$/\1/p'`

        if [ x$NET_ADDR != x ]; then
            for i in $NET_DEVS; do
                ADDR=`ifconfig $i | sed 's/addr://g' | sed -ne 's/^.*inet \([0-9.]*\) .*$/\1/p'`
                if [ "$NET_ADDR" = "$ADDR" ]; then
                    EXTRA_PARAM="-I $i"
                    break
        if [ ! -z "$NET_ADDR" ]; then
            if [ "$NET_ADDR" = dhcp ]; then
                ethn=`ifconfig | grep "^eth" | sed -e "s/\(eth[0-9]\)\(.*\)/\1/"`
                if [ ! -z "$ethn" ]; then
                    EXTRA_PARAM="-I $ethn"
                fi
            done
            else
                for i in $NET_DEVS; do
                    ADDR=`ifconfig $i | sed 's/addr://g' | sed -ne 's/^.*inet \([0-9.]*\) .*$/\1/p'`
                    if [ "$NET_ADDR" = "$ADDR" ]; then
                        EXTRA_PARAM="-I $i"
                        break
                    fi
                done
            fi
        fi
    fi
    if [ -f @LIBDIR@/connman/wired-setup ] ; then
@@ -0,0 +1 @@
d root root 0755 /var/run/sshd none

@@ -29,7 +29,8 @@ SRC_URI = "ftp://ftp.openbsd.org/pub/OpenBSD/OpenSSH/portable/openssh-${PV}.tar.
    ${@base_contains('DISTRO_FEATURES', 'pam', '${PAM_SRC_URI}', '', d)} \
    file://sshd.socket \
    file://sshd@.service \
    file://sshdgenkeys.service "
    file://sshdgenkeys.service \
    file://volatiles.99_sshd "

PAM_SRC_URI = "file://sshd"

@@ -92,6 +93,9 @@ do_install_append () {
    install -m 0755 ${WORKDIR}/init ${D}${sysconfdir}/init.d/sshd
    rm -f ${D}${bindir}/slogin ${D}${datadir}/Ssh.bin
    rmdir ${D}${localstatedir}/run/sshd ${D}${localstatedir}/run ${D}${localstatedir}
    install -d ${D}/${sysconfdir}/default/volatiles
    install -m 644 ${WORKDIR}/volatiles.99_sshd ${D}/${sysconfdir}/default/volatiles/99_sshd

    # Create config files for read-only rootfs
    install -d ${D}${sysconfdir}/ssh
    install -m 644 ${WORKDIR}/sshd_config ${D}${sysconfdir}/ssh/sshd_config_readonly

@@ -116,7 +120,7 @@ PACKAGES =+ "${PN}-keygen ${PN}-scp ${PN}-ssh ${PN}-sshd ${PN}-sftp ${PN}-misc $
FILES_${PN}-scp = "${bindir}/scp.${BPN}"
FILES_${PN}-ssh = "${bindir}/ssh.${BPN} ${sysconfdir}/ssh/ssh_config"
FILES_${PN}-sshd = "${sbindir}/sshd ${sysconfdir}/init.d/sshd"
FILES_${PN}-sshd += "${sysconfdir}/ssh/moduli ${sysconfdir}/ssh/sshd_config ${sysconfdir}/ssh/sshd_config_readonly"
FILES_${PN}-sshd += "${sysconfdir}/ssh/moduli ${sysconfdir}/ssh/sshd_config ${sysconfdir}/ssh/sshd_config_readonly ${sysconfdir}/default/volatiles/99_sshd"
FILES_${PN}-sftp = "${bindir}/sftp"
FILES_${PN}-sftp-server = "${libexecdir}/sftp-server"
FILES_${PN}-misc = "${bindir}/ssh* ${libexecdir}/ssh*"
@@ -1,26 +0,0 @@
busybox: move /bin/su to /usr/bin/su to match util-linux and shadow

Both of util-linux and shadow have su binary in /usr/bin, fix busybox
to use the same path so they can be properly tracked by alternatives.

Upstream-Status: Inappropriate [embedded]
Signed-off-by: Hongxu Jia <hongxu.jia@windriver.com>
---
 include/applets.src.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/applets.src.h b/include/applets.src.h
--- a/include/applets.src.h
+++ b/include/applets.src.h
@@ -349,7 +349,7 @@ IF_STAT(APPLET(stat, BB_DIR_USR_BIN, BB_SUID_DROP))
 IF_STRINGS(APPLET(strings, BB_DIR_USR_BIN, BB_SUID_DROP))
 IF_STTY(APPLET(stty, BB_DIR_BIN, BB_SUID_DROP))
 /* Needs to be run by root or be suid root - needs to change uid and gid: */
-IF_SU(APPLET(su, BB_DIR_BIN, BB_SUID_REQUIRE))
+IF_SU(APPLET(su, BB_DIR_USR_BIN, BB_SUID_REQUIRE))
 IF_SULOGIN(APPLET(sulogin, BB_DIR_SBIN, BB_SUID_DROP))
 IF_SUM(APPLET(sum, BB_DIR_USR_BIN, BB_SUID_DROP))
 IF_SV(APPLET(sv, BB_DIR_USR_BIN, BB_SUID_DROP))
--
1.8.1.2

@@ -24,7 +24,6 @@ SRC_URI = "http://www.busybox.net/downloads/busybox-${PV}.tar.bz2;name=tarball \
    file://umount.busybox \
    file://defconfig \
    file://stat-usr-bin.patch \
    file://su-usr-bin.patch \
    file://busybox-syslog.service.in \
    file://busybox-klogd.service.in \
    file://testsuite-du-du-k-works-fix-false-positive.patch \
@@ -57,6 +57,12 @@ base_bindir_progs = "cat chgrp chmod chown cp date dd echo false kill ln ls mkdi

sbindir_progs= "chroot"

# Let aclocal use the relative path for the m4 file rather than the
# absolute since coreutils has a lot of m4 files, otherwise there might
# be an "Argument list too long" error when it is built in a long/deep
# directory.
acpaths = "-I ./m4"

do_install() {
    autotools_do_install

@@ -47,6 +47,12 @@ base_bindir_progs = "cat chgrp chmod chown cp date dd echo false kill ln ls mkdi

sbindir_progs= "chroot"

# Let aclocal use the relative path for the m4 file rather than the
# absolute since coreutils has a lot of m4 files, otherwise there might
# be an "Argument list too long" error when it is built in a long/deep
# directory.
acpaths = "-I ./m4"

# Deal with a separate builddir failure if src doesn't exist when creating version.c/version.h
do_compile_prepend () {
    mkdir -p ${B}/src
@@ -0,0 +1,43 @@
Upstream-Status: Backport

From d0721e703d222c01a9e8c329311c4fb01dac6972 Mon Sep 17 00:00:00 2001
From: Carlos O'Donell <carlos@redhat.com>
Date: Thu, 29 Aug 2013 00:17:33 -0400
Subject: [PATCH] ARM: Pass dl_hwcap to IFUNC resolver.

For REL relocs pass dl_hwcap to the IFUNC resolver
as is required by the IFUNC API (bug 15905).
---
 NEWS | 2 +-
 ports/ChangeLog.arm | 7 +++++++
 ports/sysdeps/arm/dl-machine.h | 2 +-
 3 files changed, 9 insertions(+), 2 deletions(-)

Index: libc/ports/ChangeLog.arm
===================================================================
--- libc.orig/ports/ChangeLog.arm 2013-07-14 16:08:52.000000000 -0700
+++ libc/ports/ChangeLog.arm 2013-09-12 17:17:24.174387150 -0700
@@ -1,3 +1,10 @@
+2013-08-28 Kyle McMartin <kyle@redhat.com>
+ Carlos O'Donell <carlos@redhat.com>
+
+ [BZ #15905]
+ * sysdeps/arm/dl-machine [!RTLD_BOOTSTRAP] (elf_machine_rel):
+ Pass GLRO(dl_hwcap) to the IFUNC resolver.
+
 2013-07-03 Joseph Myers <joseph@codesourcery.com>

 * sysdeps/arm/include/bits/setjmp.h [_ISOMAC] (JMP_BUF_REGLIST):

Index: libc/ports/sysdeps/arm/dl-machine.h
===================================================================
--- libc.orig/ports/sysdeps/arm/dl-machine.h 2013-09-12 08:56:03.641743520 -0700
+++ libc/ports/sysdeps/arm/dl-machine.h 2013-09-12 17:17:24.174387150 -0700
@@ -503,7 +503,7 @@
       break;
     case R_ARM_IRELATIVE:
       value = map->l_addr + *reloc_addr;
-      value = ((Elf32_Addr (*) (void)) value) ();
+      value = ((Elf32_Addr (*) (int)) value) (GLRO(dl_hwcap));
       *reloc_addr = value;
       break;
 #endif

@@ -26,6 +26,7 @@ SRC_URI = "http://downloads.yoctoproject.org/releases/eglibc/eglibc-${PV}-svnr23
    file://tzselect-awk.patch \
    file://0001-eglibc-run-libm-err-tab.pl-with-specific-dirs-in-S.patch \
    file://fix-tibetian-locales.patch \
    file://0001-ARM-Pass-dl_hwcap-to-IFUNC-resolver.patch \
    "
SRC_URI[md5sum] = "b395b021422a027d89884992e91734fc"
SRC_URI[sha256sum] = "15f564b45dc5dd65faf0875579e3447961ae61e876933384ae05d19328539ad4"
@@ -11,7 +11,7 @@ ISOLINUX=""
# Copied from initramfs-framework. The core of this script probably should be
# turned into initramfs-framework modules to reduce duplication.
udev_daemon() {
    OPTIONS="/sbin/udev/udevd /sbin/udevd /lib/udev/udevd /sbin/systemd/systemd-udevd /lib/systemd/systemd-udevd"
    OPTIONS="/sbin/udev/udevd /sbin/udevd /lib/udev/udevd /lib/systemd/systemd-udevd"

    for o in $OPTIONS; do
        if [ -x "$o" ]; then

@@ -11,7 +11,7 @@ udev_shutdown_hook_handler() {
}

udev_daemon() {
    OPTIONS="/sbin/udev/udevd /sbin/udevd /lib/udev/udevd /sbin/systemd/systemd-udevd /lib/systemd/systemd-udevd"
    OPTIONS="/sbin/udev/udevd /sbin/udevd /lib/udev/udevd /lib/systemd/systemd-udevd"

    for o in $OPTIONS; do
        if [ -x "$o" ]; then
@@ -0,0 +1,28 @@
Upstream-Status: Accepted
This patch was accepted in kbd-1.5.4

From e069cfa96b41a6714118e15496cdd6b83fc16ad2 Mon Sep 17 00:00:00 2001
From: Alexey Gladkov <gladkov.alexey@gmail.com>
Date: Mon, 27 Feb 2012 14:30:50 +0400
Subject: [PATCH] Allow resizecons on x86_64

Signed-off-by: Alexey Gladkov <gladkov.alexey@gmail.com>
---
 configure.ac | 1 +
 1 files changed, 1 insertions(+), 0 deletions(-)

diff --git a/configure.ac b/configure.ac
index a8115ed..b58f3bd 100644
--- a/configure.ac
+++ b/configure.ac
@@ -62,6 +62,7 @@ AM_CONDITIONAL(KEYCODES_PROGS, test "$KEYCODES_PROGS" = "yes")

 case $host_cpu in
   i?86*) RESIZECONS_PROGS=yes ;;
+  x86_64*) RESIZECONS_PROGS=yes ;;
   *) RESIZECONS_PROGS=no ;;
 esac
 AM_CONDITIONAL(RESIZECONS_PROGS, test "$RESIZECONS_PROGS" = "yes")
--
1.7.1

@@ -12,7 +12,9 @@ RCONFLICTS_${PN} = "console-tools"

PR = "r4"

SRC_URI="${KERNELORG_MIRROR}/linux/utils/kbd/kbd-1.15.2.tar.bz2"
SRC_URI = "${KERNELORG_MIRROR}/linux/utils/kbd/kbd-1.15.2.tar.bz2 \
           file://Allow-resizecons-on-x86_64.patch"

SRC_URI[md5sum] = "e850eb91e4d3b94b194efe8e953204c5"
SRC_URI[sha256sum] = "b3602d191eef7a6a8317fc3cd231efa40a89ac235dce57a77cac825a2a21eba6"
@@ -1,34 +0,0 @@
libxml2 CVE-2012-2871

the patch come from:
http://src.chromium.org/viewvc/chrome/trunk/src/third_party/libxml/src \
/include/libxml/tree.h?r1=56276&r2=149930

libxml2 2.9.0-rc1 and earlier, as used in Google Chrome before 21.0.1180.89,
does not properly support a cast of an unspecified variable during handling
of XSL transforms, which allows remote attackers to cause a denial of service
or possibly have unknown other impact via a crafted document, related to the
_xmlNs data structure in include/libxml/tree.h.

http://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2012-2871

Signed-off-by: Li Wang <li.wang@windriver.com>
---
 include/libxml/tree.h | 1 +
 1 files changed, 1 insertions(+), 0 deletions(-)

diff --git a/include/libxml/tree.h b/include/libxml/tree.h
index b733589..5422dda 100644
--- a/include/libxml/tree.h
+++ b/include/libxml/tree.h
@@ -351,6 +351,7 @@ struct _xmlNs {
     struct _xmlNs *next; /* next Ns link for this node */
     xmlNsType type; /* global or local */
     const xmlChar *href; /* URL for the namespace */
+    const char *dummy_children; /* lines up with node->children */
     const xmlChar *prefix; /* prefix for the namespace */
     void *_private; /* application data */
     struct _xmlDoc *context; /* normally an xmlDoc */
--
1.7.0.5

@@ -1,8 +1,6 @@
require libxml2.inc

SRC_URI += "file://libxml2-CVE-2012-2871.patch \
            http://www.w3.org/XML/Test/xmlts20080827.tar.gz;name=testtar \
           "
SRC_URI += "http://www.w3.org/XML/Test/xmlts20080827.tar.gz;name=testtar"

SRC_URI[libtar.md5sum] = "9c0cfef285d5c4a5c80d00904ddab380"
SRC_URI[libtar.sha256sum] = "fd3c64cb66f2c4ea27e934d275904d92cec494a8e8405613780cbc8a71680fdb"
meta/recipes-core/systemd/systemd/run-ptest (new file, 2 lines)
@@ -0,0 +1,2 @@
make test/rules-test.sh.log
make test/udev-test.pl.log

@@ -15,7 +15,7 @@ DEPENDS += "${@base_contains('DISTRO_FEATURES', 'pam', 'libpam', '', d)}"

SECTION = "base/shell"

inherit gtk-doc useradd pkgconfig autotools perlnative update-rc.d update-alternatives qemu systemd
inherit gtk-doc useradd pkgconfig autotools perlnative update-rc.d update-alternatives qemu systemd ptest

SRC_URI = "http://www.freedesktop.org/software/systemd/systemd-${PV}.tar.xz \
           file://0001-use-CAP_MKNOD-ConditionCapability.patch \

@@ -25,6 +25,7 @@ SRC_URI = "http://www.freedesktop.org/software/systemd/systemd-${PV}.tar.xz \
           ${UCLIBCPATCHES} \
           file://00-create-volatile.conf \
           file://init \
           file://run-ptest \
          "
SRC_URI[md5sum] = "89e36f2d3ba963020b72738549954cbc"
SRC_URI[sha256sum] = "4c993de071118ea1df7ffc4be26ef0b0d78354ef15b2743a2783d20edfcde9de"

@@ -114,6 +115,19 @@ do_install() {
    fi
}

do_install_ptest () {
    install -d ${D}${PTEST_PATH}/test
    install -d ${D}${libdir}/udev/rules.d
    install ${B}/test/* ${D}${PTEST_PATH}/test
    install -m 0755 ${B}/test-udev ${D}${PTEST_PATH}/
    install -d ${D}${PTEST_PATH}/build-aux
    cp -rf ${B}/rules ${D}${PTEST_PATH}/
    cp ${B}/Makefile ${D}${PTEST_PATH}/
    cp ${B}/build-aux/test-driver ${D}${PTEST_PATH}/build-aux/
    tar -C ${D}${PTEST_PATH}/test -xJf ${B}/test/sys.tar.xz
    sed -i 's/"tree"/"ls"/' ${D}${PTEST_PATH}/test/udev-test.pl
}

python populate_packages_prepend (){
    systemdlibdir = d.getVar("rootlibdir", True)
    do_split_packages(d, systemdlibdir, '^lib(.*)\.so\.*', 'lib%s', 'Systemd %s library', extra_depends='', allow_links=True)

@@ -262,7 +276,7 @@ python __anonymous() {
# TODO:
# u-a for runlevel and telinit

ALTERNATIVE_${PN} = "init halt reboot shutdown poweroff"
ALTERNATIVE_${PN} = "init halt reboot shutdown poweroff runlevel"

ALTERNATIVE_TARGET[init] = "${rootlibexecdir}/systemd/systemd"
ALTERNATIVE_LINK_NAME[init] = "${base_sbindir}/init"

@@ -284,6 +298,10 @@ ALTERNATIVE_TARGET[poweroff] = "${base_bindir}/systemctl"
ALTERNATIVE_LINK_NAME[poweroff] = "${base_sbindir}/poweroff"
ALTERNATIVE_PRIORITY[poweroff] ?= "300"

ALTERNATIVE_TARGET[runlevel] = "${base_bindir}/systemctl"
ALTERNATIVE_LINK_NAME[runlevel] = "${base_sbindir}/runlevel"
ALTERNATIVE_PRIORITY[runlevel] ?= "300"

pkg_postinst_udev-hwdb () {
    if test -n "$D"; then
        ${@qemu_run_binary(d, '$D', '${base_bindir}/udevadm')} hwdb --update \

@@ -1,6 +0,0 @@
#!/bin/sh

# need binary that doesnt do anything, supplied for simpleinit
# vs sysvinit compatibility for the supplied startup scripts

exit 0

@@ -1,6 +0,0 @@
#!/bin/sh

# provide binary that doesnt do anything, supplied for simpleinit
# vs sysvinit compatibility for the supplied startup scripts

exit 0