mirror of
https://git.yoctoproject.org/poky
synced 2026-04-25 06:32:12 +02:00
Compare commits
280 Commits
krogoth-15
...
krogoth
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
73cc31c11a | ||
|
|
444dc2e99b | ||
|
|
bddb60b101 | ||
|
|
1083d90888 | ||
|
|
54e3f82bd7 | ||
|
|
426bc4c357 | ||
|
|
3ca9f90dff | ||
|
|
ccc964cf9f | ||
|
|
50fdd78423 | ||
|
|
3cf0e09348 | ||
|
|
4515fc9529 | ||
|
|
628aea354d | ||
|
|
3565a9697f | ||
|
|
fe7fb00221 | ||
|
|
7241042b70 | ||
|
|
546c0cffca | ||
|
|
224e04d6ce | ||
|
|
172105c1ef | ||
|
|
0fa93e1412 | ||
|
|
d54e1f4ff5 | ||
|
|
b24988bec7 | ||
|
|
a220e2ca34 | ||
|
|
3ac7c847e8 | ||
|
|
80b35ed1a2 | ||
|
|
7b9e031355 | ||
|
|
cb5649cbb8 | ||
|
|
dd20601980 | ||
|
|
d3c0a560a8 | ||
|
|
62685cbff5 | ||
|
|
3d9f6dc163 | ||
|
|
8aea6ad597 | ||
|
|
051883f877 | ||
|
|
0c78f81485 | ||
|
|
98f3e83884 | ||
|
|
819f7c3d03 | ||
|
|
4245995f76 | ||
|
|
0e8fcf8c9c | ||
|
|
13f0eee08d | ||
|
|
6eb266a365 | ||
|
|
577eb635ab | ||
|
|
553d5f65e8 | ||
|
|
47ef871649 | ||
|
|
db0832ead6 | ||
|
|
863bfa81af | ||
|
|
014af27dcb | ||
|
|
ca4703b6cf | ||
|
|
98e368e4b6 | ||
|
|
3c61ee2f68 | ||
|
|
ec00137169 | ||
|
|
11b217d60b | ||
|
|
c71ea3831a | ||
|
|
3428c1db71 | ||
|
|
4cd7b56228 | ||
|
|
5dd02c6db1 | ||
|
|
0ed07f2658 | ||
|
|
c33bac8883 | ||
|
|
c76d565ce2 | ||
|
|
04f04d0d17 | ||
|
|
d8cbc618cc | ||
|
|
1c73e41159 | ||
|
|
212ca3bee1 | ||
|
|
384801e827 | ||
|
|
5c9148ff6a | ||
|
|
cec5e508ec | ||
|
|
ddc6a9f5cd | ||
|
|
8b50a8676b | ||
|
|
12afe3c057 | ||
|
|
f5e807efc7 | ||
|
|
cf7507f8c4 | ||
|
|
eb0dff0c98 | ||
|
|
9a72d46aed | ||
|
|
ad2cce0f1e | ||
|
|
c96936cfd9 | ||
|
|
047e58b4ba | ||
|
|
485e244db8 | ||
|
|
e8676b4f1a | ||
|
|
cef5f86f43 | ||
|
|
1a2ec16ec0 | ||
|
|
035c33c405 | ||
|
|
8aaffcd59a | ||
|
|
73274f258a | ||
|
|
b291829cfc | ||
|
|
1bccc216ee | ||
|
|
8f4b7758b5 | ||
|
|
95dae8b598 | ||
|
|
129060f0b7 | ||
|
|
f69b958176 | ||
|
|
c3c14808dc | ||
|
|
c60a0a51d7 | ||
|
|
e59717e80f | ||
|
|
b4df9df462 | ||
|
|
ae9b341ecf | ||
|
|
3bf928a3b6 | ||
|
|
0742e8a43b | ||
|
|
cca8dd15c8 | ||
|
|
8e4188e274 | ||
|
|
0ad194919f | ||
|
|
49a01fd044 | ||
|
|
0aedf304e5 | ||
|
|
f0f6acac03 | ||
|
|
bae35b3e5f | ||
|
|
2de121703d | ||
|
|
8a12e713f9 | ||
|
|
2b0f105e59 | ||
|
|
c9f172aa5e | ||
|
|
f7e1cd9f85 | ||
|
|
ec240f45ae | ||
|
|
e92679a6eb | ||
|
|
f979c50029 | ||
|
|
a6b8fda00c | ||
|
|
1b9a98f78c | ||
|
|
d72e66f34b | ||
|
|
e2c2d723ed | ||
|
|
478a38187f | ||
|
|
cc811f4992 | ||
|
|
f7ec29ca3f | ||
|
|
0d390bfb5a | ||
|
|
ca9d26a08d | ||
|
|
49de8caab0 | ||
|
|
d672a4cc3c | ||
|
|
be15df5099 | ||
|
|
2cb87d12d2 | ||
|
|
57531002b8 | ||
|
|
c4061a0a68 | ||
|
|
6962ee3689 | ||
|
|
191666022a | ||
|
|
53766fb01f | ||
|
|
3134fb2861 | ||
|
|
b169435134 | ||
|
|
95e3d71080 | ||
|
|
2de1a5cefb | ||
|
|
a7c3e18de0 | ||
|
|
bd2cc670be | ||
|
|
b108f2a6de | ||
|
|
2fcc8d6e52 | ||
|
|
c3c25ac53d | ||
|
|
7343438092 | ||
|
|
8f5becc3ab | ||
|
|
732dd581f3 | ||
|
|
40f4a6d075 | ||
|
|
88b7f1a1e2 | ||
|
|
8e2ab57852 | ||
|
|
204b2bae4a | ||
|
|
e93596fe74 | ||
|
|
56a27c9aad | ||
|
|
4b27738c5e | ||
|
|
529bbe2cc2 | ||
|
|
82641d700d | ||
|
|
118f7a2247 | ||
|
|
5b24e5b39b | ||
|
|
a78dddb624 | ||
|
|
3b3cdfd71a | ||
|
|
ed4ed5313b | ||
|
|
de056577ce | ||
|
|
2ea93e2b1d | ||
|
|
2b330e5439 | ||
|
|
e08094e604 | ||
|
|
5f97311702 | ||
|
|
7026b2b05a | ||
|
|
8e5e92193a | ||
|
|
06ed5c5a10 | ||
|
|
9fa0bc4500 | ||
|
|
82017f2367 | ||
|
|
e1e5b18a5e | ||
|
|
9995a7a144 | ||
|
|
9fd6b093a4 | ||
|
|
b7bb83a4bb | ||
|
|
45bc60015c | ||
|
|
e6c1d03d3d | ||
|
|
d2ca721d31 | ||
|
|
260ff60f93 | ||
|
|
71291ed53e | ||
|
|
5b3af2abd7 | ||
|
|
70c4134e4b | ||
|
|
90dd677528 | ||
|
|
6db9299d9e | ||
|
|
2561b58ac8 | ||
|
|
9e14b83fa4 | ||
|
|
a8ac03fce1 | ||
|
|
8b9b998258 | ||
|
|
76aa0c3d5d | ||
|
|
11c8c8aa15 | ||
|
|
5a8a6a753f | ||
|
|
aa4b7b2257 | ||
|
|
ea62893915 | ||
|
|
990b8e7919 | ||
|
|
db8258864e | ||
|
|
58538b0703 | ||
|
|
96fe15caf6 | ||
|
|
b6e4966874 | ||
|
|
a837c6be8f | ||
|
|
414aad04b6 | ||
|
|
8a7607f470 | ||
|
|
cce2867828 | ||
|
|
0458275013 | ||
|
|
6f60d91adc | ||
|
|
642890f5d0 | ||
|
|
5368cfee9e | ||
|
|
e588da43b0 | ||
|
|
c32c7522e5 | ||
|
|
62696defc0 | ||
|
|
deca0d3736 | ||
|
|
a220c3a1a9 | ||
|
|
ef6ff739c7 | ||
|
|
d9369d1ea0 | ||
|
|
17e4586d6e | ||
|
|
6175bd0930 | ||
|
|
016df260e5 | ||
|
|
5d781f41ff | ||
|
|
d3ee5489c9 | ||
|
|
9a1694e242 | ||
|
|
3cc3ff6244 | ||
|
|
16f046f38f | ||
|
|
7639be6851 | ||
|
|
a10c9109e2 | ||
|
|
6ac72e8be2 | ||
|
|
c594ff73ab | ||
|
|
b9e99832b9 | ||
|
|
440e3cd2c2 | ||
|
|
977dd47c69 | ||
|
|
2b029e56f9 | ||
|
|
b6f4d24fbc | ||
|
|
ab4f42608a | ||
|
|
23aabca217 | ||
|
|
d9d046c28a | ||
|
|
b6bb27c4c9 | ||
|
|
0271b3ab00 | ||
|
|
4cbb398d85 | ||
|
|
66a4366e8f | ||
|
|
b64fa0af89 | ||
|
|
8f300880c4 | ||
|
|
28344dfed4 | ||
|
|
8c69f7d56c | ||
|
|
aad7166704 | ||
|
|
6980d4fa2f | ||
|
|
094a36886f | ||
|
|
7e11efef59 | ||
|
|
8854de1ffd | ||
|
|
cefa06d985 | ||
|
|
ecb5183b9a | ||
|
|
2bb93e3567 | ||
|
|
2a17af9652 | ||
|
|
3831cdc1b1 | ||
|
|
e01993c3d5 | ||
|
|
7d70e67479 | ||
|
|
fc75bea445 | ||
|
|
59ef3c315b | ||
|
|
eef3fb99d0 | ||
|
|
12eb72ee3b | ||
|
|
a8377d1073 | ||
|
|
52e13fb007 | ||
|
|
9f0eaae229 | ||
|
|
cf181cdb52 | ||
|
|
6f9ef13d0a | ||
|
|
4c36d5209e | ||
|
|
e177680fa0 | ||
|
|
365f85179d | ||
|
|
8ffab431a2 | ||
|
|
ebed0191f9 | ||
|
|
a779b36e9c | ||
|
|
ea438b421d | ||
|
|
45f2a20349 | ||
|
|
853db300f5 | ||
|
|
f07fedb2fb | ||
|
|
1969871269 | ||
|
|
a8279122b9 | ||
|
|
9adc11d4ac | ||
|
|
b5a67a2f7b | ||
|
|
bd47f3f3e6 | ||
|
|
1d7983106c | ||
|
|
4cf38836ac | ||
|
|
ffb615a50b | ||
|
|
1931dfc1cb | ||
|
|
2be23abe85 | ||
|
|
9891a867ef | ||
|
|
e1f49c6068 | ||
|
|
046fd3cb83 | ||
|
|
913b4e5910 | ||
|
|
046f1e6b4c | ||
|
|
e5353a9158 |
@@ -134,7 +134,7 @@
|
|||||||
<ulink url="http://www.mail-archive.com/yocto@yoctoproject.org/msg09379.html">Mailing List post - The BitBake equivalent of "Hello, World!"</ulink>
|
<ulink url="http://www.mail-archive.com/yocto@yoctoproject.org/msg09379.html">Mailing List post - The BitBake equivalent of "Hello, World!"</ulink>
|
||||||
</para></listitem>
|
</para></listitem>
|
||||||
<listitem><para>
|
<listitem><para>
|
||||||
<ulink url="http://hambedded.org/blog/2012/11/24/from-bitbake-hello-world-to-an-image/">Hambedded Linux blog post - From Bitbake Hello World to an Image</ulink>
|
<ulink url="https://web.archive.org/web/20150325165911/http://hambedded.org/blog/2012/11/24/from-bitbake-hello-world-to-an-image/">Hambedded Linux blog post - From Bitbake Hello World to an Image</ulink>
|
||||||
</para></listitem>
|
</para></listitem>
|
||||||
</itemizedlist>
|
</itemizedlist>
|
||||||
</note>
|
</note>
|
||||||
@@ -269,7 +269,7 @@
|
|||||||
and define some key BitBake variables.
|
and define some key BitBake variables.
|
||||||
For more information on the <filename>bitbake.conf</filename>,
|
For more information on the <filename>bitbake.conf</filename>,
|
||||||
see
|
see
|
||||||
<ulink url='http://hambedded.org/blog/2012/11/24/from-bitbake-hello-world-to-an-image/#an-overview-of-bitbakeconf'></ulink>
|
<ulink url='https://web.archive.org/web/20150325165911/http://hambedded.org/blog/2012/11/24/from-bitbake-hello-world-to-an-image/#an-overview-of-bitbakeconf'></ulink>
|
||||||
</para>
|
</para>
|
||||||
<para>Use the following commands to create the <filename>conf</filename>
|
<para>Use the following commands to create the <filename>conf</filename>
|
||||||
directory in the project directory:
|
directory in the project directory:
|
||||||
@@ -354,7 +354,7 @@ ERROR: Unable to parse base: ParseError in configuration INHERITs: Could not inh
|
|||||||
supporting.
|
supporting.
|
||||||
For more information on the <filename>base.bbclass</filename> file,
|
For more information on the <filename>base.bbclass</filename> file,
|
||||||
you can look at
|
you can look at
|
||||||
<ulink url='http://hambedded.org/blog/2012/11/24/from-bitbake-hello-world-to-an-image/#tasks'></ulink>.
|
<ulink url='https://web.archive.org/web/20150325165911/http://hambedded.org/blog/2012/11/24/from-bitbake-hello-world-to-an-image/#tasks'></ulink>.
|
||||||
</para></listitem>
|
</para></listitem>
|
||||||
<listitem><para><emphasis>Run Bitbake:</emphasis>
|
<listitem><para><emphasis>Run Bitbake:</emphasis>
|
||||||
After making sure that the <filename>classes/base.bbclass</filename>
|
After making sure that the <filename>classes/base.bbclass</filename>
|
||||||
@@ -376,7 +376,7 @@ ERROR: Unable to parse base: ParseError in configuration INHERITs: Could not inh
|
|||||||
Thus, this example creates and uses a layer called "mylayer".
|
Thus, this example creates and uses a layer called "mylayer".
|
||||||
<note>
|
<note>
|
||||||
You can find additional information on adding a layer at
|
You can find additional information on adding a layer at
|
||||||
<ulink url='http://hambedded.org/blog/2012/11/24/from-bitbake-hello-world-to-an-image/#adding-an-example-layer'></ulink>.
|
<ulink url='https://web.archive.org/web/20150325165911/http://hambedded.org/blog/2012/11/24/from-bitbake-hello-world-to-an-image/#adding-an-example-layer'></ulink>.
|
||||||
</note>
|
</note>
|
||||||
</para>
|
</para>
|
||||||
<para>Minimally, you need a recipe file and a layer configuration
|
<para>Minimally, you need a recipe file and a layer configuration
|
||||||
|
|||||||
@@ -646,7 +646,7 @@ def make_stamp(task, d, file_name = None):
|
|||||||
for mask in cleanmask:
|
for mask in cleanmask:
|
||||||
for name in glob.glob(mask):
|
for name in glob.glob(mask):
|
||||||
# Preserve sigdata files in the stamps directory
|
# Preserve sigdata files in the stamps directory
|
||||||
if "sigdata" in name:
|
if "sigdata" in name or "sigbasedata" in name:
|
||||||
continue
|
continue
|
||||||
# Preserve taint files in the stamps directory
|
# Preserve taint files in the stamps directory
|
||||||
if name.endswith('.taint'):
|
if name.endswith('.taint'):
|
||||||
|
|||||||
@@ -127,13 +127,15 @@ class FileChecksumCache(MultiProcessCache):
|
|||||||
checksums.extend(checksum_dir(f))
|
checksums.extend(checksum_dir(f))
|
||||||
else:
|
else:
|
||||||
checksum = checksum_file(f)
|
checksum = checksum_file(f)
|
||||||
checksums.append((f, checksum))
|
if checksum:
|
||||||
|
checksums.append((f, checksum))
|
||||||
elif os.path.isdir(pth):
|
elif os.path.isdir(pth):
|
||||||
if not os.path.islink(pth):
|
if not os.path.islink(pth):
|
||||||
checksums.extend(checksum_dir(pth))
|
checksums.extend(checksum_dir(pth))
|
||||||
else:
|
else:
|
||||||
checksum = checksum_file(pth)
|
checksum = checksum_file(pth)
|
||||||
checksums.append((pth, checksum))
|
if checksum:
|
||||||
|
checksums.append((pth, checksum))
|
||||||
|
|
||||||
checksums.sort(key=operator.itemgetter(1))
|
checksums.sort(key=operator.itemgetter(1))
|
||||||
return checksums
|
return checksums
|
||||||
|
|||||||
@@ -117,22 +117,29 @@ def print_ui_queue():
|
|||||||
logger = logging.getLogger("BitBake")
|
logger = logging.getLogger("BitBake")
|
||||||
if not _uiready:
|
if not _uiready:
|
||||||
from bb.msg import BBLogFormatter
|
from bb.msg import BBLogFormatter
|
||||||
console = logging.StreamHandler(sys.stdout)
|
stdout = logging.StreamHandler(sys.stdout)
|
||||||
console.setFormatter(BBLogFormatter("%(levelname)s: %(message)s"))
|
stderr = logging.StreamHandler(sys.stderr)
|
||||||
logger.handlers = [console]
|
formatter = BBLogFormatter("%(levelname)s: %(message)s")
|
||||||
|
stdout.setFormatter(formatter)
|
||||||
|
stderr.setFormatter(formatter)
|
||||||
|
|
||||||
# First check to see if we have any proper messages
|
# First check to see if we have any proper messages
|
||||||
msgprint = False
|
msgprint = False
|
||||||
for event in ui_queue:
|
for event in ui_queue[:]:
|
||||||
if isinstance(event, logging.LogRecord):
|
if isinstance(event, logging.LogRecord):
|
||||||
if event.levelno > logging.DEBUG:
|
if event.levelno > logging.DEBUG:
|
||||||
|
if event.levelno >= logging.WARNING:
|
||||||
|
logger.addHandler(stderr)
|
||||||
|
else:
|
||||||
|
logger.addHandler(stdout)
|
||||||
logger.handle(event)
|
logger.handle(event)
|
||||||
msgprint = True
|
msgprint = True
|
||||||
if msgprint:
|
if msgprint:
|
||||||
return
|
return
|
||||||
|
|
||||||
# Nope, so just print all of the messages we have (including debug messages)
|
# Nope, so just print all of the messages we have (including debug messages)
|
||||||
for event in ui_queue:
|
logger.addHandler(stdout)
|
||||||
|
for event in ui_queue[:]:
|
||||||
if isinstance(event, logging.LogRecord):
|
if isinstance(event, logging.LogRecord):
|
||||||
logger.handle(event)
|
logger.handle(event)
|
||||||
|
|
||||||
|
|||||||
@@ -1424,7 +1424,7 @@ class FetchMethod(object):
|
|||||||
if urlpath.find("/") != -1:
|
if urlpath.find("/") != -1:
|
||||||
destdir = urlpath.rsplit("/", 1)[0] + '/'
|
destdir = urlpath.rsplit("/", 1)[0] + '/'
|
||||||
bb.utils.mkdirhier("%s/%s" % (unpackdir, destdir))
|
bb.utils.mkdirhier("%s/%s" % (unpackdir, destdir))
|
||||||
cmd = 'cp -fpPR %s %s' % (file, destdir)
|
cmd = 'cp -fpPRH %s %s' % (file, destdir)
|
||||||
|
|
||||||
if not cmd:
|
if not cmd:
|
||||||
return
|
return
|
||||||
|
|||||||
@@ -274,7 +274,7 @@ class Git(FetchMethod):
|
|||||||
branchname = ud.branches[ud.names[0]]
|
branchname = ud.branches[ud.names[0]]
|
||||||
runfetchcmd("%s checkout -B %s %s" % (ud.basecmd, branchname, \
|
runfetchcmd("%s checkout -B %s %s" % (ud.basecmd, branchname, \
|
||||||
ud.revisions[ud.names[0]]), d)
|
ud.revisions[ud.names[0]]), d)
|
||||||
runfetchcmd("%s branch --set-upstream %s origin/%s" % (ud.basecmd, branchname, \
|
runfetchcmd("%s branch %s --set-upstream-to origin/%s" % (ud.basecmd, branchname, \
|
||||||
branchname), d)
|
branchname), d)
|
||||||
else:
|
else:
|
||||||
runfetchcmd("%s checkout %s" % (ud.basecmd, ud.revisions[ud.names[0]]), d)
|
runfetchcmd("%s checkout %s" % (ud.basecmd, ud.revisions[ud.names[0]]), d)
|
||||||
|
|||||||
@@ -104,7 +104,7 @@ class Wget(FetchMethod):
|
|||||||
|
|
||||||
return True
|
return True
|
||||||
|
|
||||||
def checkstatus(self, fetch, ud, d):
|
def checkstatus(self, fetch, ud, d, try_again=True):
|
||||||
import urllib2, socket, httplib
|
import urllib2, socket, httplib
|
||||||
from urllib import addinfourl
|
from urllib import addinfourl
|
||||||
from bb.fetch2 import FetchConnectionCache
|
from bb.fetch2 import FetchConnectionCache
|
||||||
@@ -278,9 +278,13 @@ class Wget(FetchMethod):
|
|||||||
r.get_method = lambda: "HEAD"
|
r.get_method = lambda: "HEAD"
|
||||||
opener.open(r)
|
opener.open(r)
|
||||||
except urllib2.URLError as e:
|
except urllib2.URLError as e:
|
||||||
# debug for now to avoid spamming the logs in e.g. remote sstate searches
|
if try_again:
|
||||||
logger.debug(2, "checkstatus() urlopen failed: %s" % e)
|
logger.debug(2, "checkstatus: trying again")
|
||||||
return False
|
return self.checkstatus(fetch, ud, d, False)
|
||||||
|
else:
|
||||||
|
# debug for now to avoid spamming the logs in e.g. remote sstate searches
|
||||||
|
logger.debug(2, "checkstatus() urlopen failed: %s" % e)
|
||||||
|
return False
|
||||||
return True
|
return True
|
||||||
|
|
||||||
def _parse_path(self, regex, s):
|
def _parse_path(self, regex, s):
|
||||||
|
|||||||
@@ -35,6 +35,7 @@ class SignatureGenerator(object):
|
|||||||
name = "noop"
|
name = "noop"
|
||||||
|
|
||||||
def __init__(self, data):
|
def __init__(self, data):
|
||||||
|
self.basehash = {}
|
||||||
self.taskhash = {}
|
self.taskhash = {}
|
||||||
self.runtaskdeps = {}
|
self.runtaskdeps = {}
|
||||||
self.file_checksum_values = {}
|
self.file_checksum_values = {}
|
||||||
@@ -66,11 +67,10 @@ class SignatureGenerator(object):
|
|||||||
return
|
return
|
||||||
|
|
||||||
def get_taskdata(self):
|
def get_taskdata(self):
|
||||||
return (self.runtaskdeps, self.taskhash, self.file_checksum_values, self.taints)
|
return (self.runtaskdeps, self.taskhash, self.file_checksum_values, self.taints, self.basehash)
|
||||||
|
|
||||||
def set_taskdata(self, data):
|
def set_taskdata(self, data):
|
||||||
self.runtaskdeps, self.taskhash, self.file_checksum_values, self.taints = data
|
self.runtaskdeps, self.taskhash, self.file_checksum_values, self.taints, self.basehash = data
|
||||||
|
|
||||||
|
|
||||||
class SignatureGeneratorBasic(SignatureGenerator):
|
class SignatureGeneratorBasic(SignatureGenerator):
|
||||||
"""
|
"""
|
||||||
@@ -138,7 +138,11 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
|||||||
var = lookupcache[dep]
|
var = lookupcache[dep]
|
||||||
if var is not None:
|
if var is not None:
|
||||||
data = data + str(var)
|
data = data + str(var)
|
||||||
self.basehash[fn + "." + task] = hashlib.md5(data).hexdigest()
|
datahash = hashlib.md5(data).hexdigest()
|
||||||
|
k = fn + "." + task
|
||||||
|
if k in self.basehash and self.basehash[k] != datahash:
|
||||||
|
bb.error("When reparsing %s, the basehash value changed from %s to %s. The metadata is not deterministic and this needs to be fixed." % (k, self.basehash[k], datahash))
|
||||||
|
self.basehash[k] = datahash
|
||||||
taskdeps[task] = alldeps
|
taskdeps[task] = alldeps
|
||||||
|
|
||||||
self.taskdeps[fn] = taskdeps
|
self.taskdeps[fn] = taskdeps
|
||||||
@@ -186,6 +190,7 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
|||||||
def get_taskhash(self, fn, task, deps, dataCache):
|
def get_taskhash(self, fn, task, deps, dataCache):
|
||||||
k = fn + "." + task
|
k = fn + "." + task
|
||||||
data = dataCache.basetaskhash[k]
|
data = dataCache.basetaskhash[k]
|
||||||
|
self.basehash[k] = data
|
||||||
self.runtaskdeps[k] = []
|
self.runtaskdeps[k] = []
|
||||||
self.file_checksum_values[k] = []
|
self.file_checksum_values[k] = []
|
||||||
recipename = dataCache.pkg_fn[fn]
|
recipename = dataCache.pkg_fn[fn]
|
||||||
@@ -282,6 +287,15 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
|||||||
if 'nostamp:' in self.taints[k]:
|
if 'nostamp:' in self.taints[k]:
|
||||||
data['taint'] = self.taints[k]
|
data['taint'] = self.taints[k]
|
||||||
|
|
||||||
|
computed_basehash = calc_basehash(data)
|
||||||
|
if computed_basehash != self.basehash[k]:
|
||||||
|
bb.error("Basehash mismatch %s versus %s for %s" % (computed_basehash, self.basehash[k], k))
|
||||||
|
if runtime and k in self.taskhash:
|
||||||
|
computed_taskhash = calc_taskhash(data)
|
||||||
|
if computed_taskhash != self.taskhash[k]:
|
||||||
|
bb.error("Taskhash mismatch %s versus %s for %s" % (computed_taskhash, self.taskhash[k], k))
|
||||||
|
sigfile = sigfile.replace(self.taskhash[k], computed_taskhash)
|
||||||
|
|
||||||
fd, tmpfile = tempfile.mkstemp(dir=os.path.dirname(sigfile), prefix="sigtask.")
|
fd, tmpfile = tempfile.mkstemp(dir=os.path.dirname(sigfile), prefix="sigtask.")
|
||||||
try:
|
try:
|
||||||
with os.fdopen(fd, "wb") as stream:
|
with os.fdopen(fd, "wb") as stream:
|
||||||
@@ -296,15 +310,6 @@ class SignatureGeneratorBasic(SignatureGenerator):
|
|||||||
pass
|
pass
|
||||||
raise err
|
raise err
|
||||||
|
|
||||||
computed_basehash = calc_basehash(data)
|
|
||||||
if computed_basehash != self.basehash[k]:
|
|
||||||
bb.error("Basehash mismatch %s verses %s for %s" % (computed_basehash, self.basehash[k], k))
|
|
||||||
if k in self.taskhash:
|
|
||||||
computed_taskhash = calc_taskhash(data)
|
|
||||||
if computed_taskhash != self.taskhash[k]:
|
|
||||||
bb.error("Taskhash mismatch %s verses %s for %s" % (computed_taskhash, self.taskhash[k], k))
|
|
||||||
|
|
||||||
|
|
||||||
def dump_sigs(self, dataCache, options):
|
def dump_sigs(self, dataCache, options):
|
||||||
for fn in self.taskdeps:
|
for fn in self.taskdeps:
|
||||||
for task in self.taskdeps[fn]:
|
for task in self.taskdeps[fn]:
|
||||||
@@ -545,7 +550,8 @@ def calc_taskhash(sigdata):
|
|||||||
data = data + sigdata['runtaskhashes'][dep]
|
data = data + sigdata['runtaskhashes'][dep]
|
||||||
|
|
||||||
for c in sigdata['file_checksum_values']:
|
for c in sigdata['file_checksum_values']:
|
||||||
data = data + c[1]
|
if c[1]:
|
||||||
|
data = data + c[1]
|
||||||
|
|
||||||
if 'taint' in sigdata:
|
if 'taint' in sigdata:
|
||||||
if 'nostamp:' in sigdata['taint']:
|
if 'nostamp:' in sigdata['taint']:
|
||||||
|
|||||||
@@ -767,7 +767,6 @@ class FetchLatestVersionTest(FetcherTest):
|
|||||||
|
|
||||||
class FetchCheckStatusTest(FetcherTest):
|
class FetchCheckStatusTest(FetcherTest):
|
||||||
test_wget_uris = ["http://www.cups.org/software/1.7.2/cups-1.7.2-source.tar.bz2",
|
test_wget_uris = ["http://www.cups.org/software/1.7.2/cups-1.7.2-source.tar.bz2",
|
||||||
"http://www.cups.org/software/ipptool/ipptool-20130731-linux-ubuntu-i686.tar.gz",
|
|
||||||
"http://www.cups.org/",
|
"http://www.cups.org/",
|
||||||
"http://downloads.yoctoproject.org/releases/sato/sato-engine-0.1.tar.gz",
|
"http://downloads.yoctoproject.org/releases/sato/sato-engine-0.1.tar.gz",
|
||||||
"http://downloads.yoctoproject.org/releases/sato/sato-engine-0.2.tar.gz",
|
"http://downloads.yoctoproject.org/releases/sato/sato-engine-0.2.tar.gz",
|
||||||
|
|||||||
@@ -11,7 +11,14 @@ from bs4.builder import (
|
|||||||
)
|
)
|
||||||
from bs4.element import NamespacedAttribute
|
from bs4.element import NamespacedAttribute
|
||||||
import html5lib
|
import html5lib
|
||||||
|
try:
|
||||||
|
# html5lib >= 0.99999999/1.0b9
|
||||||
|
from html5lib.treebuilders import base as treebuildersbase
|
||||||
|
except ImportError:
|
||||||
|
# html5lib <= 0.9999999/1.0b8
|
||||||
|
from html5lib.treebuilders import _base as treebuildersbase
|
||||||
from html5lib.constants import namespaces
|
from html5lib.constants import namespaces
|
||||||
|
|
||||||
from bs4.element import (
|
from bs4.element import (
|
||||||
Comment,
|
Comment,
|
||||||
Doctype,
|
Doctype,
|
||||||
@@ -54,7 +61,7 @@ class HTML5TreeBuilder(HTMLTreeBuilder):
|
|||||||
return u'<html><head></head><body>%s</body></html>' % fragment
|
return u'<html><head></head><body>%s</body></html>' % fragment
|
||||||
|
|
||||||
|
|
||||||
class TreeBuilderForHtml5lib(html5lib.treebuilders._base.TreeBuilder):
|
class TreeBuilderForHtml5lib(treebuildersbase.TreeBuilder):
|
||||||
|
|
||||||
def __init__(self, soup, namespaceHTMLElements):
|
def __init__(self, soup, namespaceHTMLElements):
|
||||||
self.soup = soup
|
self.soup = soup
|
||||||
@@ -92,7 +99,7 @@ class TreeBuilderForHtml5lib(html5lib.treebuilders._base.TreeBuilder):
|
|||||||
return self.soup
|
return self.soup
|
||||||
|
|
||||||
def getFragment(self):
|
def getFragment(self):
|
||||||
return html5lib.treebuilders._base.TreeBuilder.getFragment(self).element
|
return treebuildersbase.TreeBuilder.getFragment(self).element
|
||||||
|
|
||||||
class AttrList(object):
|
class AttrList(object):
|
||||||
def __init__(self, element):
|
def __init__(self, element):
|
||||||
@@ -115,9 +122,9 @@ class AttrList(object):
|
|||||||
return name in list(self.attrs.keys())
|
return name in list(self.attrs.keys())
|
||||||
|
|
||||||
|
|
||||||
class Element(html5lib.treebuilders._base.Node):
|
class Element(treebuildersbase.Node):
|
||||||
def __init__(self, element, soup, namespace):
|
def __init__(self, element, soup, namespace):
|
||||||
html5lib.treebuilders._base.Node.__init__(self, element.name)
|
treebuildersbase.Node.__init__(self, element.name)
|
||||||
self.element = element
|
self.element = element
|
||||||
self.soup = soup
|
self.soup = soup
|
||||||
self.namespace = namespace
|
self.namespace = namespace
|
||||||
@@ -277,7 +284,7 @@ class Element(html5lib.treebuilders._base.Node):
|
|||||||
|
|
||||||
class TextNode(Element):
|
class TextNode(Element):
|
||||||
def __init__(self, element, soup):
|
def __init__(self, element, soup):
|
||||||
html5lib.treebuilders._base.Node.__init__(self, None)
|
treebuildersbase.Node.__init__(self, None)
|
||||||
self.element = element
|
self.element = element
|
||||||
self.soup = soup
|
self.soup = soup
|
||||||
|
|
||||||
|
|||||||
@@ -2168,7 +2168,7 @@ if True:
|
|||||||
if 'configvarAdd' in request.POST:
|
if 'configvarAdd' in request.POST:
|
||||||
t=request.POST['configvarAdd'].strip()
|
t=request.POST['configvarAdd'].strip()
|
||||||
if ":" in t:
|
if ":" in t:
|
||||||
variable, value = t.split(":")
|
variable, value = t.split(":", 1)
|
||||||
else:
|
else:
|
||||||
variable = t
|
variable = t
|
||||||
value = ""
|
value = ""
|
||||||
@@ -2178,7 +2178,7 @@ if True:
|
|||||||
if 'configvarChange' in request.POST:
|
if 'configvarChange' in request.POST:
|
||||||
t=request.POST['configvarChange'].strip()
|
t=request.POST['configvarChange'].strip()
|
||||||
if ":" in t:
|
if ":" in t:
|
||||||
variable, value = t.split(":")
|
variable, value = t.split(":", 1)
|
||||||
else:
|
else:
|
||||||
variable = t
|
variable = t
|
||||||
value = ""
|
value = ""
|
||||||
|
|||||||
@@ -107,9 +107,19 @@ def getDATABASE_URL():
|
|||||||
|
|
||||||
|
|
||||||
|
|
||||||
# Hosts/domain names that are valid for this site; required if DEBUG is False
|
# Update as of django 1.8.16 release, the '*' is needed to allow us to connect while running
|
||||||
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
|
# on hosts without explicitly setting the fqdn for the toaster server.
|
||||||
ALLOWED_HOSTS = []
|
# See https://docs.djangoproject.com/en/dev/ref/settings/ for info on ALLOWED_HOSTS
|
||||||
|
# Previously this setting was not enforced if DEBUG was set but it is now.
|
||||||
|
# The previous behavior was such that ALLOWED_HOSTS defaulted to ['localhost','127.0.0.1','::1']
|
||||||
|
# and if you bound to 0.0.0.0:<port #> then accessing toaster as localhost or fqdn would both work.
|
||||||
|
# To have that same behavior, with a fqdn explicitly enabled you would set
|
||||||
|
# ALLOWED_HOSTS= ['localhost','127.0.0.1','::1','myserver.mycompany.com'] for
|
||||||
|
# Django >= 1.8.16. By default, we are not enforcing this restriction in
|
||||||
|
# DEBUG mode.
|
||||||
|
if DEBUG is True:
|
||||||
|
# this will allow connection via localhost,hostname, or fqdn
|
||||||
|
ALLOWED_HOSTS = ['*']
|
||||||
|
|
||||||
# Local time zone for this installation. Choices can be found here:
|
# Local time zone for this installation. Choices can be found here:
|
||||||
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
|
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
|
||||||
|
|||||||
@@ -108,6 +108,21 @@
|
|||||||
<date>April 2016</date>
|
<date>April 2016</date>
|
||||||
<revremark>Released with the Yocto Project 2.1 Release.</revremark>
|
<revremark>Released with the Yocto Project 2.1 Release.</revremark>
|
||||||
</revision>
|
</revision>
|
||||||
|
<revision>
|
||||||
|
<revnumber>2.1.1</revnumber>
|
||||||
|
<date>August 2016</date>
|
||||||
|
<revremark>Released with the Yocto Project 2.1.1 Release.</revremark>
|
||||||
|
</revision>
|
||||||
|
<revision>
|
||||||
|
<revnumber>2.1.2</revnumber>
|
||||||
|
<date>December 2016</date>
|
||||||
|
<revremark>Released with the Yocto Project 2.1.2 Release.</revremark>
|
||||||
|
</revision>
|
||||||
|
<revision>
|
||||||
|
<revnumber>2.1.3</revnumber>
|
||||||
|
<date>June 2017</date>
|
||||||
|
<revremark>Released with the Yocto Project 2.1.3 Release.</revremark>
|
||||||
|
</revision>
|
||||||
</revhistory>
|
</revhistory>
|
||||||
|
|
||||||
<copyright>
|
<copyright>
|
||||||
@@ -120,12 +135,46 @@
|
|||||||
Permission is granted to copy, distribute and/or modify this document under
|
Permission is granted to copy, distribute and/or modify this document under
|
||||||
the terms of the <ulink type="http" url="http://creativecommons.org/licenses/by-nc-sa/2.0/uk/">Creative Commons Attribution-Share Alike 2.0 UK: England & Wales</ulink> as published by Creative Commons.
|
the terms of the <ulink type="http" url="http://creativecommons.org/licenses/by-nc-sa/2.0/uk/">Creative Commons Attribution-Share Alike 2.0 UK: England & Wales</ulink> as published by Creative Commons.
|
||||||
</para>
|
</para>
|
||||||
<note>
|
<note><title>Manual Notes</title>
|
||||||
For the latest version of this manual associated with this
|
<itemizedlist>
|
||||||
Yocto Project release, see the
|
<listitem><para>
|
||||||
<ulink url='&YOCTO_DOCS_BSP_URL;'>Yocto Project Board Support Package (BSP) Developer's Guide</ulink>
|
This version of the
|
||||||
from the Yocto Project website.
|
<emphasis>Yocto Project Board Support Package (BSP) Developer's Guide</emphasis>
|
||||||
</note>
|
is for the &YOCTO_DOC_VERSION; release of the
|
||||||
|
Yocto Project.
|
||||||
|
To be sure you have the latest version of the manual
|
||||||
|
for this release, go to the
|
||||||
|
<ulink url='&YOCTO_HOME_URL;/documentation'>Yocto Project documentation page</ulink>
|
||||||
|
and select the manual from that site.
|
||||||
|
Manuals from the site are more up-to-date than manuals
|
||||||
|
derived from the Yocto Project released TAR files.
|
||||||
|
</para></listitem>
|
||||||
|
<listitem><para>
|
||||||
|
If you located this manual through a web search, the
|
||||||
|
version of the manual might not be the one you want
|
||||||
|
(e.g. the search might have returned a manual much
|
||||||
|
older than the Yocto Project version with which you
|
||||||
|
are working).
|
||||||
|
You can see all Yocto Project major releases by
|
||||||
|
visiting the
|
||||||
|
<ulink url='&YOCTO_WIKI_URL;/wiki/Releases'>Releases</ulink>
|
||||||
|
page.
|
||||||
|
If you need a version of this manual for a different
|
||||||
|
Yocto Project release, visit the
|
||||||
|
<ulink url='&YOCTO_HOME_URL;/documentation'>Yocto Project documentation page</ulink>
|
||||||
|
and select the manual set by using the
|
||||||
|
"ACTIVE RELEASES DOCUMENTATION" or "DOCUMENTS ARCHIVE"
|
||||||
|
pull-down menus.
|
||||||
|
</para></listitem>
|
||||||
|
<listitem><para>
|
||||||
|
To report any inaccuracies or problems with this
|
||||||
|
manual, send an email to the Yocto Project
|
||||||
|
discussion group at
|
||||||
|
<filename>yocto@yoctoproject.com</filename> or log into
|
||||||
|
the freenode <filename>#yocto</filename> channel.
|
||||||
|
</para></listitem>
|
||||||
|
</itemizedlist>
|
||||||
|
</note>
|
||||||
</legalnotice>
|
</legalnotice>
|
||||||
|
|
||||||
</bookinfo>
|
</bookinfo>
|
||||||
|
|||||||
@@ -1460,27 +1460,26 @@
|
|||||||
Done.
|
Done.
|
||||||
|
|
||||||
Which qemu architecture would you like to use? [default: i386]
|
Which qemu architecture would you like to use? [default: i386]
|
||||||
1) i386 (32-bit)
|
1) i386 (32-bit)
|
||||||
2) x86_64 (64-bit)
|
2) x86_64 (64-bit)
|
||||||
3) ARM (32-bit)
|
3) ARM (32-bit)
|
||||||
4) PowerPC (32-bit)
|
4) PowerPC (32-bit)
|
||||||
5) MIPS (32-bit)
|
5) MIPS (32-bit)
|
||||||
6) MIPS64 (64-bit)
|
6) MIPS64 (64-bit)
|
||||||
3
|
3
|
||||||
Would you like to use the default (4.1) kernel? (y/n) [default: y]
|
Would you like to use the default (4.4) kernel? (y/n) [default: y]
|
||||||
Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n] [default: y]
|
Do you need a new machine branch for this BSP (the alternative is to re-use an existing branch)? [y/n] [default: y]
|
||||||
Getting branches from remote repo git://git.yoctoproject.org/linux-yocto-4.1.git...
|
Getting branches from remote repo git://git.yoctoproject.org/linux-yocto-4.4.git...
|
||||||
Please choose a machine branch to base your new BSP branch on: [default: standard/base]
|
Please choose a machine branch to base this BSP on: [default: standard/base]
|
||||||
1) standard/arm-versatile-926ejs
|
1) standard/arm-versatile-926ejs
|
||||||
2) standard/base
|
2) standard/base
|
||||||
3) standard/beagleboard
|
3) standard/beaglebone
|
||||||
4) standard/beaglebone
|
4) standard/edgerouter
|
||||||
5) standard/edgerouter
|
5) standard/fsl-mpc8315e-rdb
|
||||||
6) standard/fsl-mpc8315e-rdb
|
6) standard/mti-malta32
|
||||||
7) standard/mti-malta32
|
7) standard/mti-malta64
|
||||||
8) standard/mti-malta64
|
8) standard/qemuarm64
|
||||||
9) standard/qemuarm64
|
9) standard/qemuppc
|
||||||
10) standard/qemuppc
|
|
||||||
1
|
1
|
||||||
Would you like SMP support? (y/n) [default: y]
|
Would you like SMP support? (y/n) [default: y]
|
||||||
Does your BSP have a touchscreen? (y/n) [default: n]
|
Does your BSP have a touchscreen? (y/n) [default: n]
|
||||||
|
|||||||
@@ -7160,26 +7160,29 @@
|
|||||||
</para>
|
</para>
|
||||||
|
|
||||||
<para>
|
<para>
|
||||||
If a committed change results in changing the package output,
|
If a committed change results in changing the package
|
||||||
then the value of the PR variable needs to be increased
|
output, then the value of the PR variable needs to be
|
||||||
(or "bumped") as part of that commit.
|
increased (or "bumped") as part of that commit.
|
||||||
For new recipes you should add the <filename>PR</filename>
|
For new recipes you should add the <filename>PR</filename>
|
||||||
variable and set its initial value equal to "r0", which is the default.
|
variable and set its initial value equal to "r0", which is
|
||||||
Even though the default value is "r0", the practice of adding it to a new recipe makes
|
the default.
|
||||||
it harder to forget to bump the variable when you make changes
|
Even though the default value is "r0", the practice of
|
||||||
to the recipe in future.
|
adding it to a new recipe makes it harder to forget to bump
|
||||||
|
the variable when you make changes to the recipe in future.
|
||||||
</para>
|
</para>
|
||||||
|
|
||||||
<para>
|
<para>
|
||||||
If you are sharing a common <filename>.inc</filename> file with multiple recipes,
|
If you are sharing a common <filename>.inc</filename> file
|
||||||
you can also use the
|
with multiple recipes, you can also use the
|
||||||
<filename><ulink url='&YOCTO_DOCS_REF_URL;#var-INC_PR'>INC_PR</ulink></filename>
|
<filename><ulink url='&YOCTO_DOCS_REF_URL;#var-INC_PR'>INC_PR</ulink></filename>
|
||||||
variable to ensure that
|
variable to ensure that the recipes sharing the
|
||||||
the recipes sharing the <filename>.inc</filename> file are rebuilt when the
|
<filename>.inc</filename> file are rebuilt when the
|
||||||
<filename>.inc</filename> file itself is changed.
|
<filename>.inc</filename> file itself is changed.
|
||||||
The <filename>.inc</filename> file must set <filename>INC_PR</filename>
|
The <filename>.inc</filename> file must set
|
||||||
(initially to "r0"), and all recipes referring to it should set <filename>PR</filename>
|
<filename>INC_PR</filename> (initially to "r0"), and all
|
||||||
to "$(INC_PR).0" initially, incrementing the last number when the recipe is changed.
|
recipes referring to it should set <filename>PR</filename>
|
||||||
|
to "${INC_PR}.0" initially, incrementing the last number
|
||||||
|
when the recipe is changed.
|
||||||
If the <filename>.inc</filename> file is changed then its
|
If the <filename>.inc</filename> file is changed then its
|
||||||
<filename>INC_PR</filename> should be incremented.
|
<filename>INC_PR</filename> should be incremented.
|
||||||
</para>
|
</para>
|
||||||
@@ -7188,14 +7191,14 @@
|
|||||||
When upgrading the version of a package, assuming the
|
When upgrading the version of a package, assuming the
|
||||||
<filename><ulink url='&YOCTO_DOCS_REF_URL;#var-PV'>PV</ulink></filename>
|
<filename><ulink url='&YOCTO_DOCS_REF_URL;#var-PV'>PV</ulink></filename>
|
||||||
changes, the <filename>PR</filename> variable should be
|
changes, the <filename>PR</filename> variable should be
|
||||||
reset to "r0" (or "$(INC_PR).0" if you are using
|
reset to "r0" (or "${INC_PR}.0" if you are using
|
||||||
<filename>INC_PR</filename>).
|
<filename>INC_PR</filename>).
|
||||||
</para>
|
</para>
|
||||||
|
|
||||||
<para>
|
<para>
|
||||||
Usually, version increases occur only to packages.
|
Usually, version increases occur only to packages.
|
||||||
However, if for some reason <filename>PV</filename> changes but does not
|
However, if for some reason <filename>PV</filename> changes
|
||||||
increase, you can increase the
|
but does not increase, you can increase the
|
||||||
<filename><ulink url='&YOCTO_DOCS_REF_URL;#var-PE'>PE</ulink></filename>
|
<filename><ulink url='&YOCTO_DOCS_REF_URL;#var-PE'>PE</ulink></filename>
|
||||||
variable (Package Epoch).
|
variable (Package Epoch).
|
||||||
The <filename>PE</filename> variable defaults to "0".
|
The <filename>PE</filename> variable defaults to "0".
|
||||||
@@ -7205,7 +7208,8 @@
|
|||||||
Version numbering strives to follow the
|
Version numbering strives to follow the
|
||||||
<ulink url='http://www.debian.org/doc/debian-policy/ch-controlfields.html'>
|
<ulink url='http://www.debian.org/doc/debian-policy/ch-controlfields.html'>
|
||||||
Debian Version Field Policy Guidelines</ulink>.
|
Debian Version Field Policy Guidelines</ulink>.
|
||||||
These guidelines define how versions are compared and what "increasing" a version means.
|
These guidelines define how versions are compared and what
|
||||||
|
"increasing" a version means.
|
||||||
</para>
|
</para>
|
||||||
</section>
|
</section>
|
||||||
</section>
|
</section>
|
||||||
@@ -7528,27 +7532,48 @@
|
|||||||
<title>Build Considerations</title>
|
<title>Build Considerations</title>
|
||||||
|
|
||||||
<para>
|
<para>
|
||||||
This section describes build considerations that you need
|
This section describes build considerations of which you
|
||||||
to be aware of in order to provide support for runtime
|
need to be aware in order to provide support for runtime
|
||||||
package management.
|
package management.
|
||||||
</para>
|
</para>
|
||||||
|
|
||||||
<para>
|
<para>
|
||||||
When BitBake generates packages it needs to know
|
When BitBake generates packages, it needs to know
|
||||||
what format or formats to use.
|
what format or formats to use.
|
||||||
In your configuration, you use the
|
In your configuration, you use the
|
||||||
<ulink url='&YOCTO_DOCS_REF_URL;#var-PACKAGE_CLASSES'><filename>PACKAGE_CLASSES</filename></ulink>
|
<ulink url='&YOCTO_DOCS_REF_URL;#var-PACKAGE_CLASSES'><filename>PACKAGE_CLASSES</filename></ulink>
|
||||||
variable to specify the format.
|
variable to specify the format:
|
||||||
<note>
|
<orderedlist>
|
||||||
You can choose to have more than one format but you must
|
<listitem><para>
|
||||||
provide at least one.
|
Open the <filename>local.conf</filename> file
|
||||||
</note>
|
inside your
|
||||||
|
<link linkend='build-directory'>Build Directory</link>
|
||||||
|
(e.g. <filename>~/poky/build/conf/local.conf</filename>).
|
||||||
|
</para></listitem>
|
||||||
|
<listitem><para>
|
||||||
|
Select the desired package format as follows:
|
||||||
|
<literallayout class='monospaced'>
|
||||||
|
PACKAGE_CLASSES ?= “package_<replaceable>packageformat</replaceable>”
|
||||||
|
</literallayout>
|
||||||
|
where <replaceable>packageformat</replaceable>
|
||||||
|
can be "ipk", "rpm", and "deb", which are the
|
||||||
|
supported package formats.
|
||||||
|
<note>
|
||||||
|
Because the Yocto Project supports three
|
||||||
|
different package formats, you can set the
|
||||||
|
variable with more than one argument.
|
||||||
|
However, the OpenEmbedded build system only
|
||||||
|
uses the first argument when creating an image
|
||||||
|
or Software Development Kit (SDK).
|
||||||
|
</note>
|
||||||
|
</para></listitem>
|
||||||
|
</orderedlist>
|
||||||
</para>
|
</para>
|
||||||
|
|
||||||
<para>
|
<para>
|
||||||
If you would like your image to start off with a basic
|
If you would like your image to start off with a basic
|
||||||
package database of the packages in your current build
|
package database containing the packages in your current
|
||||||
as well as have the relevant tools available on the
|
build as well as to have the relevant tools available on the
|
||||||
target for runtime package management, you can include
|
target for runtime package management, you can include
|
||||||
"package-management" in the
|
"package-management" in the
|
||||||
<ulink url='&YOCTO_DOCS_REF_URL;#var-IMAGE_FEATURES'><filename>IMAGE_FEATURES</filename></ulink>
|
<ulink url='&YOCTO_DOCS_REF_URL;#var-IMAGE_FEATURES'><filename>IMAGE_FEATURES</filename></ulink>
|
||||||
@@ -7583,27 +7608,33 @@
|
|||||||
<literallayout class='monospaced'>
|
<literallayout class='monospaced'>
|
||||||
$ bitbake <replaceable>some-package</replaceable> package-index
|
$ bitbake <replaceable>some-package</replaceable> package-index
|
||||||
</literallayout>
|
</literallayout>
|
||||||
This is because BitBake does not properly schedule the
|
The reason for this restriction is because BitBake does not
|
||||||
<filename>package-index</filename> target fully after any
|
properly schedule the <filename>package-index</filename>
|
||||||
other target has completed.
|
target fully after any other target has completed.
|
||||||
Thus, be sure to run the package update step separately.
|
Thus, be sure to run the package update step separately.
|
||||||
</para>
|
</para>
|
||||||
|
|
||||||
<para>
|
<para>
|
||||||
As described below in the
|
You can use the
|
||||||
"<link linkend='runtime-package-management-target-ipk'>Using IPK</link>"
|
<ulink url='&YOCTO_DOCS_REF_URL;#var-PACKAGE_FEED_ARCHS'><filename>PACKAGE_FEED_ARCHS</filename></ulink>,
|
||||||
section, if you are using IPK as your package format, you
|
<ulink url='&YOCTO_DOCS_REF_URL;#var-PACKAGE_FEED_BASE_PATHS'><filename>PACKAGE_FEED_BASE_PATHS</filename></ulink>,
|
||||||
can make use of the
|
and
|
||||||
<filename>distro-feed-configs</filename> recipe provided
|
<ulink url='&YOCTO_DOCS_REF_URL;#var-PACKAGE_FEED_URIS'><filename>PACKAGE_FEED_URIS</filename></ulink>
|
||||||
by <filename>meta-oe</filename> in order to configure your
|
variables to pre-configure target images to use a package
|
||||||
target to use your IPK databases.
|
feed.
|
||||||
|
If you do not define these variables, then manual steps
|
||||||
|
as described in the subsequent sections are necessary to
|
||||||
|
configure the target.
|
||||||
|
You should set these variables before building the image
|
||||||
|
in order to produce a correctly configured image.
|
||||||
</para>
|
</para>
|
||||||
|
|
||||||
<para>
|
<para>
|
||||||
When your build is complete, your packages reside in the
|
When your build is complete, your packages reside in the
|
||||||
<filename>${TMPDIR}/deploy/<replaceable>package-format</replaceable></filename>
|
<filename>${TMPDIR}/deploy/<replaceable>packageformat</replaceable></filename>
|
||||||
directory.
|
directory.
|
||||||
For example, if <filename>${TMPDIR}</filename>
|
For example, if
|
||||||
|
<filename>${</filename><ulink url='&YOCTO_DOCS_REF_URL;#var-TMPDIR'><filename>TMPDIR</filename></ulink><filename>}</filename>
|
||||||
is <filename>tmp</filename> and your selected package type
|
is <filename>tmp</filename> and your selected package type
|
||||||
is IPK, then your IPK packages are available in
|
is IPK, then your IPK packages are available in
|
||||||
<filename>tmp/deploy/ipk</filename>.
|
<filename>tmp/deploy/ipk</filename>.
|
||||||
@@ -7614,121 +7645,38 @@
|
|||||||
<title>Host or Server Machine Setup</title>
|
<title>Host or Server Machine Setup</title>
|
||||||
|
|
||||||
<para>
|
<para>
|
||||||
Typically, packages are served from a server using
|
Although other protocols are possible, a server using HTTP
|
||||||
HTTP.
|
typically serves packages.
|
||||||
However, other protocols are possible.
|
If you want to use HTTP, then set up and configure a
|
||||||
If you want to use HTTP, then setup and configure a
|
web server such as Apache 2, lighttpd, or
|
||||||
web server, such as Apache 2 or lighttpd, on the machine
|
SimpleHTTPServer on the machine serving the packages.
|
||||||
serving the packages.
|
|
||||||
</para>
|
</para>
|
||||||
|
|
||||||
<para>
|
<para>
|
||||||
As previously mentioned, the build machine can act as the
|
To keep things simple, this section describes how to set
|
||||||
package server.
|
up a SimpleHTTPServer web server to share package feeds
|
||||||
In the following sections that describe server machine
|
from the developer's machine.
|
||||||
setups, the build machine is assumed to also be the server.
|
Although this server might not be the best for a production
|
||||||
|
environment, the setup is simple and straight forward.
|
||||||
|
Should you want to use a different server more suited for
|
||||||
|
production (e.g. Apache 2, Lighttpd, or Nginx), take the
|
||||||
|
appropriate steps to do so.
|
||||||
</para>
|
</para>
|
||||||
|
|
||||||
<section id='package-server-apache'>
|
<para>
|
||||||
<title>Serving Packages via Apache 2</title>
|
From within the build directory where you have built an
|
||||||
|
image based on your packaging choice (i.e. the
|
||||||
<para>
|
<ulink url='&YOCTO_DOCS_REF_URL;#var-PACKAGE_CLASSES'><filename>PACKAGE_CLASSES</filename></ulink>
|
||||||
This example assumes you are using the Apache 2
|
setting), simply start the server.
|
||||||
server:
|
The following example assumes a build directory of
|
||||||
<orderedlist>
|
<filename>~/poky/build/tmp/deploy/rpm</filename> and a
|
||||||
<listitem><para>
|
<filename>PACKAGE_CLASSES</filename> setting of
|
||||||
Add the directory to your Apache
|
"package_rpm":
|
||||||
configuration, which you can find at
|
<literallayout class='monospaced'>
|
||||||
<filename>/etc/httpd/conf/httpd.conf</filename>.
|
$ cd ~/poky/build/tmp/deploy/rpm
|
||||||
Use commands similar to these on the
|
$ python -m SimpleHTTPServer
|
||||||
development system.
|
</literallayout>
|
||||||
These example commands assume a top-level
|
</para>
|
||||||
<link linkend='source-directory'>Source Directory</link>
|
|
||||||
named <filename>poky</filename> in your home
|
|
||||||
directory.
|
|
||||||
The example also assumes an RPM package type.
|
|
||||||
If you are using a different package type, such
|
|
||||||
as IPK, use "ipk" in the pathnames:
|
|
||||||
<literallayout class='monospaced'>
|
|
||||||
<VirtualHost *:80>
|
|
||||||
....
|
|
||||||
Alias /rpm ~/poky/build/tmp/deploy/rpm
|
|
||||||
<Directory "~/poky/build/tmp/deploy/rpm">
|
|
||||||
Options +Indexes
|
|
||||||
</Directory>
|
|
||||||
</VirtualHost>
|
|
||||||
</literallayout></para></listitem>
|
|
||||||
<listitem><para>
|
|
||||||
Reload the Apache configuration as described
|
|
||||||
in this step.
|
|
||||||
For all commands, be sure you have root
|
|
||||||
privileges.
|
|
||||||
</para>
|
|
||||||
|
|
||||||
<para>
|
|
||||||
If your development system is using Fedora or
|
|
||||||
CentOS, use the following:
|
|
||||||
<literallayout class='monospaced'>
|
|
||||||
# service httpd reload
|
|
||||||
</literallayout>
|
|
||||||
For Ubuntu and Debian, use the following:
|
|
||||||
<literallayout class='monospaced'>
|
|
||||||
# /etc/init.d/apache2 reload
|
|
||||||
</literallayout>
|
|
||||||
For OpenSUSE, use the following:
|
|
||||||
<literallayout class='monospaced'>
|
|
||||||
# /etc/init.d/apache2 reload
|
|
||||||
</literallayout></para></listitem>
|
|
||||||
<listitem><para>
|
|
||||||
If you are using Security-Enhanced Linux
|
|
||||||
(SELinux), you need to label the files as
|
|
||||||
being accessible through Apache.
|
|
||||||
Use the following command from the development
|
|
||||||
host.
|
|
||||||
This example assumes RPM package types:
|
|
||||||
<literallayout class='monospaced'>
|
|
||||||
# chcon -R -h -t httpd_sys_content_t tmp/deploy/rpm
|
|
||||||
</literallayout></para></listitem>
|
|
||||||
</orderedlist>
|
|
||||||
</para>
|
|
||||||
</section>
|
|
||||||
|
|
||||||
<section id='package-server-lighttpd'>
|
|
||||||
<title>Serving Packages via lighttpd</title>
|
|
||||||
|
|
||||||
<para>
|
|
||||||
If you are using lighttpd, all you need
|
|
||||||
to do is to provide a link from your
|
|
||||||
<filename>${TMPDIR}/deploy/<replaceable>package-format</replaceable></filename>
|
|
||||||
directory to lighttpd's document-root.
|
|
||||||
You can determine the specifics of your lighttpd
|
|
||||||
installation by looking through its configuration file,
|
|
||||||
which is usually found at:
|
|
||||||
<filename>/etc/lighttpd/lighttpd.conf</filename>.
|
|
||||||
</para>
|
|
||||||
|
|
||||||
<para>
|
|
||||||
For example, if you are using IPK, lighttpd's
|
|
||||||
document-root is set to
|
|
||||||
<filename>/var/www/lighttpd</filename>, and you had
|
|
||||||
packages for a target named "BOARD",
|
|
||||||
then you might create a link from your build location
|
|
||||||
to lighttpd's document-root as follows:
|
|
||||||
<literallayout class='monospaced'>
|
|
||||||
# ln -s $(PWD)/tmp/deploy/ipk /var/www/lighttpd/BOARD-dir
|
|
||||||
</literallayout>
|
|
||||||
</para>
|
|
||||||
|
|
||||||
<para>
|
|
||||||
At this point, you need to start the lighttpd server.
|
|
||||||
The method used to start the server varies by
|
|
||||||
distribution.
|
|
||||||
However, one basic method that starts it by hand is:
|
|
||||||
<literallayout class='monospaced'>
|
|
||||||
# lighttpd -f /etc/lighttpd/lighttpd.conf
|
|
||||||
</literallayout>
|
|
||||||
</para>
|
|
||||||
</section>
|
|
||||||
</section>
|
</section>
|
||||||
|
|
||||||
<section id='runtime-package-management-target'>
|
<section id='runtime-package-management-target'>
|
||||||
@@ -7737,42 +7685,46 @@
|
|||||||
<para>
|
<para>
|
||||||
Setting up the target differs depending on the
|
Setting up the target differs depending on the
|
||||||
package management system.
|
package management system.
|
||||||
This section provides information for RPM and IPK.
|
This section provides information for RPM, IPK, and DEB.
|
||||||
</para>
|
</para>
|
||||||
|
|
||||||
<section id='runtime-package-management-target-rpm'>
|
<section id='runtime-package-management-target-rpm'>
|
||||||
<title>Using RPM</title>
|
<title>Using RPM</title>
|
||||||
|
|
||||||
<para>
|
<para>
|
||||||
The application for performing runtime package
|
The <filename>smart</filename> application performs
|
||||||
management of RPM packages on the target is called
|
runtime package management of RPM packages.
|
||||||
<filename>smart</filename>.
|
You must perform an initial setup for
|
||||||
|
<filename>smart</filename> on the target machine
|
||||||
|
if the
|
||||||
|
<ulink url='&YOCTO_DOCS_REF_URL;#var-PACKAGE_FEED_ARCHS'><filename>PACKAGE_FEED_ARCHS</filename></ulink>,
|
||||||
|
<ulink url='&YOCTO_DOCS_REF_URL;#var-PACKAGE_FEED_BASE_PATHS'><filename>PACKAGE_FEED_BASE_PATHS</filename></ulink>, and
|
||||||
|
<ulink url='&YOCTO_DOCS_REF_URL;#var-PACKAGE_FEED_URIS'><filename>PACKAGE_FEED_URIS</filename></ulink>
|
||||||
|
variables have not been set or the target image was
|
||||||
|
built before the variables were set.
|
||||||
</para>
|
</para>
|
||||||
|
|
||||||
<para>
|
<para>
|
||||||
On the target machine, you need to inform
|
As an example, assume the target is able to use the
|
||||||
<filename>smart</filename> of every package database
|
following package databases:
|
||||||
you want to use.
|
|
||||||
As an example, suppose your target device can use the
|
|
||||||
following three package databases from a server named
|
|
||||||
<filename>server.name</filename>:
|
|
||||||
<filename>all</filename>, <filename>i586</filename>,
|
<filename>all</filename>, <filename>i586</filename>,
|
||||||
and <filename>qemux86</filename>.
|
and <filename>qemux86</filename> from a server named
|
||||||
Given this example, issue the following commands on the
|
<filename>my.server</filename>.
|
||||||
target:
|
You must inform <filename>smart</filename> of the
|
||||||
|
availability of these databases by issuing the
|
||||||
|
following commands on the target:
|
||||||
<literallayout class='monospaced'>
|
<literallayout class='monospaced'>
|
||||||
# smart channel --add all type=rpm-md baseurl=http://server.name/rpm/all
|
# smart channel --add i585 type=rpm-md baseurl=http://my.server/rpm/i586
|
||||||
# smart channel --add i585 type=rpm-md baseurl=http://server.name/rpm/i586
|
# smart channel --add qemux86 type=rpm-md baseurl=http://my.server/rpm/qemux86
|
||||||
# smart channel --add qemux86 type=rpm-md baseurl=http://server.name/rpm/qemux86
|
# smart channel --add all type=rpm-md baseurl=http://my.server/rpm/all
|
||||||
</literallayout>
|
</literallayout>
|
||||||
Also from the target machine, fetch the repository
|
From the target machine, fetch the repository:
|
||||||
information using this command:
|
|
||||||
<literallayout class='monospaced'>
|
<literallayout class='monospaced'>
|
||||||
# smart update
|
# smart update
|
||||||
</literallayout>
|
</literallayout>
|
||||||
You can now use the <filename>smart query</filename>
|
After everything is set up, <filename>smart</filename>
|
||||||
and <filename>smart install</filename> commands to
|
is able to find, install, and upgrade packages from
|
||||||
find and install packages from the repositories.
|
the specified repository.
|
||||||
</para>
|
</para>
|
||||||
</section>
|
</section>
|
||||||
|
|
||||||
@@ -7780,61 +7732,99 @@
|
|||||||
<title>Using IPK</title>
|
<title>Using IPK</title>
|
||||||
|
|
||||||
<para>
|
<para>
|
||||||
The application for performing runtime package
|
The <filename>opkg</filename> application performs
|
||||||
management of IPK packages on the target is called
|
runtime package management of IPK packages.
|
||||||
<filename>opkg</filename>.
|
You must perform an initial setup for
|
||||||
|
<filename>opkg</filename> on the target machine
|
||||||
|
if the
|
||||||
|
<ulink url='&YOCTO_DOCS_REF_URL;#var-PACKAGE_FEED_ARCHS'><filename>PACKAGE_FEED_ARCHS</filename></ulink>,
|
||||||
|
<ulink url='&YOCTO_DOCS_REF_URL;#var-PACKAGE_FEED_BASE_PATHS'><filename>PACKAGE_FEED_BASE_PATHS</filename></ulink>, and
|
||||||
|
<ulink url='&YOCTO_DOCS_REF_URL;#var-PACKAGE_FEED_URIS'><filename>PACKAGE_FEED_URIS</filename></ulink>
|
||||||
|
variables have not been set or the target image was
|
||||||
|
built before the variables were set.
|
||||||
</para>
|
</para>
|
||||||
|
|
||||||
<para>
|
<para>
|
||||||
In order to inform <filename>opkg</filename> of the
|
The <filename>opkg</filename> application uses
|
||||||
package databases you want to use, simply create one
|
configuration files to find available package
|
||||||
or more <filename>*.conf</filename> files in the
|
databases.
|
||||||
<filename>/etc/opkg</filename> directory on the target.
|
Thus, you need to create a configuration file inside
|
||||||
The <filename>opkg</filename> application uses them
|
the <filename>/etc/opkg/</filename> direction, which
|
||||||
to find its available package databases.
|
informs <filename>opkg</filename> of any repository
|
||||||
As an example, suppose you configured your HTTP server
|
you want to use.
|
||||||
on your machine named
|
</para>
|
||||||
<filename>www.mysite.com</filename> to serve files
|
|
||||||
from a <filename>BOARD-dir</filename> directory under
|
<para>
|
||||||
its document-root.
|
As an example, suppose you are serving packages from a
|
||||||
In this case, you might create a configuration
|
<filename>ipk/</filename> directory containing the
|
||||||
file on the target called
|
<filename>i586</filename>,
|
||||||
<filename>/etc/opkg/base-feeds.conf</filename> that
|
<filename>all</filename>, and
|
||||||
contains:
|
<filename>qemux86</filename> databases through an
|
||||||
|
HTTP server named <filename>my.server</filename>.
|
||||||
|
On the target, create a configuration file
|
||||||
|
(e.g. <filename>my_repo.conf</filename>) inside the
|
||||||
|
<filename>/etc/opkg/</filename> directory containing
|
||||||
|
the following:
|
||||||
<literallayout class='monospaced'>
|
<literallayout class='monospaced'>
|
||||||
src/gz all http://www.mysite.com/BOARD-dir/all
|
src/gz all http://my.server/ipk/all
|
||||||
src/gz armv7a http://www.mysite.com/BOARD-dir/armv7a
|
src/gz i586 http://my.server/ipk/i586
|
||||||
src/gz beaglebone http://www.mysite.com/BOARD-dir/beaglebone
|
src/gz qemux86 http://my.server/ipk/qemux86
|
||||||
</literallayout>
|
</literallayout>
|
||||||
</para>
|
Next, instruct <filename>opkg</filename> to fetch
|
||||||
|
the repository information:
|
||||||
<para>
|
|
||||||
As a way of making it easier to generate and make
|
|
||||||
these IPK configuration files available on your
|
|
||||||
target, simply define
|
|
||||||
<ulink url='&YOCTO_DOCS_REF_URL;#var-FEED_DEPLOYDIR_BASE_URI'><filename>FEED_DEPLOYDIR_BASE_URI</filename></ulink>
|
|
||||||
to point to your server and the location within the
|
|
||||||
document-root which contains the databases.
|
|
||||||
For example: if you are serving your packages over
|
|
||||||
HTTP, your server's IP address is 192.168.7.1, and
|
|
||||||
your databases are located in a directory called
|
|
||||||
<filename>BOARD-dir</filename> underneath your HTTP
|
|
||||||
server's document-root, you need to set
|
|
||||||
<filename>FEED_DEPLOYDIR_BASE_URI</filename> to
|
|
||||||
<filename>http://192.168.7.1/BOARD-dir</filename> and
|
|
||||||
a set of configuration files will be generated for you
|
|
||||||
in your target to work with this feed.
|
|
||||||
</para>
|
|
||||||
|
|
||||||
<para>
|
|
||||||
On the target machine, fetch (or refresh) the
|
|
||||||
repository information using this command:
|
|
||||||
<literallayout class='monospaced'>
|
<literallayout class='monospaced'>
|
||||||
# opkg update
|
# opkg update
|
||||||
</literallayout>
|
</literallayout>
|
||||||
You can now use the <filename>opkg list</filename> and
|
The <filename>opkg</filename> application is now able
|
||||||
<filename>opkg install</filename> commands to find and
|
to find, install, and upgrade packages from the
|
||||||
install packages from the repositories.
|
specified repository.
|
||||||
|
</para>
|
||||||
|
</section>
|
||||||
|
|
||||||
|
<section id='runtime-package-management-target-deb'>
|
||||||
|
<title>Using DEB</title>
|
||||||
|
|
||||||
|
<para>
|
||||||
|
The <filename>apt</filename> application performs
|
||||||
|
runtime package management of DEB packages.
|
||||||
|
This application uses a source list file to find
|
||||||
|
available package databases.
|
||||||
|
You must perform an initial setup for
|
||||||
|
<filename>apt</filename> on the target machine
|
||||||
|
if the
|
||||||
|
<ulink url='&YOCTO_DOCS_REF_URL;#var-PACKAGE_FEED_ARCHS'><filename>PACKAGE_FEED_ARCHS</filename></ulink>,
|
||||||
|
<ulink url='&YOCTO_DOCS_REF_URL;#var-PACKAGE_FEED_BASE_PATHS'><filename>PACKAGE_FEED_BASE_PATHS</filename></ulink>, and
|
||||||
|
<ulink url='&YOCTO_DOCS_REF_URL;#var-PACKAGE_FEED_URIS'><filename>PACKAGE_FEED_URIS</filename></ulink>
|
||||||
|
variables have not been set or the target image was
|
||||||
|
built before the variables were set.
|
||||||
|
</para>
|
||||||
|
|
||||||
|
<para>
|
||||||
|
To inform <filename>apt</filename> of the repository
|
||||||
|
you want to use, you might create a list file (e.g.
|
||||||
|
<filename>my_repo.list</filename>) inside the
|
||||||
|
<filename>/etc/apt/sources.list.d/</filename>
|
||||||
|
directory.
|
||||||
|
As an example, suppose you are serving packages from a
|
||||||
|
<filename>deb/</filename> directory containing the
|
||||||
|
<filename>i586</filename>,
|
||||||
|
<filename>all</filename>, and
|
||||||
|
<filename>qemux86</filename> databases through an
|
||||||
|
HTTP server named <filename>my.server</filename>.
|
||||||
|
The list file should contain:
|
||||||
|
<literallayout class='monospaced'>
|
||||||
|
deb http://my.server/deb/all ./
|
||||||
|
deb http://my.server/deb/i586 ./
|
||||||
|
deb http://my.server/deb/qemux86 ./
|
||||||
|
</literallayout>
|
||||||
|
Next, instruct the <filename>apt</filename>
|
||||||
|
application to fetch the repository information:
|
||||||
|
<literallayout class='monospaced'>
|
||||||
|
# apt-get update
|
||||||
|
</literallayout>
|
||||||
|
After this step, <filename>apt</filename> is able
|
||||||
|
to find, install, and upgrade packages from the
|
||||||
|
specified repository.
|
||||||
</para>
|
</para>
|
||||||
</section>
|
</section>
|
||||||
</section>
|
</section>
|
||||||
@@ -9198,18 +9188,17 @@
|
|||||||
in your <filename>local.conf</filename> file.
|
in your <filename>local.conf</filename> file.
|
||||||
Be sure to provide the IP address you need:
|
Be sure to provide the IP address you need:
|
||||||
<literallayout class='monospaced'>
|
<literallayout class='monospaced'>
|
||||||
TEST_EXPORT_ONLY = "1"
|
INHERIT +="testexport"
|
||||||
TEST_TARGET = "simpleremote"
|
|
||||||
TEST_TARGET_IP = "192.168.7.2"
|
TEST_TARGET_IP = "192.168.7.2"
|
||||||
TEST_SERVER_IP = "192.168.7.1"
|
TEST_SERVER_IP = "192.168.7.1"
|
||||||
</literallayout>
|
</literallayout>
|
||||||
You can then export the tests with the following:
|
You can then export the tests with the following:
|
||||||
<literallayout class='monospaced'>
|
<literallayout class='monospaced'>
|
||||||
$ bitbake core-image-sato -c testimage
|
$ bitbake core-image-sato -c testexport
|
||||||
</literallayout>
|
</literallayout>
|
||||||
Exporting the tests places them in the
|
Exporting the tests places them in the
|
||||||
<link linkend='build-directory'>Build Directory</link> in
|
<link linkend='build-directory'>Build Directory</link> in
|
||||||
<filename>tmp/testimage/core-image-sato</filename>, which
|
<filename>tmp/testexport/core-image-sato</filename>, which
|
||||||
is controlled by the
|
is controlled by the
|
||||||
<filename>TEST_EXPORT_DIR</filename> variable.
|
<filename>TEST_EXPORT_DIR</filename> variable.
|
||||||
</para>
|
</para>
|
||||||
@@ -9217,37 +9206,9 @@
|
|||||||
<para>
|
<para>
|
||||||
You can now run the tests outside of the build environment:
|
You can now run the tests outside of the build environment:
|
||||||
<literallayout class='monospaced'>
|
<literallayout class='monospaced'>
|
||||||
$ cd tmp/testimage/core-image-sato
|
$ cd tmp/testexport/core-image-sato
|
||||||
$ ./runexported.py testdata.json
|
$ ./runexported.py testdata.json
|
||||||
</literallayout>
|
</literallayout>
|
||||||
<note>
|
|
||||||
This "export" feature does not deploy or boot the target
|
|
||||||
image.
|
|
||||||
Your target (be it a Qemu or hardware one)
|
|
||||||
has to already be up and running when you call
|
|
||||||
<filename>runexported.py</filename>
|
|
||||||
</note>
|
|
||||||
</para>
|
|
||||||
|
|
||||||
<para>
|
|
||||||
The exported data (i.e. <filename>testdata.json</filename>)
|
|
||||||
contains paths to the Build Directory.
|
|
||||||
Thus, the contents of the directory can be moved
|
|
||||||
to another machine as long as you update some paths in the
|
|
||||||
JSON.
|
|
||||||
Usually, you only care about the
|
|
||||||
<filename>${DEPLOY_DIR}/rpm</filename> directory
|
|
||||||
(assuming the RPM and Smart tests are enabled).
|
|
||||||
Consequently, running the tests on other machine
|
|
||||||
means that you have to move the contents and call
|
|
||||||
<filename>runexported.py</filename> with
|
|
||||||
"--deploy-dir <replaceable>path</replaceable>" as
|
|
||||||
follows:
|
|
||||||
<literallayout class='monospaced'>
|
|
||||||
./runexported.py --deploy-dir /new/path/on/this/machine testdata.json
|
|
||||||
</literallayout>
|
|
||||||
<filename>runexported.py</filename> accepts other arguments
|
|
||||||
as well as described using <filename>--help</filename>.
|
|
||||||
</para>
|
</para>
|
||||||
</section>
|
</section>
|
||||||
|
|
||||||
@@ -9422,6 +9383,78 @@
|
|||||||
</para>
|
</para>
|
||||||
</section>
|
</section>
|
||||||
</section>
|
</section>
|
||||||
|
|
||||||
|
<section id='installing-packages-in-the-dut-without-the-package-manager'>
|
||||||
|
<title>Installing Packages in the DUT Without the Package Manager</title>
|
||||||
|
|
||||||
|
<para>
|
||||||
|
When a test requires a package built by BitBake, it is possible
|
||||||
|
to install that package.
|
||||||
|
Installing the package does not require a package manager be
|
||||||
|
installed in the device under test (DUT).
|
||||||
|
It does, however, require an SSH connection and the target must
|
||||||
|
be using the <filename>sshcontrol</filename> class.
|
||||||
|
<note>
|
||||||
|
This method uses <filename>scp</filename> to copy files
|
||||||
|
from the host to the target, which causes permissions and
|
||||||
|
special attributes to be lost.
|
||||||
|
</note>
|
||||||
|
</para>
|
||||||
|
|
||||||
|
<para>
|
||||||
|
A JSON file is used to define the packages needed by a test.
|
||||||
|
This file must be in the same path as the file used to define
|
||||||
|
the tests.
|
||||||
|
Furthermore, the filename must map directly to the test
|
||||||
|
module name with a <filename>.json</filename> extension.
|
||||||
|
</para>
|
||||||
|
|
||||||
|
<para>
|
||||||
|
The JSON file must include an object with the test name as
|
||||||
|
keys of an object or an array.
|
||||||
|
This object (or array of objects) uses the following data:
|
||||||
|
<itemizedlist>
|
||||||
|
<listitem><para>"pkg" - A mandatory string that is the
|
||||||
|
name of the package to be installed.
|
||||||
|
</para></listitem>
|
||||||
|
<listitem><para>"rm" - An optional boolean, which defaults
|
||||||
|
to "false", that specifies to remove the package after
|
||||||
|
the test.
|
||||||
|
</para></listitem>
|
||||||
|
<listitem><para>"extract" - An optional boolean, which
|
||||||
|
defaults to "false", that specifies if the package must
|
||||||
|
be extracted from the package format.
|
||||||
|
When set to "true", the package is not automatically
|
||||||
|
installed into the DUT.
|
||||||
|
</para></listitem>
|
||||||
|
</itemizedlist>
|
||||||
|
</para>
|
||||||
|
|
||||||
|
<para>
|
||||||
|
Following is an example JSON file that handles test "foo"
|
||||||
|
installing package "bar" and test "foobar" installing
|
||||||
|
packages "foo" and "bar".
|
||||||
|
Once the test is complete, the packages are removed from the
|
||||||
|
DUT.
|
||||||
|
<literallayout class='monospaced'>
|
||||||
|
{
|
||||||
|
"foo": {
|
||||||
|
"pkg": "bar"
|
||||||
|
},
|
||||||
|
"foobar": [
|
||||||
|
{
|
||||||
|
"pkg": "foo",
|
||||||
|
"rm": true
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"pkg": "bar",
|
||||||
|
"rm": true
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
</literallayout>
|
||||||
|
</para>
|
||||||
|
</section>
|
||||||
</section>
|
</section>
|
||||||
|
|
||||||
<section id="platdev-gdb-remotedebug">
|
<section id="platdev-gdb-remotedebug">
|
||||||
|
|||||||
@@ -86,6 +86,21 @@
|
|||||||
<date>April 2016</date>
|
<date>April 2016</date>
|
||||||
<revremark>Released with the Yocto Project 2.1 Release.</revremark>
|
<revremark>Released with the Yocto Project 2.1 Release.</revremark>
|
||||||
</revision>
|
</revision>
|
||||||
|
<revision>
|
||||||
|
<revnumber>2.1.1</revnumber>
|
||||||
|
<date>August 2016</date>
|
||||||
|
<revremark>Released with the Yocto Project 2.1.1 Release.</revremark>
|
||||||
|
</revision>
|
||||||
|
<revision>
|
||||||
|
<revnumber>2.1.2</revnumber>
|
||||||
|
<date>December 2016</date>
|
||||||
|
<revremark>Released with the Yocto Project 2.1.2 Release.</revremark>
|
||||||
|
</revision>
|
||||||
|
<revision>
|
||||||
|
<revnumber>2.1.3</revnumber>
|
||||||
|
<date>June 2017</date>
|
||||||
|
<revremark>Released with the Yocto Project 2.1.3 Release.</revremark>
|
||||||
|
</revision>
|
||||||
</revhistory>
|
</revhistory>
|
||||||
|
|
||||||
<copyright>
|
<copyright>
|
||||||
@@ -101,12 +116,46 @@
|
|||||||
Creative Commons.
|
Creative Commons.
|
||||||
</para>
|
</para>
|
||||||
|
|
||||||
<note>
|
<note><title>Manual Notes</title>
|
||||||
For the latest version of this manual associated with this
|
<itemizedlist>
|
||||||
Yocto Project release, see the
|
<listitem><para>
|
||||||
<ulink url='&YOCTO_DOCS_DEV_URL;'>Yocto Project Development Manual</ulink>
|
This version of the
|
||||||
from the Yocto Project website.
|
<emphasis>Yocto Project Development Manual</emphasis>
|
||||||
</note>
|
is for the &YOCTO_DOC_VERSION; release of the
|
||||||
|
Yocto Project.
|
||||||
|
To be sure you have the latest version of the manual
|
||||||
|
for this release, go to the
|
||||||
|
<ulink url='&YOCTO_HOME_URL;/documentation'>Yocto Project documentation page</ulink>
|
||||||
|
and select the manual from that site.
|
||||||
|
Manuals from the site are more up-to-date than manuals
|
||||||
|
derived from the Yocto Project released TAR files.
|
||||||
|
</para></listitem>
|
||||||
|
<listitem><para>
|
||||||
|
If you located this manual through a web search, the
|
||||||
|
version of the manual might not be the one you want
|
||||||
|
(e.g. the search might have returned a manual much
|
||||||
|
older than the Yocto Project version with which you
|
||||||
|
are working).
|
||||||
|
You can see all Yocto Project major releases by
|
||||||
|
visiting the
|
||||||
|
<ulink url='&YOCTO_WIKI_URL;/wiki/Releases'>Releases</ulink>
|
||||||
|
page.
|
||||||
|
If you need a version of this manual for a different
|
||||||
|
Yocto Project release, visit the
|
||||||
|
<ulink url='&YOCTO_HOME_URL;/documentation'>Yocto Project documentation page</ulink>
|
||||||
|
and select the manual set by using the
|
||||||
|
"ACTIVE RELEASES DOCUMENTATION" or "DOCUMENTS ARCHIVE"
|
||||||
|
pull-down menus.
|
||||||
|
</para></listitem>
|
||||||
|
<listitem><para>
|
||||||
|
To report any inaccuracies or problems with this
|
||||||
|
manual, send an email to the Yocto Project
|
||||||
|
discussion group at
|
||||||
|
                    <filename>yocto@yoctoproject.org</filename> or log into
|
||||||
|
the freenode <filename>#yocto</filename> channel.
|
||||||
|
</para></listitem>
|
||||||
|
</itemizedlist>
|
||||||
|
</note>
|
||||||
</legalnotice>
|
</legalnotice>
|
||||||
|
|
||||||
</bookinfo>
|
</bookinfo>
|
||||||
|
|||||||
@@ -384,9 +384,10 @@
|
|||||||
|
|
||||||
<para>
|
<para>
|
||||||
The resulting <filename>.config</filename> file is
|
The resulting <filename>.config</filename> file is
|
||||||
located in
|
located in the build directory,
|
||||||
<filename>${</filename><ulink url='&YOCTO_DOCS_REF_URL;#var-WORKDIR'><filename>WORKDIR</filename></ulink><filename>}</filename> under the
|
<filename>${</filename><ulink url='&YOCTO_DOCS_REF_URL;#var-B'><filename>B</filename></ulink><filename>}</filename>,
|
||||||
<filename>linux-${</filename><ulink url='&YOCTO_DOCS_REF_URL;#var-PACKAGE_ARCH'><filename>PACKAGE_ARCH</filename></ulink><filename>}-${<ulink url='&YOCTO_DOCS_REF_URL;#var-LINUX_KERNEL_TYPE'><filename>LINUX_KERNEL_TYPE</filename></ulink>}-build</filename> directory.
|
which expands to
|
||||||
|
<filename>${</filename><ulink url='&YOCTO_DOCS_REF_URL;#var-WORKDIR'><filename>WORKDIR</filename></ulink><filename>}</filename><filename>/linux-</filename><filename>${</filename><ulink url='&YOCTO_DOCS_REF_URL;#var-PACKAGE_ARCH'><filename>PACKAGE_ARCH</filename></ulink><filename>}-${</filename><ulink url='&YOCTO_DOCS_REF_URL;#var-LINUX_KERNEL_TYPE'><filename>LINUX_KERNEL_TYPE</filename></ulink><filename>}-build</filename>.
|
||||||
You can use the entire <filename>.config</filename> file as the
|
You can use the entire <filename>.config</filename> file as the
|
||||||
<filename>defconfig</filename> file as described in the
|
<filename>defconfig</filename> file as described in the
|
||||||
"<link linkend='changing-the-configuration'>Changing the Configuration</link>" section.
|
"<link linkend='changing-the-configuration'>Changing the Configuration</link>" section.
|
||||||
@@ -394,6 +395,16 @@
|
|||||||
see the
|
see the
|
||||||
"<ulink url='&YOCTO_DOCS_DEV_URL;#using-menuconfig'>Using <filename>menuconfig</filename></ulink>"
|
"<ulink url='&YOCTO_DOCS_DEV_URL;#using-menuconfig'>Using <filename>menuconfig</filename></ulink>"
|
||||||
section in the Yocto Project Development Manual.
|
section in the Yocto Project Development Manual.
|
||||||
|
<note>
|
||||||
|
You can determine what a variable expands to by looking
|
||||||
|
at the output of the <filename>bitbake -e</filename>
|
||||||
|
command:
|
||||||
|
<literallayout class='monospaced'>
|
||||||
|
$ bitbake -e virtual/kernel
|
||||||
|
</literallayout>
|
||||||
|
Search the output for the variable in which you are
|
||||||
|
interested to see exactly how it is expanded and used.
|
||||||
|
</note>
|
||||||
</para>
|
</para>
|
||||||
|
|
||||||
<para>
|
<para>
|
||||||
@@ -512,8 +523,14 @@
|
|||||||
</literallayout>
|
</literallayout>
|
||||||
Taking this step ensures you have the sources prepared
|
Taking this step ensures you have the sources prepared
|
||||||
and the configuration completed.
|
and the configuration completed.
|
||||||
You can find the sources in the
|
You can find the sources in the build directory within the
|
||||||
<filename>${</filename><ulink url='&YOCTO_DOCS_REF_URL;#var-WORKDIR'><filename>WORKDIR</filename></ulink><filename>}/linux</filename> directory.
|
<filename>source/</filename> directory, which is a symlink
|
||||||
|
(i.e. <filename>${</filename><ulink url='&YOCTO_DOCS_REF_URL;#var-B'><filename>B</filename></ulink><filename>}/source</filename>).
|
||||||
|
The <filename>source/</filename> directory expands to
|
||||||
|
<filename>${</filename><ulink url='&YOCTO_DOCS_REF_URL;#var-WORKDIR'><filename>WORKDIR</filename></ulink><filename>}</filename><filename>/linux-</filename><filename>${</filename><ulink url='&YOCTO_DOCS_REF_URL;#var-PACKAGE_ARCH'><filename>PACKAGE_ARCH</filename></ulink><filename>}-${</filename><ulink url='&YOCTO_DOCS_REF_URL;#var-LINUX_KERNEL_TYPE'><filename>LINUX_KERNEL_TYPE</filename></ulink><filename>}-build/source</filename>.
|
||||||
|
The directory pointed to by the
|
||||||
|
<filename>source/</filename> symlink is also known as
|
||||||
|
<filename>${</filename><ulink url='&YOCTO_DOCS_REF_URL;#var-STAGING_KERNEL_DIR'><filename>STAGING_KERNEL_DIR</filename></ulink><filename>}</filename>.
|
||||||
</para>
|
</para>
|
||||||
|
|
||||||
<para>
|
<para>
|
||||||
|
|||||||
@@ -71,6 +71,21 @@
|
|||||||
<date>April 2016</date>
|
<date>April 2016</date>
|
||||||
<revremark>Released with the Yocto Project 2.1 Release.</revremark>
|
<revremark>Released with the Yocto Project 2.1 Release.</revremark>
|
||||||
</revision>
|
</revision>
|
||||||
|
<revision>
|
||||||
|
<revnumber>2.1.1</revnumber>
|
||||||
|
<date>August 2016</date>
|
||||||
|
<revremark>Released with the Yocto Project 2.1.1 Release.</revremark>
|
||||||
|
</revision>
|
||||||
|
<revision>
|
||||||
|
<revnumber>2.1.2</revnumber>
|
||||||
|
<date>December 2016</date>
|
||||||
|
<revremark>Released with the Yocto Project 2.1.2 Release.</revremark>
|
||||||
|
</revision>
|
||||||
|
<revision>
|
||||||
|
<revnumber>2.1.3</revnumber>
|
||||||
|
<date>June 2017</date>
|
||||||
|
<revremark>Released with the Yocto Project 2.1.3 Release.</revremark>
|
||||||
|
</revision>
|
||||||
</revhistory>
|
</revhistory>
|
||||||
|
|
||||||
<copyright>
|
<copyright>
|
||||||
@@ -83,12 +98,46 @@
|
|||||||
Permission is granted to copy, distribute and/or modify this document under
|
Permission is granted to copy, distribute and/or modify this document under
|
||||||
the terms of the <ulink type="http" url="http://creativecommons.org/licenses/by-sa/2.0/uk/">Creative Commons Attribution-Share Alike 2.0 UK: England & Wales</ulink> as published by Creative Commons.
|
the terms of the <ulink type="http" url="http://creativecommons.org/licenses/by-sa/2.0/uk/">Creative Commons Attribution-Share Alike 2.0 UK: England & Wales</ulink> as published by Creative Commons.
|
||||||
</para>
|
</para>
|
||||||
<note>
|
<note><title>Manual Notes</title>
|
||||||
For the latest version of this manual associated with this
|
<itemizedlist>
|
||||||
Yocto Project release, see the
|
<listitem><para>
|
||||||
<ulink url='&YOCTO_DOCS_KERNEL_DEV_URL;'>Yocto Project Linux Kernel Development Manual</ulink>
|
This version of the
|
||||||
from the Yocto Project website.
|
<emphasis>Yocto Project Linux Kernel Development Manual</emphasis>
|
||||||
</note>
|
is for the &YOCTO_DOC_VERSION; release of the
|
||||||
|
Yocto Project.
|
||||||
|
To be sure you have the latest version of the manual
|
||||||
|
for this release, go to the
|
||||||
|
<ulink url='&YOCTO_HOME_URL;/documentation'>Yocto Project documentation page</ulink>
|
||||||
|
and select the manual from that site.
|
||||||
|
Manuals from the site are more up-to-date than manuals
|
||||||
|
derived from the Yocto Project released TAR files.
|
||||||
|
</para></listitem>
|
||||||
|
<listitem><para>
|
||||||
|
If you located this manual through a web search, the
|
||||||
|
version of the manual might not be the one you want
|
||||||
|
(e.g. the search might have returned a manual much
|
||||||
|
older than the Yocto Project version with which you
|
||||||
|
are working).
|
||||||
|
You can see all Yocto Project major releases by
|
||||||
|
visiting the
|
||||||
|
<ulink url='&YOCTO_WIKI_URL;/wiki/Releases'>Releases</ulink>
|
||||||
|
page.
|
||||||
|
If you need a version of this manual for a different
|
||||||
|
Yocto Project release, visit the
|
||||||
|
<ulink url='&YOCTO_HOME_URL;/documentation'>Yocto Project documentation page</ulink>
|
||||||
|
and select the manual set by using the
|
||||||
|
"ACTIVE RELEASES DOCUMENTATION" or "DOCUMENTS ARCHIVE"
|
||||||
|
pull-down menus.
|
||||||
|
</para></listitem>
|
||||||
|
<listitem><para>
|
||||||
|
To report any inaccuracies or problems with this
|
||||||
|
manual, send an email to the Yocto Project
|
||||||
|
discussion group at
|
||||||
|
                    <filename>yocto@yoctoproject.org</filename> or log into
|
||||||
|
the freenode <filename>#yocto</filename> channel.
|
||||||
|
</para></listitem>
|
||||||
|
</itemizedlist>
|
||||||
|
</note>
|
||||||
</legalnotice>
|
</legalnotice>
|
||||||
|
|
||||||
</bookinfo>
|
</bookinfo>
|
||||||
|
|||||||
@@ -55,6 +55,21 @@
|
|||||||
<date>April 2016</date>
|
<date>April 2016</date>
|
||||||
<revremark>Released with the Yocto Project 2.1 Release.</revremark>
|
<revremark>Released with the Yocto Project 2.1 Release.</revremark>
|
||||||
</revision>
|
</revision>
|
||||||
|
<revision>
|
||||||
|
<revnumber>2.1.1</revnumber>
|
||||||
|
<date>August 2016</date>
|
||||||
|
<revremark>Released with the Yocto Project 2.1.1 Release.</revremark>
|
||||||
|
</revision>
|
||||||
|
<revision>
|
||||||
|
<revnumber>2.1.2</revnumber>
|
||||||
|
<date>December 2016</date>
|
||||||
|
<revremark>Released with the Yocto Project 2.1.2 Release.</revremark>
|
||||||
|
</revision>
|
||||||
|
<revision>
|
||||||
|
<revnumber>2.1.3</revnumber>
|
||||||
|
<date>June 2017</date>
|
||||||
|
<revremark>Released with the Yocto Project 2.1.3 Release.</revremark>
|
||||||
|
</revision>
|
||||||
</revhistory>
|
</revhistory>
|
||||||
|
|
||||||
<copyright>
|
<copyright>
|
||||||
@@ -67,12 +82,46 @@
|
|||||||
Permission is granted to copy, distribute and/or modify this document under
|
Permission is granted to copy, distribute and/or modify this document under
|
||||||
the terms of the <ulink type="http" url="http://creativecommons.org/licenses/by-sa/2.0/uk/">Creative Commons Attribution-Share Alike 2.0 UK: England & Wales</ulink> as published by Creative Commons.
|
the terms of the <ulink type="http" url="http://creativecommons.org/licenses/by-sa/2.0/uk/">Creative Commons Attribution-Share Alike 2.0 UK: England & Wales</ulink> as published by Creative Commons.
|
||||||
</para>
|
</para>
|
||||||
<note>
|
<note><title>Manual Notes</title>
|
||||||
For the latest version of this manual associated with this
|
<itemizedlist>
|
||||||
Yocto Project release, see the
|
<listitem><para>
|
||||||
<ulink url='&YOCTO_DOCS_MM_URL;'>Yocto Project Mega-Manual</ulink>
|
This version of the
|
||||||
from the Yocto Project website.
|
<emphasis>Yocto Project Mega-Manual</emphasis>
|
||||||
</note>
|
is for the &YOCTO_DOC_VERSION; release of the
|
||||||
|
Yocto Project.
|
||||||
|
To be sure you have the latest version of the manual
|
||||||
|
for this release, go to the
|
||||||
|
<ulink url='&YOCTO_HOME_URL;/documentation'>Yocto Project documentation page</ulink>
|
||||||
|
and select the manual from that site.
|
||||||
|
Manuals from the site are more up-to-date than manuals
|
||||||
|
derived from the Yocto Project released TAR files.
|
||||||
|
</para></listitem>
|
||||||
|
<listitem><para>
|
||||||
|
If you located this manual through a web search, the
|
||||||
|
version of the manual might not be the one you want
|
||||||
|
(e.g. the search might have returned a manual much
|
||||||
|
older than the Yocto Project version with which you
|
||||||
|
are working).
|
||||||
|
You can see all Yocto Project major releases by
|
||||||
|
visiting the
|
||||||
|
<ulink url='&YOCTO_WIKI_URL;/wiki/Releases'>Releases</ulink>
|
||||||
|
page.
|
||||||
|
If you need a version of this manual for a different
|
||||||
|
Yocto Project release, visit the
|
||||||
|
<ulink url='&YOCTO_HOME_URL;/documentation'>Yocto Project documentation page</ulink>
|
||||||
|
and select the manual set by using the
|
||||||
|
"ACTIVE RELEASES DOCUMENTATION" or "DOCUMENTS ARCHIVE"
|
||||||
|
pull-down menus.
|
||||||
|
</para></listitem>
|
||||||
|
<listitem><para>
|
||||||
|
To report any inaccuracies or problems with this
|
||||||
|
manual, send an email to the Yocto Project
|
||||||
|
discussion group at
|
||||||
|
                    <filename>yocto@yoctoproject.org</filename> or log into
|
||||||
|
the freenode <filename>#yocto</filename> channel.
|
||||||
|
</para></listitem>
|
||||||
|
</itemizedlist>
|
||||||
|
</note>
|
||||||
|
|
||||||
</legalnotice>
|
</legalnotice>
|
||||||
|
|
||||||
|
|||||||
@@ -1,12 +1,12 @@
|
|||||||
<!ENTITY DISTRO "2.1">
|
<!ENTITY DISTRO "2.1.3">
|
||||||
<!ENTITY DISTRO_COMPRESSED "21">
|
<!ENTITY DISTRO_COMPRESSED "213">
|
||||||
<!ENTITY DISTRO_NAME_NO_CAP "krogoth">
|
<!ENTITY DISTRO_NAME_NO_CAP "krogoth">
|
||||||
<!ENTITY DISTRO_NAME "Krogoth">
|
<!ENTITY DISTRO_NAME "Krogoth">
|
||||||
<!ENTITY YOCTO_DOC_VERSION "2.1">
|
<!ENTITY YOCTO_DOC_VERSION "2.1.3">
|
||||||
<!ENTITY POKYVERSION "15.0.0">
|
<!ENTITY POKYVERSION "15.0.3">
|
||||||
<!ENTITY POKYVERSION_COMPRESSED "1500">
|
<!ENTITY POKYVERSION_COMPRESSED "1503">
|
||||||
<!ENTITY YOCTO_POKY "poky-&DISTRO_NAME_NO_CAP;-&POKYVERSION;">
|
<!ENTITY YOCTO_POKY "poky-&DISTRO_NAME_NO_CAP;-&POKYVERSION;">
|
||||||
<!ENTITY COPYRIGHT_YEAR "2010-2016">
|
<!ENTITY COPYRIGHT_YEAR "2010-2017">
|
||||||
<!ENTITY YOCTO_DL_URL "http://downloads.yoctoproject.org">
|
<!ENTITY YOCTO_DL_URL "http://downloads.yoctoproject.org">
|
||||||
<!ENTITY YOCTO_HOME_URL "http://www.yoctoproject.org">
|
<!ENTITY YOCTO_HOME_URL "http://www.yoctoproject.org">
|
||||||
<!ENTITY YOCTO_LISTS_URL "http://lists.yoctoproject.org">
|
<!ENTITY YOCTO_LISTS_URL "http://lists.yoctoproject.org">
|
||||||
|
|||||||
@@ -67,8 +67,10 @@
|
|||||||
By default, the Yocto build system strips symbols from the
|
By default, the Yocto build system strips symbols from the
|
||||||
binaries it packages, which makes it difficult to use some
|
binaries it packages, which makes it difficult to use some
|
||||||
of the tools.
|
of the tools.
|
||||||
</para><para>You can prevent that by putting the following
|
</para><para>You can prevent that by setting the
|
||||||
in your local.conf when you build the image:
|
<ulink url='&YOCTO_DOCS_REF_URL;#var-INHIBIT_PACKAGE_STRIP'><filename>INHIBIT_PACKAGE_STRIP</filename></ulink>
|
||||||
|
variable to "1" in your
|
||||||
|
<filename>local.conf</filename> when you build the image:
|
||||||
</para>
|
</para>
|
||||||
</note>
|
</note>
|
||||||
<literallayout class='monospaced'>
|
<literallayout class='monospaced'>
|
||||||
|
|||||||
@@ -60,8 +60,11 @@
|
|||||||
|
|
||||||
<para>
|
<para>
|
||||||
In particular, you'll get the most mileage out of perf if you
|
In particular, you'll get the most mileage out of perf if you
|
||||||
profile an image built with INHIBIT_PACKAGE_STRIP = "1" in your
|
profile an image built with the following in your
|
||||||
local.conf.
|
<filename>local.conf</filename> file:
|
||||||
|
<literallayout class='monospaced'>
|
||||||
|
<ulink url='&YOCTO_DOCS_REF_URL;#var-INHIBIT_PACKAGE_STRIP'>INHIBIT_PACKAGE_STRIP</ulink> = "1"
|
||||||
|
</literallayout>
|
||||||
</para>
|
</para>
|
||||||
|
|
||||||
<para>
|
<para>
|
||||||
@@ -355,10 +358,10 @@
|
|||||||
</para>
|
</para>
|
||||||
|
|
||||||
<para>
|
<para>
|
||||||
One way around that is to put the following in your local.conf
|
One way around that is to put the following in your
|
||||||
when you build the image:
|
<filename>local.conf</filename> file when you build the image:
|
||||||
<literallayout class='monospaced'>
|
<literallayout class='monospaced'>
|
||||||
INHIBIT_PACKAGE_STRIP = "1"
|
<ulink url='&YOCTO_DOCS_REF_URL;#var-INHIBIT_PACKAGE_STRIP'>INHIBIT_PACKAGE_STRIP</ulink> = "1"
|
||||||
</literallayout>
|
</literallayout>
|
||||||
However, we already have an image with the binaries stripped,
|
However, we already have an image with the binaries stripped,
|
||||||
so what can we do to get perf to resolve the symbols? Basically
|
so what can we do to get perf to resolve the symbols? Basically
|
||||||
|
|||||||
@@ -71,6 +71,21 @@
|
|||||||
<date>April 2016</date>
|
<date>April 2016</date>
|
||||||
<revremark>Released with the Yocto Project 2.1 Release.</revremark>
|
<revremark>Released with the Yocto Project 2.1 Release.</revremark>
|
||||||
</revision>
|
</revision>
|
||||||
|
<revision>
|
||||||
|
<revnumber>2.1.1</revnumber>
|
||||||
|
<date>August 2016</date>
|
||||||
|
<revremark>Released with the Yocto Project 2.1.1 Release.</revremark>
|
||||||
|
</revision>
|
||||||
|
<revision>
|
||||||
|
<revnumber>2.1.2</revnumber>
|
||||||
|
<date>December 2016</date>
|
||||||
|
<revremark>Released with the Yocto Project 2.1.2 Release.</revremark>
|
||||||
|
</revision>
|
||||||
|
<revision>
|
||||||
|
<revnumber>2.1.3</revnumber>
|
||||||
|
<date>June 2017</date>
|
||||||
|
<revremark>Released with the Yocto Project 2.1.3 Release.</revremark>
|
||||||
|
</revision>
|
||||||
</revhistory>
|
</revhistory>
|
||||||
|
|
||||||
<copyright>
|
<copyright>
|
||||||
@@ -86,12 +101,46 @@
|
|||||||
Creative Commons.
|
Creative Commons.
|
||||||
</para>
|
</para>
|
||||||
|
|
||||||
<note>
|
<note><title>Manual Notes</title>
|
||||||
For the latest version of this manual associated with this
|
<itemizedlist>
|
||||||
Yocto Project release, see the
|
<listitem><para>
|
||||||
<ulink url='&YOCTO_DOCS_PROF_URL;'>Yocto Project Profiling and Tracing Manual</ulink>
|
This version of the
|
||||||
from the Yocto Project website.
|
<emphasis>Yocto Project Profiling and Tracing Manual</emphasis>
|
||||||
</note>
|
is for the &YOCTO_DOC_VERSION; release of the
|
||||||
|
Yocto Project.
|
||||||
|
To be sure you have the latest version of the manual
|
||||||
|
for this release, go to the
|
||||||
|
<ulink url='&YOCTO_HOME_URL;/documentation'>Yocto Project documentation page</ulink>
|
||||||
|
and select the manual from that site.
|
||||||
|
Manuals from the site are more up-to-date than manuals
|
||||||
|
derived from the Yocto Project released TAR files.
|
||||||
|
</para></listitem>
|
||||||
|
<listitem><para>
|
||||||
|
If you located this manual through a web search, the
|
||||||
|
version of the manual might not be the one you want
|
||||||
|
(e.g. the search might have returned a manual much
|
||||||
|
older than the Yocto Project version with which you
|
||||||
|
are working).
|
||||||
|
You can see all Yocto Project major releases by
|
||||||
|
visiting the
|
||||||
|
<ulink url='&YOCTO_WIKI_URL;/wiki/Releases'>Releases</ulink>
|
||||||
|
page.
|
||||||
|
If you need a version of this manual for a different
|
||||||
|
Yocto Project release, visit the
|
||||||
|
<ulink url='&YOCTO_HOME_URL;/documentation'>Yocto Project documentation page</ulink>
|
||||||
|
and select the manual set by using the
|
||||||
|
"ACTIVE RELEASES DOCUMENTATION" or "DOCUMENTS ARCHIVE"
|
||||||
|
pull-down menus.
|
||||||
|
</para></listitem>
|
||||||
|
<listitem><para>
|
||||||
|
To report any inaccuracies or problems with this
|
||||||
|
manual, send an email to the Yocto Project
|
||||||
|
discussion group at
|
||||||
|
                    <filename>yocto@yoctoproject.org</filename> or log into
|
||||||
|
the freenode <filename>#yocto</filename> channel.
|
||||||
|
</para></listitem>
|
||||||
|
</itemizedlist>
|
||||||
|
</note>
|
||||||
</legalnotice>
|
</legalnotice>
|
||||||
|
|
||||||
</bookinfo>
|
</bookinfo>
|
||||||
|
|||||||
@@ -144,6 +144,27 @@
|
|||||||
</para></listitem>
|
</para></listitem>
|
||||||
<listitem><para><emphasis>bluetooth:</emphasis> Include
|
<listitem><para><emphasis>bluetooth:</emphasis> Include
|
||||||
bluetooth support (integrated BT only).</para></listitem>
|
bluetooth support (integrated BT only).</para></listitem>
|
||||||
|
<listitem><para><emphasis>bluez5:</emphasis> Include
|
||||||
|
BlueZ Version 5, which provides core Bluetooth layers and
|
||||||
|
protocols support.
|
||||||
|
<note>
|
||||||
|
The default value for the
|
||||||
|
                        <link linkend='var-DISTRO_FEATURES'><filename>DISTRO_FEATURES</filename></link>
|
||||||
|
variable includes "bluetooth", which causes bluez5
|
||||||
|
to be backfilled in for bluetooth support.
|
||||||
|
If you do not want bluez5 backfilled and would rather
|
||||||
|
use bluez4, you need to use the
|
||||||
|
<link linkend='var-DISTRO_FEATURES_BACKFILL_CONSIDERED'><filename>DISTRO_FEATURES_BACKFILL_CONSIDERED</filename></link>
|
||||||
|
variable as follows:
|
||||||
|
<literallayout class='monospaced'>
|
||||||
|
DISTRO_FEATURES_BACKFILL_CONSIDERED = "bluez5"
|
||||||
|
</literallayout>
|
||||||
|
Setting this variable tells the OpenEmbedded build
|
||||||
|
system that you have considered but ruled
|
||||||
|
out using the bluez5 feature and that bluez4 will be
|
||||||
|
used.
|
||||||
|
</note>
|
||||||
|
</para></listitem>
|
||||||
<listitem><para><emphasis>cramfs:</emphasis> Include CramFS
|
<listitem><para><emphasis>cramfs:</emphasis> Include CramFS
|
||||||
support.</para></listitem>
|
support.</para></listitem>
|
||||||
<listitem><para><emphasis>directfb:</emphasis>
|
<listitem><para><emphasis>directfb:</emphasis>
|
||||||
|
|||||||
@@ -102,6 +102,21 @@
|
|||||||
<date>April 2016</date>
|
<date>April 2016</date>
|
||||||
<revremark>Released with the Yocto Project 2.1 Release.</revremark>
|
<revremark>Released with the Yocto Project 2.1 Release.</revremark>
|
||||||
</revision>
|
</revision>
|
||||||
|
<revision>
|
||||||
|
<revnumber>2.1.1</revnumber>
|
||||||
|
<date>August 2016</date>
|
||||||
|
<revremark>Released with the Yocto Project 2.1.1 Release.</revremark>
|
||||||
|
</revision>
|
||||||
|
<revision>
|
||||||
|
<revnumber>2.1.2</revnumber>
|
||||||
|
<date>December 2016</date>
|
||||||
|
<revremark>Released with the Yocto Project 2.1.2 Release.</revremark>
|
||||||
|
</revision>
|
||||||
|
<revision>
|
||||||
|
<revnumber>2.1.3</revnumber>
|
||||||
|
<date>June 2017</date>
|
||||||
|
<revremark>Released with the Yocto Project 2.1.3 Release.</revremark>
|
||||||
|
</revision>
|
||||||
</revhistory>
|
</revhistory>
|
||||||
|
|
||||||
<copyright>
|
<copyright>
|
||||||
@@ -114,12 +129,46 @@
|
|||||||
Permission is granted to copy, distribute and/or modify this document under
|
Permission is granted to copy, distribute and/or modify this document under
|
||||||
the terms of the <ulink type="http" url="http://creativecommons.org/licenses/by-sa/2.0/uk/">Creative Commons Attribution-Share Alike 2.0 UK: England & Wales</ulink> as published by Creative Commons.
|
the terms of the <ulink type="http" url="http://creativecommons.org/licenses/by-sa/2.0/uk/">Creative Commons Attribution-Share Alike 2.0 UK: England & Wales</ulink> as published by Creative Commons.
|
||||||
</para>
|
</para>
|
||||||
<note>
|
<note><title>Manual Notes</title>
|
||||||
For the latest version of this manual associated with this
|
<itemizedlist>
|
||||||
Yocto Project release, see the
|
<listitem><para>
|
||||||
<ulink url='&YOCTO_DOCS_REF_URL;'>Yocto Project Reference Manual</ulink>
|
This version of the
|
||||||
from the Yocto Project website.
|
<emphasis>Yocto Project Reference Manual</emphasis>
|
||||||
</note>
|
is for the &YOCTO_DOC_VERSION; release of the
|
||||||
|
Yocto Project.
|
||||||
|
To be sure you have the latest version of the manual
|
||||||
|
for this release, go to the
|
||||||
|
<ulink url='&YOCTO_HOME_URL;/documentation'>Yocto Project documentation page</ulink>
|
||||||
|
and select the manual from that site.
|
||||||
|
Manuals from the site are more up-to-date than manuals
|
||||||
|
derived from the Yocto Project released TAR files.
|
||||||
|
</para></listitem>
|
||||||
|
<listitem><para>
|
||||||
|
If you located this manual through a web search, the
|
||||||
|
version of the manual might not be the one you want
|
||||||
|
(e.g. the search might have returned a manual much
|
||||||
|
older than the Yocto Project version with which you
|
||||||
|
are working).
|
||||||
|
You can see all Yocto Project major releases by
|
||||||
|
visiting the
|
||||||
|
<ulink url='&YOCTO_WIKI_URL;/wiki/Releases'>Releases</ulink>
|
||||||
|
page.
|
||||||
|
If you need a version of this manual for a different
|
||||||
|
Yocto Project release, visit the
|
||||||
|
<ulink url='&YOCTO_HOME_URL;/documentation'>Yocto Project documentation page</ulink>
|
||||||
|
and select the manual set by using the
|
||||||
|
"ACTIVE RELEASES DOCUMENTATION" or "DOCUMENTS ARCHIVE"
|
||||||
|
pull-down menus.
|
||||||
|
</para></listitem>
|
||||||
|
<listitem><para>
|
||||||
|
To report any inaccuracies or problems with this
|
||||||
|
manual, send an email to the Yocto Project
|
||||||
|
discussion group at
|
||||||
|
<filename>yocto@yoctoproject.com</filename> or log into
|
||||||
|
the freenode <filename>#yocto</filename> channel.
|
||||||
|
</para></listitem>
|
||||||
|
</itemizedlist>
|
||||||
|
</note>
|
||||||
</legalnotice>
|
</legalnotice>
|
||||||
|
|
||||||
</bookinfo>
|
</bookinfo>
|
||||||
|
|||||||
@@ -5940,7 +5940,7 @@ recipes-graphics/xorg-font/font-alias_1.0.3.bb:PR = "${INC_PR}.3"
|
|||||||
|
|
||||||
<glossentry id='var-INHIBIT_PACKAGE_DEBUG_SPLIT'><glossterm>INHIBIT_PACKAGE_DEBUG_SPLIT</glossterm>
|
<glossentry id='var-INHIBIT_PACKAGE_DEBUG_SPLIT'><glossterm>INHIBIT_PACKAGE_DEBUG_SPLIT</glossterm>
|
||||||
<info>
|
<info>
|
||||||
INHIBIT_PACKAGE_STRIP[doc] = "If set to "1", causes the build to not strip binaries in resulting packages."
|
INHIBIT_PACKAGE_DEBUG_SPLIT[doc] = "If set to "1", prevents the OpenEmbedded build system from splitting out debug information during packaging"
|
||||||
</info>
|
</info>
|
||||||
<glossdef>
|
<glossdef>
|
||||||
<para role="glossdeffirst">
|
<para role="glossdeffirst">
|
||||||
@@ -5976,7 +5976,19 @@ recipes-graphics/xorg-font/font-alias_1.0.3.bb:PR = "${INC_PR}.3"
|
|||||||
<glossdef>
|
<glossdef>
|
||||||
<para role="glossdeffirst">
|
<para role="glossdeffirst">
|
||||||
<!-- <para role="glossdeffirst"><imagedata fileref="figures/define-generic.png" /> -->
|
<!-- <para role="glossdeffirst"><imagedata fileref="figures/define-generic.png" /> -->
|
||||||
If set to "1", causes the build to not strip binaries in resulting packages.
|
If set to "1", causes the build to not strip binaries in
|
||||||
|
resulting packages and prevents the
|
||||||
|
<filename>-dbg</filename> package from containing the
|
||||||
|
source files.
|
||||||
|
</para>
|
||||||
|
|
||||||
|
<para>
|
||||||
|
By default, the OpenEmbedded build system strips
|
||||||
|
binaries and puts the debugging symbols into
|
||||||
|
<filename>${</filename><link linkend='var-PN'><filename>PN</filename></link><filename>}-dbg</filename>.
|
||||||
|
Consequently, you should not set
|
||||||
|
<filename>INHIBIT_PACKAGE_STRIP</filename> when you plan
|
||||||
|
to debug in general.
|
||||||
</para>
|
</para>
|
||||||
</glossdef>
|
</glossdef>
|
||||||
</glossentry>
|
</glossentry>
|
||||||
@@ -9559,8 +9571,48 @@ recipes-graphics/xorg-font/font-alias_1.0.3.bb:PR = "${INC_PR}.3"
|
|||||||
<glossdef>
|
<glossdef>
|
||||||
<para role="glossdeffirst">
|
<para role="glossdeffirst">
|
||||||
<!-- <para role="glossdeffirst"><imagedata fileref="figures/define-generic.png" /> -->
|
<!-- <para role="glossdeffirst"><imagedata fileref="figures/define-generic.png" /> -->
|
||||||
The revision of the recipe.
|
The revision of the recipe. The default value for this
|
||||||
The default value for this variable is "r0".
|
variable is "r0".
|
||||||
|
Subsequent revisions of the recipe conventionally have the
|
||||||
|
values "r1", "r2", and so forth.
|
||||||
|
When
|
||||||
|
<link linkend='var-PV'><filename>PV</filename></link>
|
||||||
|
increases, <filename>PR</filename> is conventionally reset
|
||||||
|
to "r0".
|
||||||
|
<note>
|
||||||
|
The OpenEmbedded build system does not need the aid of
|
||||||
|
<filename>PR</filename> to know when to rebuild a
|
||||||
|
recipe.
|
||||||
|
The build system uses
|
||||||
|
<link linkend='var-STAMP'><filename>STAMP</filename></link>
|
||||||
|
and the
|
||||||
|
<link linkend='shared-state-cache'>shared state cache</link>
|
||||||
|
mechanisms.
|
||||||
|
</note>
|
||||||
|
The <filename>PR</filename> variable primarily becomes
|
||||||
|
significant when a package manager dynamically installs
|
||||||
|
packages on an already built image.
|
||||||
|
In this case, <filename>PR</filename>, which is the default
|
||||||
|
value of
|
||||||
|
<link linkend='var-PKGR'><filename>PKGR</filename></link>,
|
||||||
|
helps the package manager distinguish which package is the
|
||||||
|
most recent one in cases where many packages have the same
|
||||||
|
<filename>PV</filename> (i.e. <filename>PKGV</filename>).
|
||||||
|
A component having many packages with the same
|
||||||
|
<filename>PV</filename> usually means that the packages all
|
||||||
|
install the same upstream version, but with later
|
||||||
|
(<filename>PR</filename>) version packages including
|
||||||
|
packaging fixes.
|
||||||
|
<note>
|
||||||
|
<filename>PR</filename> does not need to be increased
|
||||||
|
for changes that do not change the package contents or
|
||||||
|
metadata.
|
||||||
|
</note>
|
||||||
|
Because manually managing <filename>PR</filename> can be
|
||||||
|
cumbersome and error-prone, an automated solution exists.
|
||||||
|
See the
|
||||||
|
"<ulink url='&YOCTO_DOCS_DEV_URL;#working-with-a-pr-service'>Working With a PR Service</ulink>"
|
||||||
|
section for more information.
|
||||||
</para>
|
</para>
|
||||||
</glossdef>
|
</glossdef>
|
||||||
</glossentry>
|
</glossentry>
|
||||||
@@ -10076,14 +10128,20 @@ recipes-graphics/xorg-font/font-alias_1.0.3.bb:PR = "${INC_PR}.3"
|
|||||||
<filename>RDEPENDS</filename> since some automatic
|
<filename>RDEPENDS</filename> since some automatic
|
||||||
handling occurs:
|
handling occurs:
|
||||||
<itemizedlist>
|
<itemizedlist>
|
||||||
<listitem><para><emphasis><filename>shlibdeps</filename></emphasis>: If
|
<listitem><para><emphasis><filename>shlibdeps</filename></emphasis>:
|
||||||
a runtime package contains a shared library
|
If a runtime package contains a compiled binary,
|
||||||
(<filename>.so</filename>), the build
|
the build processes the binary in order to
|
||||||
processes the library in order to determine other
|
determine any shared libraries
|
||||||
libraries to which it is dynamically linked.
|
(<filename>.so</filename> files) to which it is
|
||||||
The build process adds these libraries to
|
dynamically linked.
|
||||||
<filename>RDEPENDS</filename> when creating the runtime
|
The build process adds the packages containing
|
||||||
package.</para></listitem>
|
these libraries to <filename>RDEPENDS</filename>
|
||||||
|
when creating the runtime package.
|
||||||
|
In addition, if the shared library is versioned,
|
||||||
|
the dependency will have the version appended to it
|
||||||
|
in order to force an upgrade to the appropriate
|
||||||
|
version if needed.
|
||||||
|
</para></listitem>
|
||||||
<listitem><para><emphasis><filename>pcdeps</filename></emphasis>: If
|
<listitem><para><emphasis><filename>pcdeps</filename></emphasis>: If
|
||||||
the package ships a <filename>pkg-config</filename>
|
the package ships a <filename>pkg-config</filename>
|
||||||
information file, the build process uses this file
|
information file, the build process uses this file
|
||||||
@@ -11982,7 +12040,7 @@ recipes-graphics/xorg-font/font-alias_1.0.3.bb:PR = "${INC_PR}.3"
|
|||||||
directory structure.
|
directory structure.
|
||||||
<literallayout class='monospaced'>
|
<literallayout class='monospaced'>
|
||||||
SSTATE_MIRRORS ?= "\
|
SSTATE_MIRRORS ?= "\
|
||||||
file://.* http://<replaceable>someserver</replaceable>.tld/share/sstate/PATH \n \
|
file://.* http://<replaceable>someserver</replaceable>.tld/share/sstate/PATH;downloadfilename=PATH \n \
|
||||||
file://.* file:///<replaceable>some-local-dir</replaceable>/sstate/PATH"
|
file://.* file:///<replaceable>some-local-dir</replaceable>/sstate/PATH"
|
||||||
</literallayout>
|
</literallayout>
|
||||||
</para>
|
</para>
|
||||||
@@ -14300,6 +14358,84 @@ recipes-graphics/xorg-font/font-alias_1.0.3.bb:PR = "${INC_PR}.3"
|
|||||||
</glossdef>
|
</glossdef>
|
||||||
</glossentry>
|
</glossentry>
|
||||||
|
|
||||||
|
<glossentry id='var-UPSTREAM_CHECK_GITTAGREGEX'><glossterm>UPSTREAM_CHECK_GITTAGREGEX</glossterm>
|
||||||
|
<info>
|
||||||
|
UPSTREAM_CHECK_GITTAGREGEX[doc] = "Filters relevant Git tags when fetching source from an upstream Git repository."
|
||||||
|
</info>
|
||||||
|
<glossdef>
|
||||||
|
<para role="glossdeffirst">
|
||||||
|
<!-- <para role="glossdeffirst"><imagedata fileref="figures/define-generic.png" /> -->
|
||||||
|
When the
|
||||||
|
<link linkend='ref-classes-distrodata'><filename>distrodata</filename></link>
|
||||||
|
class is enabled globally, you can perform a per-recipe
|
||||||
|
check for what the latest upstream source code version is
|
||||||
|
by calling
|
||||||
|
<filename>bitbake -c checkpkg</filename> <replaceable>recipe</replaceable>.
|
||||||
|
If the recipe source code is provided from Git
|
||||||
|
repositories, the OpenEmbedded build system determines the
|
||||||
|
latest upstream version by picking the latest tag from the
|
||||||
|
list of all repository tags.
|
||||||
|
You can use the
|
||||||
|
<filename>UPSTREAM_CHECK_GITTAGREGEX</filename>
|
||||||
|
variable to provide a regular expression to filter only the
|
||||||
|
relevant tags should the default filter not work
|
||||||
|
correctly.
|
||||||
|
<literallayout class='monospaced'>
|
||||||
|
UPSTREAM_CHECK_GITTAGREGEX = "git_tag_regex"
|
||||||
|
</literallayout>
|
||||||
|
</para>
|
||||||
|
</glossdef>
|
||||||
|
</glossentry>
|
||||||
|
|
||||||
|
<glossentry id='var-UPSTREAM_CHECK_REGEX'><glossterm>UPSTREAM_CHECK_REGEX</glossterm>
|
||||||
|
<info>
|
||||||
|
UPSTREAM_CHECK_REGEX[doc] = "The regular expression the package checking system uses to parse the page pointed to by UPSTREAM_CHECK_URI."
|
||||||
|
</info>
|
||||||
|
<glossdef>
|
||||||
|
<para role="glossdeffirst">
|
||||||
|
<!-- <para role="glossdeffirst"><imagedata fileref="figures/define-generic.png" /> -->
|
||||||
|
When the
|
||||||
|
<link linkend='ref-classes-distrodata'><filename>distrodata</filename></link>
|
||||||
|
class is enabled globally, use the
|
||||||
|
<filename>UPSTREAM_CHECK_REGEX</filename> variable to
|
||||||
|
specify a different regular expression instead of the
|
||||||
|
default one when the package checking system is parsing
|
||||||
|
the page found using
|
||||||
|
<link linkend='var-UPSTREAM_CHECK_URI'><filename>UPSTREAM_CHECK_URI</filename></link>.
|
||||||
|
<literallayout class='monospaced'>
|
||||||
|
UPSTREAM_CHECK_REGEX = "package_regex"
|
||||||
|
</literallayout>
|
||||||
|
</para>
|
||||||
|
</glossdef>
|
||||||
|
</glossentry>
|
||||||
|
|
||||||
|
<glossentry id='var-UPSTREAM_CHECK_URI'><glossterm>UPSTREAM_CHECK_URI</glossterm>
|
||||||
|
<info>
|
||||||
|
UPSTREAM_CHECK_URI[doc] = "The URL used by the package checking system to get the latest version of the package when source files are fetched from an upstream Git repository."
|
||||||
|
</info>
|
||||||
|
<glossdef>
|
||||||
|
<para role="glossdeffirst">
|
||||||
|
<!-- <para role="glossdeffirst"><imagedata fileref="figures/define-generic.png" /> -->
|
||||||
|
When the
|
||||||
|
<link linkend='ref-classes-distrodata'><filename>distrodata</filename></link>
|
||||||
|
class is enabled globally, you can perform a per-recipe
|
||||||
|
check for what the latest upstream source code version is
|
||||||
|
by calling <filename>bitbake -c checkpkg</filename>
|
||||||
|
<replaceable>recipe</replaceable>.
|
||||||
|
If the source code is provided from tarballs, the latest
|
||||||
|
version is determined by fetching the directory listing
|
||||||
|
where the tarball is and attempting to find a later tarball.
|
||||||
|
When this approach does not work, you can use
|
||||||
|
<filename>UPSTREAM_CHECK_URI</filename> to
|
||||||
|
provide a different URI that contains the link to the
|
||||||
|
latest tarball.
|
||||||
|
<literallayout class='monospaced'>
|
||||||
|
UPSTREAM_CHECK_URI = "recipe_url"
|
||||||
|
</literallayout>
|
||||||
|
</para>
|
||||||
|
</glossdef>
|
||||||
|
</glossentry>
|
||||||
|
|
||||||
<glossentry id='var-USE_DEVFS'><glossterm>USE_DEVFS</glossterm>
|
<glossentry id='var-USE_DEVFS'><glossterm>USE_DEVFS</glossterm>
|
||||||
<info>
|
<info>
|
||||||
USE_DEVFS[doc] = "Determines if devtmpfs is used for /dev population."
|
USE_DEVFS[doc] = "Determines if devtmpfs is used for /dev population."
|
||||||
|
|||||||
@@ -690,6 +690,123 @@
|
|||||||
addtask do_deploy_setscene
|
addtask do_deploy_setscene
|
||||||
do_deploy[dirs] = "${DEPLOYDIR} ${B}"
|
do_deploy[dirs] = "${DEPLOYDIR} ${B}"
|
||||||
</literallayout>
|
</literallayout>
|
||||||
|
The following list explains the previous example:
|
||||||
|
<itemizedlist>
|
||||||
|
<listitem><para>
|
||||||
|
Adding "do_deploy" to <filename>SSTATETASKS</filename>
|
||||||
|
adds some required sstate-related processing, which is
|
||||||
|
implemented in the
|
||||||
|
<link linkend='ref-classes-sstate'><filename>sstate</filename></link>
|
||||||
|
class, to before and after the
|
||||||
|
<link linkend='ref-tasks-deploy'><filename>do_deploy</filename></link>
|
||||||
|
task.
|
||||||
|
</para></listitem>
|
||||||
|
<listitem><para>
|
||||||
|
The
|
||||||
|
<filename>do_deploy[sstate-inputdirs] = "${DEPLOYDIR}"</filename>
|
||||||
|
declares that <filename>do_deploy</filename> places its
|
||||||
|
output in <filename>${DEPLOYDIR}</filename> when run
|
||||||
|
normally (i.e. when not using the sstate cache).
|
||||||
|
This output becomes the input to the shared state cache.
|
||||||
|
</para></listitem>
|
||||||
|
<listitem><para>
|
||||||
|
The
|
||||||
|
<filename>do_deploy[sstate-outputdirs] = "${DEPLOY_DIR_IMAGE}"</filename>
|
||||||
|
line causes the contents of the shared state cache to be
|
||||||
|
copied to <filename>${DEPLOY_DIR_IMAGE}</filename>.
|
||||||
|
<note>
|
||||||
|
If <filename>do_deploy</filename> is not already in
|
||||||
|
the shared state cache or if its input checksum
|
||||||
|
(signature) has changed from when the output was
|
||||||
|
cached, the task will be run to populate the shared
|
||||||
|
state cache, after which the contents of the shared
|
||||||
|
state cache is copied to
|
||||||
|
<filename>${DEPLOY_DIR_IMAGE}</filename>.
|
||||||
|
If <filename>do_deploy</filename> is in the shared
|
||||||
|
state cache and its signature indicates that the
|
||||||
|
cached output is still valid (i.e. if no
|
||||||
|
relevant task inputs have changed), then the contents
|
||||||
|
of the shared state cache will be copied directly to
|
||||||
|
<filename>${DEPLOY_DIR_IMAGE}</filename> by the
|
||||||
|
<filename>do_deploy_setscene</filename> task instead,
|
||||||
|
skipping the <filename>do_deploy</filename> task.
|
||||||
|
</note>
|
||||||
|
</para></listitem>
|
||||||
|
<listitem><para>
|
||||||
|
The following task definition is glue logic needed to make
|
||||||
|
the previous settings effective:
|
||||||
|
<literallayout class='monospaced'>
|
||||||
|
python do_deploy_setscene () {
|
||||||
|
sstate_setscene(d)
|
||||||
|
}
|
||||||
|
addtask do_deploy_setscene
|
||||||
|
</literallayout>
|
||||||
|
<filename>sstate_setscene()</filename> takes the flags
|
||||||
|
above as input and accelerates the
|
||||||
|
<filename>do_deploy</filename> task through the
|
||||||
|
shared state cache if possible.
|
||||||
|
If the task was accelerated,
|
||||||
|
<filename>sstate_setscene()</filename> returns True.
|
||||||
|
Otherwise, it returns False, and the normal
|
||||||
|
<filename>do_deploy</filename> task runs.
|
||||||
|
For more information, see the
|
||||||
|
"<ulink url='&YOCTO_DOCS_BB_URL;#setscene'>setscene</ulink>"
|
||||||
|
section in the BitBake User Manual.
|
||||||
|
</para></listitem>
|
||||||
|
<listitem><para>
|
||||||
|
The
|
||||||
|
<filename>do_deploy[dirs] = "${DEPLOYDIR} ${B}"</filename>
|
||||||
|
line creates <filename>${DEPLOYDIR}</filename> and
|
||||||
|
<filename>${B}</filename> before the
|
||||||
|
<filename>do_deploy</filename> task runs.
|
||||||
|
For more information, see the
|
||||||
|
"<ulink url='&YOCTO_DOCS_BB_URL;#variable-flags'>Variable Flags</ulink>"
|
||||||
|
section in the BitBake User Manual.
|
||||||
|
<note>
|
||||||
|
In cases where
|
||||||
|
<filename>sstate-inputdirs</filename> and
|
||||||
|
<filename>sstate-outputdirs</filename> would be the
|
||||||
|
same, you can use
|
||||||
|
<filename>sstate-plaindirs</filename>.
|
||||||
|
For example, to preserve the
|
||||||
|
<filename>${PKGD}</filename> and
|
||||||
|
<filename>${PKGDEST}</filename> output from the
|
||||||
|
<link linkend='ref-tasks-package'><filename>do_package</filename></link>
|
||||||
|
task, use the following:
|
||||||
|
<literallayout class='monospaced'>
|
||||||
|
do_package[sstate-plaindirs] = "${PKGD} ${PKGDEST}"
|
||||||
|
</literallayout>
|
||||||
|
</note>
|
||||||
|
</para></listitem>
|
||||||
|
<listitem><para>
|
||||||
|
<filename>sstate-inputdirs</filename> and
|
||||||
|
<filename>sstate-outputdirs</filename> can also be used
|
||||||
|
with multiple directories.
|
||||||
|
For example, the following declares
|
||||||
|
<filename>PKGDESTWORK</filename> and
|
||||||
|
<filename>SHLIBWORK</filename> as shared state
|
||||||
|
input directories, which populates the shared state
|
||||||
|
cache, and <filename>PKGDATA_DIR</filename> and
|
||||||
|
<filename>SHLIBSDIR</filename> as the corresponding
|
||||||
|
shared state output directories:
|
||||||
|
<literallayout class='monospaced'>
|
||||||
|
do_package[sstate-inputdirs] = "${PKGDESTWORK} ${SHLIBSWORKDIR}"
|
||||||
|
do_package[sstate-outputdirs] = "${PKGDATA_DIR} ${SHLIBSDIR}"
|
||||||
|
</literallayout>
|
||||||
|
</para></listitem>
|
||||||
|
<listitem><para>
|
||||||
|
These methods also include the ability to take a lockfile
|
||||||
|
when manipulating shared state directory structures,
|
||||||
|
for cases where file additions or removals are sensitive:
|
||||||
|
<literallayout class='monospaced'>
|
||||||
|
do_package[sstate-lockfile] = "${PACKAGELOCK}"
|
||||||
|
</literallayout>
|
||||||
|
</para></listitem>
|
||||||
|
</itemizedlist>
|
||||||
|
</para>
|
||||||
|
|
||||||
|
<!--
|
||||||
|
<para>
|
||||||
In this example, we add some extra flags to the task, a name field ("deploy"), an
|
In this example, we add some extra flags to the task, a name field ("deploy"), an
|
||||||
input directory where the task sends data, and the output
|
input directory where the task sends data, and the output
|
||||||
directory where the data from the task should eventually be copied.
|
directory where the data from the task should eventually be copied.
|
||||||
@@ -713,6 +830,7 @@
|
|||||||
shared state directory structures since some cases are sensitive to file
|
shared state directory structures since some cases are sensitive to file
|
||||||
additions or removals.
|
additions or removals.
|
||||||
</para>
|
</para>
|
||||||
|
-->
|
||||||
|
|
||||||
<para>
|
<para>
|
||||||
Behind the scenes, the shared state code works by looking in
|
Behind the scenes, the shared state code works by looking in
|
||||||
@@ -722,7 +840,7 @@
|
|||||||
Here is an example:
|
Here is an example:
|
||||||
<literallayout class='monospaced'>
|
<literallayout class='monospaced'>
|
||||||
SSTATE_MIRRORS ?= "\
|
SSTATE_MIRRORS ?= "\
|
||||||
file://.* http://someserver.tld/share/sstate/PATH \n \
|
file://.* http://someserver.tld/share/sstate/PATH;downloadfilename=PATH \n \
|
||||||
file://.* file:///some/local/dir/sstate/PATH"
|
file://.* file:///some/local/dir/sstate/PATH"
|
||||||
</literallayout>
|
</literallayout>
|
||||||
<note>
|
<note>
|
||||||
|
|||||||
@@ -951,6 +951,19 @@
|
|||||||
* PR changed from "r0" to "r1"
|
* PR changed from "r0" to "r1"
|
||||||
* PV changed from "0.1.10" to "0.1.12"
|
* PV changed from "0.1.10" to "0.1.12"
|
||||||
</literallayout>
|
</literallayout>
|
||||||
|
<note>
|
||||||
|
The <filename>buildhistory-diff</filename> tool requires
|
||||||
|
the <filename>GitPython</filename> package.
|
||||||
|
Be sure to install it using Pip3 as follows:
|
||||||
|
<literallayout class='monospaced'>
|
||||||
|
$ pip3 install GitPython --user
|
||||||
|
</literallayout>
|
||||||
|
Alternatively, you can install
|
||||||
|
<filename>python3-git</filename> using the appropriate
|
||||||
|
distribution package manager (e.g.
|
||||||
|
<filename>apt-get</filename>, <filename>dnf</filename>, or
|
||||||
|
<filename>zipper</filename>).
|
||||||
|
</note>
|
||||||
</para>
|
</para>
|
||||||
|
|
||||||
<para>
|
<para>
|
||||||
|
|||||||
@@ -205,7 +205,7 @@
|
|||||||
For information about the application development workflow that
|
For information about the application development workflow that
|
||||||
uses the Eclipse IDE and for a detailed example of how to install
|
uses the Eclipse IDE and for a detailed example of how to install
|
||||||
and configure the Eclipse Yocto Project Plug-in, see the
|
and configure the Eclipse Yocto Project Plug-in, see the
|
||||||
"<link link='sdk-developing-applications-using-eclipse'>Developing Applications Using <trademark class='trade'>Eclipse</trademark></link>"
|
"<link linkend='sdk-developing-applications-using-eclipse'>Developing Applications Using <trademark class='trade'>Eclipse</trademark></link>"
|
||||||
section.
|
section.
|
||||||
</para>
|
</para>
|
||||||
</section>
|
</section>
|
||||||
@@ -287,7 +287,7 @@
|
|||||||
<orderedlist>
|
<orderedlist>
|
||||||
<listitem><para><emphasis>Install the SDK for your target hardware:</emphasis>
|
<listitem><para><emphasis>Install the SDK for your target hardware:</emphasis>
|
||||||
For information on how to install the SDK, see the
|
For information on how to install the SDK, see the
|
||||||
"<link url='sdk-installing-the-sdk'>Installing the SDK</link>"
|
"<link linkend='sdk-installing-the-sdk'>Installing the SDK</link>"
|
||||||
section.</para></listitem>
|
section.</para></listitem>
|
||||||
<listitem><para><emphasis>Download the Target Image:</emphasis>
|
<listitem><para><emphasis>Download the Target Image:</emphasis>
|
||||||
The Yocto Project supports several target architectures
|
The Yocto Project supports several target architectures
|
||||||
@@ -320,7 +320,7 @@
|
|||||||
To use the root filesystem in QEMU, you
|
To use the root filesystem in QEMU, you
|
||||||
need to extract it.
|
need to extract it.
|
||||||
See the
|
See the
|
||||||
"<link url='sdk-extracting-the-root-filesystem'>Extracting the Root Filesystem</link>"
|
"<link linkend='sdk-extracting-the-root-filesystem'>Extracting the Root Filesystem</link>"
|
||||||
section for information on how to extract the root
|
section for information on how to extract the root
|
||||||
filesystem.
|
filesystem.
|
||||||
</note>
|
</note>
|
||||||
|
|||||||
@@ -36,6 +36,21 @@
|
|||||||
<date>April 2016</date>
|
<date>April 2016</date>
|
||||||
<revremark>Released with the Yocto Project 2.1 Release.</revremark>
|
<revremark>Released with the Yocto Project 2.1 Release.</revremark>
|
||||||
</revision>
|
</revision>
|
||||||
|
<revision>
|
||||||
|
<revnumber>2.1.1</revnumber>
|
||||||
|
<date>August 2016</date>
|
||||||
|
<revremark>Released with the Yocto Project 2.1.1 Release.</revremark>
|
||||||
|
</revision>
|
||||||
|
<revision>
|
||||||
|
<revnumber>2.1.2</revnumber>
|
||||||
|
<date>December 2016</date>
|
||||||
|
<revremark>Released with the Yocto Project 2.1.2 Release.</revremark>
|
||||||
|
</revision>
|
||||||
|
<revision>
|
||||||
|
<revnumber>2.1.3</revnumber>
|
||||||
|
<date>June 2017</date>
|
||||||
|
<revremark>Released with the Yocto Project 2.1.3 Release.</revremark>
|
||||||
|
</revision>
|
||||||
</revhistory>
|
</revhistory>
|
||||||
|
|
||||||
<copyright>
|
<copyright>
|
||||||
@@ -48,12 +63,46 @@
|
|||||||
Permission is granted to copy, distribute and/or modify this document under
|
Permission is granted to copy, distribute and/or modify this document under
|
||||||
the terms of the <ulink type="http" url="http://creativecommons.org/licenses/by-sa/2.0/uk/">Creative Commons Attribution-Share Alike 2.0 UK: England & Wales</ulink> as published by Creative Commons.
|
the terms of the <ulink type="http" url="http://creativecommons.org/licenses/by-sa/2.0/uk/">Creative Commons Attribution-Share Alike 2.0 UK: England & Wales</ulink> as published by Creative Commons.
|
||||||
</para>
|
</para>
|
||||||
<note>
|
<note><title>Manual Notes</title>
|
||||||
For the latest version of this manual associated with this
|
<itemizedlist>
|
||||||
Yocto Project release, see the
|
<listitem><para>
|
||||||
<ulink url='&YOCTO_DOCS_SDK_URL;'>Yocto Project Software Development Kit (SDK) Developer's Guide</ulink>
|
This version of the
|
||||||
from the Yocto Project website.
|
<emphasis>Yocto Project Software Development Kit (SDK) Developer's Guide</emphasis>
|
||||||
</note>
|
is for the &YOCTO_DOC_VERSION; release of the
|
||||||
|
Yocto Project.
|
||||||
|
To be sure you have the latest version of the manual
|
||||||
|
for this release, go to the
|
||||||
|
<ulink url='&YOCTO_HOME_URL;/documentation'>Yocto Project documentation page</ulink>
|
||||||
|
and select the manual from that site.
|
||||||
|
Manuals from the site are more up-to-date than manuals
|
||||||
|
derived from the Yocto Project released TAR files.
|
||||||
|
</para></listitem>
|
||||||
|
<listitem><para>
|
||||||
|
If you located this manual through a web search, the
|
||||||
|
version of the manual might not be the one you want
|
||||||
|
(e.g. the search might have returned a manual much
|
||||||
|
older than the Yocto Project version with which you
|
||||||
|
are working).
|
||||||
|
You can see all Yocto Project major releases by
|
||||||
|
visiting the
|
||||||
|
<ulink url='&YOCTO_WIKI_URL;/wiki/Releases'>Releases</ulink>
|
||||||
|
page.
|
||||||
|
If you need a version of this manual for a different
|
||||||
|
Yocto Project release, visit the
|
||||||
|
<ulink url='&YOCTO_HOME_URL;/documentation'>Yocto Project documentation page</ulink>
|
||||||
|
and select the manual set by using the
|
||||||
|
"ACTIVE RELEASES DOCUMENTATION" or "DOCUMENTS ARCHIVE"
|
||||||
|
pull-down menus.
|
||||||
|
</para></listitem>
|
||||||
|
<listitem><para>
|
||||||
|
To report any inaccuracies or problems with this
|
||||||
|
manual, send an email to the Yocto Project
|
||||||
|
discussion group at
|
||||||
|
<filename>yocto@yoctoproject.com</filename> or log into
|
||||||
|
the freenode <filename>#yocto</filename> channel.
|
||||||
|
</para></listitem>
|
||||||
|
</itemizedlist>
|
||||||
|
</note>
|
||||||
|
|
||||||
</legalnotice>
|
</legalnotice>
|
||||||
|
|
||||||
|
|||||||
@@ -226,7 +226,7 @@
|
|||||||
</literallayout></para></listitem>
|
</literallayout></para></listitem>
|
||||||
<listitem><para><emphasis>Populate the directory:</emphasis>
|
<listitem><para><emphasis>Populate the directory:</emphasis>
|
||||||
Create <filename>hello.c</filename>, <filename>Makefile.am</filename>,
|
Create <filename>hello.c</filename>, <filename>Makefile.am</filename>,
|
||||||
and <filename>configure.in</filename> files as follows:
|
and <filename>configure.ac</filename> files as follows:
|
||||||
<itemizedlist>
|
<itemizedlist>
|
||||||
<listitem><para>For <filename>hello.c</filename>, include
|
<listitem><para>For <filename>hello.c</filename>, include
|
||||||
these lines:
|
these lines:
|
||||||
@@ -247,8 +247,8 @@
|
|||||||
<listitem><para>For <filename>configure.in</filename>,
|
<listitem><para>For <filename>configure.in</filename>,
|
||||||
include these lines:
|
include these lines:
|
||||||
<literallayout class='monospaced'>
|
<literallayout class='monospaced'>
|
||||||
AC_INIT(hello.c)
|
AC_INIT(hello,0.1)
|
||||||
AM_INIT_AUTOMAKE(hello,0.1)
|
AM_INIT_AUTOMAKE([foreign])
|
||||||
AC_PROG_CC
|
AC_PROG_CC
|
||||||
AC_PROG_INSTALL
|
AC_PROG_INSTALL
|
||||||
AC_OUTPUT(Makefile)
|
AC_OUTPUT(Makefile)
|
||||||
|
|||||||
@@ -46,6 +46,21 @@
|
|||||||
<date>April 2016</date>
|
<date>April 2016</date>
|
||||||
<revremark>Released with the Yocto Project 2.1 Release.</revremark>
|
<revremark>Released with the Yocto Project 2.1 Release.</revremark>
|
||||||
</revision>
|
</revision>
|
||||||
|
<revision>
|
||||||
|
<revnumber>2.1.1</revnumber>
|
||||||
|
<date>August 2016</date>
|
||||||
|
<revremark>Released with the Yocto Project 2.1.1 Release.</revremark>
|
||||||
|
</revision>
|
||||||
|
<revision>
|
||||||
|
<revnumber>2.1.2</revnumber>
|
||||||
|
<date>December 2016</date>
|
||||||
|
<revremark>Released with the Yocto Project 2.1.2 Release.</revremark>
|
||||||
|
</revision>
|
||||||
|
<revision>
|
||||||
|
<revnumber>2.1.3</revnumber>
|
||||||
|
<date>June 2017</date>
|
||||||
|
<revremark>Released with the Yocto Project 2.1.3 Release.</revremark>
|
||||||
|
</revision>
|
||||||
</revhistory>
|
</revhistory>
|
||||||
|
|
||||||
<copyright>
|
<copyright>
|
||||||
@@ -58,12 +73,46 @@
|
|||||||
Permission is granted to copy, distribute and/or modify this document under
|
Permission is granted to copy, distribute and/or modify this document under
|
||||||
the terms of the <ulink type="http" url="http://creativecommons.org/licenses/by-sa/2.0/uk/">Creative Commons Attribution-Share Alike 2.0 UK: England & Wales</ulink> as published by Creative Commons.
|
the terms of the <ulink type="http" url="http://creativecommons.org/licenses/by-sa/2.0/uk/">Creative Commons Attribution-Share Alike 2.0 UK: England & Wales</ulink> as published by Creative Commons.
|
||||||
</para>
|
</para>
|
||||||
<note>
|
<note><title>Manual Notes</title>
|
||||||
For the latest version of this manual associated with this
|
<itemizedlist>
|
||||||
Yocto Project release, see the
|
<listitem><para>
|
||||||
<ulink url='&YOCTO_DOCS_TOAST_URL;'>Toaster User Manual</ulink>
|
This version of the
|
||||||
from the Yocto Project website.
|
<emphasis>Toaster User Manual</emphasis>
|
||||||
</note>
|
is for the &YOCTO_DOC_VERSION; release of the
|
||||||
|
Yocto Project.
|
||||||
|
To be sure you have the latest version of the manual
|
||||||
|
for this release, go to the
|
||||||
|
<ulink url='&YOCTO_HOME_URL;/documentation'>Yocto Project documentation page</ulink>
|
||||||
|
and select the manual from that site.
|
||||||
|
Manuals from the site are more up-to-date than manuals
|
||||||
|
derived from the Yocto Project released TAR files.
|
||||||
|
</para></listitem>
|
||||||
|
<listitem><para>
|
||||||
|
If you located this manual through a web search, the
|
||||||
|
version of the manual might not be the one you want
|
||||||
|
(e.g. the search might have returned a manual much
|
||||||
|
older than the Yocto Project version with which you
|
||||||
|
are working).
|
||||||
|
You can see all Yocto Project major releases by
|
||||||
|
visiting the
|
||||||
|
<ulink url='&YOCTO_WIKI_URL;/wiki/Releases'>Releases</ulink>
|
||||||
|
page.
|
||||||
|
If you need a version of this manual for a different
|
||||||
|
Yocto Project release, visit the
|
||||||
|
<ulink url='&YOCTO_HOME_URL;/documentation'>Yocto Project documentation page</ulink>
|
||||||
|
and select the manual set by using the
|
||||||
|
"ACTIVE RELEASES DOCUMENTATION" or "DOCUMENTS ARCHIVE"
|
||||||
|
pull-down menus.
|
||||||
|
</para></listitem>
|
||||||
|
<listitem><para>
|
||||||
|
To report any inaccuracies or problems with this
|
||||||
|
manual, send an email to the Yocto Project
|
||||||
|
discussion group at
|
||||||
|
<filename>yocto@yoctoproject.com</filename> or log into
|
||||||
|
the freenode <filename>#yocto</filename> channel.
|
||||||
|
</para></listitem>
|
||||||
|
</itemizedlist>
|
||||||
|
</note>
|
||||||
|
|
||||||
</legalnotice>
|
</legalnotice>
|
||||||
|
|
||||||
|
|||||||
@@ -2,32 +2,32 @@
|
|||||||
# This style is for manual folders like "yocto-project-qs" and "poky-ref-manual".
|
# This style is for manual folders like "yocto-project-qs" and "poky-ref-manual".
|
||||||
# This is the old way that did it. Can't do that now that we have "bitbake-user-manual" strings
|
# This is the old way that did it. Can't do that now that we have "bitbake-user-manual" strings
|
||||||
# in the mega-manual.
|
# in the mega-manual.
|
||||||
# s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.1\/[a-z]*-[a-z]*-[a-z]*\/[a-z]*-[a-z]*-[a-z]*.html#/\"link\" href=\"#/g
|
# s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.1.3\/[a-z]*-[a-z]*-[a-z]*\/[a-z]*-[a-z]*-[a-z]*.html#/\"link\" href=\"#/g
|
||||||
s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.1\/yocto-project-qs\/yocto-project-qs.html#/\"link\" href=\"#/g
|
s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.1.3\/yocto-project-qs\/yocto-project-qs.html#/\"link\" href=\"#/g
|
||||||
s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.1\/poky-ref-manual\/poky-ref-manual.html#/\"link\" href=\"#/g
|
s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.1.3\/poky-ref-manual\/poky-ref-manual.html#/\"link\" href=\"#/g
|
||||||
|
|
||||||
# Processes all other manuals (<word>-<word> style) except for the BitBake User Manual because
|
# Processes all other manuals (<word>-<word> style) except for the BitBake User Manual because
|
||||||
# it is not included in the mega-manual.
|
# it is not included in the mega-manual.
|
||||||
# This style is for manual folders that use two word, which is the standard now (e.g. "ref-manual").
|
# This style is for manual folders that use two word, which is the standard now (e.g. "ref-manual").
|
||||||
# This was the one-liner that worked before we introduced the BitBake User Manual, which is
|
# This was the one-liner that worked before we introduced the BitBake User Manual, which is
|
||||||
# not in the mega-manual.
|
# not in the mega-manual.
|
||||||
# s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.1\/[a-z]*-[a-z]*\/[a-z]*-[a-z]*.html#/\"link\" href=\"#/g
|
# s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.1.3\/[a-z]*-[a-z]*\/[a-z]*-[a-z]*.html#/\"link\" href=\"#/g
|
||||||
|
|
||||||
s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.1\/sdk-manual\/sdk-manual.html#/\"link\" href=\"#/g
|
s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.1.3\/sdk-manual\/sdk-manual.html#/\"link\" href=\"#/g
|
||||||
s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.1\/bsp-guide\/bsp-guide.html#/\"link\" href=\"#/g
|
s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.1.3\/bsp-guide\/bsp-guide.html#/\"link\" href=\"#/g
|
||||||
s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.1\/dev-manual\/dev-manual.html#/\"link\" href=\"#/g
|
s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.1.3\/dev-manual\/dev-manual.html#/\"link\" href=\"#/g
|
||||||
s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.1\/kernel-dev\/kernel-dev.html#/\"link\" href=\"#/g
|
s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.1.3\/kernel-dev\/kernel-dev.html#/\"link\" href=\"#/g
|
||||||
s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.1\/profile-manual\/profile-manual.html#/\"link\" href=\"#/g
|
s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.1.3\/profile-manual\/profile-manual.html#/\"link\" href=\"#/g
|
||||||
s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.1\/ref-manual\/ref-manual.html#/\"link\" href=\"#/g
|
s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.1.3\/ref-manual\/ref-manual.html#/\"link\" href=\"#/g
|
||||||
s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.1\/toaster-manual\/toaster-manual.html#/\"link\" href=\"#/g
|
s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.1.3\/toaster-manual\/toaster-manual.html#/\"link\" href=\"#/g
|
||||||
s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.1\/yocto-project-qs\/yocto-project-qs.html#/\"link\" href=\"#/g
|
s/\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.1.3\/yocto-project-qs\/yocto-project-qs.html#/\"link\" href=\"#/g
|
||||||
|
|
||||||
# Process cases where just an external manual is referenced without an id anchor
|
# Process cases where just an external manual is referenced without an id anchor
|
||||||
s/<a class=\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.1\/yocto-project-qs\/yocto-project-qs.html\" target=\"_top\">Yocto Project Quick Start<\/a>/Yocto Project Quick Start/g
|
s/<a class=\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.1.3\/yocto-project-qs\/yocto-project-qs.html\" target=\"_top\">Yocto Project Quick Start<\/a>/Yocto Project Quick Start/g
|
||||||
s/<a class=\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.1\/dev-manual\/dev-manual.html\" target=\"_top\">Yocto Project Development Manual<\/a>/Yocto Project Development Manual/g
|
s/<a class=\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.1.3\/dev-manual\/dev-manual.html\" target=\"_top\">Yocto Project Development Manual<\/a>/Yocto Project Development Manual/g
|
||||||
s/<a class=\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.1\/sdk-manual\/sdk-manual.html\" target=\"_top\">Yocto Project Software Development Kit (SDK) Developer's Guide<\/a>/Yocto Project Software Development Kit (SDK) Developer's Guide/g
|
s/<a class=\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.1.3\/sdk-manual\/sdk-manual.html\" target=\"_top\">Yocto Project Software Development Kit (SDK) Developer's Guide<\/a>/Yocto Project Software Development Kit (SDK) Developer's Guide/g
|
||||||
s/<a class=\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.1\/bsp-guide\/bsp-guide.html\" target=\"_top\">Yocto Project Board Support Package (BSP) Developer's Guide<\/a>/Yocto Project Board Support Package (BSP) Developer's Guide/g
|
s/<a class=\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.1.3\/bsp-guide\/bsp-guide.html\" target=\"_top\">Yocto Project Board Support Package (BSP) Developer's Guide<\/a>/Yocto Project Board Support Package (BSP) Developer's Guide/g
|
||||||
s/<a class=\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.1\/profile-manual\/profile-manual.html\" target=\"_top\">Yocto Project Profiling and Tracing Manual<\/a>/Yocto Project Profiling and Tracing Manual/g
|
s/<a class=\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.1.3\/profile-manual\/profile-manual.html\" target=\"_top\">Yocto Project Profiling and Tracing Manual<\/a>/Yocto Project Profiling and Tracing Manual/g
|
||||||
s/<a class=\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.1\/kernel-dev\/kernel-dev.html\" target=\"_top\">Yocto Project Linux Kernel Development Manual<\/a>/Yocto Project Linux Kernel Development Manual/g
|
s/<a class=\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.1.3\/kernel-dev\/kernel-dev.html\" target=\"_top\">Yocto Project Linux Kernel Development Manual<\/a>/Yocto Project Linux Kernel Development Manual/g
|
||||||
s/<a class=\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.1\/ref-manual\/ref-manual.html\" target=\"_top\">Yocto Project Reference Manual<\/a>/Yocto Project Reference Manual/g
|
s/<a class=\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.1.3\/ref-manual\/ref-manual.html\" target=\"_top\">Yocto Project Reference Manual<\/a>/Yocto Project Reference Manual/g
|
||||||
s/<a class=\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.1\/toaster-manual\/toaster-manual.html\" target=\"_top\">Toaster User Manual<\/a>/Toaster User Manual/g
|
s/<a class=\"ulink\" href=\"http:\/\/www.yoctoproject.org\/docs\/2.1.3\/toaster-manual\/toaster-manual.html\" target=\"_top\">Toaster User Manual<\/a>/Toaster User Manual/g
|
||||||
|
|||||||
@@ -16,12 +16,46 @@
|
|||||||
Permission is granted to copy, distribute and/or modify this document under
|
Permission is granted to copy, distribute and/or modify this document under
|
||||||
the terms of the <ulink type="http" url="http://creativecommons.org/licenses/by-sa/2.0/uk/">Creative Commons Attribution-Share Alike 2.0 UK: England & Wales</ulink> as published by Creative Commons.
|
the terms of the <ulink type="http" url="http://creativecommons.org/licenses/by-sa/2.0/uk/">Creative Commons Attribution-Share Alike 2.0 UK: England & Wales</ulink> as published by Creative Commons.
|
||||||
</para>
|
</para>
|
||||||
<note>
|
<note><title>Manual Notes</title>
|
||||||
For the latest version of this manual associated with this
|
<itemizedlist>
|
||||||
Yocto Project release, see the
|
<listitem><para>
|
||||||
<ulink url='&YOCTO_DOCS_QS_URL;'>Yocto Project Quick Start</ulink>
|
This version of the
|
||||||
from the Yocto Project website.
|
<emphasis>Yocto Project Quick Start</emphasis>
|
||||||
</note>
|
is for the &YOCTO_DOC_VERSION; release of the
|
||||||
|
Yocto Project.
|
||||||
|
To be sure you have the latest version of the manual
|
||||||
|
for this release, go to the
|
||||||
|
<ulink url='&YOCTO_HOME_URL;/documentation'>Yocto Project documentation page</ulink>
|
||||||
|
and select the manual from that site.
|
||||||
|
Manuals from the site are more up-to-date than manuals
|
||||||
|
derived from the Yocto Project released TAR files.
|
||||||
|
</para></listitem>
|
||||||
|
<listitem><para>
|
||||||
|
If you located this manual through a web search, the
|
||||||
|
version of the manual might not be the one you want
|
||||||
|
(e.g. the search might have returned a manual much
|
||||||
|
older than the Yocto Project version with which you
|
||||||
|
are working).
|
||||||
|
You can see all Yocto Project major releases by
|
||||||
|
visiting the
|
||||||
|
<ulink url='&YOCTO_WIKI_URL;/wiki/Releases'>Releases</ulink>
|
||||||
|
page.
|
||||||
|
If you need a version of this manual for a different
|
||||||
|
Yocto Project release, visit the
|
||||||
|
<ulink url='&YOCTO_HOME_URL;/documentation'>Yocto Project documentation page</ulink>
|
||||||
|
and select the manual set by using the
|
||||||
|
"ACTIVE RELEASES DOCUMENTATION" or "DOCUMENTS ARCHIVE"
|
||||||
|
pull-down menus.
|
||||||
|
</para></listitem>
|
||||||
|
<listitem><para>
|
||||||
|
To report any inaccuracies or problems with this
|
||||||
|
manual, send an email to the Yocto Project
|
||||||
|
discussion group at
|
||||||
|
<filename>yocto@yoctoproject.com</filename> or log into
|
||||||
|
the freenode <filename>#yocto</filename> channel.
|
||||||
|
</para></listitem>
|
||||||
|
</itemizedlist>
|
||||||
|
</note>
|
||||||
</legalnotice>
|
</legalnotice>
|
||||||
|
|
||||||
|
|
||||||
@@ -689,21 +723,37 @@
|
|||||||
</para>
|
</para>
|
||||||
</note>
|
</note>
|
||||||
</para></listitem>
|
</para></listitem>
|
||||||
<listitem><para><emphasis>Build a Minimal Image for MinnowBoard MAX:</emphasis>
|
<listitem><para><emphasis>Build an Image for MinnowBoard MAX:</emphasis>
|
||||||
Use the following command to build the minimal image for
|
The type of image you build depends on your goals.
|
||||||
|
For example, the previous build created a
|
||||||
|
<filename>core-image-sato</filename> image, which is an
|
||||||
|
image with Sato support.
|
||||||
|
It is possible to build many image types for the
|
||||||
MinnowBoard MAX.
|
MinnowBoard MAX.
|
||||||
Because configuration changes are minimal to set up for
|
Some possibilities are <filename>core-image-base</filename>,
|
||||||
this second build, the OpenEmbedded build system can
|
which is a console-only image.
|
||||||
|
Another choice could be a
|
||||||
|
<filename>core-image-full-cmdline</filename>, which is
|
||||||
|
another console-only image but has more full-features
|
||||||
|
Linux system functionality installed.
|
||||||
|
For types of images you can build using the Yocto
|
||||||
|
Project, see the
|
||||||
|
"<ulink url='&YOCTO_DOCS_REF_URL;#ref-images'>Images</ulink>"
|
||||||
|
chapter in the Yocto Project Reference Manual.</para>
|
||||||
|
<para>Because configuration changes are minimal to set up
|
||||||
|
for this second build, the OpenEmbedded build system can
|
||||||
re-use files from previous builds as much as possible.
|
re-use files from previous builds as much as possible.
|
||||||
Re-using files means this second build will be much faster
|
Re-using files means this second build will be much faster
|
||||||
than an initial build.
|
than an initial build.
|
||||||
|
For this example, the <filename>core-image-base</filename>
|
||||||
|
image is built:
|
||||||
<literallayout class='monospaced'>
|
<literallayout class='monospaced'>
|
||||||
$ bitbake core-image-minimal
|
$ bitbake core-image-base
|
||||||
</literallayout>
|
</literallayout>
|
||||||
Once the build completes, the resulting basic console image
|
Once the build completes, the resulting console-only image
|
||||||
is located in the Build Directory here:
|
is located in the Build Directory here:
|
||||||
<literallayout class='monospaced'>
|
<literallayout class='monospaced'>
|
||||||
tmp/deploy/images/intel-corei7-64/core-image-minimal-intel-corei7-64.hddimg
|
tmp/deploy/images/intel-corei7-64/core-image-base-intel-corei7-64.hddimg
|
||||||
</literallayout>
|
</literallayout>
|
||||||
</para></listitem>
|
</para></listitem>
|
||||||
<listitem><para><emphasis>Write the Image:</emphasis>
|
<listitem><para><emphasis>Write the Image:</emphasis>
|
||||||
@@ -714,7 +764,7 @@
|
|||||||
<filename>scripts/contrib/mkefidisk.sh</filename>:
|
<filename>scripts/contrib/mkefidisk.sh</filename>:
|
||||||
<literallayout class='monospaced'>
|
<literallayout class='monospaced'>
|
||||||
$ sudo $HOME/source/poky/scripts/contrib/mkefidisk.sh <replaceable>HOST_DEVICE</replaceable> \
|
$ sudo $HOME/source/poky/scripts/contrib/mkefidisk.sh <replaceable>HOST_DEVICE</replaceable> \
|
||||||
tmp/deploy/images/intel-corei7-64/core-image-minimal-intel-corei7-64.hddimg <replaceable>TARGET_DEVICE</replaceable>
|
tmp/deploy/images/intel-corei7-64/core-image-base-intel-corei7-64.hddimg <replaceable>TARGET_DEVICE</replaceable>
|
||||||
</literallayout>
|
</literallayout>
|
||||||
In the previous command,
|
In the previous command,
|
||||||
<replaceable>HOST_DEVICE</replaceable> is the device node
|
<replaceable>HOST_DEVICE</replaceable> is the device node
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
DISTRO = "poky"
|
DISTRO = "poky"
|
||||||
DISTRO_NAME = "Poky (Yocto Project Reference Distro)"
|
DISTRO_NAME = "Poky (Yocto Project Reference Distro)"
|
||||||
DISTRO_VERSION = "2.1.1"
|
DISTRO_VERSION = "2.1.3"
|
||||||
DISTRO_CODENAME = "krogoth"
|
DISTRO_CODENAME = "krogoth"
|
||||||
SDK_VENDOR = "-pokysdk"
|
SDK_VENDOR = "-pokysdk"
|
||||||
SDK_VERSION := "${@'${DISTRO_VERSION}'.replace('snapshot-${DATE}','snapshot')}"
|
SDK_VERSION := "${@'${DISTRO_VERSION}'.replace('snapshot-${DATE}','snapshot')}"
|
||||||
|
|||||||
@@ -192,7 +192,7 @@ PATCHRESOLVE = "noop"
|
|||||||
# files and damages the build in ways which may not be easily recoverable.
|
# files and damages the build in ways which may not be easily recoverable.
|
||||||
# It's necesary to monitor /tmp, if there is no space left the build will fail
|
# It's necesary to monitor /tmp, if there is no space left the build will fail
|
||||||
# with very exotic errors.
|
# with very exotic errors.
|
||||||
BB_DISKMON_DIRS = "\
|
BB_DISKMON_DIRS ??= "\
|
||||||
STOPTASKS,${TMPDIR},1G,100K \
|
STOPTASKS,${TMPDIR},1G,100K \
|
||||||
STOPTASKS,${DL_DIR},1G,100K \
|
STOPTASKS,${DL_DIR},1G,100K \
|
||||||
STOPTASKS,${SSTATE_DIR},1G,100K \
|
STOPTASKS,${SSTATE_DIR},1G,100K \
|
||||||
|
|||||||
@@ -7,11 +7,11 @@ KBRANCH_mpc8315e-rdb = "standard/fsl-mpc8315e-rdb"
|
|||||||
KMACHINE_genericx86 ?= "common-pc"
|
KMACHINE_genericx86 ?= "common-pc"
|
||||||
KMACHINE_genericx86-64 ?= "common-pc-64"
|
KMACHINE_genericx86-64 ?= "common-pc-64"
|
||||||
|
|
||||||
SRCREV_machine_genericx86 ?= "304caa9480f19875e717faf3cad8cb7ecd758733"
|
SRCREV_machine_genericx86 ?= "f4d0900b2851e829e990e0f64b09ed3b8e355fae"
|
||||||
SRCREV_machine_genericx86-64 ?= "304caa9480f19875e717faf3cad8cb7ecd758733"
|
SRCREV_machine_genericx86-64 ?= "f4d0900b2851e829e990e0f64b09ed3b8e355fae"
|
||||||
SRCREV_machine_edgerouter ?= "79a31b9d23db126f8a6be3eb88fd683056a213f1"
|
SRCREV_machine_edgerouter ?= "f4d0900b2851e829e990e0f64b09ed3b8e355fae"
|
||||||
SRCREV_machine_beaglebone ?= "efb6ffb2ca96a364f916c9890ad023fc595e0e6e"
|
SRCREV_machine_beaglebone ?= "12532e753b50997690923e03edb3ac3368817a26"
|
||||||
SRCREV_machine_mpc8315e-rdb ?= "79a31b9d23db126f8a6be3eb88fd683056a213f1"
|
SRCREV_machine_mpc8315e-rdb ?= "f4d0900b2851e829e990e0f64b09ed3b8e355fae"
|
||||||
|
|
||||||
COMPATIBLE_MACHINE_genericx86 = "genericx86"
|
COMPATIBLE_MACHINE_genericx86 = "genericx86"
|
||||||
COMPATIBLE_MACHINE_genericx86-64 = "genericx86-64"
|
COMPATIBLE_MACHINE_genericx86-64 = "genericx86-64"
|
||||||
@@ -19,5 +19,8 @@ COMPATIBLE_MACHINE_edgerouter = "edgerouter"
|
|||||||
COMPATIBLE_MACHINE_beaglebone = "beaglebone"
|
COMPATIBLE_MACHINE_beaglebone = "beaglebone"
|
||||||
COMPATIBLE_MACHINE_mpc8315e-rdb = "mpc8315e-rdb"
|
COMPATIBLE_MACHINE_mpc8315e-rdb = "mpc8315e-rdb"
|
||||||
|
|
||||||
LINUX_VERSION_genericx86 = "4.1.18"
|
LINUX_VERSION_genericx86 = "4.1.33"
|
||||||
LINUX_VERSION_genericx86-64 = "4.1.18"
|
LINUX_VERSION_genericx86-64 = "4.1.33"
|
||||||
|
LINUX_VERSION_edgerouter = "4.1.33"
|
||||||
|
LINUX_VERSION_beaglebone = "4.1.33"
|
||||||
|
LINUX_VERSION_mpc8315e-rdb = "4.1.33"
|
||||||
|
|||||||
@@ -7,11 +7,11 @@ KBRANCH_edgerouter = "standard/edgerouter"
|
|||||||
KBRANCH_beaglebone = "standard/beaglebone"
|
KBRANCH_beaglebone = "standard/beaglebone"
|
||||||
KBRANCH_mpc8315e-rdb = "standard/fsl-mpc8315e-rdb"
|
KBRANCH_mpc8315e-rdb = "standard/fsl-mpc8315e-rdb"
|
||||||
|
|
||||||
SRCREV_machine_genericx86 ?= "3d2455f9da30f923c6bd69014fad4cc4ea738be6"
|
SRCREV_machine_genericx86 ?= "ca6a08bd7f86ebef11f763d26f787f7d65270473"
|
||||||
SRCREV_machine_genericx86-64 ?= "3d2455f9da30f923c6bd69014fad4cc4ea738be6"
|
SRCREV_machine_genericx86-64 ?= "ca6a08bd7f86ebef11f763d26f787f7d65270473"
|
||||||
SRCREV_machine_edgerouter ?= "ff4c4ef15b51f45b9106d71bf1f62fe7c02e63c2"
|
SRCREV_machine_edgerouter ?= "ca6a08bd7f86ebef11f763d26f787f7d65270473"
|
||||||
SRCREV_machine_beaglebone ?= "ff4c4ef15b51f45b9106d71bf1f62fe7c02e63c2"
|
SRCREV_machine_beaglebone ?= "ca6a08bd7f86ebef11f763d26f787f7d65270473"
|
||||||
SRCREV_machine_mpc8315e-rdb ?= "df00877ef9387b38b9601c82db57de2a1b23ce53"
|
SRCREV_machine_mpc8315e-rdb ?= "7fa42ad9a43ca4bb1e578e208ffeddae2d6150e2"
|
||||||
|
|
||||||
COMPATIBLE_MACHINE_genericx86 = "genericx86"
|
COMPATIBLE_MACHINE_genericx86 = "genericx86"
|
||||||
COMPATIBLE_MACHINE_genericx86-64 = "genericx86-64"
|
COMPATIBLE_MACHINE_genericx86-64 = "genericx86-64"
|
||||||
@@ -19,5 +19,8 @@ COMPATIBLE_MACHINE_edgerouter = "edgerouter"
|
|||||||
COMPATIBLE_MACHINE_beaglebone = "beaglebone"
|
COMPATIBLE_MACHINE_beaglebone = "beaglebone"
|
||||||
COMPATIBLE_MACHINE_mpc8315e-rdb = "mpc8315e-rdb"
|
COMPATIBLE_MACHINE_mpc8315e-rdb = "mpc8315e-rdb"
|
||||||
|
|
||||||
LINUX_VERSION_genericx86 = "4.4.3"
|
LINUX_VERSION_genericx86 = "4.4.26"
|
||||||
LINUX_VERSION_genericx86-64 = "4.4.3"
|
LINUX_VERSION_genericx86-64 = "4.4.26"
|
||||||
|
LINUX_VERSION_edgerouter = "4.4.26"
|
||||||
|
LINUX_VERSION_beaglebone = "4.4.26"
|
||||||
|
LINUX_VERSION_mpc8315e-rdb = "4.4.26"
|
||||||
|
|||||||
@@ -270,9 +270,10 @@ python do_unpack_and_patch() {
|
|||||||
return
|
return
|
||||||
ar_outdir = d.getVar('ARCHIVER_OUTDIR', True)
|
ar_outdir = d.getVar('ARCHIVER_OUTDIR', True)
|
||||||
ar_workdir = d.getVar('ARCHIVER_WORKDIR', True)
|
ar_workdir = d.getVar('ARCHIVER_WORKDIR', True)
|
||||||
|
pn = d.getVar('PN', True)
|
||||||
|
|
||||||
# The kernel class functions require it to be on work-shared, so we dont change WORKDIR
|
# The kernel class functions require it to be on work-shared, so we dont change WORKDIR
|
||||||
if not bb.data.inherits_class('kernel-yocto', d):
|
if not (bb.data.inherits_class('kernel-yocto', d) or pn.startswith('gcc-source')):
|
||||||
# Change the WORKDIR to make do_unpack do_patch run in another dir.
|
# Change the WORKDIR to make do_unpack do_patch run in another dir.
|
||||||
d.setVar('WORKDIR', ar_workdir)
|
d.setVar('WORKDIR', ar_workdir)
|
||||||
|
|
||||||
@@ -290,7 +291,7 @@ python do_unpack_and_patch() {
|
|||||||
oe.path.copytree(src, src_orig)
|
oe.path.copytree(src, src_orig)
|
||||||
|
|
||||||
# Make sure gcc and kernel sources are patched only once
|
# Make sure gcc and kernel sources are patched only once
|
||||||
if not ((d.getVar('SRC_URI', True) == "" or bb.data.inherits_class('kernel-yocto', d))):
|
if not (d.getVar('SRC_URI', True) == "" or (bb.data.inherits_class('kernel-yocto', d) or pn.startswith('gcc-source'))):
|
||||||
bb.build.exec_func('do_patch', d)
|
bb.build.exec_func('do_patch', d)
|
||||||
|
|
||||||
# Create the patches
|
# Create the patches
|
||||||
|
|||||||
@@ -229,6 +229,8 @@ python autotools_copy_aclocals () {
|
|||||||
}
|
}
|
||||||
autotools_copy_aclocals[vardepsexclude] += "MACHINE SDK_ARCH BUILD_ARCH SDK_OS BB_TASKDEPDATA"
|
autotools_copy_aclocals[vardepsexclude] += "MACHINE SDK_ARCH BUILD_ARCH SDK_OS BB_TASKDEPDATA"
|
||||||
|
|
||||||
|
CONFIGURE_FILES = "${S}/configure.in ${S}/configure.ac ${S}/config.h.in ${S}/acinclude.m4 Makefile.am"
|
||||||
|
|
||||||
autotools_do_configure() {
|
autotools_do_configure() {
|
||||||
# WARNING: gross hack follows:
|
# WARNING: gross hack follows:
|
||||||
# An autotools built package generally needs these scripts, however only
|
# An autotools built package generally needs these scripts, however only
|
||||||
|
|||||||
@@ -136,23 +136,21 @@ python base_do_fetch() {
|
|||||||
|
|
||||||
addtask unpack after do_fetch
|
addtask unpack after do_fetch
|
||||||
do_unpack[dirs] = "${WORKDIR}"
|
do_unpack[dirs] = "${WORKDIR}"
|
||||||
|
|
||||||
|
python () {
|
||||||
|
if d.getVar('S', True) != d.getVar('WORKDIR', True):
|
||||||
|
d.setVarFlag('do_unpack', 'cleandirs', '${S}')
|
||||||
|
else:
|
||||||
|
d.setVarFlag('do_unpack', 'cleandirs', os.path.join('${S}', 'patches'))
|
||||||
|
}
|
||||||
python base_do_unpack() {
|
python base_do_unpack() {
|
||||||
src_uri = (d.getVar('SRC_URI', True) or "").split()
|
src_uri = (d.getVar('SRC_URI', True) or "").split()
|
||||||
if len(src_uri) == 0:
|
if len(src_uri) == 0:
|
||||||
return
|
return
|
||||||
|
|
||||||
rootdir = d.getVar('WORKDIR', True)
|
|
||||||
|
|
||||||
# Ensure that we cleanup ${S}/patches
|
|
||||||
# TODO: Investigate if we can remove
|
|
||||||
# the entire ${S} in this case.
|
|
||||||
s_dir = d.getVar('S', True)
|
|
||||||
p_dir = os.path.join(s_dir, 'patches')
|
|
||||||
bb.utils.remove(p_dir, True)
|
|
||||||
|
|
||||||
try:
|
try:
|
||||||
fetcher = bb.fetch2.Fetch(src_uri, d)
|
fetcher = bb.fetch2.Fetch(src_uri, d)
|
||||||
fetcher.unpack(rootdir)
|
fetcher.unpack(d.getVar('WORKDIR', True))
|
||||||
except bb.fetch2.BBFetchException as e:
|
except bb.fetch2.BBFetchException as e:
|
||||||
raise bb.build.FuncFailed(e)
|
raise bb.build.FuncFailed(e)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -84,6 +84,8 @@ EOF
|
|||||||
|
|
||||||
addtask generate_toolchain_file after do_patch before do_configure
|
addtask generate_toolchain_file after do_patch before do_configure
|
||||||
|
|
||||||
|
CONFIGURE_FILES = "CMakeLists.txt"
|
||||||
|
|
||||||
cmake_do_configure() {
|
cmake_do_configure() {
|
||||||
if [ "${OECMAKE_BUILDPATH}" ]; then
|
if [ "${OECMAKE_BUILDPATH}" ]; then
|
||||||
bbnote "cmake.bbclass no longer uses OECMAKE_BUILDPATH. The default behaviour is now out-of-tree builds with B=WORKDIR/build."
|
bbnote "cmake.bbclass no longer uses OECMAKE_BUILDPATH. The default behaviour is now out-of-tree builds with B=WORKDIR/build."
|
||||||
@@ -108,15 +110,15 @@ cmake_do_configure() {
|
|||||||
${OECMAKE_SITEFILE} \
|
${OECMAKE_SITEFILE} \
|
||||||
${OECMAKE_SOURCEPATH} \
|
${OECMAKE_SOURCEPATH} \
|
||||||
-DCMAKE_INSTALL_PREFIX:PATH=${prefix} \
|
-DCMAKE_INSTALL_PREFIX:PATH=${prefix} \
|
||||||
-DCMAKE_INSTALL_BINDIR:PATH=${bindir} \
|
-DCMAKE_INSTALL_BINDIR:PATH=${@os.path.relpath(d.getVar('bindir', True), d.getVar('prefix', True))} \
|
||||||
-DCMAKE_INSTALL_SBINDIR:PATH=${sbindir} \
|
-DCMAKE_INSTALL_SBINDIR:PATH=${@os.path.relpath(d.getVar('sbindir', True), d.getVar('prefix', True))} \
|
||||||
-DCMAKE_INSTALL_LIBEXECDIR:PATH=${libexecdir} \
|
-DCMAKE_INSTALL_LIBEXECDIR:PATH=${@os.path.relpath(d.getVar('libexecdir', True), d.getVar('prefix', True))} \
|
||||||
-DCMAKE_INSTALL_SYSCONFDIR:PATH=${sysconfdir} \
|
-DCMAKE_INSTALL_SYSCONFDIR:PATH=${sysconfdir} \
|
||||||
-DCMAKE_INSTALL_SHAREDSTATEDIR:PATH=${sharedstatedir} \
|
-DCMAKE_INSTALL_SHAREDSTATEDIR:PATH=${@os.path.relpath(d.getVar('sharedstatedir', True), d. getVar('prefix', True))} \
|
||||||
-DCMAKE_INSTALL_LOCALSTATEDIR:PATH=${localstatedir} \
|
-DCMAKE_INSTALL_LOCALSTATEDIR:PATH=${localstatedir} \
|
||||||
-DCMAKE_INSTALL_LIBDIR:PATH=${libdir} \
|
-DCMAKE_INSTALL_LIBDIR:PATH=${@os.path.relpath(d.getVar('libdir', True), d.getVar('prefix', True))} \
|
||||||
-DCMAKE_INSTALL_INCLUDEDIR:PATH=${includedir} \
|
-DCMAKE_INSTALL_INCLUDEDIR:PATH=${@os.path.relpath(d.getVar('includedir', True), d.getVar('prefix', True))} \
|
||||||
-DCMAKE_INSTALL_DATAROOTDIR:PATH=${datadir} \
|
-DCMAKE_INSTALL_DATAROOTDIR:PATH=${@os.path.relpath(d.getVar('datadir', True), d.getVar('prefix', True))} \
|
||||||
-DCMAKE_INSTALL_SO_NO_EXE=0 \
|
-DCMAKE_INSTALL_SO_NO_EXE=0 \
|
||||||
-DCMAKE_TOOLCHAIN_FILE=${WORKDIR}/toolchain.cmake \
|
-DCMAKE_TOOLCHAIN_FILE=${WORKDIR}/toolchain.cmake \
|
||||||
-DCMAKE_VERBOSE_MAKEFILE=1 \
|
-DCMAKE_VERBOSE_MAKEFILE=1 \
|
||||||
|
|||||||
@@ -42,6 +42,7 @@ python do_menuconfig() {
|
|||||||
}
|
}
|
||||||
do_menuconfig[depends] += "ncurses-native:do_populate_sysroot"
|
do_menuconfig[depends] += "ncurses-native:do_populate_sysroot"
|
||||||
do_menuconfig[nostamp] = "1"
|
do_menuconfig[nostamp] = "1"
|
||||||
|
do_menuconfig[dirs] = "${B}"
|
||||||
addtask menuconfig after do_configure
|
addtask menuconfig after do_configure
|
||||||
|
|
||||||
python do_diffconfig() {
|
python do_diffconfig() {
|
||||||
@@ -73,4 +74,5 @@ python do_diffconfig() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
do_diffconfig[nostamp] = "1"
|
do_diffconfig[nostamp] = "1"
|
||||||
|
do_diffconfig[dirs] = "${B}"
|
||||||
addtask diffconfig
|
addtask diffconfig
|
||||||
|
|||||||
@@ -17,6 +17,8 @@ HOST_CC_ARCH = "${BUILD_CC_ARCH}"
|
|||||||
HOST_LD_ARCH = "${BUILD_LD_ARCH}"
|
HOST_LD_ARCH = "${BUILD_LD_ARCH}"
|
||||||
HOST_AS_ARCH = "${BUILD_AS_ARCH}"
|
HOST_AS_ARCH = "${BUILD_AS_ARCH}"
|
||||||
|
|
||||||
|
export lt_cv_sys_lib_dlsearch_path_spec = "${libdir} ${base_libdir} /lib /lib64 /usr/lib /usr/lib64"
|
||||||
|
|
||||||
STAGING_DIR_HOST = "${STAGING_DIR}/${HOST_ARCH}${HOST_VENDOR}-${HOST_OS}"
|
STAGING_DIR_HOST = "${STAGING_DIR}/${HOST_ARCH}${HOST_VENDOR}-${HOST_OS}"
|
||||||
|
|
||||||
PACKAGE_ARCH = "${BUILD_ARCH}"
|
PACKAGE_ARCH = "${BUILD_ARCH}"
|
||||||
|
|||||||
@@ -89,6 +89,7 @@ python () {
|
|||||||
# function is run every time
|
# function is run every time
|
||||||
d.setVar('BB_DONT_CACHE', '1')
|
d.setVar('BB_DONT_CACHE', '1')
|
||||||
d.setVarFlag('do_compile', 'file-checksums', '${@srctree_hash_files(d)}')
|
d.setVarFlag('do_compile', 'file-checksums', '${@srctree_hash_files(d)}')
|
||||||
|
d.setVarFlag('do_configure', 'file-checksums', '${@srctree_configure_hash_files(d)}')
|
||||||
|
|
||||||
# We don't want the workdir to go away
|
# We don't want the workdir to go away
|
||||||
d.appendVar('RM_WORK_EXCLUDE', ' ' + d.getVar('PN', True))
|
d.appendVar('RM_WORK_EXCLUDE', ' ' + d.getVar('PN', True))
|
||||||
@@ -152,3 +153,24 @@ def srctree_hash_files(d):
|
|||||||
else:
|
else:
|
||||||
ret = d.getVar('EXTERNALSRC', True) + '/*:True'
|
ret = d.getVar('EXTERNALSRC', True) + '/*:True'
|
||||||
return ret
|
return ret
|
||||||
|
|
||||||
|
def srctree_configure_hash_files(d):
|
||||||
|
"""
|
||||||
|
Get the list of files that should trigger do_configure to re-execute,
|
||||||
|
based on the value of CONFIGURE_FILES
|
||||||
|
"""
|
||||||
|
in_files = (d.getVar('CONFIGURE_FILES', True) or '').split()
|
||||||
|
out_items = []
|
||||||
|
search_files = []
|
||||||
|
for entry in in_files:
|
||||||
|
if entry.startswith('/'):
|
||||||
|
out_items.append('%s:%s' % (entry, os.path.exists(entry)))
|
||||||
|
else:
|
||||||
|
search_files.append(entry)
|
||||||
|
if search_files:
|
||||||
|
s_dir = d.getVar('EXTERNALSRC', True)
|
||||||
|
for root, _, files in os.walk(s_dir):
|
||||||
|
for f in files:
|
||||||
|
if f in search_files:
|
||||||
|
out_items.append('%s:True' % os.path.join(root, f))
|
||||||
|
return ' '.join(out_items)
|
||||||
|
|||||||
@@ -3,13 +3,23 @@
|
|||||||
# This sets up autoconf-based recipes to build introspection data (or not),
|
# This sets up autoconf-based recipes to build introspection data (or not),
|
||||||
# depending on distro and machine features (see gobject-introspection-data class).
|
# depending on distro and machine features (see gobject-introspection-data class).
|
||||||
inherit gobject-introspection-data
|
inherit gobject-introspection-data
|
||||||
EXTRA_OECONF_prepend = "${@bb.utils.contains('GI_DATA_ENABLED', 'True', '--enable-introspection', '--disable-introspection', d)} "
|
EXTRA_OECONF_prepend_class-target = "${@bb.utils.contains('GI_DATA_ENABLED', 'True', '--enable-introspection', '--disable-introspection', d)} "
|
||||||
|
|
||||||
|
# When building native recipes, disable introspection, as it is not necessary,
|
||||||
|
# pulls in additional dependencies, and makes build times longer
|
||||||
|
EXTRA_OECONF_prepend_class-native = "--disable-introspection "
|
||||||
|
EXTRA_OECONF_prepend_class-nativesdk = "--disable-introspection "
|
||||||
|
|
||||||
UNKNOWN_CONFIGURE_WHITELIST_append = " --enable-introspection --disable-introspection"
|
UNKNOWN_CONFIGURE_WHITELIST_append = " --enable-introspection --disable-introspection"
|
||||||
|
|
||||||
# Generating introspection data depends on a combination of native and target
|
# Generating introspection data depends on a combination of native and target
|
||||||
# introspection tools, and qemu to run the target tools.
|
# introspection tools, and qemu to run the target tools.
|
||||||
DEPENDS_append = " gobject-introspection gobject-introspection-native qemu-native"
|
DEPENDS_append_class-target = " gobject-introspection gobject-introspection-native qemu-native"
|
||||||
|
|
||||||
|
# Even though introspection is disabled on -native, gobject-introspection package is still
|
||||||
|
# needed for m4 macros.
|
||||||
|
DEPENDS_append_class-native = " gobject-introspection-native"
|
||||||
|
DEPENDS_append_class-nativesdk = " gobject-introspection-native"
|
||||||
|
|
||||||
# This is necessary for python scripts to succeed - distutils fails if these
|
# This is necessary for python scripts to succeed - distutils fails if these
|
||||||
# are not set
|
# are not set
|
||||||
|
|||||||
@@ -93,7 +93,7 @@ python build_efi_cfg() {
|
|||||||
try:
|
try:
|
||||||
cfgfile = file(cfile, 'w')
|
cfgfile = file(cfile, 'w')
|
||||||
except OSError:
|
except OSError:
|
||||||
raise bb.build.funcFailed('Unable to open %s' % (cfile))
|
raise bb.build.FuncFailed('Unable to open %s' % (cfile))
|
||||||
|
|
||||||
cfgfile.write('# Automatically created by OE\n')
|
cfgfile.write('# Automatically created by OE\n')
|
||||||
|
|
||||||
|
|||||||
@@ -72,7 +72,7 @@ python build_efi_cfg() {
|
|||||||
try:
|
try:
|
||||||
cfgfile = open(cfile, 'w')
|
cfgfile = open(cfile, 'w')
|
||||||
except OSError:
|
except OSError:
|
||||||
raise bb.build.funcFailed('Unable to open %s' % (cfile))
|
raise bb.build.FuncFailed('Unable to open %s' % (cfile))
|
||||||
|
|
||||||
cfgfile.write('# Automatically created by OE\n')
|
cfgfile.write('# Automatically created by OE\n')
|
||||||
cfgfile.write('default %s\n' % (labels.split()[0]))
|
cfgfile.write('default %s\n' % (labels.split()[0]))
|
||||||
@@ -95,7 +95,7 @@ python build_efi_cfg() {
|
|||||||
try:
|
try:
|
||||||
entrycfg = open(entryfile, "w")
|
entrycfg = open(entryfile, "w")
|
||||||
except OSError:
|
except OSError:
|
||||||
raise bb.build.funcFailed('Unable to open %s' % (entryfile))
|
raise bb.build.FuncFailed('Unable to open %s' % (entryfile))
|
||||||
localdata.setVar('OVERRIDES', label + ':' + overrides)
|
localdata.setVar('OVERRIDES', label + ':' + overrides)
|
||||||
bb.data.update_data(localdata)
|
bb.data.update_data(localdata)
|
||||||
|
|
||||||
|
|||||||
@@ -72,6 +72,7 @@ def preferred_ml_updates(d):
|
|||||||
pkg = pkg.replace("virtual/", "")
|
pkg = pkg.replace("virtual/", "")
|
||||||
virt = "virtual/"
|
virt = "virtual/"
|
||||||
for p in prefixes:
|
for p in prefixes:
|
||||||
|
newval = None
|
||||||
if pkg != "kernel":
|
if pkg != "kernel":
|
||||||
newval = p + "-" + val
|
newval = p + "-" + val
|
||||||
|
|
||||||
@@ -86,7 +87,7 @@ def preferred_ml_updates(d):
|
|||||||
|
|
||||||
# implement alternative multilib name
|
# implement alternative multilib name
|
||||||
newname = localdata.expand("PREFERRED_PROVIDER_" + virt + p + "-" + pkg)
|
newname = localdata.expand("PREFERRED_PROVIDER_" + virt + p + "-" + pkg)
|
||||||
if not d.getVar(newname, False):
|
if not d.getVar(newname, False) and newval != None:
|
||||||
d.setVar(newname, localdata.expand(newval))
|
d.setVar(newname, localdata.expand(newval))
|
||||||
# Avoid future variable key expansion
|
# Avoid future variable key expansion
|
||||||
provexp = d.expand(prov)
|
provexp = d.expand(prov)
|
||||||
|
|||||||
@@ -52,3 +52,8 @@ oe_multilib_header() {
|
|||||||
oe_multilib_header_class-native () {
|
oe_multilib_header_class-native () {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# Nor do we need multilib headers for nativesdk builds.
|
||||||
|
oe_multilib_header_class-nativesdk () {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|||||||
@@ -87,8 +87,7 @@ datadir = "${STAGING_DATADIR_NATIVE}"
|
|||||||
|
|
||||||
baselib = "lib"
|
baselib = "lib"
|
||||||
|
|
||||||
# Libtool's default paths are correct for the native machine
|
export lt_cv_sys_lib_dlsearch_path_spec = "${libdir} ${base_libdir} /lib /lib64 /usr/lib /usr/lib64"
|
||||||
lt_cv_sys_lib_dlsearch_path_spec[unexport] = "1"
|
|
||||||
|
|
||||||
NATIVE_PACKAGE_PATH_SUFFIX ?= ""
|
NATIVE_PACKAGE_PATH_SUFFIX ?= ""
|
||||||
bindir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
|
bindir .= "${NATIVE_PACKAGE_PATH_SUFFIX}"
|
||||||
|
|||||||
@@ -1,4 +1,5 @@
|
|||||||
DEPENDS_prepend = "nodejs-native "
|
DEPENDS_prepend = "nodejs-native "
|
||||||
|
RDEPENDS_${PN}_prepend = "nodejs "
|
||||||
S = "${WORKDIR}/npmpkg"
|
S = "${WORKDIR}/npmpkg"
|
||||||
|
|
||||||
NPM_INSTALLDIR = "${D}${libdir}/node_modules/${PN}"
|
NPM_INSTALLDIR = "${D}${libdir}/node_modules/${PN}"
|
||||||
|
|||||||
@@ -45,6 +45,8 @@ python do_package_ipk () {
|
|||||||
if os.path.exists(p):
|
if os.path.exists(p):
|
||||||
bb.utils.prunedir(p)
|
bb.utils.prunedir(p)
|
||||||
|
|
||||||
|
recipesource = os.path.basename(d.getVar('FILE', True))
|
||||||
|
|
||||||
for pkg in packages.split():
|
for pkg in packages.split():
|
||||||
localdata = bb.data.createCopy(d)
|
localdata = bb.data.createCopy(d)
|
||||||
root = "%s/%s" % (pkgdest, pkg)
|
root = "%s/%s" % (pkgdest, pkg)
|
||||||
@@ -208,10 +210,7 @@ python do_package_ipk () {
|
|||||||
ctrlfile.write("Replaces: %s\n" % bb.utils.join_deps(rreplaces))
|
ctrlfile.write("Replaces: %s\n" % bb.utils.join_deps(rreplaces))
|
||||||
if rconflicts:
|
if rconflicts:
|
||||||
ctrlfile.write("Conflicts: %s\n" % bb.utils.join_deps(rconflicts))
|
ctrlfile.write("Conflicts: %s\n" % bb.utils.join_deps(rconflicts))
|
||||||
src_uri = localdata.getVar("SRC_URI", True).strip() or "None"
|
ctrlfile.write("Source: %s\n" % recipesource)
|
||||||
if src_uri:
|
|
||||||
src_uri = re.sub("\s+", " ", src_uri)
|
|
||||||
ctrlfile.write("Source: %s\n" % " ".join(src_uri.split()))
|
|
||||||
ctrlfile.close()
|
ctrlfile.close()
|
||||||
|
|
||||||
for script in ["preinst", "postinst", "prerm", "postrm"]:
|
for script in ["preinst", "postinst", "prerm", "postrm"]:
|
||||||
|
|||||||
@@ -223,8 +223,14 @@ python copy_buildsystem () {
|
|||||||
# the sig computed from the metadata.
|
# the sig computed from the metadata.
|
||||||
f.write('SIGGEN_LOCKEDSIGS_TASKSIG_CHECK = "warn"\n\n')
|
f.write('SIGGEN_LOCKEDSIGS_TASKSIG_CHECK = "warn"\n\n')
|
||||||
|
|
||||||
|
# Set up whitelist for run on install
|
||||||
|
f.write('BB_SETSCENE_ENFORCE_WHITELIST = "%:* *:do_shared_workdir *:do_rm_work *:do_package"\n\n')
|
||||||
|
|
||||||
# Hide the config information from bitbake output (since it's fixed within the SDK)
|
# Hide the config information from bitbake output (since it's fixed within the SDK)
|
||||||
f.write('BUILDCFG_HEADER = ""\n')
|
f.write('BUILDCFG_HEADER = ""\n\n')
|
||||||
|
|
||||||
|
# Map gcc-dependent uninative sstate cache for installer usage
|
||||||
|
f.write('SSTATE_MIRRORS = "file://universal/(.*) file://universal-4.9/\\1\\nfile://universal-4.9/(.*) file://universal-4.8/\\1"\n\n')
|
||||||
|
|
||||||
# Allow additional config through sdk-extra.conf
|
# Allow additional config through sdk-extra.conf
|
||||||
fn = bb.cookerdata.findConfigFile('sdk-extra.conf', d)
|
fn = bb.cookerdata.findConfigFile('sdk-extra.conf', d)
|
||||||
|
|||||||
@@ -55,7 +55,7 @@ do_rm_work () {
|
|||||||
*do_setscene*)
|
*do_setscene*)
|
||||||
break
|
break
|
||||||
;;
|
;;
|
||||||
*sigdata*)
|
*sigdata*|*sigbasedata*)
|
||||||
i=dummy
|
i=dummy
|
||||||
break
|
break
|
||||||
;;
|
;;
|
||||||
|
|||||||
@@ -21,7 +21,7 @@ ROOTFS_POSTUNINSTALL_COMMAND =+ "write_image_manifest ; "
|
|||||||
POSTINST_LOGFILE ?= "${localstatedir}/log/postinstall.log"
|
POSTINST_LOGFILE ?= "${localstatedir}/log/postinstall.log"
|
||||||
# Set default target for systemd images
|
# Set default target for systemd images
|
||||||
SYSTEMD_DEFAULT_TARGET ?= '${@bb.utils.contains("IMAGE_FEATURES", "x11-base", "graphical.target", "multi-user.target", d)}'
|
SYSTEMD_DEFAULT_TARGET ?= '${@bb.utils.contains("IMAGE_FEATURES", "x11-base", "graphical.target", "multi-user.target", d)}'
|
||||||
ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("DISTRO_FEATURES", "systemd", "set_systemd_default_target; ", "", d)}'
|
ROOTFS_POSTPROCESS_COMMAND += '${@bb.utils.contains("DISTRO_FEATURES", "systemd", "set_systemd_default_target; systemd_create_users;", "", d)}'
|
||||||
|
|
||||||
ROOTFS_POSTPROCESS_COMMAND += 'empty_var_volatile;'
|
ROOTFS_POSTPROCESS_COMMAND += 'empty_var_volatile;'
|
||||||
|
|
||||||
@@ -30,7 +30,25 @@ ROOTFS_POSTPROCESS_COMMAND += 'empty_var_volatile;'
|
|||||||
SSH_DISABLE_DNS_LOOKUP ?= " ssh_disable_dns_lookup ; "
|
SSH_DISABLE_DNS_LOOKUP ?= " ssh_disable_dns_lookup ; "
|
||||||
ROOTFS_POSTPROCESS_COMMAND_append_qemuall = "${SSH_DISABLE_DNS_LOOKUP}"
|
ROOTFS_POSTPROCESS_COMMAND_append_qemuall = "${SSH_DISABLE_DNS_LOOKUP}"
|
||||||
|
|
||||||
|
systemd_create_users () {
|
||||||
|
for conffile in ${IMAGE_ROOTFS}/usr/lib/sysusers.d/systemd.conf ${IMAGE_ROOTFS}/usr/lib/sysusers.d/systemd-remote.conf; do
|
||||||
|
[ -e $conffile ] || continue
|
||||||
|
grep -v "^#" $conffile | sed -e '/^$/d' | while read type name id comment; do
|
||||||
|
if [ "$type" = "u" ]; then
|
||||||
|
useradd_params="--shell /sbin/nologin"
|
||||||
|
[ "$id" != "-" ] && useradd_params="$useradd_params --uid $id"
|
||||||
|
[ "$comment" != "-" ] && useradd_params="$useradd_params --comment $comment"
|
||||||
|
useradd_params="$useradd_params --system $name"
|
||||||
|
eval useradd --root ${IMAGE_ROOTFS} $useradd_params || true
|
||||||
|
elif [ "$type" = "g" ]; then
|
||||||
|
groupadd_params=""
|
||||||
|
[ "$id" != "-" ] && groupadd_params="$groupadd_params --gid $id"
|
||||||
|
groupadd_params="$groupadd_params --system $name"
|
||||||
|
eval groupadd --root ${IMAGE_ROOTFS} $groupadd_params || true
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
done
|
||||||
|
}
|
||||||
|
|
||||||
#
|
#
|
||||||
# A hook function to support read-only-rootfs IMAGE_FEATURES
|
# A hook function to support read-only-rootfs IMAGE_FEATURES
|
||||||
@@ -73,27 +91,6 @@ read_only_rootfs_hook () {
|
|||||||
${IMAGE_ROOTFS}/etc/init.d/populate-volatile.sh
|
${IMAGE_ROOTFS}/etc/init.d/populate-volatile.sh
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if ${@bb.utils.contains("DISTRO_FEATURES", "systemd", "true", "false", d)}; then
|
|
||||||
# Update user database files so that services don't fail for a read-only systemd system
|
|
||||||
for conffile in ${IMAGE_ROOTFS}/usr/lib/sysusers.d/systemd.conf ${IMAGE_ROOTFS}/usr/lib/sysusers.d/systemd-remote.conf; do
|
|
||||||
[ -e $conffile ] || continue
|
|
||||||
grep -v "^#" $conffile | sed -e '/^$/d' | while read type name id comment; do
|
|
||||||
if [ "$type" = "u" ]; then
|
|
||||||
useradd_params=""
|
|
||||||
[ "$id" != "-" ] && useradd_params="$useradd_params --uid $id"
|
|
||||||
[ "$comment" != "-" ] && useradd_params="$useradd_params --comment $comment"
|
|
||||||
useradd_params="$useradd_params --system $name"
|
|
||||||
eval useradd --root ${IMAGE_ROOTFS} $useradd_params || true
|
|
||||||
elif [ "$type" = "g" ]; then
|
|
||||||
groupadd_params=""
|
|
||||||
[ "$id" != "-" ] && groupadd_params="$groupadd_params --gid $id"
|
|
||||||
groupadd_params="$groupadd_params --system $name"
|
|
||||||
eval groupadd --root ${IMAGE_ROOTFS} $groupadd_params || true
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
done
|
|
||||||
fi
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#
|
#
|
||||||
|
|||||||
@@ -5,8 +5,8 @@
|
|||||||
ROOTFS_PKGMANAGE = "rpm smartpm"
|
ROOTFS_PKGMANAGE = "rpm smartpm"
|
||||||
ROOTFS_PKGMANAGE_BOOTSTRAP = "run-postinsts"
|
ROOTFS_PKGMANAGE_BOOTSTRAP = "run-postinsts"
|
||||||
|
|
||||||
# Add 50Meg of extra space for Smart
|
# Add 100Meg of extra space for Smart
|
||||||
IMAGE_ROOTFS_EXTRA_SPACE_append = "${@bb.utils.contains("PACKAGE_INSTALL", "smartpm", " + 51200", "" ,d)}"
|
IMAGE_ROOTFS_EXTRA_SPACE_append = "${@bb.utils.contains("PACKAGE_INSTALL", "smartpm", " + 102400", "" ,d)}"
|
||||||
|
|
||||||
# Smart is python based, so be sure python-native is available to us.
|
# Smart is python based, so be sure python-native is available to us.
|
||||||
EXTRANATIVEPATH += "python-native"
|
EXTRANATIVEPATH += "python-native"
|
||||||
|
|||||||
@@ -442,7 +442,7 @@ def sstate_clean(ss, d):
|
|||||||
rm_nohash = ".do_%s" % ss['task']
|
rm_nohash = ".do_%s" % ss['task']
|
||||||
for stfile in glob.glob(wildcard_stfile):
|
for stfile in glob.glob(wildcard_stfile):
|
||||||
# Keep the sigdata
|
# Keep the sigdata
|
||||||
if ".sigdata." in stfile:
|
if ".sigdata." in stfile or ".sigbasedata." in stfile:
|
||||||
continue
|
continue
|
||||||
# Preserve taint files in the stamps directory
|
# Preserve taint files in the stamps directory
|
||||||
if stfile.endswith('.taint'):
|
if stfile.endswith('.taint'):
|
||||||
|
|||||||
@@ -105,7 +105,7 @@ python build_syslinux_cfg () {
|
|||||||
try:
|
try:
|
||||||
cfgfile = file(cfile, 'w')
|
cfgfile = file(cfile, 'w')
|
||||||
except OSError:
|
except OSError:
|
||||||
raise bb.build.funcFailed('Unable to open %s' % (cfile))
|
raise bb.build.FuncFailed('Unable to open %s' % (cfile))
|
||||||
|
|
||||||
cfgfile.write('# Automatically created by OE\n')
|
cfgfile.write('# Automatically created by OE\n')
|
||||||
|
|
||||||
|
|||||||
@@ -6,6 +6,9 @@ REAL_MULTIMACH_TARGET_SYS ?= "${MULTIMACH_TARGET_SYS}"
|
|||||||
TARGET_CC_ARCH_append_libc-uclibc = " -muclibc"
|
TARGET_CC_ARCH_append_libc-uclibc = " -muclibc"
|
||||||
TARGET_CC_ARCH_append_libc-musl = " -mmusl"
|
TARGET_CC_ARCH_append_libc-musl = " -mmusl"
|
||||||
|
|
||||||
|
# default debug prefix map isn't valid in the SDK
|
||||||
|
DEBUG_PREFIX_MAP = ""
|
||||||
|
|
||||||
# This function creates an environment-setup-script for use in a deployable SDK
|
# This function creates an environment-setup-script for use in a deployable SDK
|
||||||
toolchain_create_sdk_env_script () {
|
toolchain_create_sdk_env_script () {
|
||||||
# Create environment setup script
|
# Create environment setup script
|
||||||
|
|||||||
@@ -54,15 +54,15 @@ if test "x`echo $GROUPADD_PARAM | tr -d '[:space:]'`" != "x"; then
|
|||||||
echo "Running groupadd commands..."
|
echo "Running groupadd commands..."
|
||||||
# Invoke multiple instances of groupadd for parameter lists
|
# Invoke multiple instances of groupadd for parameter lists
|
||||||
# separated by ';'
|
# separated by ';'
|
||||||
opts=`echo "$GROUPADD_PARAM" | cut -d ';' -f 1`
|
opts=`echo "$GROUPADD_PARAM" | cut -d ';' -f 1 | sed -e 's#[ \t]*$##'`
|
||||||
remaining=`echo "$GROUPADD_PARAM" | cut -d ';' -f 2-`
|
remaining=`echo "$GROUPADD_PARAM" | cut -d ';' -f 2- | sed -e 's#[ \t]*$##'`
|
||||||
while test "x$opts" != "x"; do
|
while test "x$opts" != "x"; do
|
||||||
perform_groupadd "$SYSROOT" "$OPT $opts"
|
perform_groupadd "$SYSROOT" "$OPT $opts"
|
||||||
if test "x$opts" = "x$remaining"; then
|
if test "x$opts" = "x$remaining"; then
|
||||||
break
|
break
|
||||||
fi
|
fi
|
||||||
opts=`echo "$remaining" | cut -d ';' -f 1`
|
opts=`echo "$remaining" | cut -d ';' -f 1 | sed -e 's#[ \t]*$##'`
|
||||||
remaining=`echo "$remaining" | cut -d ';' -f 2-`
|
remaining=`echo "$remaining" | cut -d ';' -f 2- | sed -e 's#[ \t]*$##'`
|
||||||
done
|
done
|
||||||
fi
|
fi
|
||||||
|
|
||||||
@@ -70,15 +70,15 @@ if test "x`echo $USERADD_PARAM | tr -d '[:space:]'`" != "x"; then
|
|||||||
echo "Running useradd commands..."
|
echo "Running useradd commands..."
|
||||||
# Invoke multiple instances of useradd for parameter lists
|
# Invoke multiple instances of useradd for parameter lists
|
||||||
# separated by ';'
|
# separated by ';'
|
||||||
opts=`echo "$USERADD_PARAM" | cut -d ';' -f 1`
|
opts=`echo "$USERADD_PARAM" | cut -d ';' -f 1 | sed -e 's#[ \t]*$##'`
|
||||||
remaining=`echo "$USERADD_PARAM" | cut -d ';' -f 2-`
|
remaining=`echo "$USERADD_PARAM" | cut -d ';' -f 2- | sed -e 's#[ \t]*$##'`
|
||||||
while test "x$opts" != "x"; do
|
while test "x$opts" != "x"; do
|
||||||
perform_useradd "$SYSROOT" "$OPT $opts"
|
perform_useradd "$SYSROOT" "$OPT $opts"
|
||||||
if test "x$opts" = "x$remaining"; then
|
if test "x$opts" = "x$remaining"; then
|
||||||
break
|
break
|
||||||
fi
|
fi
|
||||||
opts=`echo "$remaining" | cut -d ';' -f 1`
|
opts=`echo "$remaining" | cut -d ';' -f 1 | sed -e 's#[ \t]*$##'`
|
||||||
remaining=`echo "$remaining" | cut -d ';' -f 2-`
|
remaining=`echo "$remaining" | cut -d ';' -f 2- | sed -e 's#[ \t]*$##'`
|
||||||
done
|
done
|
||||||
fi
|
fi
|
||||||
|
|
||||||
@@ -86,15 +86,15 @@ if test "x`echo $GROUPMEMS_PARAM | tr -d '[:space:]'`" != "x"; then
|
|||||||
echo "Running groupmems commands..."
|
echo "Running groupmems commands..."
|
||||||
# Invoke multiple instances of groupmems for parameter lists
|
# Invoke multiple instances of groupmems for parameter lists
|
||||||
# separated by ';'
|
# separated by ';'
|
||||||
opts=`echo "$GROUPMEMS_PARAM" | cut -d ';' -f 1`
|
opts=`echo "$GROUPMEMS_PARAM" | cut -d ';' -f 1 | sed -e 's#[ \t]*$##'`
|
||||||
remaining=`echo "$GROUPMEMS_PARAM" | cut -d ';' -f 2-`
|
remaining=`echo "$GROUPMEMS_PARAM" | cut -d ';' -f 2- | sed -e 's#[ \t]*$##'`
|
||||||
while test "x$opts" != "x"; do
|
while test "x$opts" != "x"; do
|
||||||
perform_groupmems "$SYSROOT" "$OPT $opts"
|
perform_groupmems "$SYSROOT" "$OPT $opts"
|
||||||
if test "x$opts" = "x$remaining"; then
|
if test "x$opts" = "x$remaining"; then
|
||||||
break
|
break
|
||||||
fi
|
fi
|
||||||
opts=`echo "$remaining" | cut -d ';' -f 1`
|
opts=`echo "$remaining" | cut -d ';' -f 1 | sed -e 's#[ \t]*$##'`
|
||||||
remaining=`echo "$remaining" | cut -d ';' -f 2-`
|
remaining=`echo "$remaining" | cut -d ';' -f 2- | sed -e 's#[ \t]*$##'`
|
||||||
done
|
done
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
@@ -103,7 +103,7 @@ useradd_sysroot () {
|
|||||||
# Pseudo may (do_install) or may not (do_populate_sysroot_setscene) be running
|
# Pseudo may (do_install) or may not (do_populate_sysroot_setscene) be running
|
||||||
# at this point so we're explicit about the environment so pseudo can load if
|
# at this point so we're explicit about the environment so pseudo can load if
|
||||||
# not already present.
|
# not already present.
|
||||||
export PSEUDO="${FAKEROOTENV} PSEUDO_LOCALSTATEDIR=${STAGING_DIR_TARGET}${localstatedir}/pseudo ${STAGING_DIR_NATIVE}${bindir}/pseudo"
|
export PSEUDO="${FAKEROOTENV} PSEUDO_LOCALSTATEDIR=${STAGING_DIR_TARGET}${localstatedir}/pseudo ${STAGING_DIR_NATIVE}${bindir_native}/pseudo"
|
||||||
|
|
||||||
# Explicitly set $D since it isn't set to anything
|
# Explicitly set $D since it isn't set to anything
|
||||||
# before do_install
|
# before do_install
|
||||||
@@ -130,7 +130,7 @@ useradd_sysroot_sstate () {
|
|||||||
userdel_sysroot_sstate () {
|
userdel_sysroot_sstate () {
|
||||||
if test "x${STAGING_DIR_TARGET}" != "x"; then
|
if test "x${STAGING_DIR_TARGET}" != "x"; then
|
||||||
if [ "${BB_CURRENTTASK}" = "configure" -o "${BB_CURRENTTASK}" = "clean" ]; then
|
if [ "${BB_CURRENTTASK}" = "configure" -o "${BB_CURRENTTASK}" = "clean" ]; then
|
||||||
export PSEUDO="${FAKEROOTENV} PSEUDO_LOCALSTATEDIR=${STAGING_DIR_TARGET}${localstatedir}/pseudo ${STAGING_DIR_NATIVE}${bindir}/pseudo"
|
export PSEUDO="${FAKEROOTENV} PSEUDO_LOCALSTATEDIR=${STAGING_DIR_TARGET}${localstatedir}/pseudo ${STAGING_DIR_NATIVE}${bindir_native}/pseudo"
|
||||||
OPT="--root ${STAGING_DIR_TARGET}"
|
OPT="--root ${STAGING_DIR_TARGET}"
|
||||||
|
|
||||||
# Remove groups and users defined for package
|
# Remove groups and users defined for package
|
||||||
@@ -203,7 +203,7 @@ def get_all_cmd_params(d, cmd_type):
|
|||||||
for pkg in useradd_packages.split():
|
for pkg in useradd_packages.split():
|
||||||
param = d.getVar(param_type % pkg, True)
|
param = d.getVar(param_type % pkg, True)
|
||||||
if param:
|
if param:
|
||||||
params.append(param)
|
params.append(param.rstrip(" ;"))
|
||||||
|
|
||||||
return "; ".join(params)
|
return "; ".join(params)
|
||||||
|
|
||||||
|
|||||||
@@ -17,7 +17,6 @@ perform_groupadd () {
|
|||||||
local groupname=`echo "$opts" | awk '{ print $NF }'`
|
local groupname=`echo "$opts" | awk '{ print $NF }'`
|
||||||
local group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
|
local group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
|
||||||
if test "x$group_exists" = "x"; then
|
if test "x$group_exists" = "x"; then
|
||||||
opts=`echo $opts | sed s/\'/\"/g`
|
|
||||||
eval flock -x $rootdir${sysconfdir} -c \"$PSEUDO groupadd \$opts\" || true
|
eval flock -x $rootdir${sysconfdir} -c \"$PSEUDO groupadd \$opts\" || true
|
||||||
group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
|
group_exists="`grep "^$groupname:" $rootdir/etc/group || true`"
|
||||||
if test "x$group_exists" = "x"; then
|
if test "x$group_exists" = "x"; then
|
||||||
@@ -35,7 +34,6 @@ perform_useradd () {
|
|||||||
local username=`echo "$opts" | awk '{ print $NF }'`
|
local username=`echo "$opts" | awk '{ print $NF }'`
|
||||||
local user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
|
local user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
|
||||||
if test "x$user_exists" = "x"; then
|
if test "x$user_exists" = "x"; then
|
||||||
opts=`echo $opts | sed s/\'/\"/g`
|
|
||||||
eval flock -x $rootdir${sysconfdir} -c \"$PSEUDO useradd \$opts\" || true
|
eval flock -x $rootdir${sysconfdir} -c \"$PSEUDO useradd \$opts\" || true
|
||||||
user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
|
user_exists="`grep "^$username:" $rootdir/etc/passwd || true`"
|
||||||
if test "x$user_exists" = "x"; then
|
if test "x$user_exists" = "x"; then
|
||||||
|
|||||||
@@ -465,6 +465,7 @@ export OBJCOPY = "${HOST_PREFIX}objcopy"
|
|||||||
export OBJDUMP = "${HOST_PREFIX}objdump"
|
export OBJDUMP = "${HOST_PREFIX}objdump"
|
||||||
export STRINGS = "${HOST_PREFIX}strings"
|
export STRINGS = "${HOST_PREFIX}strings"
|
||||||
export NM = "${HOST_PREFIX}nm"
|
export NM = "${HOST_PREFIX}nm"
|
||||||
|
export READELF = "${HOST_PREFIX}readelf"
|
||||||
PYTHON = "${@sys.executable}"
|
PYTHON = "${@sys.executable}"
|
||||||
|
|
||||||
export BUILD_CC = "${CCACHE}${BUILD_PREFIX}gcc ${BUILD_CC_ARCH}"
|
export BUILD_CC = "${CCACHE}${BUILD_PREFIX}gcc ${BUILD_CC_ARCH}"
|
||||||
@@ -549,11 +550,11 @@ EXTRA_OEMAKE_prepend_task-install = "${PARALLEL_MAKEINST} "
|
|||||||
##################################################################
|
##################################################################
|
||||||
# Optimization flags.
|
# Optimization flags.
|
||||||
##################################################################
|
##################################################################
|
||||||
DEBUG_FLAGS ?= "-g -feliminate-unused-debug-types \
|
DEBUG_PREFIX_MAP ?= "-fdebug-prefix-map=${WORKDIR}=/usr/src/debug/${PN}/${EXTENDPE}${PV}-${PR} \
|
||||||
-fdebug-prefix-map=${WORKDIR}=/usr/src/debug/${PN}/${EXTENDPE}${PV}-${PR} \
|
-fdebug-prefix-map=${STAGING_DIR_NATIVE}= \
|
||||||
-fdebug-prefix-map=${STAGING_DIR_NATIVE}= \
|
-fdebug-prefix-map=${STAGING_DIR_HOST}= \
|
||||||
-fdebug-prefix-map=${STAGING_DIR_HOST}= \
|
|
||||||
"
|
"
|
||||||
|
DEBUG_FLAGS ?= "-g -feliminate-unused-debug-types ${DEBUG_PREFIX_MAP}"
|
||||||
|
|
||||||
# Disabled until the option works properly -feliminate-dwarf2-dups
|
# Disabled until the option works properly -feliminate-dwarf2-dups
|
||||||
FULL_OPTIMIZATION = "-O2 -pipe ${DEBUG_FLAGS}"
|
FULL_OPTIMIZATION = "-O2 -pipe ${DEBUG_FLAGS}"
|
||||||
|
|||||||
@@ -6,6 +6,6 @@
|
|||||||
# to the distro running on the build machine.
|
# to the distro running on the build machine.
|
||||||
#
|
#
|
||||||
|
|
||||||
UNINATIVE_URL = "http://downloads.yoctoproject.org/releases/uninative/0.95/"
|
UNINATIVE_URL = "http://downloads.yoctoproject.org/releases/uninative/1.0.1/"
|
||||||
UNINATIVE_CHECKSUM[i686] ?= "5f27d7e0f4dd2ed80a7ff6a0d88af107b08e00765b31ed3aa180cc5ce15b0811"
|
UNINATIVE_CHECKSUM[i686] ?= "de51bc9162b07694d3462352ab25f636a6b50235438c1b09440d7569e009123b"
|
||||||
UNINATIVE_CHECKSUM[x86_64] ?= "26d46c61ad88cc245e31c88549717c0092a838d111b93ec169d88b08cc027581"
|
UNINATIVE_CHECKSUM[x86_64] ?= "acf1e44a0ac2e855e81da6426197d36358bf7b4e88e552ef933128498c8910f8"
|
||||||
|
|||||||
@@ -20,6 +20,9 @@ TUNECONFLICTS[n32] = "o32 n64"
|
|||||||
ABIEXTENSION .= "${@bb.utils.contains('TUNE_FEATURES', 'n32', 'n32', '' ,d)}"
|
ABIEXTENSION .= "${@bb.utils.contains('TUNE_FEATURES', 'n32', 'n32', '' ,d)}"
|
||||||
TUNE_CCARGS .= "${@bb.utils.contains('TUNE_FEATURES', 'n32', ' -mabi=n32', '', d)}"
|
TUNE_CCARGS .= "${@bb.utils.contains('TUNE_FEATURES', 'n32', ' -mabi=n32', '', d)}"
|
||||||
|
|
||||||
|
# user mode qemu doesn't support mips64 n32: "Invalid ELF image for this architecture"
|
||||||
|
MACHINE_FEATURES_BACKFILL_CONSIDERED_append = " ${@bb.utils.contains('TUNE_FEATURES', 'n32', 'qemu-usermode', '', d)}"
|
||||||
|
|
||||||
TUNEVALID[n64] = "MIPS64 n64 ABI"
|
TUNEVALID[n64] = "MIPS64 n64 ABI"
|
||||||
TUNECONFLICTS[n64] = "o32 n32"
|
TUNECONFLICTS[n64] = "o32 n32"
|
||||||
TUNE_CCARGS .= "${@bb.utils.contains('TUNE_FEATURES', 'n64', ' -mabi=64', '', d)}"
|
TUNE_CCARGS .= "${@bb.utils.contains('TUNE_FEATURES', 'n64', ' -mabi=64', '', d)}"
|
||||||
|
|||||||
@@ -22,7 +22,10 @@ XSERVER = "xserver-xorg \
|
|||||||
xf86-input-evdev \
|
xf86-input-evdev \
|
||||||
xf86-video-cirrus \
|
xf86-video-cirrus \
|
||||||
xf86-video-fbdev \
|
xf86-video-fbdev \
|
||||||
xf86-video-vmware"
|
xf86-video-vmware \
|
||||||
|
xf86-video-modesetting \
|
||||||
|
xserver-xorg-module-libint10 \
|
||||||
|
"
|
||||||
|
|
||||||
MACHINE_FEATURES += "x86"
|
MACHINE_FEATURES += "x86"
|
||||||
|
|
||||||
|
|||||||
@@ -21,7 +21,10 @@ XSERVER = "xserver-xorg \
|
|||||||
xf86-input-evdev \
|
xf86-input-evdev \
|
||||||
xf86-video-cirrus \
|
xf86-video-cirrus \
|
||||||
xf86-video-fbdev \
|
xf86-video-fbdev \
|
||||||
xf86-video-vmware"
|
xf86-video-vmware \
|
||||||
|
xf86-video-modesetting \
|
||||||
|
xserver-xorg-module-libint10 \
|
||||||
|
"
|
||||||
|
|
||||||
MACHINE_FEATURES += "x86"
|
MACHINE_FEATURES += "x86"
|
||||||
|
|
||||||
|
|||||||
@@ -255,7 +255,7 @@ def patch_recipe_file(fn, values, patch=False, relpath=''):
|
|||||||
changed, tolines = bb.utils.edit_metadata(fromlines, varlist, patch_recipe_varfunc, match_overrides=True)
|
changed, tolines = bb.utils.edit_metadata(fromlines, varlist, patch_recipe_varfunc, match_overrides=True)
|
||||||
|
|
||||||
if remainingnames:
|
if remainingnames:
|
||||||
if tolines[-1].strip() != '':
|
if tolines and tolines[-1].strip() != '':
|
||||||
tolines.append('\n')
|
tolines.append('\n')
|
||||||
for k in remainingnames.keys():
|
for k in remainingnames.keys():
|
||||||
outputvalue(k, tolines)
|
outputvalue(k, tolines)
|
||||||
|
|||||||
@@ -58,14 +58,24 @@ class oeTest(unittest.TestCase):
|
|||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def hasPackage(self, pkg):
|
def hasPackage(self, pkg):
|
||||||
for item in oeTest.tc.pkgmanifest.split('\n'):
|
"""
|
||||||
if re.match(pkg, item):
|
True if the full package name exists in the manifest, False otherwise.
|
||||||
|
"""
|
||||||
|
return pkg in oeTest.tc.pkgmanifest
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def hasPackageMatch(self, match):
|
||||||
|
"""
|
||||||
|
True if match exists in the manifest as a regular expression substring,
|
||||||
|
False otherwise.
|
||||||
|
"""
|
||||||
|
for s in oeTest.tc.pkgmanifest:
|
||||||
|
if re.match(match, s):
|
||||||
return True
|
return True
|
||||||
return False
|
return False
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def hasFeature(self,feature):
|
def hasFeature(self,feature):
|
||||||
|
|
||||||
if feature in oeTest.tc.imagefeatures or \
|
if feature in oeTest.tc.imagefeatures or \
|
||||||
feature in oeTest.tc.distrofeatures:
|
feature in oeTest.tc.distrofeatures:
|
||||||
return True
|
return True
|
||||||
@@ -340,17 +350,18 @@ class ImageTestContext(TestContext):
|
|||||||
self.target = target
|
self.target = target
|
||||||
self.host_dumper = host_dumper
|
self.host_dumper = host_dumper
|
||||||
|
|
||||||
|
self.pkgmanifest = {}
|
||||||
manifest = os.path.join(d.getVar("DEPLOY_DIR_IMAGE", True),
|
manifest = os.path.join(d.getVar("DEPLOY_DIR_IMAGE", True),
|
||||||
d.getVar("IMAGE_LINK_NAME", True) + ".manifest")
|
d.getVar("IMAGE_LINK_NAME", True) + ".manifest")
|
||||||
nomanifest = d.getVar("IMAGE_NO_MANIFEST", True)
|
nomanifest = d.getVar("IMAGE_NO_MANIFEST", True)
|
||||||
if nomanifest is None or nomanifest != "1":
|
if nomanifest is None or nomanifest != "1":
|
||||||
try:
|
try:
|
||||||
with open(manifest) as f:
|
with open(manifest) as f:
|
||||||
self.pkgmanifest = f.read()
|
for line in f:
|
||||||
|
(pkg, arch, version) = line.strip().split()
|
||||||
|
self.pkgmanifest[pkg] = (version, arch)
|
||||||
except IOError as e:
|
except IOError as e:
|
||||||
bb.fatal("No package manifest file found. Did you build the image?\n%s" % e)
|
bb.fatal("No package manifest file found. Did you build the image?\n%s" % e)
|
||||||
else:
|
|
||||||
self.pkgmanifest = ""
|
|
||||||
|
|
||||||
self.sigterm = False
|
self.sigterm = False
|
||||||
self.origsigtermhandler = signal.getsignal(signal.SIGTERM)
|
self.origsigtermhandler = signal.getsignal(signal.SIGTERM)
|
||||||
@@ -396,8 +407,11 @@ class SDKTestContext(TestContext):
|
|||||||
if not hasattr(self, 'target_manifest'):
|
if not hasattr(self, 'target_manifest'):
|
||||||
self.target_manifest = d.getVar("SDK_TARGET_MANIFEST", True)
|
self.target_manifest = d.getVar("SDK_TARGET_MANIFEST", True)
|
||||||
try:
|
try:
|
||||||
|
self.pkgmanifest = {}
|
||||||
with open(self.target_manifest) as f:
|
with open(self.target_manifest) as f:
|
||||||
self.pkgmanifest = f.read()
|
for line in f:
|
||||||
|
(pkg, arch, version) = line.strip().split()
|
||||||
|
self.pkgmanifest[pkg] = (version, arch)
|
||||||
except IOError as e:
|
except IOError as e:
|
||||||
bb.fatal("No package manifest file found. Did you build the sdk image?\n%s" % e)
|
bb.fatal("No package manifest file found. Did you build the sdk image?\n%s" % e)
|
||||||
|
|
||||||
|
|||||||
@@ -11,7 +11,7 @@ import subprocess
|
|||||||
def setUpModule():
|
def setUpModule():
|
||||||
if not oeRuntimeTest.hasFeature("package-management"):
|
if not oeRuntimeTest.hasFeature("package-management"):
|
||||||
skipModule("Image doesn't have package management feature")
|
skipModule("Image doesn't have package management feature")
|
||||||
if not oeRuntimeTest.hasPackage("smart"):
|
if not oeRuntimeTest.hasPackage("smartpm"):
|
||||||
skipModule("Image doesn't have smart installed")
|
skipModule("Image doesn't have smart installed")
|
||||||
if "package_rpm" != oeRuntimeTest.tc.d.getVar("PACKAGE_CLASSES", True).split()[0]:
|
if "package_rpm" != oeRuntimeTest.tc.d.getVar("PACKAGE_CLASSES", True).split()[0]:
|
||||||
skipModule("Rpm is not the primary package manager")
|
skipModule("Rpm is not the primary package manager")
|
||||||
@@ -105,7 +105,7 @@ class PtestRunnerTest(oeRuntimeTest):
|
|||||||
def test_ptestrunner(self):
|
def test_ptestrunner(self):
|
||||||
self.add_smart_channel()
|
self.add_smart_channel()
|
||||||
(runnerstatus, result) = self.target.run('which ptest-runner', 0)
|
(runnerstatus, result) = self.target.run('which ptest-runner', 0)
|
||||||
cond = oeRuntimeTest.hasPackage("ptest-runner") and oeRuntimeTest.hasFeature("ptest") and oeRuntimeTest.hasPackage("-ptest") and (runnerstatus != 0)
|
cond = oeRuntimeTest.hasPackage("ptest-runner") and oeRuntimeTest.hasFeature("ptest") and oeRuntimeTest.hasPackageMatch("-ptest") and (runnerstatus != 0)
|
||||||
if cond:
|
if cond:
|
||||||
self.install_packages(self.install_complementary("*-ptest"))
|
self.install_packages(self.install_complementary("*-ptest"))
|
||||||
self.install_packages(['ptest-runner'])
|
self.install_packages(['ptest-runner'])
|
||||||
|
|||||||
@@ -11,7 +11,7 @@ class BuildIptablesTest(oeRuntimeTest):
|
|||||||
@classmethod
|
@classmethod
|
||||||
def setUpClass(self):
|
def setUpClass(self):
|
||||||
self.project = TargetBuildProject(oeRuntimeTest.tc.target, oeRuntimeTest.tc.d,
|
self.project = TargetBuildProject(oeRuntimeTest.tc.target, oeRuntimeTest.tc.d,
|
||||||
"http://netfilter.org/projects/iptables/files/iptables-1.4.13.tar.bz2")
|
"http://downloads.yoctoproject.org/mirror/sources/iptables-1.4.13.tar.bz2")
|
||||||
self.project.download_archive()
|
self.project.download_archive()
|
||||||
|
|
||||||
@testcase(206)
|
@testcase(206)
|
||||||
|
|||||||
@@ -55,12 +55,21 @@ x86_common = [
|
|||||||
'Could not enable PowerButton event',
|
'Could not enable PowerButton event',
|
||||||
'probe of LNXPWRBN:00 failed with error -22',
|
'probe of LNXPWRBN:00 failed with error -22',
|
||||||
'pmd_set_huge: Cannot satisfy',
|
'pmd_set_huge: Cannot satisfy',
|
||||||
|
'failed to setup card detect gpio',
|
||||||
|
'amd_nb: Cannot enumerate AMD northbridges',
|
||||||
|
'failed to retrieve link info, disabling eDP',
|
||||||
] + common_errors
|
] + common_errors
|
||||||
|
|
||||||
qemux86_common = [
|
qemux86_common = [
|
||||||
'wrong ELF class',
|
'wrong ELF class',
|
||||||
"fail to add MMCONFIG information, can't access extended PCI configuration space under this bridge.",
|
"fail to add MMCONFIG information, can't access extended PCI configuration space under this bridge.",
|
||||||
"can't claim BAR ",
|
"can't claim BAR ",
|
||||||
|
'amd_nb: Cannot enumerate AMD northbridges',
|
||||||
|
'uvesafb: 5000 ms task timeout error',
|
||||||
|
'detected fb_set_par error, error code: -22',
|
||||||
|
'Getting VBE info block failed',
|
||||||
|
'vbe_init() failed with -22',
|
||||||
|
'uvesafb: mode switch failed',
|
||||||
] + common_errors
|
] + common_errors
|
||||||
|
|
||||||
ignore_errors = {
|
ignore_errors = {
|
||||||
@@ -110,11 +119,19 @@ ignore_errors = {
|
|||||||
'(EE) Failed to load module psbdrv',
|
'(EE) Failed to load module psbdrv',
|
||||||
'(EE) open /dev/fb0: No such file or directory',
|
'(EE) open /dev/fb0: No such file or directory',
|
||||||
'(EE) AIGLX: reverting to software rendering',
|
'(EE) AIGLX: reverting to software rendering',
|
||||||
|
'dmi: Firmware registration failed.',
|
||||||
|
'ioremap error for 0x78',
|
||||||
] + x86_common,
|
] + x86_common,
|
||||||
'intel-corei7-64' : x86_common,
|
'intel-corei7-64' : x86_common,
|
||||||
'crownbay' : x86_common,
|
'crownbay' : x86_common,
|
||||||
'genericx86' : x86_common,
|
'genericx86' : x86_common,
|
||||||
'genericx86-64' : x86_common,
|
'genericx86-64' : [
|
||||||
|
'Direct firmware load for i915',
|
||||||
|
'Failed to load firmware i915',
|
||||||
|
'Failed to fetch GuC',
|
||||||
|
'Failed to initialize GuC',
|
||||||
|
'The driver is built-in, so to load the firmware you need to',
|
||||||
|
] + x86_common,
|
||||||
'edgerouter' : [
|
'edgerouter' : [
|
||||||
'Fatal server error:',
|
'Fatal server error:',
|
||||||
] + common_errors,
|
] + common_errors,
|
||||||
@@ -153,6 +170,9 @@ class ParseLogsTest(oeRuntimeTest):
|
|||||||
def getMachine(self):
|
def getMachine(self):
|
||||||
return oeRuntimeTest.tc.d.getVar("MACHINE", True)
|
return oeRuntimeTest.tc.d.getVar("MACHINE", True)
|
||||||
|
|
||||||
|
def getWorkdir(self):
|
||||||
|
return oeRuntimeTest.tc.d.getVar("WORKDIR", True)
|
||||||
|
|
||||||
#get some information on the CPU of the machine to display at the beginning of the output. This info might be useful in some cases.
|
#get some information on the CPU of the machine to display at the beginning of the output. This info might be useful in some cases.
|
||||||
def getHardwareInfo(self):
|
def getHardwareInfo(self):
|
||||||
hwi = ""
|
hwi = ""
|
||||||
@@ -190,16 +210,19 @@ class ParseLogsTest(oeRuntimeTest):
|
|||||||
|
|
||||||
#copy the log files to be parsed locally
|
#copy the log files to be parsed locally
|
||||||
def transfer_logs(self, log_list):
|
def transfer_logs(self, log_list):
|
||||||
target_logs = 'target_logs'
|
workdir = self.getWorkdir()
|
||||||
|
self.target_logs = workdir + '/' + 'target_logs'
|
||||||
|
target_logs = self.target_logs
|
||||||
if not os.path.exists(target_logs):
|
if not os.path.exists(target_logs):
|
||||||
os.makedirs(target_logs)
|
os.makedirs(target_logs)
|
||||||
|
bb.utils.remove(self.target_logs + "/*")
|
||||||
for f in log_list:
|
for f in log_list:
|
||||||
self.target.copy_from(f, target_logs)
|
self.target.copy_from(f, target_logs)
|
||||||
|
|
||||||
#get the local list of logs
|
#get the local list of logs
|
||||||
def get_local_log_list(self, log_locations):
|
def get_local_log_list(self, log_locations):
|
||||||
self.transfer_logs(self.getLogList(log_locations))
|
self.transfer_logs(self.getLogList(log_locations))
|
||||||
logs = [ os.path.join('target_logs',f) for f in os.listdir('target_logs') if os.path.isfile(os.path.join('target_logs',f)) ]
|
logs = [ os.path.join(self.target_logs, f) for f in os.listdir(self.target_logs) if os.path.isfile(os.path.join(self.target_logs, f)) ]
|
||||||
return logs
|
return logs
|
||||||
|
|
||||||
#build the grep command to be used with filters and exclusions
|
#build the grep command to be used with filters and exclusions
|
||||||
|
|||||||
@@ -4,7 +4,7 @@ from oeqa.oetest import oeRuntimeTest, skipModule
|
|||||||
from oeqa.utils.decorators import *
|
from oeqa.utils.decorators import *
|
||||||
|
|
||||||
def setUpModule():
|
def setUpModule():
|
||||||
if not oeRuntimeTest.hasPackage("python"):
|
if not oeRuntimeTest.hasPackage("python-core"):
|
||||||
skipModule("No python package in the image")
|
skipModule("No python package in the image")
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -53,9 +53,9 @@ class RpmInstallRemoveTest(oeRuntimeTest):
|
|||||||
def test_rpm_query_nonroot(self):
|
def test_rpm_query_nonroot(self):
|
||||||
(status, output) = self.target.run('useradd test1')
|
(status, output) = self.target.run('useradd test1')
|
||||||
self.assertTrue(status == 0, msg="Failed to create new user: " + output)
|
self.assertTrue(status == 0, msg="Failed to create new user: " + output)
|
||||||
(status, output) = self.target.run('sudo -u test1 id')
|
(status, output) = self.target.run('su -c id test1')
|
||||||
self.assertTrue('(test1)' in output, msg="Failed to execute as new user")
|
self.assertTrue('(test1)' in output, msg="Failed to execute as new user")
|
||||||
(status, output) = self.target.run('sudo -u test1 rpm -qa')
|
(status, output) = self.target.run('su -c "rpm -qa" test1 ')
|
||||||
self.assertEqual(status, 0, msg="status: %s. Cannot run rpm -qa: %s" % (status, output))
|
self.assertEqual(status, 0, msg="status: %s. Cannot run rpm -qa: %s" % (status, output))
|
||||||
|
|
||||||
@testcase(195)
|
@testcase(195)
|
||||||
@@ -98,4 +98,3 @@ class RpmInstallRemoveTest(oeRuntimeTest):
|
|||||||
@classmethod
|
@classmethod
|
||||||
def tearDownClass(self):
|
def tearDownClass(self):
|
||||||
oeRuntimeTest.tc.target.run('rm -f /tmp/rpm-doc.rpm')
|
oeRuntimeTest.tc.target.run('rm -f /tmp/rpm-doc.rpm')
|
||||||
|
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ from oeqa.utils.httpserver import HTTPService
|
|||||||
def setUpModule():
|
def setUpModule():
|
||||||
if not oeRuntimeTest.hasFeature("package-management"):
|
if not oeRuntimeTest.hasFeature("package-management"):
|
||||||
skipModule("Image doesn't have package management feature")
|
skipModule("Image doesn't have package management feature")
|
||||||
if not oeRuntimeTest.hasPackage("smart"):
|
if not oeRuntimeTest.hasPackage("smartpm"):
|
||||||
skipModule("Image doesn't have smart installed")
|
skipModule("Image doesn't have smart installed")
|
||||||
if "package_rpm" != oeRuntimeTest.tc.d.getVar("PACKAGE_CLASSES", True).split()[0]:
|
if "package_rpm" != oeRuntimeTest.tc.d.getVar("PACKAGE_CLASSES", True).split()[0]:
|
||||||
skipModule("Rpm is not the primary package manager")
|
skipModule("Rpm is not the primary package manager")
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ class BuildIptablesTest(oeSDKTest):
|
|||||||
@classmethod
|
@classmethod
|
||||||
def setUpClass(self):
|
def setUpClass(self):
|
||||||
self.project = SDKBuildProject(oeSDKTest.tc.sdktestdir + "/iptables/", oeSDKTest.tc.sdkenv, oeSDKTest.tc.d,
|
self.project = SDKBuildProject(oeSDKTest.tc.sdktestdir + "/iptables/", oeSDKTest.tc.sdkenv, oeSDKTest.tc.d,
|
||||||
"http://netfilter.org/projects/iptables/files/iptables-1.4.13.tar.bz2")
|
"http://downloads.yoctoproject.org/mirror/sources/iptables-1.4.13.tar.bz2")
|
||||||
self.project.download_archive()
|
self.project.download_archive()
|
||||||
|
|
||||||
def test_iptables(self):
|
def test_iptables(self):
|
||||||
|
|||||||
@@ -3,7 +3,7 @@ from oeqa.utils.decorators import *
|
|||||||
from oeqa.utils.targetbuild import SDKBuildProject
|
from oeqa.utils.targetbuild import SDKBuildProject
|
||||||
|
|
||||||
def setUpModule():
|
def setUpModule():
|
||||||
if not oeSDKTest.hasPackage("gtk\+"):
|
if not oeSDKTest.hasPackage("gtk+"):
|
||||||
skipModule("Image doesn't have gtk+ in manifest")
|
skipModule("Image doesn't have gtk+ in manifest")
|
||||||
|
|
||||||
class SudokuTest(oeSDKTest):
|
class SudokuTest(oeSDKTest):
|
||||||
|
|||||||
@@ -442,6 +442,49 @@ class RecipetoolTests(RecipetoolBase):
|
|||||||
inherits = ['cmake', 'python-dir', 'gettext', 'pkgconfig']
|
inherits = ['cmake', 'python-dir', 'gettext', 'pkgconfig']
|
||||||
self._test_recipe_contents(recipefile, checkvars, inherits)
|
self._test_recipe_contents(recipefile, checkvars, inherits)
|
||||||
|
|
||||||
|
def test_recipetool_create_github(self):
|
||||||
|
# Basic test to see if github URL mangling works
|
||||||
|
temprecipe = os.path.join(self.tempdir, 'recipe')
|
||||||
|
os.makedirs(temprecipe)
|
||||||
|
recipefile = os.path.join(temprecipe, 'meson_git.bb')
|
||||||
|
srcuri = 'https://github.com/mesonbuild/meson;rev=0.32.0'
|
||||||
|
result = runCmd(['recipetool', 'create', '-o', recipefile, srcuri])
|
||||||
|
self.assertTrue(os.path.isfile(recipefile))
|
||||||
|
checkvars = {}
|
||||||
|
checkvars['LICENSE'] = set(['Apache-2.0'])
|
||||||
|
checkvars['SRC_URI'] = 'git://github.com/mesonbuild/meson;protocol=https'
|
||||||
|
inherits = ['setuptools']
|
||||||
|
self._test_recipe_contents(recipefile, checkvars, inherits)
|
||||||
|
|
||||||
|
def test_recipetool_create_github_tarball(self):
|
||||||
|
# Basic test to ensure github URL mangling doesn't apply to release tarballs
|
||||||
|
temprecipe = os.path.join(self.tempdir, 'recipe')
|
||||||
|
os.makedirs(temprecipe)
|
||||||
|
pv = '0.32.0'
|
||||||
|
recipefile = os.path.join(temprecipe, 'meson_%s.bb' % pv)
|
||||||
|
srcuri = 'https://github.com/mesonbuild/meson/releases/download/%s/meson-%s.tar.gz' % (pv, pv)
|
||||||
|
result = runCmd('recipetool create -o %s %s' % (temprecipe, srcuri))
|
||||||
|
self.assertTrue(os.path.isfile(recipefile))
|
||||||
|
checkvars = {}
|
||||||
|
checkvars['LICENSE'] = set(['Apache-2.0'])
|
||||||
|
checkvars['SRC_URI'] = 'https://github.com/mesonbuild/meson/releases/download/${PV}/meson-${PV}.tar.gz'
|
||||||
|
inherits = ['setuptools']
|
||||||
|
self._test_recipe_contents(recipefile, checkvars, inherits)
|
||||||
|
|
||||||
|
def test_recipetool_create_git_http(self):
|
||||||
|
# Basic test to check http git URL mangling works
|
||||||
|
temprecipe = os.path.join(self.tempdir, 'recipe')
|
||||||
|
os.makedirs(temprecipe)
|
||||||
|
recipefile = os.path.join(temprecipe, 'matchbox-terminal_git.bb')
|
||||||
|
srcuri = 'http://git.yoctoproject.org/git/matchbox-terminal'
|
||||||
|
result = runCmd('recipetool create -o %s %s' % (temprecipe, srcuri))
|
||||||
|
self.assertTrue(os.path.isfile(recipefile))
|
||||||
|
checkvars = {}
|
||||||
|
checkvars['LICENSE'] = set(['GPLv2'])
|
||||||
|
checkvars['SRC_URI'] = 'git://git.yoctoproject.org/git/matchbox-terminal;protocol=http'
|
||||||
|
inherits = ['pkgconfig', 'autotools']
|
||||||
|
self._test_recipe_contents(recipefile, checkvars, inherits)
|
||||||
|
|
||||||
class RecipetoolAppendsrcBase(RecipetoolBase):
|
class RecipetoolAppendsrcBase(RecipetoolBase):
|
||||||
def _try_recipetool_appendsrcfile(self, testrecipe, newfile, destfile, options, expectedlines, expectedfiles):
|
def _try_recipetool_appendsrcfile(self, testrecipe, newfile, destfile, options, expectedlines, expectedfiles):
|
||||||
cmd = 'recipetool appendsrcfile %s %s %s %s %s' % (options, self.templayerdir, testrecipe, newfile, destfile)
|
cmd = 'recipetool appendsrcfile %s %s %s %s %s' % (options, self.templayerdir, testrecipe, newfile, destfile)
|
||||||
|
|||||||
@@ -0,0 +1,45 @@
|
|||||||
|
From 6186bcf1bcaaa0f16e79339e07c64c841d4d957d Mon Sep 17 00:00:00 2001
|
||||||
|
From: Alexander Kanavin <alex.kanavin@gmail.com>
|
||||||
|
Date: Fri, 2 Dec 2016 20:52:40 +0200
|
||||||
|
Subject: [PATCH] Enforce -no-pie, if the compiler supports it.
|
||||||
|
|
||||||
|
Add a -no-pie as recent (2 Dec 2016) Debian testing compiler
|
||||||
|
seems to default to enabling PIE when linking. See
|
||||||
|
https://wiki.ubuntu.com/SecurityTeam/PIE
|
||||||
|
|
||||||
|
Upstream-Status: Pending
|
||||||
|
Signed-off-by: Alexander Kanavin <alex.kanavin@gmail.com>
|
||||||
|
---
|
||||||
|
acinclude.m4 | 2 +-
|
||||||
|
configure.ac | 2 +-
|
||||||
|
2 files changed, 2 insertions(+), 2 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/acinclude.m4 b/acinclude.m4
|
||||||
|
index 19200b0..a713923 100644
|
||||||
|
--- a/acinclude.m4
|
||||||
|
+++ b/acinclude.m4
|
||||||
|
@@ -416,7 +416,7 @@ int main() {
|
||||||
|
|
||||||
|
[# `$CC -c -o ...' might not be portable. But, oh, well... Is calling
|
||||||
|
# `ac_compile' like this correct, after all?
|
||||||
|
-if eval "$ac_compile -S -o conftest.s" 2> /dev/null; then]
|
||||||
|
+if eval "$ac_compile -S -o conftest.s" 2> /dev/null && eval "$CC -dumpspecs 2>/dev/null | grep -e no-pie" ; then]
|
||||||
|
AC_MSG_RESULT([yes])
|
||||||
|
[# Should we clear up other files as well, having called `AC_LANG_CONFTEST'?
|
||||||
|
rm -f conftest.s
|
||||||
|
diff --git a/configure.ac b/configure.ac
|
||||||
|
index df20991..506c6b4 100644
|
||||||
|
--- a/configure.ac
|
||||||
|
+++ b/configure.ac
|
||||||
|
@@ -603,7 +603,7 @@ grub_CHECK_PIE
|
||||||
|
[# Need that, because some distributions ship compilers that include
|
||||||
|
# `-fPIE' in the default specs.
|
||||||
|
if [ x"$pie_possible" = xyes ]; then
|
||||||
|
- TARGET_CFLAGS="$TARGET_CFLAGS -fno-PIE"
|
||||||
|
+ TARGET_CFLAGS="$TARGET_CFLAGS -fno-PIE -no-pie"
|
||||||
|
fi]
|
||||||
|
|
||||||
|
# Position independent executable.
|
||||||
|
--
|
||||||
|
2.10.2
|
||||||
|
|
||||||
@@ -31,6 +31,7 @@ SRC_URI = "ftp://ftp.gnu.org/gnu/grub/grub-${PV}.tar.gz \
|
|||||||
file://0001-Remove-direct-_llseek-code-and-require-long-filesyst.patch \
|
file://0001-Remove-direct-_llseek-code-and-require-long-filesyst.patch \
|
||||||
file://fix-texinfo.patch \
|
file://fix-texinfo.patch \
|
||||||
file://0001-grub-core-gettext-gettext.c-main_context-secondary_c.patch \
|
file://0001-grub-core-gettext-gettext.c-main_context-secondary_c.patch \
|
||||||
|
file://0001-Enforce-no-pie-if-the-compiler-supports-it.patch \
|
||||||
"
|
"
|
||||||
|
|
||||||
DEPENDS = "flex-native bison-native"
|
DEPENDS = "flex-native bison-native"
|
||||||
|
|||||||
90
meta/recipes-connectivity/bind/bind/CVE-2016-2775.patch
Normal file
90
meta/recipes-connectivity/bind/bind/CVE-2016-2775.patch
Normal file
@@ -0,0 +1,90 @@
|
|||||||
|
From 9d8aba8a7778721ae2cee6e4670a8e6be6590b05 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Mark Andrews <marka@isc.org>
|
||||||
|
Date: Wed, 12 Oct 2016 19:52:59 +0900
|
||||||
|
Subject: [PATCH]
|
||||||
|
4406. [security] getrrsetbyname with a non absolute name could
|
||||||
|
trigger an infinite recursion bug in lwresd
|
||||||
|
and named with lwres configured if when combined
|
||||||
|
with a search list entry the resulting name is
|
||||||
|
too long. (CVE-2016-2775) [RT #42694]
|
||||||
|
|
||||||
|
Backport commit 38cc2d14e218e536e0102fa70deef99461354232 from the
|
||||||
|
v9.11.0_patch branch.
|
||||||
|
|
||||||
|
CVE: CVE-2016-2775
|
||||||
|
Upstream-Status: Backport
|
||||||
|
|
||||||
|
Signed-off-by: zhengruoqin <zhengrq.fnst@cn.fujitsu.com>
|
||||||
|
|
||||||
|
---
|
||||||
|
CHANGES | 6 ++++++
|
||||||
|
bin/named/lwdgrbn.c | 16 ++++++++++------
|
||||||
|
bin/tests/system/lwresd/lwtest.c | 9 ++++++++-
|
||||||
|
3 files changed, 24 insertions(+), 7 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/CHANGES b/CHANGES
|
||||||
|
index d2e3360..d0a9d12 100644
|
||||||
|
--- a/CHANGES
|
||||||
|
+++ b/CHANGES
|
||||||
|
@@ -1,3 +1,9 @@
|
||||||
|
+4406. [security] getrrsetbyname with a non absolute name could
|
||||||
|
+ trigger an infinite recursion bug in lwresd
|
||||||
|
+ and named with lwres configured if when combined
|
||||||
|
+ with a search list entry the resulting name is
|
||||||
|
+ too long. (CVE-2016-2775) [RT #42694]
|
||||||
|
+
|
||||||
|
4322. [security] Duplicate EDNS COOKIE options in a response could
|
||||||
|
trigger an assertion failure. (CVE-2016-2088)
|
||||||
|
[RT #41809]
|
||||||
|
diff --git a/bin/named/lwdgrbn.c b/bin/named/lwdgrbn.c
|
||||||
|
index 3e7b15b..e1e9adc 100644
|
||||||
|
--- a/bin/named/lwdgrbn.c
|
||||||
|
+++ b/bin/named/lwdgrbn.c
|
||||||
|
@@ -403,14 +403,18 @@ start_lookup(ns_lwdclient_t *client) {
|
||||||
|
INSIST(client->lookup == NULL);
|
||||||
|
|
||||||
|
dns_fixedname_init(&absname);
|
||||||
|
- result = ns_lwsearchctx_current(&client->searchctx,
|
||||||
|
- dns_fixedname_name(&absname));
|
||||||
|
+
|
||||||
|
/*
|
||||||
|
- * This will return failure if relative name + suffix is too long.
|
||||||
|
- * In this case, just go on to the next entry in the search path.
|
||||||
|
+ * Perform search across all search domains until success
|
||||||
|
+ * is returned. Return in case of failure.
|
||||||
|
*/
|
||||||
|
- if (result != ISC_R_SUCCESS)
|
||||||
|
- start_lookup(client);
|
||||||
|
+ while (ns_lwsearchctx_current(&client->searchctx,
|
||||||
|
+ dns_fixedname_name(&absname)) != ISC_R_SUCCESS) {
|
||||||
|
+ if (ns_lwsearchctx_next(&client->searchctx) != ISC_R_SUCCESS) {
|
||||||
|
+ ns_lwdclient_errorpktsend(client, LWRES_R_FAILURE);
|
||||||
|
+ return;
|
||||||
|
+ }
|
||||||
|
+ }
|
||||||
|
|
||||||
|
result = dns_lookup_create(cm->mctx,
|
||||||
|
dns_fixedname_name(&absname),
|
||||||
|
diff --git a/bin/tests/system/lwresd/lwtest.c b/bin/tests/system/lwresd/lwtest.c
|
||||||
|
index ad9b551..3eb4a66 100644
|
||||||
|
--- a/bin/tests/system/lwresd/lwtest.c
|
||||||
|
+++ b/bin/tests/system/lwresd/lwtest.c
|
||||||
|
@@ -768,7 +768,14 @@ main(void) {
|
||||||
|
test_getrrsetbyname("e.example1.", 1, 2, 1, 1, 1);
|
||||||
|
test_getrrsetbyname("e.example1.", 1, 46, 2, 0, 1);
|
||||||
|
test_getrrsetbyname("", 1, 1, 0, 0, 0);
|
||||||
|
-
|
||||||
|
+ test_getrrsetbyname("123456789.123456789.123456789.123456789."
|
||||||
|
+ "123456789.123456789.123456789.123456789."
|
||||||
|
+ "123456789.123456789.123456789.123456789."
|
||||||
|
+ "123456789.123456789.123456789.123456789."
|
||||||
|
+ "123456789.123456789.123456789.123456789."
|
||||||
|
+ "123456789.123456789.123456789.123456789."
|
||||||
|
+ "123456789", 1, 1, 0, 0, 0);
|
||||||
|
+
|
||||||
|
if (fails == 0)
|
||||||
|
printf("I:ok\n");
|
||||||
|
return (fails);
|
||||||
|
--
|
||||||
|
2.7.4
|
||||||
|
|
||||||
123
meta/recipes-connectivity/bind/bind/CVE-2016-2776.patch
Normal file
123
meta/recipes-connectivity/bind/bind/CVE-2016-2776.patch
Normal file
@@ -0,0 +1,123 @@
|
|||||||
|
From 1171111657081970585f9f0e03b476358c33a6c0 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Mark Andrews <marka@isc.org>
|
||||||
|
Date: Wed, 12 Oct 2016 20:36:52 +0900
|
||||||
|
Subject: [PATCH]
|
||||||
|
4467. [security] It was possible to trigger an assertion when
|
||||||
|
rendering a message. (CVE-2016-2776) [RT #43139]
|
||||||
|
|
||||||
|
Backport commit 2bd0922cf995b9ac205fc83baf7e220b95c6bf12 from the
|
||||||
|
v9.11.0_patch branch.
|
||||||
|
|
||||||
|
CVE: CVE-2016-2776
|
||||||
|
Upstream-Status: Backport
|
||||||
|
|
||||||
|
Signed-off-by: zhengruoqin <zhengrq.fnst@cn.fujitsu.com>
|
||||||
|
|
||||||
|
---
|
||||||
|
CHANGES | 3 +++
|
||||||
|
lib/dns/message.c | 42 +++++++++++++++++++++++++++++++-----------
|
||||||
|
2 files changed, 34 insertions(+), 11 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/CHANGES b/CHANGES
|
||||||
|
index d0a9d12..5c8c61a 100644
|
||||||
|
--- a/CHANGES
|
||||||
|
+++ b/CHANGES
|
||||||
|
@@ -1,3 +1,6 @@
|
||||||
|
+4467. [security] It was possible to trigger an assertion when
|
||||||
|
+ rendering a message. (CVE-2016-2776) [RT #43139]
|
||||||
|
+
|
||||||
|
4406. [security] getrrsetbyname with a non absolute name could
|
||||||
|
trigger an infinite recursion bug in lwresd
|
||||||
|
and named with lwres configured if when combined
|
||||||
|
diff --git a/lib/dns/message.c b/lib/dns/message.c
|
||||||
|
index 6b5b4bb..b74dc81 100644
|
||||||
|
--- a/lib/dns/message.c
|
||||||
|
+++ b/lib/dns/message.c
|
||||||
|
@@ -1754,7 +1754,7 @@ dns_message_renderbegin(dns_message_t *msg, dns_compress_t *cctx,
|
||||||
|
if (r.length < DNS_MESSAGE_HEADERLEN)
|
||||||
|
return (ISC_R_NOSPACE);
|
||||||
|
|
||||||
|
- if (r.length < msg->reserved)
|
||||||
|
+ if (r.length - DNS_MESSAGE_HEADERLEN < msg->reserved)
|
||||||
|
return (ISC_R_NOSPACE);
|
||||||
|
|
||||||
|
/*
|
||||||
|
@@ -1895,8 +1895,29 @@ norender_rdataset(const dns_rdataset_t *rdataset, unsigned int options,
|
||||||
|
|
||||||
|
return (ISC_TRUE);
|
||||||
|
}
|
||||||
|
-
|
||||||
|
#endif
|
||||||
|
+
|
||||||
|
+static isc_result_t
|
||||||
|
+renderset(dns_rdataset_t *rdataset, dns_name_t *owner_name,
|
||||||
|
+ dns_compress_t *cctx, isc_buffer_t *target,
|
||||||
|
+ unsigned int reserved, unsigned int options, unsigned int *countp)
|
||||||
|
+{
|
||||||
|
+ isc_result_t result;
|
||||||
|
+
|
||||||
|
+ /*
|
||||||
|
+ * Shrink the space in the buffer by the reserved amount.
|
||||||
|
+ */
|
||||||
|
+ if (target->length - target->used < reserved)
|
||||||
|
+ return (ISC_R_NOSPACE);
|
||||||
|
+
|
||||||
|
+ target->length -= reserved;
|
||||||
|
+ result = dns_rdataset_towire(rdataset, owner_name,
|
||||||
|
+ cctx, target, options, countp);
|
||||||
|
+ target->length += reserved;
|
||||||
|
+
|
||||||
|
+ return (result);
|
||||||
|
+}
|
||||||
|
+
|
||||||
|
isc_result_t
|
||||||
|
dns_message_rendersection(dns_message_t *msg, dns_section_t sectionid,
|
||||||
|
unsigned int options)
|
||||||
|
@@ -1939,6 +1960,8 @@ dns_message_rendersection(dns_message_t *msg, dns_section_t sectionid,
|
||||||
|
/*
|
||||||
|
* Shrink the space in the buffer by the reserved amount.
|
||||||
|
*/
|
||||||
|
+ if (msg->buffer->length - msg->buffer->used < msg->reserved)
|
||||||
|
+ return (ISC_R_NOSPACE);
|
||||||
|
msg->buffer->length -= msg->reserved;
|
||||||
|
|
||||||
|
total = 0;
|
||||||
|
@@ -2214,9 +2237,8 @@ dns_message_renderend(dns_message_t *msg) {
|
||||||
|
* Render.
|
||||||
|
*/
|
||||||
|
count = 0;
|
||||||
|
- result = dns_rdataset_towire(msg->opt, dns_rootname,
|
||||||
|
- msg->cctx, msg->buffer, 0,
|
||||||
|
- &count);
|
||||||
|
+ result = renderset(msg->opt, dns_rootname, msg->cctx,
|
||||||
|
+ msg->buffer, msg->reserved, 0, &count);
|
||||||
|
msg->counts[DNS_SECTION_ADDITIONAL] += count;
|
||||||
|
if (result != ISC_R_SUCCESS)
|
||||||
|
return (result);
|
||||||
|
@@ -2232,9 +2254,8 @@ dns_message_renderend(dns_message_t *msg) {
|
||||||
|
if (result != ISC_R_SUCCESS)
|
||||||
|
return (result);
|
||||||
|
count = 0;
|
||||||
|
- result = dns_rdataset_towire(msg->tsig, msg->tsigname,
|
||||||
|
- msg->cctx, msg->buffer, 0,
|
||||||
|
- &count);
|
||||||
|
+ result = renderset(msg->tsig, msg->tsigname, msg->cctx,
|
||||||
|
+ msg->buffer, msg->reserved, 0, &count);
|
||||||
|
msg->counts[DNS_SECTION_ADDITIONAL] += count;
|
||||||
|
if (result != ISC_R_SUCCESS)
|
||||||
|
return (result);
|
||||||
|
@@ -2255,9 +2276,8 @@ dns_message_renderend(dns_message_t *msg) {
|
||||||
|
* the owner name of a SIG(0) is irrelevant, and will not
|
||||||
|
* be set in a message being rendered.
|
||||||
|
*/
|
||||||
|
- result = dns_rdataset_towire(msg->sig0, dns_rootname,
|
||||||
|
- msg->cctx, msg->buffer, 0,
|
||||||
|
- &count);
|
||||||
|
+ result = renderset(msg->sig0, dns_rootname, msg->cctx,
|
||||||
|
+ msg->buffer, msg->reserved, 0, &count);
|
||||||
|
msg->counts[DNS_SECTION_ADDITIONAL] += count;
|
||||||
|
if (result != ISC_R_SUCCESS)
|
||||||
|
return (result);
|
||||||
|
--
|
||||||
|
2.7.4
|
||||||
|
|
||||||
@@ -25,6 +25,8 @@ SRC_URI = "ftp://ftp.isc.org/isc/bind9/${PV}/${BPN}-${PV}.tar.gz \
|
|||||||
file://CVE-2016-1286_1.patch \
|
file://CVE-2016-1286_1.patch \
|
||||||
file://CVE-2016-1286_2.patch \
|
file://CVE-2016-1286_2.patch \
|
||||||
file://CVE-2016-2088.patch \
|
file://CVE-2016-2088.patch \
|
||||||
|
file://CVE-2016-2775.patch \
|
||||||
|
file://CVE-2016-2776.patch \
|
||||||
"
|
"
|
||||||
|
|
||||||
SRC_URI[md5sum] = "bcf7e772b616f7259420a3edc5df350a"
|
SRC_URI[md5sum] = "bcf7e772b616f7259420a3edc5df350a"
|
||||||
|
|||||||
@@ -0,0 +1,33 @@
|
|||||||
|
From 85bdcd7c92fe7ff133bbc4e10a65c91810f88755 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Damien Miller <djm@mindrot.org>
|
||||||
|
Date: Wed, 13 Apr 2016 10:39:57 +1000
|
||||||
|
Subject: [PATCH] ignore PAM environment vars when UseLogin=yes
|
||||||
|
|
||||||
|
If PAM is configured to read user-specified environment variables
|
||||||
|
and UseLogin=yes in sshd_config, then a hostile local user may
|
||||||
|
attack /bin/login via LD_PRELOAD or similar environment variables
|
||||||
|
set via PAM.
|
||||||
|
|
||||||
|
CVE-2015-8325, found by Shayan Sadigh, via Colin Watson
|
||||||
|
|
||||||
|
Upstream-Status: Backport
|
||||||
|
CVE: CVE-2015-8325
|
||||||
|
Signed-off-by: Armin Kuster <akuster@mvista.com>
|
||||||
|
|
||||||
|
---
|
||||||
|
session.c | 2 +-
|
||||||
|
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||||
|
|
||||||
|
Index: openssh-7.1p2/session.c
|
||||||
|
===================================================================
|
||||||
|
--- openssh-7.1p2.orig/session.c
|
||||||
|
+++ openssh-7.1p2/session.c
|
||||||
|
@@ -1315,7 +1315,7 @@ do_setup_env(Session *s, const char *she
|
||||||
|
* Pull in any environment variables that may have
|
||||||
|
* been set by PAM.
|
||||||
|
*/
|
||||||
|
- if (options.use_pam) {
|
||||||
|
+ if (options.use_pam && !options.use_login) {
|
||||||
|
char **p;
|
||||||
|
|
||||||
|
p = fetch_pam_child_environment();
|
||||||
114
meta/recipes-connectivity/openssh/openssh/CVE-2016-6210.patch
Normal file
114
meta/recipes-connectivity/openssh/openssh/CVE-2016-6210.patch
Normal file
@@ -0,0 +1,114 @@
|
|||||||
|
From 9286875a73b2de7736b5e50692739d314cd8d9dc Mon Sep 17 00:00:00 2001
|
||||||
|
From: Darren Tucker <dtucker@zip.com.au>
|
||||||
|
Date: Fri, 15 Jul 2016 13:32:45 +1000
|
||||||
|
Subject: [PATCH] Determine appropriate salt for invalid users.
|
||||||
|
|
||||||
|
When sshd is processing a non-PAM login for a non-existent user it uses
|
||||||
|
the string from the fakepw structure as the salt for crypt(3)ing the
|
||||||
|
password supplied by the client. That string has a Blowfish prefix, so on
|
||||||
|
systems that don't understand that crypt will fail fast due to an invalid
|
||||||
|
salt, and even on those that do it may have significantly different timing
|
||||||
|
from the hash methods used for real accounts (eg sha512). This allows
|
||||||
|
user enumeration by, eg, sending large password strings. This was noted
|
||||||
|
by EddieEzra.Harari at verint.com (CVE-2016-6210).
|
||||||
|
|
||||||
|
To mitigate, use the same hash algorithm that root uses for hashing
|
||||||
|
passwords for users that do not exist on the system. ok djm@
|
||||||
|
|
||||||
|
Upstream-Status: Backport
|
||||||
|
OpenSSH < 7.3
|
||||||
|
CVE: CVE-2016-6210 patch1
|
||||||
|
Signed-off-by: Armin Kuster <akuster@mvista.com>
|
||||||
|
|
||||||
|
---
|
||||||
|
auth-passwd.c | 12 ++++++++----
|
||||||
|
openbsd-compat/xcrypt.c | 34 ++++++++++++++++++++++++++++++++++
|
||||||
|
2 files changed, 42 insertions(+), 4 deletions(-)
|
||||||
|
|
||||||
|
Index: openssh-7.1p2/auth-passwd.c
|
||||||
|
===================================================================
|
||||||
|
--- openssh-7.1p2.orig/auth-passwd.c
|
||||||
|
+++ openssh-7.1p2/auth-passwd.c
|
||||||
|
@@ -198,7 +198,7 @@ int
|
||||||
|
sys_auth_passwd(Authctxt *authctxt, const char *password)
|
||||||
|
{
|
||||||
|
struct passwd *pw = authctxt->pw;
|
||||||
|
- char *encrypted_password;
|
||||||
|
+ char *encrypted_password, *salt = NULL;
|
||||||
|
|
||||||
|
/* Just use the supplied fake password if authctxt is invalid */
|
||||||
|
char *pw_password = authctxt->valid ? shadow_pw(pw) : pw->pw_passwd;
|
||||||
|
@@ -207,9 +207,13 @@ sys_auth_passwd(Authctxt *authctxt, cons
|
||||||
|
if (strcmp(pw_password, "") == 0 && strcmp(password, "") == 0)
|
||||||
|
return (1);
|
||||||
|
|
||||||
|
- /* Encrypt the candidate password using the proper salt. */
|
||||||
|
- encrypted_password = xcrypt(password,
|
||||||
|
- (pw_password[0] && pw_password[1]) ? pw_password : "xx");
|
||||||
|
+ /*
|
||||||
|
+ * Encrypt the candidate password using the proper salt, or pass a
|
||||||
|
+ * NULL and let xcrypt pick one.
|
||||||
|
+ */
|
||||||
|
+ if (authctxt->valid && pw_password[0] && pw_password[1])
|
||||||
|
+ salt = pw_password;
|
||||||
|
+ encrypted_password = xcrypt(password, salt);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Authentication is accepted if the encrypted passwords
|
||||||
|
Index: openssh-7.1p2/openbsd-compat/xcrypt.c
|
||||||
|
===================================================================
|
||||||
|
--- openssh-7.1p2.orig/openbsd-compat/xcrypt.c
|
||||||
|
+++ openssh-7.1p2/openbsd-compat/xcrypt.c
|
||||||
|
@@ -25,6 +25,7 @@
|
||||||
|
#include "includes.h"
|
||||||
|
|
||||||
|
#include <sys/types.h>
|
||||||
|
+#include <string.h>
|
||||||
|
#include <unistd.h>
|
||||||
|
#include <pwd.h>
|
||||||
|
|
||||||
|
@@ -62,11 +63,44 @@
|
||||||
|
# define crypt DES_crypt
|
||||||
|
# endif
|
||||||
|
|
||||||
|
+/*
|
||||||
|
+ * Pick an appropriate password encryption type and salt for the running
|
||||||
|
+ * system.
|
||||||
|
+ */
|
||||||
|
+static const char *
|
||||||
|
+pick_salt(void)
|
||||||
|
+{
|
||||||
|
+ struct passwd *pw;
|
||||||
|
+ char *passwd, *p;
|
||||||
|
+ size_t typelen;
|
||||||
|
+ static char salt[32];
|
||||||
|
+
|
||||||
|
+ if (salt[0] != '\0')
|
||||||
|
+ return salt;
|
||||||
|
+ strlcpy(salt, "xx", sizeof(salt));
|
||||||
|
+ if ((pw = getpwuid(0)) == NULL)
|
||||||
|
+ return salt;
|
||||||
|
+ passwd = shadow_pw(pw);
|
||||||
|
+ if (passwd[0] != '$' || (p = strrchr(passwd + 1, '$')) == NULL)
|
||||||
|
+ return salt; /* no $, DES */
|
||||||
|
+ typelen = p - passwd + 1;
|
||||||
|
+ strlcpy(salt, passwd, MIN(typelen, sizeof(salt)));
|
||||||
|
+ explicit_bzero(passwd, strlen(passwd));
|
||||||
|
+ return salt;
|
||||||
|
+}
|
||||||
|
+
|
||||||
|
char *
|
||||||
|
xcrypt(const char *password, const char *salt)
|
||||||
|
{
|
||||||
|
char *crypted;
|
||||||
|
|
||||||
|
+ /*
|
||||||
|
+ * If we don't have a salt we are encrypting a fake password for
|
||||||
|
+ * for timing purposes. Pick an appropriate salt.
|
||||||
|
+ */
|
||||||
|
+ if (salt == NULL)
|
||||||
|
+ salt = pick_salt();
|
||||||
|
+
|
||||||
|
# ifdef HAVE_MD5_PASSWORDS
|
||||||
|
if (is_md5_salt(salt))
|
||||||
|
crypted = md5_crypt(password, salt);
|
||||||
110
meta/recipes-connectivity/openssh/openssh/CVE-2016-6210_p2.patch
Normal file
110
meta/recipes-connectivity/openssh/openssh/CVE-2016-6210_p2.patch
Normal file
@@ -0,0 +1,110 @@
|
|||||||
|
From 283b97ff33ea2c641161950849931bd578de6946 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Darren Tucker <dtucker@zip.com.au>
|
||||||
|
Date: Fri, 15 Jul 2016 13:49:44 +1000
|
||||||
|
Subject: [PATCH] Mitigate timing of disallowed users PAM logins.
|
||||||
|
|
||||||
|
When sshd decides to not allow a login (eg PermitRootLogin=no) and
|
||||||
|
it's using PAM, it sends a fake password to PAM so that the timing for
|
||||||
|
the failure is not noticeably different whether or not the password
|
||||||
|
is correct. This behaviour can be detected by sending a very long
|
||||||
|
password string which is slower to hash than the fake password.
|
||||||
|
|
||||||
|
Mitigate by constructing an invalid password that is the same length
|
||||||
|
as the one from the client and thus takes the same time to hash.
|
||||||
|
Diff from djm@
|
||||||
|
|
||||||
|
Upstream-Status: Backport
|
||||||
|
CVE: CVE-2016-6210 patch2
|
||||||
|
Signed-off-by: Armin Kuster <akuster@mvista.com>
|
||||||
|
|
||||||
|
---
|
||||||
|
auth-pam.c | 35 +++++++++++++++++++++++++++++++----
|
||||||
|
1 file changed, 31 insertions(+), 4 deletions(-)
|
||||||
|
|
||||||
|
Index: openssh-7.1p2/auth-pam.c
|
||||||
|
===================================================================
|
||||||
|
--- openssh-7.1p2.orig/auth-pam.c
|
||||||
|
+++ openssh-7.1p2/auth-pam.c
|
||||||
|
@@ -231,7 +231,6 @@ static int sshpam_account_status = -1;
|
||||||
|
static char **sshpam_env = NULL;
|
||||||
|
static Authctxt *sshpam_authctxt = NULL;
|
||||||
|
static const char *sshpam_password = NULL;
|
||||||
|
-static char badpw[] = "\b\n\r\177INCORRECT";
|
||||||
|
|
||||||
|
/* Some PAM implementations don't implement this */
|
||||||
|
#ifndef HAVE_PAM_GETENVLIST
|
||||||
|
@@ -809,12 +808,35 @@ sshpam_query(void *ctx, char **name, cha
|
||||||
|
return (-1);
|
||||||
|
}
|
||||||
|
|
||||||
|
+/*
|
||||||
|
+ * Returns a junk password of identical length to that the user supplied.
|
||||||
|
+ * Used to mitigate timing attacks against crypt(3)/PAM stacks that
|
||||||
|
+ * vary processing time in proportion to password length.
|
||||||
|
+ */
|
||||||
|
+static char *
|
||||||
|
+fake_password(const char *wire_password)
|
||||||
|
+{
|
||||||
|
+ const char junk[] = "\b\n\r\177INCORRECT";
|
||||||
|
+ char *ret = NULL;
|
||||||
|
+ size_t i, l = wire_password != NULL ? strlen(wire_password) : 0;
|
||||||
|
+
|
||||||
|
+ if (l >= INT_MAX)
|
||||||
|
+ fatal("%s: password length too long: %zu", __func__, l);
|
||||||
|
+
|
||||||
|
+ ret = malloc(l + 1);
|
||||||
|
+ for (i = 0; i < l; i++)
|
||||||
|
+ ret[i] = junk[i % (sizeof(junk) - 1)];
|
||||||
|
+ ret[i] = '\0';
|
||||||
|
+ return ret;
|
||||||
|
+}
|
||||||
|
+
|
||||||
|
/* XXX - see also comment in auth-chall.c:verify_response */
|
||||||
|
static int
|
||||||
|
sshpam_respond(void *ctx, u_int num, char **resp)
|
||||||
|
{
|
||||||
|
Buffer buffer;
|
||||||
|
struct pam_ctxt *ctxt = ctx;
|
||||||
|
+ char *fake;
|
||||||
|
|
||||||
|
debug2("PAM: %s entering, %u responses", __func__, num);
|
||||||
|
switch (ctxt->pam_done) {
|
||||||
|
@@ -835,8 +857,11 @@ sshpam_respond(void *ctx, u_int num, cha
|
||||||
|
(sshpam_authctxt->pw->pw_uid != 0 ||
|
||||||
|
options.permit_root_login == PERMIT_YES))
|
||||||
|
buffer_put_cstring(&buffer, *resp);
|
||||||
|
- else
|
||||||
|
- buffer_put_cstring(&buffer, badpw);
|
||||||
|
+ else {
|
||||||
|
+ fake = fake_password(*resp);
|
||||||
|
+ buffer_put_cstring(&buffer, fake);
|
||||||
|
+ free(fake);
|
||||||
|
+ }
|
||||||
|
if (ssh_msg_send(ctxt->pam_psock, PAM_AUTHTOK, &buffer) == -1) {
|
||||||
|
buffer_free(&buffer);
|
||||||
|
return (-1);
|
||||||
|
@@ -1180,6 +1205,7 @@ sshpam_auth_passwd(Authctxt *authctxt, c
|
||||||
|
{
|
||||||
|
int flags = (options.permit_empty_passwd == 0 ?
|
||||||
|
PAM_DISALLOW_NULL_AUTHTOK : 0);
|
||||||
|
+ char *fake = NULL;
|
||||||
|
|
||||||
|
if (!options.use_pam || sshpam_handle == NULL)
|
||||||
|
fatal("PAM: %s called when PAM disabled or failed to "
|
||||||
|
@@ -1195,7 +1221,7 @@ sshpam_auth_passwd(Authctxt *authctxt, c
|
||||||
|
*/
|
||||||
|
if (!authctxt->valid || (authctxt->pw->pw_uid == 0 &&
|
||||||
|
options.permit_root_login != PERMIT_YES))
|
||||||
|
- sshpam_password = badpw;
|
||||||
|
+ sshpam_password = fake = fake_password(password);
|
||||||
|
|
||||||
|
sshpam_err = pam_set_item(sshpam_handle, PAM_CONV,
|
||||||
|
(const void *)&passwd_conv);
|
||||||
|
@@ -1205,6 +1231,7 @@ sshpam_auth_passwd(Authctxt *authctxt, c
|
||||||
|
|
||||||
|
sshpam_err = pam_authenticate(sshpam_handle, flags);
|
||||||
|
sshpam_password = NULL;
|
||||||
|
+ free(fake);
|
||||||
|
if (sshpam_err == PAM_SUCCESS && authctxt->valid) {
|
||||||
|
debug("PAM: password authentication accepted for %.100s",
|
||||||
|
authctxt->user);
|
||||||
@@ -0,0 +1,62 @@
|
|||||||
|
From dbf788b4d9d9490a5fff08a7b09888272bb10fcc Mon Sep 17 00:00:00 2001
|
||||||
|
From: Darren Tucker <dtucker@zip.com.au>
|
||||||
|
Date: Thu, 21 Jul 2016 14:17:31 +1000
|
||||||
|
Subject: [PATCH] Search users for one with a valid salt.
|
||||||
|
|
||||||
|
If the root account is locked (eg password "!!" or "*LK*") keep looking
|
||||||
|
until we find a user with a valid salt to use for crypting passwords of
|
||||||
|
invalid users. ok djm@
|
||||||
|
|
||||||
|
Upstream-Status: Backport
|
||||||
|
CVE: CVE-2016-6210
|
||||||
|
Signed-off-by: Armin Kuster <akuster@mvista.com>
|
||||||
|
|
||||||
|
---
|
||||||
|
openbsd-compat/xcrypt.c | 24 +++++++++++++++---------
|
||||||
|
1 file changed, 15 insertions(+), 9 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/openbsd-compat/xcrypt.c b/openbsd-compat/xcrypt.c
|
||||||
|
index 8913bb8..cf6a9b9 100644
|
||||||
|
--- a/openbsd-compat/xcrypt.c
|
||||||
|
+++ b/openbsd-compat/xcrypt.c
|
||||||
|
@@ -65,7 +65,9 @@
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Pick an appropriate password encryption type and salt for the running
|
||||||
|
- * system.
|
||||||
|
+ * system by searching through accounts until we find one that has a valid
|
||||||
|
+ * salt. Usually this will be root unless the root account is locked out.
|
||||||
|
+ * If we don't find one we return a traditional DES-based salt.
|
||||||
|
*/
|
||||||
|
static const char *
|
||||||
|
pick_salt(void)
|
||||||
|
@@ -78,14 +80,18 @@ pick_salt(void)
|
||||||
|
if (salt[0] != '\0')
|
||||||
|
return salt;
|
||||||
|
strlcpy(salt, "xx", sizeof(salt));
|
||||||
|
- if ((pw = getpwuid(0)) == NULL)
|
||||||
|
- return salt;
|
||||||
|
- passwd = shadow_pw(pw);
|
||||||
|
- if (passwd[0] != '$' || (p = strrchr(passwd + 1, '$')) == NULL)
|
||||||
|
- return salt; /* no $, DES */
|
||||||
|
- typelen = p - passwd + 1;
|
||||||
|
- strlcpy(salt, passwd, MIN(typelen, sizeof(salt)));
|
||||||
|
- explicit_bzero(passwd, strlen(passwd));
|
||||||
|
+ setpwent();
|
||||||
|
+ while ((pw = getpwent()) != NULL) {
|
||||||
|
+ passwd = shadow_pw(pw);
|
||||||
|
+ if (passwd[0] == '$' && (p = strrchr(passwd+1, '$')) != NULL) {
|
||||||
|
+ typelen = p - passwd + 1;
|
||||||
|
+ strlcpy(salt, passwd, MIN(typelen, sizeof(salt)));
|
||||||
|
+ explicit_bzero(passwd, strlen(passwd));
|
||||||
|
+ goto out;
|
||||||
|
+ }
|
||||||
|
+ }
|
||||||
|
+ out:
|
||||||
|
+ endpwent();
|
||||||
|
return salt;
|
||||||
|
}
|
||||||
|
|
||||||
|
--
|
||||||
|
2.7.4
|
||||||
|
|
||||||
@@ -0,0 +1,54 @@
|
|||||||
|
From fcd135c9df440bcd2d5870405ad3311743d78d97 Mon Sep 17 00:00:00 2001
|
||||||
|
From: "dtucker@openbsd.org" <dtucker@openbsd.org>
|
||||||
|
Date: Thu, 21 Jul 2016 01:39:35 +0000
|
||||||
|
Subject: [PATCH] upstream commit
|
||||||
|
|
||||||
|
Skip passwords longer than 1k in length so clients can't
|
||||||
|
easily DoS sshd by sending very long passwords, causing it to spend CPU
|
||||||
|
hashing them. feedback djm@, ok markus@.
|
||||||
|
|
||||||
|
Brought to our attention by tomas.kuthan at oracle.com, shilei-c at
|
||||||
|
360.cn and coredump at autistici.org
|
||||||
|
|
||||||
|
Upstream-ID: d0af7d4a2190b63ba1d38eec502bc4be0be9e333
|
||||||
|
|
||||||
|
Upstream-Status: Backport
|
||||||
|
CVE: CVE-2016-6515
|
||||||
|
Signed-off-by: Armin Kuster <akuster@mvista.com>
|
||||||
|
|
||||||
|
---
|
||||||
|
auth-passwd.c | 7 ++++++-
|
||||||
|
1 file changed, 6 insertions(+), 1 deletion(-)
|
||||||
|
|
||||||
|
diff --git a/auth-passwd.c b/auth-passwd.c
|
||||||
|
index 530b5d4..996c2cf 100644
|
||||||
|
--- a/auth-passwd.c
|
||||||
|
+++ b/auth-passwd.c
|
||||||
|
@@ -1,4 +1,4 @@
|
||||||
|
-/* $OpenBSD: auth-passwd.c,v 1.44 2014/07/15 15:54:14 millert Exp $ */
|
||||||
|
+/* $OpenBSD: auth-passwd.c,v 1.45 2016/07/21 01:39:35 dtucker Exp $ */
|
||||||
|
/*
|
||||||
|
* Author: Tatu Ylonen <ylo@cs.hut.fi>
|
||||||
|
* Copyright (c) 1995 Tatu Ylonen <ylo@cs.hut.fi>, Espoo, Finland
|
||||||
|
@@ -66,6 +66,8 @@ extern login_cap_t *lc;
|
||||||
|
#define DAY (24L * 60 * 60) /* 1 day in seconds */
|
||||||
|
#define TWO_WEEKS (2L * 7 * DAY) /* 2 weeks in seconds */
|
||||||
|
|
||||||
|
+#define MAX_PASSWORD_LEN 1024
|
||||||
|
+
|
||||||
|
void
|
||||||
|
disable_forwarding(void)
|
||||||
|
{
|
||||||
|
@@ -87,6 +89,9 @@ auth_password(Authctxt *authctxt, const char *password)
|
||||||
|
static int expire_checked = 0;
|
||||||
|
#endif
|
||||||
|
|
||||||
|
+ if (strlen(password) > MAX_PASSWORD_LEN)
|
||||||
|
+ return 0;
|
||||||
|
+
|
||||||
|
#ifndef HAVE_CYGWIN
|
||||||
|
if (pw->pw_uid == 0 && options.permit_root_login != PERMIT_YES)
|
||||||
|
ok = 0;
|
||||||
|
--
|
||||||
|
2.7.4
|
||||||
|
|
||||||
@@ -25,6 +25,11 @@ SRC_URI = "http://ftp.openbsd.org/pub/OpenBSD/OpenSSH/portable/openssh-${PV}.tar
|
|||||||
file://CVE-2016-1907_2.patch \
|
file://CVE-2016-1907_2.patch \
|
||||||
file://CVE-2016-1907_3.patch \
|
file://CVE-2016-1907_3.patch \
|
||||||
file://CVE-2016-3115.patch \
|
file://CVE-2016-3115.patch \
|
||||||
|
file://CVE-2016-6210.patch \
|
||||||
|
file://CVE-2016-6210_p2.patch \
|
||||||
|
file://CVE-2016-6210_p3.patch \
|
||||||
|
file://CVE-2016-6515.patch \
|
||||||
|
file://CVE-2015-8325.patch \
|
||||||
"
|
"
|
||||||
|
|
||||||
PAM_SRC_URI = "file://sshd"
|
PAM_SRC_URI = "file://sshd"
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ SECTION = "libs/network"
|
|||||||
LICENSE = "openssl"
|
LICENSE = "openssl"
|
||||||
LIC_FILES_CHKSUM = "file://LICENSE;md5=f9a8f968107345e0b75aa8c2ecaa7ec8"
|
LIC_FILES_CHKSUM = "file://LICENSE;md5=f9a8f968107345e0b75aa8c2ecaa7ec8"
|
||||||
|
|
||||||
DEPENDS = "hostperl-runtime-native"
|
DEPENDS = "makedepend-native hostperl-runtime-native"
|
||||||
DEPENDS_append_class-target = " openssl-native"
|
DEPENDS_append_class-target = " openssl-native"
|
||||||
|
|
||||||
SRC_URI = "http://www.openssl.org/source/openssl-${PV}.tar.gz \
|
SRC_URI = "http://www.openssl.org/source/openssl-${PV}.tar.gz \
|
||||||
@@ -36,15 +36,15 @@ PACKAGES =+ "libcrypto libssl ${PN}-misc openssl-conf"
|
|||||||
FILES_libcrypto = "${libdir}/libcrypto${SOLIBS}"
|
FILES_libcrypto = "${libdir}/libcrypto${SOLIBS}"
|
||||||
FILES_libssl = "${libdir}/libssl${SOLIBS}"
|
FILES_libssl = "${libdir}/libssl${SOLIBS}"
|
||||||
FILES_${PN} =+ " ${libdir}/ssl/*"
|
FILES_${PN} =+ " ${libdir}/ssl/*"
|
||||||
FILES_${PN}-misc = "${libdir}/ssl/misc ${bindir}/c_rehash"
|
FILES_${PN}-misc = "${libdir}/ssl/misc"
|
||||||
RDEPENDS_${PN}-misc = "${@bb.utils.contains('PACKAGECONFIG', 'perl', 'perl', '', d)}"
|
RDEPENDS_${PN}-misc = "${@bb.utils.contains('PACKAGECONFIG', 'perl', 'perl', '', d)}"
|
||||||
|
|
||||||
# Add the openssl.cnf file to the openssl-conf package. Make the libcrypto
|
# Add the openssl.cnf file to the openssl-conf package. Make the libcrypto
|
||||||
# package RRECOMMENDS on this package. This will enable the configuration
|
# package RRECOMMENDS on this package. This will enable the configuration
|
||||||
# file to be installed for both the base openssl package and the libcrypto
|
# file to be installed for both the base openssl package and the libcrypto
|
||||||
# package since the base openssl package depends on the libcrypto package.
|
# package since the base openssl package depends on the libcrypto package.
|
||||||
FILES_openssl-conf = "${libdir}/ssl/openssl.cnf"
|
FILES_openssl-conf = "${sysconfdir}/ssl/openssl.cnf"
|
||||||
CONFFILES_openssl-conf = "${libdir}/ssl/openssl.cnf"
|
CONFFILES_openssl-conf = "${sysconfdir}/ssl/openssl.cnf"
|
||||||
RRECOMMENDS_libcrypto += "openssl-conf"
|
RRECOMMENDS_libcrypto += "openssl-conf"
|
||||||
RDEPENDS_${PN}-ptest += "${PN}-misc make perl perl-module-filehandle bc"
|
RDEPENDS_${PN}-ptest += "${PN}-misc make perl perl-module-filehandle bc"
|
||||||
|
|
||||||
@@ -114,7 +114,10 @@ do_configure () {
|
|||||||
target=debian-mipsel
|
target=debian-mipsel
|
||||||
;;
|
;;
|
||||||
linux-*-mips64 | linux-mips64)
|
linux-*-mips64 | linux-mips64)
|
||||||
target=linux-mips
|
target=debian-mips64
|
||||||
|
;;
|
||||||
|
linux-*-mips64el | linux-mips64el)
|
||||||
|
target=debian-mips64el
|
||||||
;;
|
;;
|
||||||
linux-microblaze*|linux-nios2*)
|
linux-microblaze*|linux-nios2*)
|
||||||
target=linux-generic32
|
target=linux-generic32
|
||||||
@@ -149,10 +152,14 @@ do_compile_prepend_class-target () {
|
|||||||
}
|
}
|
||||||
|
|
||||||
do_compile () {
|
do_compile () {
|
||||||
|
oe_runmake depend
|
||||||
oe_runmake
|
oe_runmake
|
||||||
}
|
}
|
||||||
|
|
||||||
do_compile_ptest () {
|
do_compile_ptest () {
|
||||||
|
# build dependencies for test directory too
|
||||||
|
export DIRS="$DIRS test"
|
||||||
|
oe_runmake depend
|
||||||
oe_runmake buildtest
|
oe_runmake buildtest
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -168,17 +175,27 @@ do_install () {
|
|||||||
install -d ${D}${includedir}
|
install -d ${D}${includedir}
|
||||||
cp --dereference -R include/openssl ${D}${includedir}
|
cp --dereference -R include/openssl ${D}${includedir}
|
||||||
|
|
||||||
|
install -Dm 0755 ${WORKDIR}/openssl-c_rehash.sh ${D}${bindir}/c_rehash
|
||||||
|
sed -i -e 's,/etc/openssl,${sysconfdir}/ssl,g' ${D}${bindir}/c_rehash
|
||||||
|
|
||||||
oe_multilib_header openssl/opensslconf.h
|
oe_multilib_header openssl/opensslconf.h
|
||||||
if [ "${@bb.utils.contains('PACKAGECONFIG', 'perl', 'perl', '', d)}" = "perl" ]; then
|
if [ "${@bb.utils.contains('PACKAGECONFIG', 'perl', 'perl', '', d)}" = "perl" ]; then
|
||||||
install -m 0755 ${S}/tools/c_rehash ${D}${bindir}
|
|
||||||
sed -i -e '1s,.*,#!${bindir}/env perl,' ${D}${bindir}/c_rehash
|
|
||||||
sed -i -e '1s,.*,#!${bindir}/env perl,' ${D}${libdir}/ssl/misc/CA.pl
|
sed -i -e '1s,.*,#!${bindir}/env perl,' ${D}${libdir}/ssl/misc/CA.pl
|
||||||
sed -i -e '1s,.*,#!${bindir}/env perl,' ${D}${libdir}/ssl/misc/tsget
|
sed -i -e '1s,.*,#!${bindir}/env perl,' ${D}${libdir}/ssl/misc/tsget
|
||||||
# The c_rehash utility isn't installed by the normal installation process.
|
|
||||||
else
|
else
|
||||||
rm -f ${D}${bindir}/c_rehash
|
|
||||||
rm -f ${D}${libdir}/ssl/misc/CA.pl ${D}${libdir}/ssl/misc/tsget
|
rm -f ${D}${libdir}/ssl/misc/CA.pl ${D}${libdir}/ssl/misc/tsget
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
# Create SSL structure
|
||||||
|
install -d ${D}${sysconfdir}/ssl/
|
||||||
|
mv ${D}${libdir}/ssl/openssl.cnf \
|
||||||
|
${D}${libdir}/ssl/certs \
|
||||||
|
${D}${libdir}/ssl/private \
|
||||||
|
\
|
||||||
|
${D}${sysconfdir}/ssl/
|
||||||
|
ln -sf ${sysconfdir}/ssl/certs ${D}${libdir}/ssl/certs
|
||||||
|
ln -sf ${sysconfdir}/ssl/private ${D}${libdir}/ssl/private
|
||||||
|
ln -sf ${sysconfdir}/ssl/openssl.cnf ${D}${libdir}/ssl/openssl.cnf
|
||||||
}
|
}
|
||||||
|
|
||||||
do_install_ptest () {
|
do_install_ptest () {
|
||||||
@@ -192,12 +209,25 @@ do_install_ptest () {
|
|||||||
cp -r certs ${D}${PTEST_PATH}
|
cp -r certs ${D}${PTEST_PATH}
|
||||||
mkdir -p ${D}${PTEST_PATH}/apps
|
mkdir -p ${D}${PTEST_PATH}/apps
|
||||||
ln -sf ${libdir}/ssl/misc/CA.sh ${D}${PTEST_PATH}/apps
|
ln -sf ${libdir}/ssl/misc/CA.sh ${D}${PTEST_PATH}/apps
|
||||||
ln -sf ${libdir}/ssl/openssl.cnf ${D}${PTEST_PATH}/apps
|
ln -sf ${sysconfdir}/ssl/openssl.cnf ${D}${PTEST_PATH}/apps
|
||||||
ln -sf ${bindir}/openssl ${D}${PTEST_PATH}/apps
|
ln -sf ${bindir}/openssl ${D}${PTEST_PATH}/apps
|
||||||
cp apps/server2.pem ${D}${PTEST_PATH}/apps
|
cp apps/server2.pem ${D}${PTEST_PATH}/apps
|
||||||
mkdir -p ${D}${PTEST_PATH}/util
|
mkdir -p ${D}${PTEST_PATH}/util
|
||||||
install util/opensslwrap.sh ${D}${PTEST_PATH}/util
|
install util/opensslwrap.sh ${D}${PTEST_PATH}/util
|
||||||
install util/shlib_wrap.sh ${D}${PTEST_PATH}/util
|
install util/shlib_wrap.sh ${D}${PTEST_PATH}/util
|
||||||
|
# Time stamps are relevant for "make alltests", otherwise
|
||||||
|
# make may try to recompile binaries. Not only must the
|
||||||
|
# binary files be newer than the sources, they also must
|
||||||
|
# be more recent than the header files in /usr/include.
|
||||||
|
#
|
||||||
|
# Using "cp -a" is not sufficient, because do_install
|
||||||
|
# does not preserve the original time stamps.
|
||||||
|
#
|
||||||
|
# So instead of using the original file stamps, we set
|
||||||
|
# the current time for all files. Binaries will get
|
||||||
|
# modified again later when stripping them, but that's okay.
|
||||||
|
touch ${D}${PTEST_PATH}
|
||||||
|
find ${D}${PTEST_PATH} -type f -print0 | xargs --verbose -0 touch -r ${D}${PTEST_PATH}
|
||||||
}
|
}
|
||||||
|
|
||||||
do_install_append_class-native() {
|
do_install_append_class-native() {
|
||||||
|
|||||||
286
meta/recipes-connectivity/openssl/openssl/CVE-2016-2177.patch
Normal file
286
meta/recipes-connectivity/openssl/openssl/CVE-2016-2177.patch
Normal file
@@ -0,0 +1,286 @@
|
|||||||
|
From a004e72b95835136d3f1ea90517f706c24c03da7 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Matt Caswell <matt@openssl.org>
|
||||||
|
Date: Thu, 5 May 2016 11:10:26 +0100
|
||||||
|
Subject: [PATCH] Avoid some undefined pointer arithmetic
|
||||||
|
|
||||||
|
A common idiom in the codebase is:
|
||||||
|
|
||||||
|
if (p + len > limit)
|
||||||
|
{
|
||||||
|
return; /* Too long */
|
||||||
|
}
|
||||||
|
|
||||||
|
Where "p" points to some malloc'd data of SIZE bytes and
|
||||||
|
limit == p + SIZE
|
||||||
|
|
||||||
|
"len" here could be from some externally supplied data (e.g. from a TLS
|
||||||
|
message).
|
||||||
|
|
||||||
|
The rules of C pointer arithmetic are such that "p + len" is only well
|
||||||
|
defined where len <= SIZE. Therefore the above idiom is actually
|
||||||
|
undefined behaviour.
|
||||||
|
|
||||||
|
For example this could cause problems if some malloc implementation
|
||||||
|
provides an address for "p" such that "p + len" actually overflows for
|
||||||
|
values of len that are too big and therefore p + len < limit!
|
||||||
|
|
||||||
|
Issue reported by Guido Vranken.
|
||||||
|
|
||||||
|
CVE-2016-2177
|
||||||
|
|
||||||
|
Reviewed-by: Rich Salz <rsalz@openssl.org>
|
||||||
|
|
||||||
|
Upstream-Status: Backport
|
||||||
|
CVE: CVE-2016-2177
|
||||||
|
|
||||||
|
Signed-off-by: Armin Kuster <akuster@mvista.com>
|
||||||
|
|
||||||
|
|
||||||
|
---
|
||||||
|
ssl/s3_srvr.c | 14 +++++++-------
|
||||||
|
ssl/ssl_sess.c | 2 +-
|
||||||
|
ssl/t1_lib.c | 56 ++++++++++++++++++++++++++++++--------------------------
|
||||||
|
3 files changed, 38 insertions(+), 34 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/ssl/s3_srvr.c b/ssl/s3_srvr.c
|
||||||
|
index ab28702..ab7f690 100644
|
||||||
|
--- a/ssl/s3_srvr.c
|
||||||
|
+++ b/ssl/s3_srvr.c
|
||||||
|
@@ -980,7 +980,7 @@ int ssl3_get_client_hello(SSL *s)
|
||||||
|
|
||||||
|
session_length = *(p + SSL3_RANDOM_SIZE);
|
||||||
|
|
||||||
|
- if (p + SSL3_RANDOM_SIZE + session_length + 1 >= d + n) {
|
||||||
|
+ if (SSL3_RANDOM_SIZE + session_length + 1 >= (d + n) - p) {
|
||||||
|
al = SSL_AD_DECODE_ERROR;
|
||||||
|
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_LENGTH_TOO_SHORT);
|
||||||
|
goto f_err;
|
||||||
|
@@ -998,7 +998,7 @@ int ssl3_get_client_hello(SSL *s)
|
||||||
|
/* get the session-id */
|
||||||
|
j = *(p++);
|
||||||
|
|
||||||
|
- if (p + j > d + n) {
|
||||||
|
+ if ((d + n) - p < j) {
|
||||||
|
al = SSL_AD_DECODE_ERROR;
|
||||||
|
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_LENGTH_TOO_SHORT);
|
||||||
|
goto f_err;
|
||||||
|
@@ -1054,14 +1054,14 @@ int ssl3_get_client_hello(SSL *s)
|
||||||
|
|
||||||
|
if (SSL_IS_DTLS(s)) {
|
||||||
|
/* cookie stuff */
|
||||||
|
- if (p + 1 > d + n) {
|
||||||
|
+ if ((d + n) - p < 1) {
|
||||||
|
al = SSL_AD_DECODE_ERROR;
|
||||||
|
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_LENGTH_TOO_SHORT);
|
||||||
|
goto f_err;
|
||||||
|
}
|
||||||
|
cookie_len = *(p++);
|
||||||
|
|
||||||
|
- if (p + cookie_len > d + n) {
|
||||||
|
+ if ((d + n ) - p < cookie_len) {
|
||||||
|
al = SSL_AD_DECODE_ERROR;
|
||||||
|
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_LENGTH_TOO_SHORT);
|
||||||
|
goto f_err;
|
||||||
|
@@ -1131,7 +1131,7 @@ int ssl3_get_client_hello(SSL *s)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
- if (p + 2 > d + n) {
|
||||||
|
+ if ((d + n ) - p < 2) {
|
||||||
|
al = SSL_AD_DECODE_ERROR;
|
||||||
|
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_LENGTH_TOO_SHORT);
|
||||||
|
goto f_err;
|
||||||
|
@@ -1145,7 +1145,7 @@ int ssl3_get_client_hello(SSL *s)
|
||||||
|
}
|
||||||
|
|
||||||
|
/* i bytes of cipher data + 1 byte for compression length later */
|
||||||
|
- if ((p + i + 1) > (d + n)) {
|
||||||
|
+ if ((d + n) - p < i + 1) {
|
||||||
|
/* not enough data */
|
||||||
|
al = SSL_AD_DECODE_ERROR;
|
||||||
|
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_LENGTH_MISMATCH);
|
||||||
|
@@ -1211,7 +1211,7 @@ int ssl3_get_client_hello(SSL *s)
|
||||||
|
|
||||||
|
/* compression */
|
||||||
|
i = *(p++);
|
||||||
|
- if ((p + i) > (d + n)) {
|
||||||
|
+ if ((d + n) - p < i) {
|
||||||
|
/* not enough data */
|
||||||
|
al = SSL_AD_DECODE_ERROR;
|
||||||
|
SSLerr(SSL_F_SSL3_GET_CLIENT_HELLO, SSL_R_LENGTH_MISMATCH);
|
||||||
|
diff --git a/ssl/ssl_sess.c b/ssl/ssl_sess.c
|
||||||
|
index b182998..54ee783 100644
|
||||||
|
--- a/ssl/ssl_sess.c
|
||||||
|
+++ b/ssl/ssl_sess.c
|
||||||
|
@@ -573,7 +573,7 @@ int ssl_get_prev_session(SSL *s, unsigned char *session_id, int len,
|
||||||
|
int r;
|
||||||
|
#endif
|
||||||
|
|
||||||
|
- if (session_id + len > limit) {
|
||||||
|
+ if (limit - session_id < len) {
|
||||||
|
fatal = 1;
|
||||||
|
goto err;
|
||||||
|
}
|
||||||
|
diff --git a/ssl/t1_lib.c b/ssl/t1_lib.c
|
||||||
|
index fb64607..cdac011 100644
|
||||||
|
--- a/ssl/t1_lib.c
|
||||||
|
+++ b/ssl/t1_lib.c
|
||||||
|
@@ -1867,11 +1867,11 @@ static void ssl_check_for_safari(SSL *s, const unsigned char *data,
|
||||||
|
0x02, 0x03, /* SHA-1/ECDSA */
|
||||||
|
};
|
||||||
|
|
||||||
|
- if (data >= (limit - 2))
|
||||||
|
+ if (limit - data <= 2)
|
||||||
|
return;
|
||||||
|
data += 2;
|
||||||
|
|
||||||
|
- if (data > (limit - 4))
|
||||||
|
+ if (limit - data < 4)
|
||||||
|
return;
|
||||||
|
n2s(data, type);
|
||||||
|
n2s(data, size);
|
||||||
|
@@ -1879,7 +1879,7 @@ static void ssl_check_for_safari(SSL *s, const unsigned char *data,
|
||||||
|
if (type != TLSEXT_TYPE_server_name)
|
||||||
|
return;
|
||||||
|
|
||||||
|
- if (data + size > limit)
|
||||||
|
+ if (limit - data < size)
|
||||||
|
return;
|
||||||
|
data += size;
|
||||||
|
|
||||||
|
@@ -1887,7 +1887,7 @@ static void ssl_check_for_safari(SSL *s, const unsigned char *data,
|
||||||
|
const size_t len1 = sizeof(kSafariExtensionsBlock);
|
||||||
|
const size_t len2 = sizeof(kSafariTLS12ExtensionsBlock);
|
||||||
|
|
||||||
|
- if (data + len1 + len2 != limit)
|
||||||
|
+ if (limit - data != (int)(len1 + len2))
|
||||||
|
return;
|
||||||
|
if (memcmp(data, kSafariExtensionsBlock, len1) != 0)
|
||||||
|
return;
|
||||||
|
@@ -1896,7 +1896,7 @@ static void ssl_check_for_safari(SSL *s, const unsigned char *data,
|
||||||
|
} else {
|
||||||
|
const size_t len = sizeof(kSafariExtensionsBlock);
|
||||||
|
|
||||||
|
- if (data + len != limit)
|
||||||
|
+ if (limit - data != (int)(len))
|
||||||
|
return;
|
||||||
|
if (memcmp(data, kSafariExtensionsBlock, len) != 0)
|
||||||
|
return;
|
||||||
|
@@ -2053,19 +2053,19 @@ static int ssl_scan_clienthello_tlsext(SSL *s, unsigned char **p,
|
||||||
|
if (data == limit)
|
||||||
|
goto ri_check;
|
||||||
|
|
||||||
|
- if (data > (limit - 2))
|
||||||
|
+ if (limit - data < 2)
|
||||||
|
goto err;
|
||||||
|
|
||||||
|
n2s(data, len);
|
||||||
|
|
||||||
|
- if (data + len != limit)
|
||||||
|
+ if (limit - data != len)
|
||||||
|
goto err;
|
||||||
|
|
||||||
|
- while (data <= (limit - 4)) {
|
||||||
|
+ while (limit - data >= 4) {
|
||||||
|
n2s(data, type);
|
||||||
|
n2s(data, size);
|
||||||
|
|
||||||
|
- if (data + size > (limit))
|
||||||
|
+ if (limit - data < size)
|
||||||
|
goto err;
|
||||||
|
# if 0
|
||||||
|
fprintf(stderr, "Received extension type %d size %d\n", type, size);
|
||||||
|
@@ -2472,18 +2472,18 @@ static int ssl_scan_clienthello_custom_tlsext(SSL *s,
|
||||||
|
if (s->hit || s->cert->srv_ext.meths_count == 0)
|
||||||
|
return 1;
|
||||||
|
|
||||||
|
- if (data >= limit - 2)
|
||||||
|
+ if (limit - data <= 2)
|
||||||
|
return 1;
|
||||||
|
n2s(data, len);
|
||||||
|
|
||||||
|
- if (data > limit - len)
|
||||||
|
+ if (limit - data < len)
|
||||||
|
return 1;
|
||||||
|
|
||||||
|
- while (data <= limit - 4) {
|
||||||
|
+ while (limit - data >= 4) {
|
||||||
|
n2s(data, type);
|
||||||
|
n2s(data, size);
|
||||||
|
|
||||||
|
- if (data + size > limit)
|
||||||
|
+ if (limit - data < size)
|
||||||
|
return 1;
|
||||||
|
if (custom_ext_parse(s, 1 /* server */ , type, data, size, al) <= 0)
|
||||||
|
return 0;
|
||||||
|
@@ -2569,20 +2569,20 @@ static int ssl_scan_serverhello_tlsext(SSL *s, unsigned char **p,
|
||||||
|
SSL_TLSEXT_HB_DONT_SEND_REQUESTS);
|
||||||
|
# endif
|
||||||
|
|
||||||
|
- if (data >= (d + n - 2))
|
||||||
|
+ if ((d + n) - data <= 2)
|
||||||
|
goto ri_check;
|
||||||
|
|
||||||
|
n2s(data, length);
|
||||||
|
- if (data + length != d + n) {
|
||||||
|
+ if ((d + n) - data != length) {
|
||||||
|
*al = SSL_AD_DECODE_ERROR;
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
- while (data <= (d + n - 4)) {
|
||||||
|
+ while ((d + n) - data >= 4) {
|
||||||
|
n2s(data, type);
|
||||||
|
n2s(data, size);
|
||||||
|
|
||||||
|
- if (data + size > (d + n))
|
||||||
|
+ if ((d + n) - data < size)
|
||||||
|
goto ri_check;
|
||||||
|
|
||||||
|
if (s->tlsext_debug_cb)
|
||||||
|
@@ -3307,29 +3307,33 @@ int tls1_process_ticket(SSL *s, unsigned char *session_id, int len,
|
||||||
|
/* Skip past DTLS cookie */
|
||||||
|
if (SSL_IS_DTLS(s)) {
|
||||||
|
i = *(p++);
|
||||||
|
- p += i;
|
||||||
|
- if (p >= limit)
|
||||||
|
+
|
||||||
|
+ if (limit - p <= i)
|
||||||
|
return -1;
|
||||||
|
+
|
||||||
|
+ p += i;
|
||||||
|
}
|
||||||
|
/* Skip past cipher list */
|
||||||
|
n2s(p, i);
|
||||||
|
- p += i;
|
||||||
|
- if (p >= limit)
|
||||||
|
+ if (limit - p <= i)
|
||||||
|
return -1;
|
||||||
|
+ p += i;
|
||||||
|
+
|
||||||
|
/* Skip past compression algorithm list */
|
||||||
|
i = *(p++);
|
||||||
|
- p += i;
|
||||||
|
- if (p > limit)
|
||||||
|
+ if (limit - p < i)
|
||||||
|
return -1;
|
||||||
|
+ p += i;
|
||||||
|
+
|
||||||
|
/* Now at start of extensions */
|
||||||
|
- if ((p + 2) >= limit)
|
||||||
|
+ if (limit - p <= 2)
|
||||||
|
return 0;
|
||||||
|
n2s(p, i);
|
||||||
|
- while ((p + 4) <= limit) {
|
||||||
|
+ while (limit - p >= 4) {
|
||||||
|
unsigned short type, size;
|
||||||
|
n2s(p, type);
|
||||||
|
n2s(p, size);
|
||||||
|
- if (p + size > limit)
|
||||||
|
+ if (limit - p < size)
|
||||||
|
return 0;
|
||||||
|
if (type == TLSEXT_TYPE_session_ticket) {
|
||||||
|
int r;
|
||||||
|
--
|
||||||
|
2.3.5
|
||||||
|
|
||||||
@@ -0,0 +1,54 @@
|
|||||||
|
From 621eaf49a289bfac26d4cbcdb7396e796784c534 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Cesar Pereida <cesar.pereida@aalto.fi>
|
||||||
|
Date: Mon, 23 May 2016 12:45:25 +0300
|
||||||
|
Subject: [PATCH] Fix DSA, preserve BN_FLG_CONSTTIME
|
||||||
|
|
||||||
|
Operations in the DSA signing algorithm should run in constant time in
|
||||||
|
order to avoid side channel attacks. A flaw in the OpenSSL DSA
|
||||||
|
implementation means that a non-constant time codepath is followed for
|
||||||
|
certain operations. This has been demonstrated through a cache-timing
|
||||||
|
attack to be sufficient for an attacker to recover the private DSA key.
|
||||||
|
|
||||||
|
CVE-2016-2178
|
||||||
|
|
||||||
|
Reviewed-by: Richard Levitte <levitte@openssl.org>
|
||||||
|
Reviewed-by: Matt Caswell <matt@openssl.org>
|
||||||
|
|
||||||
|
Upstream-Status: Backport
|
||||||
|
CVE: CVE-2016-2178
|
||||||
|
Signed-off-by: Armin Kuster <akuster@mvista.com>
|
||||||
|
|
||||||
|
---
|
||||||
|
crypto/dsa/dsa_ossl.c | 6 +++---
|
||||||
|
1 file changed, 3 insertions(+), 3 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/crypto/dsa/dsa_ossl.c b/crypto/dsa/dsa_ossl.c
|
||||||
|
index efc4f1b..b29eb4b 100644
|
||||||
|
--- a/crypto/dsa/dsa_ossl.c
|
||||||
|
+++ b/crypto/dsa/dsa_ossl.c
|
||||||
|
@@ -248,9 +248,6 @@ static int dsa_sign_setup(DSA *dsa, BN_CTX *ctx_in, BIGNUM **kinvp,
|
||||||
|
if (!BN_rand_range(&k, dsa->q))
|
||||||
|
goto err;
|
||||||
|
while (BN_is_zero(&k)) ;
|
||||||
|
- if ((dsa->flags & DSA_FLAG_NO_EXP_CONSTTIME) == 0) {
|
||||||
|
- BN_set_flags(&k, BN_FLG_CONSTTIME);
|
||||||
|
- }
|
||||||
|
|
||||||
|
if (dsa->flags & DSA_FLAG_CACHE_MONT_P) {
|
||||||
|
if (!BN_MONT_CTX_set_locked(&dsa->method_mont_p,
|
||||||
|
@@ -279,9 +276,12 @@ static int dsa_sign_setup(DSA *dsa, BN_CTX *ctx_in, BIGNUM **kinvp,
|
||||||
|
}
|
||||||
|
|
||||||
|
K = &kq;
|
||||||
|
+
|
||||||
|
+ BN_set_flags(K, BN_FLG_CONSTTIME);
|
||||||
|
} else {
|
||||||
|
K = &k;
|
||||||
|
}
|
||||||
|
+
|
||||||
|
DSA_BN_MOD_EXP(goto err, dsa, r, dsa->g, K, dsa->p, ctx,
|
||||||
|
dsa->method_mont_p);
|
||||||
|
if (!BN_mod(r, r, dsa->q, ctx))
|
||||||
|
--
|
||||||
|
2.7.4
|
||||||
|
|
||||||
255
meta/recipes-connectivity/openssl/openssl/CVE-2016-2179.patch
Normal file
255
meta/recipes-connectivity/openssl/openssl/CVE-2016-2179.patch
Normal file
@@ -0,0 +1,255 @@
|
|||||||
|
From 26f2c5774f117aea588e8f31fad38bcf14e83bec Mon Sep 17 00:00:00 2001
|
||||||
|
From: Matt Caswell <matt@openssl.org>
|
||||||
|
Date: Thu, 30 Jun 2016 13:17:08 +0100
|
||||||
|
Subject: [PATCH] Fix DTLS buffered message DoS attack
|
||||||
|
|
||||||
|
DTLS can handle out of order record delivery. Additionally since
|
||||||
|
handshake messages can be bigger than will fit into a single packet, the
|
||||||
|
messages can be fragmented across multiple records (as with normal TLS).
|
||||||
|
That means that the messages can arrive mixed up, and we have to
|
||||||
|
reassemble them. We keep a queue of buffered messages that are "from the
|
||||||
|
future", i.e. messages we're not ready to deal with yet but have arrived
|
||||||
|
early. The messages held there may not be full yet - they could be one
|
||||||
|
or more fragments that are still in the process of being reassembled.
|
||||||
|
|
||||||
|
The code assumes that we will eventually complete the reassembly and
|
||||||
|
when that occurs the complete message is removed from the queue at the
|
||||||
|
point that we need to use it.
|
||||||
|
|
||||||
|
However, DTLS is also tolerant of packet loss. To get around that DTLS
|
||||||
|
messages can be retransmitted. If we receive a full (non-fragmented)
|
||||||
|
message from the peer after previously having received a fragment of
|
||||||
|
that message, then we ignore the message in the queue and just use the
|
||||||
|
non-fragmented version. At that point the queued message will never get
|
||||||
|
removed.
|
||||||
|
|
||||||
|
Additionally the peer could send "future" messages that we never get to
|
||||||
|
in order to complete the handshake. Each message has a sequence number
|
||||||
|
(starting from 0). We will accept a message fragment for the current
|
||||||
|
message sequence number, or for any sequence up to 10 into the future.
|
||||||
|
However if the Finished message has a sequence number of 2, anything
|
||||||
|
greater than that in the queue is just left there.
|
||||||
|
|
||||||
|
So, in those two ways we can end up with "orphaned" data in the queue
|
||||||
|
that will never get removed - except when the connection is closed. At
|
||||||
|
that point all the queues are flushed.
|
||||||
|
|
||||||
|
An attacker could seek to exploit this by filling up the queues with
|
||||||
|
lots of large messages that are never going to be used in order to
|
||||||
|
attempt a DoS by memory exhaustion.
|
||||||
|
|
||||||
|
I will assume that we are only concerned with servers here. It does not
|
||||||
|
seem reasonable to be concerned about a memory exhaustion attack on a
|
||||||
|
client. They are unlikely to process enough connections for this to be
|
||||||
|
an issue.
|
||||||
|
|
||||||
|
A "long" handshake with many messages might be 5 messages long (in the
|
||||||
|
incoming direction), e.g. ClientHello, Certificate, ClientKeyExchange,
|
||||||
|
CertificateVerify, Finished. So this would be message sequence numbers 0
|
||||||
|
to 4. Additionally we can buffer up to 10 messages in the future.
|
||||||
|
Therefore the maximum number of messages that an attacker could send
|
||||||
|
that could get orphaned would typically be 15.
|
||||||
|
|
||||||
|
The maximum size that a DTLS message is allowed to be is defined by
|
||||||
|
max_cert_list, which by default is 100k. Therefore the maximum amount of
|
||||||
|
"orphaned" memory per connection is 1500k.
|
||||||
|
|
||||||
|
Message sequence numbers get reset after the Finished message, so
|
||||||
|
renegotiation will not extend the maximum number of messages that can be
|
||||||
|
orphaned per connection.
|
||||||
|
|
||||||
|
As noted above, the queues do get cleared when the connection is closed.
|
||||||
|
Therefore in order to mount an effective attack, an attacker would have
|
||||||
|
to open many simultaneous connections.
|
||||||
|
|
||||||
|
Issue reported by Quan Luo.
|
||||||
|
|
||||||
|
CVE-2016-2179
|
||||||
|
|
||||||
|
Reviewed-by: Richard Levitte <levitte@openssl.org>
|
||||||
|
|
||||||
|
Upstream-Status: Backport
|
||||||
|
CVE: CVE-2016-2179
|
||||||
|
Signed-off-by: Armin Kuster <akuster@mvista.com>
|
||||||
|
|
||||||
|
---
|
||||||
|
ssl/d1_both.c | 32 ++++++++++++++++----------------
|
||||||
|
ssl/d1_clnt.c | 1 +
|
||||||
|
ssl/d1_lib.c | 37 ++++++++++++++++++++++++++-----------
|
||||||
|
ssl/d1_srvr.c | 3 ++-
|
||||||
|
ssl/ssl_locl.h | 3 ++-
|
||||||
|
5 files changed, 47 insertions(+), 29 deletions(-)
|
||||||
|
|
||||||
|
Index: openssl-1.0.2h/ssl/d1_both.c
|
||||||
|
===================================================================
|
||||||
|
--- openssl-1.0.2h.orig/ssl/d1_both.c
|
||||||
|
+++ openssl-1.0.2h/ssl/d1_both.c
|
||||||
|
@@ -618,11 +618,23 @@ static int dtls1_retrieve_buffered_fragm
|
||||||
|
int al;
|
||||||
|
|
||||||
|
*ok = 0;
|
||||||
|
- item = pqueue_peek(s->d1->buffered_messages);
|
||||||
|
- if (item == NULL)
|
||||||
|
- return 0;
|
||||||
|
+ do {
|
||||||
|
+ item = pqueue_peek(s->d1->buffered_messages);
|
||||||
|
+ if (item == NULL)
|
||||||
|
+ return 0;
|
||||||
|
+
|
||||||
|
+ frag = (hm_fragment *)item->data;
|
||||||
|
+
|
||||||
|
+ if (frag->msg_header.seq < s->d1->handshake_read_seq) {
|
||||||
|
+ /* This is a stale message that has been buffered so clear it */
|
||||||
|
+ pqueue_pop(s->d1->buffered_messages);
|
||||||
|
+ dtls1_hm_fragment_free(frag);
|
||||||
|
+ pitem_free(item);
|
||||||
|
+ item = NULL;
|
||||||
|
+ frag = NULL;
|
||||||
|
+ }
|
||||||
|
+ } while (item == NULL);
|
||||||
|
|
||||||
|
- frag = (hm_fragment *)item->data;
|
||||||
|
|
||||||
|
/* Don't return if reassembly still in progress */
|
||||||
|
if (frag->reassembly != NULL)
|
||||||
|
@@ -1296,18 +1308,6 @@ dtls1_retransmit_message(SSL *s, unsigne
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
-/* call this function when the buffered messages are no longer needed */
|
||||||
|
-void dtls1_clear_record_buffer(SSL *s)
|
||||||
|
-{
|
||||||
|
- pitem *item;
|
||||||
|
-
|
||||||
|
- for (item = pqueue_pop(s->d1->sent_messages);
|
||||||
|
- item != NULL; item = pqueue_pop(s->d1->sent_messages)) {
|
||||||
|
- dtls1_hm_fragment_free((hm_fragment *)item->data);
|
||||||
|
- pitem_free(item);
|
||||||
|
- }
|
||||||
|
-}
|
||||||
|
-
|
||||||
|
unsigned char *dtls1_set_message_header(SSL *s, unsigned char *p,
|
||||||
|
unsigned char mt, unsigned long len,
|
||||||
|
unsigned long frag_off,
|
||||||
|
Index: openssl-1.0.2h/ssl/d1_clnt.c
|
||||||
|
===================================================================
|
||||||
|
--- openssl-1.0.2h.orig/ssl/d1_clnt.c
|
||||||
|
+++ openssl-1.0.2h/ssl/d1_clnt.c
|
||||||
|
@@ -769,6 +769,7 @@ int dtls1_connect(SSL *s)
|
||||||
|
/* done with handshaking */
|
||||||
|
s->d1->handshake_read_seq = 0;
|
||||||
|
s->d1->next_handshake_write_seq = 0;
|
||||||
|
+ dtls1_clear_received_buffer(s);
|
||||||
|
goto end;
|
||||||
|
/* break; */
|
||||||
|
|
||||||
|
Index: openssl-1.0.2h/ssl/d1_lib.c
|
||||||
|
===================================================================
|
||||||
|
--- openssl-1.0.2h.orig/ssl/d1_lib.c
|
||||||
|
+++ openssl-1.0.2h/ssl/d1_lib.c
|
||||||
|
@@ -170,7 +170,6 @@ int dtls1_new(SSL *s)
|
||||||
|
static void dtls1_clear_queues(SSL *s)
|
||||||
|
{
|
||||||
|
pitem *item = NULL;
|
||||||
|
- hm_fragment *frag = NULL;
|
||||||
|
DTLS1_RECORD_DATA *rdata;
|
||||||
|
|
||||||
|
while ((item = pqueue_pop(s->d1->unprocessed_rcds.q)) != NULL) {
|
||||||
|
@@ -191,28 +190,44 @@ static void dtls1_clear_queues(SSL *s)
|
||||||
|
pitem_free(item);
|
||||||
|
}
|
||||||
|
|
||||||
|
+ while ((item = pqueue_pop(s->d1->buffered_app_data.q)) != NULL) {
|
||||||
|
+ rdata = (DTLS1_RECORD_DATA *)item->data;
|
||||||
|
+ if (rdata->rbuf.buf) {
|
||||||
|
+ OPENSSL_free(rdata->rbuf.buf);
|
||||||
|
+ }
|
||||||
|
+ OPENSSL_free(item->data);
|
||||||
|
+ pitem_free(item);
|
||||||
|
+ }
|
||||||
|
+
|
||||||
|
+ dtls1_clear_received_buffer(s);
|
||||||
|
+ dtls1_clear_sent_buffer(s);
|
||||||
|
+}
|
||||||
|
+
|
||||||
|
+void dtls1_clear_received_buffer(SSL *s)
|
||||||
|
+{
|
||||||
|
+ pitem *item = NULL;
|
||||||
|
+ hm_fragment *frag = NULL;
|
||||||
|
+
|
||||||
|
while ((item = pqueue_pop(s->d1->buffered_messages)) != NULL) {
|
||||||
|
frag = (hm_fragment *)item->data;
|
||||||
|
dtls1_hm_fragment_free(frag);
|
||||||
|
pitem_free(item);
|
||||||
|
}
|
||||||
|
+}
|
||||||
|
+
|
||||||
|
+void dtls1_clear_sent_buffer(SSL *s)
|
||||||
|
+{
|
||||||
|
+ pitem *item = NULL;
|
||||||
|
+ hm_fragment *frag = NULL;
|
||||||
|
|
||||||
|
while ((item = pqueue_pop(s->d1->sent_messages)) != NULL) {
|
||||||
|
frag = (hm_fragment *)item->data;
|
||||||
|
dtls1_hm_fragment_free(frag);
|
||||||
|
pitem_free(item);
|
||||||
|
}
|
||||||
|
-
|
||||||
|
- while ((item = pqueue_pop(s->d1->buffered_app_data.q)) != NULL) {
|
||||||
|
- rdata = (DTLS1_RECORD_DATA *)item->data;
|
||||||
|
- if (rdata->rbuf.buf) {
|
||||||
|
- OPENSSL_free(rdata->rbuf.buf);
|
||||||
|
- }
|
||||||
|
- OPENSSL_free(item->data);
|
||||||
|
- pitem_free(item);
|
||||||
|
- }
|
||||||
|
}
|
||||||
|
|
||||||
|
+
|
||||||
|
void dtls1_free(SSL *s)
|
||||||
|
{
|
||||||
|
ssl3_free(s);
|
||||||
|
@@ -456,7 +471,7 @@ void dtls1_stop_timer(SSL *s)
|
||||||
|
BIO_ctrl(SSL_get_rbio(s), BIO_CTRL_DGRAM_SET_NEXT_TIMEOUT, 0,
|
||||||
|
&(s->d1->next_timeout));
|
||||||
|
/* Clear retransmission buffer */
|
||||||
|
- dtls1_clear_record_buffer(s);
|
||||||
|
+ dtls1_clear_sent_buffer(s);
|
||||||
|
}
|
||||||
|
|
||||||
|
int dtls1_check_timeout_num(SSL *s)
|
||||||
|
Index: openssl-1.0.2h/ssl/d1_srvr.c
|
||||||
|
===================================================================
|
||||||
|
--- openssl-1.0.2h.orig/ssl/d1_srvr.c
|
||||||
|
+++ openssl-1.0.2h/ssl/d1_srvr.c
|
||||||
|
@@ -313,7 +313,7 @@ int dtls1_accept(SSL *s)
|
||||||
|
case SSL3_ST_SW_HELLO_REQ_B:
|
||||||
|
|
||||||
|
s->shutdown = 0;
|
||||||
|
- dtls1_clear_record_buffer(s);
|
||||||
|
+ dtls1_clear_sent_buffer(s);
|
||||||
|
dtls1_start_timer(s);
|
||||||
|
ret = ssl3_send_hello_request(s);
|
||||||
|
if (ret <= 0)
|
||||||
|
@@ -894,6 +894,7 @@ int dtls1_accept(SSL *s)
|
||||||
|
/* next message is server hello */
|
||||||
|
s->d1->handshake_write_seq = 0;
|
||||||
|
s->d1->next_handshake_write_seq = 0;
|
||||||
|
+ dtls1_clear_received_buffer(s);
|
||||||
|
goto end;
|
||||||
|
/* break; */
|
||||||
|
|
||||||
|
Index: openssl-1.0.2h/ssl/ssl_locl.h
|
||||||
|
===================================================================
|
||||||
|
--- openssl-1.0.2h.orig/ssl/ssl_locl.h
|
||||||
|
+++ openssl-1.0.2h/ssl/ssl_locl.h
|
||||||
|
@@ -1242,7 +1242,8 @@ int dtls1_retransmit_message(SSL *s, uns
|
||||||
|
unsigned long frag_off, int *found);
|
||||||
|
int dtls1_get_queue_priority(unsigned short seq, int is_ccs);
|
||||||
|
int dtls1_retransmit_buffered_messages(SSL *s);
|
||||||
|
-void dtls1_clear_record_buffer(SSL *s);
|
||||||
|
+void dtls1_clear_received_buffer(SSL *s);
|
||||||
|
+void dtls1_clear_sent_buffer(SSL *s);
|
||||||
|
void dtls1_get_message_header(unsigned char *data,
|
||||||
|
struct hm_header_st *msg_hdr);
|
||||||
|
void dtls1_get_ccs_header(unsigned char *data, struct ccs_header_st *ccs_hdr);
|
||||||
@@ -0,0 +1,44 @@
|
|||||||
|
From b746aa3fe05b5b5f7126df247ac3eceeb995e2a0 Mon Sep 17 00:00:00 2001
|
||||||
|
From: "Dr. Stephen Henson" <steve@openssl.org>
|
||||||
|
Date: Thu, 21 Jul 2016 15:24:16 +0100
|
||||||
|
Subject: [PATCH] Fix OOB read in TS_OBJ_print_bio().
|
||||||
|
|
||||||
|
TS_OBJ_print_bio() misuses OBJ_txt2obj: it should print the result
|
||||||
|
as a null terminated buffer. The length value returned is the total
|
||||||
|
length the complete text reprsentation would need not the amount of
|
||||||
|
data written.
|
||||||
|
|
||||||
|
CVE-2016-2180
|
||||||
|
|
||||||
|
Thanks to Shi Lei for reporting this bug.
|
||||||
|
|
||||||
|
Reviewed-by: Matt Caswell <matt@openssl.org>
|
||||||
|
(cherry picked from commit 0ed26acce328ec16a3aa635f1ca37365e8c7403a)
|
||||||
|
|
||||||
|
Upstream-Status: Backport
|
||||||
|
CVE: CVE-2016-2180
|
||||||
|
Signed-off-by: Armin Kuster <akuster@mvista.com>
|
||||||
|
|
||||||
|
---
|
||||||
|
crypto/ts/ts_lib.c | 5 ++---
|
||||||
|
1 file changed, 2 insertions(+), 3 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/crypto/ts/ts_lib.c b/crypto/ts/ts_lib.c
|
||||||
|
index c51538a..e0f1063 100644
|
||||||
|
--- a/crypto/ts/ts_lib.c
|
||||||
|
+++ b/crypto/ts/ts_lib.c
|
||||||
|
@@ -90,9 +90,8 @@ int TS_OBJ_print_bio(BIO *bio, const ASN1_OBJECT *obj)
|
||||||
|
{
|
||||||
|
char obj_txt[128];
|
||||||
|
|
||||||
|
- int len = OBJ_obj2txt(obj_txt, sizeof(obj_txt), obj, 0);
|
||||||
|
- BIO_write(bio, obj_txt, len);
|
||||||
|
- BIO_write(bio, "\n", 1);
|
||||||
|
+ OBJ_obj2txt(obj_txt, sizeof(obj_txt), obj, 0);
|
||||||
|
+ BIO_printf(bio, "%s\n", obj_txt);
|
||||||
|
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
--
|
||||||
|
2.7.4
|
||||||
|
|
||||||
@@ -0,0 +1,91 @@
|
|||||||
|
From 20744f6b40b5ded059a848f66d6ba922f2a62eb3 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Matt Caswell <matt@openssl.org>
|
||||||
|
Date: Tue, 5 Jul 2016 11:46:26 +0100
|
||||||
|
Subject: [PATCH] Fix DTLS unprocessed records bug
|
||||||
|
|
||||||
|
During a DTLS handshake we may get records destined for the next epoch
|
||||||
|
arrive before we have processed the CCS. In that case we can't decrypt or
|
||||||
|
verify the record yet, so we buffer it for later use. When we do receive
|
||||||
|
the CCS we work through the queue of unprocessed records and process them.
|
||||||
|
|
||||||
|
Unfortunately the act of processing wipes out any existing packet data
|
||||||
|
that we were still working through. This includes any records from the new
|
||||||
|
epoch that were in the same packet as the CCS. We should only process the
|
||||||
|
buffered records if we've not got any data left.
|
||||||
|
|
||||||
|
Reviewed-by: Richard Levitte <levitte@openssl.org>
|
||||||
|
|
||||||
|
Upstream-Status: Backport
|
||||||
|
CVE: CVE-2016-2181 patch 1
|
||||||
|
Signed-off-by: Armin Kuster <akuster@mvista.com>
|
||||||
|
|
||||||
|
---
|
||||||
|
ssl/d1_pkt.c | 23 +++++++++++++++++++++--
|
||||||
|
1 file changed, 21 insertions(+), 2 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/ssl/d1_pkt.c b/ssl/d1_pkt.c
|
||||||
|
index fe30ec7..1fb119d 100644
|
||||||
|
--- a/ssl/d1_pkt.c
|
||||||
|
+++ b/ssl/d1_pkt.c
|
||||||
|
@@ -319,6 +319,7 @@ static int dtls1_retrieve_buffered_record(SSL *s, record_pqueue *queue)
|
||||||
|
static int dtls1_process_buffered_records(SSL *s)
|
||||||
|
{
|
||||||
|
pitem *item;
|
||||||
|
+ SSL3_BUFFER *rb;
|
||||||
|
|
||||||
|
item = pqueue_peek(s->d1->unprocessed_rcds.q);
|
||||||
|
if (item) {
|
||||||
|
@@ -326,6 +327,19 @@ static int dtls1_process_buffered_records(SSL *s)
|
||||||
|
if (s->d1->unprocessed_rcds.epoch != s->d1->r_epoch)
|
||||||
|
return (1); /* Nothing to do. */
|
||||||
|
|
||||||
|
+ rb = &s->s3->rbuf;
|
||||||
|
+
|
||||||
|
+ if (rb->left > 0) {
|
||||||
|
+ /*
|
||||||
|
+ * We've still got data from the current packet to read. There could
|
||||||
|
+ * be a record from the new epoch in it - so don't overwrite it
|
||||||
|
+ * with the unprocessed records yet (we'll do it when we've
|
||||||
|
+ * finished reading the current packet).
|
||||||
|
+ */
|
||||||
|
+ return 1;
|
||||||
|
+ }
|
||||||
|
+
|
||||||
|
+
|
||||||
|
/* Process all the records. */
|
||||||
|
while (pqueue_peek(s->d1->unprocessed_rcds.q)) {
|
||||||
|
dtls1_get_unprocessed_record(s);
|
||||||
|
@@ -581,6 +595,7 @@ int dtls1_get_record(SSL *s)
|
||||||
|
|
||||||
|
rr = &(s->s3->rrec);
|
||||||
|
|
||||||
|
+ again:
|
||||||
|
/*
|
||||||
|
* The epoch may have changed. If so, process all the pending records.
|
||||||
|
* This is a non-blocking operation.
|
||||||
|
@@ -593,7 +608,6 @@ int dtls1_get_record(SSL *s)
|
||||||
|
return 1;
|
||||||
|
|
||||||
|
/* get something from the wire */
|
||||||
|
- again:
|
||||||
|
/* check if we have the header */
|
||||||
|
if ((s->rstate != SSL_ST_READ_BODY) ||
|
||||||
|
(s->packet_length < DTLS1_RT_HEADER_LENGTH)) {
|
||||||
|
@@ -1830,8 +1844,13 @@ static DTLS1_BITMAP *dtls1_get_bitmap(SSL *s, SSL3_RECORD *rr,
|
||||||
|
if (rr->epoch == s->d1->r_epoch)
|
||||||
|
return &s->d1->bitmap;
|
||||||
|
|
||||||
|
- /* Only HM and ALERT messages can be from the next epoch */
|
||||||
|
+ /*
|
||||||
|
+ * Only HM and ALERT messages can be from the next epoch and only if we
|
||||||
|
+ * have already processed all of the unprocessed records from the last
|
||||||
|
+ * epoch
|
||||||
|
+ */
|
||||||
|
else if (rr->epoch == (unsigned long)(s->d1->r_epoch + 1) &&
|
||||||
|
+ s->d1->unprocessed_rcds.epoch != s->d1->r_epoch &&
|
||||||
|
(rr->type == SSL3_RT_HANDSHAKE || rr->type == SSL3_RT_ALERT)) {
|
||||||
|
*is_next_epoch = 1;
|
||||||
|
return &s->d1->next_bitmap;
|
||||||
|
--
|
||||||
|
2.7.4
|
||||||
|
|
||||||
239
meta/recipes-connectivity/openssl/openssl/CVE-2016-2181_p2.patch
Normal file
239
meta/recipes-connectivity/openssl/openssl/CVE-2016-2181_p2.patch
Normal file
@@ -0,0 +1,239 @@
|
|||||||
|
From 3884b47b7c255c2e94d9b387ee83c7e8bb981258 Mon Sep 17 00:00:00 2001
|
||||||
|
From: Matt Caswell <matt@openssl.org>
|
||||||
|
Date: Tue, 5 Jul 2016 12:04:37 +0100
|
||||||
|
Subject: [PATCH] Fix DTLS replay protection
|
||||||
|
|
||||||
|
The DTLS implementation provides some protection against replay attacks
|
||||||
|
in accordance with RFC6347 section 4.1.2.6.
|
||||||
|
|
||||||
|
A sliding "window" of valid record sequence numbers is maintained with
|
||||||
|
the "right" hand edge of the window set to the highest sequence number we
|
||||||
|
have received so far. Records that arrive that are off the "left" hand
|
||||||
|
edge of the window are rejected. Records within the window are checked
|
||||||
|
against a list of records received so far. If we already received it then
|
||||||
|
we also reject the new record.
|
||||||
|
|
||||||
|
If we have not already received the record, or the sequence number is off
|
||||||
|
the right hand edge of the window then we verify the MAC of the record.
|
||||||
|
If MAC verification fails then we discard the record. Otherwise we mark
|
||||||
|
the record as received. If the sequence number was off the right hand edge
|
||||||
|
of the window, then we slide the window along so that the right hand edge
|
||||||
|
is in line with the newly received sequence number.
|
||||||
|
|
||||||
|
Records may arrive for future epochs, i.e. a record from after a CCS being
|
||||||
|
sent, can arrive before the CCS does if the packets get re-ordered. As we
|
||||||
|
have not yet received the CCS we are not yet in a position to decrypt or
|
||||||
|
validate the MAC of those records. OpenSSL places those records on an
|
||||||
|
unprocessed records queue. It additionally updates the window immediately,
|
||||||
|
even though we have not yet verified the MAC. This will only occur if
|
||||||
|
currently in a handshake/renegotiation.
|
||||||
|
|
||||||
|
This could be exploited by an attacker by sending a record for the next
|
||||||
|
epoch (which does not have to decrypt or have a valid MAC), with a very
|
||||||
|
large sequence number. This means the right hand edge of the window is
|
||||||
|
moved very far to the right, and all subsequent legitimate packets are
|
||||||
|
dropped causing a denial of service.
|
||||||
|
|
||||||
|
A similar effect can be achieved during the initial handshake. In this
|
||||||
|
case there is no MAC key negotiated yet. Therefore an attacker can send a
|
||||||
|
message for the current epoch with a very large sequence number. The code
|
||||||
|
will process the record as normal. If the hanshake message sequence number
|
||||||
|
(as opposed to the record sequence number that we have been talking about
|
||||||
|
so far) is in the future then the injected message is bufferred to be
|
||||||
|
handled later, but the window is still updated. Therefore all subsequent
|
||||||
|
legitimate handshake records are dropped. This aspect is not considered a
|
||||||
|
security issue because there are many ways for an attacker to disrupt the
|
||||||
|
initial handshake and prevent it from completing successfully (e.g.
|
||||||
|
injection of a handshake message will cause the Finished MAC to fail and
|
||||||
|
the handshake to be aborted). This issue comes about as a result of trying
|
||||||
|
to do replay protection, but having no integrity mechanism in place yet.
|
||||||
|
Does it even make sense to have replay protection in epoch 0? That
|
||||||
|
issue isn't addressed here though.
|
||||||
|
|
||||||
|
This addressed an OCAP Audit issue.
|
||||||
|
|
||||||
|
CVE-2016-2181
|
||||||
|
|
||||||
|
Upstream-Status: Backport
|
||||||
|
CVE: CVE-2016-2181 patch2
|
||||||
|
Signed-off-by: Armin Kuster <akuster@mvista.com>
|
||||||
|
|
||||||
|
|
||||||
|
Reviewed-by: Richard Levitte <levitte@openssl.org>
|
||||||
|
---
|
||||||
|
ssl/d1_pkt.c | 60 +++++++++++++++++++++++++++++++++++++++++++++++------------
|
||||||
|
ssl/ssl.h | 1 +
|
||||||
|
ssl/ssl_err.c | 4 +++-
|
||||||
|
3 files changed, 52 insertions(+), 13 deletions(-)
|
||||||
|
|
||||||
|
Index: openssl-1.0.2h/ssl/d1_pkt.c
|
||||||
|
===================================================================
|
||||||
|
--- openssl-1.0.2h.orig/ssl/d1_pkt.c
|
||||||
|
+++ openssl-1.0.2h/ssl/d1_pkt.c
|
||||||
|
@@ -194,7 +194,7 @@ static int dtls1_record_needs_buffering(
|
||||||
|
#endif
|
||||||
|
static int dtls1_buffer_record(SSL *s, record_pqueue *q,
|
||||||
|
unsigned char *priority);
|
||||||
|
-static int dtls1_process_record(SSL *s);
|
||||||
|
+static int dtls1_process_record(SSL *s, DTLS1_BITMAP *bitmap);
|
||||||
|
|
||||||
|
/* copy buffered record into SSL structure */
|
||||||
|
static int dtls1_copy_record(SSL *s, pitem *item)
|
||||||
|
@@ -320,13 +320,18 @@ static int dtls1_process_buffered_record
|
||||||
|
{
|
||||||
|
pitem *item;
|
||||||
|
SSL3_BUFFER *rb;
|
||||||
|
+ SSL3_RECORD *rr;
|
||||||
|
+ DTLS1_BITMAP *bitmap;
|
||||||
|
+ unsigned int is_next_epoch;
|
||||||
|
+ int replayok = 1;
|
||||||
|
|
||||||
|
item = pqueue_peek(s->d1->unprocessed_rcds.q);
|
||||||
|
if (item) {
|
||||||
|
/* Check if epoch is current. */
|
||||||
|
if (s->d1->unprocessed_rcds.epoch != s->d1->r_epoch)
|
||||||
|
- return (1); /* Nothing to do. */
|
||||||
|
+ return 1; /* Nothing to do. */
|
||||||
|
|
||||||
|
+ rr = &s->s3->rrec;
|
||||||
|
rb = &s->s3->rbuf;
|
||||||
|
|
||||||
|
if (rb->left > 0) {
|
||||||
|
@@ -343,11 +348,41 @@ static int dtls1_process_buffered_record
|
||||||
|
/* Process all the records. */
|
||||||
|
while (pqueue_peek(s->d1->unprocessed_rcds.q)) {
|
||||||
|
dtls1_get_unprocessed_record(s);
|
||||||
|
- if (!dtls1_process_record(s))
|
||||||
|
- return (0);
|
||||||
|
+ bitmap = dtls1_get_bitmap(s, rr, &is_next_epoch);
|
||||||
|
+ if (bitmap == NULL) {
|
||||||
|
+ /*
|
||||||
|
+ * Should not happen. This will only ever be NULL when the
|
||||||
|
+ * current record is from a different epoch. But that cannot
|
||||||
|
+ * be the case because we already checked the epoch above
|
||||||
|
+ */
|
||||||
|
+ SSLerr(SSL_F_DTLS1_PROCESS_BUFFERED_RECORDS,
|
||||||
|
+ ERR_R_INTERNAL_ERROR);
|
||||||
|
+ return 0;
|
||||||
|
+ }
|
||||||
|
+#ifndef OPENSSL_NO_SCTP
|
||||||
|
+ /* Only do replay check if no SCTP bio */
|
||||||
|
+ if (!BIO_dgram_is_sctp(SSL_get_rbio(s)))
|
||||||
|
+#endif
|
||||||
|
+ {
|
||||||
|
+ /*
|
||||||
|
+ * Check whether this is a repeat, or aged record. We did this
|
||||||
|
+ * check once already when we first received the record - but
|
||||||
|
+ * we might have updated the window since then due to
|
||||||
|
+ * records we subsequently processed.
|
||||||
|
+ */
|
||||||
|
+ replayok = dtls1_record_replay_check(s, bitmap);
|
||||||
|
+ }
|
||||||
|
+
|
||||||
|
+ if (!replayok || !dtls1_process_record(s, bitmap)) {
|
||||||
|
+ /* dump this record */
|
||||||
|
+ rr->length = 0;
|
||||||
|
+ s->packet_length = 0;
|
||||||
|
+ continue;
|
||||||
|
+ }
|
||||||
|
+
|
||||||
|
if (dtls1_buffer_record(s, &(s->d1->processed_rcds),
|
||||||
|
s->s3->rrec.seq_num) < 0)
|
||||||
|
- return -1;
|
||||||
|
+ return 0;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@@ -358,7 +393,7 @@ static int dtls1_process_buffered_record
|
||||||
|
s->d1->processed_rcds.epoch = s->d1->r_epoch;
|
||||||
|
s->d1->unprocessed_rcds.epoch = s->d1->r_epoch + 1;
|
||||||
|
|
||||||
|
- return (1);
|
||||||
|
+ return 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
#if 0
|
||||||
|
@@ -405,7 +440,7 @@ static int dtls1_get_buffered_record(SSL
|
||||||
|
|
||||||
|
#endif
|
||||||
|
|
||||||
|
-static int dtls1_process_record(SSL *s)
|
||||||
|
+static int dtls1_process_record(SSL *s, DTLS1_BITMAP *bitmap)
|
||||||
|
{
|
||||||
|
int i, al;
|
||||||
|
int enc_err;
|
||||||
|
@@ -565,6 +600,10 @@ static int dtls1_process_record(SSL *s)
|
||||||
|
|
||||||
|
/* we have pulled in a full packet so zero things */
|
||||||
|
s->packet_length = 0;
|
||||||
|
+
|
||||||
|
+ /* Mark receipt of record. */
|
||||||
|
+ dtls1_record_bitmap_update(s, bitmap);
|
||||||
|
+
|
||||||
|
return (1);
|
||||||
|
|
||||||
|
f_err:
|
||||||
|
@@ -600,7 +639,7 @@ int dtls1_get_record(SSL *s)
|
||||||
|
* The epoch may have changed. If so, process all the pending records.
|
||||||
|
* This is a non-blocking operation.
|
||||||
|
*/
|
||||||
|
- if (dtls1_process_buffered_records(s) < 0)
|
||||||
|
+ if (!dtls1_process_buffered_records(s))
|
||||||
|
return -1;
|
||||||
|
|
||||||
|
/* if we're renegotiating, then there may be buffered records */
|
||||||
|
@@ -735,20 +774,17 @@ int dtls1_get_record(SSL *s)
|
||||||
|
if (dtls1_buffer_record
|
||||||
|
(s, &(s->d1->unprocessed_rcds), rr->seq_num) < 0)
|
||||||
|
return -1;
|
||||||
|
- /* Mark receipt of record. */
|
||||||
|
- dtls1_record_bitmap_update(s, bitmap);
|
||||||
|
}
|
||||||
|
rr->length = 0;
|
||||||
|
s->packet_length = 0;
|
||||||
|
goto again;
|
||||||
|
}
|
||||||
|
|
||||||
|
- if (!dtls1_process_record(s)) {
|
||||||
|
+ if (!dtls1_process_record(s, bitmap)) {
|
||||||
|
rr->length = 0;
|
||||||
|
s->packet_length = 0; /* dump this record */
|
||||||
|
goto again; /* get another record */
|
||||||
|
}
|
||||||
|
- dtls1_record_bitmap_update(s, bitmap); /* Mark receipt of record. */
|
||||||
|
|
||||||
|
return (1);
|
||||||
|
|
||||||
|
Index: openssl-1.0.2h/ssl/ssl.h
|
||||||
|
===================================================================
|
||||||
|
--- openssl-1.0.2h.orig/ssl/ssl.h
|
||||||
|
+++ openssl-1.0.2h/ssl/ssl.h
|
||||||
|
@@ -2623,6 +2623,7 @@ void ERR_load_SSL_strings(void);
|
||||||
|
# define SSL_F_DTLS1_HEARTBEAT 305
|
||||||
|
# define SSL_F_DTLS1_OUTPUT_CERT_CHAIN 255
|
||||||
|
# define SSL_F_DTLS1_PREPROCESS_FRAGMENT 288
|
||||||
|
+# define SSL_F_DTLS1_PROCESS_BUFFERED_RECORDS 404
|
||||||
|
# define SSL_F_DTLS1_PROCESS_OUT_OF_SEQ_MESSAGE 256
|
||||||
|
# define SSL_F_DTLS1_PROCESS_RECORD 257
|
||||||
|
# define SSL_F_DTLS1_READ_BYTES 258
|
||||||
|
Index: openssl-1.0.2h/ssl/ssl_err.c
|
||||||
|
===================================================================
|
||||||
|
--- openssl-1.0.2h.orig/ssl/ssl_err.c
|
||||||
|
+++ openssl-1.0.2h/ssl/ssl_err.c
|
||||||
|
@@ -1,6 +1,6 @@
|
||||||
|
/* ssl/ssl_err.c */
|
||||||
|
/* ====================================================================
|
||||||
|
- * Copyright (c) 1999-2015 The OpenSSL Project. All rights reserved.
|
||||||
|
+ * Copyright (c) 1999-2016 The OpenSSL Project. All rights reserved.
|
||||||
|
*
|
||||||
|
* Redistribution and use in source and binary forms, with or without
|
||||||
|
* modification, are permitted provided that the following conditions
|
||||||
|
@@ -93,6 +93,8 @@ static ERR_STRING_DATA SSL_str_functs[]
|
||||||
|
{ERR_FUNC(SSL_F_DTLS1_HEARTBEAT), "dtls1_heartbeat"},
|
||||||
|
{ERR_FUNC(SSL_F_DTLS1_OUTPUT_CERT_CHAIN), "dtls1_output_cert_chain"},
|
||||||
|
{ERR_FUNC(SSL_F_DTLS1_PREPROCESS_FRAGMENT), "DTLS1_PREPROCESS_FRAGMENT"},
|
||||||
|
+ {ERR_FUNC(SSL_F_DTLS1_PROCESS_BUFFERED_RECORDS),
|
||||||
|
+ "DTLS1_PROCESS_BUFFERED_RECORDS"},
|
||||||
|
{ERR_FUNC(SSL_F_DTLS1_PROCESS_OUT_OF_SEQ_MESSAGE),
|
||||||
|
"DTLS1_PROCESS_OUT_OF_SEQ_MESSAGE"},
|
||||||
|
{ERR_FUNC(SSL_F_DTLS1_PROCESS_RECORD), "DTLS1_PROCESS_RECORD"},
|
||||||
@@ -0,0 +1,30 @@
|
|||||||
|
From 26aebca74e38ae09f673c2045cc8e2ef762d265a Mon Sep 17 00:00:00 2001
|
||||||
|
From: Matt Caswell <matt@openssl.org>
|
||||||
|
Date: Wed, 17 Aug 2016 17:55:36 +0100
|
||||||
|
Subject: [PATCH] Update function error code
|
||||||
|
|
||||||
|
A function error code needed updating due to merge issues.
|
||||||
|
|
||||||
|
Reviewed-by: Richard Levitte <levitte@openssl.org>
|
||||||
|
|
||||||
|
Upstream-Status: Backport
|
||||||
|
CVE: CVE-2016-2181 patch 3
|
||||||
|
Signed-off-by: Armin Kuster <akuster@mvista.com>
|
||||||
|
|
||||||
|
---
|
||||||
|
ssl/ssl.h | 2 +-
|
||||||
|
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||||
|
|
||||||
|
Index: openssl-1.0.2h/ssl/ssl.h
|
||||||
|
===================================================================
|
||||||
|
--- openssl-1.0.2h.orig/ssl/ssl.h
|
||||||
|
+++ openssl-1.0.2h/ssl/ssl.h
|
||||||
|
@@ -2623,7 +2623,7 @@ void ERR_load_SSL_strings(void);
|
||||||
|
# define SSL_F_DTLS1_HEARTBEAT 305
|
||||||
|
# define SSL_F_DTLS1_OUTPUT_CERT_CHAIN 255
|
||||||
|
# define SSL_F_DTLS1_PREPROCESS_FRAGMENT 288
|
||||||
|
-# define SSL_F_DTLS1_PROCESS_BUFFERED_RECORDS 404
|
||||||
|
+# define SSL_F_DTLS1_PROCESS_BUFFERED_RECORDS 424
|
||||||
|
# define SSL_F_DTLS1_PROCESS_OUT_OF_SEQ_MESSAGE 256
|
||||||
|
# define SSL_F_DTLS1_PROCESS_RECORD 257
|
||||||
|
# define SSL_F_DTLS1_READ_BYTES 258
|
||||||
@@ -0,0 +1,70 @@
|
|||||||
|
From e36f27ddb80a48e579783bc29fb3758988342b71 Mon Sep 17 00:00:00 2001
|
||||||
|
From: "Dr. Stephen Henson" <steve@openssl.org>
|
||||||
|
Date: Fri, 5 Aug 2016 14:26:03 +0100
|
||||||
|
Subject: [PATCH] Check for errors in BN_bn2dec()
|
||||||
|
|
||||||
|
If an oversize BIGNUM is presented to BN_bn2dec() it can cause
|
||||||
|
BN_div_word() to fail and not reduce the value of 't' resulting
|
||||||
|
in OOB writes to the bn_data buffer and eventually crashing.
|
||||||
|
|
||||||
|
Fix by checking return value of BN_div_word() and checking writes
|
||||||
|
don't overflow buffer.
|
||||||
|
|
||||||
|
Thanks to Shi Lei for reporting this bug.
|
||||||
|
|
||||||
|
CVE-2016-2182
|
||||||
|
|
||||||
|
Reviewed-by: Tim Hudson <tjh@openssl.org>
|
||||||
|
(cherry picked from commit 07bed46f332fce8c1d157689a2cdf915a982ae34)
|
||||||
|
|
||||||
|
Conflicts:
|
||||||
|
crypto/bn/bn_print.c
|
||||||
|
|
||||||
|
Upstream-Status: Backport
|
||||||
|
CVE: CVE-2016-2182
|
||||||
|
Signed-off-by: Armin Kuster <akuster@mvista.com>
|
||||||
|
|
||||||
|
---
|
||||||
|
crypto/bn/bn_print.c | 11 ++++++++---
|
||||||
|
1 file changed, 8 insertions(+), 3 deletions(-)
|
||||||
|
|
||||||
|
diff --git a/crypto/bn/bn_print.c b/crypto/bn/bn_print.c
|
||||||
|
index bfa31ef..b44403e 100644
|
||||||
|
--- a/crypto/bn/bn_print.c
|
||||||
|
+++ b/crypto/bn/bn_print.c
|
||||||
|
@@ -111,6 +111,7 @@ char *BN_bn2dec(const BIGNUM *a)
|
||||||
|
char *p;
|
||||||
|
BIGNUM *t = NULL;
|
||||||
|
BN_ULONG *bn_data = NULL, *lp;
|
||||||
|
+ int bn_data_num;
|
||||||
|
|
||||||
|
/*-
|
||||||
|
* get an upper bound for the length of the decimal integer
|
||||||
|
@@ -120,9 +121,9 @@ char *BN_bn2dec(const BIGNUM *a)
|
||||||
|
*/
|
||||||
|
i = BN_num_bits(a) * 3;
|
||||||
|
num = (i / 10 + i / 1000 + 1) + 1;
|
||||||
|
- bn_data =
|
||||||
|
- (BN_ULONG *)OPENSSL_malloc((num / BN_DEC_NUM + 1) * sizeof(BN_ULONG));
|
||||||
|
- buf = (char *)OPENSSL_malloc(num + 3);
|
||||||
|
+ bn_data_num = num / BN_DEC_NUM + 1;
|
||||||
|
+ bn_data = OPENSSL_malloc(bn_data_num * sizeof(BN_ULONG));
|
||||||
|
+ buf = OPENSSL_malloc(num + 3);
|
||||||
|
if ((buf == NULL) || (bn_data == NULL)) {
|
||||||
|
BNerr(BN_F_BN_BN2DEC, ERR_R_MALLOC_FAILURE);
|
||||||
|
goto err;
|
||||||
|
@@ -143,7 +144,11 @@ char *BN_bn2dec(const BIGNUM *a)
|
||||||
|
i = 0;
|
||||||
|
while (!BN_is_zero(t)) {
|
||||||
|
*lp = BN_div_word(t, BN_DEC_CONV);
|
||||||
|
+ if (*lp == (BN_ULONG)-1)
|
||||||
|
+ goto err;
|
||||||
|
lp++;
|
||||||
|
+ if (lp - bn_data >= bn_data_num)
|
||||||
|
+ goto err;
|
||||||
|
}
|
||||||
|
lp--;
|
||||||
|
/*
|
||||||
|
--
|
||||||
|
2.7.4
|
||||||
|
|
||||||
@@ -0,0 +1,53 @@
|
|||||||
|
From baaabfd8fdcec04a691695fad9a664bea43202b6 Mon Sep 17 00:00:00 2001
|
||||||
|
From: "Dr. Stephen Henson" <steve@openssl.org>
|
||||||
|
Date: Tue, 23 Aug 2016 18:14:54 +0100
|
||||||
|
Subject: [PATCH] Sanity check ticket length.
|
||||||
|
|
||||||
|
If a ticket callback changes the HMAC digest to SHA512 the existing
|
||||||
|
sanity checks are not sufficient and an attacker could perform a DoS
|
||||||
|
attack with a malformed ticket. Add additional checks based on
|
||||||
|
HMAC size.
|
||||||
|
|
||||||
|
Thanks to Shi Lei for reporting this bug.
|
||||||
|
|
||||||
|
CVE-2016-6302
|
||||||
|
|
||||||
|
Reviewed-by: Rich Salz <rsalz@openssl.org>
|
||||||
|
|
||||||
|
Upstream-Status: Backport
|
||||||
|
CVE: CVE-2016-6302
|
||||||
|
Signed-off-by: Armin Kuster <akuster@mvista.com>
|
||||||
|
|
||||||
|
---
|
||||||
|
ssl/t1_lib.c | 11 ++++++++---
|
||||||
|
1 file changed, 8 insertions(+), 3 deletions(-)
|
||||||
|
|
||||||
|
Index: openssl-1.0.2h/ssl/t1_lib.c
|
||||||
|
===================================================================
|
||||||
|
--- openssl-1.0.2h.orig/ssl/t1_lib.c
|
||||||
|
+++ openssl-1.0.2h/ssl/t1_lib.c
|
||||||
|
@@ -3397,9 +3397,7 @@ static int tls_decrypt_ticket(SSL *s, co
|
||||||
|
HMAC_CTX hctx;
|
||||||
|
EVP_CIPHER_CTX ctx;
|
||||||
|
SSL_CTX *tctx = s->initial_ctx;
|
||||||
|
- /* Need at least keyname + iv + some encrypted data */
|
||||||
|
- if (eticklen < 48)
|
||||||
|
- return 2;
|
||||||
|
+
|
||||||
|
/* Initialize session ticket encryption and HMAC contexts */
|
||||||
|
HMAC_CTX_init(&hctx);
|
||||||
|
EVP_CIPHER_CTX_init(&ctx);
|
||||||
|
@@ -3433,6 +3431,13 @@ static int tls_decrypt_ticket(SSL *s, co
|
||||||
|
if (mlen < 0) {
|
||||||
|
goto err;
|
||||||
|
}
|
||||||
|
+ /* Sanity check ticket length: must exceed keyname + IV + HMAC */
|
||||||
|
+ if (eticklen <= 16 + EVP_CIPHER_CTX_iv_length(&ctx) + mlen) {
|
||||||
|
+ HMAC_CTX_cleanup(&hctx);
|
||||||
|
+ EVP_CIPHER_CTX_cleanup(&ctx);
|
||||||
|
+ return 2;
|
||||||
|
+ }
|
||||||
|
+
|
||||||
|
eticklen -= mlen;
|
||||||
|
/* Check HMAC of encrypted ticket */
|
||||||
|
if (HMAC_Update(&hctx, etick, eticklen) <= 0
|
||||||
@@ -0,0 +1,36 @@
|
|||||||
|
From 1027ad4f34c30b8585592764b9a670ba36888269 Mon Sep 17 00:00:00 2001
|
||||||
|
From: "Dr. Stephen Henson" <steve@openssl.org>
|
||||||
|
Date: Fri, 19 Aug 2016 23:28:29 +0100
|
||||||
|
Subject: [PATCH] Avoid overflow in MDC2_Update()
|
||||||
|
|
||||||
|
Thanks to Shi Lei for reporting this issue.
|
||||||
|
|
||||||
|
CVE-2016-6303
|
||||||
|
|
||||||
|
Reviewed-by: Matt Caswell <matt@openssl.org>
|
||||||
|
(cherry picked from commit 55d83bf7c10c7b205fffa23fa7c3977491e56c07)
|
||||||
|
|
||||||
|
Upstream-Status: Backport
|
||||||
|
CVE: CVE-2016-6303
|
||||||
|
Signed-off-by: Armin Kuster <akuster@mvista.com>
|
||||||
|
|
||||||
|
---
|
||||||
|
crypto/mdc2/mdc2dgst.c | 2 +-
|
||||||
|
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||||
|
|
||||||
|
diff --git a/crypto/mdc2/mdc2dgst.c b/crypto/mdc2/mdc2dgst.c
|
||||||
|
index 6615cf8..2dce493 100644
|
||||||
|
--- a/crypto/mdc2/mdc2dgst.c
|
||||||
|
+++ b/crypto/mdc2/mdc2dgst.c
|
||||||
|
@@ -91,7 +91,7 @@ int MDC2_Update(MDC2_CTX *c, const unsigned char *in, size_t len)
|
||||||
|
|
||||||
|
i = c->num;
|
||||||
|
if (i != 0) {
|
||||||
|
- if (i + len < MDC2_BLOCK) {
|
||||||
|
+ if (len < MDC2_BLOCK - i) {
|
||||||
|
/* partial block */
|
||||||
|
memcpy(&(c->data[i]), in, len);
|
||||||
|
c->num += (int)len;
|
||||||
|
--
|
||||||
|
2.7.4
|
||||||
|
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user