Mirror of https://git.yoctoproject.org/poky
Synced 2026-02-15 21:23:04 +01:00

Compare commits: yocto-5.2. ... scarthgap-

159 Commits
f7def85be9 5d657e0f47 2d92aefd4b bc322a184d d2d129fabc abcd5a88a3
4a1234df65 32ddc0417f da1e522530 502e5a8f51 3c7d9c28ac ad1147aa7e
4884f3edb8 723d7451a3 408d77ae35 83b2701c97 6a6402dfdf a2ae5627ab
59c0bbcdb9 84595983b8 0c585f6b66 3f86c9b37c 7d8845f154 6570fd5116
b2286f9882 818fe3d7df 0b35aab466 554e8de6a4 afba27a984 35bd8118fc
18c80c1c36 a1d5e69a2e d6ae637763 c27fdaedbd ba430c14d8 949f078058
5837e5e945 f90ddfe6ab f0a906d661 98cf20c194 5372b83b13 fc979d94f6
b4ab121b5f 99d16e4c35 a39410ff48 a4cd5dc1ee c3235e3ae9 b1a0ca3cab
5ff13f1b01 10e6cc830c 5c20153f7f 8163fc2f6c f22145f3bf 34efd9bbf4
7fbd3442ce 0efe15c5ba 6a8b1d85e7 331a3b87a7 bafda5147a 463ef7f618
fa18cd0932 fdfdd7b319 ef6b1510d5 1bd707ab14 750ceb4b76 125ca0ff2f
83793ccd86 934c533196 d1811356b7 bcb569e698 cc341e44bf fa23d0fc6d
fb60467f0e bb93b6d1fb ae6cebca5b d37ef8925a fb4eb8a09d 1d751a0e83
184753b1af a905386e10 d00c23ef3e df183ca5b0 4ffcc07420 0163ea4421
ce5fdf4ff6 6caf81a6eb d7cbee7a50 59c0a35bb8 96b48c195a 0c079e62fb
235050fbfa 3a3c1f7dc6 349e4c816a c78f26dbfd 50b2758182 c5df9c829a
74ea960113 107a6cec75 68f9a4b73d 86ea22d047 0fe72b1a67 f490519999
1ba6abfd4a 1a52a8e93e 2e084b7b80 bef4fd23a0 9980bf64cc b79fbd4a0f
1148564ab7 b00462d6fb c9e5f6d0b0 f4ae486b78 8205ff560c ee2d64d759
3c1d8fd96e e7d86da7aa ea63f4e0ed 22357a9a04 1e83e98723 4b07a5316e
b9b47b1a39 c4a4246f79 f450ddee3e 478a0070e1 22927ccce3 cc5aededce
af21f39987 a4e6110a91 b1c0e46031 0d895fa4c1 94dc3e62a5 64c3534366
02eacd385e 0be9782b02 6f69040e89 7721813410 46e334391e cb75a627c2
13f0a89c07 bdf47412b2 fc2cd22e49 7b3bce6d48 43167d556a 73b5770461
250d1c29ee 6600be67d8 5d33880b9d 7dddfb7420 3e10e2afba 6c9d0bddbc
3c9778fbc8 eb723d4a10 299ead538a 795511f0ea 8eb06eb958 fb91a49387
25b05cb80d 5b727a8fa1 845626a36b
@@ -1,4 +0,0 @@
[b4]
prep-perpatch-check-cmd = ./scripts/b4-wrapper-poky.py prep-perpatch-check-cmd
send-auto-cc-cmd = ./scripts/b4-wrapper-poky.py send-auto-cc-cmd
send-auto-to-cmd = ./scripts/b4-wrapper-poky.py send-auto-to-cmd
@@ -41,7 +41,6 @@ Component/Subsystem Maintainers
* devtool: Saul Wold
* eSDK: Saul Wold
* overlayfs: Vyacheslav Yurkov
* Patchtest: Trevor Gamblin

Maintainers needed
------------------
@@ -53,6 +52,7 @@ Maintainers needed
* error reporting system/web UI
* wic
* Patchwork
* Patchtest
* Matchbox
* Sato
* Autobuilder
@@ -67,3 +67,5 @@ Shadow maintainers/development needed

* toaster
* bitbake
@@ -6,27 +6,28 @@ of OpenEmbedded. It is distro-less (can build a functional image with
DISTRO = "nodistro") and contains only emulated machine support.

For information about OpenEmbedded, see the OpenEmbedded website:
<https://www.openembedded.org/>
https://www.openembedded.org/

The Yocto Project has extensive documentation about OE including a reference manual
which can be found at:
<https://docs.yoctoproject.org/>
https://docs.yoctoproject.org/


Contributing
------------

Please refer to our contributor guide here: <https://docs.yoctoproject.org/dev/contributor-guide/>
Please refer to our contributor guide here: https://docs.yoctoproject.org/dev/contributor-guide/
for full details on how to submit changes.

As a quick guide, patches should be sent to openembedded-core@lists.openembedded.org
The git command to do that would be:

```
git send-email -M -1 --to openembedded-core@lists.openembedded.org
```
git send-email -M -1 --to openembedded-core@lists.openembedded.org

Mailing list:
<https://lists.openembedded.org/g/openembedded-core>

https://lists.openembedded.org/g/openembedded-core

Source code:
<https://git.openembedded.org/openembedded-core/>

https://git.openembedded.org/openembedded-core/
@@ -5,10 +5,10 @@ To simplify development, the build system supports building images to
work with the QEMU emulator in system emulation mode. Several architectures
are currently supported in 32 and 64 bit variants:

* ARM (qemuarm + qemuarm64)
* x86 (qemux86 + qemux86-64)
* PowerPC (qemuppc only)
* MIPS (qemumips + qemumips64)
* ARM (qemuarm + qemuarm64)
* x86 (qemux86 + qemux86-64)
* PowerPC (qemuppc only)
* MIPS (qemumips + qemumips64)

Use of the QEMU images is covered in the Yocto Project Reference Manual.
The appropriate MACHINE variable value corresponding to the target is given
SECURITY.md (16 lines changed)
@@ -1,9 +1,9 @@
How to Report a Potential Vulnerability
=======================================
How to Report a Potential Vulnerability?
========================================

If you would like to report a public issue (for example, one with a released
CVE number), please report it using the
[Security Bugzilla](https://bugzilla.yoctoproject.org/enter_bug.cgi?product=Security)
[https://bugzilla.yoctoproject.org/enter_bug.cgi?product=Security Security Bugzilla]

If you are dealing with a not-yet released or urgent issue, please send a
message to security AT yoctoproject DOT org, including as many details as
@@ -13,10 +13,10 @@ and any example code, if available.
Branches maintained with security fixes
---------------------------------------

See [Stable release and LTS](https://wiki.yoctoproject.org/wiki/Stable_Release_and_LTS)
See [https://wiki.yoctoproject.org/wiki/Stable_Release_and_LTS Stable release and LTS]
for detailed info regarding the policies and maintenance of Stable branches.

The [Release page](https://wiki.yoctoproject.org/wiki/Releases) contains
a list of all releases of the Yocto Project. Versions in grey are no longer
actively maintained with security patches, but well-tested patches may still
be accepted for them for significant issues.
The [https://wiki.yoctoproject.org/wiki/Releases Release page] contains a list of all
releases of the Yocto Project. Versions in grey are no longer actively maintained with
security patches, but well-tested patches may still be accepted for them for
significant issues.
@@ -1,4 +0,0 @@
[b4]
send-series-to = bitbake-devel@lists.openembedded.org
send-auto-cc-cmd = ./contrib/b4-wrapper-bitbake.py send-auto-cc-cmd
prep-pre-flight-checks = disable-needs-checking
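For context, b4 drives a `send-auto-cc-cmd` hook by piping each patch to the configured command on stdin and treating every line it prints as an extra Cc address. A minimal sketch of that contract, assuming the wrapper path from the config above and a hypothetical patch file name:

```python
# Sketch only: emulate how b4 invokes the configured send-auto-cc-cmd hook.
import subprocess

with open("0001-example.patch") as f:  # hypothetical patch file
    result = subprocess.run(
        ["./contrib/b4-wrapper-bitbake.py", "send-auto-cc-cmd"],
        stdin=f, capture_output=True, text=True, check=True,
    )

# One address per line of output becomes an additional Cc recipient
cc_list = [line for line in result.stdout.splitlines() if line]
print(cc_list)
```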
@@ -27,7 +27,7 @@ from bb.main import bitbake_main, BitBakeConfigParameters, BBMainException

bb.utils.check_system_locale()

__version__ = "2.12.0"
__version__ = "2.8.0"

if __name__ == "__main__":
    if __version__ != bb.__version__:
@@ -1 +0,0 @@
bitbake-layers
@@ -72,17 +72,16 @@ def find_siginfo_task(bbhandler, pn, taskname, sig1=None, sig2=None):
    elif sig2 not in sigfiles:
        logger.error('No sigdata files found matching %s %s with signature %s' % (pn, taskname, sig2))
        sys.exit(1)

    latestfiles = [sigfiles[sig1]['path'], sigfiles[sig2]['path']]
    else:
        sigfiles = find_siginfo(bbhandler, pn, taskname)
        latestsigs = sorted(sigfiles.keys(), key=lambda h: sigfiles[h]['time'])[-2:]
        if not latestsigs:
            logger.error('No sigdata files found matching %s %s' % (pn, taskname))
            sys.exit(1)
        latestfiles = [sigfiles[latestsigs[0]]['path']]
        if len(latestsigs) > 1:
            latestfiles.append(sigfiles[latestsigs[1]]['path'])
        sig1 = latestsigs[0]
        sig2 = latestsigs[1]

        latestfiles = [sigfiles[sig1]['path'], sigfiles[sig2]['path']]

    return latestfiles
@@ -16,7 +16,6 @@ bindir = os.path.dirname(__file__)
topdir = os.path.dirname(bindir)
sys.path[0:0] = [os.path.join(topdir, 'lib')]

import bb.providers
import bb.tinfoil

if __name__ == "__main__":
@@ -41,15 +40,10 @@ if __name__ == "__main__":
    with bb.tinfoil.Tinfoil(tracking=True, setup_logging=not quiet) as tinfoil:
        if args.recipe:
            tinfoil.prepare(quiet=3 if quiet else 2)
            try:
                d = tinfoil.parse_recipe(args.recipe)
            except bb.providers.NoProvider as e:
                sys.exit(str(e))
            d = tinfoil.parse_recipe(args.recipe)
        else:
            tinfoil.prepare(quiet=2, config_only=True)
            # Expand keys and run anonymous functions to get identical result to
            # "bitbake -e"
            d = tinfoil.finalizeData()
            d = tinfoil.config_data

        value = None
        if args.flag:
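The `finalizeData()` call above is what makes `bitbake-getvar` match `bitbake -e` output instead of the raw `config_data`. A minimal sketch of the same code path, assuming an initialized build environment (so Tinfoil can find `conf/bblayers.conf` via BBPATH):

```python
# Sketch only: read one variable the way the updated bitbake-getvar does.
import bb.tinfoil

with bb.tinfoil.Tinfoil(tracking=True) as tinfoil:
    tinfoil.prepare(quiet=2, config_only=True)
    # Expand keys and run anonymous functions, matching "bitbake -e"
    d = tinfoil.finalizeData()
    print(d.getVar("MACHINE"))
```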
@@ -16,8 +16,6 @@ import time
import warnings
import netrc
import json
import statistics
import textwrap
warnings.simplefilter("default")

try:
@@ -83,7 +81,6 @@ def main():
        nonlocal found_hashes
        nonlocal missed_hashes
        nonlocal max_time
        nonlocal times

        with hashserv.create_client(args.address) as client:
            for i in range(args.requests):
@@ -101,41 +98,29 @@ def main():
            else:
                missed_hashes += 1

            times.append(elapsed)
            max_time = max(elapsed, max_time)
            pbar.update()

    max_time = 0
    found_hashes = 0
    missed_hashes = 0
    lock = threading.Lock()
    times = []
    total_requests = args.clients * args.requests

    start_time = time.perf_counter()
    with ProgressBar(total=args.clients * args.requests) as pbar:
    with ProgressBar(total=total_requests) as pbar:
        threads = [threading.Thread(target=thread_main, args=(pbar, lock), daemon=False) for _ in range(args.clients)]
        for t in threads:
            t.start()

        for t in threads:
            t.join()
    total_elapsed = time.perf_counter() - start_time

    elapsed = time.perf_counter() - start_time
    with lock:
        mean = statistics.mean(times)
        median = statistics.median(times)
        stddev = statistics.pstdev(times)

        print(f"Number of clients: {args.clients}")
        print(f"Requests per client: {args.requests}")
        print(f"Number of requests: {len(times)}")
        print(f"Total elapsed time: {total_elapsed:.3f}s")
        print(f"Total request rate: {len(times)/total_elapsed:.3f} req/s")
        print(f"Average request time: {mean:.3f}s")
        print(f"Median request time: {median:.3f}s")
        print(f"Request time std dev: {stddev:.3f}s")
        print(f"Maximum request time: {max(times):.3f}s")
        print(f"Minimum request time: {min(times):.3f}s")
        print(f"Hashes found: {found_hashes}")
        print(f"Hashes missed: {missed_hashes}")
        print("%d requests in %.1fs. %.1f requests per second" % (total_requests, elapsed, total_requests / elapsed))
        print("Average request time %.8fs" % (elapsed / total_requests))
        print("Max request time was %.8fs" % max_time)
        print("Found %d hashes, missed %d" % (found_hashes, missed_hashes))

    if args.report:
        with ProgressBar(total=args.requests) as pbar:
@@ -227,27 +212,6 @@ def main():
            print("New hashes marked: %d" % result["count"])
        return 0

    def handle_gc_mark_stream(args, client):
        stdin = (l.strip() for l in sys.stdin)
        marked_hashes = 0

        try:
            result = client.gc_mark_stream(args.mark, stdin)
            marked_hashes = result["count"]
        except ConnectionError:
            logger.warning(
                "Server doesn't seem to support `gc-mark-stream`. Sending "
                "hashes sequentially using `gc-mark` API."
            )
            for line in stdin:
                pairs = line.split()
                condition = dict(zip(pairs[::2], pairs[1::2]))
                result = client.gc_mark(args.mark, condition)
                marked_hashes += result["count"]

        print("New hashes marked: %d" % marked_hashes)
        return 0

    def handle_gc_sweep(args, client):
        result = client.gc_sweep(args.mark)
        print("Removed %d rows" % result["count"])
@@ -261,45 +225,7 @@ def main():
        print("true" if result else "false")
        return 0

    def handle_ping(args, client):
        times = []
        for i in range(1, args.count + 1):
            if not args.quiet:
                print(f"Ping {i} of {args.count}... ", end="")
            start_time = time.perf_counter()
            client.ping()
            elapsed = time.perf_counter() - start_time
            times.append(elapsed)
            if not args.quiet:
                print(f"{elapsed:.3f}s")

        mean = statistics.mean(times)
        median = statistics.median(times)
        std_dev = statistics.pstdev(times)

        if not args.quiet:
            print("------------------------")
        print(f"Number of pings: {len(times)}")
        print(f"Average round trip time: {mean:.3f}s")
        print(f"Median round trip time: {median:.3f}s")
        print(f"Round trip time std dev: {std_dev:.3f}s")
        print(f"Min time is: {min(times):.3f}s")
        print(f"Max time is: {max(times):.3f}s")
        return 0

    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description='Hash Equivalence Client',
        epilog=textwrap.dedent(
            """
            Possible ADDRESS options are:
               unix://PATH         Connect to UNIX domain socket at PATH
               ws://HOST[:PORT]    Connect to websocket at HOST:PORT (default port is 80)
               wss://HOST[:PORT]   Connect to secure websocket at HOST:PORT (default port is 443)
               HOST:PORT           Connect to TCP server at HOST:PORT
            """
        ),
    )
    parser = argparse.ArgumentParser(description='Hash Equivalence Client')
    parser.add_argument('--address', default=DEFAULT_ADDRESS, help='Server address (default "%(default)s")')
    parser.add_argument('--log', default='WARNING', help='Set logging level')
    parser.add_argument('--login', '-l', metavar="USERNAME", help="Authenticate as USERNAME")
@@ -387,16 +313,6 @@ def main():
                        help="Keep entries in table where KEY == VALUE")
    gc_mark_parser.set_defaults(func=handle_gc_mark)

    gc_mark_parser_stream = subparsers.add_parser(
        'gc-mark-stream',
        help=(
            "Mark multiple hashes to be retained for garbage collection. Input should be provided via stdin, "
            "with each line formatted as key-value pairs separated by spaces, for example 'column1 foo column2 bar'."
        )
    )
    gc_mark_parser_stream.add_argument("mark", help="Mark for this garbage collection operation")
    gc_mark_parser_stream.set_defaults(func=handle_gc_mark_stream)

    gc_sweep_parser = subparsers.add_parser('gc-sweep', help="Perform garbage collection and delete any entries that are not marked")
    gc_sweep_parser.add_argument("mark", help="Mark for this garbage collection operation")
    gc_sweep_parser.set_defaults(func=handle_gc_sweep)
@@ -406,11 +322,6 @@ def main():
    unihash_exists_parser.add_argument("unihash", help="Unihash to check")
    unihash_exists_parser.set_defaults(func=handle_unihash_exists)

    ping_parser = subparsers.add_parser('ping', help="Ping server")
    ping_parser.add_argument("-n", "--count", type=int, help="Number of pings. Default is %(default)s", default=10)
    ping_parser.add_argument("-q", "--quiet", action="store_true", help="Don't print each ping; only print results")
    ping_parser.set_defaults(func=handle_ping)

    args = parser.parse_args()

    logger = logging.getLogger('hashserv')
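A stripped-down version of what the `ping` subcommand does (timing `client.ping()` round trips, as in `handle_ping` above) can be run directly against a server. A sketch, assuming `hashserv` from bitbake's `lib` directory is importable and a server is listening at the hypothetical address below:

```python
# Sketch only: time a few round trips to a hash equivalence server.
import statistics
import time

import hashserv  # from bitbake/lib; must be on sys.path

ADDRESS = "localhost:8686"  # hypothetical server address for illustration

times = []
with hashserv.create_client(ADDRESS) as client:
    for _ in range(10):
        start = time.perf_counter()
        client.ping()  # the same call handle_ping() above times
        times.append(time.perf_counter() - start)

print(f"mean {statistics.mean(times):.3f}s, median {statistics.median(times):.3f}s")
```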
@@ -125,11 +125,6 @@ The following permissions are supported by the server:
        default=os.environ.get("HASHSERVER_ADMIN_PASSWORD", None),
        help="Create default admin user with password ADMIN_PASSWORD ($HASHSERVER_ADMIN_PASSWORD)",
    )
    parser.add_argument(
        "--reuseport",
        action="store_true",
        help="Enable SO_REUSEPORT, allowing multiple servers to bind to the same port for load balancing",
    )

    args = parser.parse_args()
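The `--reuseport` option maps to the `SO_REUSEPORT` socket option: several processes can bind the same port and the kernel balances incoming connections between them. A minimal, self-contained sketch of the effect (Linux only; the port is illustrative). Running it in two shells gives two listeners on the same port:

```python
# Sketch only: demonstrate SO_REUSEPORT, which --reuseport above enables.
import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
sock.bind(("127.0.0.1", 8686))  # hypothetical port for illustration
sock.listen()
print("listening on", sock.getsockname())
```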
@@ -137,9 +132,7 @@ The following permissions are supported by the server:

    level = getattr(logging, args.log.upper(), None)
    if not isinstance(level, int):
        raise ValueError(
            "Invalid log level: %s (Try ERROR/WARNING/INFO/DEBUG)" % args.log
        )
        raise ValueError("Invalid log level: %s (Try ERROR/WARNING/INFO/DEBUG)" % args.log)

    logger.setLevel(level)
    console = logging.StreamHandler()
@@ -162,7 +155,6 @@ The following permissions are supported by the server:
        anon_perms=anon_perms,
        admin_username=args.admin_user,
        admin_password=args.admin_password,
        reuseport=args.reuseport,
    )
    server.serve_forever()
    return 0
@@ -18,14 +18,13 @@ import warnings
warnings.simplefilter("default")

bindir = os.path.dirname(__file__)
toolname = os.path.basename(__file__).split(".")[0]
topdir = os.path.dirname(bindir)
sys.path[0:0] = [os.path.join(topdir, 'lib')]

import bb.tinfoil
import bb.msg

logger = bb.msg.logger_create(toolname, sys.stdout)
logger = bb.msg.logger_create('bitbake-layers', sys.stdout)

def main():
    parser = argparse.ArgumentParser(
@@ -34,7 +33,7 @@ def main():
                        add_help=False)
    parser.add_argument('-d', '--debug', help='Enable debug output', action='store_true')
    parser.add_argument('-q', '--quiet', help='Print only errors', action='store_true')
    parser.add_argument('-F', '--force', help='Forced execution: can be specified multiple times. -F will force add without recipe parse verification and -FF will additionally force the run without layer parsing.', action='count', default=0)
    parser.add_argument('-F', '--force', help='Force add without recipe parse verification', action='store_true')
    parser.add_argument('--color', choices=['auto', 'always', 'never'], default='auto', help='Colorize output (where %(metavar)s is %(choices)s)', metavar='COLOR')

    global_args, unparsed_args = parser.parse_known_args()
@@ -58,23 +57,18 @@ def main():
                        level=logger.getEffectiveLevel())

    plugins = []
    with bb.tinfoil.Tinfoil(tracking=True) as tinfoil:
        tinfoil.logger.setLevel(logger.getEffectiveLevel())

        if global_args.force > 1:
            bbpaths = []
        else:
            tinfoil.prepare(True)
            bbpaths = tinfoil.config_data.getVar('BBPATH').split(':')

        for path in ([topdir] + bbpaths):
            pluginbasepath = {"bitbake-layers":'bblayers', 'bitbake-config-build':'bbconfigbuild'}[toolname]
            pluginpath = os.path.join(path, 'lib', pluginbasepath)
    tinfoil = bb.tinfoil.Tinfoil(tracking=True)
    tinfoil.logger.setLevel(logger.getEffectiveLevel())
    try:
        tinfoil.prepare(True)
        for path in ([topdir] +
                     tinfoil.config_data.getVar('BBPATH').split(':')):
            pluginpath = os.path.join(path, 'lib', 'bblayers')
            bb.utils.load_plugins(logger, plugins, pluginpath)

        registered = False
        for plugin in plugins:
            if hasattr(plugin, 'tinfoil_init') and global_args.force <= 1:
            if hasattr(plugin, 'tinfoil_init'):
                plugin.tinfoil_init(tinfoil)
            if hasattr(plugin, 'register_commands'):
                registered = True
@@ -92,6 +86,8 @@ def main():
            tinfoil.config_data.enableTracking()

        return args.func(args)
    finally:
        tinfoil.shutdown()


if __name__ == "__main__":
@@ -16,18 +16,11 @@ sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)), "lib
import prserv
import prserv.serv

VERSION = "2.0.0"
VERSION = "1.1.0"

PRHOST_DEFAULT="0.0.0.0"
PRPORT_DEFAULT=8585

def init_logger(logfile, loglevel):
    numeric_level = getattr(logging, loglevel.upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError("Invalid log level: %s" % loglevel)
    FORMAT = "%(asctime)-15s %(message)s"
    logging.basicConfig(level=numeric_level, filename=logfile, format=FORMAT)

def main():
    parser = argparse.ArgumentParser(
        description="BitBake PR Server. Version=%s" % VERSION,
@@ -77,25 +70,12 @@ def main():
        action="store_true",
        help="open database in read-only mode",
    )
    parser.add_argument(
        "-u",
        "--upstream",
        default=os.environ.get("PRSERV_UPSTREAM", None),
        help="Upstream PR service (host:port)",
    )

    args = parser.parse_args()
    init_logger(os.path.abspath(args.log), args.loglevel)
    prserv.init_logger(os.path.abspath(args.log), args.loglevel)

    if args.start:
        ret=prserv.serv.start_daemon(
            args.file,
            args.host,
            args.port,
            os.path.abspath(args.log),
            args.read_only,
            args.upstream
        )
        ret=prserv.serv.start_daemon(args.file, args.host, args.port, os.path.abspath(args.log), args.read_only)
    elif args.stop:
        ret=prserv.serv.stop_daemon(args.host, args.port)
    else:
@@ -15,7 +15,6 @@ import unittest
try:
    import bb
    import hashserv
    import prserv
    import layerindexlib
except RuntimeError as exc:
    sys.exit(str(exc))
@@ -28,12 +27,12 @@ tests = ["bb.tests.codeparser",
         "bb.tests.event",
         "bb.tests.fetch",
         "bb.tests.parse",
         "bb.tests.persist_data",
         "bb.tests.runqueue",
         "bb.tests.siggen",
         "bb.tests.utils",
         "bb.tests.compression",
         "hashserv.tests",
         "prserv.tests",
         "layerindexlib.tests.layerindexobj",
         "layerindexlib.tests.restapi",
         "layerindexlib.tests.cooker"]
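The dotted names in `tests` are plain `unittest` module paths, so the same list can be driven with the standard loader. A sketch, assuming bitbake's `lib` directory is on `sys.path`:

```python
# Sketch only: run one of the listed test modules with the stock unittest loader.
import sys
import unittest

sys.path.insert(0, "bitbake/lib")  # hypothetical checkout location

suite = unittest.defaultTestLoader.loadTestsFromNames(["bb.tests.utils"])
unittest.TextTestRunner(verbosity=2).run(suite)
```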
@@ -9,7 +9,6 @@ import os
import sys
import warnings
warnings.simplefilter("default")
warnings.filterwarnings("ignore", category=DeprecationWarning, message=".*use.of.fork.*may.lead.to.deadlocks.in.the.child.*")
import logging
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), 'lib'))

@@ -39,9 +38,9 @@ if xmlrpcinterface[0] == "None":
    with open('/dev/null', 'r') as si:
        os.dup2(si.fileno(), sys.stdin.fileno())

    with open(logfile, 'a+') as so:
        os.dup2(so.fileno(), sys.stdout.fileno())
        os.dup2(so.fileno(), sys.stderr.fileno())
    so = open(logfile, 'a+')
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(so.fileno(), sys.stderr.fileno())

    # Have stdout and stderr be the same so log output matches chronologically
    # and there aren't two separate buffers
@@ -9,7 +9,6 @@ import os
import sys
import warnings
warnings.simplefilter("default")
warnings.filterwarnings("ignore", category=DeprecationWarning, message=".*use.of.fork.*may.lead.to.deadlocks.in.the.child.*")
sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), 'lib'))
from bb import fetch2
import logging
@@ -22,14 +21,9 @@ import traceback
import queue
import shlex
import subprocess
import fcntl
from multiprocessing import Lock
from threading import Thread

# Remove when we have a minimum of python 3.10
if not hasattr(fcntl, 'F_SETPIPE_SZ'):
    fcntl.F_SETPIPE_SZ = 1031

bb.utils.check_system_locale()

# Users shouldn't be running this code directly
@@ -50,6 +44,7 @@ if sys.argv[1].startswith("decafbadbad"):
# updates to log files for use with tail
try:
    if sys.stdout.name == '<stdout>':
        import fcntl
        fl = fcntl.fcntl(sys.stdout.fileno(), fcntl.F_GETFL)
        fl |= os.O_SYNC
        fcntl.fcntl(sys.stdout.fileno(), fcntl.F_SETFL, fl)
@@ -61,12 +56,6 @@ logger = logging.getLogger("BitBake")

worker_pipe = sys.stdout.fileno()
bb.utils.nonblockingfd(worker_pipe)
# Try to make the pipe buffers larger as it is much more efficient. If we can't
# e.g. out of buffer space (/proc/sys/fs/pipe-user-pages-soft) then just pass over.
try:
    fcntl.fcntl(worker_pipe, fcntl.F_SETPIPE_SZ, 512 * 1024)
except:
    pass
# Need to guard against multiprocessing being used in child processes
# and multiple processes trying to write to the parent at the same time
worker_pipe_lock = None
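The pipe-resizing block above relies on Linux's `F_SETPIPE_SZ` fcntl, which Python only exposes from 3.10 onward (hence the hard-coded 1031 fallback in the worker). A self-contained sketch of the same pattern:

```python
# Sketch only: enlarge a pipe buffer with F_SETPIPE_SZ, tolerating failure.
import fcntl
import os

# Fallbacks for Python < 3.10; these are the raw Linux fcntl command numbers
if not hasattr(fcntl, "F_SETPIPE_SZ"):
    fcntl.F_SETPIPE_SZ = 1031
if not hasattr(fcntl, "F_GETPIPE_SZ"):
    fcntl.F_GETPIPE_SZ = 1032

r, w = os.pipe()
try:
    fcntl.fcntl(w, fcntl.F_SETPIPE_SZ, 512 * 1024)
except OSError:
    pass  # e.g. /proc/sys/fs/pipe-user-pages-soft exhausted; keep the default
print("pipe buffer:", fcntl.fcntl(w, fcntl.F_GETPIPE_SZ), "bytes")
```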
@@ -116,7 +105,7 @@ def worker_flush(worker_queue):
        if not worker_queue.empty():
            worker_queue_int.extend(worker_queue.get())
        written = os.write(worker_pipe, worker_queue_int)
        del worker_queue_int[0:written]
        worker_queue_int = worker_queue_int[written:]
    except (IOError, OSError) as e:
        if e.errno != errno.EAGAIN and e.errno != errno.EPIPE:
            raise
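The one-line change here (`del worker_queue_int[0:written]` instead of re-slicing) keeps the pending data as a single mutable bytearray rather than copying the tail on every partial write. A minimal sketch of that drain pattern on a non-blocking fd:

```python
# Sketch only: the drain pattern worker_flush() implements.
import errno
import os

def drain(fd, pending: bytearray):
    """Write as much as the non-blocking fd accepts; keep the remainder."""
    try:
        written = os.write(fd, pending)
        del pending[0:written]  # in-place trim, no copy of the tail
    except OSError as e:
        if e.errno not in (errno.EAGAIN, errno.EPIPE):
            raise
```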
@@ -368,7 +357,7 @@ class runQueueWorkerPipe():
    def read(self):
        start = len(self.queue)
        try:
            self.queue.extend(self.input.read(512*1024) or b"")
            self.queue.extend(self.input.read(102400) or b"")
        except (OSError, IOError) as e:
            if e.errno != errno.EAGAIN:
                raise
@@ -115,8 +115,8 @@ def filter_refs(refs):
    all_refs = get_all_refs()
    to_remove = set(all_refs) - set(refs)
    if to_remove:
        check_output(git_cmd + ['update-ref', '--no-deref', '--stdin', '-z'],
                     input=''.join('delete ' + l + '\0\0' for l in to_remove))
        check_output(['xargs', '-0', '-n', '1'] + git_cmd + ['update-ref', '-d', '--no-deref'],
                     input=''.join(l + '\0' for l in to_remove))


def follow_history_intersections(revisions, refs):
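The new call batches every deletion into one `git update-ref --stdin -z` invocation instead of spawning a process per ref through `xargs`. A sketch of the stdin format it feeds, with hypothetical ref names; this must run inside a git repository:

```python
# Sketch only: batch-delete refs via git update-ref's NUL-separated stdin mode.
import subprocess

to_remove = ["refs/heads/tmp-a", "refs/heads/tmp-b"]  # hypothetical refs

subprocess.run(
    ["git", "update-ref", "--no-deref", "--stdin", "-z"],
    # -z format: "delete SP <ref> NUL <old-value> NUL"; empty old value = no check
    input="".join("delete " + ref + "\0\0" for ref in to_remove),
    text=True,
    check=True,
)
```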
@@ -1,40 +0,0 @@
#!/usr/bin/env python3
#
# Copyright OpenEmbedded Contributors
#
# SPDX-License-Identifier: MIT
#
# This script is to be called by b4:
# - through b4.send-auto-cc-cmd with "send-auto-cc-cmd" as first argument,
#
# When send-auto-cc-cmd is passed:
#
# This returns the list of Cc recipients for a patch.
#
# This script takes as stdin a patch.

import subprocess
import sys

cmd = sys.argv[1]
if cmd != "send-auto-cc-cmd":
    sys.exit(-1)

patch = sys.stdin.read()

if subprocess.call(["which", "lsdiff"], stdout=subprocess.DEVNULL) != 0:
    print("lsdiff missing from host, please install patchutils")
    sys.exit(-1)

files = subprocess.check_output(["lsdiff", "--strip-match=1", "--strip=1", "--include=doc/*"],
                                input=patch, text=True)
if len(files):
    print("docs@lists.yoctoproject.org")
else:
    # Handle patches made with --no-prefix
    files = subprocess.check_output(["lsdiff", "--include=doc/*"],
                                    input=patch, text=True)
    if len(files):
        print("docs@lists.yoctoproject.org")

sys.exit(0)
@@ -11,7 +11,7 @@

FROM alpine:3.13.1

RUN apk add --no-cache python3 libgcc
RUN apk add --no-cache python3

COPY bin/bitbake-hashserv /opt/bbhashserv/bin/
COPY lib/hashserv /opt/bbhashserv/lib/hashserv/
@@ -11,18 +11,10 @@ if &compatible || version < 600 || exists("b:loaded_bitbake_plugin")
endif

" .bb, .bbappend and .bbclass
au BufNewFile,BufRead *.{bb,bbappend,bbclass} setfiletype bitbake
au BufNewFile,BufRead *.{bb,bbappend,bbclass} set filetype=bitbake

" .inc -- meanwhile included upstream
if !has("patch-9.0.0055")
  au BufNewFile,BufRead *.inc call s:BBIncDetect()
  def s:BBIncDetect()
    l:lines = getline(1) .. getline(2) .. getline(3)
    if l:lines =~# '\<\%(require\|inherit\)\>' || lines =~# '[A-Z][A-Za-z0-9_:${}]*\s\+\%(??\|[?:+]\)\?= '
      set filetype bitbake
    endif
  enddef
endif
" .inc
au BufNewFile,BufRead *.inc set filetype=bitbake

" .conf
au BufNewFile,BufRead *.conf
@@ -349,84 +349,40 @@ Usage and syntax
Following is the usage and syntax for BitBake::

   $ bitbake -h
   usage: bitbake [-s] [-e] [-g] [-u UI] [--version] [-h] [-f] [-c CMD]
                  [-C INVALIDATE_STAMP] [--runall RUNALL] [--runonly RUNONLY]
                  [--no-setscene] [--skip-setscene] [--setscene-only] [-n] [-p]
                  [-k] [-P] [-S SIGNATURE_HANDLER] [--revisions-changed]
                  [-b BUILDFILE] [-D] [-l DEBUG_DOMAINS] [-v] [-q]
                  [-w WRITEEVENTLOG] [-B BIND] [-T SERVER_TIMEOUT]
                  [--remote-server REMOTE_SERVER] [-m] [--token XMLRPCTOKEN]
                  [--observe-only] [--status-only] [--server-only] [-r PREFILE]
                  [-R POSTFILE] [-I EXTRA_ASSUME_PROVIDED]
                  [recipename/target ...]
   Usage: bitbake [options] [recipename/target recipe:do_task ...]

   It is assumed there is a conf/bblayers.conf available in cwd or in BBPATH
   which will provide the layer, BBFILES and other configuration information.
   Executes the specified task (default is 'build') for a given set of target recipes (.bb files).
   It is assumed there is a conf/bblayers.conf available in cwd or in BBPATH which
   will provide the layer, BBFILES and other configuration information.

   General options:
     recipename/target    Execute the specified task (default is 'build') for
                          these target recipes (.bb files).
     -s, --show-versions  Show current and preferred versions of all recipes.
     -e, --environment    Show the global or per-recipe environment complete
                          with information about where variables were
                          set/changed.
     -g, --graphviz       Save dependency tree information for the specified
                          targets in the dot syntax.
     -u UI, --ui UI       The user interface to use (knotty, ncurses, taskexp,
                          taskexp_ncurses or teamcity - default knotty).
     --version            Show programs version and exit.
     -h, --help           Show this help message and exit.

   Task control options:
     -f, --force          Force the specified targets/task to run (invalidating
                          any existing stamp file).
     -c CMD, --cmd CMD    Specify the task to execute. The exact options
                          available depend on the metadata. Some examples might
                          be 'compile' or 'populate_sysroot' or 'listtasks' may
                          give a list of the tasks available.
     -C INVALIDATE_STAMP, --clear-stamp INVALIDATE_STAMP
                          Invalidate the stamp for the specified task such as
                          'compile' and then run the default task for the
                          specified target(s).
     --runall RUNALL      Run the specified task for any recipe in the taskgraph
                          of the specified target (even if it wouldn't otherwise
                          have run).
     --runonly RUNONLY    Run only the specified task within the taskgraph of
                          the specified targets (and any task dependencies those
                          tasks may have).
     --no-setscene        Do not run any setscene tasks. sstate will be ignored
                          and everything needed, built.
     --skip-setscene      Skip setscene tasks if they would be executed. Tasks
                          previously restored from sstate will be kept, unlike
                          --no-setscene.
     --setscene-only      Only run setscene tasks, don't run any real tasks.

   Execution control options:
     -n, --dry-run        Don't execute, just go through the motions.
     -p, --parse-only     Quit after parsing the BB recipes.
   Options:
     --version            show program's version number and exit
     -h, --help           show this help message and exit
     -b BUILDFILE, --buildfile=BUILDFILE
                          Execute tasks from a specific .bb recipe directly.
                          WARNING: Does not handle any dependencies from other
                          recipes.
     -k, --continue       Continue as much as possible after an error. While the
                          target that failed and anything depending on it cannot
                          be built, as much as possible will be built before
                          stopping.
     -P, --profile        Profile the command and save reports.
     -S SIGNATURE_HANDLER, --dump-signatures SIGNATURE_HANDLER
                          Dump out the signature construction information, with
                          no task execution. The SIGNATURE_HANDLER parameter is
                          passed to the handler. Two common values are none and
                          printdiff but the handler may define more/less. none
                          means only dump the signature, printdiff means
                          recursively compare the dumped signature with the most
                          recent one in a local build or sstate cache (can be
                          used to find out why tasks re-run when that is not
                          expected)
     --revisions-changed  Set the exit code depending on whether upstream
                          floating revisions have changed or not.
     -b BUILDFILE, --buildfile BUILDFILE
                          Execute tasks from a specific .bb recipe directly.
                          WARNING: Does not handle any dependencies from other
                          recipes.

   Logging/output control options:
     -f, --force          Force the specified targets/task to run (invalidating
                          any existing stamp file).
     -c CMD, --cmd=CMD    Specify the task to execute. The exact options
                          available depend on the metadata. Some examples might
                          be 'compile' or 'populate_sysroot' or 'listtasks' may
                          give a list of the tasks available.
     -C INVALIDATE_STAMP, --clear-stamp=INVALIDATE_STAMP
                          Invalidate the stamp for the specified task such as
                          'compile' and then run the default task for the
                          specified target(s).
     -r PREFILE, --read=PREFILE
                          Read the specified file before bitbake.conf.
     -R POSTFILE, --postread=POSTFILE
                          Read the specified file after bitbake.conf.
     -v, --verbose        Enable tracing of shell tasks (with 'set -x'). Also
                          print bb.note(...) messages to stdout (in addition to
                          writing them to ${T}/log.do_<task>).
     -D, --debug          Increase the debug level. You can specify this more
                          than once. -D sets the debug level to 1, where only
                          bb.debug(1, ...) messages are printed to stdout; -DD
@@ -436,47 +392,65 @@ Following is the usage and syntax for BitBake::
                          -D only affects output to stdout. All debug messages
                          are written to ${T}/log.do_taskname, regardless of the
                          debug level.
     -l DEBUG_DOMAINS, --log-domains DEBUG_DOMAINS
                          Show debug logging for the specified logging domains.
     -v, --verbose        Enable tracing of shell tasks (with 'set -x'). Also
                          print bb.note(...) messages to stdout (in addition to
                          writing them to ${T}/log.do_<task>).
     -q, --quiet          Output less log message data to the terminal. You can
                          specify this more than once.
     -w WRITEEVENTLOG, --write-log WRITEEVENTLOG
                          Writes the event log of the build to a bitbake event
                          json file. Use '' (empty string) to assign the name
                          automatically.

   Server options:
     -B BIND, --bind BIND The name/address for the bitbake xmlrpc server to bind
     -n, --dry-run        Don't execute, just go through the motions.
     -S SIGNATURE_HANDLER, --dump-signatures=SIGNATURE_HANDLER
                          Dump out the signature construction information, with
                          no task execution. The SIGNATURE_HANDLER parameter is
                          passed to the handler. Two common values are none and
                          printdiff but the handler may define more/less. none
                          means only dump the signature, printdiff means compare
                          the dumped signature with the cached one.
     -p, --parse-only     Quit after parsing the BB recipes.
     -s, --show-versions  Show current and preferred versions of all recipes.
     -e, --environment    Show the global or per-recipe environment complete
                          with information about where variables were
                          set/changed.
     -g, --graphviz       Save dependency tree information for the specified
                          targets in the dot syntax.
     -I EXTRA_ASSUME_PROVIDED, --ignore-deps=EXTRA_ASSUME_PROVIDED
                          Assume these dependencies don't exist and are already
                          provided (equivalent to ASSUME_PROVIDED). Useful to
                          make dependency graphs more appealing
     -l DEBUG_DOMAINS, --log-domains=DEBUG_DOMAINS
                          Show debug logging for the specified logging domains
     -P, --profile        Profile the command and save reports.
     -u UI, --ui=UI       The user interface to use (knotty, ncurses, taskexp or
                          teamcity - default knotty).
     --token=XMLRPCTOKEN  Specify the connection token to be used when
                          connecting to a remote server.
     --revisions-changed  Set the exit code depending on whether upstream
                          floating revisions have changed or not.
     --server-only        Run bitbake without a UI, only starting a server
                          (cooker) process.
     -B BIND, --bind=BIND The name/address for the bitbake xmlrpc server to bind
                          to.
     -T SERVER_TIMEOUT, --idle-timeout SERVER_TIMEOUT
     -T SERVER_TIMEOUT, --idle-timeout=SERVER_TIMEOUT
                          Set timeout to unload bitbake server due to
                          inactivity, set to -1 means no unload, default:
                          Environment variable BB_SERVER_TIMEOUT.
     --remote-server REMOTE_SERVER
     --no-setscene        Do not run any setscene tasks. sstate will be ignored
                          and everything needed, built.
     --skip-setscene      Skip setscene tasks if they would be executed. Tasks
                          previously restored from sstate will be kept, unlike
                          --no-setscene
     --setscene-only      Only run setscene tasks, don't run any real tasks.
     --remote-server=REMOTE_SERVER
                          Connect to the specified server.
     -m, --kill-server    Terminate any running bitbake server.
     --token XMLRPCTOKEN  Specify the connection token to be used when
                          connecting to a remote server.
     --observe-only       Connect to a server as an observing-only client.
     --status-only        Check the status of the remote bitbake server.
     --server-only        Run bitbake without a UI, only starting a server
                          (cooker) process.

   Configuration options:
     -r PREFILE, --read PREFILE
                          Read the specified file before bitbake.conf.
     -R POSTFILE, --postread POSTFILE
                          Read the specified file after bitbake.conf.
     -I EXTRA_ASSUME_PROVIDED, --ignore-deps EXTRA_ASSUME_PROVIDED
                          Assume these dependencies don't exist and are already
                          provided (equivalent to ASSUME_PROVIDED). Useful to
                          make dependency graphs more appealing.

   ..
      Bitbake help output generated with "stty columns 80; bin/bitbake -h"
     -w WRITEEVENTLOG, --write-log=WRITEEVENTLOG
                          Writes the event log of the build to a bitbake event
                          json file. Use '' (empty string) to assign the name
                          automatically.
     --runall=RUNALL      Run the specified task for any recipe in the taskgraph
                          of the specified target (even if it wouldn't otherwise
                          have run).
     --runonly=RUNONLY    Run only the specified task within the taskgraph of
                          the specified targets (and any task dependencies those
                          tasks may have).

.. _bitbake-examples:
@@ -754,9 +754,7 @@ share the task.
This section presents the mechanisms BitBake provides to allow you to
share functionality between recipes. Specifically, the mechanisms
include ``include``, ``inherit``, :term:`INHERIT`, and ``require``
directives. There is also a higher-level abstraction called
``configuration fragments`` that is enabled with the ``addfragments``
directive.
directives.

Locating Include and Class Files
--------------------------------
@@ -773,8 +771,6 @@ In order for include and class files to be found by BitBake, they need
to be located in a "classes" subdirectory that can be found in
:term:`BBPATH`.

.. _ref-bitbake-user-manual-metadata-inherit:

``inherit`` Directive
---------------------
@@ -813,43 +809,19 @@ An advantage with the inherit directive as compared to both the
:ref:`include <bitbake-user-manual/bitbake-user-manual-metadata:\`\`include\`\` directive>` and :ref:`require <bitbake-user-manual/bitbake-user-manual-metadata:\`\`require\`\` directive>`
directives is that you can inherit class files conditionally. You can
accomplish this by using a variable expression after the ``inherit``
statement.
statement. Here is an example::

For inheriting classes conditionally, using the :ref:`inherit_defer
<ref-bitbake-user-manual-metadata-inherit-defer>` directive is advised as
:ref:`inherit_defer <ref-bitbake-user-manual-metadata-inherit-defer>` is
evaluated at the end of parsing.

.. _ref-bitbake-user-manual-metadata-inherit-defer:

``inherit_defer`` Directive
~~~~~~~~~~~~~~~~~~~~~~~~~~~

The :ref:`inherit_defer <ref-bitbake-user-manual-metadata-inherit-defer>`
directive works like the :ref:`inherit
<ref-bitbake-user-manual-metadata-inherit>` directive, except that it is only
evaluated at the end of parsing. Its usage is recommended when a conditional
expression is used.

This allows conditional expressions to be evaluated "late", meaning changes to
the variable after the line is parsed will take effect. With the :ref:`inherit
<ref-bitbake-user-manual-metadata-inherit>` directive this is not the case.

Here is an example::

   inherit_defer ${VARNAME}
   inherit ${VARNAME}

If ``VARNAME`` is
going to be set, it needs to be set before the ``inherit_defer`` statement is
going to be set, it needs to be set before the ``inherit`` statement is
parsed. One way to achieve a conditional inherit in this case is to use
overrides::

   VARIABLE = ""
   VARIABLE:someoverride = "myclass"

Another method is by using :ref:`anonymous Python
<bitbake-user-manual/bitbake-user-manual-metadata:Anonymous Python Functions>`.
Here is an example::
Another method is by using anonymous Python. Here is an example::

   python () {
       if condition == value:
@@ -858,14 +830,11 @@ Here is an example::
           d.setVar('VARIABLE', '')
   }

Alternatively, you could use an inline Python expression in the
Alternatively, you could use an in-line Python expression in the
following form::

   inherit_defer ${@'classname' if condition else ''}

Or::

   inherit_defer ${@bb.utils.contains('VARIABLE', 'something', 'classname', '', d)}
   inherit ${@'classname' if condition else ''}
   inherit ${@functionname(params)}

In all cases, if the expression evaluates to an
empty string, the statement does not trigger a syntax error because it
@@ -900,33 +869,6 @@ definitions::
   of include . Doing so makes sure that an error is produced if the file cannot
   be found.

``include_all`` Directive
-------------------------

The ``include_all`` directive works like the :ref:`include
<bitbake-user-manual/bitbake-user-manual-metadata:\`\`include\`\` directive>`
directive but will include all of the files that match the specified path in
the enabled layers (layers part of :term:`BBLAYERS`).

For example, let's say a ``maintainers.inc`` file is present in different layers
and is conventionally placed in the ``conf/distro/include`` directory of each
layer. In that case the ``include_all`` directive can be used to include
the ``maintainers.inc`` file for all of these layers::

   include_all conf/distro/include/maintainers.inc

In other words, the ``maintainers.inc`` file for each layer is included through
the :ref:`include <bitbake-user-manual/bitbake-user-manual-metadata:\`\`include\`\` directive>`
directive.

BitBake will iterate through the colon-separated :term:`BBPATH` list to look for
matching files to include, from left to right. As a consequence, matching files
are included in that order.

As the ``include_all`` directive uses the :ref:`include
<bitbake-user-manual/bitbake-user-manual-metadata:\`\`include\`\` directive>`
directive in the background, no error is produced if no files are matched.

.. _require-inclusion:

``require`` Directive
@@ -991,50 +933,6 @@ the ``autotools`` and ``pkgconfig`` classes::

   INHERIT += "autotools pkgconfig"

``addfragments`` Directive
--------------------------

This directive allows fine-tuning local configurations with configuration
snippets contained in layers in a structured, controlled way. Typically it would
go into ``bitbake.conf``, for example::

   addfragments conf/fragments OE_FRAGMENTS OE_FRAGMENTS_METADATA_VARS

``addfragments`` takes three parameters:

- path prefix for fragment files inside the layer file tree that bitbake
  uses to construct full paths to the fragment files

- name of variable that holds the list of enabled fragments in an
  active build

- name of variable that contains a list of variable names containing
  fragment-specific metadata (such as descriptions)

This allows listing enabled configuration fragments in the ``OE_FRAGMENTS``
variable like this::

   OE_FRAGMENTS = "core/domain/somefragment core/someotherfragment anotherlayer/anotherdomain/anotherfragment"

Fragment names listed in this variable must be prefixed by the layer name
where a fragment file is located, defined by :term:`BBFILE_COLLECTIONS` in ``layer.conf``.

The implementation then expands this list into
:ref:`require <bitbake-user-manual/bitbake-user-manual-metadata:\`\`require\`\` directive>`
directives with full paths to respective layers::

   require /path/to/core-layer/conf/fragments/domain/somefragment.conf
   require /path/to/core-layer/conf/fragments/someotherfragment.conf
   require /path/to/another-layer/conf/fragments/anotherdomain/anotherfragment.conf

The variable containing a list of fragment metadata variables could look like this::

   OE_FRAGMENTS_METADATA_VARS = "BB_CONF_FRAGMENT_SUMMARY BB_CONF_FRAGMENT_DESCRIPTION"

The implementation will add a flag containing the fragment name to each of those variables
when parsing fragments, so that the variables are namespaced by fragment name, and do not override
each other when several fragments are enabled.

Functions
=========
@@ -127,10 +127,17 @@ overview of their function and contents.
   Contains the name of the currently running task. The name does not
   include the ``do_`` prefix.

:term:`BB_CURRENT_MC`
   Contains the name of the current multiconfig a task is being run under.
   The name is taken from the multiconfig configuration file (a file
   ``mc1.conf`` would make this variable equal to ``mc1``).

:term:`BB_DANGLINGAPPENDS_WARNONLY`
   Defines how BitBake handles situations where an append file
   (``.bbappend``) has no corresponding recipe file (``.bb``). This
   condition often occurs when layers get out of sync (e.g. ``oe-core``
   bumps a recipe version and the old recipe no longer exists and the
   other layer has not been updated to the new version of the recipe
   yet).

   The default fatal behavior is safest because it is the sane reaction
   given something is out of sync. It is important to realize when your
   changes are no longer being applied.

:term:`BB_DEFAULT_TASK`
   The default task to use when none is specified (e.g. with the ``-c``
@@ -320,26 +327,11 @@ overview of their function and contents.
   mirror tarball. If the shallow mirror tarball cannot be fetched, it will
   try to fetch the full mirror tarball and use that.

   This setting causes an initial shallow clone instead of an initial full bare clone.
   The amount of data transferred during the initial clone will be significantly reduced.

   However, every time the source revision (referenced in :term:`SRCREV`)
   changes, regardless of whether the cache within the download directory
   (defined by :term:`DL_DIR`) has been cleaned up or not,
   the data transfer may be significantly higher because entirely
   new shallow clones are required for each source revision change.

   Over time, numerous shallow clones may cumulatively transfer
   the same amount of data as an initial full bare clone.
   This is especially the case with very large repositories.

   Existing initial full bare clones, created without this setting,
   will still be utilized.

   If the Git error "Server does not allow request for unadvertised object"
   occurs, an initial full bare clone is fetched automatically.
   This may happen if the Git server does not allow the request
   or if the Git client has issues with this functionality.
   When a mirror tarball is not available, a full git clone will be performed
   regardless of whether this variable is set or not. Support for shallow
   clones is not currently implemented as git does not directly support
   shallow cloning a particular git commit hash (it only supports cloning
   from a tag or branch reference).

   See also :term:`BB_GIT_SHALLOW_DEPTH` and
   :term:`BB_GENERATE_SHALLOW_TARBALLS`.
@@ -432,7 +424,7 @@ overview of their function and contents.

   Example usage::

      BB_HASHSERVE_UPSTREAM = "hashserv.yoctoproject.org:8686"
      BB_HASHSERVE_UPSTREAM = "hashserv.yocto.io:8687"

:term:`BB_INVALIDCONF`
   Used in combination with the ``ConfigParsed`` event to trigger
@@ -707,12 +699,6 @@ overview of their function and contents.
   Within an executing task, this variable holds the hash of the task as
   returned by the currently enabled signature generator.

:term:`BB_USE_HOME_NPMRC`
   Controls whether or not BitBake uses the user's .npmrc file within their
   home directory within the npm fetcher. This can be used for authentication
   of private NPM registries, among other uses. This is turned off by default
   and requires the user to explicitly set it to "1" to enable.

:term:`BB_VERBOSE_LOGS`
   Controls how verbose BitBake is during builds. If set, shell scripts
   echo commands and shell script output appears on standard out
@@ -780,10 +766,6 @@ overview of their function and contents.
:term:`BBFILE_PRIORITY`
   Assigns the priority for recipe files in each layer.

   This variable is used in the ``conf/layer.conf`` file and must be
   suffixed with a `_` followed by the name of the specific layer (e.g.
   ``BBFILE_PRIORITY_emenlow``). Colon as separator is not supported.

   This variable is useful in situations where the same recipe appears
   in more than one layer. Setting this variable allows you to
   prioritize a layer against other layers that contain the same recipe
@@ -798,7 +780,7 @@ overview of their function and contents.
   higher precedence. For example, the value 6 has a higher precedence
   than the value 5. If not specified, the :term:`BBFILE_PRIORITY` variable
   is set based on layer dependencies (see the :term:`LAYERDEPENDS` variable
   for more information). The default priority, if unspecified for a
   for more information. The default priority, if unspecified for a
   layer with no dependencies, is the lowest defined priority + 1 (or 1
   if no priorities are defined).
@@ -4,17 +4,11 @@
BitBake Supported Release Manuals
=================================

****************************
Release Series 5.1 (styhead)
****************************

- :yocto_docs:`BitBake 2.10 User Manual </bitbake/2.10/>`

*******************************
Release Series 5.0 (scarthgap)
Release Series 4.2 (mickledore)
*******************************

- :yocto_docs:`BitBake 2.8 User Manual </bitbake/2.8/>`
- :yocto_docs:`BitBake 2.4 User Manual </bitbake/2.4/>`

******************************
Release Series 4.0 (kirkstone)
@@ -22,22 +16,16 @@ Release Series 4.0 (kirkstone)

- :yocto_docs:`BitBake 2.0 User Manual </bitbake/2.0/>`

****************************
Release Series 3.1 (dunfell)
****************************

- :yocto_docs:`BitBake 1.46 User Manual </bitbake/1.46/>`

================================
BitBake Outdated Release Manuals
================================

*******************************
Release Series 4.3 (nanbield)
*******************************

- :yocto_docs:`BitBake 2.6 User Manual </bitbake/2.6/>`

*******************************
Release Series 4.2 (mickledore)
*******************************

- :yocto_docs:`BitBake 2.4 User Manual </bitbake/2.4/>`

*****************************
Release Series 4.1 (langdale)
*****************************
@@ -62,11 +50,10 @@ Release Series 3.2 (gatesgarth)

- :yocto_docs:`BitBake 1.48 User Manual </bitbake/1.48/>`

****************************
Release Series 3.1 (dunfell)
****************************
*******************************************
Release Series 3.1 (dunfell first versions)
*******************************************

- :yocto_docs:`BitBake 1.46 User Manual </bitbake/1.46/>`
- :yocto_docs:`3.1 BitBake User Manual </3.1/bitbake-user-manual/bitbake-user-manual.html>`
- :yocto_docs:`3.1.1 BitBake User Manual </3.1.1/bitbake-user-manual/bitbake-user-manual.html>`
- :yocto_docs:`3.1.2 BitBake User Manual </3.1.2/bitbake-user-manual/bitbake-user-manual.html>`
@@ -36,9 +36,8 @@ class COWDictMeta(COWMeta):
    __marker__ = tuple()

    def __str__(cls):
        ignored_keys = set(["__count__", "__doc__", "__module__", "__firstlineno__", "__static_attributes__"])
        keys = set(cls.__dict__.keys()) - ignored_keys
        return "<COWDict Level: %i Current Keys: %i>" % (cls.__count__, len(keys))
        # FIXME: I have magic numbers!
        return "<COWDict Level: %i Current Keys: %i>" % (cls.__count__, len(cls.__dict__) - 3)

    __repr__ = __str__

@@ -162,9 +161,8 @@ class COWDictMeta(COWMeta):

class COWSetMeta(COWDictMeta):
    def __str__(cls):
        ignored_keys = set(["__count__", "__doc__", "__module__", "__firstlineno__", "__static_attributes__"])
        keys = set(cls.__dict__.keys()) - ignored_keys
        return "<COWSet Level: %i Current Keys: %i>" % (cls.__count__, len(keys))
        # FIXME: I have magic numbers!
        return "<COWSet Level: %i Current Keys: %i>" % (cls.__count__, len(cls.__dict__) - 3)

    __repr__ = __str__
@@ -9,11 +9,11 @@
# SPDX-License-Identifier: GPL-2.0-only
#

__version__ = "2.12.0"
__version__ = "2.8.0"

import sys
if sys.version_info < (3, 9, 0):
raise RuntimeError("Sorry, python 3.9.0 or later is required for this version of bitbake")
if sys.version_info < (3, 8, 0):
raise RuntimeError("Sorry, python 3.8.0 or later is required for this version of bitbake")

if sys.version_info < (3, 10, 0):
# With python 3.8 and 3.9, we see errors of "libgcc_s.so.1 must be installed for pthread_cancel to work"
@@ -104,6 +104,26 @@ class BBLoggerAdapter(logging.LoggerAdapter, BBLoggerMixin):
self.setup_bblogger(logger.name)
super().__init__(logger, *args, **kwargs)

if sys.version_info < (3, 6):
# These properties were added in Python 3.6. Add them in older versions
# for compatibility
@property
def manager(self):
return self.logger.manager

@manager.setter
def manager(self, value):
self.logger.manager = value

@property
def name(self):
return self.logger.name

def __repr__(self):
logger = self.logger
level = logger.getLevelName(logger.getEffectiveLevel())
return '<%s %s (%s)>' % (self.__class__.__name__, logger.name, level)

logging.LoggerAdapter = BBLoggerAdapter

logger = logging.getLogger("BitBake")
@@ -194,6 +214,7 @@ def deprecated(func, name=None, advice=""):
# For compatibility
def deprecate_import(current, modulename, fromlist, renames = None):
"""Import objects from one module into another, wrapping them with a DeprecationWarning"""
import sys

module = __import__(modulename, fromlist = fromlist)
for position, objname in enumerate(fromlist):

@@ -195,6 +195,8 @@ class ACL(object):

def main():
import argparse
import pwd
import grp
from pathlib import Path

parser = argparse.ArgumentParser()

@@ -5,7 +5,7 @@
#


from .client import AsyncClient, Client
from .client import AsyncClient, Client, ClientPool
from .serv import AsyncServer, AsyncServerConnection
from .connection import DEFAULT_MAX_CHUNK
from .exceptions import (

@@ -24,12 +24,6 @@ ADDR_TYPE_UNIX = 0
ADDR_TYPE_TCP = 1
ADDR_TYPE_WS = 2

WEBSOCKETS_MIN_VERSION = (9, 1)
# Need websockets 10 with python 3.10+
if sys.version_info >= (3, 10, 0):
WEBSOCKETS_MIN_VERSION = (10, 0)


def parse_address(addr):
if addr.startswith(UNIX_PREFIX):
return (ADDR_TYPE_UNIX, (addr[len(UNIX_PREFIX) :],))
@@ -45,7 +39,6 @@ def parse_address(addr):

return (ADDR_TYPE_TCP, (host, int(port)))


class AsyncClient(object):
def __init__(
self,
@@ -93,35 +86,8 @@ class AsyncClient(object):
async def connect_websocket(self, uri):
import websockets

try:
version = tuple(
int(v)
for v in websockets.__version__.split(".")[
0 : len(WEBSOCKETS_MIN_VERSION)
]
)
except ValueError:
raise ImportError(
f"Unable to parse websockets version '{websockets.__version__}'"
)

if version < WEBSOCKETS_MIN_VERSION:
min_ver_str = ".".join(str(v) for v in WEBSOCKETS_MIN_VERSION)
raise ImportError(
f"Websockets version {websockets.__version__} is less than minimum required version {min_ver_str}"
)

async def connect_sock():
try:
websocket = await websockets.connect(
uri,
ping_interval=None,
open_timeout=self.timeout,
)
except asyncio.exceptions.TimeoutError:
raise ConnectionError("Timeout while connecting to websocket")
except (OSError, websockets.InvalidHandshake, websockets.InvalidURI) as exc:
raise ConnectionError(f"Could not connect to websocket: {exc}") from exc
websocket = await websockets.connect(uri, ping_interval=None)
return WebsocketConnection(websocket, self.timeout)

self._connect_sock = connect_sock
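
Aside: the version gate added in the hunk above follows a common pattern of comparing a parsed version tuple against a required minimum. A minimal standalone sketch of the same idea, with illustrative names and values (not bitbake code):

    # Sketch of the version-gate pattern: compare only as many components
    # as the stated minimum requires, and fail loudly if parsing fails.
    def check_min_version(version_str, minimum):
        try:
            version = tuple(int(v) for v in version_str.split(".")[: len(minimum)])
        except ValueError:
            raise ImportError(f"Unable to parse version '{version_str}'")
        if version < minimum:
            raise ImportError(f"Version {version_str} is older than required {minimum}")

    check_min_version("10.4", (10, 0))  # passes; ("9.1", (10, 0)) would raise

Comparing tuples component by component avoids the classic string-comparison bug where "10" sorts before "9".
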
@@ -259,7 +225,8 @@ class Client(object):
def close(self):
if self.loop:
self.loop.run_until_complete(self.client.close())
self.loop.run_until_complete(self.loop.shutdown_asyncgens())
if sys.version_info >= (3, 6):
self.loop.run_until_complete(self.loop.shutdown_asyncgens())
self.loop.close()
self.loop = None

@@ -269,3 +236,78 @@ class Client(object):
def __exit__(self, exc_type, exc_value, traceback):
self.close()
return False


class ClientPool(object):
def __init__(self, max_clients):
self.avail_clients = []
self.num_clients = 0
self.max_clients = max_clients
self.loop = None
self.client_condition = None

@abc.abstractmethod
async def _new_client(self):
raise NotImplementedError("Must be implemented in derived class")

def close(self):
if self.client_condition:
self.client_condition = None

if self.loop:
self.loop.run_until_complete(self.__close_clients())
self.loop.run_until_complete(self.loop.shutdown_asyncgens())
self.loop.close()
self.loop = None

def run_tasks(self, tasks):
if not self.loop:
self.loop = asyncio.new_event_loop()

thread = Thread(target=self.__thread_main, args=(tasks,))
thread.start()
thread.join()

@contextlib.asynccontextmanager
async def get_client(self):
async with self.client_condition:
if self.avail_clients:
client = self.avail_clients.pop()
elif self.num_clients < self.max_clients:
self.num_clients += 1
client = await self._new_client()
else:
while not self.avail_clients:
await self.client_condition.wait()
client = self.avail_clients.pop()

try:
yield client
finally:
async with self.client_condition:
self.avail_clients.append(client)
self.client_condition.notify()

def __thread_main(self, tasks):
async def process_task(task):
async with self.get_client() as client:
await task(client)

asyncio.set_event_loop(self.loop)
if not self.client_condition:
self.client_condition = asyncio.Condition()
tasks = [process_task(t) for t in tasks]
self.loop.run_until_complete(asyncio.gather(*tasks))

async def __close_clients(self):
for c in self.avail_clients:
await c.close()
self.avail_clients = []
self.num_clients = 0

def __enter__(self):
return self

def __exit__(self, exc_type, exc_value, traceback):
self.close()
return False

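The ClientPool added above implements a bounded pool guarded by an asyncio.Condition: at most max_clients clients are ever created, idle clients are reused, and tasks wait when the pool is exhausted. A self-contained sketch of the same pattern (hypothetical DemoPool, not the bitbake class):

    import asyncio

    class DemoPool:
        def __init__(self, max_clients):
            self.avail, self.count, self.max = [], 0, max_clients
            self.cond = asyncio.Condition()

        async def acquire(self):
            async with self.cond:
                while not self.avail and self.count >= self.max:
                    await self.cond.wait()   # block until a client is returned
                if self.avail:
                    return self.avail.pop()
                self.count += 1
                return object()              # stand-in for a real connection

        async def release(self, client):
            async with self.cond:
                self.avail.append(client)
                self.cond.notify()

    async def main():
        pool = DemoPool(2)

        async def task(i):
            c = await pool.acquire()
            try:
                await asyncio.sleep(0)       # pretend to use the client
            finally:
                await pool.release(c)

        await asyncio.gather(*(task(i) for i in range(5)))

    asyncio.run(main())
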
@@ -138,20 +138,14 @@ class StreamServer(object):


class TCPStreamServer(StreamServer):
def __init__(self, host, port, handler, logger, *, reuseport=False):
def __init__(self, host, port, handler, logger):
super().__init__(handler, logger)
self.host = host
self.port = port
self.reuseport = reuseport

def start(self, loop):
self.server = loop.run_until_complete(
asyncio.start_server(
self.handle_stream_client,
self.host,
self.port,
reuse_port=self.reuseport,
)
asyncio.start_server(self.handle_stream_client, self.host, self.port)
)

for s in self.server.sockets:
@@ -215,12 +209,11 @@ class UnixStreamServer(StreamServer):


class WebsocketsServer(object):
def __init__(self, host, port, handler, logger, *, reuseport=False):
def __init__(self, host, port, handler, logger):
self.host = host
self.port = port
self.handler = handler
self.logger = logger
self.reuseport = reuseport

def start(self, loop):
import websockets.server
@@ -231,7 +224,6 @@ class WebsocketsServer(object):
self.host,
self.port,
ping_interval=None,
reuse_port=self.reuseport,
)
)

@@ -270,26 +262,14 @@ class AsyncServer(object):
self.loop = None
self.run_tasks = []

def start_tcp_server(self, host, port, *, reuseport=False):
self.server = TCPStreamServer(
host,
port,
self._client_handler,
self.logger,
reuseport=reuseport,
)
def start_tcp_server(self, host, port):
self.server = TCPStreamServer(host, port, self._client_handler, self.logger)

def start_unix_server(self, path):
self.server = UnixStreamServer(path, self._client_handler, self.logger)

def start_websocket_server(self, host, port, reuseport=False):
self.server = WebsocketsServer(
host,
port,
self._client_handler,
self.logger,
reuseport=reuseport,
)
def start_websocket_server(self, host, port):
self.server = WebsocketsServer(host, port, self._client_handler, self.logger)

async def _client_handler(self, socket):
address = socket.address
@@ -388,7 +368,8 @@ class AsyncServer(object):

self._serve_forever(tasks)

self.loop.run_until_complete(self.loop.shutdown_asyncgens())
if sys.version_info >= (3, 6):
self.loop.run_until_complete(self.loop.shutdown_asyncgens())
self.loop.close()

queue = multiprocessing.Queue()

@@ -197,8 +197,6 @@ def exec_func(func, d, dirs = None):
for cdir in d.expand(cleandirs).split():
bb.utils.remove(cdir, True)
bb.utils.mkdirhier(cdir)
if cdir == oldcwd:
os.chdir(cdir)

if flags and dirs is None:
dirs = flags.get('dirs')
@@ -397,7 +395,7 @@ def create_progress_handler(func, progress, logfile, d):
# Use specified regex
return bb.progress.OutOfProgressHandler(d, regex=progress.split(':', 1)[1], outfile=logfile)
elif progress.startswith("custom:"):
# Use a custom progress handler that was injected via other means
# Use a custom progress handler that was injected via OE_EXTRA_IMPORTS or __builtins__
import functools
from types import ModuleType

@@ -743,7 +741,7 @@ def _exec_task(fn, task, d, quieterr):

if quieterr:
if not handled:
logger.warning(str(exc))
logger.warning(repr(exc))
event.fire(TaskFailedSilent(task, fn, logfn, localdata), localdata)
else:
errprinted = errchk.triggered
@@ -752,7 +750,7 @@ def _exec_task(fn, task, d, quieterr):
if verboseStdoutLogging or handled:
errprinted = True
if not handled:
logger.error(str(exc))
logger.error(repr(exc))
event.fire(TaskFailed(task, fn, logfn, localdata, errprinted), localdata)
return 1

@@ -932,13 +930,9 @@ def add_tasks(tasklist, d):
# don't assume holding a reference
d.setVar('_task_deps', task_deps)

def ensure_task_prefix(name):
if name[:3] != "do_":
name = "do_" + name
return name

def addtask(task, before, after, d):
task = ensure_task_prefix(task)
if task[:3] != "do_":
task = "do_" + task

d.setVarFlag(task, "task", 1)
bbtasks = d.getVar('__BBTASKS', False) or []
@@ -950,20 +944,19 @@ def addtask(task, before, after, d):
if after is not None:
# set up deps for function
for entry in after.split():
entry = ensure_task_prefix(entry)
if entry not in existing:
existing.append(entry)
d.setVarFlag(task, "deps", existing)
if before is not None:
# set up things that depend on this func
for entry in before.split():
entry = ensure_task_prefix(entry)
existing = d.getVarFlag(entry, "deps", False) or []
if task not in existing:
d.setVarFlag(entry, "deps", [task] + existing)

def deltask(task, d):
task = ensure_task_prefix(task)
if task[:3] != "do_":
task = "do_" + task

bbtasks = d.getVar('__BBTASKS', False) or []
if task in bbtasks:
@@ -1028,9 +1021,3 @@ def tasksbetween(task_start, task_end, d):
chain.pop()
follow_chain(task_start, task_end)
return outtasks

def listtasks(d):
"""
Return the list of tasks in the current recipe.
"""
return tuple(d.getVar('__BBTASKS', False) or ())

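The ensure_task_prefix() helper factored out above replaces the repeated inline "do_" checks in addtask() and deltask(), and also normalises the names listed in before/after. A tiny standalone sketch of its behaviour (the assertions are illustrative):

    # Every task name gains the "do_" prefix exactly once, whatever the
    # caller passed in.
    def ensure_task_prefix(name):
        if name[:3] != "do_":
            name = "do_" + name
        return name

    assert ensure_task_prefix("compile") == "do_compile"
    assert ensure_task_prefix("do_compile") == "do_compile"
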
@@ -28,7 +28,7 @@ import shutil

logger = logging.getLogger("BitBake.Cache")

__cache_version__ = "156"
__cache_version__ = "155"

def getCacheFile(path, filename, mc, data_hash):
mcspec = ''
@@ -395,7 +395,7 @@ class Cache(object):
# It will be used later for deciding whether we
# need extra cache file dump/load support
self.mc = mc
self.logger = PrefixLoggerAdapter("Cache: %s: " % (mc if mc else ''), logger)
self.logger = PrefixLoggerAdapter("Cache: %s: " % (mc if mc else "default"), logger)
self.caches_array = caches_array
self.cachedir = self.data.getVar("CACHE")
self.clean = set()
@@ -441,7 +441,7 @@ class Cache(object):
else:
symlink = os.path.join(self.cachedir, "bb_cache.dat")

if os.path.exists(symlink) or os.path.islink(symlink):
if os.path.exists(symlink):
bb.utils.remove(symlink)
try:
os.symlink(os.path.basename(self.cachefile), symlink)
@@ -779,6 +779,25 @@ class MulticonfigCache(Mapping):
for k in self.__caches:
yield k

def init(cooker):
"""
The Objective: Cache the minimum amount of data possible yet get to the
stage of building packages (i.e. tryBuild) without reparsing any .bb files.

To do this, we intercept getVar calls and only cache the variables we see
being accessed. We rely on the cache getVar calls being made for all
variables bitbake might need to use to reach this stage. For each cached
file we need to track:

* Its mtime
* The mtimes of all its dependencies
* Whether it caused a parse.SkipRecipe exception

Files causing parsing errors are evicted from the cache.

"""
return Cache(cooker.configuration.data, cooker.configuration.data_hash)


class CacheData(object):
"""
@@ -847,16 +866,6 @@ class MultiProcessCache(object):
data = [{}]
return data

def clear_cache(self):
if not self.cachefile:
bb.fatal("Can't clear invalid cachefile")

self.cachedata = self.create_cachedata()
self.cachedata_extras = self.create_cachedata()
with bb.utils.fileslocked([self.cachefile + ".lock"]):
bb.utils.remove(self.cachefile)
bb.utils.remove(self.cachefile + "-*")

def save_extras(self):
if not self.cachefile:
return

@@ -142,28 +142,3 @@ class FileChecksumCache(MultiProcessCache):

checksums.sort(key=operator.itemgetter(1))
return checksums

class RevisionsCache(MultiProcessCache):
cache_file_name = "local_srcrevisions.dat"
CACHE_VERSION = 1

def __init__(self):
MultiProcessCache.__init__(self)

def get_revs(self):
return self.cachedata[0]

def get_rev(self, k):
if k in self.cachedata_extras[0]:
return self.cachedata_extras[0][k]
if k in self.cachedata[0]:
return self.cachedata[0][k]
return None

def set_rev(self, k, v):
self.cachedata[0][k] = v
self.cachedata_extras[0][k] = v

def merge_data(self, source, dest):
for h in source[0]:
dest[0][h] = source[0][h]

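RevisionsCache above follows the MultiProcessCache convention of writing to both the shared data and a per-process "extras" overlay, with reads preferring the overlay so unmerged local writes win. A minimal sketch of that two-level lookup, independent of bitbake:

    class TwoLevelCache:
        def __init__(self):
            self.base = {}
            self.extras = {}   # per-process overlay, merged back later

        def set(self, k, v):
            self.base[k] = v
            self.extras[k] = v

        def get(self, k):
            if k in self.extras:
                return self.extras[k]
            return self.base.get(k)

    c = TwoLevelCache()
    c.set("srcrev", "abc123")
    assert c.get("srcrev") == "abc123"
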
@@ -72,20 +72,7 @@ def add_module_functions(fn, functions, namespace):
parser.parse_python(None, filename=fn, lineno=1, fixedhash=fixedhash+f)
#bb.warn("Cached %s" % f)
except KeyError:
try:
targetfn = inspect.getsourcefile(functions[f])
except TypeError:
# Builtin
continue
if fn != targetfn:
# Skip references to other modules outside this file
#bb.warn("Skipping %s" % name)
continue
try:
lines, lineno = inspect.getsourcelines(functions[f])
except TypeError:
# Builtin
continue
lines, lineno = inspect.getsourcelines(functions[f])
src = "".join(lines)
parser.parse_python(src, filename=fn, lineno=lineno, fixedhash=fixedhash+f)
#bb.warn("Not cached %s" % f)
@@ -95,17 +82,14 @@ def add_module_functions(fn, functions, namespace):
if e in functions:
execs.remove(e)
execs.add(namespace + "." + e)
visitorcode = None
if hasattr(functions[f], 'visitorcode'):
visitorcode = getattr(functions[f], "visitorcode")
modulecode_deps[name] = [parser.references.copy(), execs, parser.var_execs.copy(), parser.contains.copy(), parser.extra, visitorcode]
modulecode_deps[name] = [parser.references.copy(), execs, parser.var_execs.copy(), parser.contains.copy()]
#bb.warn("%s: %s\nRefs:%s Execs: %s %s %s" % (name, fn, parser.references, parser.execs, parser.var_execs, parser.contains))

def update_module_dependencies(d):
for mod in modulecode_deps:
excludes = set((d.getVarFlag(mod, "vardepsexclude") or "").split())
if excludes:
modulecode_deps[mod] = [modulecode_deps[mod][0] - excludes, modulecode_deps[mod][1] - excludes, modulecode_deps[mod][2] - excludes, modulecode_deps[mod][3], modulecode_deps[mod][4], modulecode_deps[mod][5]]
modulecode_deps[mod] = [modulecode_deps[mod][0] - excludes, modulecode_deps[mod][1] - excludes, modulecode_deps[mod][2] - excludes, modulecode_deps[mod][3]]

# A custom getstate/setstate using tuples is actually worth 15% cachesize by
# avoiding duplication of the attribute names!
@@ -128,22 +112,21 @@ class SetCache(object):
codecache = SetCache()

class pythonCacheLine(object):
def __init__(self, refs, execs, contains, extra):
def __init__(self, refs, execs, contains):
self.refs = codecache.internSet(refs)
self.execs = codecache.internSet(execs)
self.contains = {}
for c in contains:
self.contains[c] = codecache.internSet(contains[c])
self.extra = extra

def __getstate__(self):
return (self.refs, self.execs, self.contains, self.extra)
return (self.refs, self.execs, self.contains)

def __setstate__(self, state):
(refs, execs, contains, extra) = state
self.__init__(refs, execs, contains, extra)
(refs, execs, contains) = state
self.__init__(refs, execs, contains)

def __hash__(self):
l = (hash(self.refs), hash(self.execs), hash(self.extra))
l = (hash(self.refs), hash(self.execs))
for c in sorted(self.contains.keys()):
l = l + (c, hash(self.contains[c]))
return hash(l)
@@ -172,7 +155,7 @@ class CodeParserCache(MultiProcessCache):
# so that an existing cache gets invalidated. Additionally you'll need
# to increment __cache_version__ in cache.py in order to ensure that old
# recipe caches don't trigger "Taskhash mismatch" errors.
CACHE_VERSION = 14
CACHE_VERSION = 11

def __init__(self):
MultiProcessCache.__init__(self)
@@ -186,8 +169,8 @@ class CodeParserCache(MultiProcessCache):
self.pythoncachelines = {}
self.shellcachelines = {}

def newPythonCacheLine(self, refs, execs, contains, extra):
cacheline = pythonCacheLine(refs, execs, contains, extra)
def newPythonCacheLine(self, refs, execs, contains):
cacheline = pythonCacheLine(refs, execs, contains)
h = hash(cacheline)
if h in self.pythoncachelines:
return self.pythoncachelines[h]
@@ -272,15 +255,7 @@ class PythonParser():

def visit_Call(self, node):
name = self.called_node_name(node.func)
if name and name in modulecode_deps and modulecode_deps[name][5]:
visitorcode = modulecode_deps[name][5]
contains, execs, warn = visitorcode(name, node.args)
for i in contains:
self.contains[i] = contains[i]
self.execs |= execs
if warn:
self.warn(node.func, warn)
elif name and (name.endswith(self.getvars) or name.endswith(self.getvarflags) or name in self.containsfuncs or name in self.containsanyfuncs):
if name and (name.endswith(self.getvars) or name.endswith(self.getvarflags) or name in self.containsfuncs or name in self.containsanyfuncs):
if isinstance(node.args[0], ast.Constant) and isinstance(node.args[0].value, str):
varname = node.args[0].value
if name in self.containsfuncs and isinstance(node.args[1], ast.Constant):
@@ -363,7 +338,6 @@ class PythonParser():
self.contains = {}
for i in codeparsercache.pythoncache[h].contains:
self.contains[i] = set(codeparsercache.pythoncache[h].contains[i])
self.extra = codeparsercache.pythoncache[h].extra
return

if h in codeparsercache.pythoncacheextras:
@@ -372,7 +346,6 @@ class PythonParser():
self.contains = {}
for i in codeparsercache.pythoncacheextras[h].contains:
self.contains[i] = set(codeparsercache.pythoncacheextras[h].contains[i])
self.extra = codeparsercache.pythoncacheextras[h].extra
return

if fixedhash and not node:
@@ -391,11 +364,8 @@ class PythonParser():
self.visit_Call(n)

self.execs.update(self.var_execs)
self.extra = None
if fixedhash:
self.extra = bbhash(str(node))

codeparsercache.pythoncacheextras[h] = codeparsercache.newPythonCacheLine(self.references, self.execs, self.contains, self.extra)
codeparsercache.pythoncacheextras[h] = codeparsercache.newPythonCacheLine(self.references, self.execs, self.contains)

class ShellParser():
def __init__(self, name, log):
@@ -514,34 +484,19 @@ class ShellParser():
"""

words = list(words)
for word in words:
for word in list(words):
wtree = pyshlex.make_wordtree(word[1])
for part in wtree:
if not isinstance(part, list):
continue

candidates = [part]
if part[0] in ('`', '$('):
command = pyshlex.wordtree_as_string(part[1:-1])
self._parse_shell(command)

# If command is of type:
#
# var="... $(cmd [...]) ..."
#
# Then iterate on what's between the quotes and if we find a
# list, make that what we check for below.
if len(part) >= 3 and part[0] == '"':
for p in part[1:-1]:
if isinstance(p, list):
candidates.append(p)

for candidate in candidates:
if len(candidate) >= 2:
if candidate[0] in ('`', '$('):
command = pyshlex.wordtree_as_string(candidate[1:-1])
self._parse_shell(command)

if word[0] in ("cmd_name", "cmd_word"):
if word in words:
words.remove(word)
if word[0] in ("cmd_name", "cmd_word"):
if word in words:
words.remove(word)

usetoken = False
for word in words:

@@ -24,7 +24,6 @@ import io
import bb.event
import bb.cooker
import bb.remotedata
import bb.parse

class DataStoreConnectionHandle(object):
def __init__(self, dsindex=0):
@@ -109,7 +108,7 @@ class Command:

def runAsyncCommand(self, _, process_server, halt):
try:
if self.cooker.state in (bb.cooker.State.ERROR, bb.cooker.State.SHUTDOWN, bb.cooker.State.FORCE_SHUTDOWN):
if self.cooker.state in (bb.cooker.state.error, bb.cooker.state.shutdown, bb.cooker.state.forceshutdown):
# updateCache will trigger a shutdown of the parser
# and then raise BBHandledException triggering an exit
self.cooker.updateCache()
@@ -119,7 +118,7 @@ class Command:
(command, options) = cmd
commandmethod = getattr(CommandsAsync, command)
needcache = getattr( commandmethod, "needcache" )
if needcache and self.cooker.state != bb.cooker.State.RUNNING:
if needcache and self.cooker.state != bb.cooker.state.running:
self.cooker.updateCache()
return True
else:
@@ -143,14 +142,14 @@ class Command:
return bb.server.process.idleFinish(traceback.format_exc())

def finishAsyncCommand(self, msg=None, code=None):
self.cooker.finishcommand()
self.process_server.clear_async_cmd()
if msg or msg == "":
bb.event.fire(CommandFailed(msg), self.cooker.data)
elif code:
bb.event.fire(CommandExit(code), self.cooker.data)
else:
bb.event.fire(CommandCompleted(), self.cooker.data)
self.cooker.finishcommand()
self.process_server.clear_async_cmd()

def reset(self):
if self.remotedatastores:
@@ -311,7 +310,7 @@ class CommandsSync:
def revalidateCaches(self, command, params):
"""Called by UI clients when metadata may have changed"""
command.cooker.revalidateCaches()
revalidateCaches.needconfig = False
parseConfiguration.needconfig = False

def getRecipes(self, command, params):
try:
@@ -421,30 +420,15 @@ class CommandsSync:
return command.cooker.recipecaches[mc].pkg_dp
getDefaultPreference.readonly = True


def getSkippedRecipes(self, command, params):
"""
Get the map of skipped recipes for the specified multiconfig/mc name (`params[0]`).

Invoked by `bb.tinfoil.Tinfoil.get_skipped_recipes`

:param command: Internally used parameter.
:param params: Parameter array. params[0] is multiconfig/mc name. If not given, then default mc '' is assumed.
:return: Dict whose keys are virtualfns and values are `bb.cooker.SkippedPackage`
"""
try:
mc = params[0]
except IndexError:
mc = ''

# Return list sorted by reverse priority order
import bb.cache
def sortkey(x):
vfn, _ = x
realfn, _, item_mc = bb.cache.virtualfn2realfn(vfn)
return -command.cooker.collections[item_mc].calc_bbfile_priority(realfn)[0], vfn
realfn, _, mc = bb.cache.virtualfn2realfn(vfn)
return (-command.cooker.collections[mc].calc_bbfile_priority(realfn)[0], vfn)

skipdict = OrderedDict(sorted(command.cooker.skiplist_by_mc[mc].items(), key=sortkey))
skipdict = OrderedDict(sorted(command.cooker.skiplist.items(), key=sortkey))
return list(skipdict.items())
getSkippedRecipes.readonly = True

@@ -598,13 +582,6 @@ class CommandsSync:
return DataStoreConnectionHandle(idx)
parseRecipeFile.readonly = True

def finalizeData(self, command, params):
newdata = command.cooker.data.createCopy()
bb.data.expandKeys(newdata)
bb.parse.ast.runAnonFuncs(newdata)
idx = command.remotedatastores.store(newdata)
return DataStoreConnectionHandle(idx)

class CommandsAsync:
"""
A class of asynchronous commands

@@ -13,7 +13,7 @@ def open(*args, **kwargs):

class LZ4File(bb.compress._pipecompress.PipeFile):
def get_compress(self):
return ["lz4", "-z", "-c"]
return ["lz4c", "-z", "-c"]

def get_decompress(self):
return ["lz4", "-d", "-c"]
return ["lz4c", "-d", "-c"]

@@ -8,7 +8,7 @@
#
# SPDX-License-Identifier: GPL-2.0-only
#
import enum

import sys, os, glob, os.path, re, time
import itertools
import logging
@@ -17,7 +17,7 @@ import threading
from io import StringIO, UnsupportedOperation
from contextlib import closing
from collections import defaultdict, namedtuple
import bb, bb.command
import bb, bb.exceptions, bb.command
from bb import utils, data, parse, event, cache, providers, taskdata, runqueue, build
import queue
import signal
@@ -48,15 +48,16 @@ class CollectionError(bb.BBHandledException):
Exception raised when layer configuration is incorrect
"""

class state:
initial, parsing, running, shutdown, forceshutdown, stopped, error = list(range(7))

class State(enum.Enum):
INITIAL = 0,
PARSING = 1,
RUNNING = 2,
SHUTDOWN = 3,
FORCE_SHUTDOWN = 4,
STOPPED = 5,
ERROR = 6
@classmethod
def get_name(cls, code):
for name in dir(cls):
value = getattr(cls, name)
if type(value) == type(cls.initial) and value == code:
return name
raise ValueError("Invalid status code: %s" % code)


class SkippedPackage:
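
The migration from integer class attributes to enum.Enum above has one practical effect worth noting: enum members compare by identity, so stale code still comparing against plain integers fails loudly instead of matching silently. (As rendered in the hunk, the trailing commas make most member values one-element tuples, which enum.Enum accepts; the sketch below drops them.) A small illustration, not bitbake code:

    import enum

    class State(enum.Enum):
        INITIAL = 0
        RUNNING = 2

    assert State.RUNNING != 2              # no accidental equality with ints
    assert State.RUNNING is State.RUNNING  # members are singletons
    assert State.RUNNING.name == "RUNNING"
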
@@ -133,8 +134,7 @@ class BBCooker:
self.baseconfig_valid = False
self.parsecache_valid = False
self.eventlog = None
# The skiplists, one per multiconfig
self.skiplist_by_mc = defaultdict(dict)
self.skiplist = {}
self.featureset = CookerFeatures()
if featureSet:
for f in featureSet:
@@ -180,7 +180,7 @@ class BBCooker:
pass

self.command = bb.command.Command(self, self.process_server)
self.state = State.INITIAL
self.state = state.initial

self.parser = None

@@ -226,22 +226,23 @@ class BBCooker:
bb.warn("Cooker received SIGTERM, shutting down...")
elif signum == signal.SIGHUP:
bb.warn("Cooker received SIGHUP, shutting down...")
self.state = State.FORCE_SHUTDOWN
self.state = state.forceshutdown
bb.event._should_exit.set()

def setFeatures(self, features):
# we only accept a new feature set if we're in state initial, so we can reset without problems
if not self.state in [State.INITIAL, State.SHUTDOWN, State.FORCE_SHUTDOWN, State.STOPPED, State.ERROR]:
if not self.state in [state.initial, state.shutdown, state.forceshutdown, state.stopped, state.error]:
raise Exception("Illegal state for feature set change")
original_featureset = list(self.featureset)
for feature in features:
self.featureset.setFeature(feature)
bb.debug(1, "Features set %s (was %s)" % (original_featureset, list(self.featureset)))
if (original_featureset != list(self.featureset)) and self.state != State.ERROR and hasattr(self, "data"):
if (original_featureset != list(self.featureset)) and self.state != state.error and hasattr(self, "data"):
self.reset()

def initConfigurationData(self):
self.state = State.INITIAL

self.state = state.initial
self.caches_array = []

sys.path = self.orig_syspath.copy()
@@ -280,6 +281,7 @@ class BBCooker:
self.databuilder = bb.cookerdata.CookerDataBuilder(self.configuration, False)
self.databuilder.parseBaseConfiguration()
self.data = self.databuilder.data
self.data_hash = self.databuilder.data_hash
self.extraconfigdata = {}

eventlog = self.data.getVar("BB_DEFAULT_EVENTLOG")
@@ -316,14 +318,8 @@ class BBCooker:
try:
with hashserv.create_client(upstream) as client:
client.ping()
except ImportError as e:
bb.fatal(""""Unable to use hash equivalence server at '%s' due to missing or incorrect python module:
%s
Please install the needed module on the build host, or use an environment containing it (e.g a pip venv or OpenEmbedded's buildtools tarball).
You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in significantly longer build times as bitbake will be unable to reuse prebuilt sstate artefacts."""
% (upstream, repr(e)))
except ConnectionError as e:
bb.warn("Unable to connect to hash equivalence server at '%s', please correct or remove BB_HASHSERVE_UPSTREAM:\n%s"
except (ConnectionError, ImportError) as e:
bb.warn("BB_HASHSERVE_UPSTREAM is not valid, unable to connect hash equivalence server at '%s': %s"
% (upstream, repr(e)))
upstream = None

@@ -374,11 +370,6 @@ You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in si
if not clean:
bb.parse.BBHandler.cached_statements = {}

# If writes were made to any of the data stores, we need to recalculate the data
# store cache
if hasattr(self, "databuilder"):
self.databuilder.calc_datastore_hashes()

def parseConfiguration(self):
self.updateCacheSync()

@@ -621,8 +612,8 @@ You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in si
localdata = {}

for mc in self.multiconfigs:
taskdata[mc] = bb.taskdata.TaskData(halt, skiplist=self.skiplist_by_mc[mc], allowincomplete=allowincomplete)
localdata[mc] = bb.data.createCopy(self.databuilder.mcdata[mc])
taskdata[mc] = bb.taskdata.TaskData(halt, skiplist=self.skiplist, allowincomplete=allowincomplete)
localdata[mc] = data.createCopy(self.databuilder.mcdata[mc])
bb.data.expandKeys(localdata[mc])

current = 0
@@ -689,14 +680,14 @@ You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in si
bb.event.fire(bb.event.TreeDataPreparationCompleted(len(fulltargetlist)), self.data)
return taskdata, runlist

def prepareTreeData(self, pkgs_to_build, task, halt=False):
def prepareTreeData(self, pkgs_to_build, task):
"""
Prepare a runqueue and taskdata object for iteration over pkgs_to_build
"""

# We set halt to False here to prevent unbuildable targets raising
# an exception when we're just generating data
taskdata, runlist = self.buildTaskData(pkgs_to_build, task, halt, allowincomplete=True)
taskdata, runlist = self.buildTaskData(pkgs_to_build, task, False, allowincomplete=True)

return runlist, taskdata

@@ -710,7 +701,7 @@ You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in si
if not task.startswith("do_"):
task = "do_%s" % task

runlist, taskdata = self.prepareTreeData(pkgs_to_build, task, halt=True)
runlist, taskdata = self.prepareTreeData(pkgs_to_build, task)
rq = bb.runqueue.RunQueue(self, self.data, self.recipecaches, taskdata, runlist)
rq.rqdata.prepare()
return self.buildDependTree(rq, taskdata)
@@ -905,11 +896,10 @@ You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in si

depgraph = self.generateTaskDepTreeData(pkgs_to_build, task)

pns = depgraph["pn"].keys()
if pns:
with open('pn-buildlist', 'w') as f:
f.write("%s\n" % "\n".join(sorted(pns)))
logger.info("PN build list saved to 'pn-buildlist'")
with open('pn-buildlist', 'w') as f:
for pn in depgraph["pn"]:
f.write(pn + "\n")
logger.info("PN build list saved to 'pn-buildlist'")

# Remove old format output files to ensure no confusion with stale data
try:
@@ -943,7 +933,7 @@ You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in si
for mc in self.multiconfigs:
# First get list of recipes, including skipped
recipefns = list(self.recipecaches[mc].pkg_fn.keys())
recipefns.extend(self.skiplist_by_mc[mc].keys())
recipefns.extend(self.skiplist.keys())

# Work out list of bbappends that have been applied
applied_appends = []
@@ -962,7 +952,13 @@ You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in si
'\n '.join(appends_without_recipes[mc])))

if msgs:
bb.fatal("\n".join(msgs))
msg = "\n".join(msgs)
warn_only = self.databuilder.mcdata[mc].getVar("BB_DANGLINGAPPENDS_WARNONLY", \
False) or "no"
if warn_only.lower() in ("1", "yes", "true"):
bb.warn(msg)
else:
bb.fatal(msg)

def handlePrefProviders(self):

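The restored BB_DANGLINGAPPENDS_WARNONLY handling above gates whether dangling bbappends are fatal on a truthy-string check. A standalone sketch of that switch (function name and defaults are illustrative):

    # "1", "yes" or "true" (any case) downgrade the error to a warning;
    # anything else, including an unset value, keeps it fatal.
    def dangling_appends_fatal(warn_only_setting):
        warn_only = (warn_only_setting or "no").lower()
        return warn_only not in ("1", "yes", "true")

    assert dangling_appends_fatal(None) is True
    assert dangling_appends_fatal("Yes") is False
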
@@ -1342,7 +1338,7 @@ You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in si
self.buildSetVars()
self.reset_mtime_caches()

bb_caches = bb.cache.MulticonfigCache(self.databuilder, self.databuilder.data_hash, self.caches_array)
bb_caches = bb.cache.MulticonfigCache(self.databuilder, self.data_hash, self.caches_array)

layername = self.collections[mc].calc_bbfile_priority(fn)[2]
infos = bb_caches[mc].parse(fn, self.collections[mc].get_file_appends(fn), layername)
@@ -1403,11 +1399,11 @@ You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in si

msg = None
interrupted = 0
if halt or self.state == State.FORCE_SHUTDOWN:
if halt or self.state == state.forceshutdown:
rq.finish_runqueue(True)
msg = "Forced shutdown"
interrupted = 2
elif self.state == State.SHUTDOWN:
elif self.state == state.shutdown:
rq.finish_runqueue(False)
msg = "Stopped build"
interrupted = 1
@@ -1477,12 +1473,12 @@ You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in si
def buildTargetsIdle(server, rq, halt):
msg = None
interrupted = 0
if halt or self.state == State.FORCE_SHUTDOWN:
if halt or self.state == state.forceshutdown:
bb.event._should_exit.set()
rq.finish_runqueue(True)
msg = "Forced shutdown"
interrupted = 2
elif self.state == State.SHUTDOWN:
elif self.state == state.shutdown:
rq.finish_runqueue(False)
msg = "Stopped build"
interrupted = 1
@@ -1577,7 +1573,7 @@ You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in si


def updateCacheSync(self):
if self.state == State.RUNNING:
if self.state == state.running:
return

if not self.baseconfig_valid:
@@ -1587,19 +1583,19 @@ You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in si

# This is called for all async commands when self.state != running
def updateCache(self):
if self.state == State.RUNNING:
if self.state == state.running:
return

if self.state in (State.SHUTDOWN, State.FORCE_SHUTDOWN, State.ERROR):
if self.state in (state.shutdown, state.forceshutdown, state.error):
if hasattr(self.parser, 'shutdown'):
self.parser.shutdown(clean=False)
self.parser.final_cleanup()
raise bb.BBHandledException()

if self.state != State.PARSING:
if self.state != state.parsing:
self.updateCacheSync()

if self.state != State.PARSING and not self.parsecache_valid:
if self.state != state.parsing and not self.parsecache_valid:
bb.server.process.serverlog("Parsing started")
self.parsewatched = {}

@@ -1633,10 +1629,9 @@ You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in si
self.parser = CookerParser(self, mcfilelist, total_masked)
self._parsecache_set(True)

self.state = State.PARSING
self.state = state.parsing

if not self.parser.parse_next():
bb.server.process.serverlog("Parsing completed")
collectlog.debug("parsing complete")
if self.parser.error:
raise bb.BBHandledException()
@@ -1644,7 +1639,7 @@ You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in si
self.handlePrefProviders()
for mc in self.multiconfigs:
self.recipecaches[mc].bbfile_priority = self.collections[mc].collection_priorities(self.recipecaches[mc].pkg_fn, self.parser.mcfilelist[mc], self.data)
self.state = State.RUNNING
self.state = state.running

# Send an event listing all stamps reachable after parsing
# which the metadata may use to clean up stale data
@@ -1717,10 +1712,10 @@ You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in si

def shutdown(self, force=False):
if force:
self.state = State.FORCE_SHUTDOWN
self.state = state.forceshutdown
bb.event._should_exit.set()
else:
self.state = State.SHUTDOWN
self.state = state.shutdown

if self.parser:
self.parser.shutdown(clean=False)
@@ -1730,7 +1725,7 @@ You can also remove the BB_HASHSERVE_UPSTREAM setting, but this may result in si
if hasattr(self.parser, 'shutdown'):
self.parser.shutdown(clean=False)
self.parser.final_cleanup()
self.state = State.INITIAL
self.state = state.initial
bb.event._should_exit.clear()

def reset(self):
@@ -1817,8 +1812,8 @@ class CookerCollectFiles(object):
bb.event.fire(CookerExit(), eventdata)

# We need to track where we look so that we can know when the cache is invalid. There
# is no nice way to do this, this is horrid. We intercept the os.listdir() and os.scandir()
# calls while we run glob().
# is no nice way to do this, this is horrid. We intercept the os.listdir()
# (or os.scandir() for python 3.6+) calls while we run glob().
origlistdir = os.listdir
if hasattr(os, 'scandir'):
origscandir = os.scandir
@@ -2102,6 +2097,7 @@ class Parser(multiprocessing.Process):
except Exception as exc:
tb = sys.exc_info()[2]
exc.recipe = filename
exc.traceback = list(bb.exceptions.extract_traceback(tb, context=3))
return True, None, exc
# Need to turn BaseExceptions into Exceptions here so we gracefully shutdown
# and for example a worker thread doesn't just exit on its own in response to
@@ -2116,7 +2112,7 @@ class CookerParser(object):
self.mcfilelist = mcfilelist
self.cooker = cooker
self.cfgdata = cooker.data
self.cfghash = cooker.databuilder.data_hash
self.cfghash = cooker.data_hash
self.cfgbuilder = cooker.databuilder

# Accounting statistics
@@ -2228,8 +2224,9 @@ class CookerParser(object):

for process in self.processes:
process.join()
# clean up zombies
process.close()
# Added in 3.7, cleans up zombies
if hasattr(process, "close"):
process.close()

bb.codeparser.parser_cache_save()
bb.codeparser.parser_cache_savemerge()
@@ -2239,13 +2236,12 @@ class CookerParser(object):
profiles = []
for i in self.process_names:
logfile = "profile-parse-%s.log" % i
if os.path.exists(logfile) and os.path.getsize(logfile):
if os.path.exists(logfile):
profiles.append(logfile)

if profiles:
pout = "profile-parse.log.processed"
bb.utils.process_profilelog(profiles, pout = pout)
print("Processed parsing statistics saved to %s" % (pout))
pout = "profile-parse.log.processed"
bb.utils.process_profilelog(profiles, pout = pout)
print("Processed parsing statistics saved to %s" % (pout))

def final_cleanup(self):
if self.syncthread:
@@ -2302,12 +2298,8 @@ class CookerParser(object):
return False
except ParsingFailure as exc:
self.error += 1

exc_desc = str(exc)
if isinstance(exc, SystemExit) and not isinstance(exc.code, str):
exc_desc = 'Exited with "%d"' % exc.code

logger.error('Unable to parse %s: %s' % (exc.recipe, exc_desc))
logger.error('Unable to parse %s: %s' %
(exc.recipe, bb.exceptions.to_string(exc.realexception)))
self.shutdown(clean=False)
return False
except bb.parse.ParseError as exc:
@@ -2316,33 +2308,20 @@ class CookerParser(object):
self.shutdown(clean=False, eventmsg=str(exc))
return False
except bb.data_smart.ExpansionError as exc:
def skip_frames(f, fn_prefix):
while f and f.tb_frame.f_code.co_filename.startswith(fn_prefix):
f = f.tb_next
return f

self.error += 1
bbdir = os.path.dirname(__file__) + os.sep
etype, value, tb = sys.exc_info()

# Remove any frames where the code comes from bitbake. This
# prevents deep (and pretty useless) backtraces for expansion error
tb = skip_frames(tb, bbdir)
cur = tb
while cur:
cur.tb_next = skip_frames(cur.tb_next, bbdir)
cur = cur.tb_next

etype, value, _ = sys.exc_info()
tb = list(itertools.dropwhile(lambda e: e.filename.startswith(bbdir), exc.traceback))
logger.error('ExpansionError during parsing %s', value.recipe,
exc_info=(etype, value, tb))
self.shutdown(clean=False)
return False
except Exception as exc:
self.error += 1
_, value, _ = sys.exc_info()
etype, value, tb = sys.exc_info()
if hasattr(value, "recipe"):
logger.error('Unable to parse %s' % value.recipe,
exc_info=sys.exc_info())
exc_info=(etype, value, exc.traceback))
else:
# Most likely, an exception occurred during raising an exception
import traceback

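The ExpansionError handling above trims traceback frames whose code lives under the bitbake directory, so users see their own metadata frames rather than interpreter internals. The core idea in a runnable, self-contained sketch (the prefix here is deliberately non-matching so the traceback survives intact):

    import os, sys, traceback

    def trim_frames(tb, prefix):
        # Drop leading frames whose source file starts with the prefix.
        while tb and tb.tb_frame.f_code.co_filename.startswith(prefix):
            tb = tb.tb_next
        return tb

    def boom():
        raise RuntimeError("example")

    try:
        boom()
    except RuntimeError:
        _, _, tb = sys.exc_info()
        tb = trim_frames(tb, "/nonexistent" + os.sep)
        traceback.print_tb(tb)
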
@@ -2363,7 +2342,7 @@ class CookerParser(object):
for virtualfn, info_array in result:
if info_array[0].skipped:
self.skipped += 1
self.cooker.skiplist_by_mc[mc][virtualfn] = SkippedPackage(info_array[0])
self.cooker.skiplist[virtualfn] = SkippedPackage(info_array[0])
self.bb_caches[mc].add_info(virtualfn, info_array, self.cooker.recipecaches[mc],
parsed=parsed, watcher = self.cooker.add_filewatch)
return True

@@ -1,4 +1,3 @@

#
# Copyright (C) 2003, 2004 Chris Larson
# Copyright (C) 2003, 2004 Phil Blundell
@@ -255,21 +254,14 @@ class CookerDataBuilder(object):
self.data = self.basedata
self.mcdata = {}

def calc_datastore_hashes(self):
data_hash = hashlib.sha256()
data_hash.update(self.data.get_hash().encode('utf-8'))
multiconfig = (self.data.getVar("BBMULTICONFIG") or "").split()
for config in multiconfig:
data_hash.update(self.mcdata[config].get_hash().encode('utf-8'))
self.data_hash = data_hash.hexdigest()

def parseBaseConfiguration(self, worker=False):
mcdata = {}
data_hash = hashlib.sha256()
try:
self.data = self.parseConfigurationFiles(self.prefiles, self.postfiles)

servercontext = self.data.getVar("BB_WORKERCONTEXT", False) is None and not worker
bb.fetch.fetcher_init(self.data, servercontext)
if self.data.getVar("BB_WORKERCONTEXT", False) is None and not worker:
bb.fetch.fetcher_init(self.data)
bb.parse.init_parser(self.data)

bb.event.fire(bb.event.ConfigParsed(), self.data)
@@ -287,6 +279,7 @@ class CookerDataBuilder(object):
bb.event.fire(bb.event.ConfigParsed(), self.data)

bb.parse.init_parser(self.data)
data_hash.update(self.data.get_hash().encode('utf-8'))
mcdata[''] = self.data

multiconfig = (self.data.getVar("BBMULTICONFIG") or "").split()
@@ -296,9 +289,11 @@ class CookerDataBuilder(object):
parsed_mcdata = self.parseConfigurationFiles(self.prefiles, self.postfiles, config)
bb.event.fire(bb.event.ConfigParsed(), parsed_mcdata)
mcdata[config] = parsed_mcdata
data_hash.update(parsed_mcdata.get_hash().encode('utf-8'))
if multiconfig:
bb.event.fire(bb.event.MultiConfigParsed(mcdata), self.data)

self.data_hash = data_hash.hexdigest()
except bb.data_smart.ExpansionError as e:
logger.error(str(e))
raise bb.BBHandledException()
@@ -333,7 +328,6 @@ class CookerDataBuilder(object):
for mc in mcdata:
self.mcdata[mc] = bb.data.createCopy(mcdata[mc])
self.data = self.mcdata['']
self.calc_datastore_hashes()

def reset(self):
# We may not have run parseBaseConfiguration() yet
@@ -346,7 +340,7 @@ class CookerDataBuilder(object):
def _findLayerConf(self, data):
return findConfigFile("bblayers.conf", data)

def parseConfigurationFiles(self, prefiles, postfiles, mc = ""):
def parseConfigurationFiles(self, prefiles, postfiles, mc = "default"):
data = bb.data.createCopy(self.basedata)
data.setVar("BB_CURRENT_MC", mc)

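calc_datastore_hashes() above folds the hash of every configured datastore (the default one plus each BBMULTICONFIG entry) into a single sha256, so a change in any multiconfig invalidates the combined hash. The arithmetic in isolation, with placeholder inputs:

    import hashlib

    def combined_hash(store_hashes):
        h = hashlib.sha256()
        for s in store_hashes:          # order matters: keep it stable
            h.update(s.encode('utf-8'))
        return h.hexdigest()

    print(combined_hash(["hash-of-default-config", "hash-of-mc1"]))
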
@@ -293,7 +293,7 @@ def build_dependencies(key, keys, mod_funcs, shelldeps, varflagsexcl, ignored_va
if key in mod_funcs:
exclusions = set()
moddep = bb.codeparser.modulecode_deps[key]
value = handle_contains(moddep[4], moddep[3], exclusions, d)
value = handle_contains("", moddep[3], exclusions, d)
return frozenset((moddep[0] | keys & moddep[1]) - ignored_vars), value

if key[-1] == ']':

@@ -31,7 +31,7 @@ logger = logging.getLogger("BitBake.Data")
|
||||
|
||||
__setvar_keyword__ = [":append", ":prepend", ":remove"]
|
||||
__setvar_regexp__ = re.compile(r'(?P<base>.*?)(?P<keyword>:append|:prepend|:remove)(:(?P<add>[^A-Z]*))?$')
|
||||
__expand_var_regexp__ = re.compile(r"\${[a-zA-Z0-9\-_+./~:]+}")
|
||||
__expand_var_regexp__ = re.compile(r"\${[a-zA-Z0-9\-_+./~:]+?}")
|
||||
__expand_python_regexp__ = re.compile(r"\${@(?:{.*?}|.)+?}")
|
||||
__whitespace_split__ = re.compile(r'(\s)')
|
||||
__override_regexp__ = re.compile(r'[a-z0-9]+')
|
||||
@@ -106,52 +106,52 @@ class VariableParse:
|
||||
self.contains = {}
|
||||
|
||||
def var_sub(self, match):
|
||||
key = match.group()[2:-1]
|
||||
if self.varname and key:
|
||||
if self.varname == key:
|
||||
raise Exception("variable %s references itself!" % self.varname)
|
||||
var = self.d.getVarFlag(key, "_content")
|
||||
self.references.add(key)
|
||||
if var is not None:
|
||||
return var
|
||||
else:
|
||||
return match.group()
|
||||
key = match.group()[2:-1]
|
||||
if self.varname and key:
|
||||
if self.varname == key:
|
||||
raise Exception("variable %s references itself!" % self.varname)
|
||||
var = self.d.getVarFlag(key, "_content")
|
||||
self.references.add(key)
|
||||
if var is not None:
|
||||
return var
|
||||
else:
|
||||
return match.group()
|
||||
|
||||
def python_sub(self, match):
|
||||
if isinstance(match, str):
|
||||
code = match
|
||||
else:
|
||||
code = match.group()[3:-1]
|
||||
|
||||
# Do not run code that contains one or more unexpanded variables
|
||||
# instead return the code with the characters we removed put back
|
||||
if __expand_var_regexp__.findall(code):
|
||||
return "${@" + code + "}"
|
||||
|
||||
if self.varname:
|
||||
varname = 'Var <%s>' % self.varname
|
||||
else:
|
||||
varname = '<expansion>'
|
||||
codeobj = compile(code.strip(), varname, "eval")
|
||||
|
||||
parser = bb.codeparser.PythonParser(self.varname, logger)
|
||||
parser.parse_python(code)
|
||||
if self.varname:
|
||||
vardeps = self.d.getVarFlag(self.varname, "vardeps")
|
||||
if vardeps is None:
|
||||
parser.log.flush()
|
||||
else:
|
||||
parser.log.flush()
|
||||
self.references |= parser.references
|
||||
self.execs |= parser.execs
|
||||
|
||||
for k in parser.contains:
|
||||
if k not in self.contains:
|
||||
self.contains[k] = parser.contains[k].copy()
|
||||
if isinstance(match, str):
|
||||
code = match
|
||||
else:
|
||||
self.contains[k].update(parser.contains[k])
|
||||
value = utils.better_eval(codeobj, DataContext(self.d), {'d' : self.d})
|
||||
return str(value)
|
||||
code = match.group()[3:-1]
|
||||
|
||||
# Do not run code that contains one or more unexpanded variables
|
||||
# instead return the code with the characters we removed put back
|
||||
if __expand_var_regexp__.findall(code):
|
||||
return "${@" + code + "}"
|
||||
|
||||
if self.varname:
|
||||
varname = 'Var <%s>' % self.varname
|
||||
else:
|
||||
varname = '<expansion>'
|
||||
codeobj = compile(code.strip(), varname, "eval")
|
||||
|
||||
parser = bb.codeparser.PythonParser(self.varname, logger)
|
||||
parser.parse_python(code)
|
||||
if self.varname:
|
||||
vardeps = self.d.getVarFlag(self.varname, "vardeps")
|
||||
if vardeps is None:
|
||||
parser.log.flush()
|
||||
else:
|
||||
parser.log.flush()
|
||||
self.references |= parser.references
|
||||
self.execs |= parser.execs
|
||||
|
||||
for k in parser.contains:
|
||||
if k not in self.contains:
|
||||
self.contains[k] = parser.contains[k].copy()
|
||||
else:
|
||||
self.contains[k].update(parser.contains[k])
|
||||
value = utils.better_eval(codeobj, DataContext(self.d), {'d' : self.d})
|
||||
return str(value)
|
||||
|
||||
class DataContext(dict):
|
||||
excluded = set([i for i in dir(builtins) if not i.startswith('_')] + ['oe'])
|
||||
@@ -272,9 +272,12 @@ class VariableHistory(object):
|
||||
return
|
||||
if 'op' not in loginfo or not loginfo['op']:
|
||||
loginfo['op'] = 'set'
|
||||
if 'detail' in loginfo:
|
||||
loginfo['detail'] = str(loginfo['detail'])
|
||||
if 'variable' not in loginfo or 'file' not in loginfo:
|
||||
raise ValueError("record() missing variable or file.")
|
||||
var = loginfo['variable']
|
||||
|
||||
if var not in self.variables:
|
||||
self.variables[var] = []
|
||||
if not isinstance(self.variables[var], list):
|
||||
@@ -333,8 +336,7 @@ class VariableHistory(object):
|
||||
flag = '[%s] ' % (event['flag'])
|
||||
else:
|
||||
flag = ''
|
||||
o.write("# %s %s:%s%s\n# %s\"%s\"\n" % \
|
||||
(event['op'], event['file'], event['line'], display_func, flag, re.sub('\n', '\n# ', str(event['detail']))))
|
||||
o.write("# %s %s:%s%s\n# %s\"%s\"\n" % (event['op'], event['file'], event['line'], display_func, flag, re.sub('\n', '\n# ', event['detail'])))
|
||||
if len(history) > 1:
|
||||
o.write("# pre-expansion value:\n")
|
||||
o.write('# "%s"\n' % (commentVal))
|
||||
@@ -388,7 +390,7 @@ class VariableHistory(object):
|
||||
if isset and event['op'] == 'set?':
|
||||
continue
|
||||
isset = True
|
||||
items = d.expand(str(event['detail'])).split()
|
||||
items = d.expand(event['detail']).split()
|
||||
for item in items:
|
||||
# This is a little crude but is belt-and-braces to avoid us
|
||||
# having to handle every possible operation type specifically
|
||||
@@ -580,10 +582,12 @@ class DataSmart(MutableMapping):
|
||||
else:
|
||||
loginfo['op'] = keyword
|
||||
self.varhistory.record(**loginfo)
|
||||
# todo make sure keyword is not __doc__ or __module__
|
||||
# pay the cookie monster
|
||||
|
||||
# more cookies for the cookie monster
|
||||
self._setvar_update_overrides(base, **loginfo)
|
||||
if ':' in var:
|
||||
self._setvar_update_overrides(base, **loginfo)
|
||||
|
||||
if base in self.overridevars:
|
||||
self._setvar_update_overridevars(var, value)
|
||||
@@ -636,7 +640,6 @@ class DataSmart(MutableMapping):
                 nextnew.update(vardata.contains.keys())
             new = nextnew
         self.overrides = None
-        self.expand_cache = {}

     def _setvar_update_overrides(self, var, **loginfo):
         # aka pay the cookie monster
@@ -826,8 +829,6 @@ class DataSmart(MutableMapping):
                 value = copy.copy(local_var[flag])
             elif flag == "_content" and "_defaultval" in local_var and not noweakdefault:
                 value = copy.copy(local_var["_defaultval"])
-            elif "_defaultval_flag_"+flag in local_var and not noweakdefault:
-                value = copy.copy(local_var["_defaultval_flag_"+flag])

         if flag == "_content" and local_var is not None and ":append" in local_var and not parsing:
@@ -919,8 +920,6 @@ class DataSmart(MutableMapping):
         self.varhistory.record(**loginfo)

         del self.dict[var][flag]
-        if ("_defaultval_flag_" + flag) in self.dict[var]:
-            del self.dict[var]["_defaultval_flag_" + flag]

     def appendVarFlag(self, var, flag, value, **loginfo):
         loginfo['op'] = 'append'
@@ -955,22 +954,17 @@ class DataSmart(MutableMapping):
         flags = {}

         if local_var:
-            for i, val in local_var.items():
-                if i.startswith("_defaultval_flag_") and not internalflags:
-                    i = i[len("_defaultval_flag_"):]
-                    if i not in local_var:
-                        flags[i] = val
-                elif i.startswith(("_", ":")) and not internalflags:
+            for i in local_var:
+                if i.startswith(("_", ":")) and not internalflags:
                     continue
-                else:
-                    flags[i] = val
-
+                flags[i] = local_var[i]
                 if expand and i in expand:
                     flags[i] = self.expand(flags[i], var + "[" + i + "]")
         if len(flags) == 0:
             return None
         return flags

     def delVarFlags(self, var, **loginfo):
         self.expand_cache = {}
         if not var in self.dict:
@@ -1120,10 +1114,5 @@ class DataSmart(MutableMapping):
             value = d.getVar(i, False) or ""
             data.update({i:value})

-        moddeps = bb.codeparser.modulecode_deps
-        for dep in sorted(moddeps):
-            # Ignore visitor code, sort sets
-            data.update({'moddep[%s]' % dep : [sorted(moddeps[dep][0]), sorted(moddeps[dep][1]), sorted(moddeps[dep][2]), sorted(moddeps[dep][3]), moddeps[dep][4]]})
-
         data_str = str([(k, data[k]) for k in sorted(data.keys())])
         return hashlib.sha256(data_str.encode("utf-8")).hexdigest()
bitbake/lib/bb/event.py
@@ -19,6 +19,7 @@ import sys
 import threading
 import traceback

+import bb.exceptions
 import bb.utils

 # This is the pid for which we should generate the event. This is set when
@@ -194,12 +195,7 @@ def fire_ui_handlers(event, d):
         ui_queue.append(event)
         return

-    with bb.utils.lock_timeout_nocheck(_thread_lock) as lock:
-        if not lock:
-            # If we can't get the lock, we may be recursively called, queue and return
-            ui_queue.append(event)
-            return
-
+    with bb.utils.lock_timeout(_thread_lock):
         errors = []
         for h in _ui_handlers:
             #print "Sending event %s" % event
@@ -218,9 +214,6 @@ def fire_ui_handlers(event, d):
         for h in errors:
             del _ui_handlers[h]

-    while ui_queue:
-        fire_ui_handlers(ui_queue.pop(), d)
-
 def fire(event, d):
     """Fire off an Event"""
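Both variants of fire_ui_handlers guard against re-entrancy: events that arrive while delivery is already in progress are parked in ui_queue and drained later. A minimal standalone sketch of that queue-and-drain pattern, using plain threading and a hypothetical deliver() callback (not BitBake API):

import threading

_lock = threading.Lock()
_queue = []

def fire_nonblocking(event, deliver):
    # If the lock is already held we may be re-entering; park the event.
    if not _lock.acquire(blocking=False):
        _queue.append(event)
        return
    try:
        deliver(event)
    finally:
        _lock.release()
    # Drain anything queued while the lock was held.
    while _queue:
        fire_nonblocking(_queue.pop(), deliver)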
@@ -766,7 +759,13 @@ class LogHandler(logging.Handler):

     def emit(self, record):
         if record.exc_info:
-            record.bb_exc_formatted = traceback.format_exception(*record.exc_info)
+            etype, value, tb = record.exc_info
+            if hasattr(tb, 'tb_next'):
+                tb = list(bb.exceptions.extract_traceback(tb, context=3))
+            # Need to turn the value into something the logging system can pickle
+            record.bb_exc_info = (etype, value, tb)
+            record.bb_exc_formatted = bb.exceptions.format_exception(etype, value, tb, limit=5)
+            value = str(value)
             record.exc_info = None
         fire(record, None)
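The reworked emit() exists because raw traceback objects cannot cross process boundaries. A small illustration of the constraint, using only the standard library (the variable names here are ours, not BitBake's):

import pickle
import sys

try:
    1 / 0
except ZeroDivisionError:
    etype, value, tb = sys.exc_info()
    # pickle.dumps(tb) would raise TypeError: traceback objects are not
    # picklable, so only derived, picklable data may ride on a log record.
    safe = (etype, str(value))
    pickle.dumps(safe)  # fine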
bitbake/lib/bb/exceptions.py (new file, 96 lines)
@@ -0,0 +1,96 @@
#
# Copyright BitBake Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#

import inspect
import traceback
import bb.namedtuple_with_abc
from collections import namedtuple


class TracebackEntry(namedtuple.abc):
    """Pickleable representation of a traceback entry"""
    _fields = 'filename lineno function args code_context index'
    _header = '  File "{0.filename}", line {0.lineno}, in {0.function}{0.args}'

    def format(self, formatter=None):
        if not self.code_context:
            return self._header.format(self) + '\n'

        formatted = [self._header.format(self) + ':\n']

        for lineindex, line in enumerate(self.code_context):
            if formatter:
                line = formatter(line)

            if lineindex == self.index:
                formatted.append('    >%s' % line)
            else:
                formatted.append('     %s' % line)
        return formatted

    def __str__(self):
        return ''.join(self.format())

def _get_frame_args(frame):
    """Get the formatted arguments and class (if available) for a frame"""
    arginfo = inspect.getargvalues(frame)

    try:
        if not arginfo.args:
            return '', None
    # There have been reports from the field of python 2.6 which doesn't
    # return a namedtuple here but simply a tuple so fallback gracefully if
    # args isn't present.
    except AttributeError:
        return '', None

    firstarg = arginfo.args[0]
    if firstarg == 'self':
        self = arginfo.locals['self']
        cls = self.__class__.__name__

        arginfo.args.pop(0)
        del arginfo.locals['self']
    else:
        cls = None

    formatted = inspect.formatargvalues(*arginfo)
    return formatted, cls

def extract_traceback(tb, context=1):
    frames = inspect.getinnerframes(tb, context)
    for frame, filename, lineno, function, code_context, index in frames:
        formatted_args, cls = _get_frame_args(frame)
        if cls:
            function = '%s.%s' % (cls, function)
        yield TracebackEntry(filename, lineno, function, formatted_args,
                             code_context, index)

def format_extracted(extracted, formatter=None, limit=None):
    if limit:
        extracted = extracted[-limit:]

    formatted = []
    for tracebackinfo in extracted:
        formatted.extend(tracebackinfo.format(formatter))
    return formatted


def format_exception(etype, value, tb, context=1, limit=None, formatter=None):
    formatted = ['Traceback (most recent call last):\n']

    if hasattr(tb, 'tb_next'):
        tb = extract_traceback(tb, context)

    formatted.extend(format_extracted(tb, formatter, limit))
    formatted.extend(traceback.format_exception_only(etype, value))
    return formatted

def to_string(exc):
    if isinstance(exc, SystemExit):
        if not isinstance(exc.code, str):
            return 'Exited with "%d"' % exc.code
    return str(exc)
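A hedged usage sketch for the module above, assuming it is importable as bb.exceptions from a BitBake checkout:

import sys
import bb.exceptions

try:
    raise RuntimeError("boom")
except RuntimeError:
    etype, value, tb = sys.exc_info()
    # Three lines of source context per frame, at most five frames.
    print(''.join(bb.exceptions.format_exception(etype, value, tb, context=3, limit=5)))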
bitbake/lib/bb/fetch2/__init__.py
@@ -23,18 +23,17 @@ import collections
 import subprocess
 import pickle
 import errno
-import bb.utils
+import bb.persist_data, bb.utils
 import bb.checksum
 import bb.process
 import bb.event

 __version__ = "2"
 _checksum_cache = bb.checksum.FileChecksumCache()
-_revisions_cache = bb.checksum.RevisionsCache()

 logger = logging.getLogger("BitBake.Fetcher")

-CHECKSUM_LIST = [ "goh1", "md5", "sha256", "sha1", "sha384", "sha512" ]
+CHECKSUM_LIST = [ "md5", "sha256", "sha1", "sha384", "sha512" ]
 SHOWN_CHECKSUM_LIST = ["sha256"]

 class BBFetchException(Exception):
@@ -238,7 +237,7 @@ class URI(object):
         # to RFC compliant URL format. E.g.:
         #   file://foo.diff -> file:foo.diff
         if urlp.scheme in self._netloc_forbidden:
-            uri = re.sub(r"(?<=:)//(?!/)", "", uri, count=1)
+            uri = re.sub("(?<=:)//(?!/)", "", uri, 1)
             reparse = 1

         if reparse:
@@ -353,14 +352,6 @@ def decodeurl(url):
     user, password, parameters).
     """

-    uri = URI(url)
-    path = uri.path if uri.path else "/"
-    return uri.scheme, uri.hostport, path, uri.username, uri.password, uri.params
-
-def decodemirrorurl(url):
-    """Decodes a mirror URL into the tokens (scheme, network location, path,
-    user, password, parameters).
-    """
     m = re.compile('(?P<type>[^:]*)://((?P<user>[^/;]+)@)?(?P<location>[^;]+)(;(?P<parm>.*))?').match(url)
     if not m:
         raise MalformedUrl(url)
@@ -379,9 +370,6 @@ def decodemirrorurl(url):
     elif type.lower() == 'file':
         host = ""
         path = location
-        if user:
-            path = user + '@' + path
-            user = ""
     else:
         host = location
         path = "/"
@@ -414,34 +402,32 @@ def encodeurl(decoded):

     if not type:
         raise MissingParameterError('type', "encoded from the data %s" % str(decoded))
-    uri = URI()
-    uri.scheme = type
+    url = ['%s://' % type]
     if user and type != "file":
-        uri.username = user
+        url.append("%s" % user)
         if pswd:
-            uri.password = pswd
+            url.append(":%s" % pswd)
+        url.append("@")
     if host and type != "file":
-        uri.hostname = host
+        url.append("%s" % host)
     if path:
         # Standardise path to ensure comparisons work
         while '//' in path:
             path = path.replace("//", "/")
-        uri.path = path
-        if type == "file":
-            # Use old not IETF compliant style
-            uri.relative = False
+        url.append("%s" % urllib.parse.quote(path))
     if p:
-        uri.params = p
+        for parm in p:
+            url.append(";%s=%s" % (parm, p[parm]))

-    return str(uri)
+    return "".join(url)
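A small round-trip sketch using the two functions above; the example URL and values are made up:

# (scheme, host, path, user, password, params) as returned by decodeurl()
parts = ('git', 'example.com', '/repo.git', 'user', None, {'branch': 'main'})
url = encodeurl(parts)
# -> "git://user@example.com/repo.git;branch=main"
assert decodeurl(url)[0] == 'git'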
 def uri_replace(ud, uri_find, uri_replace, replacements, d, mirrortarball=None):
     if not ud.url or not uri_find or not uri_replace:
         logger.error("uri_replace: passed an undefined value, not replacing")
         return None
-    uri_decoded = list(decodemirrorurl(ud.url))
-    uri_find_decoded = list(decodemirrorurl(uri_find))
-    uri_replace_decoded = list(decodemirrorurl(uri_replace))
+    uri_decoded = list(decodeurl(ud.url))
+    uri_find_decoded = list(decodeurl(uri_find))
+    uri_replace_decoded = list(decodeurl(uri_replace))
     logger.debug2("For url %s comparing %s to %s" % (uri_decoded, uri_find_decoded, uri_replace_decoded))
     result_decoded = ['', '', '', '', '', {}]
     # 0 - type, 1 - host, 2 - path, 3 - user, 4- pswd, 5 - params
@@ -474,7 +460,7 @@ def uri_replace(ud, uri_find, uri_replace, replacements, d, mirrortarball=None):
                     for k in replacements:
                         uri_replace_decoded[loc] = uri_replace_decoded[loc].replace(k, replacements[k])
                     #bb.note("%s %s %s" % (regexp, uri_replace_decoded[loc], uri_decoded[loc]))
-                    result_decoded[loc] = re.sub(regexp, uri_replace_decoded[loc], uri_decoded[loc], count=1)
+                    result_decoded[loc] = re.sub(regexp, uri_replace_decoded[loc], uri_decoded[loc], 1)
                     if loc == 2:
                         # Handle path manipulations
                         basename = None
@@ -507,23 +493,18 @@ methods = []
 urldata_cache = {}
 saved_headrevs = {}

-def fetcher_init(d, servercontext=True):
+def fetcher_init(d):
     """
     Called to initialize the fetchers once the configuration data is known.
     Calls before this must not hit the cache.
     """

-    _checksum_cache.init_cache(d.getVar("BB_CACHEDIR"))
-    _revisions_cache.init_cache(d.getVar("BB_CACHEDIR"))
-
-    if not servercontext:
-        return
-
+    revs = bb.persist_data.persist('BB_URI_HEADREVS', d)
     try:
         # fetcher_init is called multiple times, so make sure we only save the
         # revs the first time it is called.
         if not bb.fetch2.saved_headrevs:
-            bb.fetch2.saved_headrevs = _revisions_cache.get_revs()
+            bb.fetch2.saved_headrevs = dict(revs)
     except:
         pass
@@ -533,10 +514,11 @@ def fetcher_init(d, servercontext=True):
         logger.debug("Keeping SRCREV cache due to cache policy of: %s", srcrev_policy)
     elif srcrev_policy == "clear":
         logger.debug("Clearing SRCREV cache due to cache policy of: %s", srcrev_policy)
-        _revisions_cache.clear_cache()
+        revs.clear()
     else:
         raise FetchError("Invalid SRCREV cache policy of: %s" % srcrev_policy)

+    _checksum_cache.init_cache(d.getVar("BB_CACHEDIR"))
+
     for m in methods:
         if hasattr(m, "init"):
@@ -544,11 +526,9 @@ def fetcher_init(d, servercontext=True):

 def fetcher_parse_save():
     _checksum_cache.save_extras()
-    _revisions_cache.save_extras()

 def fetcher_parse_done():
     _checksum_cache.save_merge()
-    _revisions_cache.save_merge()

 def fetcher_compare_revisions(d):
     """
@@ -556,7 +536,7 @@ def fetcher_compare_revisions(d):
     when bitbake was started and return true if they have changed.
     """

-    headrevs = _revisions_cache.get_revs()
+    headrevs = dict(bb.persist_data.persist('BB_URI_HEADREVS', d))
     return headrevs != bb.fetch2.saved_headrevs

 def mirror_from_string(data):
@@ -806,8 +786,8 @@ def _get_srcrev(d, method_name='sortable_revision'):
         return "", revs


-    if len(scms) == 1:
-        autoinc, rev = getattr(urldata[scms[0]].method, method_name)(urldata[scms[0]], d, urldata[scms[0]].name)
+    if len(scms) == 1 and len(urldata[scms[0]].names) == 1:
+        autoinc, rev = getattr(urldata[scms[0]].method, method_name)(urldata[scms[0]], d, urldata[scms[0]].names[0])
         revs.append(rev)
         if len(rev) > 10:
             rev = rev[:10]
@@ -828,12 +808,13 @@ def _get_srcrev(d, method_name='sortable_revision'):
     seenautoinc = False
     for scm in scms:
         ud = urldata[scm]
-        autoinc, rev = getattr(ud.method, method_name)(ud, d, ud.name)
-        revs.append(rev)
-        seenautoinc = seenautoinc or autoinc
-        if len(rev) > 10:
-            rev = rev[:10]
-        name_to_rev[ud.name] = rev
+        for name in ud.names:
+            autoinc, rev = getattr(ud.method, method_name)(ud, d, name)
+            revs.append(rev)
+            seenautoinc = seenautoinc or autoinc
+            if len(rev) > 10:
+                rev = rev[:10]
+            name_to_rev[name] = rev
     # Replace names by revisions in the SRCREV_FORMAT string. The approach used
     # here can handle names being prefixes of other names and names appearing
     # as substrings in revisions (in which case the name should not be
@@ -897,7 +878,6 @@ FETCH_EXPORT_VARS = ['HOME', 'PATH',
                      'AWS_SESSION_TOKEN',
                      'GIT_CACHE_PATH',
                      'REMOTE_CONTAINERS_IPC',
-                     'GITHUB_TOKEN',
                      'SSL_CERT_DIR']

 def get_fetcher_environment(d):
@@ -1194,7 +1174,7 @@ def trusted_network(d, url):
     if bb.utils.to_boolean(d.getVar("BB_NO_NETWORK")):
         return True

-    pkgname = d.getVar('PN')
+    pkgname = d.expand(d.getVar('PN', False))
     trusted_hosts = None
     if pkgname:
         trusted_hosts = d.getVarFlag('BB_ALLOWED_NETWORKS', pkgname, False)
@@ -1247,17 +1227,20 @@ def srcrev_internal_helper(ud, d, name):
         if srcrev and srcrev != "INVALID":
             break

-    if 'rev' in ud.parm and 'tag' in ud.parm:
-        raise FetchError("Please specify a ;rev= parameter or a ;tag= parameter in the url %s but not both." % (ud.url))
-
-    if 'rev' in ud.parm or 'tag' in ud.parm:
-        if 'rev' in ud.parm:
-            parmrev = ud.parm['rev']
-        else:
-            parmrev = ud.parm['tag']
+    if 'rev' in ud.parm:
+        parmrev = ud.parm['rev']
         if srcrev == "INVALID" or not srcrev:
             return parmrev
         if srcrev != parmrev:
             raise FetchError("Conflicting revisions (%s from SRCREV and %s from the url) found, please specify one valid value" % (srcrev, parmrev))
         return parmrev

+    if 'tag' in ud.parm and (srcrev == "INVALID" or not srcrev):
+        return ud.parm['tag']
+
     if srcrev == "INVALID" or not srcrev:
         raise FetchError("Please set a valid SRCREV for url %s (possible key names are %s, or use a ;rev=X URL parameter)" % (str(attempts), ud.url), ud.url)
     if srcrev == "AUTOINC":
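Both sides implement the same precedence rule: an explicit revision given in the URL wins over SRCREV unless the two conflict. A condensed sketch of that rule (simplified, not the fetcher's actual code):

def pick_rev(parm, srcrev):
    parmrev = parm.get('rev') or parm.get('tag')
    if parmrev:
        if srcrev in (None, "", "INVALID"):
            return parmrev
        if srcrev != parmrev:
            raise ValueError("conflicting revisions: %s vs %s" % (srcrev, parmrev))
        return parmrev
    return srcrev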
@@ -1280,7 +1263,7 @@ def get_checksum_file_list(d):
             found = False
             paths = ud.method.localfile_searchpaths(ud, d)
             for f in paths:
-                pth = ud.path
+                pth = ud.decodedurl
                 if os.path.exists(f):
                     found = True
                 filelist.append(f + ":" + str(os.path.exists(f)))
@@ -1325,28 +1308,23 @@ class FetchData(object):
         self.setup = False

         def configure_checksum(checksum_id):
-            checksum_plain_name = "%ssum" % checksum_id
             if "name" in self.parm:
                 checksum_name = "%s.%ssum" % (self.parm["name"], checksum_id)
             else:
-                checksum_name = checksum_plain_name
+                checksum_name = "%ssum" % checksum_id
+
+            setattr(self, "%s_name" % checksum_id, checksum_name)

             if checksum_name in self.parm:
                 checksum_expected = self.parm[checksum_name]
-            elif checksum_plain_name in self.parm:
-                checksum_expected = self.parm[checksum_plain_name]
-                checksum_name = checksum_plain_name
-            elif self.type not in ["http", "https", "ftp", "ftps", "sftp", "s3", "az", "crate", "gs", "gomod", "npm"]:
+            elif self.type not in ["http", "https", "ftp", "ftps", "sftp", "s3", "az", "crate", "gs"]:
                 checksum_expected = None
             else:
                 checksum_expected = d.getVarFlag("SRC_URI", checksum_name)

-            setattr(self, "%s_name" % checksum_id, checksum_name)
             setattr(self, "%s_expected" % checksum_id, checksum_expected)

-        self.name = self.parm.get("name",'default')
-        if "," in self.name:
-            raise ParameterError("The fetcher no longer supports multiple name parameters in a single url", self.url)
+        self.names = self.parm.get("name",'default').split(',')

         self.method = None
         for m in methods:
@@ -1398,7 +1376,13 @@ class FetchData(object):
             self.lockfile = basepath + '.lock'

     def setup_revisions(self, d):
-        self.revision = srcrev_internal_helper(self, d, self.name)
+        self.revisions = {}
+        for name in self.names:
+            self.revisions[name] = srcrev_internal_helper(self, d, name)
+
+        # add compatibility code for non name specified case
+        if len(self.names) == 1:
+            self.revision = self.revisions[self.names[0]]

     def setup_localpath(self, d):
         if not self.localpath:
@@ -1526,7 +1510,7 @@ class FetchMethod(object):
                             (file, urldata.parm.get('unpack')))

             base, ext = os.path.splitext(file)
-            if ext in ['.gz', '.bz2', '.Z', '.xz', '.lz', '.zst']:
+            if ext in ['.gz', '.bz2', '.Z', '.xz', '.lz']:
                 efile = os.path.join(rootdir, os.path.basename(base))
             else:
                 efile = file
@@ -1622,7 +1606,7 @@ class FetchMethod(object):
                 if urlpath.find("/") != -1:
                     destdir = urlpath.rsplit("/", 1)[0] + '/'
                     bb.utils.mkdirhier("%s/%s" % (unpackdir, destdir))
-                cmd = 'cp --force --preserve=timestamps --no-dereference --recursive -H "%s" "%s"' % (file, destdir)
+                cmd = 'cp -fpPRH "%s" "%s"' % (file, destdir)
             else:
                 urldata.unpack_tracer.unpack("archive-extract", unpackdir)
@@ -1678,13 +1662,13 @@ class FetchMethod(object):
         if not hasattr(self, "_latest_revision"):
             raise ParameterError("The fetcher for this URL does not support _latest_revision", ud.url)

+        revs = bb.persist_data.persist('BB_URI_HEADREVS', d)
         key = self.generate_revision_key(ud, d, name)
-
-        rev = _revisions_cache.get_rev(key)
-        if rev is None:
-            rev = self._latest_revision(ud, d, name)
-            _revisions_cache.set_rev(key, rev)
-        return rev
+        try:
+            return revs[key]
+        except KeyError:
+            revs[key] = rev = self._latest_revision(ud, d, name)
+            return rev
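Both implementations are the same memoization pattern over different backends (a RevisionsCache object versus a persist_data store): build a per-URL key, return the cached revision on a hit, and fill the cache on a miss. Schematically:

_revs = {}  # stand-in for either cache backend

def cached_latest_revision(key, compute):
    rev = _revs.get(key)
    if rev is None:
        rev = _revs[key] = compute()
    return rev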
     def sortable_revision(self, ud, d, name):
         latest_rev = self._build_revision(ud, d, name)
@@ -1822,7 +1806,7 @@ class Fetch(object):
             self.ud[url] = FetchData(url, self.d)

         self.ud[url].setup_localpath(self.d)
-        return self.ud[url].localpath
+        return self.d.expand(self.ud[url].localpath)

     def localpaths(self):
         """
@@ -1875,28 +1859,25 @@ class Fetch(object):
                     logger.debug(str(e))
                     done = False

-            d = self.d
             if premirroronly:
-                # Only disable the network in a copy
-                d = bb.data.createCopy(self.d)
-                d.setVar("BB_NO_NETWORK", "1")
+                self.d.setVar("BB_NO_NETWORK", "1")

             firsterr = None
             verified_stamp = False
             if done:
-                verified_stamp = m.verify_donestamp(ud, d)
-            if not done and (not verified_stamp or m.need_update(ud, d)):
+                verified_stamp = m.verify_donestamp(ud, self.d)
+            if not done and (not verified_stamp or m.need_update(ud, self.d)):
                 try:
-                    if not trusted_network(d, ud.url):
+                    if not trusted_network(self.d, ud.url):
                         raise UntrustedUrl(ud.url)
                     logger.debug("Trying Upstream")
-                    m.download(ud, d)
+                    m.download(ud, self.d)
                     if hasattr(m, "build_mirror_data"):
-                        m.build_mirror_data(ud, d)
+                        m.build_mirror_data(ud, self.d)
                     done = True
                     # early checksum verify, so that if checksum mismatched,
                     # fetcher still have chance to fetch from mirror
-                    m.update_donestamp(ud, d)
+                    m.update_donestamp(ud, self.d)

                 except bb.fetch2.NetworkAccess:
                     raise
@@ -1915,17 +1896,17 @@ class Fetch(object):
                     firsterr = e
                     # Remove any incomplete fetch
                     if not verified_stamp and m.cleanup_upon_failure():
-                        m.clean(ud, d)
+                        m.clean(ud, self.d)
                     logger.debug("Trying MIRRORS")
-                    mirrors = mirror_from_string(d.getVar('MIRRORS'))
-                    done = m.try_mirrors(self, ud, d, mirrors)
+                    mirrors = mirror_from_string(self.d.getVar('MIRRORS'))
+                    done = m.try_mirrors(self, ud, self.d, mirrors)

-                if not done or not m.done(ud, d):
+                if not done or not m.done(ud, self.d):
                     if firsterr:
                         logger.error(str(firsterr))
                     raise FetchError("Unable to fetch URL from any source.", u)

-                m.update_donestamp(ud, d)
+                m.update_donestamp(ud, self.d)

             except IOError as e:
                 if e.errno in [errno.ESTALE]:
@@ -2107,7 +2088,6 @@ from . import npmsw
 from . import az
 from . import crate
 from . import gcp
-from . import gomod

 methods.append(local.Local())
 methods.append(wget.Wget())
@@ -2130,5 +2110,3 @@ methods.append(npmsw.NpmShrinkWrap())
 methods.append(az.Az())
 methods.append(crate.Crate())
 methods.append(gcp.GCP())
-methods.append(gomod.GoMod())
-methods.append(gomod.GoModGit())
bitbake/lib/bb/fetch2/az.py
@@ -66,12 +66,11 @@ class Az(Wget):
         else:
             azuri = '%s%s%s' % ('https://', ud.host, ud.path)

-        dldir = d.getVar("DL_DIR")
         if os.path.exists(ud.localpath):
             # file exists, but we didnt complete it.. trying again.
-            fetchcmd += " -c -P %s '%s'" % (dldir, azuri)
+            fetchcmd += d.expand(" -c -P ${DL_DIR} '%s'" % azuri)
         else:
-            fetchcmd += " -P %s '%s'" % (dldir, azuri)
+            fetchcmd += d.expand(" -P ${DL_DIR} '%s'" % azuri)

         try:
             self._runwget(ud, d, fetchcmd, False)
bitbake/lib/bb/fetch2/clearcase.py
@@ -108,7 +108,7 @@ class ClearCase(FetchMethod):
                                             ud.module.replace("/", "."),
                                             ud.label.replace("/", "."))

-        ud.viewname = "%s-view%s" % (ud.identifier, d.getVar("DATETIME"))
+        ud.viewname = "%s-view%s" % (ud.identifier, d.getVar("DATETIME", d, True))
         ud.csname = "%s-config-spec" % (ud.identifier)
         ud.ccasedir = os.path.join(d.getVar("DL_DIR"), ud.type)
         ud.viewdir = os.path.join(ud.ccasedir, ud.viewname)
@@ -130,6 +130,8 @@ class ClearCase(FetchMethod):
         self.debug("configspecfile = %s" % ud.configspecfile)
         self.debug("localfile = %s" % ud.localfile)

+        ud.localfile = os.path.join(d.getVar("DL_DIR"), ud.localfile)
+
     def _build_ccase_command(self, ud, command):
         """
         Build up a commandline based on ud
@@ -194,7 +196,7 @@ class ClearCase(FetchMethod):

     def need_update(self, ud, d):
         if ("LATEST" in ud.label) or (ud.customspec and "LATEST" in ud.customspec):
-            ud.identifier += "-%s" % d.getVar("DATETIME")
+            ud.identifier += "-%s" % d.getVar("DATETIME",d, True)
             return True
         if os.path.exists(ud.localpath):
             return False
bitbake/lib/bb/fetch2/crate.py
@@ -70,7 +70,6 @@ class Crate(Wget):
             host = 'crates.io/api/v1/crates'

         ud.url = "https://%s/%s/%s/download" % (host, name, version)
-        ud.versionsurl = "https://%s/%s/versions" % (host, name)
         ud.parm['downloadfilename'] = "%s-%s.crate" % (name, version)
         if 'name' not in ud.parm:
             ud.parm['name'] = '%s-%s' % (name, version)
@@ -140,11 +139,3 @@ class Crate(Wget):
         mdpath = os.path.join(bbpath, cratepath, mdfile)
         with open(mdpath, "w") as f:
             json.dump(metadata, f)
-
-    def latest_versionstring(self, ud, d):
-        from functools import cmp_to_key
-        json_data = json.loads(self._fetch_index(ud.versionsurl, ud, d))
-        versions = [(0, i["num"], "") for i in json_data["versions"]]
-        versions = sorted(versions, key=cmp_to_key(bb.utils.vercmp))
-
-        return (versions[-1][1], "")
bitbake/lib/bb/fetch2/gcp.py
@@ -23,6 +23,7 @@ import urllib.parse, urllib.error
 from bb.fetch2 import FetchMethod
 from bb.fetch2 import FetchError
 from bb.fetch2 import logger
+from bb.fetch2 import runfetchcmd

 class GCP(FetchMethod):
     """
@@ -46,7 +47,8 @@ class GCP(FetchMethod):
         else:
             ud.basename = os.path.basename(ud.path)

-        ud.localfile = ud.basename
+        ud.localfile = d.expand(urllib.parse.unquote(ud.basename))
+        ud.basecmd = "gsutil stat"

     def get_gcp_client(self):
         from google.cloud import storage
@@ -57,20 +59,17 @@ class GCP(FetchMethod):
         Fetch urls using the GCP API.
        Assumes localpath was called first.
        """
-        from google.api_core.exceptions import NotFound
         logger.debug2(f"Trying to download gs://{ud.host}{ud.path} to {ud.localpath}")
         if self.gcp_client is None:
             self.get_gcp_client()

-        bb.fetch2.check_network_access(d, "blob.download_to_filename", f"gs://{ud.host}{ud.path}")
+        bb.fetch2.check_network_access(d, ud.basecmd, f"gs://{ud.host}{ud.path}")
+        runfetchcmd("%s %s" % (ud.basecmd, f"gs://{ud.host}{ud.path}"), d)

         # Path sometimes has leading slash, so strip it
         path = ud.path.lstrip("/")
         blob = self.gcp_client.bucket(ud.host).blob(path)
-        try:
-            blob.download_to_filename(ud.localpath)
-        except NotFound:
-            raise FetchError("The GCP API threw a NotFound exception")
+        blob.download_to_filename(ud.localpath)

         # Additional sanity checks copied from the wget class (although there
         # are no known issues which mean these are required, treat the GCP API
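For reference, a minimal standalone sketch of the client-library download path shown above, assuming the google-cloud-storage package is installed; the function and names here are illustrative only:

from google.api_core.exceptions import NotFound
from google.cloud import storage

def download_blob(bucket, path, localpath):
    # Path sometimes has a leading slash, so strip it before building the blob.
    blob = storage.Client().bucket(bucket).blob(path.lstrip("/"))
    try:
        blob.download_to_filename(localpath)
    except NotFound:
        raise FileNotFoundError("gs://%s/%s not found" % (bucket, path))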
@@ -92,7 +91,8 @@ class GCP(FetchMethod):
         if self.gcp_client is None:
             self.get_gcp_client()

-        bb.fetch2.check_network_access(d, "gcp_client.bucket(ud.host).blob(path).exists()", f"gs://{ud.host}{ud.path}")
+        bb.fetch2.check_network_access(d, ud.basecmd, f"gs://{ud.host}{ud.path}")
+        runfetchcmd("%s %s" % (ud.basecmd, f"gs://{ud.host}{ud.path}"), d)

         # Path sometimes has leading slash, so strip it
         path = ud.path.lstrip("/")
bitbake/lib/bb/fetch2/git.py
@@ -9,6 +9,15 @@ Supported SRC_URI options are:
 - branch
    The git branch to retrieve from. The default is "master"

+   This option also supports multiple branch fetching, with branches
+   separated by commas. In multiple branches case, the name option
+   must have the same number of names to match the branches, which is
+   used to specify the SRC_REV for the branch
+   e.g:
+   SRC_URI="git://some.host/somepath;branch=branchX,branchY;name=nameX,nameY"
+   SRCREV_nameX = "xxxxxxxxxxxxxxxxxxxx"
+   SRCREV_nameY = "YYYYYYYYYYYYYYYYYYYY"
+
 - tag
    The git tag to retrieve. The default is "master"
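A sketch of how the comma-separated branch and name lists pair up in the multi-branch case added above (illustrative values, not fetcher code):

parm = {"branch": "branchX,branchY", "name": "nameX,nameY"}
branches = parm.get("branch", "").split(',')
names = parm.get("name", "default").split(',')
if len(branches) != len(names):
    raise ValueError("name and branch parameters are not balanced")
branch_for = dict(zip(names, branches))  # {'nameX': 'branchX', 'nameY': 'branchY'}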
@@ -72,7 +81,6 @@ import shlex
 import shutil
 import subprocess
 import tempfile
-import urllib
 import bb
 import bb.progress
 from contextlib import contextmanager
@@ -183,10 +191,13 @@ class Git(FetchMethod):
         if ud.bareclone:
             ud.nocheckout = 1

-        ud.unresolvedrev = ""
-        ud.branch = ud.parm.get("branch", "")
-        if not ud.branch and not ud.nobranch:
-            raise bb.fetch2.ParameterError("The url does not set any branch parameter or set nobranch=1.", ud.url)
+        ud.unresolvedrev = {}
+        branches = ud.parm.get("branch", "").split(',')
+        if branches == [""] and not ud.nobranch:
+            bb.warn("URL: %s does not set any branch parameter. The future default branch used by tools and repositories is uncertain and we will therefore soon require this is set in all git urls." % ud.url)
+            branches = ["master"]
+        if len(branches) != len(ud.names):
+            raise bb.fetch2.ParameterError("The number of name and branch parameters is not balanced", ud.url)

         ud.noshared = d.getVar("BB_GIT_NOSHARED") == "1"
@@ -196,7 +207,6 @@ class Git(FetchMethod):
         if ud.bareclone:
             ud.cloneflags += " --mirror"

-        ud.shallow_skip_fast = False
         ud.shallow = d.getVar("BB_GIT_SHALLOW") == "1"
         ud.shallow_extra_refs = (d.getVar("BB_GIT_SHALLOW_EXTRA_REFS") or "").split()

@@ -215,27 +225,32 @@ class Git(FetchMethod):

         revs_default = d.getVar("BB_GIT_SHALLOW_REVS")
         ud.shallow_revs = []
-
-        ud.unresolvedrev = ud.branch
-
-        shallow_depth = d.getVar("BB_GIT_SHALLOW_DEPTH_%s" % ud.name)
-        if shallow_depth is not None:
-            try:
-                shallow_depth = int(shallow_depth or 0)
-            except ValueError:
-                raise bb.fetch2.FetchError("Invalid depth for BB_GIT_SHALLOW_DEPTH_%s: %s" % (ud.name, shallow_depth))
-            else:
-                if shallow_depth < 0:
-                    raise bb.fetch2.FetchError("Invalid depth for BB_GIT_SHALLOW_DEPTH_%s: %s" % (ud.name, shallow_depth))
-            ud.shallow_depths[ud.name] = shallow_depth
-
-        revs = d.getVar("BB_GIT_SHALLOW_REVS_%s" % ud.name)
-        if revs is not None:
-            ud.shallow_revs.extend(revs.split())
-        elif revs_default is not None:
-            ud.shallow_revs.extend(revs_default.split())
-
-        if ud.shallow and not ud.shallow_revs and ud.shallow_depths[ud.name] == 0:
+        ud.branches = {}
+        for pos, name in enumerate(ud.names):
+            branch = branches[pos]
+            ud.branches[name] = branch
+            ud.unresolvedrev[name] = branch
+
+            shallow_depth = d.getVar("BB_GIT_SHALLOW_DEPTH_%s" % name)
+            if shallow_depth is not None:
+                try:
+                    shallow_depth = int(shallow_depth or 0)
+                except ValueError:
+                    raise bb.fetch2.FetchError("Invalid depth for BB_GIT_SHALLOW_DEPTH_%s: %s" % (name, shallow_depth))
+                else:
+                    if shallow_depth < 0:
+                        raise bb.fetch2.FetchError("Invalid depth for BB_GIT_SHALLOW_DEPTH_%s: %s" % (name, shallow_depth))
+                ud.shallow_depths[name] = shallow_depth
+
+            revs = d.getVar("BB_GIT_SHALLOW_REVS_%s" % name)
+            if revs is not None:
+                ud.shallow_revs.extend(revs.split())
+            elif revs_default is not None:
+                ud.shallow_revs.extend(revs_default.split())
+
+        if (ud.shallow and
+                not ud.shallow_revs and
+                all(ud.shallow_depths[n] == 0 for n in ud.names)):
             # Shallow disabled for this URL
             ud.shallow = False
@@ -244,9 +259,10 @@ class Git(FetchMethod):
             # rev of this repository. This will get resolved into a revision
             # later. If an actual revision happens to have also been provided
             # then this setting will be overridden.
-            ud.unresolvedrev = 'HEAD'
+            for name in ud.names:
+                ud.unresolvedrev[name] = 'HEAD'

-        ud.basecmd = d.getVar("FETCHCMD_git") or "git -c gc.autoDetach=false -c core.pager=cat -c safe.bareRepository=all -c clone.defaultRemoteName=origin"
+        ud.basecmd = d.getVar("FETCHCMD_git") or "git -c gc.autoDetach=false -c core.pager=cat -c safe.bareRepository=all"

         write_tarballs = d.getVar("BB_GENERATE_MIRROR_TARBALLS") or "0"
         ud.write_tarballs = write_tarballs != "0" or ud.rebaseable
@@ -254,11 +270,12 @@ class Git(FetchMethod):

         ud.setup_revisions(d)

-        # Ensure any revision that doesn't look like a SHA-1 is translated into one
-        if not sha1_re.match(ud.revision or ''):
-            if ud.revision:
-                ud.unresolvedrev = ud.revision
-            ud.revision = self.latest_revision(ud, d, ud.name)
+        for name in ud.names:
+            # Ensure any revision that doesn't look like a SHA-1 is translated into one
+            if not sha1_re.match(ud.revisions[name] or ''):
+                if ud.revisions[name]:
+                    ud.unresolvedrev[name] = ud.revisions[name]
+                ud.revisions[name] = self.latest_revision(ud, d, name)

         gitsrcname = '%s%s' % (ud.host.replace(':', '.'), ud.path.replace('/', '.').replace('*', '.').replace(' ','_').replace('(', '_').replace(')', '_'))
         if gitsrcname.startswith('.'):
@@ -269,7 +286,8 @@ class Git(FetchMethod):
         # upstream repo in the future, the mirror will remain intact and still
         # contain the revision
         if ud.rebaseable:
-            gitsrcname = gitsrcname + '_' + ud.revision
+            for name in ud.names:
+                gitsrcname = gitsrcname + '_' + ud.revisions[name]

         dl_dir = d.getVar("DL_DIR")
         gitdir = d.getVar("GITDIR") or (dl_dir + "/git2")
@@ -287,14 +305,15 @@ class Git(FetchMethod):
             if ud.shallow_revs:
                 tarballname = "%s_%s" % (tarballname, "_".join(sorted(ud.shallow_revs)))

-            tarballname = "%s_%s" % (tarballname, ud.revision[:7])
-            depth = ud.shallow_depths[ud.name]
-            if depth:
-                tarballname = "%s-%s" % (tarballname, depth)
+            for name, revision in sorted(ud.revisions.items()):
+                tarballname = "%s_%s" % (tarballname, ud.revisions[name][:7])
+                depth = ud.shallow_depths[name]
+                if depth:
+                    tarballname = "%s-%s" % (tarballname, depth)

             shallow_refs = []
             if not ud.nobranch:
-                shallow_refs.append(ud.branch)
+                shallow_refs.extend(ud.branches.values())
             if ud.shallow_extra_refs:
                 shallow_refs.extend(r.replace('refs/heads/', '').replace('*', 'ALL') for r in ud.shallow_extra_refs)
             if shallow_refs:
@@ -319,16 +338,18 @@ class Git(FetchMethod):
             return True
         if ud.shallow and ud.write_shallow_tarballs and self.clonedir_need_shallow_revs(ud, d):
             return True
-        if not self._contains_ref(ud, d, ud.name, ud.clonedir):
-            return True
+        for name in ud.names:
+            if not self._contains_ref(ud, d, name, ud.clonedir):
+                return True
         return False

     def lfs_need_update(self, ud, d):
         if self.clonedir_need_update(ud, d):
             return True

-        if not self._lfs_objects_downloaded(ud, d, ud.name, ud.clonedir):
-            return True
+        for name in ud.names:
+            if not self._lfs_objects_downloaded(ud, d, name, ud.clonedir):
+                return True
         return False

     def clonedir_need_shallow_revs(self, ud, d):
@@ -425,24 +446,6 @@ class Git(FetchMethod):
             if ud.proto.lower() != 'file':
                 bb.fetch2.check_network_access(d, clone_cmd, ud.url)
             progresshandler = GitProgressHandler(d)
-
-            # Try creating a fast initial shallow clone
-            # Enabling ud.shallow_skip_fast will skip this
-            # If the Git error "Server does not allow request for unadvertised object"
-            # occurs, shallow_skip_fast is enabled automatically.
-            # This may happen if the Git server does not allow the request
-            # or if the Git client has issues with this functionality.
-            if ud.shallow and not ud.shallow_skip_fast:
-                try:
-                    self.clone_shallow_with_tarball(ud, d)
-                    # When the shallow clone has succeeded, use the shallow tarball
-                    ud.localpath = ud.fullshallow
-                    return
-                except:
-                    logger.warning("Creating fast initial shallow clone failed, try initial regular clone now.")
-
-            # When skipping fast initial shallow or the fast inital shallow clone failed:
-            # Try again with an initial regular clone
             runfetchcmd(clone_cmd, d, log=progresshandler)

             # Update the checkout if needed
@@ -470,8 +473,9 @@ class Git(FetchMethod):
                 if exc.errno != errno.ENOENT:
                     raise

-        if not self._contains_ref(ud, d, ud.name, ud.clonedir):
-            raise bb.fetch2.FetchError("Unable to find revision %s in branch %s even from upstream" % (ud.revision, ud.branch))
+        for name in ud.names:
+            if not self._contains_ref(ud, d, name, ud.clonedir):
+                raise bb.fetch2.FetchError("Unable to find revision %s in branch %s even from upstream" % (ud.revisions[name], ud.branches[name]))

         if ud.shallow and ud.write_shallow_tarballs:
             missing_rev = self.clonedir_need_shallow_revs(ud, d)
@@ -504,167 +508,103 @@ class Git(FetchMethod):
         if os.path.exists(os.path.join(ud.destdir, ".git", "lfs")):
             runfetchcmd("tar -cf - lfs | tar -xf - -C %s" % ud.clonedir, d, workdir="%s/.git" % ud.destdir)

-    def lfs_fetch(self, ud, d, clonedir, revision, fetchall=False, progresshandler=None):
-        """Helper method for fetching Git LFS data"""
-        try:
-            if self._need_lfs(ud) and self._contains_lfs(ud, d, clonedir) and self._find_git_lfs(d) and len(revision):
-                # Using worktree with the revision because .lfsconfig may exists
-                worktree_add_cmd = "%s worktree add wt %s" % (ud.basecmd, revision)
-                runfetchcmd(worktree_add_cmd, d, log=progresshandler, workdir=clonedir)
-                lfs_fetch_cmd = "%s lfs fetch %s" % (ud.basecmd, "--all" if fetchall else "")
-                runfetchcmd(lfs_fetch_cmd, d, log=progresshandler, workdir=(clonedir + "/wt"))
-                worktree_rem_cmd = "%s worktree remove -f wt" % ud.basecmd
-                runfetchcmd(worktree_rem_cmd, d, log=progresshandler, workdir=clonedir)
-        except:
-            logger.warning("Fetching LFS did not succeed.")
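The removed helper relies on a throwaway git worktree so that `git lfs fetch` runs with the .lfsconfig belonging to the pinned revision, then discards the worktree again. A standalone sketch of the same sequence, assuming git and git-lfs are on PATH:

import subprocess

def lfs_fetch(clonedir, revision):
    # Check out a disposable worktree at the pinned revision.
    subprocess.run(["git", "worktree", "add", "wt", revision], cwd=clonedir, check=True)
    try:
        subprocess.run(["git", "lfs", "fetch"], cwd=clonedir + "/wt", check=True)
    finally:
        subprocess.run(["git", "worktree", "remove", "-f", "wt"], cwd=clonedir, check=True)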
-    @contextmanager
-    def create_atomic(self, filename):
-        """Create as a temp file and move atomically into position to avoid races"""
-        fd, tfile = tempfile.mkstemp(dir=os.path.dirname(filename))
-        try:
-            yield tfile
-            umask = os.umask(0o666)
-            os.umask(umask)
-            os.chmod(tfile, (0o666 & ~umask))
-            os.rename(tfile, filename)
-        finally:
-            os.close(fd)
-
     def build_mirror_data(self, ud, d):
+
+        # Create as a temp file and move atomically into position to avoid races
+        @contextmanager
+        def create_atomic(filename):
+            fd, tfile = tempfile.mkstemp(dir=os.path.dirname(filename))
+            try:
+                yield tfile
+                umask = os.umask(0o666)
+                os.umask(umask)
+                os.chmod(tfile, (0o666 & ~umask))
+                os.rename(tfile, filename)
+            finally:
+                os.close(fd)
+
         if ud.shallow and ud.write_shallow_tarballs:
             if not os.path.exists(ud.fullshallow):
                 if os.path.islink(ud.fullshallow):
                     os.unlink(ud.fullshallow)
-                self.clone_shallow_with_tarball(ud, d)
+                tempdir = tempfile.mkdtemp(dir=d.getVar('DL_DIR'))
+                shallowclone = os.path.join(tempdir, 'git')
+                try:
+                    self.clone_shallow_local(ud, shallowclone, d)
+
+                    logger.info("Creating tarball of git repository")
+                    with create_atomic(ud.fullshallow) as tfile:
+                        runfetchcmd("tar -czf %s ." % tfile, d, workdir=shallowclone)
+                    runfetchcmd("touch %s.done" % ud.fullshallow, d)
+                finally:
+                    bb.utils.remove(tempdir, recurse=True)
         elif ud.write_tarballs and not os.path.exists(ud.fullmirror):
             if os.path.islink(ud.fullmirror):
                 os.unlink(ud.fullmirror)

             logger.info("Creating tarball of git repository")
-            with self.create_atomic(ud.fullmirror) as tfile:
+            with create_atomic(ud.fullmirror) as tfile:
                 mtime = runfetchcmd("{} log --all -1 --format=%cD".format(ud.basecmd), d,
                         quiet=True, workdir=ud.clonedir)
                 runfetchcmd("tar -czf %s --owner oe:0 --group oe:0 --mtime \"%s\" ."
                         % (tfile, mtime), d, workdir=ud.clonedir)
             runfetchcmd("touch %s.done" % ud.fullmirror, d)
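Both create_atomic variants implement the classic atomic-publish idiom: write into a temp file in the destination directory, then rename() into place, so readers never observe a half-written tarball. Reduced to its core (our helper, not the fetcher's):

import os
import tempfile

def atomic_write(path, data):
    # The temp file must live on the same filesystem as the destination
    # for rename() to be atomic on POSIX.
    fd, tmp = tempfile.mkstemp(dir=os.path.dirname(path))
    try:
        os.write(fd, data)
        os.rename(tmp, path)
    finally:
        os.close(fd)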
-    def clone_shallow_with_tarball(self, ud, d):
-        ret = False
-        tempdir = tempfile.mkdtemp(dir=d.getVar('DL_DIR'))
-        shallowclone = os.path.join(tempdir, 'git')
-        try:
-            try:
-                self.clone_shallow_local(ud, shallowclone, d)
-            except:
-                logger.warning("Fash shallow clone failed, try to skip fast mode now.")
-                bb.utils.remove(tempdir, recurse=True)
-                os.mkdir(tempdir)
-                ud.shallow_skip_fast = True
-                self.clone_shallow_local(ud, shallowclone, d)
-            logger.info("Creating tarball of git repository")
-            with self.create_atomic(ud.fullshallow) as tfile:
-                runfetchcmd("tar -czf %s ." % tfile, d, workdir=shallowclone)
-            runfetchcmd("touch %s.done" % ud.fullshallow, d)
-            ret = True
-        finally:
-            bb.utils.remove(tempdir, recurse=True)
-
-        return ret
     def clone_shallow_local(self, ud, dest, d):
-        """
-        Shallow fetch from ud.clonedir (${DL_DIR}/git2/<gitrepo> by default):
-        - For BB_GIT_SHALLOW_DEPTH: git fetch --depth <depth> rev
-        - For BB_GIT_SHALLOW_REVS: git fetch --shallow-exclude=<revs> rev
-        """
-        progresshandler = GitProgressHandler(d)
-        repourl = self._get_repo_url(ud)
-        bb.utils.mkdirhier(dest)
-        init_cmd = "%s init -q" % ud.basecmd
-        if ud.bareclone:
-            init_cmd += " --bare"
-        runfetchcmd(init_cmd, d, workdir=dest)
-        # Use repourl when creating a fast initial shallow clone
-        # Prefer already existing full bare clones if available
-        if not ud.shallow_skip_fast and not os.path.exists(ud.clonedir):
-            remote = shlex.quote(repourl)
-        else:
-            remote = ud.clonedir
-        runfetchcmd("%s remote add origin %s" % (ud.basecmd, remote), d, workdir=dest)
-
-        # Check the histories which should be excluded
-        shallow_exclude = ''
-        for revision in ud.shallow_revs:
-            shallow_exclude += " --shallow-exclude=%s" % revision
-
-        revision = ud.revision
-        depth = ud.shallow_depths[ud.name]
-
-        # The --depth and --shallow-exclude can't be used together
-        if depth and shallow_exclude:
-            raise bb.fetch2.FetchError("BB_GIT_SHALLOW_REVS is set, but BB_GIT_SHALLOW_DEPTH is not 0.")
-
-        # For nobranch, we need a ref, otherwise the commits will be
-        # removed, and for non-nobranch, we truncate the branch to our
-        # srcrev, to avoid keeping unnecessary history beyond that.
-        branch = ud.branch
-        if ud.nobranch:
-            ref = "refs/shallow/%s" % ud.name
-        elif ud.bareclone:
-            ref = "refs/heads/%s" % branch
-        else:
-            ref = "refs/remotes/origin/%s" % branch
-
-        fetch_cmd = "%s fetch origin %s" % (ud.basecmd, revision)
-        if depth:
-            fetch_cmd += " --depth %s" % depth
-
-        if shallow_exclude:
-            fetch_cmd += shallow_exclude
-
-        # Advertise the revision for lower version git such as 2.25.1:
-        # error: Server does not allow request for unadvertised object.
-        # The ud.clonedir is a local temporary dir, will be removed when
-        # fetch is done, so we can do anything on it.
-        adv_cmd = 'git branch -f advertise-%s %s' % (revision, revision)
-        if ud.shallow_skip_fast:
-            runfetchcmd(adv_cmd, d, workdir=ud.clonedir)
-
-        runfetchcmd(fetch_cmd, d, workdir=dest)
-        runfetchcmd("%s update-ref %s %s" % (ud.basecmd, ref, revision), d, workdir=dest)
-        # Fetch Git LFS data for fast shallow clones
-        if not ud.shallow_skip_fast:
-            self.lfs_fetch(ud, d, dest, ud.revision)
+        """Clone the repo and make it shallow.
+
+        The upstream url of the new clone isn't set at this time, as it'll be
+        set correctly when unpacked."""
+        runfetchcmd("%s clone %s %s %s" % (ud.basecmd, ud.cloneflags, ud.clonedir, dest), d)
+
+        to_parse, shallow_branches = [], []
+        for name in ud.names:
+            revision = ud.revisions[name]
+            depth = ud.shallow_depths[name]
+            if depth:
+                to_parse.append('%s~%d^{}' % (revision, depth - 1))
+
+            # For nobranch, we need a ref, otherwise the commits will be
+            # removed, and for non-nobranch, we truncate the branch to our
+            # srcrev, to avoid keeping unnecessary history beyond that.
+            branch = ud.branches[name]
+            if ud.nobranch:
+                ref = "refs/shallow/%s" % name
+            elif ud.bareclone:
+                ref = "refs/heads/%s" % branch
+            else:
+                ref = "refs/remotes/origin/%s" % branch
+
+            shallow_branches.append(ref)
+            runfetchcmd("%s update-ref %s %s" % (ud.basecmd, ref, revision), d, workdir=dest)
+
+        # Map srcrev+depths to revisions
+        parsed_depths = runfetchcmd("%s rev-parse %s" % (ud.basecmd, " ".join(to_parse)), d, workdir=dest)
+
+        # Resolve specified revisions
+        parsed_revs = runfetchcmd("%s rev-parse %s" % (ud.basecmd, " ".join('"%s^{}"' % r for r in ud.shallow_revs)), d, workdir=dest)
+        shallow_revisions = parsed_depths.splitlines() + parsed_revs.splitlines()

         # Apply extra ref wildcards
-        all_refs_remote = runfetchcmd("%s ls-remote origin 'refs/*'" % ud.basecmd, \
-                        d, workdir=dest).splitlines()
-        all_refs = []
-        for line in all_refs_remote:
-            all_refs.append(line.split()[-1])
-        extra_refs = []
-        if 'tag' in ud.parm:
-            extra_refs.append(ud.parm['tag'])
+        all_refs = runfetchcmd('%s for-each-ref "--format=%%(refname)"' % ud.basecmd,
+                               d, workdir=dest).splitlines()
         for r in ud.shallow_extra_refs:
             if not ud.bareclone:
                 r = r.replace('refs/heads/', 'refs/remotes/origin/')

             if '*' in r:
                 matches = filter(lambda a: fnmatch.fnmatchcase(a, r), all_refs)
-                extra_refs.extend(matches)
+                shallow_branches.extend(matches)
             else:
-                extra_refs.append(r)
+                shallow_branches.append(r)

-        for ref in extra_refs:
-            ref_fetch = os.path.basename(ref)
-            runfetchcmd("%s fetch origin --depth 1 %s" % (ud.basecmd, ref_fetch), d, workdir=dest)
-            revision = runfetchcmd("%s rev-parse FETCH_HEAD" % ud.basecmd, d, workdir=dest)
-            runfetchcmd("%s update-ref %s %s" % (ud.basecmd, ref, revision), d, workdir=dest)
-
-        # The url is local ud.clonedir, set it to upstream one
-        runfetchcmd("%s remote set-url origin %s" % (ud.basecmd, shlex.quote(repourl)), d, workdir=dest)
+        # Make the repository shallow
+        shallow_cmd = [self.make_shallow_path, '-s']
+        for b in shallow_branches:
+            shallow_cmd.append('-r')
+            shallow_cmd.append(b)
+        shallow_cmd.extend(shallow_revisions)
+        runfetchcmd(subprocess.list2cmdline(shallow_cmd), d, workdir=dest)

     def unpack(self, ud, destdir, d):
         """ unpack the downloaded src to destdir"""
@@ -724,14 +664,6 @@ class Git(FetchMethod):
         if not source_found:
             raise bb.fetch2.UnpackError("No up to date source found: " + "; ".join(source_error), ud.url)

-        # If there is a tag parameter in the url and we also have a fixed srcrev, check the tag
-        # matches the revision
-        if 'tag' in ud.parm and sha1_re.match(ud.revision):
-            output = runfetchcmd("%s rev-list -n 1 %s" % (ud.basecmd, ud.parm['tag']), d, workdir=destdir)
-            output = output.strip()
-            if output != ud.revision:
-                raise bb.fetch2.FetchError("The revision the git tag '%s' resolved to didn't match the SRCREV in use (%s vs %s)" % (ud.parm['tag'], output, ud.revision), ud.url)
-
         repourl = self._get_repo_url(ud)
         runfetchcmd("%s remote set-url origin %s" % (ud.basecmd, shlex.quote(repourl)), d, workdir=destdir)
@@ -745,17 +677,17 @@ class Git(FetchMethod):

         if not ud.nocheckout:
             if subpath:
-                runfetchcmd("%s read-tree %s%s" % (ud.basecmd, ud.revision, readpathspec), d,
+                runfetchcmd("%s read-tree %s%s" % (ud.basecmd, ud.revisions[ud.names[0]], readpathspec), d,
                             workdir=destdir)
                 runfetchcmd("%s checkout-index -q -f -a" % ud.basecmd, d, workdir=destdir)
             elif not ud.nobranch:
-                branchname = ud.branch
+                branchname = ud.branches[ud.names[0]]
                 runfetchcmd("%s checkout -B %s %s" % (ud.basecmd, branchname, \
-                            ud.revision), d, workdir=destdir)
+                            ud.revisions[ud.names[0]]), d, workdir=destdir)
                 runfetchcmd("%s branch %s --set-upstream-to origin/%s" % (ud.basecmd, branchname, \
                             branchname), d, workdir=destdir)
             else:
-                runfetchcmd("%s checkout %s" % (ud.basecmd, ud.revision), d, workdir=destdir)
+                runfetchcmd("%s checkout %s" % (ud.basecmd, ud.revisions[ud.names[0]]), d, workdir=destdir)

         return True
@@ -769,13 +701,8 @@ class Git(FetchMethod):
         clonedir = os.path.realpath(ud.localpath)
         to_remove.append(clonedir)

-        # Remove shallow mirror tarball
-        if ud.shallow:
-            to_remove.append(ud.fullshallow)
-            to_remove.append(ud.fullshallow + ".done")
-
         for r in to_remove:
-            if os.path.exists(r) or os.path.islink(r):
+            if os.path.exists(r):
                 bb.note('Removing %s' % r)
                 bb.utils.remove(r, True)
@@ -786,10 +713,10 @@ class Git(FetchMethod):
         cmd = ""
         if ud.nobranch:
             cmd = "%s log --pretty=oneline -n 1 %s -- 2> /dev/null | wc -l" % (
-                ud.basecmd, ud.revision)
+                ud.basecmd, ud.revisions[name])
         else:
             cmd = "%s branch --contains %s --list %s 2> /dev/null | wc -l" % (
-                ud.basecmd, ud.revision, ud.branch)
+                ud.basecmd, ud.revisions[name], ud.branches[name])
         try:
             output = runfetchcmd(cmd, d, quiet=True, workdir=wd)
         except bb.fetch2.FetchError:
@@ -810,7 +737,7 @@ class Git(FetchMethod):
         # existence.
         # [1] https://github.com/git-lfs/git-lfs/blob/main/docs/spec.md#intercepting-git
         cmd = "%s lfs ls-files -l %s" \
-            % (ud.basecmd, ud.revision)
+            % (ud.basecmd, ud.revisions[name])
         output = runfetchcmd(cmd, d, quiet=True, workdir=wd).rstrip()
         # Do not do any further matching if no objects are managed by LFS
         if not output:
@@ -837,12 +764,12 @@ class Git(FetchMethod):

         if ud.nobranch:
             # If no branch is specified, use the current git commit
-            refname = self._build_revision(ud, d, ud.name)
+            refname = self._build_revision(ud, d, ud.names[0])
         elif wd == ud.clonedir:
             # The bare clonedir doesn't use the remote names; it has the branch immediately.
-            refname = ud.branch
+            refname = ud.branches[ud.names[0]]
         else:
-            refname = "origin/%s" % ud.branch
+            refname = "origin/%s" % ud.branches[ud.names[0]]

         cmd = "%s grep lfs %s:.gitattributes | wc -l" % (
             ud.basecmd, refname)
@@ -859,6 +786,7 @@ class Git(FetchMethod):
         """
         Return True if git-lfs can be found, False otherwise.
         """
+        import shutil
         return shutil.which("git-lfs", path=d.getVar('PATH')) is not None

     def _get_repo_url(self, ud):
@@ -874,14 +802,14 @@ class Git(FetchMethod):
             username = ud.user + '@'
         else:
             username = ""
-        return "%s://%s%s%s" % (ud.proto, username, ud.host, urllib.parse.quote(ud.path))
+        return "%s://%s%s%s" % (ud.proto, username, ud.host, ud.path)

     def _revision_key(self, ud, d, name):
         """
         Return a unique key for the url
         """
         # Collapse adjacent slashes
-        return "git:" + ud.host + slash_re.sub(".", ud.path) + ud.unresolvedrev
+        return "git:" + ud.host + slash_re.sub(".", ud.path) + ud.unresolvedrev[name]

     def _lsremote(self, ud, d, search):
         """
@@ -914,26 +842,26 @@ class Git(FetchMethod):
         Compute the HEAD revision for the url
         """
         if not d.getVar("__BBSRCREV_SEEN"):
-            raise bb.fetch2.FetchError("Recipe uses a floating tag/branch '%s' for repo '%s' without a fixed SRCREV yet doesn't call bb.fetch2.get_srcrev() (use SRCPV in PV for OE)." % (ud.unresolvedrev, ud.host+ud.path))
+            raise bb.fetch2.FetchError("Recipe uses a floating tag/branch '%s' for repo '%s' without a fixed SRCREV yet doesn't call bb.fetch2.get_srcrev() (use SRCPV in PV for OE)." % (ud.unresolvedrev[name], ud.host+ud.path))

         # Ensure we mark as not cached
         bb.fetch2.mark_recipe_nocache(d)

         output = self._lsremote(ud, d, "")
         # Tags of the form ^{} may not work, need to fallback to other form
-        if ud.unresolvedrev[:5] == "refs/" or ud.usehead:
-            head = ud.unresolvedrev
-            tag = ud.unresolvedrev
+        if ud.unresolvedrev[name][:5] == "refs/" or ud.usehead:
+            head = ud.unresolvedrev[name]
+            tag = ud.unresolvedrev[name]
         else:
-            head = "refs/heads/%s" % ud.unresolvedrev
-            tag = "refs/tags/%s" % ud.unresolvedrev
+            head = "refs/heads/%s" % ud.unresolvedrev[name]
+            tag = "refs/tags/%s" % ud.unresolvedrev[name]
         for s in [head, tag + "^{}", tag]:
             for l in output.strip().split('\n'):
                 sha1, ref = l.split()
                 if s == ref:
                     return sha1
         raise bb.fetch2.FetchError("Unable to resolve '%s' in upstream git repository in git ls-remote output for %s" % \
-            (ud.unresolvedrev, ud.host+ud.path))
+            (ud.unresolvedrev[name], ud.host+ud.path))

     def latest_versionstring(self, ud, d):
         """
@@ -984,7 +912,7 @@ class Git(FetchMethod):
         return pupver

     def _build_revision(self, ud, d, name):
-        return ud.revision
+        return ud.revisions[name]

     def gitpkgv_revision(self, ud, d, name):
         """
@@ -998,8 +926,9 @@ class Git(FetchMethod):
             commits = None
         else:
             if not os.path.exists(rev_file) or not os.path.getsize(rev_file):
+                from pipes import quote
                 commits = bb.fetch2.runfetchcmd(
-                        "git rev-list %s -- | wc -l" % shlex.quote(rev),
+                        "git rev-list %s -- | wc -l" % quote(rev),
                         d, quiet=True).strip().lstrip('0')
                 if commits:
                     open(rev_file, "w").write("%d\n" % int(commits))
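The pipes.quote on one side and shlex.quote on the other are the same shell-quoting function; pipes merely re-exported it before the pipes module was removed in Python 3.13. For example:

import shlex

print(shlex.quote("rev; rm -rf /"))  # 'rev; rm -rf /'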
bitbake/lib/bb/fetch2/gitsm.py
@@ -62,35 +62,36 @@ class GitSM(Git):
         return modules

     # Collect the defined submodules, and their attributes
-    try:
-        gitmodules = runfetchcmd("%s show %s:.gitmodules" % (ud.basecmd, ud.revision), d, quiet=True, workdir=workdir)
-    except:
-        # No submodules to update
-        gitmodules = ""
-
-    for m, md in parse_gitmodules(gitmodules).items():
-        try:
-            module_hash = runfetchcmd("%s ls-tree -z -d %s %s" % (ud.basecmd, ud.revision, md['path']), d, quiet=True, workdir=workdir)
-        except:
-            # If the command fails, we don't have a valid file to check. If it doesn't
-            # fail -- it still might be a failure, see next check...
-            module_hash = ""
-
-        if not module_hash:
-            logger.debug("submodule %s is defined, but is not initialized in the repository. Skipping", m)
-            continue
-
-        submodules.append(m)
-        paths[m] = md['path']
-        revision[m] = ud.revision
-        uris[m] = md['url']
-        subrevision[m] = module_hash.split()[2]
-
-        # Convert relative to absolute uri based on parent uri
-        if uris[m].startswith('..') or uris[m].startswith('./'):
-            newud = copy.copy(ud)
-            newud.path = os.path.normpath(os.path.join(newud.path, uris[m]))
-            uris[m] = Git._get_repo_url(self, newud)
+    for name in ud.names:
+        try:
+            gitmodules = runfetchcmd("%s show %s:.gitmodules" % (ud.basecmd, ud.revisions[name]), d, quiet=True, workdir=workdir)
+        except:
+            # No submodules to update
+            continue
+
+        for m, md in parse_gitmodules(gitmodules).items():
+            try:
+                module_hash = runfetchcmd("%s ls-tree -z -d %s %s" % (ud.basecmd, ud.revisions[name], md['path']), d, quiet=True, workdir=workdir)
+            except:
+                # If the command fails, we don't have a valid file to check. If it doesn't
+                # fail -- it still might be a failure, see next check...
+                module_hash = ""
+
+            if not module_hash:
+                logger.debug("submodule %s is defined, but is not initialized in the repository. Skipping", m)
+                continue
+
+            submodules.append(m)
+            paths[m] = md['path']
+            revision[m] = ud.revisions[name]
+            uris[m] = md['url']
+            subrevision[m] = module_hash.split()[2]
+
+            # Convert relative to absolute uri based on parent uri
+            if uris[m].startswith('..') or uris[m].startswith('./'):
+                newud = copy.copy(ud)
+                newud.path = os.path.normpath(os.path.join(newud.path, uris[m]))
+                uris[m] = Git._get_repo_url(self, newud)

     for module in submodules:
         # Translate the module url into a SRC_URI
@@ -146,22 +147,6 @@ class GitSM(Git):
|
||||
|
||||
return submodules != []
|
||||
|
||||
def call_process_submodules(self, ud, d, extra_check, subfunc):
|
||||
# If we're using a shallow mirror tarball it needs to be
|
||||
# unpacked temporarily so that we can examine the .gitmodules file
|
||||
# Unpack even when ud.clonedir is not available,
|
||||
# which may occur during a fast shallow clone
|
||||
unpack = extra_check or not os.path.exists(ud.clonedir)
|
||||
if ud.shallow and os.path.exists(ud.fullshallow) and unpack:
|
||||
tmpdir = tempfile.mkdtemp(dir=d.getVar("DL_DIR"))
|
||||
try:
|
||||
runfetchcmd("tar -xzf %s" % ud.fullshallow, d, workdir=tmpdir)
|
||||
self.process_submodules(ud, tmpdir, subfunc, d)
|
||||
finally:
|
||||
shutil.rmtree(tmpdir)
|
||||
else:
|
||||
self.process_submodules(ud, ud.clonedir, subfunc, d)
|
||||
|
||||
def need_update(self, ud, d):
|
||||
if Git.need_update(self, ud, d):
|
||||
return True
|
||||
@@ -179,7 +164,15 @@ class GitSM(Git):
|
||||
logger.error('gitsm: submodule update check failed: %s %s' % (type(e).__name__, str(e)))
|
||||
need_update_result = True
|
||||
|
||||
self.call_process_submodules(ud, d, not os.path.exists(ud.clonedir), need_update_submodule)
|
||||
# If we're using a shallow mirror tarball it needs to be unpacked
|
||||
# temporarily so that we can examine the .gitmodules file
|
||||
if ud.shallow and os.path.exists(ud.fullshallow) and not os.path.exists(ud.clonedir):
|
||||
tmpdir = tempfile.mkdtemp(dir=d.getVar("DL_DIR"))
|
||||
runfetchcmd("tar -xzf %s" % ud.fullshallow, d, workdir=tmpdir)
|
||||
self.process_submodules(ud, tmpdir, need_update_submodule, d)
|
||||
shutil.rmtree(tmpdir)
|
||||
else:
|
||||
self.process_submodules(ud, ud.clonedir, need_update_submodule, d)
|
||||
|
||||
if need_update_list:
|
||||
logger.debug('gitsm: Submodules requiring update: %s' % (' '.join(need_update_list)))
|
||||
@@ -202,7 +195,16 @@ class GitSM(Git):
|
||||
raise
|
||||
|
||||
Git.download(self, ud, d)
|
||||
self.call_process_submodules(ud, d, self.need_update(ud, d), download_submodule)
|
||||
|
||||
# If we're using a shallow mirror tarball it needs to be unpacked
|
||||
# temporarily so that we can examine the .gitmodules file
|
||||
if ud.shallow and os.path.exists(ud.fullshallow) and self.need_update(ud, d):
|
||||
tmpdir = tempfile.mkdtemp(dir=d.getVar("DL_DIR"))
|
||||
runfetchcmd("tar -xzf %s" % ud.fullshallow, d, workdir=tmpdir)
|
||||
self.process_submodules(ud, tmpdir, download_submodule, d)
|
||||
shutil.rmtree(tmpdir)
|
||||
else:
|
||||
self.process_submodules(ud, ud.clonedir, download_submodule, d)
|
||||
|
||||
def unpack(self, ud, destdir, d):
|
||||
def unpack_submodules(ud, url, module, modpath, workdir, d):
|
||||
@@ -251,22 +253,9 @@ class GitSM(Git):
|
||||
# should also be skipped as these files were already smudged in the fetch stage if lfs
|
||||
# was enabled.
|
||||
runfetchcmd("GIT_LFS_SKIP_SMUDGE=1 %s submodule update --recursive --no-fetch" % (ud.basecmd), d, quiet=True, workdir=ud.destdir)
|
||||
def clean(self, ud, d):
|
||||
def clean_submodule(ud, url, module, modpath, workdir, d):
|
||||
url += ";bareclone=1;nobranch=1"
|
||||
try:
|
||||
newfetch = Fetch([url], d, cache=False)
|
||||
newfetch.clean()
|
||||
except Exception as e:
|
||||
logger.warning('gitsm: submodule clean failed: %s %s' % (type(e).__name__, str(e)))
|
||||
|
||||
self.call_process_submodules(ud, d, True, clean_submodule)
|
||||
|
||||
# Clean top git dir
|
||||
Git.clean(self, ud, d)
|
||||
|
||||
def implicit_urldata(self, ud, d):
|
||||
import subprocess
|
||||
import shutil, subprocess, tempfile
|
||||
|
||||
urldata = []
|
||||
def add_submodule(ud, url, module, modpath, workdir, d):
|
||||
@@ -274,6 +263,14 @@ class GitSM(Git):
|
||||
newfetch = Fetch([url], d, cache=False)
|
||||
urldata.extend(newfetch.expanded_urldata())
|
||||
|
||||
self.call_process_submodules(ud, d, ud.method.need_update(ud, d), add_submodule)
|
||||
# If we're using a shallow mirror tarball it needs to be unpacked
|
||||
# temporarily so that we can examine the .gitmodules file
|
||||
if ud.shallow and os.path.exists(ud.fullshallow) and ud.method.need_update(ud, d):
|
||||
tmpdir = tempfile.mkdtemp(dir=d.getVar("DL_DIR"))
|
||||
subprocess.check_call("tar -xzf %s" % ud.fullshallow, cwd=tmpdir, shell=True)
|
||||
self.process_submodules(ud, tmpdir, add_submodule, d)
|
||||
shutil.rmtree(tmpdir)
|
||||
else:
|
||||
self.process_submodules(ud, ud.clonedir, add_submodule, d)
|
||||
|
||||
return urldata
|
||||
|
||||
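The call_process_submodules() helper above consolidates the shallow-tarball handling that the scarthgap side repeats in need_update(), download() and implicit_urldata(): unpack the mirror tarball to a temporary directory, walk .gitmodules there, then clean up. A standalone sketch of that pattern, with a hypothetical tar file and an arbitrary work(dir) callback (both invented for the example):

import shutil
import subprocess
import tempfile

def with_unpacked_shallow(fullshallow, dl_dir, work):
    """Run work(tmpdir) against a temporarily unpacked shallow tarball.
    Minimal sketch of the pattern used by call_process_submodules()."""
    tmpdir = tempfile.mkdtemp(dir=dl_dir)
    try:
        # stands in for: runfetchcmd("tar -xzf %s" % ud.fullshallow, d, workdir=tmpdir)
        subprocess.check_call(["tar", "-xzf", fullshallow], cwd=tmpdir)
        return work(tmpdir)
    finally:
        shutil.rmtree(tmpdir)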
@@ -1,273 +0,0 @@
"""
BitBake 'Fetch' implementation for Go modules

The gomod/gomodgit fetchers are used to download Go modules to the module cache
from a module proxy or directly from a version control repository.

Example SRC_URI:

SRC_URI += "gomod://golang.org/x/net;version=v0.9.0;sha256sum=..."
SRC_URI += "gomodgit://golang.org/x/net;version=v0.9.0;repo=go.googlesource.com/net;srcrev=..."

Required SRC_URI parameters:

- version
    The version of the module.

Optional SRC_URI parameters:

- mod
    Fetch and unpack the go.mod file only instead of the complete module.
    The go command may need to download go.mod files for many different modules
    when computing the build list, and go.mod files are much smaller than
    module zip files.
    The default is "0", set mod=1 for the go.mod file only.

- sha256sum
    The checksum of the module zip file, or the go.mod file in case of fetching
    only the go.mod file. Alternatively, set the SRC_URI variable flag for
    "module@version.sha256sum".

- protocol
    The method used when fetching directly from a version control repository.
    The default is "https" for git.

- repo
    The URL when fetching directly from a version control repository. Required
    when the URL is different from the module path.

- srcrev
    The revision identifier used when fetching directly from a version control
    repository. Alternatively, set the SRCREV variable for "module@version".

- subdir
    The module subdirectory when fetching directly from a version control
    repository. Required when the module is not located in the root of the
    repository.

Related variables:

- GO_MOD_PROXY
    The module proxy used by the fetcher.

- GO_MOD_CACHE_DIR
    The directory where the module cache is located.
    This must match the exported GOMODCACHE variable for the go command to find
    the downloaded modules.

See the Go modules reference, https://go.dev/ref/mod, for more information
about the module cache, module proxies and version control systems.
"""

import hashlib
import os
import re
import shutil
import subprocess
import zipfile

import bb
from bb.fetch2 import FetchError
from bb.fetch2 import MissingParameterError
from bb.fetch2 import runfetchcmd
from bb.fetch2 import subprocess_setup
from bb.fetch2.git import Git
from bb.fetch2.wget import Wget


def escape(path):
    """Escape capital letters using exclamation points."""
    return re.sub(r'([A-Z])', lambda m: '!' + m.group(1).lower(), path)
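escape() implements the Go module path escaping used throughout the cache layout: every capital letter becomes '!' followed by its lowercase form, so paths stay unambiguous on case-insensitive filesystems. For example (module paths here are illustrative):

assert escape("github.com/Azure/azure-sdk-for-go") == "github.com/!azure/azure-sdk-for-go"
assert escape("golang.org/x/net") == "golang.org/x/net"  # no capitals, unchanged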
class GoMod(Wget):
    """Class to fetch Go modules from a Go module proxy via wget"""

    def supports(self, ud, d):
        """Check to see if a given URL is for this fetcher."""
        return ud.type == 'gomod'

    def urldata_init(self, ud, d):
        """Set up to download the module from the module proxy.

        Set up to download the module zip file to the module cache directory
        and unpack the go.mod file (unless downloading only the go.mod file):

        cache/download/<module>/@v/<version>.zip: The module zip file.
        cache/download/<module>/@v/<version>.mod: The go.mod file.
        """

        proxy = d.getVar('GO_MOD_PROXY') or 'proxy.golang.org'
        moddir = d.getVar('GO_MOD_CACHE_DIR') or 'pkg/mod'

        if 'version' not in ud.parm:
            raise MissingParameterError('version', ud.url)

        module = ud.host
        if ud.path != '/':
            module += ud.path
        ud.parm['module'] = module
        version = ud.parm['version']

        # Set URL and filename for wget download
        if ud.parm.get('mod', '0') == '1':
            ext = '.mod'
        else:
            ext = '.zip'
        path = escape(f"{module}/@v/{version}{ext}")
        ud.url = bb.fetch2.encodeurl(
            ('https', proxy, '/' + path, None, None, None))
        ud.parm['downloadfilename'] = f"{module.replace('/', '.')}@{version}{ext}"

        # Set name for checksum verification
        ud.parm['name'] = f"{module}@{version}"

        # Set path for unpack
        ud.parm['unpackpath'] = os.path.join(moddir, 'cache/download', path)

        super().urldata_init(ud, d)

    def unpack(self, ud, rootdir, d):
        """Unpack the module in the module cache."""

        # Unpack the module zip file or go.mod file
        unpackpath = os.path.join(rootdir, ud.parm['unpackpath'])
        unpackdir = os.path.dirname(unpackpath)
        bb.utils.mkdirhier(unpackdir)
        ud.unpack_tracer.unpack("file-copy", unpackdir)
        cmd = f"cp {ud.localpath} {unpackpath}"
        path = d.getVar('PATH')
        if path:
            cmd = f"PATH={path} {cmd}"
        name = os.path.basename(unpackpath)
        bb.note(f"Unpacking {name} to {unpackdir}/")
        subprocess.check_call(cmd, shell=True, preexec_fn=subprocess_setup)

        if name.endswith('.zip'):
            # Unpack the go.mod file from the zip file
            module = ud.parm['module']
            name = name.rsplit('.', 1)[0] + '.mod'
            bb.note(f"Unpacking {name} to {unpackdir}/")
            with zipfile.ZipFile(ud.localpath) as zf:
                with open(os.path.join(unpackdir, name), mode='wb') as mf:
                    try:
                        f = module + '@' + ud.parm['version'] + '/go.mod'
                        shutil.copyfileobj(zf.open(f), mf)
                    except KeyError:
                        # If the module does not have a go.mod file, synthesize
                        # one containing only a module statement.
                        mf.write(f'module {module}\n'.encode())


class GoModGit(Git):
    """Class to fetch Go modules directly from a git repository"""

    def supports(self, ud, d):
        """Check to see if a given URL is for this fetcher."""
        return ud.type == 'gomodgit'

    def urldata_init(self, ud, d):
        """Set up to download the module from the git repository.

        Set up to download the git repository to the module cache directory and
        unpack the module zip file and the go.mod file:

        cache/vcs/<hash>: The bare git repository.
        cache/download/<module>/@v/<version>.zip: The module zip file.
        cache/download/<module>/@v/<version>.mod: The go.mod file.
        """

        moddir = d.getVar('GO_MOD_CACHE_DIR') or 'pkg/mod'

        if 'version' not in ud.parm:
            raise MissingParameterError('version', ud.url)

        module = ud.host
        if ud.path != '/':
            module += ud.path
        ud.parm['module'] = module

        # Set host, path and srcrev for git download
        if 'repo' in ud.parm:
            repo = ud.parm['repo']
            idx = repo.find('/')
            if idx != -1:
                ud.host = repo[:idx]
                ud.path = repo[idx:]
            else:
                ud.host = repo
                ud.path = ''
        if 'protocol' not in ud.parm:
            ud.parm['protocol'] = 'https'
        ud.name = f"{module}@{ud.parm['version']}"
        srcrev = d.getVar('SRCREV_' + ud.name)
        if srcrev:
            if 'srcrev' not in ud.parm:
                ud.parm['srcrev'] = srcrev
        else:
            if 'srcrev' in ud.parm:
                d.setVar('SRCREV_' + ud.name, ud.parm['srcrev'])
        if 'branch' not in ud.parm:
            ud.parm['nobranch'] = '1'

        # Set subpath, subdir and bareclone for git unpack
        if 'subdir' in ud.parm:
            ud.parm['subpath'] = ud.parm['subdir']
        key = f"git3:{ud.parm['protocol']}://{ud.host}{ud.path}".encode()
        ud.parm['key'] = key
        ud.parm['subdir'] = os.path.join(moddir, 'cache/vcs',
                                         hashlib.sha256(key).hexdigest())
        ud.parm['bareclone'] = '1'

        super().urldata_init(ud, d)

    def unpack(self, ud, rootdir, d):
        """Unpack the module in the module cache."""

        # Unpack the bare git repository
        super().unpack(ud, rootdir, d)

        moddir = d.getVar('GO_MOD_CACHE_DIR') or 'pkg/mod'

        # Create the info file
        module = ud.parm['module']
        repodir = os.path.join(rootdir, ud.parm['subdir'])
        with open(repodir + '.info', 'wb') as f:
            f.write(ud.parm['key'])

        # Unpack the go.mod file from the repository
        unpackdir = os.path.join(rootdir, moddir, 'cache/download',
                                 escape(module), '@v')
        bb.utils.mkdirhier(unpackdir)
        srcrev = ud.parm['srcrev']
        version = ud.parm['version']
        escaped_version = escape(version)
        cmd = f"git ls-tree -r --name-only '{srcrev}'"
        if 'subpath' in ud.parm:
            cmd += f" '{ud.parm['subpath']}'"
        files = runfetchcmd(cmd, d, workdir=repodir).split()
        name = escaped_version + '.mod'
        bb.note(f"Unpacking {name} to {unpackdir}/")
        with open(os.path.join(unpackdir, name), mode='wb') as mf:
            f = 'go.mod'
            if 'subpath' in ud.parm:
                f = os.path.join(ud.parm['subpath'], f)
            if f in files:
                cmd = ['git', 'cat-file', 'blob', srcrev + ':' + f]
                subprocess.check_call(cmd, stdout=mf, cwd=repodir,
                                      preexec_fn=subprocess_setup)
            else:
                # If the module does not have a go.mod file, synthesize one
                # containing only a module statement.
                mf.write(f'module {module}\n'.encode())

        # Synthesize the module zip file from the repository
        name = escaped_version + '.zip'
        bb.note(f"Unpacking {name} to {unpackdir}/")
        with zipfile.ZipFile(os.path.join(unpackdir, name), mode='w') as zf:
            prefix = module + '@' + version + '/'
            for f in files:
                cmd = ['git', 'cat-file', 'blob', srcrev + ':' + f]
                data = subprocess.check_output(cmd, cwd=repodir,
                                               preexec_fn=subprocess_setup)
                zf.writestr(prefix + f, data)
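For gomodgit, the bare clone directory under cache/vcs/ is named after the SHA-256 of the 'git3:' key built above from protocol, host and path. The directory name for a given repository can be reproduced directly (repository URL illustrative):

import hashlib

key = b"git3:https://go.googlesource.com/net"
print(hashlib.sha256(key).hexdigest())  # basename of the clone dir under pkg/mod/cache/vcs/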
@@ -29,10 +29,11 @@ class Local(FetchMethod):

    def urldata_init(self, ud, d):
        # We don't set localfile as for this fetcher the file is already local!
        ud.basename = os.path.basename(ud.path)
        ud.basepath = ud.path
        ud.decodedurl = urllib.parse.unquote(ud.url.split("://")[1].split(";")[0])
        ud.basename = os.path.basename(ud.decodedurl)
        ud.basepath = ud.decodedurl
        ud.needdonestamp = False
        if "*" in ud.path:
        if "*" in ud.decodedurl:
            raise bb.fetch2.ParameterError("file:// urls using globbing are no longer supported. Please place the files in a directory and reference that instead.", ud.url)
        return

@@ -47,7 +48,7 @@ class Local(FetchMethod):
        Return the local filename of a given url assuming a successful fetch.
        """
        searched = []
        path = urldata.path
        path = urldata.decodedurl
        newpath = path
        if path[0] == "/":
            logger.debug2("Using absolute %s" % (path))
@@ -42,12 +42,11 @@ from bb.utils import is_semver

def npm_package(package):
    """Convert the npm package name to remove unsupported character"""
    # For scoped package names ('@user/package') the '/' is replaced by a '-'.
    # This is similar to what 'npm pack' does, but 'npm pack' also strips the
    # leading '@', which can lead to ambiguous package names.
    # Scoped package names (with the @) use the same naming convention
    # as the 'npm pack' command.
    name = re.sub("/", "-", package)
    name = name.lower()
    name = re.sub(r"[^\-a-z0-9@]", "", name)
    name = re.sub(r"[^\-a-z0-9]", "", name)
    name = name.strip("-")
    return name
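The two regex variants above differ only in whether the leading '@' of a scoped package name survives. A side-by-side sketch of both behaviours, with the functions reconstructed from the lines shown and an invented input:

import re

def npm_package_52(package):         # yocto-5.2 variant, keeps the leading '@'
    name = re.sub("/", "-", package).lower()
    name = re.sub(r"[^\-a-z0-9@]", "", name)
    return name.strip("-")

def npm_package_scarthgap(package):  # scarthgap variant, strips the '@' like 'npm pack'
    name = re.sub("/", "-", package).lower()
    name = re.sub(r"[^\-a-z0-9]", "", name)
    return name.strip("-")

print(npm_package_52("@User/Package"))         # -> @user-package
print(npm_package_scarthgap("@User/Package"))  # -> user-package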
@@ -91,12 +90,6 @@ class NpmEnvironment(object):
        self.d = d

        self.user_config = tempfile.NamedTemporaryFile(mode="w", buffering=1)

        hn = self._home_npmrc(d)
        if hn is not None:
            with open(hn, 'r') as hnf:
                self.user_config.write(hnf.read())

        for key, value in configs:
            self.user_config.write("%s=%s\n" % (key, value))

@@ -109,15 +102,6 @@ class NpmEnvironment(object):
        if self.user_config:
            self.user_config.close()

    def _home_npmrc(self, d):
        """Function to return user's HOME .npmrc file (or None if it doesn't exist)"""
        home_npmrc_file = os.path.join(os.environ.get("HOME"), ".npmrc")
        if d.getVar("BB_USE_HOME_NPMRC") == "1" and os.path.exists(home_npmrc_file):
            bb.warn(f"BB_USE_HOME_NPMRC flag set and valid .npmrc detected - "\
                    f"npm fetcher will use {home_npmrc_file}")
            return home_npmrc_file
        return None

    def run(self, cmd, args=None, configs=None, workdir=None):
        """Run npm command in a controlled environment"""
        with tempfile.TemporaryDirectory() as tmpdir:
@@ -181,7 +165,7 @@ class Npm(FetchMethod):
        # Using the 'downloadfilename' parameter as local filename
        # or the npm package name.
        if "downloadfilename" in ud.parm:
            ud.localfile = npm_localfile(ud.parm["downloadfilename"])
            ud.localfile = npm_localfile(d.expand(ud.parm["downloadfilename"]))
        else:
            ud.localfile = npm_localfile(ud.package, ud.version)
@@ -37,26 +37,38 @@ def foreach_dependencies(shrinkwrap, callback=None, dev=False):
    """
    Run a callback for each dependency of a shrinkwrap file.
    The callback uses the format:
        callback(name, data, location)
        callback(name, params, deptree)
    with:
        name = the package name (string)
        data = the package data (dictionary)
        location = the location of the package (string)
        params = the package parameters (dictionary)
        destdir = the destination of the package (string)
    """
    packages = shrinkwrap.get("packages")
    if not packages:
        raise FetchError("Invalid shrinkwrap file format")
    # For handling old style dependencies entries in shrinkwrap files
    def _walk_deps(deps, deptree):
        for name in deps:
            subtree = [*deptree, name]
            _walk_deps(deps[name].get("dependencies", {}), subtree)
            if callback is not None:
                if deps[name].get("dev", False) and not dev:
                    continue
                elif deps[name].get("bundled", False):
                    continue
                destsubdirs = [os.path.join("node_modules", dep) for dep in subtree]
                destsuffix = os.path.join(*destsubdirs)
                callback(name, deps[name], destsuffix)

    for location, data in packages.items():
        # Skip empty main and local link target packages
        if not location.startswith('node_modules/'):
            continue
        elif not dev and data.get("dev", False):
            continue
        elif data.get("inBundle", False):
            continue
        name = location.split('node_modules/')[-1]
        callback(name, data, location)
    # packages entry means new style shrinkwrap file, else use dependencies
    packages = shrinkwrap.get("packages", None)
    if packages is not None:
        for package in packages:
            if package != "":
                name = package.split('node_modules/')[-1]
                package_infos = packages.get(package, {})
                if dev == False and package_infos.get("dev", False):
                    continue
                callback(name, package_infos, package)
    else:
        _walk_deps(shrinkwrap.get("dependencies", {}), [])
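The two code paths above correspond to the two shrinkwrap layouts: new-style files carry a flat "packages" map keyed by node_modules/ paths, while old-style files nest a "dependencies" tree that _walk_deps() flattens into the same destination suffixes. A fabricated example of what the callback receives in each case:

shrinkwrap_new = {
    "packages": {
        "": {},  # the main package entry is skipped
        "node_modules/foo": {"version": "1.0.0"},
        "node_modules/foo/node_modules/bar": {"version": "2.0.0"},
    }
}
# callback sees: ("foo", {...}, "node_modules/foo")
#                ("bar", {...}, "node_modules/foo/node_modules/bar")

shrinkwrap_old = {
    "dependencies": {
        "foo": {"version": "1.0.0",
                "dependencies": {"bar": {"version": "2.0.0"}}},
    }
}
# _walk_deps() builds the same node_modules/... destination suffixes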
class NpmShrinkWrap(FetchMethod):
    """Class to fetch all packages from a shrinkwrap file"""
@@ -83,18 +95,12 @@ class NpmShrinkWrap(FetchMethod):
        extrapaths = []
        unpack = True

        integrity = params.get("integrity")
        resolved = params.get("resolved")
        version = params.get("version")
        link = params.get("link", False)

        # Handle link sources
        if link:
            localpath = resolved
            unpack = False
        integrity = params.get("integrity", None)
        resolved = params.get("resolved", None)
        version = params.get("version", None)

        # Handle registry sources
        elif version and is_semver(version) and integrity:
        if is_semver(version) and integrity:
            # Handle duplicate dependencies without url
            if not resolved:
                return
@@ -122,10 +128,10 @@ class NpmShrinkWrap(FetchMethod):
            extrapaths.append(resolvefile)

        # Handle http tarball sources
        elif resolved.startswith("http") and integrity:
            localfile = npm_localfile(os.path.basename(resolved))
        elif version.startswith("http") and integrity:
            localfile = npm_localfile(os.path.basename(version))

            uri = URI(resolved)
            uri = URI(version)
            uri.params["downloadfilename"] = localfile

            checksum_name, checksum_expected = npm_integrity(integrity)
@@ -135,12 +141,28 @@ class NpmShrinkWrap(FetchMethod):

            localpath = os.path.join(d.getVar("DL_DIR"), localfile)

        # Handle local tarball sources
        elif resolved.startswith("file"):
            localpath = resolved[5:]
        # Handle local tarball and link sources
        elif version.startswith("file"):
            localpath = version[5:]
            if not version.endswith(".tgz"):
                unpack = False

        # Handle git sources
        elif resolved.startswith("git"):
        elif version.startswith(("git", "bitbucket","gist")) or (
            not version.endswith((".tgz", ".tar", ".tar.gz"))
            and not version.startswith((".", "@", "/"))
            and "/" in version
        ):
            if version.startswith("github:"):
                version = "git+https://github.com/" + version[len("github:"):]
            elif version.startswith("gist:"):
                version = "git+https://gist.github.com/" + version[len("gist:"):]
            elif version.startswith("bitbucket:"):
                version = "git+https://bitbucket.org/" + version[len("bitbucket:"):]
            elif version.startswith("gitlab:"):
                version = "git+https://gitlab.com/" + version[len("gitlab:"):]
            elif not version.startswith(("git+","git:")):
                version = "git+https://github.com/" + version
            regex = re.compile(r"""
                ^
                git\+
@@ -152,16 +174,16 @@ class NpmShrinkWrap(FetchMethod):
                $
                """, re.VERBOSE)

            match = regex.match(resolved)
            match = regex.match(version)

            if not match:
                raise ParameterError("Invalid git url: %s" % resolved, ud.url)
                raise ParameterError("Invalid git url: %s" % version, ud.url)

            groups = match.groupdict()

            uri = URI("git://" + str(groups["url"]))
            uri.params["protocol"] = str(groups["protocol"])
            uri.params["rev"] = str(groups["rev"])
            uri.params["nobranch"] = "1"
            uri.params["destsuffix"] = destsuffix

            url = str(uri)
@@ -246,7 +268,7 @@ class NpmShrinkWrap(FetchMethod):

    def unpack(self, ud, rootdir, d):
        """Unpack the downloaded dependencies"""
        destdir = rootdir
        destdir = d.getVar("S")
        destsuffix = ud.parm.get("destsuffix")
        if destsuffix:
            destdir = os.path.join(rootdir, destsuffix)
@@ -77,7 +77,7 @@ class S3(FetchMethod):
        else:
            ud.basename = os.path.basename(ud.path)

        ud.localfile = ud.basename
        ud.localfile = d.expand(urllib.parse.unquote(ud.basename))

        ud.basecmd = d.getVar("FETCHCMD_s3") or "/usr/bin/env aws s3"


@@ -77,7 +77,7 @@ class SFTP(FetchMethod):
        else:
            ud.basename = os.path.basename(ud.path)

        ud.localfile = ud.basename
        ud.localfile = d.expand(urllib.parse.unquote(ud.basename))

    def download(self, ud, d):
        """Fetch urls"""

@@ -73,7 +73,8 @@ class SSH(FetchMethod):
        path = m.group('path')
        path = urllib.parse.unquote(path)
        host = m.group('host')
        urldata.localfile = os.path.basename(os.path.normpath(path))
        urldata.localpath = os.path.join(d.getVar('DL_DIR'),
                os.path.basename(os.path.normpath(path)))

    def download(self, urldata, d):
        dldir = d.getVar('DL_DIR')

@@ -210,6 +210,3 @@ class Svn(FetchMethod):

    def _build_revision(self, ud, d):
        return ud.revision

    def supports_checksum(self, urldata):
        return False
|
||||
class Wget(FetchMethod):
|
||||
"""Class to fetch urls via 'wget'"""
|
||||
|
||||
# CDNs like CloudFlare may do a 'browser integrity test' which can fail
|
||||
# with the standard wget/urllib User-Agent, so pretend to be a modern
|
||||
# browser.
|
||||
user_agent = "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:84.0) Gecko/20100101 Firefox/84.0"
|
||||
|
||||
def check_certs(self, d):
|
||||
"""
|
||||
Should certificates be checked?
|
||||
@@ -78,11 +83,11 @@ class Wget(FetchMethod):
|
||||
else:
|
||||
ud.basename = os.path.basename(ud.path)
|
||||
|
||||
ud.localfile = ud.basename
|
||||
ud.localfile = d.expand(urllib.parse.unquote(ud.basename))
|
||||
if not ud.localfile:
|
||||
ud.localfile = ud.host + ud.path.replace("/", ".")
|
||||
ud.localfile = d.expand(urllib.parse.unquote(ud.host + ud.path).replace("/", "."))
|
||||
|
||||
self.basecmd = d.getVar("FETCHCMD_wget") or "/usr/bin/env wget --tries=2 --timeout=100"
|
||||
self.basecmd = d.getVar("FETCHCMD_wget") or "/usr/bin/env wget -t 2 -T 30"
|
||||
|
||||
if ud.type == 'ftp' or ud.type == 'ftps':
|
||||
self.basecmd += " --passive-ftp"
|
||||
@@ -96,7 +101,7 @@ class Wget(FetchMethod):
|
||||
|
||||
logger.debug2("Fetching %s using command '%s'" % (ud.url, command))
|
||||
bb.fetch2.check_network_access(d, command, ud.url)
|
||||
runfetchcmd(command + ' --progress=dot --verbose', d, quiet, log=progresshandler, workdir=workdir)
|
||||
runfetchcmd(command + ' --progress=dot -v', d, quiet, log=progresshandler, workdir=workdir)
|
||||
|
||||
def download(self, ud, d):
|
||||
"""Fetch urls"""
|
||||
@@ -106,7 +111,7 @@ class Wget(FetchMethod):
|
||||
dldir = os.path.realpath(d.getVar("DL_DIR"))
|
||||
localpath = os.path.join(dldir, ud.localfile) + ".tmp"
|
||||
bb.utils.mkdirhier(os.path.dirname(localpath))
|
||||
fetchcmd += " --output-document=%s" % shlex.quote(localpath)
|
||||
fetchcmd += " -O %s" % shlex.quote(localpath)
|
||||
|
||||
if ud.user and ud.pswd:
|
||||
fetchcmd += " --auth-no-challenge"
|
||||
@@ -122,7 +127,12 @@ class Wget(FetchMethod):
|
||||
fetchcmd += " --user=%s --password=%s" % (ud.user, ud.pswd)
|
||||
|
||||
uri = ud.url.split(";")[0]
|
||||
fetchcmd += " --continue --directory-prefix=%s '%s'" % (dldir, uri)
|
||||
if os.path.exists(ud.localpath):
|
||||
# file exists, but we didnt complete it.. trying again..
|
||||
fetchcmd += " -c -P " + dldir + " '" + uri + "'"
|
||||
else:
|
||||
fetchcmd += " -P " + dldir + " '" + uri + "'"
|
||||
|
||||
self._runwget(ud, d, fetchcmd, False)
|
||||
|
||||
# Sanity check since wget can pretend it succeed when it didn't
|
||||
@@ -234,12 +244,7 @@ class Wget(FetchMethod):
|
||||
fetch.connection_cache.remove_connection(h.host, h.port)
|
||||
raise urllib.error.URLError(err)
|
||||
else:
|
||||
try:
|
||||
r = h.getresponse()
|
||||
except TimeoutError as e:
|
||||
if fetch.connection_cache:
|
||||
fetch.connection_cache.remove_connection(h.host, h.port)
|
||||
raise TimeoutError(e)
|
||||
r = h.getresponse()
|
||||
|
||||
# Pick apart the HTTPResponse object to get the addinfourl
|
||||
# object initialized properly.
|
||||
@@ -300,45 +305,13 @@ class Wget(FetchMethod):
|
||||
|
||||
class FixedHTTPRedirectHandler(urllib.request.HTTPRedirectHandler):
|
||||
"""
|
||||
urllib2.HTTPRedirectHandler before 3.13 has two flaws:
|
||||
|
||||
It resets the method to GET on redirect when we want to follow
|
||||
redirects using the original method (typically HEAD). This was fixed
|
||||
in 759e8e7.
|
||||
|
||||
It also doesn't handle 308 (Permanent Redirect). This was fixed in
|
||||
c379bc5.
|
||||
|
||||
Until we depend on Python 3.13 onwards, copy the redirect_request
|
||||
method to fix these issues.
|
||||
urllib2.HTTPRedirectHandler resets the method to GET on redirect,
|
||||
when we want to follow redirects using the original method.
|
||||
"""
|
||||
def redirect_request(self, req, fp, code, msg, headers, newurl):
|
||||
m = req.get_method()
|
||||
if (not (code in (301, 302, 303, 307, 308) and m in ("GET", "HEAD")
|
||||
or code in (301, 302, 303) and m == "POST")):
|
||||
raise urllib.HTTPError(req.full_url, code, msg, headers, fp)
|
||||
|
||||
# Strictly (according to RFC 2616), 301 or 302 in response to
|
||||
# a POST MUST NOT cause a redirection without confirmation
|
||||
# from the user (of urllib.request, in this case). In practice,
|
||||
# essentially all clients do redirect in this case, so we do
|
||||
# the same.
|
||||
|
||||
# Be conciliant with URIs containing a space. This is mainly
|
||||
# redundant with the more complete encoding done in http_error_302(),
|
||||
# but it is kept for compatibility with other callers.
|
||||
newurl = newurl.replace(' ', '%20')
|
||||
|
||||
CONTENT_HEADERS = ("content-length", "content-type")
|
||||
newheaders = {k: v for k, v in req.headers.items()
|
||||
if k.lower() not in CONTENT_HEADERS}
|
||||
return urllib.request.Request(newurl,
|
||||
method="HEAD" if m == "HEAD" else "GET",
|
||||
headers=newheaders,
|
||||
origin_req_host=req.origin_req_host,
|
||||
unverifiable=True)
|
||||
|
||||
http_error_308 = urllib.request.HTTPRedirectHandler.http_error_302
|
||||
newreq = urllib.request.HTTPRedirectHandler.redirect_request(self, req, fp, code, msg, headers, newurl)
|
||||
newreq.get_method = req.get_method
|
||||
return newreq
|
||||
|
||||
# We need to update the environment here as both the proxy and HTTPS
|
||||
# handlers need variables set. The proxy needs http_proxy and friends to
|
||||
@@ -371,14 +344,14 @@ class Wget(FetchMethod):
|
||||
opener = urllib.request.build_opener(*handlers)
|
||||
|
||||
try:
|
||||
parts = urllib.parse.urlparse(ud.url.split(";")[0])
|
||||
uri = "{}://{}{}".format(parts.scheme, parts.netloc, parts.path)
|
||||
uri_base = ud.url.split(";")[0]
|
||||
uri = "{}://{}{}".format(urllib.parse.urlparse(uri_base).scheme, ud.host, ud.path)
|
||||
r = urllib.request.Request(uri)
|
||||
r.get_method = lambda: "HEAD"
|
||||
# Some servers (FusionForge, as used on Alioth) require that the
|
||||
# optional Accept header is set.
|
||||
r.add_header("Accept", "*/*")
|
||||
r.add_header("User-Agent", "bitbake/{}".format(bb.__version__))
|
||||
r.add_header("User-Agent", self.user_agent)
|
||||
def add_basic_auth(login_str, request):
|
||||
'''Adds Basic auth to http request, pass in login:password as string'''
|
||||
import base64
|
||||
@@ -398,7 +371,7 @@ class Wget(FetchMethod):
|
||||
except (FileNotFoundError, netrc.NetrcParseError):
|
||||
pass
|
||||
|
||||
with opener.open(r, timeout=100) as response:
|
||||
with opener.open(r, timeout=30) as response:
|
||||
pass
|
||||
except (urllib.error.URLError, ConnectionResetError, TimeoutError) as e:
|
||||
if try_again:
|
||||
@@ -485,7 +458,7 @@ class Wget(FetchMethod):
|
||||
f = tempfile.NamedTemporaryFile()
|
||||
with tempfile.TemporaryDirectory(prefix="wget-index-") as workdir, tempfile.NamedTemporaryFile(dir=workdir, prefix="wget-listing-") as f:
|
||||
fetchcmd = self.basecmd
|
||||
fetchcmd += " --output-document=%s '%s'" % (f.name, uri)
|
||||
fetchcmd += " -O " + f.name + " --user-agent='" + self.user_agent + "' '" + uri + "'"
|
||||
try:
|
||||
self._runwget(ud, d, fetchcmd, True, workdir=workdir)
|
||||
fetchresult = f.read()
|
||||
@@ -645,17 +618,13 @@ class Wget(FetchMethod):
|
||||
|
||||
sanity check to ensure same name and type.
|
||||
"""
|
||||
if 'downloadfilename' in ud.parm:
|
||||
package = ud.parm['downloadfilename']
|
||||
else:
|
||||
package = ud.path.split("/")[-1]
|
||||
package = ud.path.split("/")[-1]
|
||||
current_version = ['', d.getVar('PV'), '']
|
||||
|
||||
"""possible to have no version in pkg name, such as spectrum-fw"""
|
||||
if not re.search(r"\d+", package):
|
||||
current_version[1] = re.sub('_', '.', current_version[1])
|
||||
current_version[1] = re.sub('-', '.', current_version[1])
|
||||
bb.debug(3, "latest_versionstring: no version found in %s" % package)
|
||||
return (current_version[1], '')
|
||||
|
||||
package_regex = self._init_regexes(package, ud, d)
|
||||
|
||||
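The scarthgap-side redirect handler keeps the original request method by copying get_method onto the new request, and both sides alias http_error_308 to the 302 logic so Permanent Redirects are followed. A self-contained sketch of that approach; the class name and URL are placeholders, not the code above:

import urllib.request

class RedirectKeepsMethod(urllib.request.HTTPRedirectHandler):
    """Toy stand-in for FixedHTTPRedirectHandler: keep HEAD on redirect and
    handle 308 by reusing the stock 302 handler."""
    def redirect_request(self, req, fp, code, msg, headers, newurl):
        newreq = super().redirect_request(req, fp, code, msg, headers, newurl)
        if newreq is not None:
            newreq.get_method = req.get_method  # a HEAD stays a HEAD
        return newreq
    http_error_308 = urllib.request.HTTPRedirectHandler.http_error_302

opener = urllib.request.build_opener(RedirectKeepsMethod)
req = urllib.request.Request("https://example.com/artifact", method="HEAD")
# with opener.open(req, timeout=30) as resp:
#     print(resp.status, resp.url)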
@@ -89,6 +89,10 @@ class BBLogFormatter(logging.Formatter):
        msg = logging.Formatter.format(self, record)
        if hasattr(record, 'bb_exc_formatted'):
            msg += '\n' + ''.join(record.bb_exc_formatted)
        elif hasattr(record, 'bb_exc_info'):
            etype, value, tb = record.bb_exc_info
            formatted = bb.exceptions.format_exception(etype, value, tb, limit=5)
            msg += '\n' + ''.join(formatted)
        return msg

    def colorize(self, record):
@@ -43,21 +43,6 @@ class IncludeNode(AstNode):
        else:
            bb.parse.ConfHandler.include(self.filename, s, self.lineno, data, False)

class IncludeAllNode(AstNode):
    def __init__(self, filename, lineno, what_file):
        AstNode.__init__(self, filename, lineno)
        self.what_file = what_file

    def eval(self, data):
        """
        Include the file and evaluate the statements
        """
        s = data.expand(self.what_file)
        logger.debug2("CONF %s:%s: including %s", self.filename, self.lineno, s)

        for path in data.getVar("BBPATH").split(":"):
            bb.parse.ConfHandler.include(self.filename, os.path.join(path, s), self.lineno, data, False)

class ExportNode(AstNode):
    def __init__(self, filename, lineno, var):
        AstNode.__init__(self, filename, lineno)
@@ -152,10 +137,7 @@ class DataNode(AstNode):

        flag = None
        if 'flag' in groupd and groupd['flag'] is not None:
            if groupd["lazyques"]:
                flag = "_defaultval_flag_"+groupd['flag']
            else:
                flag = groupd['flag']
            flag = groupd['flag']
        elif groupd["lazyques"]:
            flag = "_defaultval"
@@ -258,16 +240,14 @@ class ExportFuncsNode(AstNode):
            data.setVar(func, sentinel + " " + calledfunc + "\n", parsing=True)

class AddTaskNode(AstNode):
    def __init__(self, filename, lineno, tasks, before, after):
    def __init__(self, filename, lineno, func, before, after):
        AstNode.__init__(self, filename, lineno)
        self.tasks = tasks
        self.func = func
        self.before = before
        self.after = after

    def eval(self, data):
        tasks = self.tasks.split()
        for task in tasks:
            bb.build.addtask(task, self.before, self.after, data)
        bb.build.addtask(self.func, self.before, self.after, data)

class DelTaskNode(AstNode):
    def __init__(self, filename, lineno, tasks):
@@ -344,49 +324,9 @@ class InheritDeferredNode(AstNode):
        inherits.append(self.inherit)
        data.setVar('__BBDEFINHERITS', inherits)

class AddFragmentsNode(AstNode):
    def __init__(self, filename, lineno, fragments_path_prefix, fragments_variable, flagged_variables_list_variable):
        AstNode.__init__(self, filename, lineno)
        self.fragments_path_prefix = fragments_path_prefix
        self.fragments_variable = fragments_variable
        self.flagged_variables_list_variable = flagged_variables_list_variable

    def eval(self, data):
        # No need to use mark_dependency since we would only match a fragment
        # from a specific layer and there can only be a single layer with a
        # given namespace.
        def find_fragment(layers, layerid, full_fragment_name):
            for layerpath in layers.split():
                candidate_fragment_path = os.path.join(layerpath, full_fragment_name)
                if os.path.exists(candidate_fragment_path) and bb.utils.get_file_layer(candidate_fragment_path, data) == layerid:
                    return candidate_fragment_path
            return None

        fragments = data.getVar(self.fragments_variable)
        layers = data.getVar('BBLAYERS')
        flagged_variables = data.getVar(self.flagged_variables_list_variable).split()

        if not fragments:
            return
        for f in fragments.split():
            layerid, fragment_name = f.split('/', 1)
            full_fragment_name = data.expand("{}/{}.conf".format(self.fragments_path_prefix, fragment_name))
            fragment_path = find_fragment(layers, layerid, full_fragment_name)
            if fragment_path:
                bb.parse.ConfHandler.include(self.filename, fragment_path, self.lineno, data, "include fragment")
                for flagged_var in flagged_variables:
                    val = data.getVar(flagged_var)
                    data.setVarFlag(flagged_var, f, val)
                    data.setVar(flagged_var, None)
            else:
                bb.error("Could not find fragment {} in enabled layers: {}".format(f, layers))

def handleInclude(statements, filename, lineno, m, force):
    statements.append(IncludeNode(filename, lineno, m.group(1), force))

def handleIncludeAll(statements, filename, lineno, m):
    statements.append(IncludeAllNode(filename, lineno, m.group(1)))

def handleExport(statements, filename, lineno, m):
    statements.append(ExportNode(filename, lineno, m.group(1)))
@@ -408,11 +348,21 @@ def handlePythonMethod(statements, filename, lineno, funcname, modulename, body)
def handleExportFuncs(statements, filename, lineno, m, classname):
    statements.append(ExportFuncsNode(filename, lineno, m.group(1), classname))

def handleAddTask(statements, filename, lineno, tasks, before, after):
    statements.append(AddTaskNode(filename, lineno, tasks, before, after))
def handleAddTask(statements, filename, lineno, m):
    func = m.group("func")
    before = m.group("before")
    after = m.group("after")
    if func is None:
        return

def handleDelTask(statements, filename, lineno, tasks):
    statements.append(DelTaskNode(filename, lineno, tasks))
    statements.append(AddTaskNode(filename, lineno, func, before, after))

def handleDelTask(statements, filename, lineno, m):
    func = m.group(1)
    if func is None:
        return

    statements.append(DelTaskNode(filename, lineno, func))

def handleBBHandlers(statements, filename, lineno, m):
    statements.append(BBHandlerNode(filename, lineno, m.group(1)))
@@ -428,42 +378,12 @@ def handleInheritDeferred(statements, filename, lineno, m):
    classes = m.group(1)
    statements.append(InheritDeferredNode(filename, lineno, classes))

def handleAddFragments(statements, filename, lineno, m):
    fragments_path_prefix = m.group(1)
    fragments_variable = m.group(2)
    flagged_variables_list_variable = m.group(3)
    statements.append(AddFragmentsNode(filename, lineno, fragments_path_prefix, fragments_variable, flagged_variables_list_variable))

def runAnonFuncs(d):
    code = []
    for funcname in d.getVar("__BBANONFUNCS", False) or []:
        code.append("%s(d)" % funcname)
    bb.utils.better_exec("\n".join(code), {"d": d})

# Handle recipe level PREFERRED_PROVIDERs
def handleVirtRecipeProviders(tasklist, d):
    depends = (d.getVar("DEPENDS") or "").split()
    virtprovs = (d.getVar("BB_RECIPE_VIRTUAL_PROVIDERS") or "").split()
    newdeps = []
    for dep in depends:
        if dep in virtprovs:
            newdep = d.getVar("PREFERRED_PROVIDER_" + dep)
            if not newdep:
                bb.fatal("Error, recipe virtual provider PREFERRED_PROVIDER_%s not set" % dep)
            newdeps.append(newdep)
        else:
            newdeps.append(dep)
    d.setVar("DEPENDS", " ".join(newdeps))
    for task in tasklist:
        taskdeps = (d.getVarFlag(task, "depends") or "").split()
        remapped = []
        for entry in taskdeps:
            r, t = entry.split(":")
            if r in virtprovs:
                r = d.getVar("PREFERRED_PROVIDER_" + r)
            remapped.append("%s:%s" % (r, t))
        d.setVarFlag(task, "depends", " ".join(remapped))

def finalize(fn, d, variant = None):
    saved_handlers = bb.event.get_handlers().copy()
    try:
@@ -489,7 +409,6 @@ def finalize(fn, d, variant = None):

        tasklist = d.getVar('__BBTASKS', False) or []
        bb.event.fire(bb.event.RecipeTaskPreProcess(fn, list(tasklist)), d)
        handleVirtRecipeProviders(tasklist, d)
        bb.build.add_tasks(tasklist, d)

        bb.parse.siggen.finalise(fn, d, variant)
@@ -23,8 +23,8 @@ __func_start_regexp__ = re.compile(r"(((?P<py>python(?=(\s|\()))|(?P<fr>faker
__inherit_regexp__ = re.compile(r"inherit\s+(.+)" )
__inherit_def_regexp__ = re.compile(r"inherit_defer\s+(.+)" )
__export_func_regexp__ = re.compile(r"EXPORT_FUNCTIONS\s+(.+)" )
__addtask_regexp__ = re.compile(r"addtask\s+([^#\n]+)(?P<comment>#.*|.*?)")
__deltask_regexp__ = re.compile(r"deltask\s+([^#\n]+)(?P<comment>#.*|.*?)")
__addtask_regexp__ = re.compile(r"addtask\s+(?P<func>\w+)\s*((before\s*(?P<before>((.*(?=after))|(.*))))|(after\s*(?P<after>((.*(?=before))|(.*)))))*")
__deltask_regexp__ = re.compile(r"deltask\s+(.+)")
__addhandler_regexp__ = re.compile(r"addhandler\s+(.+)" )
__def_regexp__ = re.compile(r"def\s+(\w+).*:" )
__python_func_regexp__ = re.compile(r"(\s+.*)|(^$)|(^#)" )
@@ -239,38 +239,29 @@ def feeder(lineno, s, fn, root, statements, eof=False):

    m = __addtask_regexp__.match(s)
    if m:
        after = ""
        before = ""
        if len(m.group().split()) == 2:
            # Check and warn for "addtask task1 task2"
            m2 = re.match(r"addtask\s+(?P<func>\w+)(?P<ignores>.*)", s)
            if m2 and m2.group('ignores'):
                logger.warning('addtask ignored: "%s"' % m2.group('ignores'))

        # This code splits on 'before' and 'after' instead of on whitespace so we can defer
        # evaluation to as late as possible.
        tasks = m.group(1).split(" before ")[0].split(" after ")[0]

        for exp in m.group(1).split(" before "):
            exp2 = exp.split(" after ")
            if len(exp2) > 1:
                after = after + " ".join(exp2[1:])

        for exp in m.group(1).split(" after "):
            exp2 = exp.split(" before ")
            if len(exp2) > 1:
                before = before + " ".join(exp2[1:])

        # Check and warn for having task with a keyword as part of task name
        # Check and warn for "addtask task1 before task2 before task3", the
        # similar to "after"
        taskexpression = s.split()
        for word in ('before', 'after'):
            if taskexpression.count(word) > 1:
                logger.warning("addtask contained multiple '%s' keywords, only one is supported" % word)

        # Check and warn for having task with expression as part of task name
        for te in taskexpression:
            if any( ( "%s_" % keyword ) in te for keyword in bb.data_smart.__setvar_keyword__ ):
                raise ParseError("Task name '%s' contains a keyword which is not recommended/supported.\nPlease rename the task not to include the keyword.\n%s" % (te, ("\n".join(map(str, bb.data_smart.__setvar_keyword__)))), fn)

        if tasks is not None:
            ast.handleAddTask(statements, fn, lineno, tasks, before, after)
        ast.handleAddTask(statements, fn, lineno, m)
        return

    m = __deltask_regexp__.match(s)
    if m:
        task = m.group(1)
        if task is not None:
            ast.handleDelTask(statements, fn, lineno, task)
        ast.handleDelTask(statements, fn, lineno, m)
        return

    m = __addhandler_regexp__.match(s)
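The scarthgap __addtask_regexp__ above extracts the task name and its before/after clauses in a single pass, while the yocto-5.2 pattern captures the whole expression and splits it later in feeder(). What the scarthgap pattern yields for a typical line (note the trailing space preserved in the 'after' group):

import re

addtask_re = re.compile(r"addtask\s+(?P<func>\w+)\s*((before\s*(?P<before>((.*(?=after))|(.*))))|(after\s*(?P<after>((.*(?=before))|(.*)))))*")
m = addtask_re.match("addtask compile after do_configure before do_install")
print((m.group("func"), m.group("after"), m.group("before")))
# -> ('compile', 'do_configure ', 'do_install')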
@@ -20,8 +20,8 @@ from bb.parse import ParseError, resolve_file, ast, logger, handle
__config_regexp__  = re.compile( r"""
    ^
    (?P<exp>export\s+)?
    (?P<var>[a-zA-Z0-9\-_+.${}/~:]*?)
    (\[(?P<flag>[a-zA-Z0-9\-_+.][a-zA-Z0-9\-_+.@/]*)\])?
    (?P<var>[a-zA-Z0-9\-_+.${}/~:]+?)
    (\[(?P<flag>[a-zA-Z0-9\-_+.][a-zA-Z0-9\-_+.@]*)\])?

    \s* (
        (?P<colon>:=) |
@@ -43,12 +43,10 @@ __config_regexp__ = re.compile( r"""
    """, re.X)
__include_regexp__ = re.compile( r"include\s+(.+)" )
__require_regexp__ = re.compile( r"require\s+(.+)" )
__includeall_regexp__ = re.compile( r"include_all\s+(.+)" )
__export_regexp__ = re.compile( r"export\s+([a-zA-Z0-9\-_+.${}/~]+)$" )
__unset_regexp__ = re.compile( r"unset\s+([a-zA-Z0-9\-_+.${}/~]+)$" )
__unset_flag_regexp__ = re.compile( r"unset\s+([a-zA-Z0-9\-_+.${}/~]+)\[([a-zA-Z0-9\-_+.][a-zA-Z0-9\-_+.@]+)\]$" )
__addpylib_regexp__ = re.compile(r"addpylib\s+(.+)\s+(.+)" )
__addfragments_regexp__ = re.compile(r"addfragments\s+(.+)\s+(.+)\s+(.+)" )

def init(data):
    return
@@ -166,8 +164,6 @@ def feeder(lineno, s, fn, statements, baseconfig=False, conffile=True):
    m = __config_regexp__.match(s)
    if m:
        groupd = m.groupdict()
        if groupd['var'] == "":
            raise ParseError("Empty variable name in assignment: '%s'" % s, fn, lineno);
        ast.handleData(statements, fn, lineno, groupd)
        return

@@ -181,11 +177,6 @@ def feeder(lineno, s, fn, statements, baseconfig=False, conffile=True):
        ast.handleInclude(statements, fn, lineno, m, True)
        return

    m = __includeall_regexp__.match(s)
    if m:
        ast.handleIncludeAll(statements, fn, lineno, m)
        return

    m = __export_regexp__.match(s)
    if m:
        ast.handleExport(statements, fn, lineno, m)
@@ -206,11 +197,6 @@ def feeder(lineno, s, fn, statements, baseconfig=False, conffile=True):
        ast.handlePyLib(statements, fn, lineno, m)
        return

    m = __addfragments_regexp__.match(s)
    if m:
        ast.handleAddFragments(statements, fn, lineno, m)
        return

    raise ParseError("unparsed line: '%s'" % s, fn, lineno);

# Add us to the handlers list
bitbake/lib/bb/persist_data.py (new file, 271 lines)
|
||||
"""BitBake Persistent Data Store
|
||||
|
||||
Used to store data in a central location such that other threads/tasks can
|
||||
access them at some future date. Acts as a convenience wrapper around sqlite,
|
||||
currently, providing a key/value store accessed by 'domain'.
|
||||
"""
|
||||
|
||||
# Copyright (C) 2007 Richard Purdie
|
||||
# Copyright (C) 2010 Chris Larson <chris_larson@mentor.com>
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
#
|
||||
|
||||
import collections
|
||||
import collections.abc
|
||||
import contextlib
|
||||
import functools
|
||||
import logging
|
||||
import os.path
|
||||
import sqlite3
|
||||
import sys
|
||||
from collections.abc import Mapping
|
||||
|
||||
sqlversion = sqlite3.sqlite_version_info
|
||||
if sqlversion[0] < 3 or (sqlversion[0] == 3 and sqlversion[1] < 3):
|
||||
raise Exception("sqlite3 version 3.3.0 or later is required.")
|
||||
|
||||
|
||||
logger = logging.getLogger("BitBake.PersistData")
|
||||
|
||||
@functools.total_ordering
|
||||
class SQLTable(collections.abc.MutableMapping):
|
||||
class _Decorators(object):
|
||||
@staticmethod
|
||||
def retry(*, reconnect=True):
|
||||
"""
|
||||
Decorator that restarts a function if a database locked sqlite
|
||||
exception occurs. If reconnect is True, the database connection
|
||||
will be closed and reopened each time a failure occurs
|
||||
"""
|
||||
def retry_wrapper(f):
|
||||
def wrap_func(self, *args, **kwargs):
|
||||
# Reconnect if necessary
|
||||
if self.connection is None and reconnect:
|
||||
self.reconnect()
|
||||
|
||||
count = 0
|
||||
while True:
|
||||
try:
|
||||
return f(self, *args, **kwargs)
|
||||
except sqlite3.OperationalError as exc:
|
||||
if count < 500 and ('is locked' in str(exc) or 'locking protocol' in str(exc)):
|
||||
count = count + 1
|
||||
if reconnect:
|
||||
self.reconnect()
|
||||
continue
|
||||
raise
|
||||
return wrap_func
|
||||
return retry_wrapper
|
||||
|
||||
@staticmethod
|
||||
def transaction(f):
|
||||
"""
|
||||
Decorator that starts a database transaction and creates a database
|
||||
cursor for performing queries. If no exception is thrown, the
|
||||
database results are committed. If an exception occurs, the database
|
||||
is rolled back. In all cases, the cursor is closed after the
|
||||
function ends.
|
||||
|
||||
Note that the cursor is passed as an extra argument to the function
|
||||
after `self` and before any of the normal arguments
|
||||
"""
|
||||
def wrap_func(self, *args, **kwargs):
|
||||
# Context manager will COMMIT the database on success,
|
||||
# or ROLLBACK on an exception
|
||||
with self.connection:
|
||||
# Automatically close the cursor when done
|
||||
with contextlib.closing(self.connection.cursor()) as cursor:
|
||||
return f(self, cursor, *args, **kwargs)
|
||||
return wrap_func
|
||||
|
||||
"""Object representing a table/domain in the database"""
|
||||
def __init__(self, cachefile, table):
|
||||
self.cachefile = cachefile
|
||||
self.table = table
|
||||
|
||||
self.connection = None
|
||||
self._execute_single("CREATE TABLE IF NOT EXISTS %s(key TEXT PRIMARY KEY NOT NULL, value TEXT);" % table)
|
||||
|
||||
@_Decorators.retry(reconnect=False)
|
||||
@_Decorators.transaction
|
||||
def _setup_database(self, cursor):
|
||||
cursor.execute("pragma synchronous = off;")
|
||||
# Enable WAL and keep the autocheckpoint length small (the default is
|
||||
# usually 1000). Persistent caches are usually read-mostly, so keeping
|
||||
# this short will keep readers running quickly
|
||||
cursor.execute("pragma journal_mode = WAL;")
|
||||
cursor.execute("pragma wal_autocheckpoint = 100;")
|
||||
|
||||
def reconnect(self):
|
||||
if self.connection is not None:
|
||||
self.connection.close()
|
||||
self.connection = sqlite3.connect(self.cachefile, timeout=5)
|
||||
self.connection.text_factory = str
|
||||
self._setup_database()
|
||||
|
||||
@_Decorators.retry()
|
||||
@_Decorators.transaction
|
||||
def _execute_single(self, cursor, *query):
|
||||
"""
|
||||
Executes a single query and discards the results. This correctly closes
|
||||
the database cursor when finished
|
||||
"""
|
||||
cursor.execute(*query)
|
||||
|
||||
@_Decorators.retry()
|
||||
def _row_iter(self, f, *query):
|
||||
"""
|
||||
Helper function that returns a row iterator. Each time __next__ is
|
||||
called on the iterator, the provided function is evaluated to determine
|
||||
the return value
|
||||
"""
|
||||
class CursorIter(object):
|
||||
def __init__(self, cursor):
|
||||
self.cursor = cursor
|
||||
|
||||
def __iter__(self):
|
||||
return self
|
||||
|
||||
def __next__(self):
|
||||
row = self.cursor.fetchone()
|
||||
if row is None:
|
||||
self.cursor.close()
|
||||
raise StopIteration
|
||||
return f(row)
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def __exit__(self, typ, value, traceback):
|
||||
self.cursor.close()
|
||||
return False
|
||||
|
||||
cursor = self.connection.cursor()
|
||||
try:
|
||||
            cursor.execute(*query)
            return CursorIter(cursor)
        except:
            cursor.close()

    def __enter__(self):
        self.connection.__enter__()
        return self

    def __exit__(self, *excinfo):
        self.connection.__exit__(*excinfo)

    @_Decorators.retry()
    @_Decorators.transaction
    def __getitem__(self, cursor, key):
        cursor.execute("SELECT * from %s where key=?;" % self.table, [key])
        row = cursor.fetchone()
        if row is not None:
            return row[1]
        raise KeyError(key)

    @_Decorators.retry()
    @_Decorators.transaction
    def __delitem__(self, cursor, key):
        if key not in self:
            raise KeyError(key)
        cursor.execute("DELETE from %s where key=?;" % self.table, [key])

    @_Decorators.retry()
    @_Decorators.transaction
    def __setitem__(self, cursor, key, value):
        if not isinstance(key, str):
            raise TypeError('Only string keys are supported')
        elif not isinstance(value, str):
            raise TypeError('Only string values are supported')

        # Ensure the entire transaction (including SELECT) executes under write lock
        cursor.execute("BEGIN EXCLUSIVE")

        cursor.execute("SELECT * from %s where key=?;" % self.table, [key])
        row = cursor.fetchone()
        if row is not None:
            cursor.execute("UPDATE %s SET value=? WHERE key=?;" % self.table, [value, key])
        else:
            cursor.execute("INSERT into %s(key, value) values (?, ?);" % self.table, [key, value])

    @_Decorators.retry()
    @_Decorators.transaction
    def __contains__(self, cursor, key):
        cursor.execute('SELECT * from %s where key=?;' % self.table, [key])
        return cursor.fetchone() is not None

    @_Decorators.retry()
    @_Decorators.transaction
    def __len__(self, cursor):
        cursor.execute("SELECT COUNT(key) FROM %s;" % self.table)
        row = cursor.fetchone()
        if row is not None:
            return row[0]

    def __iter__(self):
        return self._row_iter(lambda row: row[0], "SELECT key from %s;" % self.table)

    def __lt__(self, other):
        if not isinstance(other, Mapping):
            raise NotImplementedError()

        return len(self) < len(other)

    def get_by_pattern(self, pattern):
        return self._row_iter(lambda row: row[1], "SELECT * FROM %s WHERE key LIKE ?;" %
                              self.table, [pattern])

    def values(self):
        return list(self.itervalues())

    def itervalues(self):
        return self._row_iter(lambda row: row[0], "SELECT value FROM %s;" %
                              self.table)

    def items(self):
        return list(self.iteritems())

    def iteritems(self):
        return self._row_iter(lambda row: (row[0], row[1]), "SELECT * FROM %s;" %
                              self.table)

    @_Decorators.retry()
    @_Decorators.transaction
    def clear(self, cursor):
        cursor.execute("DELETE FROM %s;" % self.table)

    def has_key(self, key):
        return key in self

def persist(domain, d):
    """Convenience factory for SQLTable objects based upon metadata"""
    import bb.utils
    cachedir = (d.getVar("PERSISTENT_DIR") or
                d.getVar("CACHE"))
    if not cachedir:
        logger.critical("Please set the 'PERSISTENT_DIR' or 'CACHE' variable")
        sys.exit(1)

    bb.utils.mkdirhier(cachedir)
    cachefile = os.path.join(cachedir, "bb_persist_data.sqlite3")

    try:
        return SQLTable(cachefile, domain)
    except sqlite3.OperationalError:
        # Sqlite fails to open database when its path is too long.
        # After testing, 504 is the biggest path length that can be opened by
        # sqlite.
        # Note: This code is called before sanity.bbclass and its path length
        # check
        max_len = 504
        if len(cachefile) > max_len:
            logger.critical("The path of the cache file is too long "
                            "({0} chars > {1}) to be opened by sqlite! "
                            "Your cache file is \"{2}\"".format(
                                len(cachefile),
                                max_len,
                                cachefile))
            sys.exit(1)
        else:
            raise
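For orientation, a minimal sketch of how this factory is meant to be used (the domain name and cache directory below are illustrative, not taken from the diff):

    import bb.data
    import bb.persist_data

    d = bb.data.init()
    d.setVar("PERSISTENT_DIR", "/tmp/bb-cache")        # illustrative cache location

    table = bb.persist_data.persist("MY_DOMAIN", d)    # hypothetical domain name
    table["key"] = "value"       # only str keys and str values are accepted
    assert "key" in table and table["key"] == "value"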
@@ -14,7 +14,6 @@ import os
import sys
import stat
import errno
import itertools
import logging
import re
import bb
@@ -129,7 +128,6 @@ class RunQueueStats:
# runQueue state machine
runQueuePrepare = 2
runQueueSceneInit = 3
runQueueDumpSigs = 4
runQueueRunning = 6
runQueueFailed = 7
runQueueCleanUp = 8
@@ -477,6 +475,7 @@ class RunQueueData:
        self.runtaskentries = {}

    def runq_depends_names(self, ids):
        import re
        ret = []
        for id in ids:
            nam = os.path.basename(id)
@@ -729,8 +728,6 @@ class RunQueueData:
                        if mc == frommc:
                            fn = taskData[mcdep].build_targets[pn][0]
                            newdep = '%s:%s' % (fn,deptask)
                            if newdep not in taskData[mcdep].taskentries:
                                bb.fatal("Task mcdepends on non-existent task %s" % (newdep))
                            taskData[mc].taskentries[tid].tdepends.append(newdep)

        for mc in taskData:
@@ -1591,19 +1588,14 @@ class RunQueue:
            self.rqdata.init_progress_reporter.next_stage()
            self.rqexe = RunQueueExecute(self)

            dumpsigs = self.cooker.configuration.dump_signatures
            if dumpsigs:
            dump = self.cooker.configuration.dump_signatures
            if dump:
                self.rqdata.init_progress_reporter.finish()
                if 'printdiff' in dumpsigs:
                    self.invalidtasks_dump = self.print_diffscenetasks()
                self.state = runQueueDumpSigs

        if self.state is runQueueDumpSigs:
            dumpsigs = self.cooker.configuration.dump_signatures
            retval = self.dump_signatures(dumpsigs)
            if retval is False:
                if 'printdiff' in dumpsigs:
                    self.write_diffscenetasks(self.invalidtasks_dump)
                if 'printdiff' in dump:
                    invalidtasks = self.print_diffscenetasks()
                self.dump_signatures(dump)
                if 'printdiff' in dump:
                    self.write_diffscenetasks(invalidtasks)
                self.state = runQueueComplete

        if self.state is runQueueSceneInit:
@@ -1694,42 +1686,33 @@ class RunQueue:
            bb.parse.siggen.dump_sigtask(taskfn, taskname, dataCaches[mc].stamp[taskfn], True)

    def dump_signatures(self, options):
        if not hasattr(self, "dumpsigs_launched"):
            if bb.cooker.CookerFeatures.RECIPE_SIGGEN_INFO not in self.cooker.featureset:
                bb.fatal("The dump signatures functionality needs the RECIPE_SIGGEN_INFO feature enabled")
        if bb.cooker.CookerFeatures.RECIPE_SIGGEN_INFO not in self.cooker.featureset:
            bb.fatal("The dump signatures functionality needs the RECIPE_SIGGEN_INFO feature enabled")

            bb.note("Writing task signature files")
        bb.note("Writing task signature files")

            max_process = int(self.cfgData.getVar("BB_NUMBER_PARSE_THREADS") or os.cpu_count() or 1)
            def chunkify(l, n):
                return [l[i::n] for i in range(n)]
            dumpsigs_tids = chunkify(list(self.rqdata.runtaskentries), max_process)

            # We cannot use the real multiprocessing.Pool easily due to some local data
            # that can't be pickled. This is a cheap multi-process solution.
            self.dumpsigs_launched = []

            for tids in dumpsigs_tids:
                p = Process(target=self._rq_dump_sigtid, args=(tids, ))
        max_process = int(self.cfgData.getVar("BB_NUMBER_PARSE_THREADS") or os.cpu_count() or 1)
        def chunkify(l, n):
            return [l[i::n] for i in range(n)]
        tids = chunkify(list(self.rqdata.runtaskentries), max_process)
        # We cannot use the real multiprocessing.Pool easily due to some local data
        # that can't be pickled. This is a cheap multi-process solution.
        launched = []
        while tids:
            if len(launched) < max_process:
                p = Process(target=self._rq_dump_sigtid, args=(tids.pop(), ))
                p.start()
                self.dumpsigs_launched.append(p)

            return 1.0

        for q in self.dumpsigs_launched:
            # The finished processes are joined when calling is_alive()
            if not q.is_alive():
                self.dumpsigs_launched.remove(q)

        if self.dumpsigs_launched:
            return 1.0

        for p in self.dumpsigs_launched:
                launched.append(p)
            for q in launched:
                # The finished processes are joined when calling is_alive()
                if not q.is_alive():
                    launched.remove(q)
        for p in launched:
            p.join()

        bb.parse.siggen.dump_sigs(self.rqdata.dataCaches, options)

        return False
        return

    def print_diffscenetasks(self):
        def get_root_invalid_tasks(task, taskdepends, valid, noexec, visited_invalid):
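As an aside, the chunkify() helper above slices a list round-robin into n roughly equal chunks; a standalone illustration with made-up values:

    def chunkify(l, n):
        return [l[i::n] for i in range(n)]

    print(chunkify([1, 2, 3, 4, 5, 6, 7], 3))
    # -> [[1, 4, 7], [2, 5], [3, 6]]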
@@ -2206,20 +2189,12 @@ class RunQueueExecute:
        if not hasattr(self, "sorted_setscene_tids"):
            # Don't want to sort this set every execution
            self.sorted_setscene_tids = sorted(self.rqdata.runq_setscene_tids)
            # Resume looping where we left off when we returned to feed the mainloop
            self.setscene_tids_generator = itertools.cycle(self.rqdata.runq_setscene_tids)

        task = None
        if not self.sqdone and self.can_start_task():
            loopcount = 0
            # Find the next setscene to run, exit the loop when we've processed all tids or found something to execute
            while loopcount < len(self.rqdata.runq_setscene_tids):
                loopcount += 1
                nexttask = next(self.setscene_tids_generator)
            # Find the next setscene to run
            for nexttask in self.sorted_setscene_tids:
                if nexttask in self.sq_buildable and nexttask not in self.sq_running and self.sqdata.stamps[nexttask] not in self.build_stamps.values() and nexttask not in self.sq_harddep_deferred:
                    if nexttask in self.sq_deferred and self.sq_deferred[nexttask] not in self.runq_complete:
                        # Skip deferred tasks quickly before the 'expensive' tests below - this is key to performant multiconfig builds
                        continue
                    if nexttask not in self.sqdata.unskippable and self.sqdata.sq_revdeps[nexttask] and \
                            nexttask not in self.sq_needed_harddeps and \
                            self.sqdata.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and \
@@ -2249,7 +2224,8 @@ class RunQueueExecute:
                            if t in self.runq_running and t not in self.runq_complete:
                                continue
                    if nexttask in self.sq_deferred:
                        # Deferred tasks that were still deferred were skipped above so we now need to process
                        if self.sq_deferred[nexttask] not in self.runq_complete:
                            continue
                        logger.debug("Task %s no longer deferred" % nexttask)
                        del self.sq_deferred[nexttask]
                        valid = self.rq.validate_hashes(set([nexttask]), self.cooker.data, 0, False, summary=False)
@@ -2574,6 +2550,9 @@ class RunQueueExecute:
                self.rqdata.runtaskentries[hashtid].unihash = unihash
                bb.parse.siggen.set_unihash(hashtid, unihash)
                toprocess.add(hashtid)
        if torehash:
            # Need to save after set_unihash above
            bb.parse.siggen.save_unitaskhashes()

        # Work out all tasks which depend upon these
        total = set()
@@ -2772,12 +2751,8 @@ class RunQueueExecute:
                logger.debug2("%s was unavailable and is a hard dependency of %s so skipping" % (task, dep))
                self.sq_task_failoutright(dep)
                continue

        # For performance, only compute allcovered once if needed
        if self.sqdata.sq_deps[task]:
            allcovered = self.scenequeue_covered | self.scenequeue_notcovered
            for dep in sorted(self.sqdata.sq_deps[task]):
                if self.sqdata.sq_revdeps[dep].issubset(allcovered):
                if self.sqdata.sq_revdeps[dep].issubset(self.scenequeue_covered | self.scenequeue_notcovered):
                    if dep not in self.sq_buildable:
                        self.sq_buildable.add(dep)

@@ -3331,7 +3306,7 @@ class runQueuePipe():

        start = len(self.queue)
        try:
            self.queue.extend(self.input.read(512 * 1024) or b"")
            self.queue.extend(self.input.read(102400) or b"")
        except (OSError, IOError) as e:
            if e.errno != errno.EAGAIN:
                raise

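The EAGAIN handling above is the usual non-blocking pipe read pattern; a self-contained sketch of the same idea (the fd and chunk size are illustrative):

    import errno
    import os

    def drain_nonblocking(fd, chunk=102400):
        # Return whatever is available right now on a non-blocking fd.
        try:
            return os.read(fd, chunk) or b""
        except OSError as e:
            if e.errno != errno.EAGAIN:   # only "no data yet" is expected
                raise
            return b""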
@@ -321,22 +321,7 @@ class ProcessServer():
                        bb.warn('Ignoring invalid BB_SERVER_TIMEOUT=%s, must be a float specifying seconds.' % self.timeout)
                seendata = True

            if not self.idle:
                self.idle = threading.Thread(target=self.idle_thread)
                self.idle.start()
            elif self.idle and not self.idle.is_alive():
                serverlog("Idle thread terminated, main thread exiting too")
                bb.error("Idle thread terminated, main thread exiting too")
                self.quit = True

            nextsleep = 1.0
            if self.xmlrpc:
                nextsleep = self.xmlrpc.get_timeout(nextsleep)
            try:
                ready = select.select(fds,[],[],nextsleep)[0]
            except InterruptedError:
                # Ignore EINTR
                ready = []
            ready = self.idle_commands(.1, fds)

        if self.idle:
            self.idle.join()
@@ -439,7 +424,7 @@ class ProcessServer():
            self.idle_cond.notify_all()

        while not self.quit:
            nextsleep = 1.0
            nextsleep = 0.1
            fds = []

            with bb.utils.lock_timeout(self._idlefuncsLock):
@@ -477,7 +462,7 @@ class ProcessServer():

            # Create new heartbeat event?
            now = time.time()
            if items and bb.event._heartbeat_enabled and now >= self.next_heartbeat:
            if bb.event._heartbeat_enabled and now >= self.next_heartbeat:
                # We might have missed heartbeats. Just trigger once in
                # that case and continue after the usual delay.
                self.next_heartbeat += self.heartbeat_seconds
@@ -500,6 +485,31 @@ class ProcessServer():
            if nextsleep is not None:
                select.select(fds,[],[],nextsleep)[0]

    def idle_commands(self, delay, fds=None):
        nextsleep = delay
        if not fds:
            fds = []

        if not self.idle:
            self.idle = threading.Thread(target=self.idle_thread)
            self.idle.start()
        elif self.idle and not self.idle.is_alive():
            serverlog("Idle thread terminated, main thread exiting too")
            bb.error("Idle thread terminated, main thread exiting too")
            self.quit = True

        if nextsleep is not None:
            if self.xmlrpc:
                nextsleep = self.xmlrpc.get_timeout(nextsleep)
            try:
                return select.select(fds,[],[],nextsleep)[0]
            except InterruptedError:
                # Ignore EINTR
                return []
        else:
            return select.select(fds,[],[],0)[0]


class ServerCommunicator():
    def __init__(self, connection, recv):
        self.connection = connection
@@ -14,8 +14,6 @@ from xmlrpc.server import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
import bb.server.xmlrpcclient

import bb
import bb.cooker
import bb.event

# This request handler checks if the request has a "Bitbake-token" header
# field (this comes from the client side) and compares it with its internal
@@ -56,7 +54,7 @@ class BitBakeXMLRPCServer(SimpleXMLRPCServer):

    def __init__(self, interface, cooker, parent):
        # Use auto port configuration
        if interface[1] == -1:
        if (interface[1] == -1):
            interface = (interface[0], 0)
        SimpleXMLRPCServer.__init__(self, interface,
                                    requestHandler=BitBakeXMLRPCRequestHandler,
@@ -89,12 +87,11 @@ class BitBakeXMLRPCServer(SimpleXMLRPCServer):
    def handle_requests(self):
        self._handle_request_noblock()

class BitBakeXMLRPCServerCommands:
class BitBakeXMLRPCServerCommands():

    def __init__(self, server):
        self.server = server
        self.has_client = False
        self.event_handle = None

    def registerEventHandler(self, host, port):
        """
@@ -103,8 +100,8 @@ class BitBakeXMLRPCServerCommands:
        s, t = bb.server.xmlrpcclient._create_server(host, port)

        # we don't allow connections if the cooker is running
        if self.server.cooker.state in [bb.cooker.State.PARSING, bb.cooker.State.RUNNING]:
            return None, f"Cooker is busy: {self.server.cooker.state.name}"
        if (self.server.cooker.state in [bb.cooker.state.parsing, bb.cooker.state.running]):
            return None, "Cooker is busy: %s" % bb.cooker.state.get_name(self.server.cooker.state)

        self.event_handle = bb.event.register_UIHhandler(s, True)
        return self.event_handle, 'OK'
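A note on the auto-port branch above: mapping interface[1] == -1 to port 0 asks the OS for an ephemeral port, which the server can then report back. A standalone sketch:

    from xmlrpc.server import SimpleXMLRPCServer

    server = SimpleXMLRPCServer(("127.0.0.1", 0), logRequests=False)
    print("OS-assigned port:", server.socket.getsockname()[1])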
@@ -201,6 +201,9 @@ class SignatureGenerator(object):
    def save_unitaskhashes(self):
        return

    def copy_unitaskhashes(self, targetdir):
        return

    def set_setscene_tasks(self, setscene_tasks):
        return

@@ -415,6 +418,9 @@ class SignatureGeneratorBasic(SignatureGenerator):
    def save_unitaskhashes(self):
        self.unihash_cache.save(self.unitaskhashes)

    def copy_unitaskhashes(self, targetdir):
        self.unihash_cache.copyfile(targetdir)

    def dump_sigtask(self, mcfn, task, stampbase, runtime):
        tid = mcfn + ":" + task
        mc = bb.runqueue.mc_from_tid(mcfn)
@@ -534,7 +540,7 @@ class SignatureGeneratorUniHashMixIn(object):
    def __init__(self, data):
        self.extramethod = {}
        # NOTE: The cache only tracks hashes that exist. Hashes that don't
        # exist are always queried from the server since it is possible for
        # exist are always queries from the server since it is possible for
        # hashes to appear over time, but much less likely for them to
        # disappear
        self.unihash_exists_cache = set()
@@ -552,11 +558,11 @@ class SignatureGeneratorUniHashMixIn(object):
        super().__init__(data)

    def get_taskdata(self):
        return (self.server, self.method, self.extramethod, self.username, self.password, self.env) + super().get_taskdata()
        return (self.server, self.method, self.extramethod, self.max_parallel, self.username, self.password, self.env) + super().get_taskdata()

    def set_taskdata(self, data):
        self.server, self.method, self.extramethod, self.username, self.password, self.env = data[:6]
        super().set_taskdata(data[6:])
        self.server, self.method, self.extramethod, self.max_parallel, self.username, self.password, self.env = data[:7]
        super().set_taskdata(data[7:])

    def get_hashserv_creds(self):
        if self.username and self.password:
@@ -589,6 +595,13 @@ class SignatureGeneratorUniHashMixIn(object):
            self._client = hashserv.create_client(self.server, **self.get_hashserv_creds())
        yield self._client

    @contextmanager
    def client_pool(self):
        with self._client_env():
            if getattr(self, '_client_pool', None) is None:
                self._client_pool = hashserv.client.ClientPool(self.server, self.max_parallel, **self.get_hashserv_creds())
            yield self._client_pool

    def reset(self, data):
        self.__close_clients()
        return super().reset(data)
@@ -665,20 +678,25 @@
        if len(query) == 0:
            return {}

        query_keys = []
        uncached_query = {}
        result = {}
        for key, unihash in query.items():
            if unihash in self.unihash_exists_cache:
                result[key] = True
            else:
                query_keys.append(key)
                uncached_query[key] = unihash

        if query_keys:
        if self.max_parallel <= 1 or len(uncached_query) <= 1:
            # No parallelism required. Make the query serially with the single client
            with self.client() as client:
                query_result = client.unihash_exists_batch(query[k] for k in query_keys)
                uncached_result = {
                    key: client.unihash_exists(value) for key, value in uncached_query.items()
                }
        else:
            with self.client_pool() as client_pool:
                uncached_result = client_pool.unihashes_exist(uncached_query)

        for idx, key in enumerate(query_keys):
            exists = query_result[idx]
        for key, exists in uncached_result.items():
            if exists:
                self.unihash_exists_cache.add(query[key])
            result[key] = exists
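The unihash_exists_cache above is a positive-only cache: hits are remembered, misses are always re-queried, because a hash can appear on the server later but rarely disappears. A minimal sketch of the pattern (server_query is a hypothetical remote lookup):

    seen = set()   # positive-only cache

    def exists(h, server_query):
        if h in seen:
            return True
        found = server_query(h)   # hypothetical remote lookup
        if found:
            seen.add(h)           # cache only positive answers
        return found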
@@ -694,24 +712,32 @@
            unihash
        """
        result = {}
        query_tids = []
        queries = {}
        query_result = {}

        for tid in tids:
            unihash = self.get_cached_unihash(tid)
            if unihash:
                result[tid] = unihash
            else:
                query_tids.append(tid)
                queries[tid] = (self._get_method(tid), self.taskhash[tid])

        if query_tids:
            unihashes = []
            try:
                with self.client() as client:
                    unihashes = client.get_unihash_batch((self._get_method(tid), self.taskhash[tid]) for tid in query_tids)
            except (ConnectionError, FileNotFoundError) as e:
                bb.warn('Error contacting Hash Equivalence Server %s: %s' % (self.server, str(e)))
        if len(queries) == 0:
            return result

        for idx, tid in enumerate(query_tids):
        if self.max_parallel <= 1 or len(queries) <= 1:
            # No parallelism required. Make the query using a single client
            with self.client() as client:
                keys = list(queries.keys())
                unihashes = client.get_unihash_batch(queries[k] for k in keys)

                for idx, k in enumerate(keys):
                    query_result[k] = unihashes[idx]
        else:
            with self.client_pool() as client_pool:
                query_result = client_pool.get_unihashes(queries)

        for tid, unihash in query_result.items():
            # In the absence of being able to discover a unique hash from the
            # server, make it be equivalent to the taskhash. The unique "hash" only
            # really needs to be a unique string (not even necessarily a hash), but
@@ -726,9 +752,7 @@
            # to the server, there is a better chance that they will agree on
            # the unique hash.
            taskhash = self.taskhash[tid]

            if unihashes and unihashes[idx]:
                unihash = unihashes[idx]
            if unihash:
                # A unique hash equal to the taskhash is not very interesting,
                # so it is reported at debug level 2. If they differ, that
                # is much more interesting, so it is reported at debug level 1
@@ -737,6 +761,7 @@
                hashequiv_logger.debug2('No reported unihash for %s:%s from %s' % (tid, taskhash, self.server))
                unihash = taskhash


            self.set_unihash(tid, unihash)
            self.unihash[tid] = unihash
            result[tid] = unihash
@@ -817,7 +842,7 @@
                    d.setVar('BB_UNIHASH', new_unihash)
                else:
                    hashequiv_logger.debug('Reported task %s as unihash %s to %s' % (taskhash, unihash, self.server))
        except (ConnectionError, FileNotFoundError) as e:
        except ConnectionError as e:
            bb.warn('Error contacting Hash Equivalence Server %s: %s' % (self.server, str(e)))
        finally:
            if sigfile:
@@ -859,7 +884,7 @@
                # TODO: What to do here?
                hashequiv_logger.verbose('Task %s unihash reported as unwanted hash %s' % (tid, finalunihash))

        except (ConnectionError, FileNotFoundError) as e:
        except ConnectionError as e:
            bb.warn('Error contacting Hash Equivalence Server %s: %s' % (self.server, str(e)))

        return False
@@ -873,12 +898,13 @@ class SignatureGeneratorTestEquivHash(SignatureGeneratorUniHashMixIn, SignatureG
        super().init_rundepcheck(data)
        self.server = data.getVar('BB_HASHSERVE')
        self.method = "sstate_output_hash"
        self.max_parallel = 1

def clean_checksum_file_path(file_checksum_tuple):
    f, cs = file_checksum_tuple
    if "/./" in f:
        return "./" + f.split("/./")[1]
    return os.path.basename(f)
    return f

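To illustrate the two branches of clean_checksum_file_path() (the tuples are made up; the checksum element is ignored by the function):

    print(clean_checksum_file_path(("/work/src/./lib/util.c", "abc123")))
    # a "/./" marker yields a relative form: ./lib/util.c
    print(clean_checksum_file_path(("/work/src/lib/util.c", "abc123")))
    # otherwise the old code returned the basename; the new code returns the path unchanged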
def dump_this_task(outfile, d):
    import bb.parse

@@ -106,46 +106,6 @@ ${D}${libdir}/pkgconfig/*.pc
        self.parseExpression("foo=$(echo bar)")
        self.assertExecs(set(["echo"]))

    def test_assign_subshell_expansion_quotes(self):
        self.parseExpression('foo="$(echo bar)"')
        self.assertExecs(set(["echo"]))

    def test_assign_subshell_expansion_nested(self):
        self.parseExpression('foo="$(func1 "$(func2 bar$(func3))")"')
        self.assertExecs(set(["func1", "func2", "func3"]))

    def test_assign_subshell_expansion_multiple(self):
        self.parseExpression('foo="$(func1 "$(func2)") $(func3)"')
        self.assertExecs(set(["func1", "func2", "func3"]))

    def test_assign_subshell_expansion_escaped_quotes(self):
        self.parseExpression('foo="\\"fo\\"o$(func1)"')
        self.assertExecs(set(["func1"]))

    def test_assign_subshell_expansion_empty(self):
        self.parseExpression('foo="bar$()foo"')
        self.assertExecs(set())

    def test_assign_subshell_backticks(self):
        self.parseExpression("foo=`echo bar`")
        self.assertExecs(set(["echo"]))

    def test_assign_subshell_backticks_quotes(self):
        self.parseExpression('foo="`echo bar`"')
        self.assertExecs(set(["echo"]))

    def test_assign_subshell_backticks_multiple(self):
        self.parseExpression('foo="`func1 bar` `func2`"')
        self.assertExecs(set(["func1", "func2"]))

    def test_assign_subshell_backticks_escaped_quotes(self):
        self.parseExpression('foo="\\"fo\\"o`func1`"')
        self.assertExecs(set(["func1"]))

    def test_assign_subshell_backticks_empty(self):
        self.parseExpression('foo="bar``foo"')
        self.assertExecs(set())

    def test_shell_unexpanded(self):
        self.setEmptyVars(["QT_BASE_NAME"])
        self.parseExpression('echo "${QT_BASE_NAME}"')

@@ -66,8 +66,8 @@ class CompressionTests(object):

class LZ4Tests(CompressionTests, unittest.TestCase):
    def setUp(self):
        if shutil.which("lz4") is None:
            self.skipTest("'lz4' not found")
        if shutil.which("lz4c") is None:
            self.skipTest("'lz4c' not found")
        super().setUp()

    @contextlib.contextmanager

@@ -450,64 +450,17 @@ class TestFlags(unittest.TestCase):
        self.d = bb.data.init()
        self.d.setVar("foo", "value of foo")
        self.d.setVarFlag("foo", "flag1", "value of flag1")
        self.d.setVarFlag("foo", "_defaultval_flag_flag1", "default of flag1")
        self.d.setVarFlag("foo", "flag2", "value of flag2")
        self.d.setVarFlag("foo", "_defaultval_flag_flag2", "default of flag2")
        self.d.setVarFlag("foo", "flag3", "value of flag3")
        self.d.setVarFlag("foo", "_defaultval_flag_flagnovalue", "default of flagnovalue")

    def test_setflag(self):
        self.assertEqual(self.d.getVarFlag("foo", "flag1", False), "value of flag1")
        self.assertEqual(self.d.getVarFlag("foo", "flag2", False), "value of flag2")
        self.assertDictEqual(
            self.d.getVarFlags("foo"),
            {
                "flag1": "value of flag1",
                "flag2": "value of flag2",
                "flag3": "value of flag3",
                "flagnovalue": "default of flagnovalue",
            }
        )
        self.assertDictEqual(
            self.d.getVarFlags("foo", internalflags=True),
            {
                "_content": "value of foo",
                "flag1": "value of flag1",
                "flag2": "value of flag2",
                "flag3": "value of flag3",
                "_defaultval_flag_flag1": "default of flag1",
                "_defaultval_flag_flag2": "default of flag2",
                "_defaultval_flag_flagnovalue": "default of flagnovalue",
            }
        )

    def test_delflag(self):
        self.d.delVarFlag("foo", "flag2")
        self.d.delVarFlag("foo", "flag3")
        self.assertEqual(self.d.getVarFlag("foo", "flag1", False), "value of flag1")
        self.assertEqual(self.d.getVarFlag("foo", "flag2", False), None)
        self.assertDictEqual(
            self.d.getVarFlags("foo"),
            {
                "flag1": "value of flag1",
                "flagnovalue": "default of flagnovalue",
            }
        )
        self.assertDictEqual(
            self.d.getVarFlags("foo", internalflags=True),
            {
                "_content": "value of foo",
                "flag1": "value of flag1",
                "_defaultval_flag_flag1": "default of flag1",
                "_defaultval_flag_flagnovalue": "default of flagnovalue",
            }
        )

    def test_delvar(self):
        self.d.delVar("foo")
        self.assertEqual(self.d.getVarFlag("foo", "flag1", False), None)
        self.assertEqual(self.d.getVarFlag("foo", "flag2", False), None)
        self.assertEqual(self.d.getVarFlags("foo", internalflags=True), None)

class Contains(unittest.TestCase):
    def setUp(self):

File diff suppressed because it is too large
@@ -75,59 +75,6 @@ unset B[flag]
        self.assertEqual(d.getVarFlag("A","flag"), None)
        self.assertEqual(d.getVar("B"), "2")

    defaulttest = """
A = "set value"
A ??= "default value"

A[flag_set_vs_question] = "set flag"
A[flag_set_vs_question] ?= "question flag"

A[flag_set_vs_default] = "set flag"
A[flag_set_vs_default] ??= "default flag"

A[flag_question] ?= "question flag"

A[flag_default] ??= "default flag"

A[flag_question_vs_default] ?= "question flag"
A[flag_question_vs_default] ??= "default flag"

A[flag_default_vs_question] ??= "default flag"
A[flag_default_vs_question] ?= "question flag"

A[flag_set_question_default] = "set flag"
A[flag_set_question_default] ?= "question flag"
A[flag_set_question_default] ??= "default flag"

A[flag_set_default_question] = "set flag"
A[flag_set_default_question] ??= "default flag"
A[flag_set_default_question] ?= "question flag"

A[flag_set_twice] = "set flag first"
A[flag_set_twice] = "set flag second"

A[flag_question_twice] ?= "question flag first"
A[flag_question_twice] ?= "question flag second"

A[flag_default_twice] ??= "default flag first"
A[flag_default_twice] ??= "default flag second"
"""
    def test_parse_defaulttest(self):
        f = self.parsehelper(self.defaulttest)
        d = bb.parse.handle(f.name, self.d)['']
        self.assertEqual(d.getVar("A"), "set value")
        self.assertEqual(d.getVarFlag("A","flag_set_vs_question"), "set flag")
        self.assertEqual(d.getVarFlag("A","flag_set_vs_default"), "set flag")
        self.assertEqual(d.getVarFlag("A","flag_question"), "question flag")
        self.assertEqual(d.getVarFlag("A","flag_default"), "default flag")
        self.assertEqual(d.getVarFlag("A","flag_question_vs_default"), "question flag")
        self.assertEqual(d.getVarFlag("A","flag_default_vs_question"), "question flag")
        self.assertEqual(d.getVarFlag("A","flag_set_question_default"), "set flag")
        self.assertEqual(d.getVarFlag("A","flag_set_default_question"), "set flag")
        self.assertEqual(d.getVarFlag("A","flag_set_twice"), "set flag second")
        self.assertEqual(d.getVarFlag("A","flag_question_twice"), "question flag first")
        self.assertEqual(d.getVarFlag("A","flag_default_twice"), "default flag second")

    exporttest = """
A = "a"
export B = "b"
@@ -230,19 +177,7 @@ python () {

    addtask_deltask = """
addtask do_patch after do_foo after do_unpack before do_configure before do_compile
addtask do_fetch2 do_patch2

addtask do_myplaintask
addtask do_myplaintask2
deltask do_myplaintask2
addtask do_mytask# comment
addtask do_mytask2 # comment2
addtask do_mytask3
deltask do_mytask3# comment
deltask do_mytask4 # comment2

# Ensure a missing task prefix on after works
addtask do_mytask5 after mytask
addtask do_fetch do_patch

MYVAR = "do_patch"
EMPTYVAR = ""
@@ -250,12 +185,17 @@ deltask do_fetch ${MYVAR} ${EMPTYVAR}
deltask ${EMPTYVAR}
"""
    def test_parse_addtask_deltask(self):
        import sys

        f = self.parsehelper(self.addtask_deltask)
        d = bb.parse.handle(f.name, self.d)['']
        with self.assertLogs() as logs:
            f = self.parsehelper(self.addtask_deltask)
            d = bb.parse.handle(f.name, self.d)['']

        self.assertSequenceEqual(['do_fetch2', 'do_patch2', 'do_myplaintask', 'do_mytask', 'do_mytask2', 'do_mytask5'], bb.build.listtasks(d))
        self.assertEqual(['do_mytask'], d.getVarFlag("do_mytask5", "deps"))
        output = "".join(logs.output)
        self.assertTrue("addtask contained multiple 'before' keywords" in output)
        self.assertTrue("addtask contained multiple 'after' keywords" in output)
        self.assertTrue('addtask ignored: " do_patch"' in output)
        #self.assertTrue('dependent task do_foo for do_patch does not exist' in output)

    broken_multiline_comment = """
# First line of comment \\
@@ -401,65 +341,3 @@ EXPORT_FUNCTIONS do_compile do_compilepython
        self.assertIn("else", d.getVar("do_compilepython"))
        check_function_flags(d)

    export_function_unclosed_tab = """
do_compile () {
    bb.note("Something")
\t}
"""
    export_function_unclosed_space = """
do_compile () {
    bb.note("Something")
 }
"""
    export_function_residue = """
do_compile () {
    bb.note("Something")
}

include \\
"""

    def test_unclosed_functions(self):
        def test_helper(content, expected_error):
            with tempfile.TemporaryDirectory() as tempdir:
                recipename = tempdir + "/recipe_unclosed.bb"
                with open(recipename, "w") as f:
                    f.write(content)
                    f.flush()
                os.chdir(tempdir)
                with self.assertRaises(bb.parse.ParseError) as error:
                    bb.parse.handle(recipename, bb.data.createCopy(self.d))
                self.assertIn(expected_error, str(error.exception))

        with tempfile.TemporaryDirectory() as tempdir:
            test_helper(self.export_function_unclosed_tab, "Unparsed lines from unclosed function")
            test_helper(self.export_function_unclosed_space, "Unparsed lines from unclosed function")
            test_helper(self.export_function_residue, "Unparsed lines")

            recipename_closed = tempdir + "/recipe_closed.bb"
            with open(recipename_closed, "w") as in_file:
                lines = self.export_function_unclosed_tab.split("\n")
                lines[3] = "}"
                in_file.write("\n".join(lines))
                in_file.flush()
            bb.parse.handle(recipename_closed, bb.data.createCopy(self.d))

    special_character_assignment = """
A+="a"
A+ = "b"
+ = "c"
"""
    ambigous_assignment = """
+= "d"
"""
    def test_parse_special_character_assignment(self):
        f = self.parsehelper(self.special_character_assignment)
        d = bb.parse.handle(f.name, self.d)['']
        self.assertEqual(d.getVar("A"), " a")
        self.assertEqual(d.getVar("A+"), "b")
        self.assertEqual(d.getVar("+"), "c")

        f = self.parsehelper(self.ambigous_assignment)
        with self.assertRaises(bb.parse.ParseError) as error:
            bb.parse.handle(f.name, self.d)
        self.assertIn("Empty variable name in assignment", str(error.exception))

129 bitbake/lib/bb/tests/persist_data.py (new file)
@@ -0,0 +1,129 @@
#
# BitBake Test for lib/bb/persist_data/
#
# Copyright (C) 2018 Garmin Ltd.
#
# SPDX-License-Identifier: GPL-2.0-only
#

import unittest
import bb.data
import bb.persist_data
import tempfile
import threading

class PersistDataTest(unittest.TestCase):
    def _create_data(self):
        return bb.persist_data.persist('TEST_PERSIST_DATA', self.d)

    def setUp(self):
        self.d = bb.data.init()
        self.tempdir = tempfile.TemporaryDirectory()
        self.d['PERSISTENT_DIR'] = self.tempdir.name
        self.data = self._create_data()
        self.items = {
            'A1': '1',
            'B1': '2',
            'C2': '3'
        }
        self.stress_count = 10000
        self.thread_count = 5

        for k,v in self.items.items():
            self.data[k] = v

    def tearDown(self):
        self.tempdir.cleanup()

    def _iter_helper(self, seen, iterator):
        with iter(iterator):
            for v in iterator:
                self.assertTrue(v in seen)
                seen.remove(v)
        self.assertEqual(len(seen), 0, '%s not seen' % seen)

    def test_get(self):
        for k, v in self.items.items():
            self.assertEqual(self.data[k], v)

        self.assertIsNone(self.data.get('D'))
        with self.assertRaises(KeyError):
            self.data['D']

    def test_set(self):
        for k, v in self.items.items():
            self.data[k] += '-foo'

        for k, v in self.items.items():
            self.assertEqual(self.data[k], v + '-foo')

    def test_delete(self):
        self.data['D'] = '4'
        self.assertEqual(self.data['D'], '4')
        del self.data['D']
        self.assertIsNone(self.data.get('D'))
        with self.assertRaises(KeyError):
            self.data['D']

    def test_contains(self):
        for k in self.items:
            self.assertTrue(k in self.data)
            self.assertTrue(self.data.has_key(k))
        self.assertFalse('NotFound' in self.data)
        self.assertFalse(self.data.has_key('NotFound'))

    def test_len(self):
        self.assertEqual(len(self.data), len(self.items))

    def test_iter(self):
        self._iter_helper(set(self.items.keys()), self.data)

    def test_itervalues(self):
        self._iter_helper(set(self.items.values()), self.data.itervalues())

    def test_iteritems(self):
        self._iter_helper(set(self.items.items()), self.data.iteritems())

    def test_get_by_pattern(self):
        self._iter_helper({'1', '2'}, self.data.get_by_pattern('_1'))

    def _stress_read(self, data):
        for i in range(self.stress_count):
            for k in self.items:
                data[k]

    def _stress_write(self, data):
        for i in range(self.stress_count):
            for k, v in self.items.items():
                data[k] = v + str(i)

    def _validate_stress(self):
        for k, v in self.items.items():
            self.assertEqual(self.data[k], v + str(self.stress_count - 1))

    def test_stress(self):
        self._stress_read(self.data)
        self._stress_write(self.data)
        self._validate_stress()

    def test_stress_threads(self):
        def read_thread():
            data = self._create_data()
            self._stress_read(data)

        def write_thread():
            data = self._create_data()
            self._stress_write(data)

        threads = []
        for i in range(self.thread_count):
            threads.append(threading.Thread(target=read_thread))
            threads.append(threading.Thread(target=write_thread))

        for t in threads:
            t.start()
        self._stress_read(self.data)
        for t in threads:
            t.join()
        self._validate_stress()

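If you want to exercise this new module on its own, bitbake's test runner can target it directly, e.g. `bitbake-selftest bb.tests.persist_data` (assuming an initialized build environment so the script is on PATH).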
@@ -9,7 +9,7 @@ def stamptask(d):
        with open(stampname, "a+") as f:
            f.write(d.getVar("BB_UNIHASH") + "\n")

    if d.getVar("BB_CURRENT_MC") != "":
    if d.getVar("BB_CURRENT_MC") != "default":
        thistask = d.expand("${BB_CURRENT_MC}:${PN}:${BB_CURRENTTASK}")
        if thistask in d.getVar("SLOWTASKS").split():
            bb.note("Slowing task %s" % thistask)

@@ -1,2 +0,0 @@
do_build[mcdepends] = "mc::mc-1:h1:do_invalid"

@@ -26,7 +26,7 @@ class RunQueueTests(unittest.TestCase):
    a1_sstatevalid = "a1:do_package a1:do_package_qa a1:do_packagedata a1:do_package_write_ipk a1:do_package_write_rpm a1:do_populate_lic a1:do_populate_sysroot"
    b1_sstatevalid = "b1:do_package b1:do_package_qa b1:do_packagedata b1:do_package_write_ipk b1:do_package_write_rpm b1:do_populate_lic b1:do_populate_sysroot"

    def run_bitbakecmd(self, cmd, builddir, sstatevalid="", slowtasks="", extraenv=None, cleanup=False, allowfailure=False):
    def run_bitbakecmd(self, cmd, builddir, sstatevalid="", slowtasks="", extraenv=None, cleanup=False):
        env = os.environ.copy()
        env["BBPATH"] = os.path.realpath(os.path.join(os.path.dirname(__file__), "runqueue-tests"))
        env["BB_ENV_PASSTHROUGH_ADDITIONS"] = "SSTATEVALID SLOWTASKS TOPDIR"
@@ -41,8 +41,6 @@ class RunQueueTests(unittest.TestCase):
            output = subprocess.check_output(cmd, env=env, stderr=subprocess.STDOUT,universal_newlines=True, cwd=builddir)
            print(output)
        except subprocess.CalledProcessError as e:
            if allowfailure:
                return e.output
            self.fail("Command %s failed with %s" % (cmd, e.output))
        tasks = []
        tasklog = builddir + "/task.log"
@@ -316,13 +314,6 @@ class RunQueueTests(unittest.TestCase):
                   ["mc_2:a1:%s" % t for t in rerun_tasks]
        self.assertEqual(set(tasks), set(expected))

        # Check that a multiconfig that doesn't exist raises a correct error message
        error_output = self.run_bitbakecmd(["bitbake", "g1"], tempdir, "", extraenv=extraenv, cleanup=True, allowfailure=True)
        self.assertIn("non-existent task", error_output)
        # If the word 'Traceback' or 'KeyError' is in the output we've regressed
        self.assertNotIn("Traceback", error_output)
        self.assertNotIn("KeyError", error_output)

        self.shutdown(tempdir)

    def test_hashserv_single(self):

@@ -130,14 +130,6 @@ class Checksum(unittest.TestCase):
            checksum = bb.utils.sha256_file(f.name)
        self.assertEqual(checksum, "fcfbae8bf6b721dbb9d2dc6a9334a58f2031a9a9b302999243f99da4d7f12d0f")

    def test_goh1(self):
        import hashlib
        with tempfile.NamedTemporaryFile() as f:
            f.write(self.filler)
            f.flush()
            checksum = bb.utils.goh1_file(f.name)
        self.assertEqual(checksum, "81191f04d4abf413e5badd234814e4202d9efa73e6f9437e9ddd6b8165b569ef")

class EditMetadataFile(unittest.TestCase):
    _origfile = """
# A comment

@@ -15,7 +15,6 @@ import atexit
import re
from collections import OrderedDict, defaultdict
from functools import partial
from contextlib import contextmanager

import bb.cache
import bb.cooker
@@ -189,19 +188,11 @@ class TinfoilCookerAdapter:
                self._cache[name] = attrvalue
            return attrvalue

    class TinfoilSkiplistByMcAdapter:
        def __init__(self, tinfoil):
            self.tinfoil = tinfoil

        def __getitem__(self, mc):
            return self.tinfoil.get_skipped_recipes(mc)

    def __init__(self, tinfoil):
        self.tinfoil = tinfoil
        self.multiconfigs = [''] + (tinfoil.config_data.getVar('BBMULTICONFIG') or '').split()
        self.collections = {}
        self.recipecaches = {}
        self.skiplist_by_mc = self.TinfoilSkiplistByMcAdapter(tinfoil)
        for mc in self.multiconfigs:
            self.collections[mc] = self.TinfoilCookerCollectionAdapter(tinfoil, mc)
            self.recipecaches[mc] = self.TinfoilRecipeCacheAdapter(tinfoil, mc)
@@ -210,6 +201,8 @@ class TinfoilCookerAdapter:
        # Grab these only when they are requested since they aren't always used
        if name in self._cache:
            return self._cache[name]
        elif name == 'skiplist':
            attrvalue = self.tinfoil.get_skipped_recipes()
        elif name == 'bbfile_config_priorities':
            ret = self.tinfoil.run_command('getLayerPriorities')
            bbfile_config_priorities = []
@@ -521,12 +514,12 @@ class Tinfoil:
        """
        return defaultdict(list, self.run_command('getOverlayedRecipes', mc))

    def get_skipped_recipes(self, mc=''):
    def get_skipped_recipes(self):
        """
        Find recipes which were skipped (i.e. SkipRecipe was raised
        during parsing).
        """
        return OrderedDict(self.run_command('getSkippedRecipes', mc))
        return OrderedDict(self.run_command('getSkippedRecipes'))

    def get_all_providers(self, mc=''):
        return defaultdict(list, self.run_command('allProviders', mc))
@@ -540,7 +533,6 @@ class Tinfoil:
    def get_runtime_providers(self, rdep):
        return self.run_command('getRuntimeProviders', rdep)

    # TODO: teach this method about mc
    def get_recipe_file(self, pn):
        """
        Get the file name for the specified recipe/target. Raises
@@ -549,7 +541,6 @@ class Tinfoil:
        """
        best = self.find_best_provider(pn)
        if not best or (len(best) > 3 and not best[3]):
            # TODO: pass down mc
            skiplist = self.get_skipped_recipes()
            taskdata = bb.taskdata.TaskData(None, skiplist=skiplist)
            skipreasons = taskdata.get_reasons(pn)
@@ -642,29 +633,6 @@ class Tinfoil:
        fn = self.get_recipe_file(pn)
        return self.parse_recipe_file(fn)

    @contextmanager
    def _data_tracked_if_enabled(self):
        """
        A context manager to enable data tracking for a code segment if data
        tracking was enabled for this tinfoil instance.
        """
        if self.tracking:
            # Enable history tracking just for the operation
            self.run_command('enableDataTracking')

        # Here goes the operation with the optional data tracking
        yield

        if self.tracking:
            self.run_command('disableDataTracking')

    def finalizeData(self):
        """
        Run anonymous functions and expand keys
        """
        with self._data_tracked_if_enabled():
            return self._reconvert_type(self.run_command('finalizeData'), 'DataStoreConnectionHandle')

    def parse_recipe_file(self, fn, appends=True, appendlist=None, config_data=None):
        """
        Parse the specified recipe file (with or without bbappends)
@@ -677,7 +645,10 @@ class Tinfoil:
            appendlist: optional list of bbappend files to apply, if you
                        want to filter them
        """
        with self._data_tracked_if_enabled():
        if self.tracking:
            # Enable history tracking just for the parse operation
            self.run_command('enableDataTracking')
        try:
            if appends and appendlist == []:
                appends = False
            if config_data:
@@ -689,6 +660,9 @@ class Tinfoil:
                return self._reconvert_type(dscon, 'DataStoreConnectionHandle')
            else:
                return None
        finally:
            if self.tracking:
                self.run_command('disableDataTracking')

    def build_file(self, buildfile, task, internal=True):
        """

@@ -559,10 +559,7 @@ class ORMWrapper(object):
                # we might have an invalid link; no way to detect this. just set it to None
                filetarget_obj = None

            try:
                parent_obj = Target_File.objects.get(target = target_obj, path = parent_path, inodetype = Target_File.ITYPE_DIRECTORY)
            except Target_File.DoesNotExist:
                parent_obj = None
            parent_obj = Target_File.objects.get(target = target_obj, path = parent_path, inodetype = Target_File.ITYPE_DIRECTORY)

            Target_File.objects.create(
                target = target_obj,

@@ -24,12 +24,6 @@ import atexit
from itertools import groupby

from bb.ui import uihelper
import bb.build
import bb.command
import bb.cooker
import bb.event
import bb.runqueue
import bb.utils

featureSet = [bb.cooker.CookerFeatures.SEND_SANITYEVENTS, bb.cooker.CookerFeatures.BASEDATASTORE_TRACKING]

@@ -109,7 +103,7 @@ def new_progress(msg, maxval):
    return NonInteractiveProgress(msg, maxval)

def pluralise(singular, plural, qty):
    if qty == 1:
    if(qty == 1):
        return singular % qty
    else:
        return plural % qty
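A quick illustration of pluralise() with made-up counts:

    print(pluralise("Summary: %s task failed:", "Summary: %s tasks failed:", 1))
    # Summary: 1 task failed:
    print(pluralise("Summary: %s task failed:", "Summary: %s tasks failed:", 3))
    # Summary: 3 tasks failed: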
@@ -118,7 +112,6 @@ def pluralise(singular, plural, qty):
class InteractConsoleLogFilter(logging.Filter):
    def __init__(self, tf):
        self.tf = tf
        super().__init__()

    def filter(self, record):
        if record.levelno == bb.msg.BBLogFormatter.NOTE and (record.msg.startswith("Running") or record.msg.startswith("recipe ")):
@@ -562,23 +555,13 @@ def main(server, eventHandler, params, tf = TerminalFilter):
                    }
                })

        consolelogdirname = os.path.dirname(consolelogfile)
        # `bb.utils.mkdirhier` has this check, but it reports failure using bb.fatal, which logs
        # to the very logger we are trying to set up.
        if '${' in str(consolelogdirname):
            print(
                "FATAL: Directory name {} contains unexpanded bitbake variable. This may cause build failures and WORKDIR pollution.".format(
                    consolelogdirname))
            if '${MACHINE}' in consolelogdirname:
                print("HINT: It looks like you forgot to set MACHINE in local.conf.")

        bb.utils.mkdirhier(consolelogdirname)
        loglink = os.path.join(consolelogdirname, 'console-latest.log')
        bb.utils.mkdirhier(os.path.dirname(consolelogfile))
        loglink = os.path.join(os.path.dirname(consolelogfile), 'console-latest.log')
        bb.utils.remove(loglink)
        try:
            os.symlink(os.path.basename(consolelogfile), loglink)
            os.symlink(os.path.basename(consolelogfile), loglink)
        except OSError:
            pass
            pass

        # Add the logging domains specified by the user on the command line
        for (domainarg, iterator) in groupby(params.debug_domains):
@@ -594,8 +577,6 @@ def main(server, eventHandler, params, tf = TerminalFilter):
    else:
        log_exec_tty = False

    should_print_hyperlinks = sys.stdout.isatty() and os.environ.get('NO_COLOR', '') == ''

    helper = uihelper.BBUIHelper()

    # Look for the specially designated handlers which need to be passed to the
@@ -659,7 +640,7 @@ def main(server, eventHandler, params, tf = TerminalFilter):
    return_value = 0
    errors = 0
    warnings = 0
    taskfailures = {}
    taskfailures = []

    printintervaldelta = 10 * 60 # 10 minutes
    printinterval = printintervaldelta
@@ -745,8 +726,6 @@ def main(server, eventHandler, params, tf = TerminalFilter):
            if isinstance(event, bb.build.TaskFailed):
                return_value = 1
                print_event_log(event, includelogs, loglines, termfilter)
                k = "{}:{}".format(event._fn, event._task)
                taskfailures[k] = event.logfile
            if isinstance(event, bb.build.TaskBase):
                logger.info(event._message)
                continue
@@ -842,7 +821,7 @@ def main(server, eventHandler, params, tf = TerminalFilter):

            if isinstance(event, bb.runqueue.runQueueTaskFailed):
                return_value = 1
                taskfailures.setdefault(event.taskstring)
                taskfailures.append(event.taskstring)
                logger.error(str(event))
                continue

@@ -963,21 +942,11 @@ def main(server, eventHandler, params, tf = TerminalFilter):
    try:
        termfilter.clearFooter()
        summary = ""
        def format_hyperlink(url, link_text):
            if should_print_hyperlinks:
                start = f'\033]8;;{url}\033\\'
                end = '\033]8;;\033\\'
                return f'{start}{link_text}{end}'
            return link_text

        if taskfailures:
            summary += pluralise("\nSummary: %s task failed:",
                                 "\nSummary: %s tasks failed:", len(taskfailures))
            for (failure, log_file) in taskfailures.items():
            for failure in taskfailures:
                summary += "\n %s" % failure
                if log_file:
                    hyperlink = format_hyperlink(f"file://{log_file}", log_file)
                    summary += "\n log: {}".format(hyperlink)
        if warnings:
            summary += pluralise("\nSummary: There was %s WARNING message.",
                                 "\nSummary: There were %s WARNING messages.", warnings)

@@ -30,6 +30,7 @@ import bb.build
import bb.command
import bb.cooker
import bb.event
import bb.exceptions
import bb.runqueue
from bb.ui import uihelper

@@ -101,6 +102,10 @@ class TeamcityLogFormatter(logging.Formatter):
        details = ""
        if hasattr(record, 'bb_exc_formatted'):
            details = ''.join(record.bb_exc_formatted)
        elif hasattr(record, 'bb_exc_info'):
            etype, value, tb = record.bb_exc_info
            formatted = bb.exceptions.format_exception(etype, value, tb, limit=5)
            details = ''.join(formatted)

        if record.levelno in [bb.msg.BBLogFormatter.ERROR, bb.msg.BBLogFormatter.CRITICAL]:
            # ERROR gets a separate errorDetails field

@@ -31,7 +31,7 @@ class BBUIHelper:

        if isinstance(event, bb.build.TaskStarted):
            tid = event._fn + ":" + event._task
            if event._mc != "":
            if event._mc != "default":
                self.running_tasks[tid] = { 'title' : "mc:%s:%s %s" % (event._mc, event._package, event._task), 'starttime' : time.time(), 'pid' : event.pid }
            else:
                self.running_tasks[tid] = { 'title' : "%s %s" % (event._package, event._task), 'starttime' : time.time(), 'pid' : event.pid }

@@ -11,8 +11,11 @@ import re, fcntl, os, string, stat, shutil, time
import sys
import errno
import logging
import bb
import bb.msg
import locale
import multiprocessing
import fcntl
import importlib
import importlib.machinery
import importlib.util
@@ -21,6 +24,7 @@ import subprocess
import glob
import fnmatch
import traceback
import errno
import signal
import collections
import copy
@@ -32,8 +36,6 @@ import tempfile
from subprocess import getstatusoutput
from contextlib import contextmanager
from ctypes import cdll
import bb
import bb.msg

logger = logging.getLogger("BitBake.Util")
python_extensions = importlib.machinery.all_suffixes()
@@ -444,7 +446,6 @@ def fileslocked(files, *args, **kwargs):
    try:
        yield
    finally:
        locks.reverse()
        for lock in locks:
            bb.utils.unlockfile(lock)

@@ -583,31 +584,6 @@ def sha512_file(filename):
    import hashlib
    return _hasher(hashlib.sha512(), filename)

def goh1_file(filename):
    """
    Return the hex string representation of the Go mod h1 checksum of the
    filename. The Go mod h1 checksum uses the Go dirhash package. The package
    defines hashes over directory trees and is used by go mod for mod files and
    zip archives.
    """
    import hashlib
    import zipfile

    lines = []
    if zipfile.is_zipfile(filename):
        with zipfile.ZipFile(filename) as archive:
            for fn in sorted(archive.namelist()):
                method = hashlib.sha256()
                method.update(archive.read(fn))
                hash = method.hexdigest()
                lines.append("%s %s\n" % (hash, fn))
    else:
        hash = _hasher(hashlib.sha256(), filename)
        lines.append("%s go.mod\n" % hash)
    method = hashlib.sha256()
    method.update("".join(lines).encode('utf-8'))
    return method.hexdigest()

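Put differently, the h1 digest is a hash over per-file hash lines, in the spirit of Go's dirhash package; a compact re-derivation of the non-zip branch (line format copied from the code above):

    import hashlib

    def goh1_of_modfile(path):
        # sha256 the file, emit one "hash name" line, then sha256 the lines.
        with open(path, "rb") as f:
            file_hash = hashlib.sha256(f.read()).hexdigest()
        summary = "%s go.mod\n" % file_hash
        return hashlib.sha256(summary.encode("utf-8")).hexdigest()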
def preserved_envvars_exported():
    """Variables which are taken from the environment and placed in and exported
    from the metadata"""
@@ -1455,6 +1431,8 @@ def edit_bblayers_conf(bblayers_conf, add, remove, edit_cb=None):
        but weren't (because they weren't in the list)
    """

    import fnmatch

    def remove_trailing_sep(pth):
        if pth and pth[-1] == os.sep:
            pth = pth[:-1]
@@ -1645,7 +1623,7 @@ def ioprio_set(who, cls, value):
        bb.warn("Unable to set IO Prio for arch %s" % _unamearch)

def set_process_name(name):
    from ctypes import byref, create_string_buffer
    from ctypes import cdll, byref, create_string_buffer
    # This is nice to have for debugging, not essential
    try:
        libc = cdll.LoadLibrary('libc.so.6')
@@ -1879,30 +1857,12 @@ def path_is_descendant(descendant, ancestor):
# If we don't have a timeout of some kind and a process/thread exits badly (for example
# OOM killed) and held a lock, we'd just hang in the lock futex forever. It is better
# we exit at some point than hang. 5 minutes with no progress means we're probably deadlocked.
# This function can still deadlock python since it can't signal the other threads to exit
# (signals are handled in the main thread) and even os._exit() will wait on non-daemon threads
# to exit.
@contextmanager
def lock_timeout(lock):
    held = lock.acquire(timeout=5*60)
    try:
        s = signal.pthread_sigmask(signal.SIG_BLOCK, signal.valid_signals())
        held = lock.acquire(timeout=5*60)
        if not held:
            bb.server.process.serverlog("Couldn't get the lock for 5 mins, timed out, exiting.\n%s" % traceback.format_stack())
            os._exit(1)
        yield held
    finally:
        lock.release()
        signal.pthread_sigmask(signal.SIG_SETMASK, s)

# A version of lock_timeout without the check that the lock was locked and a shorter timeout
@contextmanager
def lock_timeout_nocheck(lock):
    try:
        s = signal.pthread_sigmask(signal.SIG_BLOCK, signal.valid_signals())
        l = lock.acquire(timeout=10)
        yield l
    finally:
        if l:
            lock.release()
        signal.pthread_sigmask(signal.SIG_SETMASK, s)

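Typical use of lock_timeout() looks like the following (a minimal sketch; the lock and function names are made up):

    import threading
    import bb.utils

    _cache_lock = threading.Lock()

    def update_shared_state():
        # Exits the process rather than hanging forever if the lock cannot
        # be acquired within five minutes.
        with bb.utils.lock_timeout(_cache_lock):
            pass  # ... mutate shared state here ...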
@@ -50,8 +50,8 @@ class ActionPlugin(LayerPlugin):

        try:
            notadded, _ = bb.utils.edit_bblayers_conf(bblayers_conf, layerdirs, None)
            self.tinfoil.modified_files()
            if not (args.force or notadded):
                self.tinfoil.modified_files()
                try:
                    self.tinfoil.run_command('parseConfiguration')
                except (bb.tinfoil.TinfoilUIException, bb.BBHandledException):
@@ -83,8 +83,6 @@ class ActionPlugin(LayerPlugin):
            layerdir = os.path.abspath(item)
            layerdirs.append(layerdir)
        (_, notremoved) = bb.utils.edit_bblayers_conf(bblayers_conf, None, layerdirs)
        if args.force > 1:
            return 0
        self.tinfoil.modified_files()
        if notremoved:
            for item in notremoved:

@@ -142,10 +142,10 @@ skipped recipes will also be listed, with a " (skipped)" suffix.
        # Ensure we list skipped recipes
        # We are largely guessing about PN, PV and the preferred version here,
        # but we have no choice since skipped recipes are not fully parsed
        skiplist = list(self.tinfoil.cooker.skiplist_by_mc[mc].keys())

        skiplist = list(self.tinfoil.cooker.skiplist.keys())
        mcspec = 'mc:%s:' % mc
        if mc:
            skiplist = [s.removeprefix(f'mc:{mc}:') for s in skiplist]
            skiplist = [s[len(mcspec):] for s in skiplist if s.startswith(mcspec)]

        for fn in skiplist:
            recipe_parts = os.path.splitext(os.path.basename(fn))[0].split('_')
@@ -162,7 +162,7 @@ skipped recipes will also be listed, with a " (skipped)" suffix.
        def print_item(f, pn, ver, layer, ispref):
            if not selected_layer or layer == selected_layer:
                if not bare and f in skiplist:
                    skipped = ' (skipped: %s)' % self.tinfoil.cooker.skiplist_by_mc[mc][f].skipreason
                    skipped = ' (skipped: %s)' % self.tinfoil.cooker.skiplist[f].skipreason
                else:
                    skipped = ''
                if show_filenames:
@@ -301,7 +301,7 @@ Lists recipes with the bbappends that apply to them as subitems.
            if self.show_appends_for_pn(pn, cooker_data, args.mc):
                appends = True

        if not args.pnspec and self.show_appends_for_skipped(args.mc):
        if not args.pnspec and self.show_appends_for_skipped():
            appends = True

        if not appends:
@@ -317,9 +317,9 @@ Lists recipes with the bbappends that apply to them as subitems.

        return self.show_appends_output(filenames, best_filename)

    def show_appends_for_skipped(self, mc):
    def show_appends_for_skipped(self):
        filenames = [os.path.basename(f)
                     for f in self.tinfoil.cooker.skiplist_by_mc[mc].keys()]
                     for f in self.tinfoil.cooker.skiplist.keys()]
        return self.show_appends_output(filenames, None, " (skipped)")

    def show_appends_output(self, filenames, best_filename, name_suffix = ''):
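The two list comprehensions in the hunk above are the heart of the change: skiplist keys carry an "mc:<name>:" prefix, and the listing keeps only the entries for the requested multiconfig before stripping that prefix. A small sketch of the older (scarthgap-side) filtering, with made-up paths:

    # Hypothetical skiplist keys; real keys come from tinfoil.cooker.
    skiplist = [
        "mc:qemuarm:/srv/meta/recipes-foo/foo_1.0.bb",
        "/srv/meta/recipes-bar/bar_2.0.bb",
    ]
    mc = "qemuarm"
    if mc:
        mcspec = 'mc:%s:' % mc
        # Keep only this multiconfig's entries, then drop the prefix.
        skiplist = [s[len(mcspec):] for s in skiplist if s.startswith(mcspec)]
    print(skiplist)  # ['/srv/meta/recipes-foo/foo_1.0.bb']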
@@ -1,49 +0,0 @@
Behold, mortal, the origins of Beautiful Soup...
================================================

Leonard Richardson is the primary maintainer.

Aaron DeVore and Isaac Muse have made significant contributions to the
code base.

Mark Pilgrim provided the encoding detection code that forms the base
of UnicodeDammit.

Thomas Kluyver and Ezio Melotti finished the work of getting Beautiful
Soup 4 working under Python 3.

Simon Willison wrote soupselect, which was used to make Beautiful Soup
support CSS selectors. Isaac Muse wrote SoupSieve, which made it
possible to _remove_ the CSS selector code from Beautiful Soup.

Sam Ruby helped with a lot of edge cases.

Jonathan Ellis was awarded the prestigious Beau Potage D'Or for his
work in solving the nestable tags conundrum.

An incomplete list of people have contributed patches to Beautiful
Soup:

Istvan Albert, Andrew Lin, Anthony Baxter, Oliver Beattie, Andrew
Boyko, Tony Chang, Francisco Canas, "Delong", Zephyr Fang, Fuzzy,
Roman Gaufman, Yoni Gilad, Richie Hindle, Toshihiro Kamiya, Peteris
Krumins, Kent Johnson, Marek Kapolka, Andreas Kostyrka, Roel Kramer,
Ben Last, Robert Leftwich, Stefaan Lippens, "liquider", Staffan
Malmgren, Ksenia Marasanova, JP Moins, Adam Monsen, John Nagle, "Jon",
Ed Oskiewicz, Martijn Peters, Greg Phillips, Giles Radford, Stefano
Revera, Arthur Rudolph, Marko Samastur, James Salter, Jouni Seppänen,
Alexander Schmolck, Tim Shirley, Geoffrey Sneddon, Ville Skyttä,
"Vikas", Jens Svalgaard, Andy Theyers, Eric Weiser, Glyn Webster, John
Wiseman, Paul Wright, Danny Yoo

An incomplete list of people who made suggestions or found bugs or
found ways to break Beautiful Soup:

Hanno Böck, Matteo Bertini, Chris Curvey, Simon Cusack, Bruce Eckel,
Matt Ernst, Michael Foord, Tom Harris, Bill de hOra, Donald Howes,
Matt Patterson, Scott Roberts, Steve Strassmann, Mike Williams,
warchild at redho dot com, Sami Kuisma, Carlos Rocha, Bob Hutchison,
Joren Mc, Michal Migurski, John Kleven, Tim Heaney, Tripp Lilley, Ed
Summers, Dennis Sutch, Chris Smith, Aaron Swartz, Stuart
Turner, Greg Edwards, Kevin J Kalupson, Nikos Kouremenos, Artur de
Sousa Rocha, Yichun Wei, Per Vognsen
43
bitbake/lib/bs4/AUTHORS.txt
Normal file
@@ -0,0 +1,43 @@
Behold, mortal, the origins of Beautiful Soup...
================================================

Leonard Richardson is the primary programmer.

Aaron DeVore is awesome.

Mark Pilgrim provided the encoding detection code that forms the base
of UnicodeDammit.

Thomas Kluyver and Ezio Melotti finished the work of getting Beautiful
Soup 4 working under Python 3.

Simon Willison wrote soupselect, which was used to make Beautiful Soup
support CSS selectors.

Sam Ruby helped with a lot of edge cases.

Jonathan Ellis was awarded the prestigous Beau Potage D'Or for his
work in solving the nestable tags conundrum.

An incomplete list of people have contributed patches to Beautiful
Soup:

Istvan Albert, Andrew Lin, Anthony Baxter, Andrew Boyko, Tony Chang,
Zephyr Fang, Fuzzy, Roman Gaufman, Yoni Gilad, Richie Hindle, Peteris
Krumins, Kent Johnson, Ben Last, Robert Leftwich, Staffan Malmgren,
Ksenia Marasanova, JP Moins, Adam Monsen, John Nagle, "Jon", Ed
Oskiewicz, Greg Phillips, Giles Radford, Arthur Rudolph, Marko
Samastur, Jouni Seppänen, Alexander Schmolck, Andy Theyers, Glyn
Webster, Paul Wright, Danny Yoo

An incomplete list of people who made suggestions or found bugs or
found ways to break Beautiful Soup:

Hanno Böck, Matteo Bertini, Chris Curvey, Simon Cusack, Bruce Eckel,
Matt Ernst, Michael Foord, Tom Harris, Bill de hOra, Donald Howes,
Matt Patterson, Scott Roberts, Steve Strassmann, Mike Williams,
warchild at redho dot com, Sami Kuisma, Carlos Rocha, Bob Hutchison,
Joren Mc, Michal Migurski, John Kleven, Tim Heaney, Tripp Lilley, Ed
Summers, Dennis Sutch, Chris Smith, Aaron Sweep^W Swartz, Stuart
Turner, Greg Edwards, Kevin J Kalupson, Nikos Kouremenos, Artur de
Sousa Rocha, Yichun Wei, Per Vognsen
@@ -1,6 +1,6 @@
Beautiful Soup is made available under the MIT license:

Copyright (c) Leonard Richardson
Copyright (c) 2004-2012 Leonard Richardson

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
@@ -20,12 +20,7 @@ Beautiful Soup is made available under the MIT license:
BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
SOFTWARE, DAMMIT.

Beautiful Soup incorporates code from the html5lib library, which is
also made available under the MIT license. Copyright (c) James Graham
and other contributors

Beautiful Soup has an optional dependency on the soupsieve library,
which is also made available under the MIT license. Copyright (c)
Isaac Muse
also made available under the MIT license.
@@ -1,776 +1,3 @@
= 4.12.3 (20240117)

* The Beautiful Soup documentation now has a Spanish translation, thanks
to Carlos Romero. Delong Wang's Chinese translation has been updated
to cover Beautiful Soup 4.12.0.

* Fixed a regression such that if you set .hidden on a tag, the tag
becomes invisible but its contents are still visible. User manipulation
of .hidden is not a documented or supported feature, so don't do this,
but it wasn't too difficult to keep the old behavior working.

* Fixed a case found by Mengyuhan where html.parser giving up on
markup would result in an AssertionError instead of a
ParserRejectedMarkup exception.

* Added the correct stacklevel to instances of the XMLParsedAsHTMLWarning.
[bug=2034451]

* Corrected the syntax of the license definition in pyproject.toml. Patch
by Louis Maddox. [bug=2032848]

* Corrected a typo in a test that was causing test failures when run against
libxml2 2.12.1. [bug=2045481]

= 4.12.2 (20230407)

* Fixed an unhandled exception in BeautifulSoup.decode_contents
and methods that call it. [bug=2015545]

= 4.12.1 (20230405)

NOTE: the following things are likely to be dropped in the next
feature release of Beautiful Soup:

Official support for Python 3.6.
Inclusion of unit tests and test data in the wheel file.
Two scripts: demonstrate_parser_differences.py and test-all-versions.

Changes:

* This version of Beautiful Soup replaces setup.py and setup.cfg
with pyproject.toml. Beautiful Soup now uses tox as its test backend
and hatch to do builds.

* The main functional improvement in this version is a nonrecursive technique
for regenerating a tree. This technique is used to avoid situations where,
in previous versions, doing something to a very deeply nested tree
would overflow the Python interpreter stack:

1. Outputting a tree as a string, e.g. with
BeautifulSoup.encode() [bug=1471755]

2. Making copies of trees (copy.copy() and
copy.deepcopy() from the Python standard library). [bug=1709837]

3. Pickling a BeautifulSoup object. (Note that pickling a Tag
object can still cause an overflow.)

* Making a copy of a BeautifulSoup object no longer parses the
document again, which should improve performance significantly.

* When a BeautifulSoup object is unpickled, Beautiful Soup now
tries to associate an appropriate TreeBuilder object with it.

* Tag.prettify() will now consistently end prettified markup with
a newline.

* Added unit tests for fuzz test cases created by third
parties. Some of these tests are skipped since they point
to problems outside of Beautiful Soup, but this change
puts them all in one convenient place.

* PageElement now implements the known_xml attribute. (This was technically
a bug, but it shouldn't be an issue in normal use.) [bug=2007895]

* The demonstrate_parser_differences.py script was still written in
Python 2. I've converted it to Python 3, but since no one has
mentioned this over the years, it's a sign that no one uses this
script and it's not serving its purpose.

= 4.12.0 (20230320)

* Introduced the .css property, which centralizes all access to
the Soup Sieve API. This allows Beautiful Soup to give direct
access to as much of Soup Sieve that makes sense, without cluttering
the BeautifulSoup and Tag classes with a lot of new methods.

This does mean one addition to the BeautifulSoup and Tag classes
(the .css property itself), so this might be a breaking change if you
happen to use Beautiful Soup to parse XML that includes a tag called
<css>. In particular, code like this will stop working in 4.12.0:

soup.css['id']

Code like this will work just as before:

soup.find_one('css')['id']

The Soup Sieve methods supported through the .css property are
select(), select_one(), iselect(), closest(), match(), filter(),
escape(), and compile(). The BeautifulSoup and Tag classes still
support the select() and select_one() methods; they have not been
deprecated, but they have been demoted to convenience methods.

[bug=2003677]

* When the html.parser parser decides it can't parse a document, Beautiful
Soup now consistently propagates this fact by raising a
ParserRejectedMarkup error. [bug=2007343]

* Removed some error checking code from diagnose(), which is redundant with
similar (but more Pythonic) code in the BeautifulSoup constructor.
[bug=2007344]

* Added intersphinx references to the documentation so that other
projects have a target to point to when they reference Beautiful
Soup classes. [bug=1453370]

= 4.11.2 (20230131)

* Fixed test failures caused by nondeterministic behavior of
UnicodeDammit's character detection, depending on the platform setup.
[bug=1973072]

* Fixed another crash when overriding multi_valued_attributes and using the
html5lib parser. [bug=1948488]

* The HTMLFormatter and XMLFormatter constructors no longer return a
value. [bug=1992693]

* Tag.interesting_string_types is now propagated when a tag is
copied. [bug=1990400]

* Warnings now do their best to provide an appropriate stacklevel,
improving the usefulness of the message. [bug=1978744]

* Passing a Tag's .contents into PageElement.extend() now works the
same way as passing the Tag itself.

* Soup Sieve tests will be skipped if the library is not installed.

= 4.11.1 (20220408)

This release was done to ensure that the unit tests are packaged along
with the released source. There are no functionality changes in this
release, but there are a few other packaging changes:

* The Japanese and Korean translations of the documentation are included.
* The changelog is now packaged as CHANGELOG, and the license file is
packaged as LICENSE. NEWS.txt and COPYING.txt are still present,
but may be removed in the future.
* TODO.txt is no longer packaged, since a TODO is not relevant for released
code.

= 4.11.0 (20220407)

* Ported unit tests to use pytest.

* Added special string classes, RubyParenthesisString and RubyTextString,
to make it possible to treat ruby text specially in get_text() calls.
[bug=1941980]

* It's now possible to customize the way output is indented by
providing a value for the 'indent' argument to the Formatter
constructor. The 'indent' argument works very similarly to the
argument of the same name in the Python standard library's
json.dump() function. [bug=1955497]

* If the charset-normalizer Python module
(https://pypi.org/project/charset-normalizer/) is installed, Beautiful
Soup will use it to detect the character sets of incoming documents.
This is also the module used by newer versions of the Requests library.
For the sake of backwards compatibility, chardet and cchardet both take
precedence if installed. [bug=1955346]

* Added a workaround for an lxml bug
(https://bugs.launchpad.net/lxml/+bug/1948551) that causes
problems when parsing a Unicode string beginning with BYTE ORDER MARK.
[bug=1947768]

* Issue a warning when an HTML parser is used to parse a document that
looks like XML but not XHTML. [bug=1939121]

* Do a better job of keeping track of namespaces as an XML document is
parsed, so that CSS selectors that use namespaces will do the right
thing more often. [bug=1946243]

* Some time ago, the misleadingly named "text" argument to find-type
methods was renamed to the more accurate "string." But this supposed
"renaming" didn't make it into important places like the method
signatures or the docstrings. That's corrected in this
version. "text" still works, but will give a DeprecationWarning.
[bug=1947038]

* Fixed a crash when pickling a BeautifulSoup object that has no
tree builder. [bug=1934003]

* Fixed a crash when overriding multi_valued_attributes and using the
html5lib parser. [bug=1948488]

* Standardized the wording of the MarkupResemblesLocatorWarning
warnings to omit untrusted input and make the warnings less
judgmental about what you ought to be doing. [bug=1955450]

* Removed support for the iconv_codec library, which doesn't seem
to exist anymore and was never put up on PyPI. (The closest
replacement on PyPI, iconv_codecs, is GPL-licensed, so we can't use
it--it's also quite old.)

= 4.10.0 (20210907)

* This is the first release of Beautiful Soup to only support Python
3. I dropped Python 2 support to maintain support for newer versions
(58 and up) of setuptools. See:
https://github.com/pypa/setuptools/issues/2769 [bug=1942919]

* The behavior of methods like .get_text() and .strings now differs
depending on the type of tag. The change is visible with HTML tags
like <script>, <style>, and <template>. Starting in 4.9.0, methods
like get_text() returned no results on such tags, because the
contents of those tags are not considered 'text' within the document
as a whole.

But a user who calls script.get_text() is working from a different
definition of 'text' than a user who calls div.get_text()--otherwise
there would be no need to call script.get_text() at all. In 4.10.0,
the contents of (e.g.) a <script> tag are considered 'text' during a
get_text() call on the tag itself, but not considered 'text' during
a get_text() call on the tag's parent.

Because of this change, calling get_text() on each child of a tag
may now return a different result than calling get_text() on the tag
itself. That's because different tags now have different
understandings of what counts as 'text'. [bug=1906226] [bug=1868861]
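A brief sketch of the 4.10.0 behavior described above (assumes beautifulsoup4 4.10 or later with the html.parser builder):

    from bs4 import BeautifulSoup

    soup = BeautifulSoup("<div>hi<script>var x = 1;</script></div>", "html.parser")
    print(soup.div.get_text())     # 'hi' -- script contents are not the div's text
    print(soup.script.get_text())  # 'var x = 1;' -- but they are the script's own text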

* NavigableString and its subclasses now implement the get_text()
method, as well as the properties .strings and
.stripped_strings. These methods will either return the string
itself, or nothing, so the only reason to use this is when iterating
over a list of mixed Tag and NavigableString objects. [bug=1904309]

* The 'html5' formatter now treats attributes whose values are the
empty string as HTML boolean attributes. Previously (and in other
formatters), an attribute value must be set as None to be treated as
a boolean attribute. In a future release, I plan to also give this
behavior to the 'html' formatter. Patch by Isaac Muse. [bug=1915424]

* The 'replace_with()' method now takes a variable number of arguments,
and can be used to replace a single element with a sequence of elements.
Patch by Bill Chandos. [rev=605]

* Corrected output when the namespace prefix associated with a
namespaced attribute is the empty string, as opposed to
None. [bug=1915583]

* Performance improvement when processing tags that speeds up overall
tree construction by 2%. Patch by Morotti. [bug=1899358]

* Corrected the use of special string container classes in cases when a
single tag may contain strings with different containers; such as
the <template> tag, which may contain both TemplateString objects
and Comment objects. [bug=1913406]

* The html.parser tree builder can now handle named entities
found in the HTML5 spec in much the same way that the html5lib
tree builder does. Note that the lxml HTML tree builder doesn't handle
named entities this way. [bug=1924908]

* Added a second way to pass specify encodings to UnicodeDammit and
EncodingDetector, based on the order of precedence defined in the
HTML5 spec, starting at:
https://html.spec.whatwg.org/multipage/parsing.html#parsing-with-a-known-character-encoding

Encodings in 'known_definite_encodings' are tried first, then
byte-order-mark sniffing is run, then encodings in 'user_encodings'
are tried. The old argument, 'override_encodings', is now a
deprecated alias for 'known_definite_encodings'.

This changes the default behavior of the html.parser and lxml tree
builders, in a way that may slightly improve encoding
detection but will probably have no effect. [bug=1889014]
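A minimal sketch of the precedence described above (the byte string and encoding name are illustrative):

    from bs4 import UnicodeDammit

    data = b"Sacr\xe9 bleu!"
    # Encodings we are certain about are tried before BOM sniffing or guesses.
    dammit = UnicodeDammit(data, known_definite_encodings=["latin-1"])
    print(dammit.unicode_markup)     # 'Sacré bleu!'
    print(dammit.original_encoding)  # 'latin-1'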

* Improve the warning issued when a directory name (as opposed to
the name of a regular file) is passed as markup into the BeautifulSoup
constructor. [bug=1913628]

= 4.9.3 (20201003)

This is the final release of Beautiful Soup to support Python
2. Beautiful Soup's official support for Python 2 ended on 01 January,
2021. In the Launchpad Git repository, the final revision to support
Python 2 was revision 70f546b1e689a70e2f103795efce6d261a3dadf7; it is
tagged as "python2".

* Implemented a significant performance optimization to the process of
searching the parse tree. Patch by Morotti. [bug=1898212]

= 4.9.2 (20200926)

* Fixed a bug that caused too many tags to be popped from the tag
stack during tree building, when encountering a closing tag that had
no matching opening tag. [bug=1880420]

* Fixed a bug that inconsistently moved elements over when passing
a Tag, rather than a list, into Tag.extend(). [bug=1885710]

* Specify the soupsieve dependency in a way that complies with
PEP 508. Patch by Mike Nerone. [bug=1893696]

* Change the signatures for BeautifulSoup.insert_before and insert_after
(which are not implemented) to match PageElement.insert_before and
insert_after, quieting warnings in some IDEs. [bug=1897120]

= 4.9.1 (20200517)

* Added a keyword argument 'on_duplicate_attribute' to the
BeautifulSoupHTMLParser constructor (used by the html.parser tree
builder) which lets you customize the handling of markup that
contains the same attribute more than once, as in:
<a href="url1" href="url2"> [bug=1878209]
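A sketch of how that keyword is typically reached, passed through the BeautifulSoup constructor to the html.parser builder ('replace' keeps the last value instead of the default first one):

    from bs4 import BeautifulSoup

    markup = '<a href="url1" href="url2">link</a>'
    soup = BeautifulSoup(markup, "html.parser", on_duplicate_attribute="replace")
    print(soup.a["href"])  # 'url2'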

* Added a distinct subclass, GuessedAtParserWarning, for the warning
issued when BeautifulSoup is instantiated without a parser being
specified. [bug=1873787]

* Added a distinct subclass, MarkupResemblesLocatorWarning, for the
warning issued when BeautifulSoup is instantiated with 'markup' that
actually seems to be a URL or the path to a file on
disk. [bug=1873787]

* The new NavigableString subclasses (Stylesheet, Script, and
TemplateString) can now be imported directly from the bs4 package.

* If you encode a document with a Python-specific encoding like
'unicode_escape', that encoding is no longer mentioned in the final
XML or HTML document. Instead, encoding information is omitted or
left blank. [bug=1874955]

* Fixed test failures when run against soupselect 2.0. Patch by Tomáš
Chvátal. [bug=1872279]

= 4.9.0 (20200405)

* Added PageElement.decomposed, a new property which lets you
check whether you've already called decompose() on a Tag or
NavigableString.

* Embedded CSS and Javascript is now stored in distinct Stylesheet and
Script tags, which are ignored by methods like get_text() since most
people don't consider this sort of content to be 'text'. This
feature is not supported by the html5lib treebuilder. [bug=1868861]

* Added a Russian translation by 'authoress' to the repository.

* Fixed an unhandled exception when formatting a Tag that had been
decomposed.[bug=1857767]

* Fixed a bug that happened when passing a Unicode filename containing
non-ASCII characters as markup into Beautiful Soup, on a system that
allows Unicode filenames. [bug=1866717]

* Added a performance optimization to PageElement.extract(). Patch by
Arthur Darcet.

= 4.8.2 (20191224)

* Added Python docstrings to all public methods of the most commonly
used classes.

* Added a Chinese translation by Deron Wang and a Brazilian Portuguese
translation by Cezar Peixeiro to the repository.

* Fixed two deprecation warnings. Patches by Colin
Watson and Nicholas Neumann. [bug=1847592] [bug=1855301]

* The html.parser tree builder now correctly handles DOCTYPEs that are
not uppercase. [bug=1848401]

* PageElement.select() now returns a ResultSet rather than a regular
list, making it consistent with methods like find_all().

= 4.8.1 (20191006)

* When the html.parser or html5lib parsers are in use, Beautiful Soup
will, by default, record the position in the original document where
each tag was encountered. This includes line number (Tag.sourceline)
and position within a line (Tag.sourcepos). Based on code by Chris
Mayo. [bug=1742921]
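A quick sketch of reading those positions (the exact offsets assume html.parser's zero-based columns):

    from bs4 import BeautifulSoup

    soup = BeautifulSoup("<p>one</p>\n<p>two</p>", "html.parser")
    second = soup.find_all("p")[1]
    print(second.sourceline, second.sourcepos)  # line 2, offset 0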

* When instantiating a BeautifulSoup object, it's now possible to
provide a dictionary ('element_classes') of the classes you'd like to be
instantiated instead of Tag, NavigableString, etc.
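For example, a minimal sketch with a hypothetical Tag subclass:

    from bs4 import BeautifulSoup, Tag

    class MyTag(Tag):  # hypothetical subclass; customize behavior here
        pass

    soup = BeautifulSoup("<b>x</b>", "html.parser", element_classes={Tag: MyTag})
    print(type(soup.b).__name__)  # MyTag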

* Fixed the definition of the default XML namespace when using
lxml 4.4. Patch by Isaac Muse. [bug=1840141]

* Fixed a crash when pretty-printing tags that were not created
during initial parsing. [bug=1838903]

* Copying a Tag preserves information that was originally obtained from
the TreeBuilder used to build the original Tag. [bug=1838903]

* Raise an explanatory exception when the underlying parser
completely rejects the incoming markup. [bug=1838877]

* Avoid a crash when trying to detect the declared encoding of a
Unicode document. [bug=1838877]

* Avoid a crash when unpickling certain parse trees generated
using html5lib on Python 3. [bug=1843545]

= 4.8.0 (20190720, "One Small Soup")

This release focuses on making it easier to customize Beautiful Soup's
input mechanism (the TreeBuilder) and output mechanism (the Formatter).

* You can customize the TreeBuilder object by passing keyword
arguments into the BeautifulSoup constructor. Those keyword
arguments will be passed along into the TreeBuilder constructor.

The main reason to do this right now is to change how which
attributes are treated as multi-valued attributes (the way 'class'
is treated by default). You can do this with the
'multi_valued_attributes' argument. [bug=1832978]
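A sketch of that argument in use (passing None disables multi-valued handling entirely):

    from bs4 import BeautifulSoup

    markup = '<p class="a b">x</p>'
    soup = BeautifulSoup(markup, "html.parser", multi_valued_attributes=None)
    print(soup.p["class"])  # 'a b' -- a single string, not ['a', 'b']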

* The role of Formatter objects has been greatly expanded. The Formatter
class now controls the following:

- The function to call to perform entity substitution. (This was
previously Formatter's only job.)
- Which tags should be treated as containing CDATA and have their
contents exempt from entity substitution.
- The order in which a tag's attributes are output. [bug=1812422]
- Whether or not to put a '/' inside a void element, e.g. '<br/>' vs '<br>'

All preexisting code should work as before.

* Added a new method to the API, Tag.smooth(), which consolidates
multiple adjacent NavigableString elements. [bug=1697296]

* &apos; (which is valid in XML, XHTML, and HTML 5, but not HTML 4) is always
recognized as a named entity and converted to a single quote. [bug=1818721]

= 4.7.1 (20190106)

* Fixed a significant performance problem introduced in 4.7.0. [bug=1810617]

* Fixed an incorrectly raised exception when inserting a tag before or
after an identical tag. [bug=1810692]

* Beautiful Soup will no longer try to keep track of namespaces that
are not defined with a prefix; this can confuse soupselect. [bug=1810680]

* Tried even harder to avoid the deprecation warning originally fixed in
4.6.1. [bug=1778909]

= 4.7.0 (20181231)

* Beautiful Soup's CSS Selector implementation has been replaced by a
dependency on Isaac Muse's SoupSieve project (the soupsieve package
on PyPI). The good news is that SoupSieve has a much more robust and
complete implementation of CSS selectors, resolving a large number
of longstanding issues. The bad news is that from this point onward,
SoupSieve must be installed if you want to use the select() method.

You don't have to change anything if you installed Beautiful Soup
through pip (SoupSieve will be automatically installed when you
upgrade Beautiful Soup) or if you don't use CSS selectors from
within Beautiful Soup.

SoupSieve documentation: https://facelessuser.github.io/soupsieve/

* Added the PageElement.extend() method, which works like list.append().
[bug=1514970]

* PageElement.insert_before() and insert_after() now take a variable
number of arguments. [bug=1514970]

* Fix a number of problems with the tree builder that caused
trees that were superficially okay, but which fell apart when bits
were extracted. Patch by Isaac Muse. [bug=1782928,1809910]

* Fixed a problem with the tree builder in which elements that
contained no content (such as empty comments and all-whitespace
elements) were not being treated as part of the tree. Patch by Isaac
Muse. [bug=1798699]

* Fixed a problem with multi-valued attributes where the value
contained whitespace. Thanks to Jens Svalgaard for the
fix. [bug=1787453]

* Clarified ambiguous license statements in the source code. Beautiful
Soup is released under the MIT license, and has been since 4.4.0.

* This file has been renamed from NEWS.txt to CHANGELOG.

= 4.6.3 (20180812)

* Exactly the same as 4.6.2. Re-released to make the README file
render properly on PyPI.

= 4.6.2 (20180812)

* Fix an exception when a custom formatter was asked to format a void
element. [bug=1784408]

= 4.6.1 (20180728)

* Stop data loss when encountering an empty numeric entity, and
possibly in other cases. Thanks to tos.kamiya for the fix. [bug=1698503]

* Preserve XML namespaces introduced inside an XML document, not just
the ones introduced at the top level. [bug=1718787]

* Added a new formatter, "html5", which represents void elements
as "<element>" rather than "<element/>". [bug=1716272]

* Fixed a problem where the html.parser tree builder interpreted
a string like "&foo " as the character entity "&foo;" [bug=1728706]

* Correctly handle invalid HTML numeric character entities like &#147;
which reference code points that are not Unicode code points. Note
that this is only fixed when Beautiful Soup is used with the
html.parser parser -- html5lib already worked and I couldn't fix it
with lxml. [bug=1782933]

* Improved the warning given when no parser is specified. [bug=1780571]

* When markup contains duplicate elements, a select() call that
includes multiple match clauses will match all relevant
elements. [bug=1770596]

* Fixed code that was causing deprecation warnings in recent Python 3
versions. Includes a patch from Ville Skyttä. [bug=1778909] [bug=1689496]

* Fixed a Windows crash in diagnose() when checking whether a long
markup string is a filename. [bug=1737121]

* Stopped HTMLParser from raising an exception in very rare cases of
bad markup. [bug=1708831]

* Fixed a bug where find_all() was not working when asked to find a
tag with a namespaced name in an XML document that was parsed as
HTML. [bug=1723783]

* You can get finer control over formatting by subclassing
bs4.element.Formatter and passing a Formatter instance into (e.g.)
encode(). [bug=1716272]
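As a sketch of that customization point (using the bs4.formatter.HTMLFormatter spelling from later releases; the upper-casing substitution function is purely illustrative):

    from bs4 import BeautifulSoup
    from bs4.formatter import HTMLFormatter

    def shout(s):
        # Illustrative entity-substitution hook: upper-case all text output.
        return s.upper()

    soup = BeautifulSoup("<p>quiet &amp; calm</p>", "html.parser")
    print(soup.p.encode(formatter=HTMLFormatter(shout)))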

* You can pass a dictionary of `attrs` into
BeautifulSoup.new_tag. This makes it possible to create a tag with
an attribute like 'name' that would otherwise be masked by another
argument of new_tag. [bug=1779276]
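For instance, a short sketch (attribute names are illustrative):

    from bs4 import BeautifulSoup

    soup = BeautifulSoup("", "html.parser")
    # 'name' collides with new_tag's own tag-name parameter, so pass it via attrs
    tag = soup.new_tag("input", attrs={"name": "user", "type": "text"})
    print(tag)  # <input name="user" type="text"/>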

* Clarified the deprecation warning when accessing tag.fooTag, to cover
the possibility that you might really have been looking for a tag
called 'fooTag'.

= 4.6.0 (20170507) =

* Added the `Tag.get_attribute_list` method, which acts like `Tag.get` for
getting the value of an attribute, but which always returns a list,
whether or not the attribute is a multi-value attribute. [bug=1678589]
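A brief sketch of the difference:

    from bs4 import BeautifulSoup

    soup = BeautifulSoup('<p id="x" class="a b">y</p>', "html.parser")
    print(soup.p.get("id"))                    # 'x'
    print(soup.p.get_attribute_list("id"))     # ['x'] -- always a list
    print(soup.p.get_attribute_list("class"))  # ['a', 'b']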

* It's now possible to use a tag's namespace prefix when searching,
e.g. soup.find('namespace:tag') [bug=1655332]

* Improved the handling of empty-element tags like <br> when using the
html.parser parser. [bug=1676935]

* HTML parsers treat all HTML4 and HTML5 empty element tags (aka void
element tags) correctly. [bug=1656909]

* Namespace prefix is preserved when an XML tag is copied. Thanks
to Vikas for a patch and test. [bug=1685172]

= 4.5.3 (20170102) =

* Fixed foster parenting when html5lib is the tree builder. Thanks to
Geoffrey Sneddon for a patch and test.

* Fixed yet another problem that caused the html5lib tree builder to
create a disconnected parse tree. [bug=1629825]

= 4.5.2 (20170102) =

* Apart from the version number, this release is identical to
4.5.3. Due to user error, it could not be completely uploaded to
PyPI. Use 4.5.3 instead.

= 4.5.1 (20160802) =

* Fixed a crash when passing Unicode markup that contained a
processing instruction into the lxml HTML parser on Python
3. [bug=1608048]

= 4.5.0 (20160719) =

* Beautiful Soup is no longer compatible with Python 2.6. This
actually happened a few releases ago, but it's now official.

* Beautiful Soup will now work with versions of html5lib greater than
0.99999999. [bug=1603299]

* If a search against each individual value of a multi-valued
attribute fails, the search will be run one final time against the
complete attribute value considered as a single string. That is, if
a tag has class="foo bar" and neither "foo" nor "bar" matches, but
"foo bar" does, the tag is now considered a match.

This happened in previous versions, but only when the value being
searched for was a string. Now it also works when that value is
a regular expression, a list of strings, etc. [bug=1476868]

* Fixed a bug that deranged the tree when a whitespace element was
reparented into a tag that contained an identical whitespace
element. [bug=1505351]

* Added support for CSS selector values that contain quoted spaces,
such as tag[style="display: foo"]. [bug=1540588]

* Corrected handling of XML processing instructions. [bug=1504393]

* Corrected an encoding error that happened when a BeautifulSoup
object was copied. [bug=1554439]

* The contents of <textarea> tags will no longer be modified when the
tree is prettified. [bug=1555829]

* When a BeautifulSoup object is pickled but its tree builder cannot
be pickled, its .builder attribute is set to None instead of being
destroyed. This avoids a performance problem once the object is
unpickled. [bug=1523629]

* Specify the file and line number when warning about a
BeautifulSoup object being instantiated without a parser being
specified. [bug=1574647]

* The `limit` argument to `select()` now works correctly, though it's
not implemented very efficiently. [bug=1520530]

* Fixed a Python 3 ByteWarning when a URL was passed in as though it
were markup. Thanks to James Salter for a patch and
test. [bug=1533762]

* We don't run the check for a filename passed in as markup if the
'filename' contains a less-than character; the less-than character
indicates it's most likely a very small document. [bug=1577864]

= 4.4.1 (20150928) =

* Fixed a bug that deranged the tree when part of it was
removed. Thanks to Eric Weiser for the patch and John Wiseman for a
test. [bug=1481520]

* Fixed a parse bug with the html5lib tree-builder. Thanks to Roel
Kramer for the patch. [bug=1483781]

* Improved the implementation of CSS selector grouping. Thanks to
Orangain for the patch. [bug=1484543]

* Fixed the test_detect_utf8 test so that it works when chardet is
installed. [bug=1471359]

* Corrected the output of Declaration objects. [bug=1477847]


= 4.4.0 (20150703) =

Especially important changes:

* Added a warning when you instantiate a BeautifulSoup object without
explicitly naming a parser. [bug=1398866]

* __repr__ now returns an ASCII bytestring in Python 2, and a Unicode
string in Python 3, instead of a UTF8-encoded bytestring in both
versions. In Python 3, __str__ now returns a Unicode string instead
of a bytestring. [bug=1420131]

* The `text` argument to the find_* methods is now called `string`,
which is more accurate. `text` still works, but `string` is the
argument described in the documentation. `text` may eventually
change its meaning, but not for a very long time. [bug=1366856]

* Changed the way soup objects work under copy.copy(). Copying a
NavigableString or a Tag will give you a new NavigableString that's
equal to the old one but not connected to the parse tree. Patch by
Martijn Peters. [bug=1307490]

* Started using a standard MIT license. [bug=1294662]

* Added a Chinese translation of the documentation by Delong .w.

New features:

* Introduced the select_one() method, which uses a CSS selector but
only returns the first match, instead of a list of
matches. [bug=1349367]

* You can now create a Tag object without specifying a
TreeBuilder. Patch by Martijn Pieters. [bug=1307471]

* You can now create a NavigableString or a subclass just by invoking
the constructor. [bug=1294315]

* Added an `exclude_encodings` argument to UnicodeDammit and to the
Beautiful Soup constructor, which lets you prohibit the detection of
an encoding that you know is wrong. [bug=1469408]
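A short sketch of that argument (the byte string and excluded encoding are illustrative):

    from bs4 import BeautifulSoup

    markup = b"<p>Sacr\xe9 bleu!</p>"
    # Tell the detector never to guess this known-wrong encoding.
    soup = BeautifulSoup(markup, "html.parser", exclude_encodings=["iso-8859-7"])
    print(soup.original_encoding)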

* The select() method now supports selector grouping. Patch by
Francisco Canas [bug=1191917]

Bug fixes:

* Fixed yet another problem that caused the html5lib tree builder to
create a disconnected parse tree. [bug=1237763]

* Force object_was_parsed() to keep the tree intact even when an element
from later in the document is moved into place. [bug=1430633]

* Fixed yet another bug that caused a disconnected tree when html5lib
copied an element from one part of the tree to another. [bug=1270611]

* Fixed a bug where Element.extract() could create an infinite loop in
the remaining tree.

* The select() method can now find tags whose names contain
dashes. Patch by Francisco Canas. [bug=1276211]

* The select() method can now find tags with attributes whose names
contain dashes. Patch by Marek Kapolka. [bug=1304007]

* Improved the lxml tree builder's handling of processing
instructions. [bug=1294645]

* Restored the helpful syntax error that happens when you try to
import the Python 2 edition of Beautiful Soup under Python
3. [bug=1213387]

* In Python 3.4 and above, set the new convert_charrefs argument to
the html.parser constructor to avoid a warning and future
failures. Patch by Stefano Revera. [bug=1375721]

* The warning when you pass in a filename or URL as markup will now be
displayed correctly even if the filename or URL is a Unicode
string. [bug=1268888]

* If the initial <html> tag contains a CDATA list attribute such as
'class', the html5lib tree builder will now turn its value into a
list, as it would with any other tag. [bug=1296481]

* Fixed an import error in Python 3.5 caused by the removal of the
HTMLParseError class. [bug=1420063]

* Improved docstring for encode_contents() and
decode_contents(). [bug=1441543]

* Fixed a crash in Unicode, Dammit's encoding detector when the name
of the encoding itself contained invalid bytes. [bug=1360913]

* Improved the exception raised when you call .unwrap() or
.replace_with() on an element that's not attached to a tree.

* Raise a NotImplementedError whenever an unsupported CSS pseudoclass
is used in select(). Previously some cases did not result in a
NotImplementedError.

* It's now possible to pickle a BeautifulSoup object no matter which
tree builder was used to create it. However, the only tree builder
that survives the pickling process is the HTMLParserTreeBuilder
('html.parser'). If you unpickle a BeautifulSoup object created with
some other tree builder, soup.builder will be None. [bug=1231545]

= 4.3.2 (20131002) =

* Fixed a bug in which short Unicode input was improperly encoded to
@@ -1104,7 +331,7 @@ Bug fixes:
* Renamed Tag.nsprefix to Tag.prefix, for consistency with
NamespacedAttribute.

* Fixed a test failure that occurred on Python 3.x when chardet was
* Fixed a test failure that occured on Python 3.x when chardet was
installed.

* Made prettify() return Unicode by default, so it will look nice on
@@ -1138,7 +365,7 @@ Bug fixes:

* Restored compatibility with Python 2.6.

* The install process no longer installs docs or auxiliary text files.
* The install process no longer installs docs or auxillary text files.

* It's now possible to deepcopy a BeautifulSoup object created with
Python's built-in HTML parser.
@@ -1377,7 +604,7 @@ Added an import that makes BS work in Python 2.3.
Fixed a UnicodeDecodeError when unpickling documents that contain
non-ASCII characters.

Fixed a TypeError that occurred in some circumstances when a tag
Fixed a TypeError that occured in some circumstances when a tag
contained no text.

Jump through hoops to avoid the use of chardet, which can be extremely
@@ -1,99 +1,65 @@
|
||||
"""Beautiful Soup Elixir and Tonic - "The Screen-Scraper's Friend".
|
||||
|
||||
"""Beautiful Soup
|
||||
Elixir and Tonic
|
||||
"The Screen-Scraper's Friend"
|
||||
http://www.crummy.com/software/BeautifulSoup/
|
||||
|
||||
Beautiful Soup uses a pluggable XML or HTML parser to parse a
|
||||
(possibly invalid) document into a tree representation. Beautiful Soup
|
||||
provides methods and Pythonic idioms that make it easy to navigate,
|
||||
search, and modify the parse tree.
|
||||
provides provides methods and Pythonic idioms that make it easy to
|
||||
navigate, search, and modify the parse tree.
|
||||
|
||||
Beautiful Soup works with Python 3.6 and up. It works better if lxml
|
||||
Beautiful Soup works with Python 2.6 and up. It works better if lxml
|
||||
and/or html5lib is installed.
|
||||
|
||||
For more than you ever wanted to know about Beautiful Soup, see the
|
||||
documentation: http://www.crummy.com/software/BeautifulSoup/bs4/doc/
|
||||
documentation:
|
||||
http://www.crummy.com/software/BeautifulSoup/bs4/doc/
|
||||
"""
|
||||
|
||||
__author__ = "Leonard Richardson (leonardr@segfault.org)"
|
||||
__version__ = "4.12.3"
|
||||
__copyright__ = "Copyright (c) 2004-2024 Leonard Richardson"
|
||||
# Use of this source code is governed by the MIT license.
|
||||
__version__ = "4.4.1"
|
||||
__copyright__ = "Copyright (c) 2004-2015 Leonard Richardson"
|
||||
__license__ = "MIT"
|
||||
|
||||
__all__ = ['BeautifulSoup']
|
||||
|
||||
from collections import Counter
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import traceback
|
||||
import warnings
|
||||
|
||||
# The very first thing we do is give a useful error if someone is
|
||||
# running this code under Python 2.
|
||||
if sys.version_info.major < 3:
|
||||
raise ImportError('You are trying to use a Python 3-specific version of Beautiful Soup under Python 2. This will not work. The final version of Beautiful Soup to support Python 2 was 4.9.3.')
|
||||
|
||||
from .builder import (
|
||||
builder_registry,
|
||||
ParserRejectedMarkup,
|
||||
XMLParsedAsHTMLWarning,
|
||||
HTMLParserTreeBuilder
|
||||
)
|
||||
from .builder import builder_registry, ParserRejectedMarkup
|
||||
from .dammit import UnicodeDammit
|
||||
from .element import (
|
||||
CData,
|
||||
Comment,
|
||||
CSS,
|
||||
DEFAULT_OUTPUT_ENCODING,
|
||||
Declaration,
|
||||
Doctype,
|
||||
NavigableString,
|
||||
PageElement,
|
||||
ProcessingInstruction,
|
||||
PYTHON_SPECIFIC_ENCODINGS,
|
||||
ResultSet,
|
||||
Script,
|
||||
Stylesheet,
|
||||
SoupStrainer,
|
||||
Tag,
|
||||
TemplateString,
|
||||
)
|
||||
|
||||
# Define some custom warnings.
|
||||
class GuessedAtParserWarning(UserWarning):
|
||||
"""The warning issued when BeautifulSoup has to guess what parser to
|
||||
use -- probably because no parser was specified in the constructor.
|
||||
"""
|
||||
# The very first thing we do is give a useful error if someone is
|
||||
# running this code under Python 3 without converting it.
|
||||
'You are trying to run the Python 2 version of Beautiful Soup under Python 3. This will not work.'!='You need to convert the code, either by installing it (`python setup.py install`) or by running 2to3 (`2to3 -w bs4`).'
|
||||
|
||||
class MarkupResemblesLocatorWarning(UserWarning):
|
||||
"""The warning issued when BeautifulSoup is given 'markup' that
|
||||
actually looks like a resource locator -- a URL or a path to a file
|
||||
on disk.
|
||||
"""
|
||||
|
||||
|
||||
class BeautifulSoup(Tag):
|
||||
"""A data structure representing a parsed HTML or XML document.
|
||||
"""
|
||||
This class defines the basic interface called by the tree builders.
|
||||
|
||||
Most of the methods you'll call on a BeautifulSoup object are inherited from
|
||||
PageElement or Tag.
|
||||
|
||||
Internally, this class defines the basic interface called by the
|
||||
tree builders when converting an HTML/XML document into a data
|
||||
structure. The interface abstracts away the differences between
|
||||
parsers. To write a new tree builder, you'll need to understand
|
||||
these methods as a whole.
|
||||
|
||||
These methods will be called by the BeautifulSoup constructor:
|
||||
* reset()
|
||||
* feed(markup)
|
||||
These methods will be called by the parser:
|
||||
reset()
|
||||
feed(markup)
|
||||
|
||||
The tree builder may call these methods from its feed() implementation:
|
||||
* handle_starttag(name, attrs) # See note about return value
|
||||
* handle_endtag(name)
|
||||
* handle_data(data) # Appends to the current data node
|
||||
* endData(containerClass) # Ends the current data node
|
||||
handle_starttag(name, attrs) # See note about return value
|
||||
handle_endtag(name)
|
||||
handle_data(data) # Appends to the current data node
|
||||
endData(containerClass=NavigableString) # Ends the current data node
|
||||
|
||||
No matter how complicated the underlying parser is, you should be
|
||||
able to build a tree using 'start tag' events, 'end tag' events,
|
||||
@@ -103,77 +69,24 @@ class BeautifulSoup(Tag):
|
||||
like HTML's <br> tag), call handle_starttag and then
|
||||
handle_endtag.
|
||||
"""
|
||||
|
||||
# Since BeautifulSoup subclasses Tag, it's possible to treat it as
|
||||
# a Tag with a .name. This name makes it clear the BeautifulSoup
|
||||
# object isn't a real markup tag.
|
||||
ROOT_TAG_NAME = '[document]'
|
||||
|
||||
# If the end-user gives no indication which tree builder they
|
||||
# want, look for one with these features.
|
||||
DEFAULT_BUILDER_FEATURES = ['html', 'fast']
|
||||
|
||||
# A string containing all ASCII whitespace characters, used in
|
||||
# endData() to detect data chunks that seem 'empty'.
|
||||
ASCII_SPACES = '\x20\x0a\x09\x0c\x0d'
|
||||
|
||||
NO_PARSER_SPECIFIED_WARNING = "No parser was explicitly specified, so I'm using the best available %(markup_type)s parser for this system (\"%(parser)s\"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.\n\nThe code that caused this warning is on line %(line_number)s of the file %(filename)s. To get rid of this warning, pass the additional argument 'features=\"%(parser)s\"' to the BeautifulSoup constructor.\n"
|
||||
|
||||
NO_PARSER_SPECIFIED_WARNING = "No parser was explicitly specified, so I'm using the best available %(markup_type)s parser for this system (\"%(parser)s\"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.\n\nTo get rid of this warning, change this:\n\n BeautifulSoup([your markup])\n\nto this:\n\n BeautifulSoup([your markup], \"%(parser)s\")\n"
|
||||
|
||||
def __init__(self, markup="", features=None, builder=None,
|
||||
parse_only=None, from_encoding=None, exclude_encodings=None,
|
||||
element_classes=None, **kwargs):
|
||||
"""Constructor.
|
||||
**kwargs):
|
||||
"""The Soup object is initialized as the 'root tag', and the
|
||||
provided markup (which can be a string or a file-like object)
|
||||
is fed into the underlying parser."""
|
||||
|
||||
:param markup: A string or a file-like object representing
|
||||
markup to be parsed.
|
||||
|
||||
:param features: Desirable features of the parser to be
|
||||
used. This may be the name of a specific parser ("lxml",
|
||||
"lxml-xml", "html.parser", or "html5lib") or it may be the
|
||||
type of markup to be used ("html", "html5", "xml"). It's
|
||||
recommended that you name a specific parser, so that
|
||||
Beautiful Soup gives you the same results across platforms
|
||||
and virtual environments.
|
||||
|
||||
:param builder: A TreeBuilder subclass to instantiate (or
|
||||
instance to use) instead of looking one up based on
|
||||
`features`. You only need to use this if you've implemented a
|
||||
custom TreeBuilder.
|
||||
|
||||
:param parse_only: A SoupStrainer. Only parts of the document
|
||||
matching the SoupStrainer will be considered. This is useful
|
||||
when parsing part of a document that would otherwise be too
|
||||
large to fit into memory.
|
||||
|
||||
:param from_encoding: A string indicating the encoding of the
|
||||
document to be parsed. Pass this in if Beautiful Soup is
|
||||
guessing wrongly about the document's encoding.
|
||||
|
||||
:param exclude_encodings: A list of strings indicating
|
||||
encodings known to be wrong. Pass this in if you don't know
|
||||
the document's encoding but you know Beautiful Soup's guess is
|
||||
wrong.
|
||||
|
||||
:param element_classes: A dictionary mapping BeautifulSoup
|
||||
classes like Tag and NavigableString, to other classes you'd
|
||||
like to be instantiated instead as the parse tree is
|
||||
built. This is useful for subclassing Tag or NavigableString
|
||||
to modify default behavior.
|
||||
|
||||
:param kwargs: For backwards compatibility purposes, the
|
||||
constructor accepts certain keyword arguments used in
|
||||
Beautiful Soup 3. None of these arguments do anything in
|
||||
Beautiful Soup 4; they will result in a warning and then be
|
||||
ignored.
|
||||
|
||||
Apart from this, any keyword arguments passed into the
|
||||
BeautifulSoup constructor are propagated to the TreeBuilder
|
||||
constructor. This makes it possible to configure a
|
||||
TreeBuilder by passing in arguments, not just by saying which
|
||||
one to use.
|
||||
"""
|
||||
if 'convertEntities' in kwargs:
|
||||
del kwargs['convertEntities']
|
||||
warnings.warn(
|
||||
"BS4 does not respect the convertEntities argument to the "
|
||||
"BeautifulSoup constructor. Entities are always converted "
|
||||
@@ -212,10 +125,10 @@ class BeautifulSoup(Tag):
|
||||
if old_name in kwargs:
|
||||
warnings.warn(
|
||||
'The "%s" argument to the BeautifulSoup constructor '
|
||||
'has been renamed to "%s."' % (old_name, new_name),
|
||||
DeprecationWarning, stacklevel=3
|
||||
)
return kwargs.pop(old_name)
'has been renamed to "%s."' % (old_name, new_name))
value = kwargs[old_name]
del kwargs[old_name]
return value
return None

parse_only = parse_only or deprecated_argument(
@@ -224,23 +137,13 @@ class BeautifulSoup(Tag):
from_encoding = from_encoding or deprecated_argument(
"fromEncoding", "from_encoding")

if from_encoding and isinstance(markup, str):
warnings.warn("You provided Unicode markup but also provided a value for from_encoding. Your from_encoding will be ignored.")
from_encoding = None
if len(kwargs) > 0:
arg = list(kwargs.keys()).pop()
raise TypeError(
"__init__() got an unexpected keyword argument '%s'" % arg)

self.element_classes = element_classes or dict()

# We need this information to track whether or not the builder
# was specified well enough that we can omit the 'you need to
# specify a parser' warning.
original_builder = builder
original_features = features

if isinstance(builder, type):
# A builder class was passed in; it needs to be instantiated.
builder_class = builder
builder = None
elif builder is None:
if builder is None:
original_features = features
if isinstance(features, str):
features = [features]
if features is None or len(features) == 0:
@@ -251,227 +154,85 @@ class BeautifulSoup(Tag):
"Couldn't find a tree builder with the features you "
"requested: %s. Do you need to install a parser library?"
% ",".join(features))

# At this point either we have a TreeBuilder instance in
# builder, or we have a builder_class that we can instantiate
# with the remaining **kwargs.
if builder is None:
builder = builder_class(**kwargs)
if not original_builder and not (
original_features == builder.NAME or
original_features in builder.ALTERNATE_NAMES
) and markup:
# The user did not tell us which TreeBuilder to use,
# and we had to guess. Issue a warning.
builder = builder_class()
if not (original_features == builder.NAME or
original_features in builder.ALTERNATE_NAMES):
if builder.is_xml:
markup_type = "XML"
else:
markup_type = "HTML"
warnings.warn(self.NO_PARSER_SPECIFIED_WARNING % dict(
parser=builder.NAME,
markup_type=markup_type))

# This code adapted from warnings.py so that we get the same line
# of code as our warnings.warn() call gets, even if the answer is wrong
# (as it may be in a multithreading situation).
caller = None
try:
caller = sys._getframe(1)
except ValueError:
pass
if caller:
globals = caller.f_globals
line_number = caller.f_lineno
else:
globals = sys.__dict__
line_number= 1
filename = globals.get('__file__')
if filename:
fnl = filename.lower()
if fnl.endswith((".pyc", ".pyo")):
filename = filename[:-1]
if filename:
# If there is no filename at all, the user is most likely in a REPL,
# and the warning is not necessary.
values = dict(
filename=filename,
line_number=line_number,
parser=builder.NAME,
markup_type=markup_type
)
warnings.warn(
self.NO_PARSER_SPECIFIED_WARNING % values,
GuessedAtParserWarning, stacklevel=2
)
else:
if kwargs:
warnings.warn("Keyword arguments to the BeautifulSoup constructor will be ignored. These would normally be passed into the TreeBuilder constructor, but a TreeBuilder instance was passed in as `builder`.")

self.builder = builder
self.is_xml = builder.is_xml
self.known_xml = self.is_xml
self._namespaces = dict()
self.builder.soup = self

self.parse_only = parse_only

if hasattr(markup, 'read'):        # It's a file-type object.
markup = markup.read()
elif len(markup) <= 256 and (
(isinstance(markup, bytes) and not b'<' in markup)
or (isinstance(markup, str) and not '<' in markup)
):
# Issue warnings for a couple beginner problems
elif len(markup) <= 256:
# Print out warnings for a couple beginner problems
# involving passing non-markup to Beautiful Soup.
# Beautiful Soup will still parse the input as markup,
# since that is sometimes the intended behavior.
if not self._markup_is_url(markup):
self._markup_resembles_filename(markup)
# just in case that's what the user really wants.
if (isinstance(markup, str)
and not os.path.supports_unicode_filenames):
possible_filename = markup.encode("utf8")
else:
possible_filename = markup
is_file = False
try:
is_file = os.path.exists(possible_filename)
except Exception as e:
# This is almost certainly a problem involving
# characters not valid in filenames on this
# system. Just let it go.
pass
if is_file:
if isinstance(markup, str):
markup = markup.encode("utf8")
warnings.warn(
'"%s" looks like a filename, not markup. You should probably open this file and pass the filehandle into Beautiful Soup.' % markup)
if markup[:5] == "http:" or markup[:6] == "https:":
# TODO: This is ugly but I couldn't get it to work in
# Python 3 otherwise.
if ((isinstance(markup, bytes) and not b' ' in markup)
or (isinstance(markup, str) and not ' ' in markup)):
if isinstance(markup, str):
markup = markup.encode("utf8")
warnings.warn(
'"%s" looks like a URL. Beautiful Soup is not an HTTP client. You should probably use an HTTP client to get the document behind the URL, and feed that document to Beautiful Soup.' % markup)

rejections = []
success = False
for (self.markup, self.original_encoding, self.declared_html_encoding,
self.contains_replacement_characters) in (
self.builder.prepare_markup(
markup, from_encoding, exclude_encodings=exclude_encodings)):
self.reset()
self.builder.initialize_soup(self)
try:
self._feed()
success = True
break
except ParserRejectedMarkup as e:
rejections.append(e)
except ParserRejectedMarkup:
pass

if not success:
other_exceptions = [str(e) for e in rejections]
raise ParserRejectedMarkup(
"The markup you provided was rejected by the parser. Trying a different parser or a different encoding may help.\n\nOriginal exception(s) from parser:\n " + "\n ".join(other_exceptions)
)

# Clear out the markup and remove the builder's circular
# reference to this object.
self.markup = None
self.builder.soup = None

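The constructor logic in this hunk is what drives the "no parser was explicitly specified" behavior: when features is left empty, a builder is guessed and GuessedAtParserWarning is issued. A minimal sketch of the calling side (markup string is illustrative):

    from bs4 import BeautifulSoup

    # Naming a parser up front ("html.parser" ships with Python) takes the
    # guessing branch above out of play, so no GuessedAtParserWarning.
    soup = BeautifulSoup("<p>hello</p>", "html.parser")
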
def _clone(self):
"""Create a new BeautifulSoup object with the same TreeBuilder,
but not associated with any markup.
def __copy__(self):
return type(self)(self.encode(), builder=self.builder)

This is the first step of the deepcopy process.
"""
clone = type(self)("", None, self.builder)

# Keep track of the encoding of the original document,
# since we won't be parsing it again.
clone.original_encoding = self.original_encoding
return clone

def __getstate__(self):
# Frequently a tree builder can't be pickled.
d = dict(self.__dict__)
if 'builder' in d and d['builder'] is not None and not self.builder.picklable:
d['builder'] = type(self.builder)
# Store the contents as a Unicode string.
d['contents'] = []
d['markup'] = self.decode()

# If _most_recent_element is present, it's a Tag object left
# over from initial parse. It might not be picklable and we
# don't need it.
if '_most_recent_element' in d:
del d['_most_recent_element']
if 'builder' in d and not self.builder.picklable:
del d['builder']
return d

def __setstate__(self, state):
# If necessary, restore the TreeBuilder by looking it up.
self.__dict__ = state
if isinstance(self.builder, type):
self.builder = self.builder()
elif not self.builder:
# We don't know which builder was used to build this
# parse tree, so use a default we know is always available.
self.builder = HTMLParserTreeBuilder()
self.builder.soup = self
self.reset()
self._feed()
return state


@classmethod
def _decode_markup(cls, markup):
"""Ensure `markup` is bytes so it's safe to send into warnings.warn.

TODO: warnings.warn had this problem back in 2010 but it might not
anymore.
"""
if isinstance(markup, bytes):
decoded = markup.decode('utf-8', 'replace')
else:
decoded = markup
return decoded

@classmethod
def _markup_is_url(cls, markup):
"""Error-handling method to raise a warning if incoming markup looks
like a URL.

:param markup: A string.
:return: Whether or not the markup resembles a URL
closely enough to justify a warning.
"""
if isinstance(markup, bytes):
space = b' '
cant_start_with = (b"http:", b"https:")
elif isinstance(markup, str):
space = ' '
cant_start_with = ("http:", "https:")
else:
return False

if any(markup.startswith(prefix) for prefix in cant_start_with):
if not space in markup:
warnings.warn(
'The input looks more like a URL than markup. You may want to use'
' an HTTP client like requests to get the document behind'
' the URL, and feed that document to Beautiful Soup.',
MarkupResemblesLocatorWarning,
stacklevel=3
)
return True
return False

@classmethod
def _markup_resembles_filename(cls, markup):
"""Error-handling method to raise a warning if incoming markup
resembles a filename.

:param markup: A bytestring or string.
:return: Whether or not the markup resembles a filename
closely enough to justify a warning.
"""
path_characters = '/\\'
extensions = ['.html', '.htm', '.xml', '.xhtml', '.txt']
if isinstance(markup, bytes):
path_characters = path_characters.encode("utf8")
extensions = [x.encode('utf8') for x in extensions]
filelike = False
if any(x in markup for x in path_characters):
filelike = True
else:
lower = markup.lower()
if any(lower.endswith(ext) for ext in extensions):
filelike = True
if filelike:
warnings.warn(
'The input looks more like a filename than markup. You may'
' want to open this file and pass the filehandle into'
' Beautiful Soup.',
MarkupResemblesLocatorWarning, stacklevel=3
)
return True
return False

def _feed(self):
"""Internal method that parses previously set markup, creating a large
number of Tag and NavigableString objects.
"""
# Convert the document to Unicode.
self.builder.reset()

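The __copy__ and __getstate__/__setstate__ changes above are observable through the standard library: a soup can be copied or pickled even when its tree builder cannot, because the decoded markup is stored and re-parsed. A short sketch, assuming an installed bs4:

    import copy
    import pickle
    from bs4 import BeautifulSoup

    soup = BeautifulSoup("<p>hello</p>", "html.parser")
    # __copy__ re-parses self.encode() with the same builder.
    soup_copy = copy.copy(soup)
    # __getstate__ stores self.decode(); __setstate__ re-feeds it.
    soup_roundtrip = pickle.loads(pickle.dumps(soup))
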
@@ -482,111 +243,48 @@ class BeautifulSoup(Tag):
self.popTag()

def reset(self):
"""Reset this object to a state as though it had never parsed any
markup.
"""
Tag.__init__(self, self, self.builder, self.ROOT_TAG_NAME)
self.hidden = 1
self.builder.reset()
self.current_data = []
self.currentTag = None
self.tagStack = []
self.open_tag_counter = Counter()
self.preserve_whitespace_tag_stack = []
self.string_container_stack = []
self._most_recent_element = None
self.pushTag(self)

def new_tag(self, name, namespace=None, nsprefix=None, attrs={},
sourceline=None, sourcepos=None, **kwattrs):
"""Create a new Tag associated with this BeautifulSoup object.
def new_tag(self, name, namespace=None, nsprefix=None, **attrs):
"""Create a new tag associated with this soup."""
return Tag(None, self.builder, name, namespace, nsprefix, attrs)

:param name: The name of the new Tag.
:param namespace: The URI of the new Tag's XML namespace, if any.
:param prefix: The prefix for the new Tag's XML namespace, if any.
:param attrs: A dictionary of this Tag's attribute values; can
be used instead of `kwattrs` for attributes like 'class'
that are reserved words in Python.
:param sourceline: The line number where this tag was
(purportedly) found in its source document.
:param sourcepos: The character position within `sourceline` where this
tag was (purportedly) found.
:param kwattrs: Keyword arguments for the new Tag's attribute values.
def new_string(self, s, subclass=NavigableString):
"""Create a new NavigableString associated with this soup."""
return subclass(s)

"""
kwattrs.update(attrs)
return self.element_classes.get(Tag, Tag)(
None, self.builder, name, namespace, nsprefix, kwattrs,
sourceline=sourceline, sourcepos=sourcepos
)

def string_container(self, base_class=None):
container = base_class or NavigableString

# There may be a general override of NavigableString.
container = self.element_classes.get(
container, container
)

# On top of that, we may be inside a tag that needs a special
# container class.
if self.string_container_stack and container is NavigableString:
container = self.builder.string_containers.get(
self.string_container_stack[-1].name, container
)
return container

def new_string(self, s, subclass=None):
"""Create a new NavigableString associated with this BeautifulSoup
object.
"""
container = self.string_container(subclass)
return container(s)

def insert_before(self, *args):
"""This method is part of the PageElement API, but `BeautifulSoup` doesn't implement
it because there is nothing before or after it in the parse tree.
"""
def insert_before(self, successor):
raise NotImplementedError("BeautifulSoup objects don't support insert_before().")

def insert_after(self, *args):
"""This method is part of the PageElement API, but `BeautifulSoup` doesn't implement
it because there is nothing before or after it in the parse tree.
"""
def insert_after(self, successor):
raise NotImplementedError("BeautifulSoup objects don't support insert_after().")

def popTag(self):
"""Internal method called by _popToTag when a tag is closed."""
tag = self.tagStack.pop()
if tag.name in self.open_tag_counter:
self.open_tag_counter[tag.name] -= 1
if self.preserve_whitespace_tag_stack and tag == self.preserve_whitespace_tag_stack[-1]:
self.preserve_whitespace_tag_stack.pop()
if self.string_container_stack and tag == self.string_container_stack[-1]:
self.string_container_stack.pop()
#print("Pop", tag.name)
#print "Pop", tag.name
if self.tagStack:
self.currentTag = self.tagStack[-1]
return self.currentTag

def pushTag(self, tag):
"""Internal method called by handle_starttag when a tag is opened."""
#print("Push", tag.name)
if self.currentTag is not None:
#print "Push", tag.name
if self.currentTag:
self.currentTag.contents.append(tag)
self.tagStack.append(tag)
self.currentTag = self.tagStack[-1]
if tag.name != self.ROOT_TAG_NAME:
self.open_tag_counter[tag.name] += 1
if tag.name in self.builder.preserve_whitespace_tags:
self.preserve_whitespace_tag_stack.append(tag)
if tag.name in self.builder.string_containers:
self.string_container_stack.append(tag)

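new_tag() and new_string() above are the factory methods user code reaches for when building a tree by hand; the attrs dict exists because names like 'class' are Python reserved words. A hedged usage sketch:

    from bs4 import BeautifulSoup

    soup = BeautifulSoup("<p>hi</p>", "html.parser")
    # 'class' can't be a keyword argument, so it goes through attrs=.
    div = soup.new_tag("div", attrs={"class": "note"})
    div.append(soup.new_string("hello"))
    # insert_after() works on tags; only the BeautifulSoup object itself
    # raises NotImplementedError, as shown above.
    soup.p.insert_after(div)
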
def endData(self, containerClass=None):
"""Method called by the TreeBuilder when the end of a data segment
occurs.
"""
def endData(self, containerClass=NavigableString):
if self.current_data:
current_data = ''.join(self.current_data)
# If whitespace is not preserved, and this string contains
@@ -613,93 +311,61 @@ class BeautifulSoup(Tag):
not self.parse_only.search(current_data)):
return

containerClass = self.string_container(containerClass)
o = containerClass(current_data)
self.object_was_parsed(o)

def object_was_parsed(self, o, parent=None, most_recent_element=None):
"""Method called by the TreeBuilder to integrate an object into the parse tree."""
if parent is None:
parent = self.currentTag
if most_recent_element is not None:
previous_element = most_recent_element
else:
previous_element = self._most_recent_element
"""Add an object to the parse tree."""
parent = parent or self.currentTag
previous_element = most_recent_element or self._most_recent_element

next_element = previous_sibling = next_sibling = None
if isinstance(o, Tag):
next_element = o.next_element
next_sibling = o.next_sibling
previous_sibling = o.previous_sibling
if previous_element is None:
if not previous_element:
previous_element = o.previous_element

fix = parent.next_element is not None

o.setup(parent, previous_element, next_element, previous_sibling, next_sibling)

self._most_recent_element = o
parent.contents.append(o)

# Check if we are inserting into an already parsed node.
if fix:
self._linkage_fixer(parent)
if parent.next_sibling:
# This node is being inserted into an element that has
# already been parsed. Deal with any dangling references.
index = parent.contents.index(o)
if index == 0:
previous_element = parent
previous_sibling = None
else:
previous_element = previous_sibling = parent.contents[index-1]
if index == len(parent.contents)-1:
next_element = parent.next_sibling
next_sibling = None
else:
next_element = next_sibling = parent.contents[index+1]

def _linkage_fixer(self, el):
"""Make sure linkage of this fragment is sound."""

first = el.contents[0]
child = el.contents[-1]
descendant = child

if child is first and el.parent is not None:
# Parent should be linked to first child
el.next_element = child
# We are no longer linked to whatever this element is
prev_el = child.previous_element
if prev_el is not None and prev_el is not el:
prev_el.next_element = None
# First child should be linked to the parent, and no previous siblings.
child.previous_element = el
child.previous_sibling = None

# We have no sibling as we've been appended as the last.
child.next_sibling = None

# This index is a tag, dig deeper for a "last descendant"
if isinstance(child, Tag) and child.contents:
descendant = child._last_descendant(False)

# As the final step, link last descendant. It should be linked
# to the parent's next sibling (if found), else walk up the chain
# and find a parent with a sibling. It should have no next sibling.
descendant.next_element = None
descendant.next_sibling = None
target = el
while True:
if target is None:
break
elif target.next_sibling is not None:
descendant.next_element = target.next_sibling
target.next_sibling.previous_element = child
break
target = target.parent
o.previous_element = previous_element
if previous_element:
previous_element.next_element = o
o.next_element = next_element
if next_element:
next_element.previous_element = o
o.next_sibling = next_sibling
if next_sibling:
next_sibling.previous_sibling = o
o.previous_sibling = previous_sibling
if previous_sibling:
previous_sibling.next_sibling = o

def _popToTag(self, name, nsprefix=None, inclusivePop=True):
"""Pops the tag stack up to and including the most recent
instance of the given tag.

If there are no open tags with the given name, nothing will be
popped.

:param name: Pop up to the most recent tag with this name.
:param nsprefix: The namespace prefix that goes with `name`.
:param inclusivePop: It this is false, pops the tag stack up
to but *not* including the most recent instqance of the
given tag.

"""
#print("Popping to %s" % name)
instance of the given tag. If inclusivePop is false, pops the tag
stack up to but *not* including the most recent instqance of
the given tag."""
#print "Popping to %s" % name
if name == self.ROOT_TAG_NAME:
# The BeautifulSoup object itself can never be popped.
return
@@ -708,8 +374,6 @@ class BeautifulSoup(Tag):

stack_size = len(self.tagStack)
for i in range(stack_size - 1, 0, -1):
if not self.open_tag_counter.get(name):
break
t = self.tagStack[i]
if (name == t.name and nsprefix == t.prefix):
if inclusivePop:
@@ -719,26 +383,16 @@ class BeautifulSoup(Tag):

return most_recently_popped

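object_was_parsed() and _linkage_fixer() maintain the next_element/previous_element chain that gives every parsed object a place in document order. The invariant they preserve is easy to see from outside:

    from bs4 import BeautifulSoup

    soup = BeautifulSoup("<a><b>one</b><c>two</c></a>", "html.parser")
    node = soup.a
    # Following .next_element visits tags and strings in document order:
    # a, b, "one", c, "two".
    while node is not None:
        print(getattr(node, "name", None) or repr(node))
        node = node.next_element
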
def handle_starttag(self, name, namespace, nsprefix, attrs, sourceline=None,
sourcepos=None, namespaces=None):
"""Called by the tree builder when a new tag is encountered.
def handle_starttag(self, name, namespace, nsprefix, attrs):
"""Push a start tag on to the stack.

:param name: Name of the tag.
:param nsprefix: Namespace prefix for the tag.
:param attrs: A dictionary of attribute values.
:param sourceline: The line number where this tag was found in its
source document.
:param sourcepos: The character position within `sourceline` where this
tag was found.
:param namespaces: A dictionary of all namespace prefix mappings
currently in scope in the document.

If this method returns None, the tag was rejected by an active
SoupStrainer. You should proceed as if the tag had not occurred
If this method returns None, the tag was rejected by the
SoupStrainer. You should proceed as if the tag had not occured
in the document. For instance, if this was a self-closing tag,
don't call handle_endtag.
"""
# print("Start tag %s: %s" % (name, attrs))

# print "Start tag %s: %s" % (name, attrs)
self.endData()

if (self.parse_only and len(self.tagStack) <= 1
@@ -746,54 +400,34 @@ class BeautifulSoup(Tag):
or not self.parse_only.search_tag(name, attrs))):
return None

tag = self.element_classes.get(Tag, Tag)(
self, self.builder, name, namespace, nsprefix, attrs,
self.currentTag, self._most_recent_element,
sourceline=sourceline, sourcepos=sourcepos,
namespaces=namespaces
)
tag = Tag(self, self.builder, name, namespace, nsprefix, attrs,
self.currentTag, self._most_recent_element)
if tag is None:
return tag
if self._most_recent_element is not None:
if self._most_recent_element:
self._most_recent_element.next_element = tag
self._most_recent_element = tag
self.pushTag(tag)
return tag

def handle_endtag(self, name, nsprefix=None):
"""Called by the tree builder when an ending tag is encountered.

:param name: Name of the tag.
:param nsprefix: Namespace prefix for the tag.
"""
#print("End tag: " + name)
#print "End tag: " + name
self.endData()
self._popToTag(name, nsprefix)


def handle_data(self, data):
"""Called by the tree builder when a chunk of textual data is encountered."""
self.current_data.append(data)


def decode(self, pretty_print=False,
eventual_encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal", iterator=None):
"""Returns a string or Unicode representation of the parse tree
as an HTML or XML document.
formatter="minimal"):
"""Returns a string or Unicode representation of this document.
To get Unicode, pass None for encoding."""

:param pretty_print: If this is True, indentation will be used to
make the document more readable.
:param eventual_encoding: The encoding of the final document.
If this is None, the document will be a Unicode string.
"""
if self.is_xml:
# Print the XML declaration
encoding_part = ''
if eventual_encoding in PYTHON_SPECIFIC_ENCODINGS:
# This is a special Python encoding; it can't actually
# go into an XML document because it means nothing
# outside of Python.
eventual_encoding = None
if eventual_encoding != None:
if eventual_encoding is not None:
encoding_part = ' encoding="%s"' % eventual_encoding
prefix = '<?xml version="1.0"%s?>\n' % encoding_part
else:
@@ -803,9 +437,9 @@ class BeautifulSoup(Tag):
else:
indent_level = 0
return prefix + super(BeautifulSoup, self).decode(
indent_level, eventual_encoding, formatter, iterator)
indent_level, eventual_encoding, formatter)

# Aliases to make it easier to get started quickly, e.g. 'from bs4 import _soup'
# Alias to make it easier to type import: 'from bs4 import _soup'
_s = BeautifulSoup
_soup = BeautifulSoup

@@ -816,24 +450,19 @@ class BeautifulStoneSoup(BeautifulSoup):
kwargs['features'] = 'xml'
warnings.warn(
'The BeautifulStoneSoup class is deprecated. Instead of using '
'it, pass features="xml" into the BeautifulSoup constructor.',
DeprecationWarning, stacklevel=2
)
'it, pass features="xml" into the BeautifulSoup constructor.')
super(BeautifulStoneSoup, self).__init__(*args, **kwargs)


class StopParsing(Exception):
"""Exception raised by a TreeBuilder if it's unable to continue parsing."""
pass

class FeatureNotFound(ValueError):
"""Exception raised by the BeautifulSoup constructor if no parser with the
requested features is found.
"""
pass


#If this file is run as a script, act as an HTML pretty-printer.
#By default, act as an HTML pretty-printer.
if __name__ == '__main__':
import sys
soup = BeautifulSoup(sys.stdin)
print((soup.prettify()))
print(soup.prettify())

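handle_starttag() above consults self.parse_only, which is the hook behind the public SoupStrainer feature; tags the strainer rejects never enter the tree. A sketch of the user-facing side:

    from bs4 import BeautifulSoup, SoupStrainer

    # Only <a> tags survive; handle_starttag() returns None for the rest.
    only_links = SoupStrainer("a")
    soup = BeautifulSoup('<p><a href="/x">x</a></p>', "html.parser",
                         parse_only=only_links)
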
@@ -1,21 +1,11 @@
# Use of this source code is governed by the MIT license.
__license__ = "MIT"

from collections import defaultdict
import itertools
import re
import warnings
import sys
from bs4.element import (
CharsetMetaAttributeValue,
ContentMetaAttributeValue,
RubyParenthesisString,
RubyTextString,
Stylesheet,
Script,
TemplateString,
nonwhitespace_re
)
whitespace_re
)

__all__ = [
'HTMLTreeBuilder',
@@ -32,41 +22,20 @@ XML = 'xml'
HTML = 'html'
HTML_5 = 'html5'

class XMLParsedAsHTMLWarning(UserWarning):
"""The warning issued when an HTML parser is used to parse
XML that is not XHTML.
"""
MESSAGE = """It looks like you're parsing an XML document using an HTML parser. If this really is an HTML document (maybe it's XHTML?), you can ignore or filter this warning. If it's XML, you should know that using an XML parser will be more reliable. To parse this document as XML, make sure you have the lxml package installed, and pass the keyword argument `features="xml"` into the BeautifulSoup constructor."""


class TreeBuilderRegistry(object):
"""A way of looking up TreeBuilder subclasses by their name or by desired
features.
"""


def __init__(self):
self.builders_for_feature = defaultdict(list)
self.builders = []

def register(self, treebuilder_class):
"""Register a treebuilder based on its advertised features.

:param treebuilder_class: A subclass of Treebuilder. its .features
attribute should list its features.
"""
"""Register a treebuilder based on its advertised features."""
for feature in treebuilder_class.features:
self.builders_for_feature[feature].insert(0, treebuilder_class)
self.builders.insert(0, treebuilder_class)

def lookup(self, *features):
"""Look up a TreeBuilder subclass with the desired features.

:param features: A list of features to look for. If none are
provided, the most recently registered TreeBuilder subclass
will be used.
:return: A TreeBuilder subclass, or None if there's no
registered subclass with all the requested features.
"""
if len(self.builders) == 0:
# There are no builders at all.
return None
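TreeBuilderRegistry is what the features argument to BeautifulSoup resolves against; the same lookup can be done directly:

    from bs4.builder import builder_registry

    # Most recently registered builder advertising the 'html' feature;
    # typically lxml's builder if installed, else html.parser's.
    builder_class = builder_registry.lookup('html')
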
@@ -109,7 +78,7 @@ class TreeBuilderRegistry(object):
builder_registry = TreeBuilderRegistry()

class TreeBuilder(object):
"""Turn a textual document into a Beautiful Soup object tree."""
"""Turn a document into a Beautiful Soup object tree."""

NAME = "[Unknown tree builder]"
ALTERNATE_NAMES = []
@@ -117,89 +86,19 @@ class TreeBuilder(object):

is_xml = False
picklable = False
preserve_whitespace_tags = set()
empty_element_tags = None # A tag will be considered an empty-element
# tag when and only when it has no contents.


# A value for these tag/attribute combinations is a space- or
# comma-separated list of CDATA, rather than a single CDATA.
DEFAULT_CDATA_LIST_ATTRIBUTES = defaultdict(list)
cdata_list_attributes = {}

# Whitespace should be preserved inside these tags.
DEFAULT_PRESERVE_WHITESPACE_TAGS = set()

# The textual contents of tags with these names should be
# instantiated with some class other than NavigableString.
DEFAULT_STRING_CONTAINERS = {}

USE_DEFAULT = object()

# Most parsers don't keep track of line numbers.
TRACKS_LINE_NUMBERS = False

def __init__(self, multi_valued_attributes=USE_DEFAULT,
preserve_whitespace_tags=USE_DEFAULT,
store_line_numbers=USE_DEFAULT,
string_containers=USE_DEFAULT,
):
"""Constructor.

:param multi_valued_attributes: If this is set to None, the
TreeBuilder will not turn any values for attributes like
'class' into lists. Setting this to a dictionary will
customize this behavior; look at DEFAULT_CDATA_LIST_ATTRIBUTES
for an example.

Internally, these are called "CDATA list attributes", but that
probably doesn't make sense to an end-user, so the argument name
is `multi_valued_attributes`.

:param preserve_whitespace_tags: A list of tags to treat
the way <pre> tags are treated in HTML. Tags in this list
are immune from pretty-printing; their contents will always be
output as-is.

:param string_containers: A dictionary mapping tag names to
the classes that should be instantiated to contain the textual
contents of those tags. The default is to use NavigableString
for every tag, no matter what the name. You can override the
default by changing DEFAULT_STRING_CONTAINERS.

:param store_line_numbers: If the parser keeps track of the
line numbers and positions of the original markup, that
information will, by default, be stored in each corresponding
`Tag` object. You can turn this off by passing
store_line_numbers=False. If the parser you're using doesn't
keep track of this information, then setting store_line_numbers=True
will do nothing.
"""
def __init__(self):
self.soup = None
if multi_valued_attributes is self.USE_DEFAULT:
multi_valued_attributes = self.DEFAULT_CDATA_LIST_ATTRIBUTES
self.cdata_list_attributes = multi_valued_attributes
if preserve_whitespace_tags is self.USE_DEFAULT:
preserve_whitespace_tags = self.DEFAULT_PRESERVE_WHITESPACE_TAGS
self.preserve_whitespace_tags = preserve_whitespace_tags
if store_line_numbers == self.USE_DEFAULT:
store_line_numbers = self.TRACKS_LINE_NUMBERS
self.store_line_numbers = store_line_numbers
if string_containers == self.USE_DEFAULT:
string_containers = self.DEFAULT_STRING_CONTAINERS
self.string_containers = string_containers

def initialize_soup(self, soup):
"""The BeautifulSoup object has been initialized and is now
being associated with the TreeBuilder.

:param soup: A BeautifulSoup object.
"""
self.soup = soup

def reset(self):
"""Do any work necessary to reset the underlying parser
for a new document.

By default, this does nothing.
"""
pass

def can_be_empty_element(self, tag_name):
@@ -211,58 +110,24 @@ class TreeBuilder(object):
For instance: an HTMLBuilder does not consider a <p> tag to be
an empty-element tag (it's not in
HTMLBuilder.empty_element_tags). This means an empty <p> tag
will be presented as "<p></p>", not "<p/>" or "<p>".
will be presented as "<p></p>", not "<p />".

The default implementation has no opinion about which tags are
empty-element tags, so a tag will be presented as an
empty-element tag if and only if it has no children.
"<foo></foo>" will become "<foo/>", and "<foo>bar</foo>" will
empty-element tag if and only if it has no contents.
"<foo></foo>" will become "<foo />", and "<foo>bar</foo>" will
be left alone.

:param tag_name: The name of a markup tag.
"""
if self.empty_element_tags is None:
return True
return tag_name in self.empty_element_tags


def feed(self, markup):
"""Run some incoming markup through some parsing process,
populating the `BeautifulSoup` object in self.soup.

This method is not implemented in TreeBuilder; it must be
implemented in subclasses.

:return: None.
"""
raise NotImplementedError()

def prepare_markup(self, markup, user_specified_encoding=None,
document_declared_encoding=None, exclude_encodings=None):
"""Run any preliminary steps necessary to make incoming markup
acceptable to the parser.

:param markup: Some markup -- probably a bytestring.
:param user_specified_encoding: The user asked to try this encoding.
:param document_declared_encoding: The markup itself claims to be
in this encoding. NOTE: This argument is not used by the
calling code and can probably be removed.
:param exclude_encodings: The user asked _not_ to try any of
these encodings.

:yield: A series of 4-tuples:
(markup, encoding, declared encoding,
has undergone character replacement)

Each 4-tuple represents a strategy for converting the
document to Unicode and parsing it. Each strategy will be tried
in turn.

By default, the only strategy is to parse the markup
as-is. See `LXMLTreeBuilderForXML` and
`HTMLParserTreeBuilder` for implementations that take into
account the quirks of particular parsers.
"""
yield markup, None, None, False
document_declared_encoding=None):
return markup, None, None, False

def test_fragment_to_document(self, fragment):
"""Wrap an HTML fragment to make it look like a document.
@@ -274,36 +139,16 @@ class TreeBuilder(object):
results against other HTML fragments.

This method should not be used outside of tests.

:param fragment: A string -- fragment of HTML.
:return: A string -- a full HTML document.
"""
return fragment

def set_up_substitutions(self, tag):
"""Set up any substitutions that will need to be performed on
a `Tag` when it's output as a string.

By default, this does nothing. See `HTMLTreeBuilder` for a
case where this is used.

:param tag: A `Tag`
:return: Whether or not a substitution was performed.
"""
return False

def _replace_cdata_list_attribute_values(self, tag_name, attrs):
"""When an attribute value is associated with a tag that can
have multiple values for that attribute, convert the string
value to a list of strings.
"""Replaces class="foo bar" with class=["foo", "bar"]

Basically, replaces class="foo bar" with class=["foo", "bar"]

NOTE: This method modifies its input in place.

:param tag_name: The name of a tag.
:param attrs: A dictionary containing the tag's attributes.
Any appropriate attribute values will be modified in place.
Modifies its input in place.
"""
if not attrs:
return attrs
@@ -318,7 +163,7 @@ class TreeBuilder(object):
# values. Split it into a list.
value = attrs[attr]
if isinstance(value, str):
values = nonwhitespace_re.findall(value)
values = whitespace_re.split(value)
else:
# html5lib sometimes calls setAttributes twice
# for the same tag when rearranging the parse
@@ -329,13 +174,9 @@ class TreeBuilder(object):
values = value
attrs[attr] = values
return attrs

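_replace_cdata_list_attribute_values() is the mechanism behind multi-valued attributes; whether it splits on non-whitespace runs (nonwhitespace_re) or whitespace boundaries (whitespace_re) is exactly what this hunk changes. The effect from the public API:

    from bs4 import BeautifulSoup

    soup = BeautifulSoup('<p class="foo bar">x</p>', "html.parser")
    # The 'class' attribute comes back as a list, not a string.
    assert soup.p['class'] == ['foo', 'bar']
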
class SAXTreeBuilder(TreeBuilder):
"""A Beautiful Soup treebuilder that listens for SAX events.

This is not currently used for anything, but it demonstrates
how a simple TreeBuilder would work.
"""
class SAXTreeBuilder(TreeBuilder):
"""A Beautiful Soup treebuilder that listens for SAX events."""

def feed(self, markup):
raise NotImplementedError()
@@ -345,11 +186,11 @@ class SAXTreeBuilder(TreeBuilder):

def startElement(self, name, attrs):
attrs = dict((key[1], value) for key, value in list(attrs.items()))
#print("Start %s, %r" % (name, attrs))
#print "Start %s, %r" % (name, attrs)
self.soup.handle_starttag(name, attrs)

def endElement(self, name):
#print("End %s" % name)
#print "End %s" % name
self.soup.handle_endtag(name)

def startElementNS(self, nsTuple, nodeName, attrs):
@@ -386,44 +227,10 @@ class HTMLTreeBuilder(TreeBuilder):
Such as which tags are empty-element tags.
"""

empty_element_tags = set([
# These are from HTML5.
'area', 'base', 'br', 'col', 'embed', 'hr', 'img', 'input', 'keygen', 'link', 'menuitem', 'meta', 'param', 'source', 'track', 'wbr',

# These are from earlier versions of HTML and are removed in HTML5.
'basefont', 'bgsound', 'command', 'frame', 'image', 'isindex', 'nextid', 'spacer'
])
preserve_whitespace_tags = set(['pre', 'textarea'])
empty_element_tags = set(['br' , 'hr', 'input', 'img', 'meta',
'spacer', 'link', 'frame', 'base'])

# The HTML standard defines these as block-level elements. Beautiful
# Soup does not treat these elements differently from other elements,
# but it may do so eventually, and this information is available if
# you need to use it.
block_elements = set(["address", "article", "aside", "blockquote", "canvas", "dd", "div", "dl", "dt", "fieldset", "figcaption", "figure", "footer", "form", "h1", "h2", "h3", "h4", "h5", "h6", "header", "hr", "li", "main", "nav", "noscript", "ol", "output", "p", "pre", "section", "table", "tfoot", "ul", "video"])

# These HTML tags need special treatment so they can be
# represented by a string class other than NavigableString.
#
# For some of these tags, it's because the HTML standard defines
# an unusual content model for them. I made this list by going
# through the HTML spec
# (https://html.spec.whatwg.org/#metadata-content) and looking for
# "metadata content" elements that can contain strings.
#
# The Ruby tags (<rt> and <rp>) are here despite being normal
# "phrasing content" tags, because the content they contain is
# qualitatively different from other text in the document, and it
# can be useful to be able to distinguish it.
#
# TODO: Arguably <noscript> could go here but it seems
# qualitatively different from the other tags.
DEFAULT_STRING_CONTAINERS = {
'rt' : RubyTextString,
'rp' : RubyParenthesisString,
'style': Stylesheet,
'script': Script,
'template': TemplateString,
}

# The HTML standard defines these attributes as containing a
# space-separated list of values, not a single value. That is,
# class="foo bar" means that the 'class' attribute has two values,
@@ -431,7 +238,7 @@ class HTMLTreeBuilder(TreeBuilder):
# encounter one of these attributes, we will parse its value into
# a list of values if possible. Upon output, the list will be
# converted back into a string.
DEFAULT_CDATA_LIST_ATTRIBUTES = {
cdata_list_attributes = {
"*" : ['class', 'accesskey', 'dropzone'],
"a" : ['rel', 'rev'],
"link" : ['rel', 'rev'],
@@ -448,19 +255,7 @@ class HTMLTreeBuilder(TreeBuilder):
"output" : ["for"],
}

DEFAULT_PRESERVE_WHITESPACE_TAGS = set(['pre', 'textarea'])

def set_up_substitutions(self, tag):
"""Replace the declared encoding in a <meta> tag with a placeholder,
to be substituted when the tag is output to a string.

An HTML document may come in to Beautiful Soup as one
encoding, but exit in a different encoding, and the <meta> tag
needs to be changed to reflect this.

:param tag: A `Tag`
:return: Whether or not a substitution was performed.
"""
# We are only interested in <meta> tags
if tag.name != 'meta':
return False
@@ -493,107 +288,10 @@ class HTMLTreeBuilder(TreeBuilder):

return (meta_encoding is not None)

class DetectsXMLParsedAsHTML(object):
"""A mixin class for any class (a TreeBuilder, or some class used by a
TreeBuilder) that's in a position to detect whether an XML
document is being incorrectly parsed as HTML, and issue an
appropriate warning.

This requires being able to observe an incoming processing
instruction that might be an XML declaration, and also able to
observe tags as they're opened. If you can't do that for a given
TreeBuilder, there's a less reliable implementation based on
examining the raw markup.
"""

# Regular expression for seeing if markup has an <html> tag.
LOOKS_LIKE_HTML = re.compile("<[^ +]html", re.I)
LOOKS_LIKE_HTML_B = re.compile(b"<[^ +]html", re.I)

XML_PREFIX = '<?xml'
XML_PREFIX_B = b'<?xml'

@classmethod
def warn_if_markup_looks_like_xml(cls, markup, stacklevel=3):
"""Perform a check on some markup to see if it looks like XML
that's not XHTML. If so, issue a warning.

This is much less reliable than doing the check while parsing,
but some of the tree builders can't do that.

:param stacklevel: The stacklevel of the code calling this
function.

:return: True if the markup looks like non-XHTML XML, False
otherwise.

"""
if isinstance(markup, bytes):
prefix = cls.XML_PREFIX_B
looks_like_html = cls.LOOKS_LIKE_HTML_B
else:
prefix = cls.XML_PREFIX
looks_like_html = cls.LOOKS_LIKE_HTML

if (markup is not None
and markup.startswith(prefix)
and not looks_like_html.search(markup[:500])
):
cls._warn(stacklevel=stacklevel+2)
return True
return False

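warn_if_markup_looks_like_xml() can be triggered deliberately by handing an XML declaration plus a non-<html> root to an HTML parser; with a recent bs4 the expected result is an XMLParsedAsHTMLWarning:

    import warnings
    from bs4 import BeautifulSoup

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        BeautifulSoup('<?xml version="1.0"?><root>x</root>', "html.parser")
    print([w.category.__name__ for w in caught])
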
@classmethod
def _warn(cls, stacklevel=5):
"""Issue a warning about XML being parsed as HTML."""
warnings.warn(
XMLParsedAsHTMLWarning.MESSAGE, XMLParsedAsHTMLWarning,
stacklevel=stacklevel
)

def _initialize_xml_detector(self):
"""Call this method before parsing a document."""
self._first_processing_instruction = None
self._root_tag = None

def _document_might_be_xml(self, processing_instruction):
"""Call this method when encountering an XML declaration, or a
"processing instruction" that might be an XML declaration.
"""
if (self._first_processing_instruction is not None
or self._root_tag is not None):
# The document has already started. Don't bother checking
# anymore.
return

self._first_processing_instruction = processing_instruction

# We won't know until we encounter the first tag whether or
# not this is actually a problem.

def _root_tag_encountered(self, name):
"""Call this when you encounter the document's root tag.

This is where we actually check whether an XML document is
being incorrectly parsed as HTML, and issue the warning.
"""
if self._root_tag is not None:
# This method was incorrectly called multiple times. Do
# nothing.
return

self._root_tag = name
if (name != 'html' and self._first_processing_instruction is not None
and self._first_processing_instruction.lower().startswith('xml ')):
# We encountered an XML declaration and then a tag other
# than 'html'. This is a reliable indicator that a
# non-XHTML document is being parsed as XML.
self._warn()


def register_treebuilders_from(module):
"""Copy TreeBuilders from the given module into this module."""
this_module = sys.modules[__name__]
# I'm fairly sure this is not the best way to do this.
this_module = sys.modules['bs4.builder']
for name in module.__all__:
obj = getattr(module, name)

@@ -604,22 +302,12 @@ def register_treebuilders_from(module):
this_module.builder_registry.register(obj)

class ParserRejectedMarkup(Exception):
"""An Exception to be raised when the underlying parser simply
refuses to parse the given markup.
"""
def __init__(self, message_or_exception):
"""Explain why the parser rejected the given markup, either
with a textual explanation or another exception.
"""
if isinstance(message_or_exception, Exception):
e = message_or_exception
message_or_exception = "%s: %s" % (e.__class__.__name__, str(e))
super(ParserRejectedMarkup, self).__init__(message_or_exception)

pass

# Builders are registered in reverse order of priority, so that custom
# builder registrations will take precedence. In general, we want lxml
# to take precedence over html5lib, because it's faster. And we only
# want to use HTMLParser as a last resort.
# want to use HTMLParser as a last result.
from . import _htmlparser
register_treebuilders_from(_htmlparser)
try:

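The newer ParserRejectedMarkup.__init__ shown above accepts either a string or another exception, flattening the latter into a readable message; the older class body was a bare pass. A tiny check of the wrapping behavior:

    from bs4.builder import ParserRejectedMarkup

    err = ParserRejectedMarkup(ValueError("bad input"))
    print(err)  # ValueError: bad input
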
@@ -1,14 +1,9 @@
# Use of this source code is governed by the MIT license.
__license__ = "MIT"

__all__ = [
'HTML5TreeBuilder',
]

import warnings
import re
from bs4.builder import (
DetectsXMLParsedAsHTML,
PERMISSIVE,
HTML,
HTML_5,
@@ -16,13 +11,17 @@ from bs4.builder import (
)
from bs4.element import (
NamespacedAttribute,
nonwhitespace_re,
whitespace_re,
)
import html5lib
from html5lib.constants import (
namespaces,
prefixes,
)
try:
# html5lib >= 0.99999999/1.0b9
from html5lib.treebuilders import base as treebuildersbase
except ImportError:
# html5lib <= 0.9999999/1.0b8
from html5lib.treebuilders import _base as treebuildersbase
from html5lib.constants import namespaces

from bs4.element import (
Comment,
Doctype,
@@ -30,37 +29,13 @@ from bs4.element import (
Tag,
)

try:
# Pre-0.99999999
from html5lib.treebuilders import _base as treebuilder_base
new_html5lib = False
except ImportError as e:
# 0.99999999 and up
from html5lib.treebuilders import base as treebuilder_base
new_html5lib = True

class HTML5TreeBuilder(HTMLTreeBuilder):
"""Use html5lib to build a tree.

Note that this TreeBuilder does not support some features common
to HTML TreeBuilders. Some of these features could theoretically
be implemented, but at the very least it's quite difficult,
because html5lib moves the parse tree around as it's being built.

* This TreeBuilder doesn't use different subclasses of NavigableString
based on the name of the tag in which the string was found.

* You can't use a SoupStrainer to parse only part of a document.
"""
"""Use html5lib to build a tree."""

NAME = "html5lib"

features = [NAME, PERMISSIVE, HTML_5, HTML]

# html5lib can tell us which line number and position in the
# original file is the source of an element.
TRACKS_LINE_NUMBERS = True

def prepare_markup(self, markup, user_specified_encoding,
document_declared_encoding=None, exclude_encodings=None):
# Store the user-specified encoding for use later on.
@@ -70,56 +45,27 @@ class HTML5TreeBuilder(HTMLTreeBuilder):
# ATM because the html5lib TreeBuilder doesn't use
# UnicodeDammit.
if exclude_encodings:
warnings.warn(
"You provided a value for exclude_encoding, but the html5lib tree builder doesn't support exclude_encoding.",
stacklevel=3
)

# html5lib only parses HTML, so if it's given XML that's worth
# noting.
DetectsXMLParsedAsHTML.warn_if_markup_looks_like_xml(
markup, stacklevel=3
)

warnings.warn("You provided a value for exclude_encoding, but the html5lib tree builder doesn't support exclude_encoding.")
yield (markup, None, None, False)

# These methods are defined by Beautiful Soup.
def feed(self, markup):
if self.soup.parse_only is not None:
warnings.warn(
"You provided a value for parse_only, but the html5lib tree builder doesn't support parse_only. The entire document will be parsed.",
stacklevel=4
)
warnings.warn("You provided a value for parse_only, but the html5lib tree builder doesn't support parse_only. The entire document will be parsed.")
parser = html5lib.HTMLParser(tree=self.create_treebuilder)
self.underlying_builder.parser = parser
extra_kwargs = dict()
if not isinstance(markup, str):
if new_html5lib:
extra_kwargs['override_encoding'] = self.user_specified_encoding
else:
extra_kwargs['encoding'] = self.user_specified_encoding
doc = parser.parse(markup, **extra_kwargs)

doc = parser.parse(markup, encoding=self.user_specified_encoding)

# Set the character encoding detected by the tokenizer.
if isinstance(markup, str):
# We need to special-case this because html5lib sets
# charEncoding to UTF-8 if it gets Unicode input.
doc.original_encoding = None
else:
original_encoding = parser.tokenizer.stream.charEncoding[0]
if not isinstance(original_encoding, str):
# In 0.99999999 and up, the encoding is an html5lib
# Encoding object. We want to use a string for compatibility
# with other tree builders.
original_encoding = original_encoding.name
doc.original_encoding = original_encoding
self.underlying_builder.parser = None

doc.original_encoding = parser.tokenizer.stream.charEncoding[0]

def create_treebuilder(self, namespaceHTMLElements):
self.underlying_builder = TreeBuilderForHtml5lib(
namespaceHTMLElements, self.soup,
store_line_numbers=self.store_line_numbers
)
self.soup, namespaceHTMLElements)
return self.underlying_builder

def test_fragment_to_document(self, fragment):
@@ -127,30 +73,12 @@ class HTML5TreeBuilder(HTMLTreeBuilder):
return '<html><head></head><body>%s</body></html>' % fragment


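Everything in this file is exercised by a single constructor call, since HTML5TreeBuilder registers under the name "html5lib" (the html5lib package must be installed):

    from bs4 import BeautifulSoup

    # html5lib repairs markup the way browsers do, wrapping fragments in
    # <html><head></head><body>...</body></html>.
    soup = BeautifulSoup("<p>unclosed", "html5lib")
    print(soup.prettify())
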
class TreeBuilderForHtml5lib(treebuilder_base.TreeBuilder):

def __init__(self, namespaceHTMLElements, soup=None,
store_line_numbers=True, **kwargs):
if soup:
self.soup = soup
else:
from bs4 import BeautifulSoup
# TODO: Why is the parser 'html.parser' here? To avoid an
# infinite loop?
self.soup = BeautifulSoup(
"", "html.parser", store_line_numbers=store_line_numbers,
**kwargs
)
# TODO: What are **kwargs exactly? Should they be passed in
# here in addition to/instead of being passed to the BeautifulSoup
# constructor?
class TreeBuilderForHtml5lib(treebuildersbase.TreeBuilder):

def __init__(self, soup, namespaceHTMLElements):
self.soup = soup
super(TreeBuilderForHtml5lib, self).__init__(namespaceHTMLElements)

# This will be set later to an html5lib.html5parser.HTMLParser
# object, which we can use to track the current line number.
self.parser = None
self.store_line_numbers = store_line_numbers

def documentClass(self):
self.soup.reset()
return Element(self.soup, self.soup, None)
@@ -164,26 +92,14 @@ class TreeBuilderForHtml5lib(treebuilder_base.TreeBuilder):
self.soup.object_was_parsed(doctype)

def elementClass(self, name, namespace):
kwargs = {}
if self.parser and self.store_line_numbers:
# This represents the point immediately after the end of the
# tag. We don't know when the tag started, but we do know
# where it ended -- the character just before this one.
sourceline, sourcepos = self.parser.tokenizer.stream.position()
kwargs['sourceline'] = sourceline
kwargs['sourcepos'] = sourcepos-1
tag = self.soup.new_tag(name, namespace, **kwargs)

tag = self.soup.new_tag(name, namespace)
return Element(tag, self.soup, namespace)

def commentClass(self, data):
return TextNode(Comment(data), self.soup)

def fragmentClass(self):
from bs4 import BeautifulSoup
# TODO: Why is the parser 'html.parser' here? To avoid an
# infinite loop?
self.soup = BeautifulSoup("", "html.parser")
self.soup = BeautifulSoup("")
self.soup.name = "[document_fragment]"
return Element(self.soup, self.soup, None)

@@ -195,57 +111,7 @@ class TreeBuilderForHtml5lib(treebuilder_base.TreeBuilder):
return self.soup

def getFragment(self):
return treebuilder_base.TreeBuilder.getFragment(self).element

def testSerializer(self, element):
from bs4 import BeautifulSoup
rv = []
doctype_re = re.compile(r'^(.*?)(?: PUBLIC "(.*?)"(?: "(.*?)")?| SYSTEM "(.*?)")?$')

def serializeElement(element, indent=0):
if isinstance(element, BeautifulSoup):
pass
if isinstance(element, Doctype):
m = doctype_re.match(element)
if m:
name = m.group(1)
if m.lastindex > 1:
publicId = m.group(2) or ""
systemId = m.group(3) or m.group(4) or ""
rv.append("""|%s<!DOCTYPE %s "%s" "%s">""" %
(' ' * indent, name, publicId, systemId))
else:
rv.append("|%s<!DOCTYPE %s>" % (' ' * indent, name))
else:
rv.append("|%s<!DOCTYPE >" % (' ' * indent,))
elif isinstance(element, Comment):
rv.append("|%s<!-- %s -->" % (' ' * indent, element))
elif isinstance(element, NavigableString):
rv.append("|%s\"%s\"" % (' ' * indent, element))
else:
if element.namespace:
name = "%s %s" % (prefixes[element.namespace],
element.name)
else:
name = element.name
rv.append("|%s<%s>" % (' ' * indent, name))
if element.attrs:
attributes = []
for name, value in list(element.attrs.items()):
if isinstance(name, NamespacedAttribute):
name = "%s %s" % (prefixes[name.namespace], name.name)
if isinstance(value, list):
value = " ".join(value)
attributes.append((name, value))

for name, value in sorted(attributes):
rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))
indent += 2
for child in element.children:
serializeElement(child, indent)
serializeElement(element, 0)

return "\n".join(rv)
return treebuildersbase.TreeBuilder.getFragment(self).element

class AttrList(object):
def __init__(self, element):
@@ -256,14 +122,14 @@ class AttrList(object):
def __setitem__(self, name, value):
# If this attribute is a multi-valued attribute for this element,
# turn its value into a list.
list_attr = self.element.cdata_list_attributes or {}
if (name in list_attr.get('*', [])
list_attr = HTML5TreeBuilder.cdata_list_attributes
if (name in list_attr['*']
or (self.element.name in list_attr
and name in list_attr.get(self.element.name, []))):
and name in list_attr[self.element.name])):
# A node that is being cloned may have already undergone
# this procedure.
if not isinstance(value, list):
value = nonwhitespace_re.findall(value)
value = whitespace_re.split(value)
self.element[name] = value
def items(self):
return list(self.attrs.items())
@@ -277,9 +143,9 @@ class AttrList(object):
return name in list(self.attrs.keys())


class Element(treebuilder_base.Node):
class Element(treebuildersbase.Node):
def __init__(self, element, soup, namespace):
treebuilder_base.Node.__init__(self, element.name)
treebuildersbase.Node.__init__(self, element.name)
self.element = element
self.soup = soup
self.namespace = namespace
@@ -298,15 +164,13 @@ class Element(treebuilder_base.Node):
child = node
elif node.element.__class__ == NavigableString:
string_child = child = node.element
node.parent = self
else:
child = node.element
node.parent = self

if not isinstance(child, str) and child.parent is not None:
node.element.extract()

if (string_child is not None and self.element.contents
if (string_child and self.element.contents
and self.element.contents[-1].__class__ == NavigableString):
# We are appending a string onto another string.
# TODO This has O(n^2) performance, for input like
@@ -339,12 +203,12 @@ class Element(treebuilder_base.Node):
most_recent_element=most_recent_element)

def getAttributes(self):
if isinstance(self.element, Comment):
return {}
return AttrList(self.element)

def setAttributes(self, attributes):

if attributes is not None and len(attributes) > 0:

converted_attributes = []
for name, value in list(attributes.items()):
if isinstance(name, tuple):
@@ -366,11 +230,11 @@ class Element(treebuilder_base.Node):
attributes = property(getAttributes, setAttributes)

def insertText(self, data, insertBefore=None):
text = TextNode(self.soup.new_string(data), self.soup)
if insertBefore:
self.insertBefore(text, insertBefore)
text = TextNode(self.soup.new_string(data), self.soup)
self.insertBefore(data, insertBefore)
else:
self.appendChild(text)
self.appendChild(data)

def insertBefore(self, node, refNode):
index = self.element.index(refNode.element)
@@ -389,10 +253,9 @@ class Element(treebuilder_base.Node):

def reparentChildren(self, new_parent):
"""Move all of this tag's children into another tag."""
# print("MOVE", self.element.contents)
# print("FROM", self.element)
# print("TO", new_parent.element)

# print "MOVE", self.element.contents
# print "FROM", self.element
# print "TO", new_parent.element
element = self.element
new_parent_element = new_parent.element
# Determine what this tag's next_element will be once all the children
@@ -411,35 +274,29 @@ class Element(treebuilder_base.Node):
new_parents_last_descendant_next_element = new_parent_element.next_element

to_append = element.contents
append_after = new_parent_element.contents
if len(to_append) > 0:
# Set the first child's previous_element and previous_sibling
# to elements within the new parent
first_child = to_append[0]
if new_parents_last_descendant is not None:
if new_parents_last_descendant:
first_child.previous_element = new_parents_last_descendant
else:
first_child.previous_element = new_parent_element
first_child.previous_sibling = new_parents_last_child
if new_parents_last_descendant is not None:
if new_parents_last_descendant:
new_parents_last_descendant.next_element = first_child
else:
new_parent_element.next_element = first_child
if new_parents_last_child is not None:
if new_parents_last_child:
new_parents_last_child.next_sibling = first_child

# Find the very last element being moved. It is now the
# parent's last descendant. It has no .next_sibling and
# its .next_element is whatever the previous last
|
||||
# descendant had.
|
||||
last_childs_last_descendant = to_append[-1]._last_descendant(False, True)
|
||||
|
||||
last_childs_last_descendant.next_element = new_parents_last_descendant_next_element
|
||||
if new_parents_last_descendant_next_element is not None:
|
||||
# TODO: This code has no test coverage and I'm not sure
|
||||
# how to get html5lib to go through this path, but it's
|
||||
# just the other side of the previous line.
|
||||
new_parents_last_descendant_next_element.previous_element = last_childs_last_descendant
|
||||
last_childs_last_descendant.next_sibling = None
|
||||
# Fix the last child's next_element and next_sibling
|
||||
last_child = to_append[-1]
|
||||
last_child.next_element = new_parents_last_descendant_next_element
|
||||
if new_parents_last_descendant_next_element:
|
||||
new_parents_last_descendant_next_element.previous_element = last_child
|
||||
last_child.next_sibling = None
|
||||
|
||||
for child in to_append:
|
||||
child.parent = new_parent_element
|
||||
@@ -449,9 +306,9 @@ class Element(treebuilder_base.Node):
|
||||
element.contents = []
|
||||
element.next_element = final_next_element
|
||||
|
||||
# print("DONE WITH MOVE")
|
||||
# print("FROM", self.element)
|
||||
# print("TO", new_parent_element)
|
||||
# print "DONE WITH MOVE"
|
||||
# print "FROM", self.element
|
||||
# print "TO", new_parent_element
|
||||
|
||||
def cloneNode(self):
|
||||
tag = self.soup.new_tag(self.element.name, self.namespace)
|
||||
@@ -464,7 +321,7 @@ class Element(treebuilder_base.Node):
|
||||
return self.element.contents
|
||||
|
||||
def getNameTuple(self):
|
||||
if self.namespace == None:
|
||||
if self.namespace is None:
|
||||
return namespaces["html"], self.name
|
||||
else:
|
||||
return self.namespace, self.name
|
||||
@@ -473,7 +330,7 @@ class Element(treebuilder_base.Node):
|
||||
|
||||
class TextNode(Element):
|
||||
def __init__(self, element, soup):
|
||||
treebuilder_base.Node.__init__(self, None)
|
||||
treebuildersbase.Node.__init__(self, None)
|
||||
self.element = element
|
||||
self.soup = soup
|
||||
|
||||
|
||||
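One substantive change in the AttrList.__setitem__ hunk above is splitting a multi-valued attribute (such as class) with nonwhitespace_re.findall() instead of whitespace_re.split(). A minimal standalone re-creation of the two regexes (the patterns here are assumptions matching their bs4 names, not imports from bs4) shows the difference:

```python
import re

# Re-creations of the two regexes named in the hunk above.
nonwhitespace_re = re.compile(r'\S+')   # used by the newer side
whitespace_re = re.compile(r'\s+')      # used by the older side

value = ' cool  pretty '
print(nonwhitespace_re.findall(value))  # ['cool', 'pretty']
print(whitespace_re.split(value))       # ['', 'cool', 'pretty', ''] -- empty edge tokens
```

findall() never produces the empty edge tokens that split() yields for leading or trailing whitespace, which is presumably why the newer side switched to it.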
@@ -1,18 +1,35 @@
# encoding: utf-8
"""Use the HTMLParser library to parse HTML files that aren't too bad."""

# Use of this source code is governed by the MIT license.
__license__ = "MIT"

__all__ = [
    'HTMLParserTreeBuilder',
    ]

from html.parser import HTMLParser

try:
    from html.parser import HTMLParseError
except ImportError as e:
    # HTMLParseError is removed in Python 3.5. Since it can never be
    # thrown in 3.5, we can just define our own class as a placeholder.
    class HTMLParseError(Exception):
        pass

import sys
import warnings

# Starting in Python 3.2, the HTMLParser constructor takes a 'strict'
# argument, which we'd like to set to False. Unfortunately,
# http://bugs.python.org/issue13273 makes strict=True a better bet
# before Python 3.2.3.
#
# At the end of this file, we monkeypatch HTMLParser so that
# strict=True works well on Python 3.2.2.
major, minor, release = sys.version_info[:3]
CONSTRUCTOR_TAKES_STRICT = major == 3 and minor == 2 and release >= 3
CONSTRUCTOR_STRICT_IS_DEPRECATED = major == 3 and minor == 3
CONSTRUCTOR_TAKES_CONVERT_CHARREFS = major == 3 and minor >= 4


from bs4.element import (
    CData,
    Comment,
@@ -23,8 +40,6 @@ from bs4.element import (
from bs4.dammit import EntitySubstitution, UnicodeDammit

from bs4.builder import (
    DetectsXMLParsedAsHTML,
    ParserRejectedMarkup,
    HTML,
    HTMLTreeBuilder,
    STRICT,
@@ -33,84 +48,8 @@ from bs4.builder import (

HTMLPARSER = 'html.parser'

class BeautifulSoupHTMLParser(HTMLParser, DetectsXMLParsedAsHTML):
    """A subclass of the Python standard library's HTMLParser class, which
    listens for HTMLParser events and translates them into calls
    to Beautiful Soup's tree construction API.
    """

    # Strategies for handling duplicate attributes
    IGNORE = 'ignore'
    REPLACE = 'replace'

    def __init__(self, *args, **kwargs):
        """Constructor.

        :param on_duplicate_attribute: A strategy for what to do if a
            tag includes the same attribute more than once. Accepted
            values are: REPLACE (replace earlier values with later
            ones, the default), IGNORE (keep the earliest value
            encountered), or a callable. A callable must take three
            arguments: the dictionary of attributes already processed,
            the name of the duplicate attribute, and the most recent value
            encountered.
        """
        self.on_duplicate_attribute = kwargs.pop(
            'on_duplicate_attribute', self.REPLACE
        )
        HTMLParser.__init__(self, *args, **kwargs)

        # Keep a list of empty-element tags that were encountered
        # without an explicit closing tag. If we encounter a closing tag
        # of this type, we'll associate it with one of those entries.
        #
        # This isn't a stack because we don't care about the
        # order. It's a list of closing tags we've already handled and
        # will ignore, assuming they ever show up.
        self.already_closed_empty_element = []

        self._initialize_xml_detector()

    def error(self, message):
        # NOTE: This method is required so long as Python 3.9 is
        # supported. The corresponding code is removed from HTMLParser
        # in 3.5, but not removed from ParserBase until 3.10.
        # https://github.com/python/cpython/issues/76025
        #
        # The original implementation turned the error into a warning,
        # but in every case I discovered, this made HTMLParser
        # immediately crash with an error message that was less
        # helpful than the warning. The new implementation makes it
        # more clear that html.parser just can't parse this
        # markup. The 3.10 implementation does the same, though it
        # raises AssertionError rather than calling a method. (We
        # catch this error and wrap it in a ParserRejectedMarkup.)
        raise ParserRejectedMarkup(message)

    def handle_startendtag(self, name, attrs):
        """Handle an incoming empty-element tag.

        This is only called when the markup looks like <tag/>.

        :param name: Name of the tag.
        :param attrs: Dictionary of the tag's attributes.
        """
        # is_startend() tells handle_starttag not to close the tag
        # just because its name matches a known empty-element tag. We
        # know that this is an empty-element tag and we want to call
        # handle_endtag ourselves.
        tag = self.handle_starttag(name, attrs, handle_empty_element=False)
        self.handle_endtag(name)

    def handle_starttag(self, name, attrs, handle_empty_element=True):
        """Handle an opening tag, e.g. '<tag>'

        :param name: Name of the tag.
        :param attrs: Dictionary of the tag's attributes.
        :param handle_empty_element: True if this tag is known to be
            an empty-element tag (i.e. there is not expected to be any
            closing tag).
        """
class BeautifulSoupHTMLParser(HTMLParser):
    def handle_starttag(self, name, attrs):
        # XXX namespace
        attr_dict = {}
        for key, value in attrs:
@@ -118,78 +57,20 @@ class BeautifulSoupHTMLParser(HTMLParser, DetectsXMLParsedAsHTML):
            # for consistency with the other tree builders.
            if value is None:
                value = ''
            if key in attr_dict:
                # A single attribute shows up multiple times in this
                # tag. How to handle it depends on the
                # on_duplicate_attribute setting.
                on_dupe = self.on_duplicate_attribute
                if on_dupe == self.IGNORE:
                    pass
                elif on_dupe in (None, self.REPLACE):
                    attr_dict[key] = value
                else:
                    on_dupe(attr_dict, key, value)
            else:
                attr_dict[key] = value
            attr_dict[key] = value
            attrvalue = '""'
        #print("START", name)
        sourceline, sourcepos = self.getpos()
        tag = self.soup.handle_starttag(
            name, None, None, attr_dict, sourceline=sourceline,
            sourcepos=sourcepos
        )
        if tag and tag.is_empty_element and handle_empty_element:
            # Unlike other parsers, html.parser doesn't send separate end tag
            # events for empty-element tags. (It's handled in
            # handle_startendtag, but only if the original markup looked like
            # <tag/>.)
            #
            # So we need to call handle_endtag() ourselves. Since we
            # know the start event is identical to the end event, we
            # don't want handle_endtag() to cross off any previous end
            # events for tags of this name.
            self.handle_endtag(name, check_already_closed=False)
        self.soup.handle_starttag(name, None, None, attr_dict)

            # But we might encounter an explicit closing tag for this tag
            # later on. If so, we want to ignore it.
            self.already_closed_empty_element.append(name)
    def handle_endtag(self, name):
        self.soup.handle_endtag(name)

        if self._root_tag is None:
            self._root_tag_encountered(name)

    def handle_endtag(self, name, check_already_closed=True):
        """Handle a closing tag, e.g. '</tag>'

        :param name: A tag name.
        :param check_already_closed: True if this tag is expected to
           be the closing portion of an empty-element tag,
           e.g. '<tag></tag>'.
        """
        #print("END", name)
        if check_already_closed and name in self.already_closed_empty_element:
            # This is a redundant end tag for an empty-element tag.
            # We've already called handle_endtag() for it, so just
            # check it off the list.
            #print("ALREADY CLOSED", name)
            self.already_closed_empty_element.remove(name)
        else:
            self.soup.handle_endtag(name)

    def handle_data(self, data):
        """Handle some textual data that shows up between tags."""
        self.soup.handle_data(data)

    def handle_charref(self, name):
        """Handle a numeric character reference by converting it to the
        corresponding Unicode character and treating it as textual
        data.

        :param name: Character number, possibly in hexadecimal.
        """
        # TODO: This was originally a workaround for a bug in
        # HTMLParser. (http://bugs.python.org/issue13633) The bug has
        # been fixed, but removing this code still makes some
        # Beautiful Soup tests fail. This needs investigation.
        # XXX workaround for a bug in HTMLParser. Remove this once
        # it's fixed in all supported versions.
        # http://bugs.python.org/issue13633
        if name.startswith('x'):
            real_name = int(name.lstrip('x'), 16)
        elif name.startswith('X'):
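A standalone sketch of the three duplicate-attribute strategies documented in the constructor above (REPLACE keeps the last value, IGNORE keeps the first, a callable decides). It mirrors the attr_dict loop in handle_starttag but is not a drop-in for either side of the diff:

```python
def collect_attrs(attrs, on_duplicate='replace'):
    """Fold an html.parser-style (name, value) list into a dict."""
    attr_dict = {}
    for key, value in attrs:
        if value is None:
            value = ''          # same normalization as the tree builder
        if key in attr_dict:
            if on_duplicate == 'ignore':
                pass            # keep the earliest value
            elif on_duplicate in (None, 'replace'):
                attr_dict[key] = value
            else:
                on_duplicate(attr_dict, key, value)  # callable strategy
        else:
            attr_dict[key] = value
    return attr_dict

print(collect_attrs([('href', '/a'), ('href', '/b')]))            # {'href': '/b'}
print(collect_attrs([('href', '/a'), ('href', '/b')], 'ignore'))  # {'href': '/a'}
```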
@@ -197,71 +78,37 @@ class BeautifulSoupHTMLParser(HTMLParser, DetectsXMLParsedAsHTML):
        else:
            real_name = int(name)

        data = None
        if real_name < 256:
            # HTML numeric entities are supposed to reference Unicode
            # code points, but sometimes they reference code points in
            # some other encoding (ahem, Windows-1252). E.g. &#147;
            # instead of &#201; for LEFT DOUBLE QUOTATION MARK. This
            # code tries to detect this situation and compensate.
            for encoding in (self.soup.original_encoding, 'windows-1252'):
                if not encoding:
                    continue
                try:
                    data = bytearray([real_name]).decode(encoding)
                except UnicodeDecodeError as e:
                    pass
        if not data:
            try:
                data = chr(real_name)
            except (ValueError, OverflowError) as e:
                pass
        data = data or "\N{REPLACEMENT CHARACTER}"
        try:
            data = chr(real_name)
        except (ValueError, OverflowError) as e:
            data = "\N{REPLACEMENT CHARACTER}"

        self.handle_data(data)

    def handle_entityref(self, name):
        """Handle a named entity reference by converting it to the
        corresponding Unicode character(s) and treating it as textual
        data.

        :param name: Name of the entity reference.
        """
        character = EntitySubstitution.HTML_ENTITY_TO_CHARACTER.get(name)
        if character is not None:
            data = character
        else:
            # If this were XML, it would be ambiguous whether "&foo"
            # was an character entity reference with a missing
            # semicolon or the literal string "&foo". Since this is
            # HTML, we have a complete list of all character entity references,
            # and this one wasn't found, so assume it's the literal string "&foo".
            data = "&%s" % name
            data = "&%s;" % name
        self.handle_data(data)

    def handle_comment(self, data):
        """Handle an HTML comment.

        :param data: The text of the comment.
        """
        self.soup.endData()
        self.soup.handle_data(data)
        self.soup.endData(Comment)

    def handle_decl(self, data):
        """Handle a DOCTYPE declaration.

        :param data: The text of the declaration.
        """
        self.soup.endData()
        data = data[len("DOCTYPE "):]
        if data.startswith("DOCTYPE "):
            data = data[len("DOCTYPE "):]
        elif data == 'DOCTYPE':
            # i.e. "<!DOCTYPE>"
            data = ''
        self.soup.handle_data(data)
        self.soup.endData(Doctype)

    def unknown_decl(self, data):
        """Handle a declaration of unknown type -- probably a CDATA block.

        :param data: The text of the declaration.
        """
        if data.upper().startswith('CDATA['):
            cls = CData
            data = data[len('CDATA['):]
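The Windows-1252 compensation that the newer handle_charref() performs, re-created as a free function for illustration (the break after a successful decode is an addition for clarity; the method itself works on self.soup state):

```python
def decode_charref(real_name, original_encoding=None):
    """Resolve a numeric character reference the way the hunk above does."""
    data = None
    if real_name < 256:
        # Low code points are often really Windows-1252 bytes in the wild.
        for encoding in (original_encoding, 'windows-1252'):
            if not encoding:
                continue
            try:
                data = bytearray([real_name]).decode(encoding)
                break
            except UnicodeDecodeError:
                pass
    if not data:
        try:
            data = chr(real_name)
        except (ValueError, OverflowError):
            pass
    return data or "\N{REPLACEMENT CHARACTER}"

print(decode_charref(147))    # '\u201c' -- &#147; treated as Windows-1252
print(decode_charref(8220))   # '\u201c' -- the true code point
```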
@@ -272,116 +119,144 @@ class BeautifulSoupHTMLParser(HTMLParser, DetectsXMLParsedAsHTML):
        self.soup.endData(cls)

    def handle_pi(self, data):
        """Handle a processing instruction.

        :param data: The text of the instruction.
        """
        self.soup.endData()
        self.soup.handle_data(data)
        self._document_might_be_xml(data)
        self.soup.endData(ProcessingInstruction)


class HTMLParserTreeBuilder(HTMLTreeBuilder):
    """A Beautiful soup `TreeBuilder` that uses the `HTMLParser` parser,
    found in the Python standard library.
    """

    is_xml = False
    picklable = True
    NAME = HTMLPARSER
    features = [NAME, HTML, STRICT]

    # The html.parser knows which line number and position in the
    # original file is the source of an element.
    TRACKS_LINE_NUMBERS = True
    def __init__(self, *args, **kwargs):
        if CONSTRUCTOR_TAKES_STRICT and not CONSTRUCTOR_STRICT_IS_DEPRECATED:
            kwargs['strict'] = False
        if CONSTRUCTOR_TAKES_CONVERT_CHARREFS:
            kwargs['convert_charrefs'] = False
        self.parser_args = (args, kwargs)

    def __init__(self, parser_args=None, parser_kwargs=None, **kwargs):
        """Constructor.

        :param parser_args: Positional arguments to pass into
            the BeautifulSoupHTMLParser constructor, once it's
            invoked.
        :param parser_kwargs: Keyword arguments to pass into
            the BeautifulSoupHTMLParser constructor, once it's
            invoked.
        :param kwargs: Keyword arguments for the superclass constructor.
        """
        # Some keyword arguments will be pulled out of kwargs and placed
        # into parser_kwargs.
        extra_parser_kwargs = dict()
        for arg in ('on_duplicate_attribute',):
            if arg in kwargs:
                value = kwargs.pop(arg)
                extra_parser_kwargs[arg] = value
        super(HTMLParserTreeBuilder, self).__init__(**kwargs)
        parser_args = parser_args or []
        parser_kwargs = parser_kwargs or {}
        parser_kwargs.update(extra_parser_kwargs)
        parser_kwargs['convert_charrefs'] = False
        self.parser_args = (parser_args, parser_kwargs)

    def prepare_markup(self, markup, user_specified_encoding=None,
                       document_declared_encoding=None, exclude_encodings=None):

        """Run any preliminary steps necessary to make incoming markup
        acceptable to the parser.

        :param markup: Some markup -- probably a bytestring.
        :param user_specified_encoding: The user asked to try this encoding.
        :param document_declared_encoding: The markup itself claims to be
            in this encoding.
        :param exclude_encodings: The user asked _not_ to try any of
            these encodings.

        :yield: A series of 4-tuples:
         (markup, encoding, declared encoding,
          has undergone character replacement)

        Each 4-tuple represents a strategy for converting the
        document to Unicode and parsing it. Each strategy will be tried
        in turn.
        """
        :return: A 4-tuple (markup, original encoding, encoding
            declared within markup, whether any characters had to be
            replaced with REPLACEMENT CHARACTER).
        """
        if isinstance(markup, str):
            # Parse Unicode as-is.
            yield (markup, None, None, False)
            return

        # Ask UnicodeDammit to sniff the most likely encoding.

        # This was provided by the end-user; treat it as a known
        # definite encoding per the algorithm laid out in the HTML5
        # spec. (See the EncodingDetector class for details.)
        known_definite_encodings = [user_specified_encoding]

        # This was found in the document; treat it as a slightly lower-priority
        # user encoding.
        user_encodings = [document_declared_encoding]

        try_encodings = [user_specified_encoding, document_declared_encoding]
        dammit = UnicodeDammit(
            markup,
            known_definite_encodings=known_definite_encodings,
            user_encodings=user_encodings,
            is_html=True,
            exclude_encodings=exclude_encodings
        )
        dammit = UnicodeDammit(markup, try_encodings, is_html=True,
                               exclude_encodings=exclude_encodings)
        yield (dammit.markup, dammit.original_encoding,
               dammit.declared_html_encoding,
               dammit.contains_replacement_characters)

    def feed(self, markup):
        """Run some incoming markup through some parsing process,
        populating the `BeautifulSoup` object in self.soup.
        """
        args, kwargs = self.parser_args
        parser = BeautifulSoupHTMLParser(*args, **kwargs)
        parser.soup = self.soup
        try:
            parser.feed(markup)
            parser.close()
        except AssertionError as e:
            # html.parser raises AssertionError in rare cases to
            # indicate a fatal problem with the markup, especially
            # when there's an error in the doctype declaration.
            raise ParserRejectedMarkup(e)
        parser.already_closed_empty_element = []
        except HTMLParseError as e:
            warnings.warn(RuntimeWarning(
                "Python's built-in HTMLParser cannot parse the given document. This is not a bug in Beautiful Soup. The best solution is to install an external parser (lxml or html5lib), and use Beautiful Soup with that parser. See http://www.crummy.com/software/BeautifulSoup/bs4/doc/#installing-a-parser for help."))
            raise e

# Patch 3.2 versions of HTMLParser earlier than 3.2.3 to use some
# 3.2.3 code. This ensures they don't treat markup like <p></p> as a
# string.
#
# XXX This code can be removed once most Python 3 users are on 3.2.3.
if major == 3 and minor == 2 and not CONSTRUCTOR_TAKES_STRICT:
    import re
    attrfind_tolerant = re.compile(
        r'\s*((?<=[\'"\s])[^\s/>][^\s/=>]*)(\s*=+\s*'
        r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?')
    HTMLParserTreeBuilder.attrfind_tolerant = attrfind_tolerant

    locatestarttagend = re.compile(r"""
  <[a-zA-Z][-.a-zA-Z0-9:_]*          # tag name
  (?:\s+                             # whitespace before attribute name
    (?:[a-zA-Z_][-.:a-zA-Z0-9_]*     # attribute name
      (?:\s*=\s*                     # value indicator
        (?:'[^']*'                   # LITA-enclosed value
          |\"[^\"]*\"                # LIT-enclosed value
          |[^'\">\s]+                # bare value
         )
       )?
     )
   )*
  \s*                                # trailing whitespace
""", re.VERBOSE)
    BeautifulSoupHTMLParser.locatestarttagend = locatestarttagend

    from html.parser import tagfind, attrfind

    def parse_starttag(self, i):
        self.__starttag_text = None
        endpos = self.check_for_whole_start_tag(i)
        if endpos < 0:
            return endpos
        rawdata = self.rawdata
        self.__starttag_text = rawdata[i:endpos]

        # Now parse the data between i+1 and j into a tag and attrs
        attrs = []
        match = tagfind.match(rawdata, i+1)
        assert match, 'unexpected call to parse_starttag()'
        k = match.end()
        self.lasttag = tag = rawdata[i+1:k].lower()
        while k < endpos:
            if self.strict:
                m = attrfind.match(rawdata, k)
            else:
                m = attrfind_tolerant.match(rawdata, k)
            if not m:
                break
            attrname, rest, attrvalue = m.group(1, 2, 3)
            if not rest:
                attrvalue = None
            elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
                 attrvalue[:1] == '"' == attrvalue[-1:]:
                attrvalue = attrvalue[1:-1]
                if attrvalue:
                    attrvalue = self.unescape(attrvalue)
            attrs.append((attrname.lower(), attrvalue))
            k = m.end()

        end = rawdata[k:endpos].strip()
        if end not in (">", "/>"):
            lineno, offset = self.getpos()
            if "\n" in self.__starttag_text:
                lineno = lineno + self.__starttag_text.count("\n")
                offset = len(self.__starttag_text) \
                         - self.__starttag_text.rfind("\n")
            else:
                offset = offset + len(self.__starttag_text)
            if self.strict:
                self.error("junk characters in start tag: %r"
                           % (rawdata[k:endpos][:20],))
            self.handle_data(rawdata[i:endpos])
            return endpos
        if end.endswith('/>'):
            # XHTML-style empty tag: <span attr="value" />
            self.handle_startendtag(tag, attrs)
        else:
            self.handle_starttag(tag, attrs)
            if tag in self.CDATA_CONTENT_ELEMENTS:
                self.set_cdata_mode(tag)
        return endpos

    def set_cdata_mode(self, elem):
        self.cdata_elem = elem.lower()
        self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)

    BeautifulSoupHTMLParser.parse_starttag = parse_starttag
    BeautifulSoupHTMLParser.set_cdata_mode = set_cdata_mode

    CONSTRUCTOR_TAKES_STRICT = True

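Whichever side of this file is in use, the builder registers under the 'html.parser' feature name, so caller code is unaffected by the change; a quick sanity check using only the stock bs4 API, nothing version-specific assumed:

```python
from bs4 import BeautifulSoup

# 'html.parser' selects HTMLParserTreeBuilder on both sides of this diff.
soup = BeautifulSoup('<a href=x>link</a>', 'html.parser')
print(soup.a['href'])   # 'x'
```

Both constructors also disable convert_charrefs where the Python version supports it, which is what routes entity handling through handle_charref()/handle_entityref() above.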
@@ -1,28 +1,19 @@
# Use of this source code is governed by the MIT license.
__license__ = "MIT"

__all__ = [
    'LXMLTreeBuilderForXML',
    'LXMLTreeBuilder',
    ]

try:
    from collections.abc import Callable # Python 3.6
except ImportError as e:
    from collections import Callable

from io import BytesIO
from io import StringIO
import collections
from lxml import etree
from bs4.element import (
    Comment,
    Doctype,
    NamespacedAttribute,
    ProcessingInstruction,
    XMLProcessingInstruction,
)
from bs4.builder import (
    DetectsXMLParsedAsHTML,
    FAST,
    HTML,
    HTMLTreeBuilder,
@@ -34,15 +25,10 @@ from bs4.dammit import EncodingDetector

LXML = 'lxml'

def _invert(d):
    "Invert a dictionary."
    return dict((v,k) for k, v in list(d.items()))

class LXMLTreeBuilderForXML(TreeBuilder):
    DEFAULT_PARSER_CLASS = etree.XMLParser

    is_xml = True
    processing_instruction_class = XMLProcessingInstruction

    NAME = "lxml-xml"
    ALTERNATE_NAMES = ["xml"]
@@ -54,79 +40,26 @@ class LXMLTreeBuilderForXML(TreeBuilder):

    # This namespace mapping is specified in the XML Namespace
    # standard.
    DEFAULT_NSMAPS = dict(xml='http://www.w3.org/XML/1998/namespace')
    DEFAULT_NSMAPS = {'http://www.w3.org/XML/1998/namespace' : "xml"}

    DEFAULT_NSMAPS_INVERTED = _invert(DEFAULT_NSMAPS)

    # NOTE: If we parsed Element objects and looked at .sourceline,
    # we'd be able to see the line numbers from the original document.
    # But instead we build an XMLParser or HTMLParser object to serve
    # as the target of parse messages, and those messages don't include
    # line numbers.
    # See: https://bugs.launchpad.net/lxml/+bug/1846906

    def initialize_soup(self, soup):
        """Let the BeautifulSoup object know about the standard namespace
        mapping.

        :param soup: A `BeautifulSoup`.
        """
        super(LXMLTreeBuilderForXML, self).initialize_soup(soup)
        self._register_namespaces(self.DEFAULT_NSMAPS)

    def _register_namespaces(self, mapping):
        """Let the BeautifulSoup object know about namespaces encountered
        while parsing the document.

        This might be useful later on when creating CSS selectors.

        This will track (almost) all namespaces, even ones that were
        only in scope for part of the document. If two namespaces have
        the same prefix, only the first one encountered will be
        tracked. Un-prefixed namespaces are not tracked.

        :param mapping: A dictionary mapping namespace prefixes to URIs.
        """
        for key, value in list(mapping.items()):
            # This is 'if key' and not 'if key is not None' because we
            # don't track un-prefixed namespaces. Soupselect will
            # treat an un-prefixed namespace as the default, which
            # causes confusion in some cases.
            if key and key not in self.soup._namespaces:
                # Let the BeautifulSoup object know about a new namespace.
                # If there are multiple namespaces defined with the same
                # prefix, the first one in the document takes precedence.
                self.soup._namespaces[key] = value

    def default_parser(self, encoding):
        """Find the default parser for the given encoding.

        :param encoding: A string.
        :return: Either a parser object or a class, which
            will be instantiated with default arguments.
        """
        # This can either return a parser object or a class, which
        # will be instantiated with default arguments.
        if self._default_parser is not None:
            return self._default_parser
        return etree.XMLParser(
            target=self, strip_cdata=False, recover=True, encoding=encoding)

    def parser_for(self, encoding):
        """Instantiate an appropriate parser for the given encoding.

        :param encoding: A string.
        :return: A parser object such as an `etree.XMLParser`.
        """
        # Use the default parser.
        parser = self.default_parser(encoding)

        if isinstance(parser, Callable):
        if isinstance(parser, collections.Callable):
            # Instantiate the parser with default arguments
            parser = parser(
                target=self, strip_cdata=False, recover=True, encoding=encoding
            )
            parser = parser(target=self, strip_cdata=False, encoding=encoding)
        return parser

    def __init__(self, parser=None, empty_element_tags=None, **kwargs):
    def __init__(self, parser=None, empty_element_tags=None):
        # TODO: Issue a warning if parser is present but not a
        # callable, since that means there's no way to create new
        # parsers for different encodings.
@@ -134,10 +67,8 @@ class LXMLTreeBuilderForXML(TreeBuilder):
        if empty_element_tags is not None:
            self.empty_element_tags = set(empty_element_tags)
        self.soup = None
        self.nsmaps = [self.DEFAULT_NSMAPS_INVERTED]
        self.active_namespace_prefixes = [dict(self.DEFAULT_NSMAPS)]
        super(LXMLTreeBuilderForXML, self).__init__(**kwargs)

        self.nsmaps = [self.DEFAULT_NSMAPS]

    def _getNsTag(self, tag):
        # Split the namespace URL out of a fully-qualified lxml tag
        # name. Copied from lxml's src/lxml/sax.py.
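The two revisions store DEFAULT_NSMAPS in opposite directions (prefix-to-URI on the newer side, URI-to-prefix on the older), and the newer side derives the inverted form via the _invert() helper added above. A standalone re-creation:

```python
def _invert(d):
    "Invert a dictionary."
    return dict((v, k) for k, v in list(d.items()))

DEFAULT_NSMAPS = dict(xml='http://www.w3.org/XML/1998/namespace')
print(_invert(DEFAULT_NSMAPS))
# {'http://www.w3.org/XML/1998/namespace': 'xml'}
```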
@@ -149,51 +80,16 @@ class LXMLTreeBuilderForXML(TreeBuilder):
    def prepare_markup(self, markup, user_specified_encoding=None,
                       exclude_encodings=None,
                       document_declared_encoding=None):
        """Run any preliminary steps necessary to make incoming markup
        acceptable to the parser.

        lxml really wants to get a bytestring and convert it to
        Unicode itself. So instead of using UnicodeDammit to convert
        the bytestring to Unicode using different encodings, this
        implementation uses EncodingDetector to iterate over the
        encodings, and tell lxml to try to parse the document as each
        one in turn.

        :param markup: Some markup -- hopefully a bytestring.
        :param user_specified_encoding: The user asked to try this encoding.
        :param document_declared_encoding: The markup itself claims to be
            in this encoding.
        :param exclude_encodings: The user asked _not_ to try any of
            these encodings.

        :yield: A series of 4-tuples:
        """
        :yield: A series of 4-tuples.
         (markup, encoding, declared encoding,
          has undergone character replacement)

        Each 4-tuple represents a strategy for converting the
        document to Unicode and parsing it. Each strategy will be tried
        in turn.
        Each 4-tuple represents a strategy for parsing the document.
        """
        is_html = not self.is_xml
        if is_html:
            self.processing_instruction_class = ProcessingInstruction
            # We're in HTML mode, so if we're given XML, that's worth
            # noting.
            DetectsXMLParsedAsHTML.warn_if_markup_looks_like_xml(
                markup, stacklevel=3
            )
        else:
            self.processing_instruction_class = XMLProcessingInstruction

        if isinstance(markup, str):
            # We were given Unicode. Maybe lxml can parse Unicode on
            # this system?

            # TODO: This is a workaround for
            # https://bugs.launchpad.net/lxml/+bug/1948551.
            # We can remove it once the upstream issue is fixed.
            if len(markup) > 0 and markup[0] == u'\N{BYTE ORDER MARK}':
                markup = markup[1:]
            yield markup, None, document_declared_encoding, False

        if isinstance(markup, str):
@@ -202,19 +98,14 @@ class LXMLTreeBuilderForXML(TreeBuilder):
            yield (markup.encode("utf8"), "utf8",
                   document_declared_encoding, False)

        # This was provided by the end-user; treat it as a known
        # definite encoding per the algorithm laid out in the HTML5
        # spec. (See the EncodingDetector class for details.)
        known_definite_encodings = [user_specified_encoding]

        # This was found in the document; treat it as a slightly lower-priority
        # user encoding.
        user_encodings = [document_declared_encoding]
        # Instead of using UnicodeDammit to convert the bytestring to
        # Unicode using different encodings, use EncodingDetector to
        # iterate over the encodings, and tell lxml to try to parse
        # the document as each one in turn.
        is_html = not self.is_xml
        try_encodings = [user_specified_encoding, document_declared_encoding]
        detector = EncodingDetector(
            markup, known_definite_encodings=known_definite_encodings,
            user_encodings=user_encodings, is_html=is_html,
            exclude_encodings=exclude_encodings
        )
            markup, try_encodings, is_html, exclude_encodings)
        for encoding in detector.encodings:
            yield (detector.markup, encoding, document_declared_encoding, False)

@@ -237,45 +128,25 @@ class LXMLTreeBuilderForXML(TreeBuilder):
            self.parser.feed(data)
            self.parser.close()
        except (UnicodeDecodeError, LookupError, etree.ParserError) as e:
            raise ParserRejectedMarkup(e)
            raise ParserRejectedMarkup(str(e))

    def close(self):
        self.nsmaps = [self.DEFAULT_NSMAPS_INVERTED]
        self.nsmaps = [self.DEFAULT_NSMAPS]

    def start(self, name, attrs, nsmap={}):
        # Make sure attrs is a mutable dict--lxml may send an immutable dictproxy.
        attrs = dict(attrs)
        nsprefix = None
        # Invert each namespace map as it comes in.
        if len(nsmap) == 0 and len(self.nsmaps) > 1:
            # There are no new namespaces for this tag, but
            # non-default namespaces are in play, so we need a
            # separate tag stack to know when they end.
            self.nsmaps.append(None)
        if len(self.nsmaps) > 1:
            # There are no new namespaces for this tag, but
            # non-default namespaces are in play, so we need a
            # separate tag stack to know when they end.
            self.nsmaps.append(None)
        elif len(nsmap) > 0:
            # A new namespace mapping has come into play.

            # First, Let the BeautifulSoup object know about it.
            self._register_namespaces(nsmap)

            # Then, add it to our running list of inverted namespace
            # mappings.
            self.nsmaps.append(_invert(nsmap))

            # The currently active namespace prefixes have
            # changed. Calculate the new mapping so it can be stored
            # with all Tag objects created while these prefixes are in
            # scope.
            current_mapping = dict(self.active_namespace_prefixes[-1])
            current_mapping.update(nsmap)

            # We should not track un-prefixed namespaces as we can only hold one
            # and it will be recognized as the default namespace by soupsieve,
            # which may be confusing in some situations.
            if '' in current_mapping:
                del current_mapping['']
            self.active_namespace_prefixes.append(current_mapping)

            inverted_nsmap = dict((value, key) for key, value in list(nsmap.items()))
            self.nsmaps.append(inverted_nsmap)
            # Also treat the namespace mapping as a set of attributes on the
            # tag, so we can recreate it later.
            attrs = attrs.copy()
@@ -300,11 +171,8 @@ class LXMLTreeBuilderForXML(TreeBuilder):

        namespace, name = self._getNsTag(name)
        nsprefix = self._prefix_for_namespace(namespace)
        self.soup.handle_starttag(
            name, namespace, nsprefix, attrs,
            namespaces=self.active_namespace_prefixes[-1]
        )

        self.soup.handle_starttag(name, namespace, nsprefix, attrs)

    def _prefix_for_namespace(self, namespace):
        """Find the currently active prefix for the given namespace."""
        if namespace is None:
@@ -328,20 +196,13 @@ class LXMLTreeBuilderForXML(TreeBuilder):
        if len(self.nsmaps) > 1:
            # This tag, or one of its parents, introduced a namespace
            # mapping, so pop it off the stack.
            out_of_scope_nsmap = self.nsmaps.pop()
            self.nsmaps.pop()

            if out_of_scope_nsmap is not None:
                # This tag introduced a namespace mapping which is no
                # longer in scope. Recalculate the currently active
                # namespace prefixes.
                self.active_namespace_prefixes.pop()

    def pi(self, target, data):
        self.soup.endData()
        data = target + ' ' + data
        self.soup.handle_data(data)
        self.soup.endData(self.processing_instruction_class)

        self.soup.handle_data(target + ' ' + data)
        self.soup.endData(ProcessingInstruction)

    def data(self, content):
        self.soup.handle_data(content)

@@ -368,7 +229,6 @@ class LXMLTreeBuilder(HTMLTreeBuilder, LXMLTreeBuilderForXML):

    features = ALTERNATE_NAMES + [NAME, HTML, FAST, PERMISSIVE]
    is_xml = False
    processing_instruction_class = ProcessingInstruction

    def default_parser(self, encoding):
        return etree.HTMLParser
@@ -380,7 +240,7 @@ class LXMLTreeBuilder(HTMLTreeBuilder, LXMLTreeBuilderForXML):
            self.parser.feed(markup)
            self.parser.close()
        except (UnicodeDecodeError, LookupError, etree.ParserError) as e:
            raise ParserRejectedMarkup(e)
            raise ParserRejectedMarkup(str(e))


    def test_fragment_to_document(self, fragment):

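Parser selection is likewise stable across this file's two revisions: 'lxml' and 'xml' remain registered feature names on both sides. A small smoke test, assuming the lxml package is installed:

```python
from bs4 import BeautifulSoup

# 'lxml' -> LXMLTreeBuilder (recovering HTML); 'xml' -> LXMLTreeBuilderForXML.
html_soup = BeautifulSoup('<p>hi', 'lxml')
xml_soup = BeautifulSoup('<root><item/></root>', 'xml')
print(html_soup.p.text)         # 'hi'
print(xml_soup.root.item.name)  # 'item'
```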
@@ -1,274 +0,0 @@
"""Integration code for CSS selectors using Soup Sieve (pypi: soupsieve)."""

# We don't use soupsieve
soupsieve = None


class CSS(object):
    """A proxy object against the soupsieve library, to simplify its
    CSS selector API.

    Acquire this object through the .css attribute on the
    BeautifulSoup object, or on the Tag you want to use as the
    starting point for a CSS selector.

    The main advantage of doing this is that the tag to be selected
    against doesn't need to be explicitly specified in the function
    calls, since it's already scoped to a tag.
    """

    def __init__(self, tag, api=soupsieve):
        """Constructor.

        You don't need to instantiate this class yourself; instead,
        access the .css attribute on the BeautifulSoup object, or on
        the Tag you want to use as the starting point for your CSS
        selector.

        :param tag: All CSS selectors will use this as their starting
        point.

        :param api: A plug-in replacement for the soupsieve module,
        designed mainly for use in tests.
        """
        if api is None:
            raise NotImplementedError(
                "Cannot execute CSS selectors because the soupsieve package is not installed."
            )
        self.api = api
        self.tag = tag

    def escape(self, ident):
        """Escape a CSS identifier.

        This is a simple wrapper around soupselect.escape(). See the
        documentation for that function for more information.
        """
        if soupsieve is None:
            raise NotImplementedError(
                "Cannot escape CSS identifiers because the soupsieve package is not installed."
            )
        return self.api.escape(ident)

    def _ns(self, ns, select):
        """Normalize a dictionary of namespaces."""
        if not isinstance(select, self.api.SoupSieve) and ns is None:
            # If the selector is a precompiled pattern, it already has
            # a namespace context compiled in, which cannot be
            # replaced.
            ns = self.tag._namespaces
        return ns

    def _rs(self, results):
        """Normalize a list of results to a Resultset.

        A ResultSet is more consistent with the rest of Beautiful
        Soup's API, and ResultSet.__getattr__ has a helpful error
        message if you try to treat a list of results as a single
        result (a common mistake).
        """
        # Import here to avoid circular import
        from bs4.element import ResultSet
        return ResultSet(None, results)

    def compile(self, select, namespaces=None, flags=0, **kwargs):
        """Pre-compile a selector and return the compiled object.

        :param selector: A CSS selector.

        :param namespaces: A dictionary mapping namespace prefixes
           used in the CSS selector to namespace URIs. By default,
           Beautiful Soup will use the prefixes it encountered while
           parsing the document.

        :param flags: Flags to be passed into Soup Sieve's
           soupsieve.compile() method.

        :param kwargs: Keyword arguments to be passed into SoupSieve's
           soupsieve.compile() method.

        :return: A precompiled selector object.
        :rtype: soupsieve.SoupSieve
        """
        return self.api.compile(
            select, self._ns(namespaces, select), flags, **kwargs
        )

    def select_one(self, select, namespaces=None, flags=0, **kwargs):
        """Perform a CSS selection operation on the current Tag and return the
        first result.

        This uses the Soup Sieve library. For more information, see
        that library's documentation for the soupsieve.select_one()
        method.

        :param selector: A CSS selector.

        :param namespaces: A dictionary mapping namespace prefixes
           used in the CSS selector to namespace URIs. By default,
           Beautiful Soup will use the prefixes it encountered while
           parsing the document.

        :param flags: Flags to be passed into Soup Sieve's
           soupsieve.select_one() method.

        :param kwargs: Keyword arguments to be passed into SoupSieve's
           soupsieve.select_one() method.

        :return: A Tag, or None if the selector has no match.
        :rtype: bs4.element.Tag

        """
        return self.api.select_one(
            select, self.tag, self._ns(namespaces, select), flags, **kwargs
        )

    def select(self, select, namespaces=None, limit=0, flags=0, **kwargs):
        """Perform a CSS selection operation on the current Tag.

        This uses the Soup Sieve library. For more information, see
        that library's documentation for the soupsieve.select()
        method.

        :param selector: A string containing a CSS selector.

        :param namespaces: A dictionary mapping namespace prefixes
           used in the CSS selector to namespace URIs. By default,
           Beautiful Soup will pass in the prefixes it encountered while
           parsing the document.

        :param limit: After finding this number of results, stop looking.

        :param flags: Flags to be passed into Soup Sieve's
           soupsieve.select() method.

        :param kwargs: Keyword arguments to be passed into SoupSieve's
           soupsieve.select() method.

        :return: A ResultSet of Tag objects.
        :rtype: bs4.element.ResultSet

        """
        if limit is None:
            limit = 0

        return self._rs(
            self.api.select(
                select, self.tag, self._ns(namespaces, select), limit, flags,
                **kwargs
            )
        )

    def iselect(self, select, namespaces=None, limit=0, flags=0, **kwargs):
        """Perform a CSS selection operation on the current Tag.

        This uses the Soup Sieve library. For more information, see
        that library's documentation for the soupsieve.iselect()
        method. It is the same as select(), but it returns a generator
        instead of a list.

        :param selector: A string containing a CSS selector.

        :param namespaces: A dictionary mapping namespace prefixes
           used in the CSS selector to namespace URIs. By default,
           Beautiful Soup will pass in the prefixes it encountered while
           parsing the document.

        :param limit: After finding this number of results, stop looking.

        :param flags: Flags to be passed into Soup Sieve's
           soupsieve.iselect() method.

        :param kwargs: Keyword arguments to be passed into SoupSieve's
           soupsieve.iselect() method.

        :return: A generator
        :rtype: types.GeneratorType
        """
        return self.api.iselect(
            select, self.tag, self._ns(namespaces, select), limit, flags, **kwargs
        )

    def closest(self, select, namespaces=None, flags=0, **kwargs):
        """Find the Tag closest to this one that matches the given selector.

        This uses the Soup Sieve library. For more information, see
        that library's documentation for the soupsieve.closest()
        method.

        :param selector: A string containing a CSS selector.

        :param namespaces: A dictionary mapping namespace prefixes
           used in the CSS selector to namespace URIs. By default,
           Beautiful Soup will pass in the prefixes it encountered while
           parsing the document.

        :param flags: Flags to be passed into Soup Sieve's
           soupsieve.closest() method.

        :param kwargs: Keyword arguments to be passed into SoupSieve's
           soupsieve.closest() method.

        :return: A Tag, or None if there is no match.
        :rtype: bs4.Tag

        """
        return self.api.closest(
            select, self.tag, self._ns(namespaces, select), flags, **kwargs
        )

    def match(self, select, namespaces=None, flags=0, **kwargs):
        """Check whether this Tag matches the given CSS selector.

        This uses the Soup Sieve library. For more information, see
        that library's documentation for the soupsieve.match()
        method.

        :param: a CSS selector.

        :param namespaces: A dictionary mapping namespace prefixes
           used in the CSS selector to namespace URIs. By default,
           Beautiful Soup will pass in the prefixes it encountered while
           parsing the document.

        :param flags: Flags to be passed into Soup Sieve's
           soupsieve.match() method.

        :param kwargs: Keyword arguments to be passed into SoupSieve's
           soupsieve.match() method.

        :return: True if this Tag matches the selector; False otherwise.
        :rtype: bool
        """
        return self.api.match(
            select, self.tag, self._ns(namespaces, select), flags, **kwargs
        )

    def filter(self, select, namespaces=None, flags=0, **kwargs):
        """Filter this Tag's direct children based on the given CSS selector.

        This uses the Soup Sieve library. It works the same way as
        passing this Tag into that library's soupsieve.filter()
        method. More information, for more information see the
        documentation for soupsieve.filter().

        :param namespaces: A dictionary mapping namespace prefixes
           used in the CSS selector to namespace URIs. By default,
           Beautiful Soup will pass in the prefixes it encountered while
           parsing the document.

        :param flags: Flags to be passed into Soup Sieve's
           soupsieve.filter() method.

        :param kwargs: Keyword arguments to be passed into SoupSieve's
           soupsieve.filter() method.

        :return: A ResultSet of Tag objects.
        :rtype: bs4.element.ResultSet

        """
        return self._rs(
            self.api.filter(
                select, self.tag, self._ns(namespaces, select), flags, **kwargs
            )
        )
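The whole css.py proxy (the Tag.css attribute) exists only on the newer side of this compare, and this vendored copy stubs soupsieve out anyway, while the older tree ships its own built-in select() implementation. Portable code can therefore stick to select()/select_one(); the sketch below assumes a bs4 installation whose select() actually works (stock old bs4, or new bs4 with soupsieve installed):

```python
from bs4 import BeautifulSoup

soup = BeautifulSoup('<div id="main"><p class="x">hi</p></div>', 'html.parser')
# select()/select_one() exist on both sides; only the backend differs.
print(soup.select_one('div#main p.x').get_text())   # 'hi'
```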
@@ -6,185 +6,61 @@ necessary. It is heavily based on code from Mark Pilgrim's Universal
|
||||
Feed Parser. It works best on XML and HTML, but it does not rewrite the
|
||||
XML or HTML to reflect a new encoding; that's the tree builder's job.
|
||||
"""
|
||||
# Use of this source code is governed by the MIT license.
|
||||
__license__ = "MIT"
|
||||
|
||||
from html.entities import codepoint2name
|
||||
from collections import defaultdict
|
||||
import codecs
|
||||
from html.entities import codepoint2name
|
||||
import re
|
||||
import logging
|
||||
import string
|
||||
|
||||
# Import a library to autodetect character encodings. We'll support
|
||||
# any of a number of libraries that all support the same API:
|
||||
#
|
||||
# * cchardet
|
||||
# * chardet
|
||||
# * charset-normalizer
|
||||
chardet_module = None
|
||||
# Import a library to autodetect character encodings.
|
||||
chardet_type = None
|
||||
try:
|
||||
# First try the fast C implementation.
|
||||
# PyPI package: cchardet
|
||||
import cchardet as chardet_module
|
||||
import cchardet
|
||||
def chardet_dammit(s):
|
||||
return cchardet.detect(s)['encoding']
|
||||
except ImportError:
|
||||
try:
|
||||
# Fall back to the pure Python implementation
|
||||
# Debian package: python-chardet
|
||||
# PyPI package: chardet
|
||||
import chardet as chardet_module
|
||||
import chardet
|
||||
def chardet_dammit(s):
|
||||
return chardet.detect(s)['encoding']
|
||||
#import chardet.constants
|
||||
#chardet.constants._debug = 1
|
||||
except ImportError:
|
||||
try:
|
||||
# PyPI package: charset-normalizer
|
||||
import charset_normalizer as chardet_module
|
||||
except ImportError:
|
||||
# No chardet available.
|
||||
chardet_module = None
|
||||
|
||||
if chardet_module:
|
||||
def chardet_dammit(s):
|
||||
if isinstance(s, str):
|
||||
# No chardet available.
|
||||
def chardet_dammit(s):
|
||||
return None
|
||||
return chardet_module.detect(s)['encoding']
|
||||
else:
|
||||
def chardet_dammit(s):
|
||||
return None
|
||||
|
||||
# Build bytestring and Unicode versions of regular expressions for finding
|
||||
# a declared encoding inside an XML or HTML document.
|
||||
xml_encoding = '^\\s*<\\?.*encoding=[\'"](.*?)[\'"].*\\?>'
|
||||
html_meta = '<\\s*meta[^>]+charset\\s*=\\s*["\']?([^>]*?)[ /;\'">]'
|
||||
encoding_res = dict()
|
||||
encoding_res[bytes] = {
|
||||
'html' : re.compile(html_meta.encode("ascii"), re.I),
|
||||
'xml' : re.compile(xml_encoding.encode("ascii"), re.I),
|
||||
}
|
||||
encoding_res[str] = {
|
||||
'html' : re.compile(html_meta, re.I),
|
||||
'xml' : re.compile(xml_encoding, re.I)
|
||||
}
|
||||
|
||||
from html.entities import html5
|
||||
xml_encoding_re = re.compile(
|
||||
r'^<\?.*encoding=[\'"](.*?)[\'"].*\?>'.encode(), re.I)
|
||||
html_meta_re = re.compile(
|
||||
r'<\s*meta[^>]+charset\s*=\s*["\']?([^>]*?)[ /;\'">]'.encode(), re.I)
|
||||
|
||||
class EntitySubstitution(object):
|
||||
"""The ability to substitute XML or HTML entities for certain characters."""
|
||||
|
||||
"""Substitute XML or HTML entities for the corresponding characters."""
|
||||
|
||||
def _populate_class_variables():
|
||||
"""Initialize variables used by this class to manage the plethora of
|
||||
HTML5 named entities.
|
||||
|
||||
This function returns a 3-tuple containing two dictionaries
|
||||
and a regular expression:
|
||||
|
||||
unicode_to_name - A mapping of Unicode strings like "⦨" to
|
||||
entity names like "angmsdaa". When a single Unicode string has
|
||||
multiple entity names, we try to choose the most commonly-used
|
||||
name.
|
||||
|
||||
name_to_unicode: A mapping of entity names like "angmsdaa" to
|
||||
Unicode strings like "⦨".
|
||||
|
||||
named_entity_re: A regular expression matching (almost) any
|
||||
Unicode string that corresponds to an HTML5 named entity.
|
||||
"""
|
||||
unicode_to_name = {}
|
||||
name_to_unicode = {}
|
||||
|
||||
short_entities = set()
|
||||
long_entities_by_first_character = defaultdict(set)
|
||||
|
||||
for name_with_semicolon, character in sorted(html5.items()):
|
||||
# "It is intentional, for legacy compatibility, that many
|
||||
# code points have multiple character reference names. For
|
||||
# example, some appear both with and without the trailing
|
||||
# semicolon, or with different capitalizations."
|
||||
# - https://html.spec.whatwg.org/multipage/named-characters.html#named-character-references
|
||||
#
|
||||
# The parsers are in charge of handling (or not) character
|
||||
# references with no trailing semicolon, so we remove the
|
||||
# semicolon whenever it appears.
|
||||
if name_with_semicolon.endswith(';'):
|
||||
name = name_with_semicolon[:-1]
|
||||
else:
|
||||
name = name_with_semicolon
|
||||
|
||||
# When parsing HTML, we want to recognize any known named
|
||||
# entity and convert it to a sequence of Unicode
|
||||
# characters.
|
||||
if name not in name_to_unicode:
|
||||
name_to_unicode[name] = character
|
||||
|
||||
# When _generating_ HTML, we want to recognize special
|
||||
# character sequences that _could_ be converted to named
|
||||
# entities.
|
||||
unicode_to_name[character] = name
|
||||
|
||||
# We also need to build a regular expression that lets us
|
||||
# _find_ those characters in output strings so we can
|
||||
# replace them.
|
||||
#
|
||||
# This is tricky, for two reasons.
|
||||
|
||||
if (len(character) == 1 and ord(character) < 128
|
||||
and character not in '<>&'):
|
||||
# First, it would be annoying to turn single ASCII
|
||||
# characters like | into named entities like
|
||||
# |. The exceptions are <>&, which we _must_
|
||||
# turn into named entities to produce valid HTML.
|
||||
continue
|
||||
|
||||
if len(character) > 1 and all(ord(x) < 128 for x in character):
|
||||
# We also do not want to turn _combinations_ of ASCII
|
||||
# characters like 'fj' into named entities like 'fj',
|
||||
# though that's more debateable.
|
||||
continue
|
||||
|
||||
            # Second, some named entities have a Unicode value that's
            # a subset of the Unicode value for some _other_ named
            # entity. As an example, '\u2267' is &GreaterFullEqual;,
            # but '\u2267\u0338' is &NotGreaterFullEqual;. Our regular
            # expression needs to match the first two characters of
            # "\u2267\u0338foo", but only the first character of
            # "\u2267foo".
            #
            # In this step, we build two sets of characters that
            # _eventually_ need to go into the regular expression. But
            # we won't know exactly what the regular expression needs
            # to look like until we've gone through the entire list of
            # named entities.
            if len(character) == 1:
                short_entities.add(character)
            else:
                long_entities_by_first_character[character[0]].add(character)

        # Now that we've been through the entire list of entities, we
        # can create a regular expression that matches any of them.
        particles = set()
        for short in short_entities:
            long_versions = long_entities_by_first_character[short]
            if not long_versions:
                particles.add(short)
            else:
                ignore = "".join([x[1] for x in long_versions])
                # This finds, e.g. \u2267 but only if it is _not_
                # followed by \u0338.
                particles.add("%s(?![%s])" % (short, ignore))

        for long_entities in list(long_entities_by_first_character.values()):
            for long_entity in long_entities:
                particles.add(long_entity)

        re_definition = "(%s)" % "|".join(particles)

        # If an entity shows up in both html5 and codepoint2name, it's
        # likely that HTML5 gives it several different names, such as
        # 'rsquo' and 'rsquor'. When converting Unicode characters to
        # named entities, the codepoint2name name should take
        # precedence where possible, since that's the more easily
        # recognizable one.
        lookup = {}
        reverse_lookup = {}
        characters_for_re = []
        for codepoint, name in list(codepoint2name.items()):
            character = chr(codepoint)
            unicode_to_name[character] = name

        return unicode_to_name, name_to_unicode, re.compile(re_definition)
            if codepoint != 34:
                # There's no point in turning the quotation mark into
                # &quot;, unless it happens within an attribute value, which
                # is handled elsewhere.
                characters_for_re.append(character)
                lookup[character] = name
            # But we do want to turn &quot; into the quotation mark.
            reverse_lookup[name] = character
        re_definition = "[%s]" % "".join(characters_for_re)
        return lookup, reverse_lookup, re.compile(re_definition)

    (CHARACTER_TO_HTML_ENTITY, HTML_ENTITY_TO_CHARACTER,
     CHARACTER_TO_HTML_ENTITY_RE) = _populate_class_variables()
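
# --- Illustrative sketch (not part of the diff above): how the pieces of
# _populate_class_variables() fit together. The html5 mapping it iterates
# over comes from the standard library, and the lookahead particle keeps a
# short entity from shadowing a longer one that shares its prefix.
import re
from html.entities import html5

# html5 keys keep their trailing semicolons; values are Unicode strings.
assert html5['gt;'] == '>' and html5['angmsdaa;'] == '\u29a8'

# The particle built for '\u2267', which also begins the two-character
# entity '\u2267\u0338':
named_entity_re = re.compile("(%s)" % "|".join(
    ["\u2267(?![\u0338])", "\u2267\u0338"]))
assert named_entity_re.match('\u2267foo').group(0) == '\u2267'
assert named_entity_re.match('\u2267\u0338foo').group(0) == '\u2267\u0338'
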
@@ -196,23 +72,21 @@ class EntitySubstitution(object):
        ">": "gt",
    }

    BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|"
                                           "&(?!#\\d+;|#x[0-9a-fA-F]+;|\\w+;)"
                                           ")")
    BARE_AMPERSAND_OR_BRACKET = re.compile(r"([<>]|"
                                           r"&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)"
                                           r")")

    AMPERSAND_OR_BRACKET = re.compile("([<>&])")
    AMPERSAND_OR_BRACKET = re.compile(r"([<>&])")

    @classmethod
    def _substitute_html_entity(cls, matchobj):
        """Used with a regular expression to substitute the
        appropriate HTML entity for a special character string."""
        entity = cls.CHARACTER_TO_HTML_ENTITY.get(matchobj.group(0))
        return "&%s;" % entity

    @classmethod
    def _substitute_xml_entity(cls, matchobj):
        """Used with a regular expression to substitute the
        appropriate XML entity for a special character string."""
        appropriate XML entity for an XML special character."""
        entity = cls.CHARACTER_TO_XML_ENTITY[matchobj.group(0)]
        return "&%s;" % entity

@@ -307,8 +181,6 @@ class EntitySubstitution(object):
        containing a LATIN SMALL LETTER E WITH ACUTE, but replacing that
        character with "&eacute;" will make it more readable to some
        people.

        :param s: A Unicode string.
        """
        return cls.CHARACTER_TO_HTML_ENTITY_RE.sub(
            cls._substitute_html_entity, s)
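
# --- Illustrative sketch (not part of the diff above), assuming the
# upstream bs4 package is importable: substitute_xml() quotes only the XML
# special characters, while substitute_html() also draws on the named-entity
# table built by _populate_class_variables().
from bs4.dammit import EntitySubstitution

print(EntitySubstitution.substitute_xml("AT&T <3"))   # AT&amp;T &lt;3
print(EntitySubstitution.substitute_html("caf\xe9"))  # caf&eacute;
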
@@ -320,65 +192,23 @@ class EncodingDetector:
    Order of precedence:

    1. Encodings you specifically tell EncodingDetector to try first
    (the known_definite_encodings argument to the constructor).
    (the override_encodings argument to the constructor).

    2. An encoding determined by sniffing the document's byte-order mark.

    3. Encodings you specifically tell EncodingDetector to try if
    byte-order mark sniffing fails (the user_encodings argument to the
    constructor).

    4. An encoding declared within the bytestring itself, either in an
    2. An encoding declared within the bytestring itself, either in an
    XML declaration (if the bytestring is to be interpreted as an XML
    document), or in a <meta> tag (if the bytestring is to be
    interpreted as an HTML document).

    5. An encoding detected through textual analysis by chardet,
    3. An encoding detected through textual analysis by chardet,
    cchardet, or a similar external library.

    4. UTF-8.

    5. Windows-1252.

    (An illustrative sketch after the constructor below walks through
    this order in practice.)
    """
    def __init__(self, markup, known_definite_encodings=None,
                 is_html=False, exclude_encodings=None,
                 user_encodings=None, override_encodings=None):
        """Constructor.

        :param markup: Some markup in an unknown encoding.

        :param known_definite_encodings: When determining the encoding
            of `markup`, these encodings will be tried first, in
            order. In HTML terms, this corresponds to the "known
            definite encoding" step defined here:
            https://html.spec.whatwg.org/multipage/parsing.html#parsing-with-a-known-character-encoding

        :param user_encodings: These encodings will be tried after the
            `known_definite_encodings` have been tried and failed, and
            after an attempt to sniff the encoding by looking at a
            byte order mark has failed. In HTML terms, this
            corresponds to the step "user has explicitly instructed
            the user agent to override the document's character
            encoding", defined here:
            https://html.spec.whatwg.org/multipage/parsing.html#determining-the-character-encoding

        :param override_encodings: A deprecated alias for
            known_definite_encodings. Any encodings here will be tried
            immediately after the encodings in
            known_definite_encodings.

        :param is_html: If True, this markup is considered to be
            HTML. Otherwise it's assumed to be XML.

        :param exclude_encodings: These encodings will not be tried,
            even if they otherwise would be.
        """
        self.known_definite_encodings = list(known_definite_encodings or [])
        if override_encodings:
            self.known_definite_encodings += override_encodings
        self.user_encodings = user_encodings or []
    def __init__(self, markup, override_encodings=None, is_html=False,
                 exclude_encodings=None):
        self.override_encodings = override_encodings or []
        exclude_encodings = exclude_encodings or []
        self.exclude_encodings = set([x.lower() for x in exclude_encodings])
        self.chardet_encoding = None
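
# --- Illustrative sketch (not part of the diff above), assuming upstream
# bs4 with the known_definite_encodings signature shown here: an encoding
# the caller vouches for is tried before the one implied by the byte-order
# mark, and the UTF-8/Windows-1252 fallbacks come last.
from bs4.dammit import EncodingDetector

detector = EncodingDetector(
    b'\xef\xbb\xbf<html></html>',            # markup carrying a UTF-8 BOM
    known_definite_encodings=['iso-8859-1'],
    is_html=True)
print(list(detector.encodings))  # ['iso-8859-1', 'utf-8', ...]
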
@@ -389,12 +219,6 @@ class EncodingDetector:
        self.markup, self.sniffed_encoding = self.strip_byte_order_mark(markup)

    def _usable(self, encoding, tried):
        """Should we even bother to try this encoding?

        :param encoding: Name of an encoding.
        :param tried: Encodings that have already been tried. This will be
            modified as a side effect.
        """
        if encoding is not None:
            encoding = encoding.lower()
            if encoding in self.exclude_encodings:
@@ -406,14 +230,9 @@ class EncodingDetector:

    @property
    def encodings(self):
        """Yield a number of encodings that might work for this markup.

        :yield: A sequence of strings.
        """
        """Yield a number of encodings that might work for this markup."""
        tried = set()

        # First, try the known definite encodings
        for e in self.known_definite_encodings:
        for e in self.override_encodings:
            if self._usable(e, tried):
                yield e

@@ -422,12 +241,6 @@ class EncodingDetector:
        if self._usable(self.sniffed_encoding, tried):
            yield self.sniffed_encoding

        # Sniffing the byte-order mark did nothing; try the user
        # encodings.
        for e in self.user_encodings:
            if self._usable(e, tried):
                yield e

        # Look within the document for an XML or HTML encoding
        # declaration.
        if self.declared_encoding is None:
@@ -450,11 +263,7 @@ class EncodingDetector:

    @classmethod
    def strip_byte_order_mark(cls, data):
        """If a byte-order mark is present, strip it and return the encoding it implies.

        :param data: Some markup.
        :return: A 2-tuple (modified data, implied encoding)
        """
        """If a byte-order mark is present, strip it and return the encoding it implies."""
        encoding = None
        if isinstance(data, str):
            # Unicode data cannot have a byte-order mark.
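
# --- Illustrative sketch (not part of the diff above), assuming upstream
# bs4: strip_byte_order_mark() removes a BOM and reports the encoding it
# implies.
from bs4.dammit import EncodingDetector

data, encoding = EncodingDetector.strip_byte_order_mark(b'\xef\xbb\xbfhello')
print(data, encoding)  # b'hello' utf-8
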
@@ -486,36 +295,21 @@ class EncodingDetector:

        An HTML encoding is declared in a <meta> tag, hopefully near the
        beginning of the document.

        :param markup: Some markup.
        :param is_html: If True, this markup is considered to be HTML. Otherwise
            it's assumed to be XML.
        :param search_entire_document: Since an encoding is supposed to be declared
            near the beginning of the document, most of the time it's only necessary
            to search a few kilobytes of data.
            Set this to True to force this method to search the entire document.
        """
        if search_entire_document:
            xml_endpos = html_endpos = len(markup)
        else:
            xml_endpos = 1024
            html_endpos = max(2048, int(len(markup) * 0.05))

        if isinstance(markup, bytes):
            res = encoding_res[bytes]
        else:
            res = encoding_res[str]

        xml_re = res['xml']
        html_re = res['html']

        declared_encoding = None
        declared_encoding_match = xml_re.search(markup, endpos=xml_endpos)
        declared_encoding_match = xml_encoding_re.search(markup, endpos=xml_endpos)
        if not declared_encoding_match and is_html:
            declared_encoding_match = html_re.search(markup, endpos=html_endpos)
            declared_encoding_match = html_meta_re.search(markup, endpos=html_endpos)
        if declared_encoding_match is not None:
            declared_encoding = declared_encoding_match.groups()[0]
            declared_encoding = declared_encoding_match.groups()[0].decode(
                'ascii', 'replace')
            if declared_encoding:
                if isinstance(declared_encoding, bytes):
                    declared_encoding = declared_encoding.decode('ascii', 'replace')
                return declared_encoding.lower()
        return None
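
# --- Illustrative sketch (not part of the diff above), assuming upstream
# bs4: find_declared_encoding() pulls the encoding out of an XML
# declaration, or out of a <meta> tag when is_html=True.
from bs4.dammit import EncodingDetector

print(EncodingDetector.find_declared_encoding(
    b'<?xml version="1.0" encoding="ISO-8859-1"?><doc/>'))  # iso-8859-1
print(EncodingDetector.find_declared_encoding(
    b'<meta charset="UTF-8">', is_html=True))               # utf-8
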
@@ -538,53 +332,15 @@ class UnicodeDammit:
        "iso-8859-2",
    ]

    def __init__(self, markup, known_definite_encodings=[],
                 smart_quotes_to=None, is_html=False, exclude_encodings=[],
                 user_encodings=None, override_encodings=None
                 ):
        """Constructor.

        :param markup: A bytestring representing markup in an unknown encoding.

        :param known_definite_encodings: When determining the encoding
            of `markup`, these encodings will be tried first, in
            order. In HTML terms, this corresponds to the "known
            definite encoding" step defined here:
            https://html.spec.whatwg.org/multipage/parsing.html#parsing-with-a-known-character-encoding

        :param user_encodings: These encodings will be tried after the
            `known_definite_encodings` have been tried and failed, and
            after an attempt to sniff the encoding by looking at a
            byte order mark has failed. In HTML terms, this
            corresponds to the step "user has explicitly instructed
            the user agent to override the document's character
            encoding", defined here:
            https://html.spec.whatwg.org/multipage/parsing.html#determining-the-character-encoding

        :param override_encodings: A deprecated alias for
            known_definite_encodings. Any encodings here will be tried
            immediately after the encodings in
            known_definite_encodings.

        :param smart_quotes_to: By default, Microsoft smart quotes will, like all other characters, be converted
            to Unicode characters. Setting this to 'ascii' will convert them to ASCII quotes instead.
            Setting it to 'xml' will convert them to XML entity references, and setting it to 'html'
            will convert them to HTML entity references.
        :param is_html: If True, this markup is considered to be HTML. Otherwise
            it's assumed to be XML.
        :param exclude_encodings: These encodings will not be considered, even
            if the sniffing code thinks they might make sense.
        """
    def __init__(self, markup, override_encodings=[],
                 smart_quotes_to=None, is_html=False, exclude_encodings=[]):
        self.smart_quotes_to = smart_quotes_to
        self.tried_encodings = []
        self.contains_replacement_characters = False
        self.is_html = is_html
        self.log = logging.getLogger(__name__)

        self.detector = EncodingDetector(
            markup, known_definite_encodings, is_html, exclude_encodings,
            user_encodings, override_encodings
        )
            markup, override_encodings, is_html, exclude_encodings)

        # Short-circuit if the data is in Unicode to begin with.
        if isinstance(markup, str) or markup == '':
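
# --- Illustrative sketch (not part of the diff above), assuming upstream
# bs4: the classic UnicodeDammit round trip. The second positional argument
# is the list of encodings to try first under either constructor signature.
from bs4 import UnicodeDammit

dammit = UnicodeDammit(b'Sacr\xe9 bleu!', ['latin-1', 'iso-8859-1'])
print(dammit.unicode_markup)     # Sacré bleu!
print(dammit.original_encoding)  # latin-1
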
@@ -612,10 +368,9 @@ class UnicodeDammit:
            if encoding != "ascii":
                u = self._convert_from(encoding, "replace")
                if u is not None:
                    self.log.warning(
                    logging.warning(
                        "Some characters could not be decoded, and were "
                        "replaced with REPLACEMENT CHARACTER."
                    )
                        "replaced with REPLACEMENT CHARACTER.")
                    self.contains_replacement_characters = True
                    break

@@ -644,10 +399,6 @@ class UnicodeDammit:
        return sub

    def _convert_from(self, proposed, errors="strict"):
        """Attempt to convert the markup to the proposed encoding.

        :param proposed: The name of a character encoding.
        """
        proposed = self.find_codec(proposed)
        if not proposed or (proposed, errors) in self.tried_encodings:
            return None
@@ -662,40 +413,30 @@ class UnicodeDammit:
            markup = smart_quotes_compiled.sub(self._sub_ms_char, markup)

        try:
            #print("Trying to convert document to %s (errors=%s)" % (
            #    proposed, errors))
            #print "Trying to convert document to %s (errors=%s)" % (
            #    proposed, errors)
            u = self._to_unicode(markup, proposed, errors)
            self.markup = u
            self.original_encoding = proposed
        except Exception as e:
            #print("That didn't work!")
            #print(e)
            #print "That didn't work!"
            #print e
            return None
        #print("Correct encoding: %s" % proposed)
        #print "Correct encoding: %s" % proposed
        return self.markup

    def _to_unicode(self, data, encoding, errors="strict"):
        """Given a string and its encoding, decodes the string into Unicode.

        :param encoding: The name of an encoding.
        """
        '''Given a string and its encoding, decodes the string into Unicode.
        %encoding is a string recognized by encodings.aliases'''
        return str(data, encoding, errors)

    @property
    def declared_html_encoding(self):
        """If the markup is an HTML document, returns the encoding declared _within_
        the document.
        """
        if not self.is_html:
            return None
        return self.detector.declared_encoding

    def find_codec(self, charset):
        """Convert the name of a character set to a codec name.

        :param charset: The name of a character set.
        :return: The name of a codec.
        """
        value = (self._codec(self.CHARSET_ALIASES.get(charset, charset))
                 or (charset and self._codec(charset.replace("-", "")))
                 or (charset and self._codec(charset.replace("-", "_")))
@@ -985,7 +726,7 @@ class UnicodeDammit:
        0xde : b'\xc3\x9e', # Þ
        0xdf : b'\xc3\x9f', # ß
        0xe0 : b'\xc3\xa0', # à
        0xe1 : b'\xc3\xa1', # á
        0xe1 : b'\xa1', # á
        0xe2 : b'\xc3\xa2', # â
        0xe3 : b'\xc3\xa3', # ã
        0xe4 : b'\xc3\xa4', # ä
@@ -1034,16 +775,12 @@ class UnicodeDammit:
        Currently the only situation supported is Windows-1252 (or its
        subset ISO-8859-1), embedded in UTF-8.

        :param in_bytes: A bytestring that you suspect contains
            characters from multiple encodings. Note that this _must_
            be a bytestring. If you've already converted the document
            to Unicode, you're too late.
        :param main_encoding: The primary encoding of `in_bytes`.
        :param embedded_encoding: The encoding that was used to embed characters
            in the main document.
        :return: A bytestring in which `embedded_encoding`
            characters have been converted to their `main_encoding`
            equivalents.
        The input must be a bytestring. If you've already converted
        the document to Unicode, you're too late.

        The output is a bytestring in which `embedded_encoding`
        characters have been converted to their `main_encoding`
        equivalents.
        """
        if embedded_encoding.replace('_', '-').lower() not in (
                'windows-1252', 'windows_1252'):
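
# --- Illustrative sketch (not part of the diff above), assuming upstream
# bs4: detwingle() repairs Windows-1252 bytes that were pasted into an
# otherwise UTF-8 document.
from bs4 import UnicodeDammit

snowmen = ('\N{SNOWMAN}' * 3).encode('utf8')
quote = '\N{LEFT DOUBLE QUOTATION MARK}Hi!\N{RIGHT DOUBLE QUOTATION MARK}'.encode('windows_1252')
doc = snowmen + quote          # doc.decode('utf8') alone would fail here
print(UnicodeDammit.detwingle(doc).decode('utf8'))  # ☃☃☃“Hi!”
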
@@ -1,10 +1,9 @@
"""Diagnostic functions, mainly for use when doing tech support."""

# Use of this source code is governed by the MIT license.
__license__ = "MIT"

import cProfile
from io import BytesIO
from io import StringIO
from html.parser import HTMLParser
import bs4
from bs4 import BeautifulSoup, __version__
@@ -17,15 +16,12 @@ import tempfile
import time
import traceback
import sys
import cProfile

def diagnose(data):
    """Diagnostic suite for isolating common problems.

    :param data: A string containing markup that needs to be explained.
    :return: None; diagnostics are printed to standard output.
    """
    print(("Diagnostic running on Beautiful Soup %s" % __version__))
    print(("Python version %s" % sys.version))
    """Diagnostic suite for isolating common problems."""
    print("Diagnostic running on Beautiful Soup %s" % __version__)
    print("Python version %s" % sys.version)

    basic_parsers = ["html.parser", "html5lib", "lxml"]
    for name in basic_parsers:
@@ -39,70 +35,61 @@ def diagnose(data):
            name))

    if 'lxml' in basic_parsers:
        basic_parsers.append("lxml-xml")
        basic_parsers.append(["lxml", "xml"])
        try:
            from lxml import etree
            print(("Found lxml version %s" % ".".join(map(str, etree.LXML_VERSION))))
            print("Found lxml version %s" % ".".join(map(str, etree.LXML_VERSION)))
        except ImportError as e:
            print(
            print (
                "lxml is not installed or couldn't be imported.")

    if 'html5lib' in basic_parsers:
        try:
            import html5lib
            print(("Found html5lib version %s" % html5lib.__version__))
            print("Found html5lib version %s" % html5lib.__version__)
        except ImportError as e:
            print(
            print (
                "html5lib is not installed or couldn't be imported.")

    if hasattr(data, 'read'):
        data = data.read()
    elif os.path.exists(data):
        print('"%s" looks like a filename. Reading data from the file.' % data)
        data = open(data).read()
    elif data.startswith("http:") or data.startswith("https:"):
        print('"%s" looks like a URL. Beautiful Soup is not an HTTP client.' % data)
        print("You need to use some other library to get the document behind the URL, and feed that document to Beautiful Soup.")
        return
    print()

    for parser in basic_parsers:
        print(("Trying to parse your markup with %s" % parser))
        print("Trying to parse your markup with %s" % parser)
        success = False
        try:
            soup = BeautifulSoup(data, features=parser)
            soup = BeautifulSoup(data, parser)
            success = True
        except Exception as e:
            print(("%s could not parse the markup." % parser))
            print("%s could not parse the markup." % parser)
            traceback.print_exc()
        if success:
            print(("Here's what %s did with the markup:" % parser))
            print((soup.prettify()))
            print("Here's what %s did with the markup:" % parser)
            print(soup.prettify())

        print(("-" * 80))
        print("-" * 80)
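
# --- Illustrative sketch (not part of the diff above): diagnose() takes a
# string (or filename) and prints one report per installed parser.
from bs4.diagnose import diagnose

diagnose("<p>Unbalanced <b>markup</p>")
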
def lxml_trace(data, html=True, **kwargs):
    """Print out the lxml events that occur during parsing.

    This lets you see how lxml parses a document when no Beautiful
    Soup code is running. You can use this to determine whether
    an lxml-specific problem is in Beautiful Soup's lxml tree builders
    or in lxml itself.

    :param data: Some markup.
    :param html: If True, markup will be parsed with lxml's HTML parser.
        If False, lxml's XML parser will be used.
    Soup code is running.
    """
    from lxml import etree
    recover = kwargs.pop('recover', True)
    if isinstance(data, str):
        data = data.encode("utf8")
    reader = BytesIO(data)
    for event, element in etree.iterparse(
        reader, html=html, recover=recover, **kwargs
    ):
    for event, element in etree.iterparse(StringIO(data), html=html, **kwargs):
        print(("%s, %4s, %s" % (event, element.tag, element.text)))
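
# --- Illustrative sketch (not part of the diff above): lxml_trace() prints
# one (event, tag, text) line per lxml parse event.
from bs4.diagnose import lxml_trace

lxml_trace(b"<p>hello</p>")
# end,    p, hello   (plus events for the implied <body> and <html>)
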
class AnnouncingParser(HTMLParser):
    """Subclass of HTMLParser that announces parse events, without doing
    anything else.

    You can use this to get a picture of how html.parser sees a given
    document. The easiest way to do this is to call `htmlparser_trace`.
    """
    """Announces HTMLParser parse events, without doing anything else."""

    def _p(self, s):
        print(s)
@@ -139,8 +126,6 @@ def htmlparser_trace(data):

    This lets you see how HTMLParser parses a document when no
    Beautiful Soup code is running.

    :param data: Some markup.
    """
    parser = AnnouncingParser()
    parser.feed(data)
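
# --- Illustrative sketch (not part of the diff above): htmlparser_trace()
# does the same for html.parser events, fed through AnnouncingParser.
from bs4.diagnose import htmlparser_trace

htmlparser_trace("<p>hello</p>")
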
@@ -183,9 +168,9 @@ def rdoc(num_elements=1000):

def benchmark_parsers(num_elements=100000):
    """Very basic head-to-head performance benchmark."""
    print(("Comparative parser benchmark on Beautiful Soup %s" % __version__))
    print("Comparative parser benchmark on Beautiful Soup %s" % __version__)
    data = rdoc(num_elements)
    print(("Generated a large invalid HTML document (%d bytes)." % len(data)))
    print("Generated a large invalid HTML document (%d bytes)." % len(data))

    for parser in ["lxml", ["lxml", "html"], "html5lib", "html.parser"]:
        success = False
@@ -195,26 +180,26 @@ def benchmark_parsers(num_elements=100000):
            b = time.time()
            success = True
        except Exception as e:
            print(("%s could not parse the markup." % parser))
            print("%s could not parse the markup." % parser)
            traceback.print_exc()
        if success:
            print(("BS4+%s parsed the markup in %.2fs." % (parser, b-a)))
            print("BS4+%s parsed the markup in %.2fs." % (parser, b-a))

    from lxml import etree
    a = time.time()
    etree.HTML(data)
    b = time.time()
    print(("Raw lxml parsed the markup in %.2fs." % (b-a)))
    print("Raw lxml parsed the markup in %.2fs." % (b-a))

    import html5lib
    parser = html5lib.HTMLParser()
    a = time.time()
    parser.parse(data)
    b = time.time()
    print(("Raw html5lib parsed the markup in %.2fs." % (b-a)))
    print("Raw html5lib parsed the markup in %.2fs." % (b-a))

def profile(num_elements=100000, parser="lxml"):
    """Use Python's profiler on a randomly generated document."""

    filehandle = tempfile.NamedTemporaryFile()
    filename = filehandle.name
@@ -227,6 +212,5 @@ def profile(num_elements=100000, parser="lxml"):
    stats.sort_stats("cumulative")
    stats.print_stats('_html5lib|bs4', 50)
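
# --- Illustrative sketch (not part of the diff above): profiling a small
# random document with the pure-Python parser.
from bs4.diagnose import profile

profile(num_elements=5000, parser="html.parser")
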

# If this file is run as a script, standard input is diagnosed.
if __name__ == '__main__':
    diagnose(sys.stdin.read())