Mirror of https://git.yoctoproject.org/poky (synced 2026-02-20 08:29:42 +01:00)

Compare commits: yocto-5.1...yocto-5.0 (4 commits)
| Author | SHA1 | Date |
|---|---|---|
| | fb91a49387 | |
| | 25b05cb80d | |
| | 5b727a8fa1 | |
| | 845626a36b | |
@@ -41,7 +41,6 @@ Component/Subsystem Maintainers
* devtool: Saul Wold
* eSDK: Saul Wold
* overlayfs: Vyacheslav Yurkov
* Patchtest: Trevor Gamblin

Maintainers needed
------------------

@@ -53,6 +52,7 @@ Maintainers needed
* error reporting system/web UI
* wic
* Patchwork
* Patchtest
* Matchbox
* Sato
* Autobuilder
@@ -27,7 +27,7 @@ from bb.main import bitbake_main, BitBakeConfigParameters, BBMainException

bb.utils.check_system_locale()

__version__ = "2.9.1"
__version__ = "2.8.0"

if __name__ == "__main__":
    if __version__ != bb.__version__:
@@ -72,17 +72,16 @@ def find_siginfo_task(bbhandler, pn, taskname, sig1=None, sig2=None):
        elif sig2 not in sigfiles:
            logger.error('No sigdata files found matching %s %s with signature %s' % (pn, taskname, sig2))
            sys.exit(1)

        latestfiles = [sigfiles[sig1]['path'], sigfiles[sig2]['path']]
    else:
        sigfiles = find_siginfo(bbhandler, pn, taskname)
        latestsigs = sorted(sigfiles.keys(), key=lambda h: sigfiles[h]['time'])[-2:]
        if not latestsigs:
            logger.error('No sigdata files found matching %s %s' % (pn, taskname))
            sys.exit(1)
        latestfiles = [sigfiles[latestsigs[0]]['path']]
        if len(latestsigs) > 1:
            latestfiles.append(sigfiles[latestsigs[1]]['path'])
        sig1 = latestsigs[0]
        sig2 = latestsigs[1]

        latestfiles = [sigfiles[sig1]['path'], sigfiles[sig2]['path']]

    return latestfiles
@@ -16,7 +16,6 @@ import time
import warnings
import netrc
import json
import statistics
warnings.simplefilter("default")

try:

@@ -82,7 +81,6 @@ def main():
            nonlocal found_hashes
            nonlocal missed_hashes
            nonlocal max_time
            nonlocal times

            with hashserv.create_client(args.address) as client:
                for i in range(args.requests):

@@ -100,41 +98,29 @@ def main():
                    else:
                        missed_hashes += 1

                    times.append(elapsed)
                    max_time = max(elapsed, max_time)
                    pbar.update()

        max_time = 0
        found_hashes = 0
        missed_hashes = 0
        lock = threading.Lock()
        times = []
        total_requests = args.clients * args.requests
        start_time = time.perf_counter()
        with ProgressBar(total=args.clients * args.requests) as pbar:
        with ProgressBar(total=total_requests) as pbar:
            threads = [threading.Thread(target=thread_main, args=(pbar, lock), daemon=False) for _ in range(args.clients)]
            for t in threads:
                t.start()

            for t in threads:
                t.join()
        total_elapsed = time.perf_counter() - start_time

        elapsed = time.perf_counter() - start_time
        with lock:
            mean = statistics.mean(times)
            median = statistics.median(times)
            stddev = statistics.pstdev(times)

            print(f"Number of clients: {args.clients}")
            print(f"Requests per client: {args.requests}")
            print(f"Number of requests: {len(times)}")
            print(f"Total elapsed time: {total_elapsed:.3f}s")
            print(f"Total request rate: {len(times)/total_elapsed:.3f} req/s")
            print(f"Average request time: {mean:.3f}s")
            print(f"Median request time: {median:.3f}s")
            print(f"Request time std dev: {stddev:.3f}s")
            print(f"Maximum request time: {max(times):.3f}s")
            print(f"Minimum request time: {min(times):.3f}s")
            print(f"Hashes found: {found_hashes}")
            print(f"Hashes missed: {missed_hashes}")
        print("%d requests in %.1fs. %.1f requests per second" % (total_requests, elapsed, total_requests / elapsed))
        print("Average request time %.8fs" % (elapsed / total_requests))
        print("Max request time was %.8fs" % max_time)
        print("Found %d hashes, missed %d" % (found_hashes, missed_hashes))

        if args.report:
            with ProgressBar(total=args.requests) as pbar:

@@ -239,32 +225,6 @@ def main():
        print("true" if result else "false")
        return 0

    def handle_ping(args, client):
        times = []
        for i in range(1, args.count + 1):
            if not args.quiet:
                print(f"Ping {i} of {args.count}... ", end="")
            start_time = time.perf_counter()
            client.ping()
            elapsed = time.perf_counter() - start_time
            times.append(elapsed)
            if not args.quiet:
                print(f"{elapsed:.3f}s")

        mean = statistics.mean(times)
        median = statistics.median(times)
        std_dev = statistics.pstdev(times)

        if not args.quiet:
            print("------------------------")
            print(f"Number of pings: {len(times)}")
            print(f"Average round trip time: {mean:.3f}s")
            print(f"Median round trip time: {median:.3f}s")
            print(f"Round trip time std dev: {std_dev:.3f}s")
            print(f"Min time is: {min(times):.3f}s")
            print(f"Max time is: {max(times):.3f}s")
        return 0

    parser = argparse.ArgumentParser(description='Hash Equivalence Client')
    parser.add_argument('--address', default=DEFAULT_ADDRESS, help='Server address (default "%(default)s")')
    parser.add_argument('--log', default='WARNING', help='Set logging level')

@@ -362,11 +322,6 @@ def main():
    unihash_exists_parser.add_argument("unihash", help="Unihash to check")
    unihash_exists_parser.set_defaults(func=handle_unihash_exists)

    ping_parser = subparsers.add_parser('ping', help="Ping server")
    ping_parser.add_argument("-n", "--count", type=int, help="Number of pings. Default is %(default)s", default=10)
    ping_parser.add_argument("-q", "--quiet", action="store_true", help="Don't print each ping; only print results")
    ping_parser.set_defaults(func=handle_ping)

    args = parser.parse_args()

    logger = logging.getLogger('hashserv')
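The ping flow above amounts to timing client.ping() round trips. As a rough standalone illustration (not part of the diff; the address is a placeholder you would point at a real hash equivalence server, and bitbake's lib/ directory is assumed to be on sys.path):

    import time
    import statistics
    import hashserv  # from bitbake/lib

    ADDRESS = "unix://./hashserve.sock"  # assumed address; adjust as needed

    with hashserv.create_client(ADDRESS) as client:
        times = []
        for _ in range(5):
            start = time.perf_counter()
            client.ping()  # one round trip, as in handle_ping() above
            times.append(time.perf_counter() - start)

    print(f"mean {statistics.mean(times):.3f}s, median {statistics.median(times):.3f}s")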
@@ -125,11 +125,6 @@ The following permissions are supported by the server:
        default=os.environ.get("HASHSERVER_ADMIN_PASSWORD", None),
        help="Create default admin user with password ADMIN_PASSWORD ($HASHSERVER_ADMIN_PASSWORD)",
    )
    parser.add_argument(
        "--reuseport",
        action="store_true",
        help="Enable SO_REUSEPORT, allowing multiple servers to bind to the same port for load balancing",
    )

    args = parser.parse_args()

@@ -137,9 +132,7 @@ The following permissions are supported by the server:

    level = getattr(logging, args.log.upper(), None)
    if not isinstance(level, int):
        raise ValueError(
            "Invalid log level: %s (Try ERROR/WARNING/INFO/DEBUG)" % args.log
        )
        raise ValueError("Invalid log level: %s (Try ERROR/WARNING/INFO/DEBUG)" % args.log)

    logger.setLevel(level)
    console = logging.StreamHandler()

@@ -162,7 +155,6 @@ The following permissions are supported by the server:
        anon_perms=anon_perms,
        admin_username=args.admin_user,
        admin_password=args.admin_password,
        reuseport=args.reuseport,
    )
    server.serve_forever()
    return 0
@@ -33,7 +33,7 @@ def main():
                                     add_help=False)
    parser.add_argument('-d', '--debug', help='Enable debug output', action='store_true')
    parser.add_argument('-q', '--quiet', help='Print only errors', action='store_true')
    parser.add_argument('-F', '--force', help='Forced execution: can be specified multiple times. -F will force add without recipe parse verification and -FF will additionally force the run without layer parsing.', action='count', default=0)
    parser.add_argument('-F', '--force', help='Force add without recipe parse verification', action='store_true')
    parser.add_argument('--color', choices=['auto', 'always', 'never'], default='auto', help='Colorize output (where %(metavar)s is %(choices)s)', metavar='COLOR')

    global_args, unparsed_args = parser.parse_known_args()

@@ -59,20 +59,16 @@ def main():
    plugins = []
    tinfoil = bb.tinfoil.Tinfoil(tracking=True)
    tinfoil.logger.setLevel(logger.getEffectiveLevel())
    if global_args.force > 1:
        bbpaths = []
    else:
        try:
            tinfoil.prepare(True)
            bbpaths = tinfoil.config_data.getVar('BBPATH').split(':')

    try:
        for path in ([topdir] + bbpaths):
        for path in ([topdir] +
                     tinfoil.config_data.getVar('BBPATH').split(':')):
            pluginpath = os.path.join(path, 'lib', 'bblayers')
            bb.utils.load_plugins(logger, plugins, pluginpath)

        registered = False
        for plugin in plugins:
            if hasattr(plugin, 'tinfoil_init') and global_args.force <= 1:
            if hasattr(plugin, 'tinfoil_init'):
                plugin.tinfoil_init(tinfoil)
            if hasattr(plugin, 'register_commands'):
                registered = True
@@ -16,18 +16,11 @@ sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(__file__)), "lib
import prserv
import prserv.serv

VERSION = "2.0.0"
VERSION = "1.1.0"

PRHOST_DEFAULT="0.0.0.0"
PRPORT_DEFAULT=8585

def init_logger(logfile, loglevel):
    numeric_level = getattr(logging, loglevel.upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError("Invalid log level: %s" % loglevel)
    FORMAT = "%(asctime)-15s %(message)s"
    logging.basicConfig(level=numeric_level, filename=logfile, format=FORMAT)

def main():
    parser = argparse.ArgumentParser(
        description="BitBake PR Server. Version=%s" % VERSION,

@@ -77,25 +70,12 @@ def main():
        action="store_true",
        help="open database in read-only mode",
    )
    parser.add_argument(
        "-u",
        "--upstream",
        default=os.environ.get("PRSERVER_UPSTREAM", None),
        help="Upstream PR service (host:port)",
    )

    args = parser.parse_args()
    init_logger(os.path.abspath(args.log), args.loglevel)
    prserv.init_logger(os.path.abspath(args.log), args.loglevel)

    if args.start:
        ret=prserv.serv.start_daemon(
            args.file,
            args.host,
            args.port,
            os.path.abspath(args.log),
            args.read_only,
            args.upstream
        )
        ret=prserv.serv.start_daemon(args.file, args.host, args.port, os.path.abspath(args.log), args.read_only)
    elif args.stop:
        ret=prserv.serv.stop_daemon(args.host, args.port)
    else:
@@ -15,7 +15,6 @@ import unittest
try:
    import bb
    import hashserv
    import prserv
    import layerindexlib
except RuntimeError as exc:
    sys.exit(str(exc))

@@ -34,7 +33,6 @@ tests = ["bb.tests.codeparser",
         "bb.tests.utils",
         "bb.tests.compression",
         "hashserv.tests",
         "prserv.tests",
         "layerindexlib.tests.layerindexobj",
         "layerindexlib.tests.restapi",
         "layerindexlib.tests.cooker"]
@@ -424,7 +424,7 @@ overview of their function and contents.

   Example usage::

      BB_HASHSERVE_UPSTREAM = "hashserv.yoctoproject.org:8686"
      BB_HASHSERVE_UPSTREAM = "hashserv.yocto.io:8687"

:term:`BB_INVALIDCONF`
   Used in combination with the ``ConfigParsed`` event to trigger
@@ -5,10 +5,10 @@ BitBake Supported Release Manuals
=================================

*******************************
Release Series 5.0 (scarthgap)
Release Series 4.2 (mickledore)
*******************************

- :yocto_docs:`BitBake 2.8 User Manual </bitbake/2.8/>`
- :yocto_docs:`BitBake 2.4 User Manual </bitbake/2.4/>`

******************************
Release Series 4.0 (kirkstone)

@@ -26,18 +26,6 @@ Release Series 3.1 (dunfell)
BitBake Outdated Release Manuals
================================

*******************************
Release Series 4.3 (nanbield)
*******************************

- :yocto_docs:`BitBake 2.6 User Manual </bitbake/2.6/>`

*******************************
Release Series 4.2 (mickledore)
*******************************

- :yocto_docs:`BitBake 2.4 User Manual </bitbake/2.4/>`

*****************************
Release Series 4.1 (langdale)
*****************************
@@ -9,7 +9,7 @@
# SPDX-License-Identifier: GPL-2.0-only
#

__version__ = "2.9.1"
__version__ = "2.8.0"

import sys
if sys.version_info < (3, 8, 0):

@@ -36,7 +36,6 @@ class BBHandledException(Exception):

import os
import logging
from collections import namedtuple


class NullHandler(logging.Handler):

@@ -104,6 +103,26 @@ class BBLoggerAdapter(logging.LoggerAdapter, BBLoggerMixin):
        self.setup_bblogger(logger.name)
        super().__init__(logger, *args, **kwargs)

    if sys.version_info < (3, 6):
        # These properties were added in Python 3.6. Add them in older versions
        # for compatibility
        @property
        def manager(self):
            return self.logger.manager

        @manager.setter
        def manager(self, value):
            self.logger.manager = value

        @property
        def name(self):
            return self.logger.name

    def __repr__(self):
        logger = self.logger
        level = logger.getLevelName(logger.getEffectiveLevel())
        return '<%s %s (%s)>' % (self.__class__.__name__, logger.name, level)

logging.LoggerAdapter = BBLoggerAdapter

logger = logging.getLogger("BitBake")

@@ -208,14 +227,3 @@ def deprecate_import(current, modulename, fromlist, renames = None):

    setattr(sys.modules[current], newname, newobj)

TaskData = namedtuple("TaskData", [
    "pn",
    "taskname",
    "fn",
    "deps",
    "provides",
    "taskhash",
    "unihash",
    "hashfn",
    "taskhash_deps",
])
@@ -5,7 +5,7 @@
#


from .client import AsyncClient, Client
from .client import AsyncClient, Client, ClientPool
from .serv import AsyncServer, AsyncServerConnection
from .connection import DEFAULT_MAX_CHUNK
from .exceptions import (
@@ -24,12 +24,6 @@ ADDR_TYPE_UNIX = 0
ADDR_TYPE_TCP = 1
ADDR_TYPE_WS = 2

WEBSOCKETS_MIN_VERSION = (9, 1)
# Need websockets 10 with python 3.10+
if sys.version_info >= (3, 10, 0):
    WEBSOCKETS_MIN_VERSION = (10, 0)


def parse_address(addr):
    if addr.startswith(UNIX_PREFIX):
        return (ADDR_TYPE_UNIX, (addr[len(UNIX_PREFIX) :],))

@@ -45,7 +39,6 @@ def parse_address(addr):

    return (ADDR_TYPE_TCP, (host, int(port)))


class AsyncClient(object):
    def __init__(
        self,

@@ -93,30 +86,8 @@ class AsyncClient(object):
    async def connect_websocket(self, uri):
        import websockets

        try:
            version = tuple(
                int(v)
                for v in websockets.__version__.split(".")[
                    0 : len(WEBSOCKETS_MIN_VERSION)
                ]
            )
        except ValueError:
            raise ImportError(
                f"Unable to parse websockets version '{websockets.__version__}'"
            )

        if version < WEBSOCKETS_MIN_VERSION:
            min_ver_str = ".".join(str(v) for v in WEBSOCKETS_MIN_VERSION)
            raise ImportError(
                f"Websockets version {websockets.__version__} is less than minimum required version {min_ver_str}"
            )

        async def connect_sock():
            websocket = await websockets.connect(
                uri,
                ping_interval=None,
                open_timeout=self.timeout,
            )
            websocket = await websockets.connect(uri, ping_interval=None)
            return WebsocketConnection(websocket, self.timeout)

        self._connect_sock = connect_sock

@@ -254,7 +225,8 @@ class Client(object):
    def close(self):
        if self.loop:
            self.loop.run_until_complete(self.client.close())
            self.loop.run_until_complete(self.loop.shutdown_asyncgens())
            if sys.version_info >= (3, 6):
                self.loop.run_until_complete(self.loop.shutdown_asyncgens())
            self.loop.close()
            self.loop = None

@@ -264,3 +236,78 @@ class Client(object):
    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
        return False


class ClientPool(object):
    def __init__(self, max_clients):
        self.avail_clients = []
        self.num_clients = 0
        self.max_clients = max_clients
        self.loop = None
        self.client_condition = None

    @abc.abstractmethod
    async def _new_client(self):
        raise NotImplementedError("Must be implemented in derived class")

    def close(self):
        if self.client_condition:
            self.client_condition = None

        if self.loop:
            self.loop.run_until_complete(self.__close_clients())
            self.loop.run_until_complete(self.loop.shutdown_asyncgens())
            self.loop.close()
            self.loop = None

    def run_tasks(self, tasks):
        if not self.loop:
            self.loop = asyncio.new_event_loop()

        thread = Thread(target=self.__thread_main, args=(tasks,))
        thread.start()
        thread.join()

    @contextlib.asynccontextmanager
    async def get_client(self):
        async with self.client_condition:
            if self.avail_clients:
                client = self.avail_clients.pop()
            elif self.num_clients < self.max_clients:
                self.num_clients += 1
                client = await self._new_client()
            else:
                while not self.avail_clients:
                    await self.client_condition.wait()
                client = self.avail_clients.pop()

        try:
            yield client
        finally:
            async with self.client_condition:
                self.avail_clients.append(client)
                self.client_condition.notify()

    def __thread_main(self, tasks):
        async def process_task(task):
            async with self.get_client() as client:
                await task(client)

        asyncio.set_event_loop(self.loop)
        if not self.client_condition:
            self.client_condition = asyncio.Condition()
        tasks = [process_task(t) for t in tasks]
        self.loop.run_until_complete(asyncio.gather(*tasks))

    async def __close_clients(self):
        for c in self.avail_clients:
            await c.close()
        self.avail_clients = []
        self.num_clients = 0

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
        return False
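A toy illustration of the ClientPool API shown above (not part of the diff; EchoClient and EchoPool are invented stand-ins, and the yocto-5.0-side bb.asyncrpc module is assumed to be importable):

    import asyncio
    from bb.asyncrpc import ClientPool

    class EchoClient:
        async def echo(self, value):
            await asyncio.sleep(0)  # stand-in for real network I/O
            return value

        async def close(self):      # awaited by ClientPool.close()
            pass

    class EchoPool(ClientPool):
        async def _new_client(self):
            return EchoClient()

    results = []

    async def job(client):
        results.append(await client.echo("pong"))

    with EchoPool(max_clients=2) as pool:
        pool.run_tasks([job, job, job])  # each job borrows a pooled client

    print(results)  # ['pong', 'pong', 'pong']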
@@ -138,20 +138,14 @@ class StreamServer(object):


class TCPStreamServer(StreamServer):
    def __init__(self, host, port, handler, logger, *, reuseport=False):
    def __init__(self, host, port, handler, logger):
        super().__init__(handler, logger)
        self.host = host
        self.port = port
        self.reuseport = reuseport

    def start(self, loop):
        self.server = loop.run_until_complete(
            asyncio.start_server(
                self.handle_stream_client,
                self.host,
                self.port,
                reuse_port=self.reuseport,
            )
            asyncio.start_server(self.handle_stream_client, self.host, self.port)
        )

        for s in self.server.sockets:

@@ -215,12 +209,11 @@ class UnixStreamServer(StreamServer):


class WebsocketsServer(object):
    def __init__(self, host, port, handler, logger, *, reuseport=False):
    def __init__(self, host, port, handler, logger):
        self.host = host
        self.port = port
        self.handler = handler
        self.logger = logger
        self.reuseport = reuseport

    def start(self, loop):
        import websockets.server

@@ -231,7 +224,6 @@ class WebsocketsServer(object):
                self.host,
                self.port,
                ping_interval=None,
                reuse_port=self.reuseport,
            )
        )

@@ -270,26 +262,14 @@ class AsyncServer(object):
        self.loop = None
        self.run_tasks = []

    def start_tcp_server(self, host, port, *, reuseport=False):
        self.server = TCPStreamServer(
            host,
            port,
            self._client_handler,
            self.logger,
            reuseport=reuseport,
        )
    def start_tcp_server(self, host, port):
        self.server = TCPStreamServer(host, port, self._client_handler, self.logger)

    def start_unix_server(self, path):
        self.server = UnixStreamServer(path, self._client_handler, self.logger)

    def start_websocket_server(self, host, port, reuseport=False):
        self.server = WebsocketsServer(
            host,
            port,
            self._client_handler,
            self.logger,
            reuseport=reuseport,
        )
    def start_websocket_server(self, host, port):
        self.server = WebsocketsServer(host, port, self._client_handler, self.logger)

    async def _client_handler(self, socket):
        address = socket.address

@@ -388,7 +368,8 @@ class AsyncServer(object):

        self._serve_forever(tasks)

        self.loop.run_until_complete(self.loop.shutdown_asyncgens())
        if sys.version_info >= (3, 6):
            self.loop.run_until_complete(self.loop.shutdown_asyncgens())
        self.loop.close()

        queue = multiprocessing.Queue()
@@ -197,8 +197,6 @@ def exec_func(func, d, dirs = None):
        for cdir in d.expand(cleandirs).split():
            bb.utils.remove(cdir, True)
            bb.utils.mkdirhier(cdir)
            if cdir == oldcwd:
                os.chdir(cdir)

    if flags and dirs is None:
        dirs = flags.get('dirs')

@@ -743,7 +741,7 @@ def _exec_task(fn, task, d, quieterr):

        if quieterr:
            if not handled:
                logger.warning(str(exc))
                logger.warning(repr(exc))
            event.fire(TaskFailedSilent(task, fn, logfn, localdata), localdata)
        else:
            errprinted = errchk.triggered

@@ -752,7 +750,7 @@ def _exec_task(fn, task, d, quieterr):
            if verboseStdoutLogging or handled:
                errprinted = True
            if not handled:
                logger.error(str(exc))
                logger.error(repr(exc))
            event.fire(TaskFailed(task, fn, logfn, localdata, errprinted), localdata)
        return 1

@@ -932,13 +930,9 @@ def add_tasks(tasklist, d):
    # don't assume holding a reference
    d.setVar('_task_deps', task_deps)

def ensure_task_prefix(name):
    if name[:3] != "do_":
        name = "do_" + name
    return name

def addtask(task, before, after, d):
    task = ensure_task_prefix(task)
    if task[:3] != "do_":
        task = "do_" + task

    d.setVarFlag(task, "task", 1)
    bbtasks = d.getVar('__BBTASKS', False) or []

@@ -950,20 +944,19 @@ def addtask(task, before, after, d):
    if after is not None:
        # set up deps for function
        for entry in after.split():
            entry = ensure_task_prefix(entry)
            if entry not in existing:
                existing.append(entry)
    d.setVarFlag(task, "deps", existing)
    if before is not None:
        # set up things that depend on this func
        for entry in before.split():
            entry = ensure_task_prefix(entry)
            existing = d.getVarFlag(entry, "deps", False) or []
            if task not in existing:
                d.setVarFlag(entry, "deps", [task] + existing)

def deltask(task, d):
    task = ensure_task_prefix(task)
    if task[:3] != "do_":
        task = "do_" + task

    bbtasks = d.getVar('__BBTASKS', False) or []
    if task in bbtasks:
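For orientation, the ensure_task_prefix() helper on the yocto-5.1 side of the hunks above simply normalizes task names; a self-contained demonstration:

    def ensure_task_prefix(name):
        if name[:3] != "do_":
            name = "do_" + name
        return name

    print(ensure_task_prefix("compile"))     # -> do_compile
    print(ensure_task_prefix("do_compile"))  # -> do_compile (unchanged)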
@@ -28,7 +28,7 @@ import shutil

logger = logging.getLogger("BitBake.Cache")

__cache_version__ = "156"
__cache_version__ = "155"

def getCacheFile(path, filename, mc, data_hash):
    mcspec = ''

@@ -441,7 +441,7 @@ class Cache(object):
        else:
            symlink = os.path.join(self.cachedir, "bb_cache.dat")

        if os.path.exists(symlink) or os.path.islink(symlink):
        if os.path.exists(symlink):
            bb.utils.remove(symlink)
        try:
            os.symlink(os.path.basename(self.cachefile), symlink)

@@ -779,6 +779,25 @@ class MulticonfigCache(Mapping):
        for k in self.__caches:
            yield k

def init(cooker):
    """
    The Objective: Cache the minimum amount of data possible yet get to the
    stage of building packages (i.e. tryBuild) without reparsing any .bb files.

    To do this, we intercept getVar calls and only cache the variables we see
    being accessed. We rely on the cache getVar calls being made for all
    variables bitbake might need to use to reach this stage. For each cached
    file we need to track:

    * Its mtime
    * The mtimes of all its dependencies
    * Whether it caused a parse.SkipRecipe exception

    Files causing parsing errors are evicted from the cache.

    """
    return Cache(cooker.configuration.data, cooker.configuration.data_hash)


class CacheData(object):
    """
@@ -72,11 +72,6 @@ def add_module_functions(fn, functions, namespace):
            parser.parse_python(None, filename=fn, lineno=1, fixedhash=fixedhash+f)
            #bb.warn("Cached %s" % f)
        except KeyError:
            targetfn = inspect.getsourcefile(functions[f])
            if fn != targetfn:
                # Skip references to other modules outside this file
                #bb.warn("Skipping %s" % name)
                continue
            lines, lineno = inspect.getsourcelines(functions[f])
            src = "".join(lines)
            parser.parse_python(src, filename=fn, lineno=lineno, fixedhash=fixedhash+f)

@@ -87,17 +82,14 @@ def add_module_functions(fn, functions, namespace):
            if e in functions:
                execs.remove(e)
                execs.add(namespace + "." + e)
        visitorcode = None
        if hasattr(functions[f], 'visitorcode'):
            visitorcode = getattr(functions[f], "visitorcode")
        modulecode_deps[name] = [parser.references.copy(), execs, parser.var_execs.copy(), parser.contains.copy(), parser.extra, visitorcode]
        modulecode_deps[name] = [parser.references.copy(), execs, parser.var_execs.copy(), parser.contains.copy()]
        #bb.warn("%s: %s\nRefs:%s Execs: %s %s %s" % (name, fn, parser.references, parser.execs, parser.var_execs, parser.contains))

def update_module_dependencies(d):
    for mod in modulecode_deps:
        excludes = set((d.getVarFlag(mod, "vardepsexclude") or "").split())
        if excludes:
            modulecode_deps[mod] = [modulecode_deps[mod][0] - excludes, modulecode_deps[mod][1] - excludes, modulecode_deps[mod][2] - excludes, modulecode_deps[mod][3], modulecode_deps[mod][4], modulecode_deps[mod][5]]
            modulecode_deps[mod] = [modulecode_deps[mod][0] - excludes, modulecode_deps[mod][1] - excludes, modulecode_deps[mod][2] - excludes, modulecode_deps[mod][3]]

# A custom getstate/setstate using tuples is actually worth 15% cachesize by
# avoiding duplication of the attribute names!

@@ -120,22 +112,21 @@ class SetCache(object):
codecache = SetCache()

class pythonCacheLine(object):
    def __init__(self, refs, execs, contains, extra):
    def __init__(self, refs, execs, contains):
        self.refs = codecache.internSet(refs)
        self.execs = codecache.internSet(execs)
        self.contains = {}
        for c in contains:
            self.contains[c] = codecache.internSet(contains[c])
        self.extra = extra

    def __getstate__(self):
        return (self.refs, self.execs, self.contains, self.extra)
        return (self.refs, self.execs, self.contains)

    def __setstate__(self, state):
        (refs, execs, contains, extra) = state
        self.__init__(refs, execs, contains, extra)
        (refs, execs, contains) = state
        self.__init__(refs, execs, contains)

    def __hash__(self):
        l = (hash(self.refs), hash(self.execs), hash(self.extra))
        l = (hash(self.refs), hash(self.execs))
        for c in sorted(self.contains.keys()):
            l = l + (c, hash(self.contains[c]))
        return hash(l)

@@ -164,7 +155,7 @@ class CodeParserCache(MultiProcessCache):
    # so that an existing cache gets invalidated. Additionally you'll need
    # to increment __cache_version__ in cache.py in order to ensure that old
    # recipe caches don't trigger "Taskhash mismatch" errors.
    CACHE_VERSION = 14
    CACHE_VERSION = 11

    def __init__(self):
        MultiProcessCache.__init__(self)

@@ -178,8 +169,8 @@ class CodeParserCache(MultiProcessCache):
        self.pythoncachelines = {}
        self.shellcachelines = {}

    def newPythonCacheLine(self, refs, execs, contains, extra):
        cacheline = pythonCacheLine(refs, execs, contains, extra)
    def newPythonCacheLine(self, refs, execs, contains):
        cacheline = pythonCacheLine(refs, execs, contains)
        h = hash(cacheline)
        if h in self.pythoncachelines:
            return self.pythoncachelines[h]

@@ -264,15 +255,7 @@ class PythonParser():

    def visit_Call(self, node):
        name = self.called_node_name(node.func)
        if name and name in modulecode_deps and modulecode_deps[name][5]:
            visitorcode = modulecode_deps[name][5]
            contains, execs, warn = visitorcode(name, node.args)
            for i in contains:
                self.contains[i] = contains[i]
            self.execs |= execs
            if warn:
                self.warn(node.func, warn)
        elif name and (name.endswith(self.getvars) or name.endswith(self.getvarflags) or name in self.containsfuncs or name in self.containsanyfuncs):
        if name and (name.endswith(self.getvars) or name.endswith(self.getvarflags) or name in self.containsfuncs or name in self.containsanyfuncs):
            if isinstance(node.args[0], ast.Constant) and isinstance(node.args[0].value, str):
                varname = node.args[0].value
                if name in self.containsfuncs and isinstance(node.args[1], ast.Constant):

@@ -355,7 +338,6 @@ class PythonParser():
                self.contains = {}
                for i in codeparsercache.pythoncache[h].contains:
                    self.contains[i] = set(codeparsercache.pythoncache[h].contains[i])
                self.extra = codeparsercache.pythoncache[h].extra
                return

        if h in codeparsercache.pythoncacheextras:

@@ -364,7 +346,6 @@ class PythonParser():
                self.contains = {}
                for i in codeparsercache.pythoncacheextras[h].contains:
                    self.contains[i] = set(codeparsercache.pythoncacheextras[h].contains[i])
                self.extra = codeparsercache.pythoncacheextras[h].extra
                return

        if fixedhash and not node:

@@ -383,11 +364,8 @@ class PythonParser():
                self.visit_Call(n)

        self.execs.update(self.var_execs)
        self.extra = None
        if fixedhash:
            self.extra = bbhash(str(node))

        codeparsercache.pythoncacheextras[h] = codeparsercache.newPythonCacheLine(self.references, self.execs, self.contains, self.extra)
        codeparsercache.pythoncacheextras[h] = codeparsercache.newPythonCacheLine(self.references, self.execs, self.contains)

class ShellParser():
    def __init__(self, name, log):

@@ -506,34 +484,19 @@ class ShellParser():
        """

        words = list(words)
        for word in words:
        for word in list(words):
            wtree = pyshlex.make_wordtree(word[1])
            for part in wtree:
                if not isinstance(part, list):
                    continue

                candidates = [part]
                if part[0] in ('`', '$('):
                    command = pyshlex.wordtree_as_string(part[1:-1])
                    self._parse_shell(command)

                # If command is of type:
                #
                # var="... $(cmd [...]) ..."
                #
                # Then iterate on what's between the quotes and if we find a
                # list, make that what we check for below.
                if len(part) >= 3 and part[0] == '"':
                    for p in part[1:-1]:
                        if isinstance(p, list):
                            candidates.append(p)

                for candidate in candidates:
                    if len(candidate) >= 2:
                        if candidate[0] in ('`', '$('):
                            command = pyshlex.wordtree_as_string(candidate[1:-1])
                            self._parse_shell(command)

                    if word[0] in ("cmd_name", "cmd_word"):
                        if word in words:
                            words.remove(word)
            if word[0] in ("cmd_name", "cmd_word"):
                if word in words:
                    words.remove(word)

        usetoken = False
        for word in words:
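A quick illustration of the parser whose cached fields change above (not part of the diff); parsing a snippet that reads a variable records it in parser.references:

    import logging
    import bb.codeparser

    p = bb.codeparser.PythonParser("demo", logging.getLogger("BitBake"))
    p.parse_python('d.getVar("FOO")', filename="<demo>", lineno=1)
    print(p.references)  # expected to include 'FOO'
    print(p.execs)       # any plain function calls would be recorded here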
@@ -420,30 +420,15 @@ class CommandsSync:
        return command.cooker.recipecaches[mc].pkg_dp
    getDefaultPreference.readonly = True


    def getSkippedRecipes(self, command, params):
        """
        Get the map of skipped recipes for the specified multiconfig/mc name (`params[0]`).

        Invoked by `bb.tinfoil.Tinfoil.get_skipped_recipes`

        :param command: Internally used parameter.
        :param params: Parameter array. params[0] is multiconfig/mc name. If not given, then default mc '' is assumed.
        :return: Dict whose keys are virtualfns and values are `bb.cooker.SkippedPackage`
        """
        try:
            mc = params[0]
        except IndexError:
            mc = ''

        # Return list sorted by reverse priority order
        import bb.cache
        def sortkey(x):
            vfn, _ = x
            realfn, _, item_mc = bb.cache.virtualfn2realfn(vfn)
            return -command.cooker.collections[item_mc].calc_bbfile_priority(realfn)[0], vfn
            realfn, _, mc = bb.cache.virtualfn2realfn(vfn)
            return (-command.cooker.collections[mc].calc_bbfile_priority(realfn)[0], vfn)

        skipdict = OrderedDict(sorted(command.cooker.skiplist_by_mc[mc].items(), key=sortkey))
        skipdict = OrderedDict(sorted(command.cooker.skiplist.items(), key=sortkey))
        return list(skipdict.items())
    getSkippedRecipes.readonly = True
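Per its docstring, getSkippedRecipes() is reached through tinfoil. A hedged sketch of that caller side (assuming an initialized build directory; the skipreason attribute is taken from bb.cooker.SkippedPackage and accessed defensively):

    import bb.tinfoil

    with bb.tinfoil.Tinfoil() as tinfoil:
        tinfoil.prepare()  # full parse so the skiplist is populated
        # dict() tolerates either a dict or a list of (virtualfn, info) pairs
        for virtualfn, skipped in dict(tinfoil.get_skipped_recipes()).items():
            print(virtualfn, getattr(skipped, "skipreason", ""))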
@@ -17,7 +17,7 @@ import threading
from io import StringIO, UnsupportedOperation
from contextlib import closing
from collections import defaultdict, namedtuple
import bb, bb.command
import bb, bb.exceptions, bb.command
from bb import utils, data, parse, event, cache, providers, taskdata, runqueue, build
import queue
import signal

@@ -134,8 +134,7 @@ class BBCooker:
        self.baseconfig_valid = False
        self.parsecache_valid = False
        self.eventlog = None
        # The skiplists, one per multiconfig
        self.skiplist_by_mc = defaultdict(dict)
        self.skiplist = {}
        self.featureset = CookerFeatures()
        if featureSet:
            for f in featureSet:

@@ -282,6 +281,7 @@ class BBCooker:
        self.databuilder = bb.cookerdata.CookerDataBuilder(self.configuration, False)
        self.databuilder.parseBaseConfiguration()
        self.data = self.databuilder.data
        self.data_hash = self.databuilder.data_hash
        self.extraconfigdata = {}

        eventlog = self.data.getVar("BB_DEFAULT_EVENTLOG")

@@ -315,13 +315,13 @@ class BBCooker:
            dbfile = (self.data.getVar("PERSISTENT_DIR") or self.data.getVar("CACHE")) + "/hashserv.db"
            upstream = self.data.getVar("BB_HASHSERVE_UPSTREAM") or None
            if upstream:
                import socket
                try:
                    with hashserv.create_client(upstream) as client:
                        client.ping()
                except (ConnectionError, ImportError) as e:
                    sock = socket.create_connection(upstream.split(":"), 5)
                    sock.close()
                except socket.error as e:
                    bb.warn("BB_HASHSERVE_UPSTREAM is not valid, unable to connect hash equivalence server at '%s': %s"
                            % (upstream, repr(e)))
                    upstream = None

            self.hashservaddr = "unix://%s/hashserve.sock" % self.data.getVar("TOPDIR")
            self.hashserv = hashserv.create_server(

@@ -370,11 +370,6 @@ class BBCooker:
        if not clean:
            bb.parse.BBHandler.cached_statements = {}

        # If writes were made to any of the data stores, we need to recalculate the data
        # store cache
        if hasattr(self, "databuilder"):
            self.databuilder.calc_datastore_hashes()

    def parseConfiguration(self):
        self.updateCacheSync()

@@ -617,8 +612,8 @@ class BBCooker:
        localdata = {}

        for mc in self.multiconfigs:
            taskdata[mc] = bb.taskdata.TaskData(halt, skiplist=self.skiplist_by_mc[mc], allowincomplete=allowincomplete)
            localdata[mc] = bb.data.createCopy(self.databuilder.mcdata[mc])
            taskdata[mc] = bb.taskdata.TaskData(halt, skiplist=self.skiplist, allowincomplete=allowincomplete)
            localdata[mc] = data.createCopy(self.databuilder.mcdata[mc])
            bb.data.expandKeys(localdata[mc])

        current = 0

@@ -685,14 +680,14 @@ class BBCooker:
        bb.event.fire(bb.event.TreeDataPreparationCompleted(len(fulltargetlist)), self.data)
        return taskdata, runlist

    def prepareTreeData(self, pkgs_to_build, task, halt=False):
    def prepareTreeData(self, pkgs_to_build, task):
        """
        Prepare a runqueue and taskdata object for iteration over pkgs_to_build
        """

        # We set halt to False here to prevent unbuildable targets raising
        # an exception when we're just generating data
        taskdata, runlist = self.buildTaskData(pkgs_to_build, task, halt, allowincomplete=True)
        taskdata, runlist = self.buildTaskData(pkgs_to_build, task, False, allowincomplete=True)

        return runlist, taskdata

@@ -706,7 +701,7 @@ class BBCooker:
        if not task.startswith("do_"):
            task = "do_%s" % task

        runlist, taskdata = self.prepareTreeData(pkgs_to_build, task, halt=True)
        runlist, taskdata = self.prepareTreeData(pkgs_to_build, task)
        rq = bb.runqueue.RunQueue(self, self.data, self.recipecaches, taskdata, runlist)
        rq.rqdata.prepare()
        return self.buildDependTree(rq, taskdata)

@@ -938,7 +933,7 @@ class BBCooker:
        for mc in self.multiconfigs:
            # First get list of recipes, including skipped
            recipefns = list(self.recipecaches[mc].pkg_fn.keys())
            recipefns.extend(self.skiplist_by_mc[mc].keys())
            recipefns.extend(self.skiplist.keys())

            # Work out list of bbappends that have been applied
            applied_appends = []

@@ -1343,7 +1338,7 @@ class BBCooker:
        self.buildSetVars()
        self.reset_mtime_caches()

        bb_caches = bb.cache.MulticonfigCache(self.databuilder, self.databuilder.data_hash, self.caches_array)
        bb_caches = bb.cache.MulticonfigCache(self.databuilder, self.data_hash, self.caches_array)

        layername = self.collections[mc].calc_bbfile_priority(fn)[2]
        infos = bb_caches[mc].parse(fn, self.collections[mc].get_file_appends(fn), layername)

@@ -1464,6 +1459,7 @@ class BBCooker:

            if t in task or getAllTaskSignatures:
                try:
                    rq.rqdata.prepare_task_hash(tid)
                    sig.append([pn, t, rq.rqdata.get_task_unihash(tid)])
                except KeyError:
                    sig.append(self.getTaskSignatures(target, [t])[0])

@@ -1817,8 +1813,8 @@ class CookerCollectFiles(object):
        bb.event.fire(CookerExit(), eventdata)

        # We need to track where we look so that we can know when the cache is invalid. There
        # is no nice way to do this, this is horrid. We intercept the os.listdir() and os.scandir()
        # calls while we run glob().
        # is no nice way to do this, this is horrid. We intercept the os.listdir()
        # (or os.scandir() for python 3.6+) calls while we run glob().
        origlistdir = os.listdir
        if hasattr(os, 'scandir'):
            origscandir = os.scandir

@@ -2102,6 +2098,7 @@ class Parser(multiprocessing.Process):
            except Exception as exc:
                tb = sys.exc_info()[2]
                exc.recipe = filename
                exc.traceback = list(bb.exceptions.extract_traceback(tb, context=3))
                return True, None, exc
            # Need to turn BaseExceptions into Exceptions here so we gracefully shutdown
            # and for example a worker thread doesn't just exit on its own in response to

@@ -2116,7 +2113,7 @@ class CookerParser(object):
        self.mcfilelist = mcfilelist
        self.cooker = cooker
        self.cfgdata = cooker.data
        self.cfghash = cooker.databuilder.data_hash
        self.cfghash = cooker.data_hash
        self.cfgbuilder = cooker.databuilder

        # Accounting statistics

@@ -2228,8 +2225,9 @@ class CookerParser(object):

        for process in self.processes:
            process.join()
            # clean up zombies
            process.close()
            # Added in 3.7, cleans up zombies
            if hasattr(process, "close"):
                process.close()

        bb.codeparser.parser_cache_save()
        bb.codeparser.parser_cache_savemerge()

@@ -2239,13 +2237,12 @@ class CookerParser(object):
        profiles = []
        for i in self.process_names:
            logfile = "profile-parse-%s.log" % i
            if os.path.exists(logfile) and os.path.getsize(logfile):
            if os.path.exists(logfile):
                profiles.append(logfile)

        if profiles:
            pout = "profile-parse.log.processed"
            bb.utils.process_profilelog(profiles, pout = pout)
            print("Processed parsing statistics saved to %s" % (pout))
        pout = "profile-parse.log.processed"
        bb.utils.process_profilelog(profiles, pout = pout)
        print("Processed parsing statistics saved to %s" % (pout))

    def final_cleanup(self):
        if self.syncthread:

@@ -2302,12 +2299,8 @@ class CookerParser(object):
            return False
        except ParsingFailure as exc:
            self.error += 1

            exc_desc = str(exc)
            if isinstance(exc, SystemExit) and not isinstance(exc.code, str):
                exc_desc = 'Exited with "%d"' % exc.code

            logger.error('Unable to parse %s: %s' % (exc.recipe, exc_desc))
            logger.error('Unable to parse %s: %s' %
                         (exc.recipe, bb.exceptions.to_string(exc.realexception)))
            self.shutdown(clean=False)
            return False
        except bb.parse.ParseError as exc:

@@ -2316,33 +2309,20 @@ class CookerParser(object):
            self.shutdown(clean=False, eventmsg=str(exc))
            return False
        except bb.data_smart.ExpansionError as exc:
            def skip_frames(f, fn_prefix):
                while f and f.tb_frame.f_code.co_filename.startswith(fn_prefix):
                    f = f.tb_next
                return f

            self.error += 1
            bbdir = os.path.dirname(__file__) + os.sep
            etype, value, tb = sys.exc_info()

            # Remove any frames where the code comes from bitbake. This
            # prevents deep (and pretty useless) backtraces for expansion error
            tb = skip_frames(tb, bbdir)
            cur = tb
            while cur:
                cur.tb_next = skip_frames(cur.tb_next, bbdir)
                cur = cur.tb_next

            etype, value, _ = sys.exc_info()
            tb = list(itertools.dropwhile(lambda e: e.filename.startswith(bbdir), exc.traceback))
            logger.error('ExpansionError during parsing %s', value.recipe,
                         exc_info=(etype, value, tb))
            self.shutdown(clean=False)
            return False
        except Exception as exc:
            self.error += 1
            _, value, _ = sys.exc_info()
            etype, value, tb = sys.exc_info()
            if hasattr(value, "recipe"):
                logger.error('Unable to parse %s' % value.recipe,
                             exc_info=sys.exc_info())
                             exc_info=(etype, value, exc.traceback))
            else:
                # Most likely, an exception occurred during raising an exception
                import traceback

@@ -2363,7 +2343,7 @@ class CookerParser(object):
        for virtualfn, info_array in result:
            if info_array[0].skipped:
                self.skipped += 1
                self.cooker.skiplist_by_mc[mc][virtualfn] = SkippedPackage(info_array[0])
                self.cooker.skiplist[virtualfn] = SkippedPackage(info_array[0])
            self.bb_caches[mc].add_info(virtualfn, info_array, self.cooker.recipecaches[mc],
                                        parsed=parsed, watcher = self.cooker.add_filewatch)
        return True
|
||||
self.data = self.basedata
|
||||
self.mcdata = {}
|
||||
|
||||
def calc_datastore_hashes(self):
|
||||
data_hash = hashlib.sha256()
|
||||
data_hash.update(self.data.get_hash().encode('utf-8'))
|
||||
multiconfig = (self.data.getVar("BBMULTICONFIG") or "").split()
|
||||
for config in multiconfig:
|
||||
data_hash.update(self.mcdata[config].get_hash().encode('utf-8'))
|
||||
self.data_hash = data_hash.hexdigest()
|
||||
|
||||
def parseBaseConfiguration(self, worker=False):
|
||||
mcdata = {}
|
||||
data_hash = hashlib.sha256()
|
||||
try:
|
||||
self.data = self.parseConfigurationFiles(self.prefiles, self.postfiles)
|
||||
|
||||
@@ -286,6 +279,7 @@ class CookerDataBuilder(object):
|
||||
bb.event.fire(bb.event.ConfigParsed(), self.data)
|
||||
|
||||
bb.parse.init_parser(self.data)
|
||||
data_hash.update(self.data.get_hash().encode('utf-8'))
|
||||
mcdata[''] = self.data
|
||||
|
||||
multiconfig = (self.data.getVar("BBMULTICONFIG") or "").split()
|
||||
@@ -295,9 +289,11 @@ class CookerDataBuilder(object):
|
||||
parsed_mcdata = self.parseConfigurationFiles(self.prefiles, self.postfiles, config)
|
||||
bb.event.fire(bb.event.ConfigParsed(), parsed_mcdata)
|
||||
mcdata[config] = parsed_mcdata
|
||||
data_hash.update(parsed_mcdata.get_hash().encode('utf-8'))
|
||||
if multiconfig:
|
||||
bb.event.fire(bb.event.MultiConfigParsed(mcdata), self.data)
|
||||
|
||||
self.data_hash = data_hash.hexdigest()
|
||||
except bb.data_smart.ExpansionError as e:
|
||||
logger.error(str(e))
|
||||
raise bb.BBHandledException()
|
||||
@@ -332,7 +328,6 @@ class CookerDataBuilder(object):
|
||||
for mc in mcdata:
|
||||
self.mcdata[mc] = bb.data.createCopy(mcdata[mc])
|
||||
self.data = self.mcdata['']
|
||||
self.calc_datastore_hashes()
|
||||
|
||||
def reset(self):
|
||||
# We may not have run parseBaseConfiguration() yet
|
||||
|
||||
@@ -293,7 +293,7 @@ def build_dependencies(key, keys, mod_funcs, shelldeps, varflagsexcl, ignored_va
    if key in mod_funcs:
        exclusions = set()
        moddep = bb.codeparser.modulecode_deps[key]
        value = handle_contains(moddep[4], moddep[3], exclusions, d)
        value = handle_contains("", moddep[3], exclusions, d)
        return frozenset((moddep[0] | keys & moddep[1]) - ignored_vars), value

    if key[-1] == ']':
@@ -272,9 +272,12 @@ class VariableHistory(object):
            return
        if 'op' not in loginfo or not loginfo['op']:
            loginfo['op'] = 'set'
        if 'detail' in loginfo:
            loginfo['detail'] = str(loginfo['detail'])
        if 'variable' not in loginfo or 'file' not in loginfo:
            raise ValueError("record() missing variable or file.")
        var = loginfo['variable']

        if var not in self.variables:
            self.variables[var] = []
        if not isinstance(self.variables[var], list):

@@ -333,8 +336,7 @@ class VariableHistory(object):
                    flag = '[%s] ' % (event['flag'])
                else:
                    flag = ''
                o.write("# %s %s:%s%s\n# %s\"%s\"\n" % \
                    (event['op'], event['file'], event['line'], display_func, flag, re.sub('\n', '\n# ', str(event['detail']))))
                o.write("# %s %s:%s%s\n# %s\"%s\"\n" % (event['op'], event['file'], event['line'], display_func, flag, re.sub('\n', '\n# ', event['detail'])))
            if len(history) > 1:
                o.write("# pre-expansion value:\n")
                o.write('# "%s"\n' % (commentVal))

@@ -388,7 +390,7 @@ class VariableHistory(object):
            if isset and event['op'] == 'set?':
                continue
            isset = True
            items = d.expand(str(event['detail'])).split()
            items = d.expand(event['detail']).split()
            for item in items:
                # This is a little crude but is belt-and-braces to avoid us
                # having to handle every possible operation type specifically
@@ -19,6 +19,7 @@ import sys
import threading
import traceback

import bb.exceptions
import bb.utils

# This is the pid for which we should generate the event. This is set when

@@ -194,12 +195,7 @@ def fire_ui_handlers(event, d):
        ui_queue.append(event)
        return

    with bb.utils.lock_timeout_nocheck(_thread_lock) as lock:
        if not lock:
            # If we can't get the lock, we may be recursively called, queue and return
            ui_queue.append(event)
            return

    with bb.utils.lock_timeout(_thread_lock):
        errors = []
        for h in _ui_handlers:
            #print "Sending event %s" % event

@@ -218,9 +214,6 @@ def fire_ui_handlers(event, d):
        for h in errors:
            del _ui_handlers[h]

        while ui_queue:
            fire_ui_handlers(ui_queue.pop(), d)

def fire(event, d):
    """Fire off an Event"""

@@ -766,7 +759,13 @@ class LogHandler(logging.Handler):

    def emit(self, record):
        if record.exc_info:
            record.bb_exc_formatted = traceback.format_exception(*record.exc_info)
            etype, value, tb = record.exc_info
            if hasattr(tb, 'tb_next'):
                tb = list(bb.exceptions.extract_traceback(tb, context=3))
            # Need to turn the value into something the logging system can pickle
            record.bb_exc_info = (etype, value, tb)
            record.bb_exc_formatted = bb.exceptions.format_exception(etype, value, tb, limit=5)
            value = str(value)
            record.exc_info = None
        fire(record, None)
bitbake/lib/bb/exceptions.py (new file, 96 lines)

@@ -0,0 +1,96 @@
#
# Copyright BitBake Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#

import inspect
import traceback
import bb.namedtuple_with_abc
from collections import namedtuple


class TracebackEntry(namedtuple.abc):
    """Pickleable representation of a traceback entry"""
    _fields = 'filename lineno function args code_context index'
    _header = ' File "{0.filename}", line {0.lineno}, in {0.function}{0.args}'

    def format(self, formatter=None):
        if not self.code_context:
            return self._header.format(self) + '\n'

        formatted = [self._header.format(self) + ':\n']

        for lineindex, line in enumerate(self.code_context):
            if formatter:
                line = formatter(line)

            if lineindex == self.index:
                formatted.append(' >%s' % line)
            else:
                formatted.append(' %s' % line)
        return formatted

    def __str__(self):
        return ''.join(self.format())

def _get_frame_args(frame):
    """Get the formatted arguments and class (if available) for a frame"""
    arginfo = inspect.getargvalues(frame)

    try:
        if not arginfo.args:
            return '', None
    # There have been reports from the field of python 2.6 which doesn't
    # return a namedtuple here but simply a tuple so fallback gracefully if
    # args isn't present.
    except AttributeError:
        return '', None

    firstarg = arginfo.args[0]
    if firstarg == 'self':
        self = arginfo.locals['self']
        cls = self.__class__.__name__

        arginfo.args.pop(0)
        del arginfo.locals['self']
    else:
        cls = None

    formatted = inspect.formatargvalues(*arginfo)
    return formatted, cls

def extract_traceback(tb, context=1):
    frames = inspect.getinnerframes(tb, context)
    for frame, filename, lineno, function, code_context, index in frames:
        formatted_args, cls = _get_frame_args(frame)
        if cls:
            function = '%s.%s' % (cls, function)
        yield TracebackEntry(filename, lineno, function, formatted_args,
                             code_context, index)

def format_extracted(extracted, formatter=None, limit=None):
    if limit:
        extracted = extracted[-limit:]

    formatted = []
    for tracebackinfo in extracted:
        formatted.extend(tracebackinfo.format(formatter))
    return formatted


def format_exception(etype, value, tb, context=1, limit=None, formatter=None):
    formatted = ['Traceback (most recent call last):\n']

    if hasattr(tb, 'tb_next'):
        tb = extract_traceback(tb, context)

    formatted.extend(format_extracted(tb, formatter, limit))
    formatted.extend(traceback.format_exception_only(etype, value))
    return formatted

def to_string(exc):
    if isinstance(exc, SystemExit):
        if not isinstance(exc.code, str):
            return 'Exited with "%d"' % exc.code
    return str(exc)
@@ -499,30 +499,30 @@ def fetcher_init(d):
|
||||
Calls before this must not hit the cache.
|
||||
"""
|
||||
|
||||
with bb.persist_data.persist('BB_URI_HEADREVS', d) as revs:
|
||||
try:
|
||||
# fetcher_init is called multiple times, so make sure we only save the
|
||||
# revs the first time it is called.
|
||||
if not bb.fetch2.saved_headrevs:
|
||||
bb.fetch2.saved_headrevs = dict(revs)
|
||||
except:
|
||||
pass
|
||||
revs = bb.persist_data.persist('BB_URI_HEADREVS', d)
|
||||
try:
|
||||
# fetcher_init is called multiple times, so make sure we only save the
|
||||
# revs the first time it is called.
|
||||
if not bb.fetch2.saved_headrevs:
|
||||
bb.fetch2.saved_headrevs = dict(revs)
|
||||
except:
|
||||
pass
|
||||
|
||||
# When to drop SCM head revisions controlled by user policy
|
||||
srcrev_policy = d.getVar('BB_SRCREV_POLICY') or "clear"
|
||||
if srcrev_policy == "cache":
|
||||
logger.debug("Keeping SRCREV cache due to cache policy of: %s", srcrev_policy)
|
||||
elif srcrev_policy == "clear":
|
||||
logger.debug("Clearing SRCREV cache due to cache policy of: %s", srcrev_policy)
|
||||
revs.clear()
|
||||
else:
|
||||
raise FetchError("Invalid SRCREV cache policy of: %s" % srcrev_policy)
|
||||
# When to drop SCM head revisions controlled by user policy
|
||||
srcrev_policy = d.getVar('BB_SRCREV_POLICY') or "clear"
|
||||
if srcrev_policy == "cache":
|
||||
logger.debug("Keeping SRCREV cache due to cache policy of: %s", srcrev_policy)
|
||||
elif srcrev_policy == "clear":
|
||||
logger.debug("Clearing SRCREV cache due to cache policy of: %s", srcrev_policy)
|
||||
revs.clear()
|
||||
else:
|
||||
raise FetchError("Invalid SRCREV cache policy of: %s" % srcrev_policy)
|
||||
|
||||
_checksum_cache.init_cache(d.getVar("BB_CACHEDIR"))
|
||||
_checksum_cache.init_cache(d.getVar("BB_CACHEDIR"))
|
||||
|
||||
for m in methods:
|
||||
if hasattr(m, "init"):
|
||||
m.init(d)
|
||||
for m in methods:
|
||||
if hasattr(m, "init"):
|
||||
m.init(d)
|
||||
|
||||
def fetcher_parse_save():
|
||||
_checksum_cache.save_extras()
|
||||
@@ -536,8 +536,8 @@ def fetcher_compare_revisions(d):
    when bitbake was started and return true if they have changed.
    """

    with dict(bb.persist_data.persist('BB_URI_HEADREVS', d)) as headrevs:
        return headrevs != bb.fetch2.saved_headrevs
    headrevs = dict(bb.persist_data.persist('BB_URI_HEADREVS', d))
    return headrevs != bb.fetch2.saved_headrevs

def mirror_from_string(data):
    mirrors = (data or "").replace('\\n',' ').split()
@@ -1317,7 +1317,7 @@ class FetchData(object):

        if checksum_name in self.parm:
            checksum_expected = self.parm[checksum_name]
        elif self.type not in ["http", "https", "ftp", "ftps", "sftp", "s3", "az", "crate", "gs", "gomod"]:
        elif self.type not in ["http", "https", "ftp", "ftps", "sftp", "s3", "az", "crate", "gs"]:
            checksum_expected = None
        else:
            checksum_expected = d.getVarFlag("SRC_URI", checksum_name)
@@ -1606,7 +1606,7 @@ class FetchMethod(object):
                if urlpath.find("/") != -1:
                    destdir = urlpath.rsplit("/", 1)[0] + '/'
                    bb.utils.mkdirhier("%s/%s" % (unpackdir, destdir))
                cmd = 'cp --force --preserve=timestamps --no-dereference --recursive -H "%s" "%s"' % (file, destdir)
                cmd = 'cp -fpPRH "%s" "%s"' % (file, destdir)
        else:
            urldata.unpack_tracer.unpack("archive-extract", unpackdir)

@@ -1662,13 +1662,13 @@ class FetchMethod(object):
        if not hasattr(self, "_latest_revision"):
            raise ParameterError("The fetcher for this URL does not support _latest_revision", ud.url)

        with bb.persist_data.persist('BB_URI_HEADREVS', d) as revs:
            key = self.generate_revision_key(ud, d, name)
            try:
                return revs[key]
            except KeyError:
                revs[key] = rev = self._latest_revision(ud, d, name)
                return rev
        revs = bb.persist_data.persist('BB_URI_HEADREVS', d)
        key = self.generate_revision_key(ud, d, name)
        try:
            return revs[key]
        except KeyError:
            revs[key] = rev = self._latest_revision(ud, d, name)
            return rev

    def sortable_revision(self, ud, d, name):
        latest_rev = self._build_revision(ud, d, name)
@@ -2088,7 +2088,6 @@ from . import npmsw
from . import az
from . import crate
from . import gcp
from . import gomod

methods.append(local.Local())
methods.append(wget.Wget())
@@ -2111,5 +2110,3 @@ methods.append(npmsw.NpmShrinkWrap())
methods.append(az.Az())
methods.append(crate.Crate())
methods.append(gcp.GCP())
methods.append(gomod.GoMod())
methods.append(gomod.GoModGit())

@@ -108,7 +108,7 @@ class ClearCase(FetchMethod):
                                ud.module.replace("/", "."),
                                ud.label.replace("/", "."))

        ud.viewname = "%s-view%s" % (ud.identifier, d.getVar("DATETIME"))
        ud.viewname = "%s-view%s" % (ud.identifier, d.getVar("DATETIME", d, True))
        ud.csname = "%s-config-spec" % (ud.identifier)
        ud.ccasedir = os.path.join(d.getVar("DL_DIR"), ud.type)
        ud.viewdir = os.path.join(ud.ccasedir, ud.viewname)
@@ -196,7 +196,7 @@ class ClearCase(FetchMethod):

    def need_update(self, ud, d):
        if ("LATEST" in ud.label) or (ud.customspec and "LATEST" in ud.customspec):
            ud.identifier += "-%s" % d.getVar("DATETIME")
            ud.identifier += "-%s" % d.getVar("DATETIME",d, True)
            return True
        if os.path.exists(ud.localpath):
            return False

@@ -70,7 +70,6 @@ class Crate(Wget):
            host = 'crates.io/api/v1/crates'

        ud.url = "https://%s/%s/%s/download" % (host, name, version)
        ud.versionsurl = "https://%s/%s/versions" % (host, name)
        ud.parm['downloadfilename'] = "%s-%s.crate" % (name, version)
        if 'name' not in ud.parm:
            ud.parm['name'] = '%s-%s' % (name, version)
@@ -140,11 +139,3 @@ class Crate(Wget):
            mdpath = os.path.join(bbpath, cratepath, mdfile)
            with open(mdpath, "w") as f:
                json.dump(metadata, f)

    def latest_versionstring(self, ud, d):
        from functools import cmp_to_key
        json_data = json.loads(self._fetch_index(ud.versionsurl, ud, d))
        versions = [(0, i["num"], "") for i in json_data["versions"]]
        versions = sorted(versions, key=cmp_to_key(bb.utils.vercmp))

        return (versions[-1][1], "")
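latest_versionstring() above sorts crate versions with a comparison function adapted via functools.cmp_to_key; the (0, num, "") tuples match the (epoch, version, revision) shape that bb.utils.vercmp() compares. A standalone sketch, with a simplified comparator standing in for vercmp so it runs without BitBake:

    from functools import cmp_to_key

    def vercmp_sketch(ta, tb):
        # Hypothetical stand-in for bb.utils.vercmp: compares only the dotted
        # numeric part of the version field, ignoring epoch and revision.
        va = [int(x) for x in ta[1].split("+")[0].split(".")]
        vb = [int(x) for x in tb[1].split("+")[0].split(".")]
        return (va > vb) - (va < vb)

    versions = [(0, "0.9.18", ""), (0, "0.9.29", ""), (0, "0.9.2", "")]
    latest = sorted(versions, key=cmp_to_key(vercmp_sketch))[-1][1]
    assert latest == "0.9.29"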
@@ -47,6 +47,7 @@ class GCP(FetchMethod):
        ud.basename = os.path.basename(ud.path)

        ud.localfile = d.expand(urllib.parse.unquote(ud.basename))
        ud.basecmd = "gsutil stat"

    def get_gcp_client(self):
        from google.cloud import storage
@@ -57,20 +58,17 @@ class GCP(FetchMethod):
        Fetch urls using the GCP API.
        Assumes localpath was called first.
        """
        from google.api_core.exceptions import NotFound
        logger.debug2(f"Trying to download gs://{ud.host}{ud.path} to {ud.localpath}")
        if self.gcp_client is None:
            self.get_gcp_client()

        bb.fetch2.check_network_access(d, "blob.download_to_filename", f"gs://{ud.host}{ud.path}")
        bb.fetch2.check_network_access(d, ud.basecmd, f"gs://{ud.host}{ud.path}")
        runfetchcmd("%s %s" % (ud.basecmd, f"gs://{ud.host}{ud.path}"), d)

        # Path sometimes has leading slash, so strip it
        path = ud.path.lstrip("/")
        blob = self.gcp_client.bucket(ud.host).blob(path)
        try:
            blob.download_to_filename(ud.localpath)
        except NotFound:
            raise FetchError("The GCP API threw a NotFound exception")
        blob.download_to_filename(ud.localpath)

        # Additional sanity checks copied from the wget class (although there
        # are no known issues which mean these are required, treat the GCP API
@@ -92,7 +90,8 @@ class GCP(FetchMethod):
        if self.gcp_client is None:
            self.get_gcp_client()

        bb.fetch2.check_network_access(d, "gcp_client.bucket(ud.host).blob(path).exists()", f"gs://{ud.host}{ud.path}")
        bb.fetch2.check_network_access(d, ud.basecmd, f"gs://{ud.host}{ud.path}")
        runfetchcmd("%s %s" % (ud.basecmd, f"gs://{ud.host}{ud.path}"), d)

        # Path sometimes has leading slash, so strip it
        path = ud.path.lstrip("/")
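Both GCP hunks replace the gsutil-based check/runfetchcmd calls with direct use of the google-cloud-storage client. The download path in isolation, runnable outside BitBake (bucket and object names are placeholders):

    # Minimal sketch of the download path shown above.
    from google.api_core.exceptions import NotFound
    from google.cloud import storage

    def download_blob(bucket_name, object_path, local_path):
        client = storage.Client()
        # Leading slashes would otherwise become empty path components.
        blob = client.bucket(bucket_name).blob(object_path.lstrip("/"))
        try:
            blob.download_to_filename(local_path)
        except NotFound:
            raise RuntimeError(f"gs://{bucket_name}/{object_path} does not exist")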
@@ -262,7 +262,7 @@ class Git(FetchMethod):
        for name in ud.names:
            ud.unresolvedrev[name] = 'HEAD'

        ud.basecmd = d.getVar("FETCHCMD_git") or "git -c gc.autoDetach=false -c core.pager=cat -c safe.bareRepository=all -c clone.defaultRemoteName=origin"
        ud.basecmd = d.getVar("FETCHCMD_git") or "git -c gc.autoDetach=false -c core.pager=cat -c safe.bareRepository=all"

        write_tarballs = d.getVar("BB_GENERATE_MIRROR_TARBALLS") or "0"
        ud.write_tarballs = write_tarballs != "0" or ud.rebaseable
@@ -551,31 +551,18 @@ class Git(FetchMethod):
            runfetchcmd("touch %s.done" % ud.fullmirror, d)

    def clone_shallow_local(self, ud, dest, d):
        """
        Shallow fetch from ud.clonedir (${DL_DIR}/git2/<gitrepo> by default):
        - For BB_GIT_SHALLOW_DEPTH: git fetch --depth <depth> rev
        - For BB_GIT_SHALLOW_REVS: git fetch --shallow-exclude=<revs> rev
        """
        """Clone the repo and make it shallow.

        bb.utils.mkdirhier(dest)
        init_cmd = "%s init -q" % ud.basecmd
        if ud.bareclone:
            init_cmd += " --bare"
        runfetchcmd(init_cmd, d, workdir=dest)
        runfetchcmd("%s remote add origin %s" % (ud.basecmd, ud.clonedir), d, workdir=dest)

        # Check the histories which should be excluded
        shallow_exclude = ''
        for revision in ud.shallow_revs:
            shallow_exclude += " --shallow-exclude=%s" % revision
        The upstream url of the new clone isn't set at this time, as it'll be
        set correctly when unpacked."""
        runfetchcmd("%s clone %s %s %s" % (ud.basecmd, ud.cloneflags, ud.clonedir, dest), d)

        to_parse, shallow_branches = [], []
        for name in ud.names:
            revision = ud.revisions[name]
            depth = ud.shallow_depths[name]

            # The --depth and --shallow-exclude can't be used together
            if depth and shallow_exclude:
                raise bb.fetch2.FetchError("BB_GIT_SHALLOW_REVS is set, but BB_GIT_SHALLOW_DEPTH is not 0.")
            if depth:
                to_parse.append('%s~%d^{}' % (revision, depth - 1))

            # For nobranch, we need a ref, otherwise the commits will be
            # removed, and for non-nobranch, we truncate the branch to our
@@ -588,49 +575,36 @@ class Git(FetchMethod):
            else:
                ref = "refs/remotes/origin/%s" % branch

            fetch_cmd = "%s fetch origin %s" % (ud.basecmd, revision)
            if depth:
                fetch_cmd += " --depth %s" % depth

            if shallow_exclude:
                fetch_cmd += shallow_exclude

            # Advertise the revision for lower version git such as 2.25.1:
            # error: Server does not allow request for unadvertised object.
            # The ud.clonedir is a local temporary dir, will be removed when
            # fetch is done, so we can do anything on it.
            adv_cmd = 'git branch -f advertise-%s %s' % (revision, revision)
            runfetchcmd(adv_cmd, d, workdir=ud.clonedir)

            runfetchcmd(fetch_cmd, d, workdir=dest)
            shallow_branches.append(ref)
            runfetchcmd("%s update-ref %s %s" % (ud.basecmd, ref, revision), d, workdir=dest)

        # Map srcrev+depths to revisions
        parsed_depths = runfetchcmd("%s rev-parse %s" % (ud.basecmd, " ".join(to_parse)), d, workdir=dest)

        # Resolve specified revisions
        parsed_revs = runfetchcmd("%s rev-parse %s" % (ud.basecmd, " ".join('"%s^{}"' % r for r in ud.shallow_revs)), d, workdir=dest)
        shallow_revisions = parsed_depths.splitlines() + parsed_revs.splitlines()

        # Apply extra ref wildcards
        all_refs_remote = runfetchcmd("%s ls-remote origin 'refs/*'" % ud.basecmd, \
                d, workdir=dest).splitlines()
        all_refs = []
        for line in all_refs_remote:
            all_refs.append(line.split()[-1])
        extra_refs = []
        all_refs = runfetchcmd('%s for-each-ref "--format=%%(refname)"' % ud.basecmd,
                               d, workdir=dest).splitlines()
        for r in ud.shallow_extra_refs:
            if not ud.bareclone:
                r = r.replace('refs/heads/', 'refs/remotes/origin/')

            if '*' in r:
                matches = filter(lambda a: fnmatch.fnmatchcase(a, r), all_refs)
                extra_refs.extend(matches)
                shallow_branches.extend(matches)
            else:
                extra_refs.append(r)
                shallow_branches.append(r)

        for ref in extra_refs:
            ref_fetch = os.path.basename(ref)
            runfetchcmd("%s fetch origin --depth 1 %s" % (ud.basecmd, ref_fetch), d, workdir=dest)
            revision = runfetchcmd("%s rev-parse FETCH_HEAD" % ud.basecmd, d, workdir=dest)
            runfetchcmd("%s update-ref %s %s" % (ud.basecmd, ref, revision), d, workdir=dest)

        # The url is local ud.clonedir, set it to upstream one
        repourl = self._get_repo_url(ud)
        runfetchcmd("%s remote set-url origin %s" % (ud.basecmd, shlex.quote(repourl)), d, workdir=dest)
        # Make the repository shallow
        shallow_cmd = [self.make_shallow_path, '-s']
        for b in shallow_branches:
            shallow_cmd.append('-r')
            shallow_cmd.append(b)
        shallow_cmd.extend(shallow_revisions)
        runfetchcmd(subprocess.list2cmdline(shallow_cmd), d, workdir=dest)

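The rewritten clone_shallow_local() builds the shallow repository with explicit fetch commands instead of post-processing a full clone with the make_shallow script. A standalone sketch of that command sequence, under the assumption that the server permits fetching the named revision (the diff works around older git with an advertise branch); paths, revision and depth are placeholders:

    # Sketch of the shallow-fetch sequence used by the newer implementation.
    import os
    import subprocess

    def shallow_clone(source_repo, dest, revision, depth=1, bare=True):
        os.makedirs(dest, exist_ok=True)
        init_cmd = ["git", "init", "-q"]
        if bare:
            init_cmd.append("--bare")
        subprocess.run(init_cmd, cwd=dest, check=True)
        subprocess.run(["git", "remote", "add", "origin", source_repo],
                       cwd=dest, check=True)
        # --depth truncates history below the fetched revision; the fetcher
        # uses --shallow-exclude=<rev> instead when BB_GIT_SHALLOW_REVS is set.
        subprocess.run(["git", "fetch", "origin", revision, "--depth", str(depth)],
                       cwd=dest, check=True)
        # Pin a ref so the fetched commits survive garbage collection.
        subprocess.run(["git", "update-ref", "refs/heads/shallow", revision],
                       cwd=dest, check=True)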
    def unpack(self, ud, destdir, d):
        """ unpack the downloaded src to destdir"""
@@ -952,8 +926,9 @@ class Git(FetchMethod):
                commits = None
            else:
                if not os.path.exists(rev_file) or not os.path.getsize(rev_file):
                    from pipes import quote
                    commits = bb.fetch2.runfetchcmd(
                            "git rev-list %s -- | wc -l" % shlex.quote(rev),
                            "git rev-list %s -- | wc -l" % quote(rev),
                            d, quiet=True).strip().lstrip('0')
                    if commits:
                        open(rev_file, "w").write("%d\n" % int(commits))
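The shlex.quote/pipes.quote swap above is more than style: the pipes module was deprecated in Python 3.11 and removed in 3.13, and shlex.quote is its direct replacement. Both wrap shell-unsafe strings in single quotes:

    # shlex.quote is the modern replacement for the removed pipes.quote.
    import shlex

    rev = "v1.0; rm -rf /"            # hostile input for illustration
    cmd = "git rev-list %s -- | wc -l" % shlex.quote(rev)
    assert cmd == "git rev-list 'v1.0; rm -rf /' -- | wc -l"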
@@ -147,19 +147,6 @@ class GitSM(Git):

        return submodules != []

    def call_process_submodules(self, ud, d, extra_check, subfunc):
        # If we're using a shallow mirror tarball it needs to be
        # unpacked temporarily so that we can examine the .gitmodules file
        if ud.shallow and os.path.exists(ud.fullshallow) and extra_check:
            tmpdir = tempfile.mkdtemp(dir=d.getVar("DL_DIR"))
            try:
                runfetchcmd("tar -xzf %s" % ud.fullshallow, d, workdir=tmpdir)
                self.process_submodules(ud, tmpdir, subfunc, d)
            finally:
                shutil.rmtree(tmpdir)
        else:
            self.process_submodules(ud, ud.clonedir, subfunc, d)

    def need_update(self, ud, d):
        if Git.need_update(self, ud, d):
            return True
@@ -177,7 +164,15 @@ class GitSM(Git):
                logger.error('gitsm: submodule update check failed: %s %s' % (type(e).__name__, str(e)))
                need_update_result = True

        self.call_process_submodules(ud, d, not os.path.exists(ud.clonedir), need_update_submodule)
        # If we're using a shallow mirror tarball it needs to be unpacked
        # temporarily so that we can examine the .gitmodules file
        if ud.shallow and os.path.exists(ud.fullshallow) and not os.path.exists(ud.clonedir):
            tmpdir = tempfile.mkdtemp(dir=d.getVar("DL_DIR"))
            runfetchcmd("tar -xzf %s" % ud.fullshallow, d, workdir=tmpdir)
            self.process_submodules(ud, tmpdir, need_update_submodule, d)
            shutil.rmtree(tmpdir)
        else:
            self.process_submodules(ud, ud.clonedir, need_update_submodule, d)

        if need_update_list:
            logger.debug('gitsm: Submodules requiring update: %s' % (' '.join(need_update_list)))
@@ -200,7 +195,16 @@ class GitSM(Git):
                raise

        Git.download(self, ud, d)
        self.call_process_submodules(ud, d, self.need_update(ud, d), download_submodule)

        # If we're using a shallow mirror tarball it needs to be unpacked
        # temporarily so that we can examine the .gitmodules file
        if ud.shallow and os.path.exists(ud.fullshallow) and self.need_update(ud, d):
            tmpdir = tempfile.mkdtemp(dir=d.getVar("DL_DIR"))
            runfetchcmd("tar -xzf %s" % ud.fullshallow, d, workdir=tmpdir)
            self.process_submodules(ud, tmpdir, download_submodule, d)
            shutil.rmtree(tmpdir)
        else:
            self.process_submodules(ud, ud.clonedir, download_submodule, d)

    def unpack(self, ud, destdir, d):
        def unpack_submodules(ud, url, module, modpath, workdir, d):
@@ -259,6 +263,14 @@ class GitSM(Git):
            newfetch = Fetch([url], d, cache=False)
            urldata.extend(newfetch.expanded_urldata())

        self.call_process_submodules(ud, d, ud.method.need_update(ud, d), add_submodule)
        # If we're using a shallow mirror tarball it needs to be unpacked
        # temporarily so that we can examine the .gitmodules file
        if ud.shallow and os.path.exists(ud.fullshallow) and ud.method.need_update(ud, d):
            tmpdir = tempfile.mkdtemp(dir=d.getVar("DL_DIR"))
            subprocess.check_call("tar -xzf %s" % ud.fullshallow, cwd=tmpdir, shell=True)
            self.process_submodules(ud, tmpdir, add_submodule, d)
            shutil.rmtree(tmpdir)
        else:
            self.process_submodules(ud, ud.clonedir, add_submodule, d)

        return urldata

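call_process_submodules() collapses the three near-identical unpack-then-examine blocks in need_update(), download() and the urldata hook into one helper, and adds the try/finally cleanup the originals lacked. The pattern in isolation, with placeholder names rather than BitBake APIs:

    # Minimal sketch: shared setup/teardown around a caller-supplied callback.
    import shutil
    import tempfile

    def with_unpacked_tarball(tarball_needed, unpack, process):
        if tarball_needed:
            tmpdir = tempfile.mkdtemp()
            try:
                unpack(tmpdir)          # e.g. extract the mirror tarball
                process(tmpdir)         # examine .gitmodules etc.
            finally:
                shutil.rmtree(tmpdir)   # cleaned up even if process() raises
        else:
            process(None)               # operate on the existing clone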
@@ -1,268 +0,0 @@
"""
BitBake 'Fetch' implementation for Go modules

The gomod/gomodgit fetchers are used to download Go modules to the module cache
from a module proxy or directly from a version control repository.

Example SRC_URI:

SRC_URI += "gomod://golang.org/x/net;version=v0.9.0;sha256sum=..."
SRC_URI += "gomodgit://golang.org/x/net;version=v0.9.0;repo=go.googlesource.com/net;srcrev=..."

Required SRC_URI parameters:

- version
    The version of the module.

Optional SRC_URI parameters:

- mod
    Fetch and unpack the go.mod file only instead of the complete module.
    The go command may need to download go.mod files for many different modules
    when computing the build list, and go.mod files are much smaller than
    module zip files.
    The default is "0", set mod=1 for the go.mod file only.

- sha256sum
    The checksum of the module zip file, or the go.mod file in case of fetching
    only the go.mod file. Alternatively, set the SRC_URI variable flag for
    "module@version.sha256sum".

- protocol
    The method used when fetching directly from a version control repository.
    The default is "https" for git.

- repo
    The URL when fetching directly from a version control repository. Required
    when the URL is different from the module path.

- srcrev
    The revision identifier used when fetching directly from a version control
    repository. Alternatively, set the SRCREV variable for "module@version".

- subdir
    The module subdirectory when fetching directly from a version control
    repository. Required when the module is not located in the root of the
    repository.

Related variables:

- GO_MOD_PROXY
    The module proxy used by the fetcher.

- GO_MOD_CACHE_DIR
    The directory where the module cache is located.
    This must match the exported GOMODCACHE variable for the go command to find
    the downloaded modules.

See the Go modules reference, https://go.dev/ref/mod, for more information
about the module cache, module proxies and version control systems.
"""

import hashlib
import os
import re
import shutil
import subprocess
import zipfile

import bb
from bb.fetch2 import FetchError
from bb.fetch2 import MissingParameterError
from bb.fetch2 import runfetchcmd
from bb.fetch2 import subprocess_setup
from bb.fetch2.git import Git
from bb.fetch2.wget import Wget


def escape(path):
    """Escape capital letters using exclamation points."""
    return re.sub(r'([A-Z])', lambda m: '!' + m.group(1).lower(), path)

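escape() implements the case-encoding rule from the Go module reference: module paths are case-sensitive, but the cache may land on a case-insensitive filesystem, so capital letters are replaced by '!' plus the lowercase letter. Its behaviour, verifiable standalone:

    # Behaviour of escape() above.
    import re

    def escape(path):
        return re.sub(r'([A-Z])', lambda m: '!' + m.group(1).lower(), path)

    assert escape("github.com/Azure/azure-sdk") == "github.com/!azure/azure-sdk"
    assert escape("v1.0.0-RC1") == "v1.0.0-!r!c1"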
class GoMod(Wget):
    """Class to fetch Go modules from a Go module proxy via wget"""

    def supports(self, ud, d):
        """Check to see if a given URL is for this fetcher."""
        return ud.type == 'gomod'

    def urldata_init(self, ud, d):
        """Set up to download the module from the module proxy.

        Set up to download the module zip file to the module cache directory
        and unpack the go.mod file (unless downloading only the go.mod file):

        cache/download/<module>/@v/<version>.zip: The module zip file.
        cache/download/<module>/@v/<version>.mod: The go.mod file.
        """

        proxy = d.getVar('GO_MOD_PROXY') or 'proxy.golang.org'
        moddir = d.getVar('GO_MOD_CACHE_DIR') or 'pkg/mod'

        if 'version' not in ud.parm:
            raise MissingParameterError('version', ud.url)

        module = ud.host
        if ud.path != '/':
            module += ud.path
        ud.parm['module'] = module

        # Set URL and filename for wget download
        path = escape(module + '/@v/' + ud.parm['version'])
        if ud.parm.get('mod', '0') == '1':
            path += '.mod'
        else:
            path += '.zip'
            ud.parm['unpack'] = '0'
        ud.url = bb.fetch2.encodeurl(
            ('https', proxy, '/' + path, None, None, None))
        ud.parm['downloadfilename'] = path

        # Set name parameter if sha256sum is set in recipe
        name = f"{module}@{ud.parm['version']}"
        if d.getVarFlag('SRC_URI', name + '.sha256sum'):
            ud.parm['name'] = name

        # Set subdir for unpack
        ud.parm['subdir'] = os.path.join(moddir, 'cache/download',
                                         os.path.dirname(path))

        super().urldata_init(ud, d)

    def unpack(self, ud, rootdir, d):
        """Unpack the module in the module cache."""

        # Unpack the module zip file or go.mod file
        super().unpack(ud, rootdir, d)

        if ud.localpath.endswith('.zip'):
            # Unpack the go.mod file from the zip file
            module = ud.parm['module']
            unpackdir = os.path.join(rootdir, ud.parm['subdir'])
            name = os.path.basename(ud.localpath).rsplit('.', 1)[0] + '.mod'
            bb.note(f"Unpacking {name} to {unpackdir}/")
            with zipfile.ZipFile(ud.localpath) as zf:
                with open(os.path.join(unpackdir, name), mode='wb') as mf:
                    try:
                        f = module + '@' + ud.parm['version'] + '/go.mod'
                        shutil.copyfileobj(zf.open(f), mf)
                    except KeyError:
                        # If the module does not have a go.mod file, synthesize
                        # one containing only a module statement.
                        mf.write(f'module {module}\n'.encode())

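Tracing urldata_init() for the docstring's example SRC_URI shows how a gomod URL maps onto the proxy and the cache layout. A standalone re-derivation:

    # What urldata_init() computes for
    # SRC_URI = "gomod://golang.org/x/net;version=v0.9.0;sha256sum=..."
    import os
    import re

    def escape(path):
        return re.sub(r'([A-Z])', lambda m: '!' + m.group(1).lower(), path)

    module = "golang.org/x/net"                      # ud.host + ud.path
    path = escape(module + "/@v/v0.9.0") + ".zip"

    url = "https://proxy.golang.org/" + path
    subdir = os.path.join("pkg/mod", "cache/download", os.path.dirname(path))

    assert url == "https://proxy.golang.org/golang.org/x/net/@v/v0.9.0.zip"
    assert subdir == "pkg/mod/cache/download/golang.org/x/net/@v"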
class GoModGit(Git):
    """Class to fetch Go modules directly from a git repository"""

    def supports(self, ud, d):
        """Check to see if a given URL is for this fetcher."""
        return ud.type == 'gomodgit'

    def urldata_init(self, ud, d):
        """Set up to download the module from the git repository.

        Set up to download the git repository to the module cache directory and
        unpack the module zip file and the go.mod file:

        cache/vcs/<hash>: The bare git repository.
        cache/download/<module>/@v/<version>.zip: The module zip file.
        cache/download/<module>/@v/<version>.mod: The go.mod file.
        """

        moddir = d.getVar('GO_MOD_CACHE_DIR') or 'pkg/mod'

        if 'version' not in ud.parm:
            raise MissingParameterError('version', ud.url)

        module = ud.host
        if ud.path != '/':
            module += ud.path
        ud.parm['module'] = module

        # Set host, path and srcrev for git download
        if 'repo' in ud.parm:
            repo = ud.parm['repo']
            idx = repo.find('/')
            if idx != -1:
                ud.host = repo[:idx]
                ud.path = repo[idx:]
            else:
                ud.host = repo
                ud.path = ''
        if 'protocol' not in ud.parm:
            ud.parm['protocol'] = 'https'
        name = f"{module}@{ud.parm['version']}"
        ud.names = [name]
        srcrev = d.getVar('SRCREV_' + name)
        if srcrev:
            if 'srcrev' not in ud.parm:
                ud.parm['srcrev'] = srcrev
        else:
            if 'srcrev' in ud.parm:
                d.setVar('SRCREV_' + name, ud.parm['srcrev'])
        if 'branch' not in ud.parm:
            ud.parm['nobranch'] = '1'

        # Set subpath, subdir and bareclone for git unpack
        if 'subdir' in ud.parm:
            ud.parm['subpath'] = ud.parm['subdir']
        key = f"git3:{ud.parm['protocol']}://{ud.host}{ud.path}".encode()
        ud.parm['key'] = key
        ud.parm['subdir'] = os.path.join(moddir, 'cache/vcs',
                                         hashlib.sha256(key).hexdigest())
        ud.parm['bareclone'] = '1'

        super().urldata_init(ud, d)

    def unpack(self, ud, rootdir, d):
        """Unpack the module in the module cache."""

        # Unpack the bare git repository
        super().unpack(ud, rootdir, d)

        moddir = d.getVar('GO_MOD_CACHE_DIR') or 'pkg/mod'

        # Create the info file
        module = ud.parm['module']
        repodir = os.path.join(rootdir, ud.parm['subdir'])
        with open(repodir + '.info', 'wb') as f:
            f.write(ud.parm['key'])

        # Unpack the go.mod file from the repository
        unpackdir = os.path.join(rootdir, moddir, 'cache/download',
                                 escape(module), '@v')
        bb.utils.mkdirhier(unpackdir)
        srcrev = ud.parm['srcrev']
        version = ud.parm['version']
        escaped_version = escape(version)
        cmd = f"git ls-tree -r --name-only '{srcrev}'"
        if 'subpath' in ud.parm:
            cmd += f" '{ud.parm['subpath']}'"
        files = runfetchcmd(cmd, d, workdir=repodir).split()
        name = escaped_version + '.mod'
        bb.note(f"Unpacking {name} to {unpackdir}/")
        with open(os.path.join(unpackdir, name), mode='wb') as mf:
            f = 'go.mod'
            if 'subpath' in ud.parm:
                f = os.path.join(ud.parm['subpath'], f)
            if f in files:
                cmd = ['git', 'cat-file', 'blob', srcrev + ':' + f]
                subprocess.check_call(cmd, stdout=mf, cwd=repodir,
                                      preexec_fn=subprocess_setup)
            else:
                # If the module does not have a go.mod file, synthesize one
                # containing only a module statement.
                mf.write(f'module {module}\n'.encode())

        # Synthesize the module zip file from the repository
        name = escaped_version + '.zip'
        bb.note(f"Unpacking {name} to {unpackdir}/")
        with zipfile.ZipFile(os.path.join(unpackdir, name), mode='w') as zf:
            prefix = module + '@' + version + '/'
            for f in files:
                cmd = ['git', 'cat-file', 'blob', srcrev + ':' + f]
                data = subprocess.check_output(cmd, cwd=repodir,
                                               preexec_fn=subprocess_setup)
                zf.writestr(prefix + f, data)
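The final loop synthesizes the module zip in the layout the go command expects: every entry carries a module@version/ prefix. A minimal standalone sketch with placeholder file contents:

    # Sketch of the module-zip layout built above, kept in memory.
    import io
    import zipfile

    module, version = "example.com/mymod", "v1.0.0"
    files = {"go.mod": b"module example.com/mymod\n",
             "mymod.go": b"package mymod\n"}

    buf = io.BytesIO()
    with zipfile.ZipFile(buf, mode="w") as zf:
        prefix = f"{module}@{version}/"
        for name, data in files.items():
            # The go tool expects every entry under the module@version/ prefix.
            zf.writestr(prefix + name, data)

    assert zipfile.ZipFile(buf).namelist() == [
        "example.com/mymod@v1.0.0/go.mod",
        "example.com/mymod@v1.0.0/mymod.go",
    ]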
@@ -42,12 +42,11 @@ from bb.utils import is_semver

def npm_package(package):
    """Convert the npm package name to remove unsupported character"""
    # For scoped package names ('@user/package') the '/' is replaced by a '-'.
    # This is similar to what 'npm pack' does, but 'npm pack' also strips the
    # leading '@', which can lead to ambiguous package names.
    # Scoped package names (with the @) use the same naming convention
    # as the 'npm pack' command.
    name = re.sub("/", "-", package)
    name = name.lower()
    name = re.sub(r"[^\-a-z0-9@]", "", name)
    name = re.sub(r"[^\-a-z0-9]", "", name)
    name = name.strip("-")
    return name

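The character-class change is the whole difference between the two versions: keeping '@' preserves the package scope, so two packages differing only in scope no longer map to the same name. A runnable comparison:

    # Effect of keeping '@' in the allowed character class.
    import re

    def npm_package(package, keep_at=True):
        name = re.sub("/", "-", package).lower()
        allowed = r"[^\-a-z0-9@]" if keep_at else r"[^\-a-z0-9]"
        name = re.sub(allowed, "", name)
        return name.strip("-")

    assert npm_package("@babel/core", keep_at=True) == "@babel-core"
    assert npm_package("@babel/core", keep_at=False) == "babel-core"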
@@ -97,7 +97,7 @@ class NpmShrinkWrap(FetchMethod):

        integrity = params.get("integrity", None)
        resolved = params.get("resolved", None)
        version = params.get("version", resolved)
        version = params.get("version", None)

        # Handle registry sources
        if is_semver(version) and integrity:
@@ -184,7 +184,6 @@ class NpmShrinkWrap(FetchMethod):
            uri = URI("git://" + str(groups["url"]))
            uri.params["protocol"] = str(groups["protocol"])
            uri.params["rev"] = str(groups["rev"])
            uri.params["nobranch"] = "1"
            uri.params["destsuffix"] = destsuffix

            url = str(uri)
@@ -269,7 +268,7 @@ class NpmShrinkWrap(FetchMethod):

    def unpack(self, ud, rootdir, d):
        """Unpack the downloaded dependencies"""
        destdir = rootdir
        destdir = d.getVar("S")
        destsuffix = ud.parm.get("destsuffix")
        if destsuffix:
            destdir = os.path.join(rootdir, destsuffix)

@@ -210,6 +210,3 @@ class Svn(FetchMethod):

    def _build_revision(self, ud, d):
        return ud.revision

    def supports_checksum(self, urldata):
        return False

@@ -87,7 +87,7 @@ class Wget(FetchMethod):
        if not ud.localfile:
            ud.localfile = d.expand(urllib.parse.unquote(ud.host + ud.path).replace("/", "."))

        self.basecmd = d.getVar("FETCHCMD_wget") or "/usr/bin/env wget -t 2 -T 100"
        self.basecmd = d.getVar("FETCHCMD_wget") or "/usr/bin/env wget -t 2 -T 30"

        if ud.type == 'ftp' or ud.type == 'ftps':
            self.basecmd += " --passive-ftp"
@@ -108,8 +108,7 @@ class Wget(FetchMethod):

        fetchcmd = self.basecmd

        dldir = os.path.realpath(d.getVar("DL_DIR"))
        localpath = os.path.join(dldir, ud.localfile) + ".tmp"
        localpath = os.path.join(d.getVar("DL_DIR"), ud.localfile) + ".tmp"
        bb.utils.mkdirhier(os.path.dirname(localpath))
        fetchcmd += " -O %s" % shlex.quote(localpath)

@@ -129,21 +128,12 @@ class Wget(FetchMethod):
        uri = ud.url.split(";")[0]
        if os.path.exists(ud.localpath):
            # file exists, but we didn't complete it.. trying again..
            fetchcmd += " -c -P " + dldir + " '" + uri + "'"
            fetchcmd += d.expand(" -c -P ${DL_DIR} '%s'" % uri)
        else:
            fetchcmd += " -P " + dldir + " '" + uri + "'"
            fetchcmd += d.expand(" -P ${DL_DIR} '%s'" % uri)

        self._runwget(ud, d, fetchcmd, False)

        # Sanity check since wget can pretend it succeeded when it didn't
        # Also, this used to happen if sourceforge sent us to the mirror page
        if not os.path.exists(localpath):
            raise FetchError("The fetch command returned success for url %s but %s doesn't exist?!" % (uri, localpath), uri)

        if os.path.getsize(localpath) == 0:
            os.remove(localpath)
            raise FetchError("The fetch of %s resulted in a zero size file?! Deleting and failing since this isn't right." % (uri), uri)

        # Try and verify any checksum now, meaning if it isn't correct, we don't remove the
        # original file, which might be a race (imagine two recipes referencing the same
        # source, one with an incorrect checksum)
@@ -153,6 +143,15 @@ class Wget(FetchMethod):
        # Our lock prevents multiple writers but mirroring code may grab incomplete files
        os.rename(localpath, localpath[:-4])

        # Sanity check since wget can pretend it succeeded when it didn't
        # Also, this used to happen if sourceforge sent us to the mirror page
        if not os.path.exists(ud.localpath):
            raise FetchError("The fetch command returned success for url %s but %s doesn't exist?!" % (uri, ud.localpath), uri)

        if os.path.getsize(ud.localpath) == 0:
            os.remove(ud.localpath)
            raise FetchError("The fetch of %s resulted in a zero size file?! Deleting and failing since this isn't right." % (uri), uri)

        return True

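The reworked download path fetches to <name>.tmp, sanity-checks the result and only then renames it into place, so concurrent readers never observe a partial download. The idiom in isolation (fetch is a placeholder for the wget invocation):

    # Download-to-.tmp-then-rename idiom used above.
    import os

    def safe_download(fetch, localpath):
        tmppath = localpath + ".tmp"
        fetch(tmppath)
        if not os.path.exists(tmppath) or os.path.getsize(tmppath) == 0:
            raise RuntimeError("download failed or produced an empty file")
        # Atomic on POSIX: readers see either the old file or the complete one.
        os.rename(tmppath, localpath)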
    def checkstatus(self, fetch, ud, d, try_again=True):
@@ -244,12 +243,7 @@ class Wget(FetchMethod):
                        fetch.connection_cache.remove_connection(h.host, h.port)
                    raise urllib.error.URLError(err)
                else:
                    try:
                        r = h.getresponse()
                    except TimeoutError as e:
                        if fetch.connection_cache:
                            fetch.connection_cache.remove_connection(h.host, h.port)
                        raise TimeoutError(e)
                    r = h.getresponse()

                # Pick apart the HTTPResponse object to get the addinfourl
                # object initialized properly.
@@ -376,7 +370,7 @@ class Wget(FetchMethod):
            except (FileNotFoundError, netrc.NetrcParseError):
                pass

            with opener.open(r, timeout=100) as response:
            with opener.open(r, timeout=30) as response:
                pass
        except (urllib.error.URLError, ConnectionResetError, TimeoutError) as e:
            if try_again:

@@ -89,6 +89,10 @@ class BBLogFormatter(logging.Formatter):
        msg = logging.Formatter.format(self, record)
        if hasattr(record, 'bb_exc_formatted'):
            msg += '\n' + ''.join(record.bb_exc_formatted)
        elif hasattr(record, 'bb_exc_info'):
            etype, value, tb = record.bb_exc_info
            formatted = bb.exceptions.format_exception(etype, value, tb, limit=5)
            msg += '\n' + ''.join(formatted)
        return msg

    def colorize(self, record):

@@ -49,23 +49,20 @@ class SkipPackage(SkipRecipe):
__mtime_cache = {}
def cached_mtime(f):
    if f not in __mtime_cache:
        res = os.stat(f)
        __mtime_cache[f] = (res.st_mtime_ns, res.st_size, res.st_ino)
        __mtime_cache[f] = os.stat(f)[stat.ST_MTIME]
    return __mtime_cache[f]

def cached_mtime_noerror(f):
    if f not in __mtime_cache:
        try:
            res = os.stat(f)
            __mtime_cache[f] = (res.st_mtime_ns, res.st_size, res.st_ino)
            __mtime_cache[f] = os.stat(f)[stat.ST_MTIME]
        except OSError:
            return 0
    return __mtime_cache[f]

def check_mtime(f, mtime):
    try:
        res = os.stat(f)
        current_mtime = (res.st_mtime_ns, res.st_size, res.st_ino)
        current_mtime = os.stat(f)[stat.ST_MTIME]
        __mtime_cache[f] = current_mtime
    except OSError:
        current_mtime = 0
@@ -73,8 +70,7 @@ def check_mtime(f, mtime):

def update_mtime(f):
    try:
        res = os.stat(f)
        __mtime_cache[f] = (res.st_mtime_ns, res.st_size, res.st_ino)
        __mtime_cache[f] = os.stat(f)[stat.ST_MTIME]
    except OSError:
        if f in __mtime_cache:
            del __mtime_cache[f]

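The richer cache key guards against edits made within the same second, which a whole-second mtime cannot distinguish; nanosecond mtime plus size plus inode makes a stale cache hit far less likely. A standalone demonstration where the size component alone catches the change:

    # Why (st_mtime_ns, st_size, st_ino) beats st_mtime alone as a change key.
    import os
    import stat
    import tempfile

    with tempfile.NamedTemporaryFile(delete=False) as tmp:
        path = tmp.name

    res = os.stat(path)
    coarse = os.stat(path)[stat.ST_MTIME]           # whole seconds only
    fine = (res.st_mtime_ns, res.st_size, res.st_ino)

    with open(path, "w") as f:
        f.write("changed")                          # possibly in the same second

    res2 = os.stat(path)
    # coarse may compare equal here; the tuple changes because st_size did.
    assert (res2.st_mtime_ns, res2.st_size, res2.st_ino) != fine
    os.unlink(path)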
@@ -240,16 +240,14 @@ class ExportFuncsNode(AstNode):
            data.setVar(func, sentinel + " " + calledfunc + "\n", parsing=True)

class AddTaskNode(AstNode):
    def __init__(self, filename, lineno, tasks, before, after):
    def __init__(self, filename, lineno, func, before, after):
        AstNode.__init__(self, filename, lineno)
        self.tasks = tasks
        self.func = func
        self.before = before
        self.after = after

    def eval(self, data):
        tasks = self.tasks.split()
        for task in tasks:
            bb.build.addtask(task, self.before, self.after, data)
        bb.build.addtask(self.func, self.before, self.after, data)

class DelTaskNode(AstNode):
    def __init__(self, filename, lineno, tasks):
@@ -350,11 +348,21 @@ def handlePythonMethod(statements, filename, lineno, funcname, modulename, body)
def handleExportFuncs(statements, filename, lineno, m, classname):
    statements.append(ExportFuncsNode(filename, lineno, m.group(1), classname))

def handleAddTask(statements, filename, lineno, tasks, before, after):
    statements.append(AddTaskNode(filename, lineno, tasks, before, after))
def handleAddTask(statements, filename, lineno, m):
    func = m.group("func")
    before = m.group("before")
    after = m.group("after")
    if func is None:
        return

def handleDelTask(statements, filename, lineno, tasks):
    statements.append(DelTaskNode(filename, lineno, tasks))
    statements.append(AddTaskNode(filename, lineno, func, before, after))

def handleDelTask(statements, filename, lineno, m):
    func = m.group(1)
    if func is None:
        return

    statements.append(DelTaskNode(filename, lineno, func))

def handleBBHandlers(statements, filename, lineno, m):
    statements.append(BBHandlerNode(filename, lineno, m.group(1)))

@@ -23,8 +23,8 @@ __func_start_regexp__ = re.compile(r"(((?P<py>python(?=(\s|\()))|(?P<fr>faker
__inherit_regexp__ = re.compile(r"inherit\s+(.+)" )
__inherit_def_regexp__ = re.compile(r"inherit_defer\s+(.+)" )
__export_func_regexp__ = re.compile(r"EXPORT_FUNCTIONS\s+(.+)" )
__addtask_regexp__ = re.compile(r"addtask\s+([^#\n]+)(?P<comment>#.*|.*?)")
__deltask_regexp__ = re.compile(r"deltask\s+([^#\n]+)(?P<comment>#.*|.*?)")
__addtask_regexp__ = re.compile(r"addtask\s+(?P<func>\w+)\s*((before\s*(?P<before>((.*(?=after))|(.*))))|(after\s*(?P<after>((.*(?=before))|(.*)))))*")
__deltask_regexp__ = re.compile(r"deltask\s+(.+)")
__addhandler_regexp__ = re.compile(r"addhandler\s+(.+)" )
__def_regexp__ = re.compile(r"def\s+(\w+).*:" )
__python_func_regexp__ = re.compile(r"(\s+.*)|(^$)|(^#)" )
@@ -239,38 +239,29 @@ def feeder(lineno, s, fn, root, statements, eof=False):

    m = __addtask_regexp__.match(s)
    if m:
        after = ""
        before = ""
        if len(m.group().split()) == 2:
            # Check and warn for "addtask task1 task2"
            m2 = re.match(r"addtask\s+(?P<func>\w+)(?P<ignores>.*)", s)
            if m2 and m2.group('ignores'):
                logger.warning('addtask ignored: "%s"' % m2.group('ignores'))

        # This code splits on 'before' and 'after' instead of on whitespace so we can defer
        # evaluation to as late as possible.
        tasks = m.group(1).split(" before ")[0].split(" after ")[0]

        for exp in m.group(1).split(" before "):
            exp2 = exp.split(" after ")
            if len(exp2) > 1:
                after = after + " ".join(exp2[1:])

        for exp in m.group(1).split(" after "):
            exp2 = exp.split(" before ")
            if len(exp2) > 1:
                before = before + " ".join(exp2[1:])

        # Check and warn for having task with a keyword as part of task name
        # Check and warn for "addtask task1 before task2 before task3", the
        # similar to "after"
        taskexpression = s.split()
        for word in ('before', 'after'):
            if taskexpression.count(word) > 1:
                logger.warning("addtask contained multiple '%s' keywords, only one is supported" % word)

        # Check and warn for having task with expression as part of task name
        for te in taskexpression:
            if any( ( "%s_" % keyword ) in te for keyword in bb.data_smart.__setvar_keyword__ ):
                raise ParseError("Task name '%s' contains a keyword which is not recommended/supported.\nPlease rename the task not to include the keyword.\n%s" % (te, ("\n".join(map(str, bb.data_smart.__setvar_keyword__)))), fn)

        if tasks is not None:
            ast.handleAddTask(statements, fn, lineno, tasks, before, after)
        ast.handleAddTask(statements, fn, lineno, m)
        return

    m = __deltask_regexp__.match(s)
    if m:
        task = m.group(1)
        if task is not None:
            ast.handleDelTask(statements, fn, lineno, task)
        ast.handleDelTask(statements, fn, lineno, m)
        return

    m = __addhandler_regexp__.match(s)

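The split-based parser above accepts several task names per addtask line and defers evaluation, where the old anchored regexp captured exactly one ?P<func> name. A standalone trace of the new logic:

    # Trace of the split-based addtask parsing shown in feeder().
    import re

    addtask_re = re.compile(r"addtask\s+([^#\n]+)(?P<comment>#.*|.*?)")

    s = "addtask compile before do_build after do_configure"
    m = addtask_re.match(s)

    tasks = m.group(1).split(" before ")[0].split(" after ")[0]

    after = ""
    for exp in m.group(1).split(" before "):
        exp2 = exp.split(" after ")
        if len(exp2) > 1:
            after = after + " ".join(exp2[1:])

    before = ""
    for exp in m.group(1).split(" after "):
        exp2 = exp.split(" before ")
        if len(exp2) > 1:
            before = before + " ".join(exp2[1:])

    assert (tasks, before, after) == ("compile", "do_build", "do_configure")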
@@ -154,7 +154,6 @@ class SQLTable(collections.abc.MutableMapping):

    def __exit__(self, *excinfo):
        self.connection.__exit__(*excinfo)
        self.connection.close()

    @_Decorators.retry()
    @_Decorators.transaction

@@ -14,7 +14,6 @@ import os
import sys
import stat
import errno
import itertools
import logging
import re
import bb
@@ -1274,41 +1273,27 @@ class RunQueueData:

        bb.parse.siggen.set_setscene_tasks(self.runq_setscene_tids)

        starttime = time.time()
        lasttime = starttime

        # Iterate over the task list and call into the siggen code
        dealtwith = set()
        todeal = set(self.runtaskentries)
        while todeal:
            ready = set()
            for tid in todeal.copy():
                if not (self.runtaskentries[tid].depends - dealtwith):
                    self.runtaskentries[tid].taskhash_deps = bb.parse.siggen.prep_taskhash(tid, self.runtaskentries[tid].depends, self.dataCaches)
                    # get_taskhash for a given tid *must* be called before get_unihash* below
                    self.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(tid, self.runtaskentries[tid].depends, self.dataCaches)
                    ready.add(tid)
            unihashes = bb.parse.siggen.get_unihashes(ready)
            for tid in ready:
                dealtwith.add(tid)
                todeal.remove(tid)
                self.runtaskentries[tid].unihash = unihashes[tid]

            bb.event.check_for_interrupts(self.cooker.data)

            if time.time() > (lasttime + 30):
                lasttime = time.time()
                hashequiv_logger.verbose("Initial setup loop progress: %s of %s in %s" % (len(todeal), len(self.runtaskentries), lasttime - starttime))

        endtime = time.time()
        if (endtime-starttime > 60):
            hashequiv_logger.verbose("Initial setup loop took: %s" % (endtime-starttime))
                    dealtwith.add(tid)
                    todeal.remove(tid)
                    self.prepare_task_hash(tid)
                    bb.event.check_for_interrupts(self.cooker.data)

        bb.parse.siggen.writeout_file_checksum_cache()

        #self.dump_data()
        return len(self.runtaskentries)

    def prepare_task_hash(self, tid):
        bb.parse.siggen.prep_taskhash(tid, self.runtaskentries[tid].depends, self.dataCaches)
        self.runtaskentries[tid].hash = bb.parse.siggen.get_taskhash(tid, self.runtaskentries[tid].depends, self.dataCaches)
        self.runtaskentries[tid].unihash = bb.parse.siggen.get_unihash(tid)

    def dump_data(self):
        """
        Dump some debug information on the internal data structures
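The reworked loop batches tasks level by level: every task whose dependencies are already hashed is collected into ready, and one get_unihashes() round trip then covers the whole batch, instead of a per-task get_unihash() query. The scheduling skeleton with a toy graph and a fake batch query:

    # Skeleton of the level-batched processing used above.
    def batch_query(batch):
        return {tid: f"unihash-{tid}" for tid in batch}   # placeholder round trip

    depends = {"a": set(), "b": {"a"}, "c": {"a"}, "d": {"b", "c"}}

    dealtwith, todeal, unihashes = set(), set(depends), {}
    while todeal:
        # Everything whose dependencies are all handled forms one batch.
        ready = {tid for tid in todeal if not (depends[tid] - dealtwith)}
        unihashes.update(batch_query(ready))              # one round trip per level
        dealtwith |= ready
        todeal -= ready

    assert set(unihashes) == {"a", "b", "c", "d"}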
@@ -2190,20 +2175,12 @@ class RunQueueExecute:
        if not hasattr(self, "sorted_setscene_tids"):
            # Don't want to sort this set every execution
            self.sorted_setscene_tids = sorted(self.rqdata.runq_setscene_tids)
            # Resume looping where we left off when we returned to feed the mainloop
            self.setscene_tids_generator = itertools.cycle(self.rqdata.runq_setscene_tids)

        task = None
        if not self.sqdone and self.can_start_task():
            loopcount = 0
            # Find the next setscene to run, exit the loop when we've processed all tids or found something to execute
            while loopcount < len(self.rqdata.runq_setscene_tids):
                loopcount += 1
                nexttask = next(self.setscene_tids_generator)
            # Find the next setscene to run
            for nexttask in self.sorted_setscene_tids:
                if nexttask in self.sq_buildable and nexttask not in self.sq_running and self.sqdata.stamps[nexttask] not in self.build_stamps.values() and nexttask not in self.sq_harddep_deferred:
                    if nexttask in self.sq_deferred and self.sq_deferred[nexttask] not in self.runq_complete:
                        # Skip deferred tasks quickly before the 'expensive' tests below - this is key to performant multiconfig builds
                        continue
                    if nexttask not in self.sqdata.unskippable and self.sqdata.sq_revdeps[nexttask] and \
                            nexttask not in self.sq_needed_harddeps and \
                            self.sqdata.sq_revdeps[nexttask].issubset(self.scenequeue_covered) and \
@@ -2233,7 +2210,8 @@ class RunQueueExecute:
                        if t in self.runq_running and t not in self.runq_complete:
                            continue
                    if nexttask in self.sq_deferred:
                        # Deferred tasks that were still deferred were skipped above so we now need to process
                        if self.sq_deferred[nexttask] not in self.runq_complete:
                            continue
                        logger.debug("Task %s no longer deferred" % nexttask)
                        del self.sq_deferred[nexttask]
                        valid = self.rq.validate_hashes(set([nexttask]), self.cooker.data, 0, False, summary=False)
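itertools.cycle lets the scheduler resume its scan where the previous call stopped instead of rescanning the sorted list from the front each time; the loopcount bound still guarantees each tid is inspected at most once per call. The resume behaviour in isolation:

    # How itertools.cycle preserves scan position across calls.
    import itertools

    tids = ["a", "b", "c"]
    gen = itertools.cycle(tids)

    def scan(gen, count):
        return [next(gen) for _ in range(count)]

    assert scan(gen, 2) == ["a", "b"]
    # A later call resumes at "c" rather than restarting at "a".
    assert scan(gen, 2) == ["c", "a"]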
@@ -2460,17 +2438,14 @@ class RunQueueExecute:
            taskdepdata_cache = {}
            for task in self.rqdata.runtaskentries:
                (mc, fn, taskname, taskfn) = split_tid_mcfn(task)
                taskdepdata_cache[task] = bb.TaskData(
                    pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn],
                    taskname = taskname,
                    fn = fn,
                    deps = self.filtermcdeps(task, mc, self.rqdata.runtaskentries[task].depends),
                    provides = self.rqdata.dataCaches[mc].fn_provides[taskfn],
                    taskhash = self.rqdata.runtaskentries[task].hash,
                    unihash = self.rqdata.runtaskentries[task].unihash,
                    hashfn = self.rqdata.dataCaches[mc].hashfn[taskfn],
                    taskhash_deps = self.rqdata.runtaskentries[task].taskhash_deps,
                )
                pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
                deps = self.rqdata.runtaskentries[task].depends
                provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
                taskhash = self.rqdata.runtaskentries[task].hash
                unihash = self.rqdata.runtaskentries[task].unihash
                deps = self.filtermcdeps(task, mc, deps)
                hashfn = self.rqdata.dataCaches[mc].hashfn[taskfn]
                taskdepdata_cache[task] = [pn, taskname, fn, deps, provides, taskhash, unihash, hashfn]

            self.taskdepdata_cache = taskdepdata_cache

@@ -2485,11 +2460,9 @@ class RunQueueExecute:
        while next:
            additional = []
            for revdep in next:
                self.taskdepdata_cache[revdep] = self.taskdepdata_cache[revdep]._replace(
                    unihash=self.rqdata.runtaskentries[revdep].unihash
                )
                self.taskdepdata_cache[revdep][6] = self.rqdata.runtaskentries[revdep].unihash
                taskdepdata[revdep] = self.taskdepdata_cache[revdep]
                for revdep2 in self.taskdepdata_cache[revdep].deps:
                for revdep2 in self.taskdepdata_cache[revdep][3]:
                    if revdep2 not in taskdepdata:
                        additional.append(revdep2)
            next = additional
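Structurally this swaps positional lists for a named-field record; the ._replace() call implies bb.TaskData is a namedtuple. The two styles side by side, with a reduced stand-in field set:

    # Named record vs positional list for task data; TaskData here is a
    # reduced stand-in, not the actual bb.TaskData definition.
    from collections import namedtuple

    TaskData = namedtuple("TaskData", ("pn", "deps", "unihash"))

    entry = TaskData(pn="zlib", deps={"zlib:do_fetch"}, unihash="old")
    # namedtuples are immutable; _replace returns an updated copy.
    entry = entry._replace(unihash="new")
    assert entry.unihash == "new"

    # The old list form relied on magic indexes instead:
    row = ["zlib", {"zlib:do_fetch"}, "old"]
    row[2] = "new"                      # index 2 == unihash, by convention only
    assert row[2] == entry.unihash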
@@ -2558,6 +2531,9 @@ class RunQueueExecute:
                self.rqdata.runtaskentries[hashtid].unihash = unihash
                bb.parse.siggen.set_unihash(hashtid, unihash)
                toprocess.add(hashtid)
        if torehash:
            # Need to save after set_unihash above
            bb.parse.siggen.save_unitaskhashes()

        # Work out all tasks which depend upon these
        total = set()
@@ -2580,28 +2556,17 @@ class RunQueueExecute:
            elif self.rqdata.runtaskentries[p].depends.isdisjoint(total):
                next.add(p)

        starttime = time.time()
        lasttime = starttime

        # When an item doesn't have dependencies in total, we can process it. Drop items from total when handled
        while next:
            current = next.copy()
            next = set()
            ready = {}
            for tid in current:
                if self.rqdata.runtaskentries[p].depends and not self.rqdata.runtaskentries[tid].depends.isdisjoint(total):
                    continue
                # get_taskhash for a given tid *must* be called before get_unihash* below
                ready[tid] = bb.parse.siggen.get_taskhash(tid, self.rqdata.runtaskentries[tid].depends, self.rqdata.dataCaches)

            unihashes = bb.parse.siggen.get_unihashes(ready.keys())

            for tid in ready:
                orighash = self.rqdata.runtaskentries[tid].hash
                newhash = ready[tid]
                newhash = bb.parse.siggen.get_taskhash(tid, self.rqdata.runtaskentries[tid].depends, self.rqdata.dataCaches)
                origuni = self.rqdata.runtaskentries[tid].unihash
                newuni = unihashes[tid]

                newuni = bb.parse.siggen.get_unihash(tid)
                # FIXME, need to check it can come from sstate at all for determinism?
                remapped = False
                if newuni == origuni:
@@ -2622,15 +2587,6 @@ class RunQueueExecute:
                    next |= self.rqdata.runtaskentries[tid].revdeps
                    total.remove(tid)
                    next.intersection_update(total)
            bb.event.check_for_interrupts(self.cooker.data)

            if time.time() > (lasttime + 30):
                lasttime = time.time()
                hashequiv_logger.verbose("Rehash loop slow progress: %s in %s" % (len(total), lasttime - starttime))

        endtime = time.time()
        if (endtime-starttime > 60):
            hashequiv_logger.verbose("Rehash loop took more than 60s: %s" % (endtime-starttime))

        if changed:
            for mc in self.rq.worker:
@@ -2756,12 +2712,8 @@ class RunQueueExecute:
                        logger.debug2("%s was unavailable and is a hard dependency of %s so skipping" % (task, dep))
                        self.sq_task_failoutright(dep)
                        continue

            # For performance, only compute allcovered once if needed
            if self.sqdata.sq_deps[task]:
                allcovered = self.scenequeue_covered | self.scenequeue_notcovered
                for dep in sorted(self.sqdata.sq_deps[task]):
                    if self.sqdata.sq_revdeps[dep].issubset(allcovered):
                    if self.sqdata.sq_revdeps[dep].issubset(self.scenequeue_covered | self.scenequeue_notcovered):
                        if dep not in self.sq_buildable:
                            self.sq_buildable.add(dep)

@@ -2854,19 +2806,13 @@ class RunQueueExecute:
            additional = []
            for revdep in next:
                (mc, fn, taskname, taskfn) = split_tid_mcfn(revdep)
                pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn]
                deps = getsetscenedeps(revdep)

                taskdepdata[revdep] = bb.TaskData(
                    pn = self.rqdata.dataCaches[mc].pkg_fn[taskfn],
                    taskname = taskname,
                    fn = fn,
                    deps = deps,
                    provides = self.rqdata.dataCaches[mc].fn_provides[taskfn],
                    taskhash = self.rqdata.runtaskentries[revdep].hash,
                    unihash = self.rqdata.runtaskentries[revdep].unihash,
                    hashfn = self.rqdata.dataCaches[mc].hashfn[taskfn],
                    taskhash_deps = self.rqdata.runtaskentries[revdep].taskhash_deps,
                )
                provides = self.rqdata.dataCaches[mc].fn_provides[taskfn]
                taskhash = self.rqdata.runtaskentries[revdep].hash
                unihash = self.rqdata.runtaskentries[revdep].unihash
                hashfn = self.rqdata.dataCaches[mc].hashfn[taskfn]
                taskdepdata[revdep] = [pn, taskname, fn, deps, provides, taskhash, unihash, hashfn]
                for revdep2 in deps:
                    if revdep2 not in taskdepdata:
                        additional.append(revdep2)

@@ -201,6 +201,9 @@ class SignatureGenerator(object):
    def save_unitaskhashes(self):
        return

    def copy_unitaskhashes(self, targetdir):
        return

    def set_setscene_tasks(self, setscene_tasks):
        return

@@ -378,7 +381,7 @@ class SignatureGeneratorBasic(SignatureGenerator):
            self.taints[tid] = taint
            logger.warning("%s is tainted from a forced run" % tid)

        return set(dep for _, dep in self.runtaskdeps[tid])
        return

    def get_taskhash(self, tid, deps, dataCaches):

@@ -415,6 +418,9 @@ class SignatureGeneratorBasic(SignatureGenerator):
    def save_unitaskhashes(self):
        self.unihash_cache.save(self.unitaskhashes)

    def copy_unitaskhashes(self, targetdir):
        self.unihash_cache.copyfile(targetdir)

    def dump_sigtask(self, mcfn, task, stampbase, runtime):
        tid = mcfn + ":" + task
        mc = bb.runqueue.mc_from_tid(mcfn)
@@ -534,7 +540,7 @@ class SignatureGeneratorUniHashMixIn(object):
    def __init__(self, data):
        self.extramethod = {}
        # NOTE: The cache only tracks hashes that exist. Hashes that don't
        # exist are always queried from the server since it is possible for
        # exist are always queries from the server since it is possible for
        # hashes to appear over time, but much less likely for them to
        # disappear
        self.unihash_exists_cache = set()
@@ -552,11 +558,11 @@ class SignatureGeneratorUniHashMixIn(object):
        super().__init__(data)

    def get_taskdata(self):
        return (self.server, self.method, self.extramethod, self.username, self.password, self.env) + super().get_taskdata()
        return (self.server, self.method, self.extramethod, self.max_parallel, self.username, self.password, self.env) + super().get_taskdata()

    def set_taskdata(self, data):
        self.server, self.method, self.extramethod, self.username, self.password, self.env = data[:6]
        super().set_taskdata(data[6:])
        self.server, self.method, self.extramethod, self.max_parallel, self.username, self.password, self.env = data[:7]
        super().set_taskdata(data[7:])

    def get_hashserv_creds(self):
        if self.username and self.password:
@@ -589,6 +595,13 @@ class SignatureGeneratorUniHashMixIn(object):
            self._client = hashserv.create_client(self.server, **self.get_hashserv_creds())
            yield self._client

    @contextmanager
    def client_pool(self):
        with self._client_env():
            if getattr(self, '_client_pool', None) is None:
                self._client_pool = hashserv.client.ClientPool(self.server, self.max_parallel, **self.get_hashserv_creds())
            yield self._client_pool

    def reset(self, data):
        self.__close_clients()
        return super().reset(data)
@@ -665,20 +678,25 @@ class SignatureGeneratorUniHashMixIn(object):
        if len(query) == 0:
            return {}

        query_keys = []
        uncached_query = {}
        result = {}
        for key, unihash in query.items():
            if unihash in self.unihash_exists_cache:
                result[key] = True
            else:
                query_keys.append(key)
                uncached_query[key] = unihash

        if query_keys:
            if self.max_parallel <= 1 or len(uncached_query) <= 1:
                # No parallelism required. Make the query serially with the single client
                with self.client() as client:
                    query_result = client.unihash_exists_batch(query[k] for k in query_keys)
                    uncached_result = {
                        key: client.unihash_exists(value) for key, value in uncached_query.items()
                    }
            else:
                with self.client_pool() as client_pool:
                    uncached_result = client_pool.unihashes_exist(uncached_query)

            for idx, key in enumerate(query_keys):
                exists = query_result[idx]
            for key, exists in uncached_result.items():
                if exists:
                    self.unihash_exists_cache.add(query[key])
                result[key] = exists
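Both versions share one idea: answer what you can from the local exists-cache, then ask the server about the remainder in one batched call, caching only positive answers since hashes can appear over time but rarely disappear. The cache-then-batch skeleton with a faked server round trip:

    # Cache-then-batch lookup skeleton; the server call is faked so the
    # snippet runs standalone.
    def server_exists_batch(hashes):
        return [True for _ in hashes]            # placeholder round trip

    exists_cache = {"deadbeef"}
    query = {"task1": "deadbeef", "task2": "cafef00d"}

    result = {k: True for k, h in query.items() if h in exists_cache}
    uncached = {k: h for k, h in query.items() if h not in exists_cache}

    if uncached:
        keys = list(uncached)
        for key, exists in zip(keys, server_exists_batch(uncached.values())):
            if exists:
                exists_cache.add(uncached[key])  # only positive answers cached
            result[key] = exists

    assert result == {"task1": True, "task2": True}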
@@ -694,24 +712,29 @@ class SignatureGeneratorUniHashMixIn(object):
        unihash
        """
        result = {}
        query_tids = []
        queries = {}
        query_result = {}

        for tid in tids:
            unihash = self.get_cached_unihash(tid)
            if unihash:
                result[tid] = unihash
            else:
                query_tids.append(tid)
                queries[tid] = (self._get_method(tid), self.taskhash[tid])

        if query_tids:
            unihashes = []
            try:
                with self.client() as client:
                    unihashes = client.get_unihash_batch((self._get_method(tid), self.taskhash[tid]) for tid in query_tids)
            except (ConnectionError, FileNotFoundError) as e:
                bb.warn('Error contacting Hash Equivalence Server %s: %s' % (self.server, str(e)))
        if len(queries) == 0:
            return result

        for idx, tid in enumerate(query_tids):
        if self.max_parallel <= 1 or len(queries) <= 1:
            # No parallelism required. Make the query serially with the single client
            with self.client() as client:
                for tid, args in queries.items():
                    query_result[tid] = client.get_unihash(*args)
        else:
            with self.client_pool() as client_pool:
                query_result = client_pool.get_unihashes(queries)

        for tid, unihash in query_result.items():
            # In the absence of being able to discover a unique hash from the
            # server, make it be equivalent to the taskhash. The unique "hash" only
            # really needs to be a unique string (not even necessarily a hash), but
@@ -726,9 +749,7 @@ class SignatureGeneratorUniHashMixIn(object):
            # to the server, there is a better chance that they will agree on
            # the unique hash.
            taskhash = self.taskhash[tid]

            if unihashes and unihashes[idx]:
                unihash = unihashes[idx]
            if unihash:
                # A unique hash equal to the taskhash is not very interesting,
                # so it is reported at debug level 2. If they differ, that
                # is much more interesting, so it is reported at debug level 1
@@ -737,6 +758,7 @@ class SignatureGeneratorUniHashMixIn(object):
                hashequiv_logger.debug2('No reported unihash for %s:%s from %s' % (tid, taskhash, self.server))
                unihash = taskhash


            self.set_unihash(tid, unihash)
            self.unihash[tid] = unihash
            result[tid] = unihash
@@ -817,7 +839,7 @@ class SignatureGeneratorUniHashMixIn(object):
                    d.setVar('BB_UNIHASH', new_unihash)
                else:
                    hashequiv_logger.debug('Reported task %s as unihash %s to %s' % (taskhash, unihash, self.server))
        except (ConnectionError, FileNotFoundError) as e:
        except ConnectionError as e:
            bb.warn('Error contacting Hash Equivalence Server %s: %s' % (self.server, str(e)))
        finally:
            if sigfile:
@@ -859,7 +881,7 @@ class SignatureGeneratorUniHashMixIn(object):
                # TODO: What to do here?
                hashequiv_logger.verbose('Task %s unihash reported as unwanted hash %s' % (tid, finalunihash))

        except (ConnectionError, FileNotFoundError) as e:
        except ConnectionError as e:
            bb.warn('Error contacting Hash Equivalence Server %s: %s' % (self.server, str(e)))

        return False
@@ -873,12 +895,13 @@ class SignatureGeneratorTestEquivHash(SignatureGeneratorUniHashMixIn, SignatureG
        super().init_rundepcheck(data)
        self.server = data.getVar('BB_HASHSERVE')
        self.method = "sstate_output_hash"
        self.max_parallel = 1

def clean_checksum_file_path(file_checksum_tuple):
    f, cs = file_checksum_tuple
    if "/./" in f:
        return "./" + f.split("/./")[1]
    return os.path.basename(f)
    return f

def dump_this_task(outfile, d):
    import bb.parse

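clean_checksum_file_path() keeps signatures independent of the build location: a '/./' marker separates the absolute prefix from the portion that should feed the hash, and anything unmarked is reduced to its basename (the older code returned the path unchanged). Its behaviour standalone:

    # Behaviour of clean_checksum_file_path() above.
    import os

    def clean_checksum_file_path(file_checksum_tuple):
        f, cs = file_checksum_tuple
        if "/./" in f:
            return "./" + f.split("/./")[1]
        return os.path.basename(f)

    assert clean_checksum_file_path(("/work/src/./lib/util.c", "abc")) == "./lib/util.c"
    # Without the marker only the basename survives.
    assert clean_checksum_file_path(("/work/src/util.c", "abc")) == "util.c"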
@@ -106,46 +106,6 @@ ${D}${libdir}/pkgconfig/*.pc
        self.parseExpression("foo=$(echo bar)")
        self.assertExecs(set(["echo"]))

    def test_assign_subshell_expansion_quotes(self):
        self.parseExpression('foo="$(echo bar)"')
        self.assertExecs(set(["echo"]))

    def test_assign_subshell_expansion_nested(self):
        self.parseExpression('foo="$(func1 "$(func2 bar$(func3))")"')
        self.assertExecs(set(["func1", "func2", "func3"]))

    def test_assign_subshell_expansion_multiple(self):
        self.parseExpression('foo="$(func1 "$(func2)") $(func3)"')
        self.assertExecs(set(["func1", "func2", "func3"]))

    def test_assign_subshell_expansion_escaped_quotes(self):
        self.parseExpression('foo="\\"fo\\"o$(func1)"')
        self.assertExecs(set(["func1"]))

    def test_assign_subshell_expansion_empty(self):
        self.parseExpression('foo="bar$()foo"')
        self.assertExecs(set())

    def test_assign_subshell_backticks(self):
        self.parseExpression("foo=`echo bar`")
        self.assertExecs(set(["echo"]))

    def test_assign_subshell_backticks_quotes(self):
        self.parseExpression('foo="`echo bar`"')
        self.assertExecs(set(["echo"]))

    def test_assign_subshell_backticks_multiple(self):
        self.parseExpression('foo="`func1 bar` `func2`"')
        self.assertExecs(set(["func1", "func2"]))

    def test_assign_subshell_backticks_escaped_quotes(self):
        self.parseExpression('foo="\\"fo\\"o`func1`"')
        self.assertExecs(set(["func1"]))

    def test_assign_subshell_backticks_empty(self):
        self.parseExpression('foo="bar``foo"')
        self.assertExecs(set())

    def test_shell_unexpanded(self):
        self.setEmptyVars(["QT_BASE_NAME"])
        self.parseExpression('echo "${QT_BASE_NAME}"')

@@ -511,8 +511,7 @@ class MirrorUriTest(FetcherTest):
mirrorvar = "http://.*/.* file:///somepath/downloads/ " \
"git://someserver.org/bitbake git://git.openembedded.org/bitbake " \
"https://.*/.* file:///someotherpath/downloads/ " \
"http://.*/.* file:///someotherpath/downloads/ " \
"svn://svn.server1.com/ svn://svn.server2.com/"
"http://.*/.* file:///someotherpath/downloads/"

def test_urireplace(self):
self.d.setVar("FILESPATH", ".")
@@ -536,13 +535,6 @@ class MirrorUriTest(FetcherTest):
uris, uds = bb.fetch2.build_mirroruris(fetcher, mirrors, self.d)
self.assertEqual(uris, ['file:///someotherpath/downloads/bitbake-1.0.tar.gz'])

def test_urilistsvn(self):
# Catch svn:// -> svn:// bug
fetcher = bb.fetch.FetchData("svn://svn.server1.com/isource/svnroot/reponame/tags/tagname;module=path_in_tagnamefolder;protocol=https;rev=2", self.d)
mirrors = bb.fetch2.mirror_from_string(self.mirrorvar)
uris, uds = bb.fetch2.build_mirroruris(fetcher, mirrors, self.d)
self.assertEqual(uris, ['svn://svn.server2.com/isource/svnroot/reponame/tags/tagname;module=path_in_tagnamefolder;protocol=https;rev=2'])

def test_mirror_of_mirror(self):
# Test if mirror of a mirror works
mirrorvar = self.mirrorvar + " http://.*/.* http://otherdownloads.yoctoproject.org/downloads/"
@@ -1427,12 +1419,12 @@ class FetchLatestVersionTest(FetcherTest):
("dtc", "git://git.yoctoproject.org/bbfetchtests-dtc.git;branch=master;protocol=https", "65cc4d2748a2c2e6f27f1cf39e07a5dbabd80ebf", "", "")
: "1.4.0",
# combination version pattern
("sysprof", "git://git.yoctoproject.org/sysprof.git;protocol=https;branch=master", "cd44ee6644c3641507fb53b8a2a69137f2971219", "", "")
("sysprof", "git://gitlab.gnome.org/GNOME/sysprof.git;protocol=https;branch=master", "cd44ee6644c3641507fb53b8a2a69137f2971219", "", "")
: "1.2.0",
("u-boot-mkimage", "git://source.denx.de/u-boot/u-boot.git;branch=master;protocol=https", "62c175fbb8a0f9a926c88294ea9f7e88eb898f6c", "", "")
("u-boot-mkimage", "git://git.denx.de/u-boot.git;branch=master;protocol=git", "62c175fbb8a0f9a926c88294ea9f7e88eb898f6c", "", "")
: "2014.01",
# version pattern "yyyymmdd"
("mobile-broadband-provider-info", "git://git.yoctoproject.org/mobile-broadband-provider-info.git;protocol=https;branch=master", "4ed19e11c2975105b71b956440acdb25d46a347d", "", "")
("mobile-broadband-provider-info", "git://gitlab.gnome.org/GNOME/mobile-broadband-provider-info.git;protocol=https;branch=master", "4ed19e11c2975105b71b956440acdb25d46a347d", "", "")
: "20120614",
# packages with a valid UPSTREAM_CHECK_GITTAGREGEX
# mirror of git://anongit.freedesktop.org/xorg/driver/xf86-video-omap since network issues interfered with testing
@@ -1501,12 +1493,6 @@ class FetchLatestVersionTest(FetcherTest):
: "2.8",
}

test_crate_uris = {
# basic example; version pattern "A.B.C+cargo-D.E.F"
("cargo-c", "crate://crates.io/cargo-c/0.9.18+cargo-0.69")
: "0.9.29"
}

@skipIfNoNetwork()
def test_git_latest_versionstring(self):
for k, v in self.test_git_uris.items():
@@ -1525,7 +1511,7 @@ class FetchLatestVersionTest(FetcherTest):

def test_wget_latest_versionstring(self):
testdata = os.path.dirname(os.path.abspath(__file__)) + "/fetch-testdata"
server = HTTPService(testdata, host="127.0.0.1")
server = HTTPService(testdata)
server.start()
port = server.port
try:
@@ -1533,10 +1519,10 @@ class FetchLatestVersionTest(FetcherTest):
self.d.setVar("PN", k[0])
checkuri = ""
if k[2]:
checkuri = "http://127.0.0.1:%s/" % port + k[2]
checkuri = "http://localhost:%s/" % port + k[2]
self.d.setVar("UPSTREAM_CHECK_URI", checkuri)
self.d.setVar("UPSTREAM_CHECK_REGEX", k[3])
url = "http://127.0.0.1:%s/" % port + k[1]
url = "http://localhost:%s/" % port + k[1]
ud = bb.fetch2.FetchData(url, self.d)
pupver = ud.method.latest_versionstring(ud, self.d)
verstring = pupver[0]
@@ -1546,16 +1532,6 @@ class FetchLatestVersionTest(FetcherTest):
finally:
server.stop()

@skipIfNoNetwork()
def test_crate_latest_versionstring(self):
for k, v in self.test_crate_uris.items():
self.d.setVar("PN", k[0])
ud = bb.fetch2.FetchData(k[1], self.d)
pupver = ud.method.latest_versionstring(ud, self.d)
verstring = pupver[0]
self.assertTrue(verstring, msg="Could not find upstream version for %s" % k[0])
r = bb.utils.vercmp_string(v, verstring)
self.assertTrue(r == -1 or r == 0, msg="Package %s, version: %s <= %s" % (k[0], v, verstring))

class FetchCheckStatusTest(FetcherTest):
test_wget_uris = ["https://downloads.yoctoproject.org/releases/sato/sato-engine-0.1.tar.gz",
@@ -1739,8 +1715,6 @@ class GitShallowTest(FetcherTest):
if cwd is None:
cwd = self.gitdir
actual_refs = self.git(['for-each-ref', '--format=%(refname)'], cwd=cwd).splitlines()
# Resolve references into the same format as the comparison (needed by git 2.48 onwards)
actual_refs = self.git(['rev-parse', '--symbolic-full-name'] + actual_refs, cwd=cwd).splitlines()
full_expected = self.git(['rev-parse', '--symbolic-full-name'] + expected_refs, cwd=cwd).splitlines()
self.assertEqual(sorted(set(full_expected)), sorted(set(actual_refs)))

@@ -2036,9 +2010,9 @@ class GitShallowTest(FetcherTest):
self.add_empty_file('b')
self.git('checkout -b a_branch', cwd=self.srcdir)
self.add_empty_file('c')
self.git('tag v0.0 HEAD', cwd=self.srcdir)
self.add_empty_file('d')
self.git('checkout master', cwd=self.srcdir)
self.git('tag v0.0 a_branch', cwd=self.srcdir)
self.add_empty_file('e')
self.git('merge --no-ff --no-edit a_branch', cwd=self.srcdir)
self.add_empty_file('f')
@@ -2054,7 +2028,7 @@ class GitShallowTest(FetcherTest):

self.fetch_shallow(uri)

self.assertRevCount(4)
self.assertRevCount(5)
self.assertRefs(['master', 'origin/master', 'origin/a_branch'])

def test_shallow_multi_one_uri_depths(self):
@@ -2201,7 +2175,7 @@ class GitShallowTest(FetcherTest):

self.fetch_shallow()

self.assertRevCount(2)
self.assertRevCount(5)

def test_shallow_invalid_revs(self):
self.add_empty_file('a')
@@ -2220,10 +2194,7 @@ class GitShallowTest(FetcherTest):
self.git('tag v0.0 master', cwd=self.srcdir)
self.d.setVar('BB_GIT_SHALLOW_DEPTH', '0')
self.d.setVar('BB_GIT_SHALLOW_REVS', 'v0.0')

with self.assertRaises(bb.fetch2.FetchError), self.assertLogs("BitBake.Fetcher", level="ERROR") as cm:
self.fetch_shallow()
self.assertIn("fatal: no commits selected for shallow requests", cm.output[0])
self.fetch_shallow()

def test_shallow_fetch_missing_revs_fails(self):
self.add_empty_file('a')
@@ -2254,7 +2225,7 @@ class GitShallowTest(FetcherTest):
revs = len(self.git('rev-list master').splitlines())
self.assertNotEqual(orig_revs, revs)
self.assertRefs(['master', 'origin/master'])
self.assertRevCount(orig_revs - 1760)
self.assertRevCount(orig_revs - 1758)

def test_that_unpack_throws_an_error_when_the_git_clone_nor_shallow_tarball_exist(self):
self.add_empty_file('a')
@@ -3392,212 +3363,3 @@ class FetchPremirroronlyBrokenTarball(FetcherTest):
fetcher.download()
output = "".join(logs.output)
self.assertFalse(" not a git repository (or any parent up to mount point /)" in output)

class GoModTest(FetcherTest):

@skipIfNoNetwork()
def test_gomod_url(self):
urls = ['gomod://github.com/Azure/azure-sdk-for-go/sdk/storage/azblob;version=v1.0.0;'
'sha256sum=9bb69aea32f1d59711701f9562d66432c9c0374205e5009d1d1a62f03fb4fdad']

fetcher = bb.fetch2.Fetch(urls, self.d)
ud = fetcher.ud[urls[0]]
self.assertEqual(ud.url, 'https://proxy.golang.org/github.com/%21azure/azure-sdk-for-go/sdk/storage/azblob/%40v/v1.0.0.zip')
self.assertNotIn('name', ud.parm)

fetcher.download()
fetcher.unpack(self.unpackdir)
downloaddir = os.path.join(self.unpackdir, 'pkg/mod/cache/download')
self.assertTrue(os.path.exists(os.path.join(downloaddir, 'github.com/!azure/azure-sdk-for-go/sdk/storage/azblob/@v/v1.0.0.zip')))
self.assertTrue(os.path.exists(os.path.join(downloaddir, 'github.com/!azure/azure-sdk-for-go/sdk/storage/azblob/@v/v1.0.0.mod')))
self.assertEqual(bb.utils.sha256_file(os.path.join(downloaddir, 'github.com/!azure/azure-sdk-for-go/sdk/storage/azblob/@v/v1.0.0.mod')),
'7873b8544842329b4f385a3aa6cf82cc2bc8defb41a04fa5291c35fd5900e873')

@skipIfNoNetwork()
def test_gomod_url_go_mod_only(self):
urls = ['gomod://github.com/Azure/azure-sdk-for-go/sdk/storage/azblob;version=v1.0.0;mod=1;'
'sha256sum=7873b8544842329b4f385a3aa6cf82cc2bc8defb41a04fa5291c35fd5900e873']

fetcher = bb.fetch2.Fetch(urls, self.d)
ud = fetcher.ud[urls[0]]
self.assertEqual(ud.url, 'https://proxy.golang.org/github.com/%21azure/azure-sdk-for-go/sdk/storage/azblob/%40v/v1.0.0.mod')
self.assertNotIn('name', ud.parm)

fetcher.download()
fetcher.unpack(self.unpackdir)
downloaddir = os.path.join(self.unpackdir, 'pkg/mod/cache/download')
self.assertTrue(os.path.exists(os.path.join(downloaddir, 'github.com/!azure/azure-sdk-for-go/sdk/storage/azblob/@v/v1.0.0.mod')))

@skipIfNoNetwork()
def test_gomod_url_sha256sum_varflag(self):
urls = ['gomod://gopkg.in/ini.v1;version=v1.67.0']
self.d.setVarFlag('SRC_URI', 'gopkg.in/ini.v1@v1.67.0.sha256sum', 'bd845dfc762a87a56e5a32a07770dc83e86976db7705d7f89c5dbafdc60b06c6')

fetcher = bb.fetch2.Fetch(urls, self.d)
ud = fetcher.ud[urls[0]]
self.assertEqual(ud.url, 'https://proxy.golang.org/gopkg.in/ini.v1/%40v/v1.67.0.zip')
self.assertEqual(ud.parm['name'], 'gopkg.in/ini.v1@v1.67.0')

fetcher.download()
fetcher.unpack(self.unpackdir)
downloaddir = os.path.join(self.unpackdir, 'pkg/mod/cache/download')
self.assertTrue(os.path.exists(os.path.join(downloaddir, 'gopkg.in/ini.v1/@v/v1.67.0.zip')))
self.assertTrue(os.path.exists(os.path.join(downloaddir, 'gopkg.in/ini.v1/@v/v1.67.0.mod')))
self.assertEqual(bb.utils.sha256_file(os.path.join(downloaddir, 'gopkg.in/ini.v1/@v/v1.67.0.mod')),
'13aedd85db8e555104108e0e613bb7e4d1242af7f27c15423dd9ab63b60b72a1')

@skipIfNoNetwork()
def test_gomod_url_no_go_mod_in_module(self):
urls = ['gomod://gopkg.in/ini.v1;version=v1.67.0;'
'sha256sum=bd845dfc762a87a56e5a32a07770dc83e86976db7705d7f89c5dbafdc60b06c6']

fetcher = bb.fetch2.Fetch(urls, self.d)
ud = fetcher.ud[urls[0]]
self.assertEqual(ud.url, 'https://proxy.golang.org/gopkg.in/ini.v1/%40v/v1.67.0.zip')
self.assertNotIn('name', ud.parm)

fetcher.download()
fetcher.unpack(self.unpackdir)
downloaddir = os.path.join(self.unpackdir, 'pkg/mod/cache/download')
self.assertTrue(os.path.exists(os.path.join(downloaddir, 'gopkg.in/ini.v1/@v/v1.67.0.zip')))
self.assertTrue(os.path.exists(os.path.join(downloaddir, 'gopkg.in/ini.v1/@v/v1.67.0.mod')))
self.assertEqual(bb.utils.sha256_file(os.path.join(downloaddir, 'gopkg.in/ini.v1/@v/v1.67.0.mod')),
'13aedd85db8e555104108e0e613bb7e4d1242af7f27c15423dd9ab63b60b72a1')

@skipIfNoNetwork()
def test_gomod_url_host_only(self):
urls = ['gomod://go.opencensus.io;version=v0.24.0;'
'sha256sum=203a767d7f8e7c1ebe5588220ad168d1e15b14ae70a636de7ca9a4a88a7e0d0c']

fetcher = bb.fetch2.Fetch(urls, self.d)
ud = fetcher.ud[urls[0]]
self.assertEqual(ud.url, 'https://proxy.golang.org/go.opencensus.io/%40v/v0.24.0.zip')
self.assertNotIn('name', ud.parm)

fetcher.download()
fetcher.unpack(self.unpackdir)
downloaddir = os.path.join(self.unpackdir, 'pkg/mod/cache/download')
self.assertTrue(os.path.exists(os.path.join(downloaddir, 'go.opencensus.io/@v/v0.24.0.zip')))
self.assertTrue(os.path.exists(os.path.join(downloaddir, 'go.opencensus.io/@v/v0.24.0.mod')))
self.assertEqual(bb.utils.sha256_file(os.path.join(downloaddir, 'go.opencensus.io/@v/v0.24.0.mod')),
'0dc9ccc660ad21cebaffd548f2cc6efa27891c68b4fbc1f8a3893b00f1acec96')

class GoModGitTest(FetcherTest):

@skipIfNoNetwork()
def test_gomodgit_url_repo(self):
urls = ['gomodgit://golang.org/x/net;version=v0.9.0;'
'repo=go.googlesource.com/net;'
'srcrev=694cff8668bac64e0864b552bffc280cd27f21b1']

fetcher = bb.fetch2.Fetch(urls, self.d)
ud = fetcher.ud[urls[0]]
self.assertEqual(ud.host, 'go.googlesource.com')
self.assertEqual(ud.path, '/net')
self.assertEqual(ud.names, ['golang.org/x/net@v0.9.0'])
self.assertEqual(self.d.getVar('SRCREV_golang.org/x/net@v0.9.0'), '694cff8668bac64e0864b552bffc280cd27f21b1')

fetcher.download()
self.assertTrue(os.path.exists(ud.localpath))

fetcher.unpack(self.unpackdir)
vcsdir = os.path.join(self.unpackdir, 'pkg/mod/cache/vcs')
self.assertTrue(os.path.exists(os.path.join(vcsdir, 'ed42bd05533fd84ae290a5d33ebd3695a0a2b06131beebd5450825bee8603aca')))
downloaddir = os.path.join(self.unpackdir, 'pkg/mod/cache/download')
self.assertTrue(os.path.exists(os.path.join(downloaddir, 'golang.org/x/net/@v/v0.9.0.zip')))
self.assertTrue(os.path.exists(os.path.join(downloaddir, 'golang.org/x/net/@v/v0.9.0.mod')))
self.assertEqual(bb.utils.sha256_file(os.path.join(downloaddir, 'golang.org/x/net/@v/v0.9.0.mod')),
'c5d6851ede50ec1c001afb763040194b68961bf06997e2605e8bf06dcd2aeb2e')

@skipIfNoNetwork()
def test_gomodgit_url_subdir(self):
urls = ['gomodgit://github.com/Azure/azure-sdk-for-go/sdk/storage/azblob;version=v1.0.0;'
'repo=github.com/Azure/azure-sdk-for-go;subdir=sdk/storage/azblob;'
'srcrev=ec928e0ed34db682b3f783d3739d1c538142e0c3']

fetcher = bb.fetch2.Fetch(urls, self.d)
ud = fetcher.ud[urls[0]]
self.assertEqual(ud.host, 'github.com')
self.assertEqual(ud.path, '/Azure/azure-sdk-for-go')
self.assertEqual(ud.parm['subpath'], 'sdk/storage/azblob')
self.assertEqual(ud.names, ['github.com/Azure/azure-sdk-for-go/sdk/storage/azblob@v1.0.0'])
self.assertEqual(self.d.getVar('SRCREV_github.com/Azure/azure-sdk-for-go/sdk/storage/azblob@v1.0.0'), 'ec928e0ed34db682b3f783d3739d1c538142e0c3')

fetcher.download()
self.assertTrue(os.path.exists(ud.localpath))

fetcher.unpack(self.unpackdir)
vcsdir = os.path.join(self.unpackdir, 'pkg/mod/cache/vcs')
self.assertTrue(os.path.exists(os.path.join(vcsdir, 'd31d6145676ed3066ce573a8198f326dea5be45a43b3d8f41ce7787fd71d66b3')))
downloaddir = os.path.join(self.unpackdir, 'pkg/mod/cache/download')
self.assertTrue(os.path.exists(os.path.join(downloaddir, 'github.com/!azure/azure-sdk-for-go/sdk/storage/azblob/@v/v1.0.0.zip')))
self.assertTrue(os.path.exists(os.path.join(downloaddir, 'github.com/!azure/azure-sdk-for-go/sdk/storage/azblob/@v/v1.0.0.mod')))
self.assertEqual(bb.utils.sha256_file(os.path.join(downloaddir, 'github.com/!azure/azure-sdk-for-go/sdk/storage/azblob/@v/v1.0.0.mod')),
'7873b8544842329b4f385a3aa6cf82cc2bc8defb41a04fa5291c35fd5900e873')

@skipIfNoNetwork()
def test_gomodgit_url_srcrev_var(self):
urls = ['gomodgit://gopkg.in/ini.v1;version=v1.67.0']
self.d.setVar('SRCREV_gopkg.in/ini.v1@v1.67.0', 'b2f570e5b5b844226bbefe6fb521d891f529a951')

fetcher = bb.fetch2.Fetch(urls, self.d)
ud = fetcher.ud[urls[0]]
self.assertEqual(ud.host, 'gopkg.in')
self.assertEqual(ud.path, '/ini.v1')
self.assertEqual(ud.names, ['gopkg.in/ini.v1@v1.67.0'])
self.assertEqual(ud.parm['srcrev'], 'b2f570e5b5b844226bbefe6fb521d891f529a951')

fetcher.download()
fetcher.unpack(self.unpackdir)
vcsdir = os.path.join(self.unpackdir, 'pkg/mod/cache/vcs')
self.assertTrue(os.path.exists(os.path.join(vcsdir, 'b7879a4be9ba8598851b8278b14c4f71a8316be64913298d1639cce6bde59bc3')))
downloaddir = os.path.join(self.unpackdir, 'pkg/mod/cache/download')
self.assertTrue(os.path.exists(os.path.join(downloaddir, 'gopkg.in/ini.v1/@v/v1.67.0.zip')))
self.assertTrue(os.path.exists(os.path.join(downloaddir, 'gopkg.in/ini.v1/@v/v1.67.0.mod')))
self.assertEqual(bb.utils.sha256_file(os.path.join(downloaddir, 'gopkg.in/ini.v1/@v/v1.67.0.mod')),
'13aedd85db8e555104108e0e613bb7e4d1242af7f27c15423dd9ab63b60b72a1')

@skipIfNoNetwork()
def test_gomodgit_url_no_go_mod_in_module(self):
urls = ['gomodgit://gopkg.in/ini.v1;version=v1.67.0;'
'srcrev=b2f570e5b5b844226bbefe6fb521d891f529a951']

fetcher = bb.fetch2.Fetch(urls, self.d)
ud = fetcher.ud[urls[0]]
self.assertEqual(ud.host, 'gopkg.in')
self.assertEqual(ud.path, '/ini.v1')
self.assertEqual(ud.names, ['gopkg.in/ini.v1@v1.67.0'])
self.assertEqual(self.d.getVar('SRCREV_gopkg.in/ini.v1@v1.67.0'), 'b2f570e5b5b844226bbefe6fb521d891f529a951')

fetcher.download()
fetcher.unpack(self.unpackdir)
vcsdir = os.path.join(self.unpackdir, 'pkg/mod/cache/vcs')
self.assertTrue(os.path.exists(os.path.join(vcsdir, 'b7879a4be9ba8598851b8278b14c4f71a8316be64913298d1639cce6bde59bc3')))
downloaddir = os.path.join(self.unpackdir, 'pkg/mod/cache/download')
self.assertTrue(os.path.exists(os.path.join(downloaddir, 'gopkg.in/ini.v1/@v/v1.67.0.zip')))
self.assertTrue(os.path.exists(os.path.join(downloaddir, 'gopkg.in/ini.v1/@v/v1.67.0.mod')))
self.assertEqual(bb.utils.sha256_file(os.path.join(downloaddir, 'gopkg.in/ini.v1/@v/v1.67.0.mod')),
'13aedd85db8e555104108e0e613bb7e4d1242af7f27c15423dd9ab63b60b72a1')

@skipIfNoNetwork()
def test_gomodgit_url_host_only(self):
urls = ['gomodgit://go.opencensus.io;version=v0.24.0;'
'repo=github.com/census-instrumentation/opencensus-go;'
'srcrev=b1a01ee95db0e690d91d7193d037447816fae4c5']

fetcher = bb.fetch2.Fetch(urls, self.d)
ud = fetcher.ud[urls[0]]
self.assertEqual(ud.host, 'github.com')
self.assertEqual(ud.path, '/census-instrumentation/opencensus-go')
self.assertEqual(ud.names, ['go.opencensus.io@v0.24.0'])
self.assertEqual(self.d.getVar('SRCREV_go.opencensus.io@v0.24.0'), 'b1a01ee95db0e690d91d7193d037447816fae4c5')

fetcher.download()
fetcher.unpack(self.unpackdir)
vcsdir = os.path.join(self.unpackdir, 'pkg/mod/cache/vcs')
self.assertTrue(os.path.exists(os.path.join(vcsdir, 'aae3ac7b2122ed3345654e6327855e9682f4a5350d63e93dbcfc51c4419df0e1')))
downloaddir = os.path.join(self.unpackdir, 'pkg/mod/cache/download')
self.assertTrue(os.path.exists(os.path.join(downloaddir, 'go.opencensus.io/@v/v0.24.0.zip')))
self.assertTrue(os.path.exists(os.path.join(downloaddir, 'go.opencensus.io/@v/v0.24.0.mod')))
self.assertEqual(bb.utils.sha256_file(os.path.join(downloaddir, 'go.opencensus.io/@v/v0.24.0.mod')),
'0dc9ccc660ad21cebaffd548f2cc6efa27891c68b4fbc1f8a3893b00f1acec96')

@@ -177,19 +177,7 @@ python () {

addtask_deltask = """
addtask do_patch after do_foo after do_unpack before do_configure before do_compile
addtask do_fetch2 do_patch2

addtask do_myplaintask
addtask do_myplaintask2
deltask do_myplaintask2
addtask do_mytask# comment
addtask do_mytask2 # comment2
addtask do_mytask3
deltask do_mytask3# comment
deltask do_mytask4 # comment2

# Ensure a missing task prefix on after works
addtask do_mytask5 after mytask
addtask do_fetch do_patch

MYVAR = "do_patch"
EMPTYVAR = ""
@@ -197,12 +185,17 @@ deltask do_fetch ${MYVAR} ${EMPTYVAR}
deltask ${EMPTYVAR}
"""
def test_parse_addtask_deltask(self):
import sys

f = self.parsehelper(self.addtask_deltask)
d = bb.parse.handle(f.name, self.d)['']
with self.assertLogs() as logs:
f = self.parsehelper(self.addtask_deltask)
d = bb.parse.handle(f.name, self.d)['']

self.assertEqual(['do_fetch2', 'do_patch2', 'do_myplaintask', 'do_mytask', 'do_mytask2', 'do_mytask5'], d.getVar("__BBTASKS"))
self.assertEqual(['do_mytask'], d.getVarFlag("do_mytask5", "deps"))
output = "".join(logs.output)
self.assertTrue("addtask contained multiple 'before' keywords" in output)
self.assertTrue("addtask contained multiple 'after' keywords" in output)
self.assertTrue('addtask ignored: " do_patch"' in output)
#self.assertTrue('dependent task do_foo for do_patch does not exist' in output)

broken_multiline_comment = """
# First line of comment \\

@@ -188,19 +188,11 @@ class TinfoilCookerAdapter:
self._cache[name] = attrvalue
return attrvalue

class TinfoilSkiplistByMcAdapter:
def __init__(self, tinfoil):
self.tinfoil = tinfoil

def __getitem__(self, mc):
return self.tinfoil.get_skipped_recipes(mc)

def __init__(self, tinfoil):
self.tinfoil = tinfoil
self.multiconfigs = [''] + (tinfoil.config_data.getVar('BBMULTICONFIG') or '').split()
self.collections = {}
self.recipecaches = {}
self.skiplist_by_mc = self.TinfoilSkiplistByMcAdapter(tinfoil)
for mc in self.multiconfigs:
self.collections[mc] = self.TinfoilCookerCollectionAdapter(tinfoil, mc)
self.recipecaches[mc] = self.TinfoilRecipeCacheAdapter(tinfoil, mc)
@@ -209,6 +201,8 @@ class TinfoilCookerAdapter:
# Grab these only when they are requested since they aren't always used
if name in self._cache:
return self._cache[name]
elif name == 'skiplist':
attrvalue = self.tinfoil.get_skipped_recipes()
elif name == 'bbfile_config_priorities':
ret = self.tinfoil.run_command('getLayerPriorities')
bbfile_config_priorities = []
@@ -520,12 +514,12 @@ class Tinfoil:
"""
return defaultdict(list, self.run_command('getOverlayedRecipes', mc))

def get_skipped_recipes(self, mc=''):
def get_skipped_recipes(self):
"""
Find recipes which were skipped (i.e. SkipRecipe was raised
during parsing).
"""
return OrderedDict(self.run_command('getSkippedRecipes', mc))
return OrderedDict(self.run_command('getSkippedRecipes'))

def get_all_providers(self, mc=''):
return defaultdict(list, self.run_command('allProviders', mc))
@@ -539,7 +533,6 @@ class Tinfoil:
def get_runtime_providers(self, rdep):
return self.run_command('getRuntimeProviders', rdep)

# TODO: teach this method about mc
def get_recipe_file(self, pn):
"""
Get the file name for the specified recipe/target. Raises
@@ -548,7 +541,6 @@ class Tinfoil:
"""
best = self.find_best_provider(pn)
if not best or (len(best) > 3 and not best[3]):
# TODO: pass down mc
skiplist = self.get_skipped_recipes()
taskdata = bb.taskdata.TaskData(None, skiplist=skiplist)
skipreasons = taskdata.get_reasons(pn)

@@ -559,10 +559,7 @@ class ORMWrapper(object):
# we might have an invalid link; no way to detect this. just set it to None
filetarget_obj = None

try:
parent_obj = Target_File.objects.get(target = target_obj, path = parent_path, inodetype = Target_File.ITYPE_DIRECTORY)
except Target_File.DoesNotExist:
parent_obj = None
parent_obj = Target_File.objects.get(target = target_obj, path = parent_path, inodetype = Target_File.ITYPE_DIRECTORY)

Target_File.objects.create(
target = target_obj,

@@ -577,8 +577,6 @@ def main(server, eventHandler, params, tf = TerminalFilter):
else:
log_exec_tty = False

should_print_hyperlinks = sys.stdout.isatty() and os.environ.get('NO_COLOR', '') == ''

helper = uihelper.BBUIHelper()

# Look for the specially designated handlers which need to be passed to the
@@ -642,7 +640,7 @@ def main(server, eventHandler, params, tf = TerminalFilter):
return_value = 0
errors = 0
warnings = 0
taskfailures = {}
taskfailures = []

printintervaldelta = 10 * 60 # 10 minutes
printinterval = printintervaldelta
@@ -728,8 +726,6 @@ def main(server, eventHandler, params, tf = TerminalFilter):
if isinstance(event, bb.build.TaskFailed):
return_value = 1
print_event_log(event, includelogs, loglines, termfilter)
k = "{}:{}".format(event._fn, event._task)
taskfailures[k] = event.logfile
if isinstance(event, bb.build.TaskBase):
logger.info(event._message)
continue
@@ -825,7 +821,7 @@ def main(server, eventHandler, params, tf = TerminalFilter):

if isinstance(event, bb.runqueue.runQueueTaskFailed):
return_value = 1
taskfailures.setdefault(event.taskstring)
taskfailures.append(event.taskstring)
logger.error(str(event))
continue

@@ -946,21 +942,11 @@ def main(server, eventHandler, params, tf = TerminalFilter):
try:
termfilter.clearFooter()
summary = ""
def format_hyperlink(url, link_text):
if should_print_hyperlinks:
start = f'\033]8;;{url}\033\\'
end = '\033]8;;\033\\'
return f'{start}{link_text}{end}'
return link_text

if taskfailures:
summary += pluralise("\nSummary: %s task failed:",
"\nSummary: %s tasks failed:", len(taskfailures))
for (failure, log_file) in taskfailures.items():
for failure in taskfailures:
summary += "\n %s" % failure
if log_file:
hyperlink = format_hyperlink(f"file://{log_file}", log_file)
summary += "\n log: {}".format(hyperlink)
if warnings:
summary += pluralise("\nSummary: There was %s WARNING message.",
"\nSummary: There were %s WARNING messages.", warnings)

@@ -30,6 +30,7 @@ import bb.build
import bb.command
import bb.cooker
import bb.event
import bb.exceptions
import bb.runqueue
from bb.ui import uihelper

@@ -101,6 +102,10 @@ class TeamcityLogFormatter(logging.Formatter):
details = ""
if hasattr(record, 'bb_exc_formatted'):
details = ''.join(record.bb_exc_formatted)
elif hasattr(record, 'bb_exc_info'):
etype, value, tb = record.bb_exc_info
formatted = bb.exceptions.format_exception(etype, value, tb, limit=5)
details = ''.join(formatted)

if record.levelno in [bb.msg.BBLogFormatter.ERROR, bb.msg.BBLogFormatter.CRITICAL]:
# ERROR gets a separate errorDetails field

@@ -1857,30 +1857,12 @@ def path_is_descendant(descendant, ancestor):
# If we don't have a timeout of some kind and a process/thread exits badly (for example
# OOM killed) and held a lock, we'd just hang in the lock futex forever. It is better
# we exit at some point than hang. 5 minutes with no progress means we're probably deadlocked.
# This function can still deadlock python since it can't signal the other threads to exit
# (signals are handled in the main thread) and even os._exit() will wait on non-daemon threads
# to exit.
@contextmanager
def lock_timeout(lock):
held = lock.acquire(timeout=5*60)
try:
s = signal.pthread_sigmask(signal.SIG_BLOCK, signal.valid_signals())
held = lock.acquire(timeout=5*60)
if not held:
bb.server.process.serverlog("Couldn't get the lock for 5 mins, timed out, exiting.\n%s" % traceback.format_stack())
os._exit(1)
yield held
finally:
lock.release()
signal.pthread_sigmask(signal.SIG_SETMASK, s)

# A version of lock_timeout without the check that the lock was locked and a shorter timeout
@contextmanager
def lock_timeout_nocheck(lock):
try:
s = signal.pthread_sigmask(signal.SIG_BLOCK, signal.valid_signals())
l = lock.acquire(timeout=10)
yield l
finally:
if l:
lock.release()
signal.pthread_sigmask(signal.SIG_SETMASK, s)

@@ -50,8 +50,8 @@ class ActionPlugin(LayerPlugin):

try:
notadded, _ = bb.utils.edit_bblayers_conf(bblayers_conf, layerdirs, None)
self.tinfoil.modified_files()
if not (args.force or notadded):
self.tinfoil.modified_files()
try:
self.tinfoil.run_command('parseConfiguration')
except (bb.tinfoil.TinfoilUIException, bb.BBHandledException):
@@ -83,8 +83,6 @@ class ActionPlugin(LayerPlugin):
layerdir = os.path.abspath(item)
layerdirs.append(layerdir)
(_, notremoved) = bb.utils.edit_bblayers_conf(bblayers_conf, None, layerdirs)
if args.force > 1:
return 0
self.tinfoil.modified_files()
if notremoved:
for item in notremoved:

@@ -142,11 +142,10 @@ skipped recipes will also be listed, with a " (skipped)" suffix.
# Ensure we list skipped recipes
# We are largely guessing about PN, PV and the preferred version here,
# but we have no choice since skipped recipes are not fully parsed
skiplist = list(self.tinfoil.cooker.skiplist_by_mc[mc].keys())

skiplist = list(self.tinfoil.cooker.skiplist.keys())
mcspec = 'mc:%s:' % mc
if mc:
mcspec = f'mc:{mc}:'
skiplist = [s[len(mcspec):] if s.startswith(mcspec) else s for s in skiplist]
skiplist = [s[len(mcspec):] for s in skiplist if s.startswith(mcspec)]

for fn in skiplist:
recipe_parts = os.path.splitext(os.path.basename(fn))[0].split('_')
@@ -163,7 +162,7 @@ skipped recipes will also be listed, with a " (skipped)" suffix.
def print_item(f, pn, ver, layer, ispref):
if not selected_layer or layer == selected_layer:
if not bare and f in skiplist:
skipped = ' (skipped: %s)' % self.tinfoil.cooker.skiplist_by_mc[mc][f].skipreason
skipped = ' (skipped: %s)' % self.tinfoil.cooker.skiplist[f].skipreason
else:
skipped = ''
if show_filenames:
@@ -302,7 +301,7 @@ Lists recipes with the bbappends that apply to them as subitems.
if self.show_appends_for_pn(pn, cooker_data, args.mc):
appends = True

if not args.pnspec and self.show_appends_for_skipped(args.mc):
if not args.pnspec and self.show_appends_for_skipped():
appends = True

if not appends:
@@ -318,9 +317,9 @@ Lists recipes with the bbappends that apply to them as subitems.

return self.show_appends_output(filenames, best_filename)

def show_appends_for_skipped(self, mc):
def show_appends_for_skipped(self):
filenames = [os.path.basename(f)
for f in self.tinfoil.cooker.skiplist_by_mc[mc].keys()]
for f in self.tinfoil.cooker.skiplist.keys()]
return self.show_appends_output(filenames, None, " (skipped)")

def show_appends_output(self, filenames, best_filename, name_suffix = ''):

@@ -1,49 +0,0 @@
Behold, mortal, the origins of Beautiful Soup...
================================================

Leonard Richardson is the primary maintainer.

Aaron DeVore and Isaac Muse have made significant contributions to the
code base.

Mark Pilgrim provided the encoding detection code that forms the base
of UnicodeDammit.

Thomas Kluyver and Ezio Melotti finished the work of getting Beautiful
Soup 4 working under Python 3.

Simon Willison wrote soupselect, which was used to make Beautiful Soup
support CSS selectors. Isaac Muse wrote SoupSieve, which made it
possible to _remove_ the CSS selector code from Beautiful Soup.

Sam Ruby helped with a lot of edge cases.

Jonathan Ellis was awarded the prestigious Beau Potage D'Or for his
work in solving the nestable tags conundrum.

An incomplete list of people have contributed patches to Beautiful
Soup:

Istvan Albert, Andrew Lin, Anthony Baxter, Oliver Beattie, Andrew
Boyko, Tony Chang, Francisco Canas, "Delong", Zephyr Fang, Fuzzy,
Roman Gaufman, Yoni Gilad, Richie Hindle, Toshihiro Kamiya, Peteris
Krumins, Kent Johnson, Marek Kapolka, Andreas Kostyrka, Roel Kramer,
Ben Last, Robert Leftwich, Stefaan Lippens, "liquider", Staffan
Malmgren, Ksenia Marasanova, JP Moins, Adam Monsen, John Nagle, "Jon",
Ed Oskiewicz, Martijn Peters, Greg Phillips, Giles Radford, Stefano
Revera, Arthur Rudolph, Marko Samastur, James Salter, Jouni Seppänen,
Alexander Schmolck, Tim Shirley, Geoffrey Sneddon, Ville Skyttä,
"Vikas", Jens Svalgaard, Andy Theyers, Eric Weiser, Glyn Webster, John
Wiseman, Paul Wright, Danny Yoo

An incomplete list of people who made suggestions or found bugs or
found ways to break Beautiful Soup:

Hanno Böck, Matteo Bertini, Chris Curvey, Simon Cusack, Bruce Eckel,
Matt Ernst, Michael Foord, Tom Harris, Bill de hOra, Donald Howes,
Matt Patterson, Scott Roberts, Steve Strassmann, Mike Williams,
warchild at redho dot com, Sami Kuisma, Carlos Rocha, Bob Hutchison,
Joren Mc, Michal Migurski, John Kleven, Tim Heaney, Tripp Lilley, Ed
Summers, Dennis Sutch, Chris Smith, Aaron Swartz, Stuart
Turner, Greg Edwards, Kevin J Kalupson, Nikos Kouremenos, Artur de
Sousa Rocha, Yichun Wei, Per Vognsen
43 bitbake/lib/bs4/AUTHORS.txt (new file)
@@ -0,0 +1,43 @@
Behold, mortal, the origins of Beautiful Soup...
================================================

Leonard Richardson is the primary programmer.

Aaron DeVore is awesome.

Mark Pilgrim provided the encoding detection code that forms the base
of UnicodeDammit.

Thomas Kluyver and Ezio Melotti finished the work of getting Beautiful
Soup 4 working under Python 3.

Simon Willison wrote soupselect, which was used to make Beautiful Soup
support CSS selectors.

Sam Ruby helped with a lot of edge cases.

Jonathan Ellis was awarded the prestigous Beau Potage D'Or for his
work in solving the nestable tags conundrum.

An incomplete list of people have contributed patches to Beautiful
Soup:

Istvan Albert, Andrew Lin, Anthony Baxter, Andrew Boyko, Tony Chang,
Zephyr Fang, Fuzzy, Roman Gaufman, Yoni Gilad, Richie Hindle, Peteris
Krumins, Kent Johnson, Ben Last, Robert Leftwich, Staffan Malmgren,
Ksenia Marasanova, JP Moins, Adam Monsen, John Nagle, "Jon", Ed
Oskiewicz, Greg Phillips, Giles Radford, Arthur Rudolph, Marko
Samastur, Jouni Seppänen, Alexander Schmolck, Andy Theyers, Glyn
Webster, Paul Wright, Danny Yoo

An incomplete list of people who made suggestions or found bugs or
found ways to break Beautiful Soup:

Hanno Böck, Matteo Bertini, Chris Curvey, Simon Cusack, Bruce Eckel,
Matt Ernst, Michael Foord, Tom Harris, Bill de hOra, Donald Howes,
Matt Patterson, Scott Roberts, Steve Strassmann, Mike Williams,
warchild at redho dot com, Sami Kuisma, Carlos Rocha, Bob Hutchison,
Joren Mc, Michal Migurski, John Kleven, Tim Heaney, Tripp Lilley, Ed
Summers, Dennis Sutch, Chris Smith, Aaron Sweep^W Swartz, Stuart
Turner, Greg Edwards, Kevin J Kalupson, Nikos Kouremenos, Artur de
Sousa Rocha, Yichun Wei, Per Vognsen
@@ -1,6 +1,6 @@
Beautiful Soup is made available under the MIT license:

Copyright (c) Leonard Richardson
Copyright (c) 2004-2012 Leonard Richardson

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
@@ -20,12 +20,7 @@ Beautiful Soup is made available under the MIT license:
BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
SOFTWARE, DAMMIT.

Beautiful Soup incorporates code from the html5lib library, which is
also made available under the MIT license. Copyright (c) James Graham
and other contributors

Beautiful Soup has an optional dependency on the soupsieve library,
which is also made available under the MIT license. Copyright (c)
Isaac Muse
also made available under the MIT license.
@@ -1,776 +1,3 @@
= 4.12.3 (20240117)

* The Beautiful Soup documentation now has a Spanish translation, thanks
to Carlos Romero. Delong Wang's Chinese translation has been updated
to cover Beautiful Soup 4.12.0.

* Fixed a regression such that if you set .hidden on a tag, the tag
becomes invisible but its contents are still visible. User manipulation
of .hidden is not a documented or supported feature, so don't do this,
but it wasn't too difficult to keep the old behavior working.

* Fixed a case found by Mengyuhan where html.parser giving up on
markup would result in an AssertionError instead of a
ParserRejectedMarkup exception.

* Added the correct stacklevel to instances of the XMLParsedAsHTMLWarning.
[bug=2034451]

* Corrected the syntax of the license definition in pyproject.toml. Patch
by Louis Maddox. [bug=2032848]

* Corrected a typo in a test that was causing test failures when run against
libxml2 2.12.1. [bug=2045481]

= 4.12.2 (20230407)

* Fixed an unhandled exception in BeautifulSoup.decode_contents
and methods that call it. [bug=2015545]

= 4.12.1 (20230405)

NOTE: the following things are likely to be dropped in the next
feature release of Beautiful Soup:

Official support for Python 3.6.
Inclusion of unit tests and test data in the wheel file.
Two scripts: demonstrate_parser_differences.py and test-all-versions.

Changes:

* This version of Beautiful Soup replaces setup.py and setup.cfg
with pyproject.toml. Beautiful Soup now uses tox as its test backend
and hatch to do builds.

* The main functional improvement in this version is a nonrecursive technique
for regenerating a tree. This technique is used to avoid situations where,
in previous versions, doing something to a very deeply nested tree
would overflow the Python interpreter stack:

1. Outputting a tree as a string, e.g. with
BeautifulSoup.encode() [bug=1471755]

2. Making copies of trees (copy.copy() and
copy.deepcopy() from the Python standard library). [bug=1709837]

3. Pickling a BeautifulSoup object. (Note that pickling a Tag
object can still cause an overflow.)

* Making a copy of a BeautifulSoup object no longer parses the
document again, which should improve performance significantly.

* When a BeautifulSoup object is unpickled, Beautiful Soup now
tries to associate an appropriate TreeBuilder object with it.

* Tag.prettify() will now consistently end prettified markup with
a newline.

* Added unit tests for fuzz test cases created by third
parties. Some of these tests are skipped since they point
to problems outside of Beautiful Soup, but this change
puts them all in one convenient place.

* PageElement now implements the known_xml attribute. (This was technically
a bug, but it shouldn't be an issue in normal use.) [bug=2007895]

* The demonstrate_parser_differences.py script was still written in
Python 2. I've converted it to Python 3, but since no one has
mentioned this over the years, it's a sign that no one uses this
script and it's not serving its purpose.

= 4.12.0 (20230320)

* Introduced the .css property, which centralizes all access to
the Soup Sieve API. This allows Beautiful Soup to give direct
access to as much of Soup Sieve that makes sense, without cluttering
the BeautifulSoup and Tag classes with a lot of new methods.

This does mean one addition to the BeautifulSoup and Tag classes
(the .css property itself), so this might be a breaking change if you
happen to use Beautiful Soup to parse XML that includes a tag called
<css>. In particular, code like this will stop working in 4.12.0:

soup.css['id']

Code like this will work just as before:

soup.find('css')['id']

The Soup Sieve methods supported through the .css property are
select(), select_one(), iselect(), closest(), match(), filter(),
escape(), and compile(). The BeautifulSoup and Tag classes still
support the select() and select_one() methods; they have not been
deprecated, but they have been demoted to convenience methods.

[bug=2003677]
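
For illustration, a minimal sketch of the new property (this assumes
bs4 >= 4.12 with the soupsieve package installed):

from bs4 import BeautifulSoup

soup = BeautifulSoup('<p class="big">One</p><p>Two</p>', 'html.parser')
# Same query through the Soup Sieve API and the convenience method:
print(soup.css.select('p.big'))   # [<p class="big">One</p>]
print(soup.select('p.big'))       # same result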

* When the html.parser parser decides it can't parse a document, Beautiful
Soup now consistently propagates this fact by raising a
ParserRejectedMarkup error. [bug=2007343]

* Removed some error checking code from diagnose(), which is redundant with
similar (but more Pythonic) code in the BeautifulSoup constructor.
[bug=2007344]

* Added intersphinx references to the documentation so that other
projects have a target to point to when they reference Beautiful
Soup classes. [bug=1453370]

= 4.11.2 (20230131)

* Fixed test failures caused by nondeterministic behavior of
UnicodeDammit's character detection, depending on the platform setup.
[bug=1973072]

* Fixed another crash when overriding multi_valued_attributes and using the
html5lib parser. [bug=1948488]

* The HTMLFormatter and XMLFormatter constructors no longer return a
value. [bug=1992693]

* Tag.interesting_string_types is now propagated when a tag is
copied. [bug=1990400]

* Warnings now do their best to provide an appropriate stacklevel,
improving the usefulness of the message. [bug=1978744]

* Passing a Tag's .contents into PageElement.extend() now works the
same way as passing the Tag itself.

* Soup Sieve tests will be skipped if the library is not installed.

= 4.11.1 (20220408)

This release was done to ensure that the unit tests are packaged along
with the released source. There are no functionality changes in this
release, but there are a few other packaging changes:

* The Japanese and Korean translations of the documentation are included.
* The changelog is now packaged as CHANGELOG, and the license file is
packaged as LICENSE. NEWS.txt and COPYING.txt are still present,
but may be removed in the future.
* TODO.txt is no longer packaged, since a TODO is not relevant for released
code.

= 4.11.0 (20220407)

* Ported unit tests to use pytest.

* Added special string classes, RubyParenthesisString and RubyTextString,
to make it possible to treat ruby text specially in get_text() calls.
[bug=1941980]

* It's now possible to customize the way output is indented by
providing a value for the 'indent' argument to the Formatter
constructor. The 'indent' argument works very similarly to the
argument of the same name in the Python standard library's
json.dump() function. [bug=1955497]
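
A minimal sketch of the 'indent' argument (assuming bs4 >= 4.11, where
HTMLFormatter lives in bs4.formatter):

from bs4 import BeautifulSoup
from bs4.formatter import HTMLFormatter

soup = BeautifulSoup('<div><p>Hi</p></div>', 'html.parser')
# Indent nested tags by four spaces instead of the default one:
print(soup.prettify(formatter=HTMLFormatter(indent=4)))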

* If the charset-normalizer Python module
(https://pypi.org/project/charset-normalizer/) is installed, Beautiful
Soup will use it to detect the character sets of incoming documents.
This is also the module used by newer versions of the Requests library.
For the sake of backwards compatibility, chardet and cchardet both take
precedence if installed. [bug=1955346]

* Added a workaround for an lxml bug
(https://bugs.launchpad.net/lxml/+bug/1948551) that causes
problems when parsing a Unicode string beginning with BYTE ORDER MARK.
[bug=1947768]

* Issue a warning when an HTML parser is used to parse a document that
looks like XML but not XHTML. [bug=1939121]

* Do a better job of keeping track of namespaces as an XML document is
parsed, so that CSS selectors that use namespaces will do the right
thing more often. [bug=1946243]

* Some time ago, the misleadingly named "text" argument to find-type
methods was renamed to the more accurate "string." But this supposed
"renaming" didn't make it into important places like the method
signatures or the docstrings. That's corrected in this
version. "text" still works, but will give a DeprecationWarning.
[bug=1947038]

* Fixed a crash when pickling a BeautifulSoup object that has no
tree builder. [bug=1934003]

* Fixed a crash when overriding multi_valued_attributes and using the
html5lib parser. [bug=1948488]

* Standardized the wording of the MarkupResemblesLocatorWarning
warnings to omit untrusted input and make the warnings less
judgmental about what you ought to be doing. [bug=1955450]

* Removed support for the iconv_codec library, which doesn't seem
to exist anymore and was never put up on PyPI. (The closest
replacement on PyPI, iconv_codecs, is GPL-licensed, so we can't use
it--it's also quite old.)

= 4.10.0 (20210907)

* This is the first release of Beautiful Soup to only support Python
3. I dropped Python 2 support to maintain support for newer versions
(58 and up) of setuptools. See:
https://github.com/pypa/setuptools/issues/2769 [bug=1942919]

* The behavior of methods like .get_text() and .strings now differs
depending on the type of tag. The change is visible with HTML tags
like <script>, <style>, and <template>. Starting in 4.9.0, methods
like get_text() returned no results on such tags, because the
contents of those tags are not considered 'text' within the document
as a whole.

But a user who calls script.get_text() is working from a different
definition of 'text' than a user who calls div.get_text()--otherwise
there would be no need to call script.get_text() at all. In 4.10.0,
the contents of (e.g.) a <script> tag are considered 'text' during a
get_text() call on the tag itself, but not considered 'text' during
a get_text() call on the tag's parent.

Because of this change, calling get_text() on each child of a tag
may now return a different result than calling get_text() on the tag
itself. That's because different tags now have different
understandings of what counts as 'text'. [bug=1906226] [bug=1868861]
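
A short sketch of the distinction (assuming bs4 >= 4.10 with the
html.parser tree builder):

from bs4 import BeautifulSoup

soup = BeautifulSoup('<div>text<script>var x = 1;</script></div>', 'html.parser')
print(soup.div.get_text())     # 'text' -- the script is not 'text' here
print(soup.script.get_text())  # 'var x = 1;' -- but it is here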

* NavigableString and its subclasses now implement the get_text()
method, as well as the properties .strings and
.stripped_strings. These methods will either return the string
itself, or nothing, so the only reason to use this is when iterating
over a list of mixed Tag and NavigableString objects. [bug=1904309]

* The 'html5' formatter now treats attributes whose values are the
empty string as HTML boolean attributes. Previously (and in other
formatters), an attribute value must be set as None to be treated as
a boolean attribute. In a future release, I plan to also give this
behavior to the 'html' formatter. Patch by Isaac Muse. [bug=1915424]

* The 'replace_with()' method now takes a variable number of arguments,
and can be used to replace a single element with a sequence of elements.
Patch by Bill Chandos. [rev=605]
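
A minimal sketch of the multi-argument form (assuming bs4 >= 4.10):

from bs4 import BeautifulSoup

soup = BeautifulSoup('<p>Hello <b>old</b> world</p>', 'html.parser')
em = soup.new_tag('em')
em.string = 'brand'
# One element replaced by a sequence of elements:
soup.b.replace_with(em, ' new')
print(soup)  # <p>Hello <em>brand</em> new world</p>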

* Corrected output when the namespace prefix associated with a
namespaced attribute is the empty string, as opposed to
None. [bug=1915583]

* Performance improvement when processing tags that speeds up overall
tree construction by 2%. Patch by Morotti. [bug=1899358]

* Corrected the use of special string container classes in cases when a
single tag may contain strings with different containers; such as
the <template> tag, which may contain both TemplateString objects
and Comment objects. [bug=1913406]

* The html.parser tree builder can now handle named entities
found in the HTML5 spec in much the same way that the html5lib
tree builder does. Note that the lxml HTML tree builder doesn't handle
named entities this way. [bug=1924908]

* Added a second way to specify encodings to UnicodeDammit and
EncodingDetector, based on the order of precedence defined in the
HTML5 spec, starting at:
https://html.spec.whatwg.org/multipage/parsing.html#parsing-with-a-known-character-encoding

Encodings in 'known_definite_encodings' are tried first, then
byte-order-mark sniffing is run, then encodings in 'user_encodings'
are tried. The old argument, 'override_encodings', is now a
deprecated alias for 'known_definite_encodings'.

This changes the default behavior of the html.parser and lxml tree
builders, in a way that may slightly improve encoding
detection but will probably have no effect. [bug=1889014]
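
A minimal sketch of the new argument (assuming bs4 >= 4.11):

from bs4.dammit import UnicodeDammit

data = 'Sacr\xe9 bleu!'.encode('latin-1')
# A known-definite encoding is tried before byte-order-mark sniffing:
dammit = UnicodeDammit(data, known_definite_encodings=['latin-1'])
print(dammit.unicode_markup)      # Sacré bleu!
print(dammit.original_encoding)   # latin-1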

* Improve the warning issued when a directory name (as opposed to
the name of a regular file) is passed as markup into the BeautifulSoup
constructor. [bug=1913628]

= 4.9.3 (20201003)

This is the final release of Beautiful Soup to support Python
2. Beautiful Soup's official support for Python 2 ended on 01 January,
2021. In the Launchpad Git repository, the final revision to support
Python 2 was revision 70f546b1e689a70e2f103795efce6d261a3dadf7; it is
tagged as "python2".

* Implemented a significant performance optimization to the process of
searching the parse tree. Patch by Morotti. [bug=1898212]

= 4.9.2 (20200926)

* Fixed a bug that caused too many tags to be popped from the tag
stack during tree building, when encountering a closing tag that had
no matching opening tag. [bug=1880420]

* Fixed a bug that inconsistently moved elements over when passing
a Tag, rather than a list, into Tag.extend(). [bug=1885710]

* Specify the soupsieve dependency in a way that complies with
PEP 508. Patch by Mike Nerone. [bug=1893696]

* Change the signatures for BeautifulSoup.insert_before and insert_after
(which are not implemented) to match PageElement.insert_before and
insert_after, quieting warnings in some IDEs. [bug=1897120]

= 4.9.1 (20200517)

* Added a keyword argument 'on_duplicate_attribute' to the
BeautifulSoupHTMLParser constructor (used by the html.parser tree
builder) which lets you customize the handling of markup that
contains the same attribute more than once, as in:
<a href="url1" href="url2"> [bug=1878209]
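
A minimal sketch (assuming bs4 >= 4.9.1; extra keyword arguments to the
BeautifulSoup constructor are passed through to the tree builder):

from bs4 import BeautifulSoup

markup = '<a href="url1" href="url2">link</a>'
# The default keeps the last value; 'ignore' keeps the first instead:
soup = BeautifulSoup(markup, 'html.parser', on_duplicate_attribute='ignore')
print(soup.a['href'])  # url1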

* Added a distinct subclass, GuessedAtParserWarning, for the warning
issued when BeautifulSoup is instantiated without a parser being
specified. [bug=1873787]

* Added a distinct subclass, MarkupResemblesLocatorWarning, for the
warning issued when BeautifulSoup is instantiated with 'markup' that
actually seems to be a URL or the path to a file on
disk. [bug=1873787]

* The new NavigableString subclasses (Stylesheet, Script, and
TemplateString) can now be imported directly from the bs4 package.

* If you encode a document with a Python-specific encoding like
'unicode_escape', that encoding is no longer mentioned in the final
XML or HTML document. Instead, encoding information is omitted or
left blank. [bug=1874955]

* Fixed test failures when run against soupsieve 2.0. Patch by Tomáš
Chvátal. [bug=1872279]

= 4.9.0 (20200405)

* Added PageElement.decomposed, a new property which lets you
check whether you've already called decompose() on a Tag or
NavigableString.

* Embedded CSS and Javascript is now stored in distinct Stylesheet and
Script tags, which are ignored by methods like get_text() since most
people don't consider this sort of content to be 'text'. This
feature is not supported by the html5lib treebuilder. [bug=1868861]

* Added a Russian translation by 'authoress' to the repository.

* Fixed an unhandled exception when formatting a Tag that had been
decomposed. [bug=1857767]

* Fixed a bug that happened when passing a Unicode filename containing
non-ASCII characters as markup into Beautiful Soup, on a system that
allows Unicode filenames. [bug=1866717]

* Added a performance optimization to PageElement.extract(). Patch by
Arthur Darcet.

= 4.8.2 (20191224)

* Added Python docstrings to all public methods of the most commonly
used classes.

* Added a Chinese translation by Deron Wang and a Brazilian Portuguese
translation by Cezar Peixeiro to the repository.

* Fixed two deprecation warnings. Patches by Colin
Watson and Nicholas Neumann. [bug=1847592] [bug=1855301]

* The html.parser tree builder now correctly handles DOCTYPEs that are
not uppercase. [bug=1848401]

* PageElement.select() now returns a ResultSet rather than a regular
list, making it consistent with methods like find_all().

= 4.8.1 (20191006)

* When the html.parser or html5lib parsers are in use, Beautiful Soup
will, by default, record the position in the original document where
each tag was encountered. This includes line number (Tag.sourceline)
and position within a line (Tag.sourcepos). Based on code by Chris
Mayo. [bug=1742921]
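
A minimal sketch (assuming bs4 >= 4.8.1 with the html.parser builder,
where sourceline is 1-based and sourcepos is a 0-based column offset):

from bs4 import BeautifulSoup

markup = '<html>\n<body>\n  <p>Hi</p>\n</body>\n</html>'
soup = BeautifulSoup(markup, 'html.parser')
print(soup.p.sourceline, soup.p.sourcepos)  # 3 2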

* When instantiating a BeautifulSoup object, it's now possible to
provide a dictionary ('element_classes') of the classes you'd like to be
instantiated instead of Tag, NavigableString, etc.
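
A minimal sketch of the 'element_classes' dictionary (assuming
bs4 >= 4.8.1; MyTag is a hypothetical subclass used for illustration):

from bs4 import BeautifulSoup, Tag

class MyTag(Tag):
    pass

soup = BeautifulSoup('<p>Hi</p>', 'html.parser',
                     element_classes={Tag: MyTag})
print(type(soup.p))  # <class '__main__.MyTag'>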

* Fixed the definition of the default XML namespace when using
lxml 4.4. Patch by Isaac Muse. [bug=1840141]

* Fixed a crash when pretty-printing tags that were not created
during initial parsing. [bug=1838903]

* Copying a Tag preserves information that was originally obtained from
the TreeBuilder used to build the original Tag. [bug=1838903]

* Raise an explanatory exception when the underlying parser
completely rejects the incoming markup. [bug=1838877]

* Avoid a crash when trying to detect the declared encoding of a
Unicode document. [bug=1838877]

* Avoid a crash when unpickling certain parse trees generated
using html5lib on Python 3. [bug=1843545]

= 4.8.0 (20190720, "One Small Soup")

This release focuses on making it easier to customize Beautiful Soup's
input mechanism (the TreeBuilder) and output mechanism (the Formatter).

* You can customize the TreeBuilder object by passing keyword
arguments into the BeautifulSoup constructor. Those keyword
arguments will be passed along into the TreeBuilder constructor.

The main reason to do this right now is to change which
attributes are treated as multi-valued attributes (the way 'class'
is treated by default). You can do this with the
'multi_valued_attributes' argument. [bug=1832978]
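
A minimal sketch of the difference:

    from bs4 import BeautifulSoup

    markup = '<p class="a b">text</p>'
    default = BeautifulSoup(markup, 'html.parser')
    plain = BeautifulSoup(markup, 'html.parser',
                          multi_valued_attributes=None)
    print(default.p['class'])  # ['a', 'b']
    print(plain.p['class'])    # 'a b'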

* The role of Formatter objects has been greatly expanded. The Formatter
class now controls the following (see the sketch after this list):

- The function to call to perform entity substitution. (This was
previously Formatter's only job.)
- Which tags should be treated as containing CDATA and have their
contents exempt from entity substitution.
- The order in which a tag's attributes are output. [bug=1812422]
- Whether or not to put a '/' inside a void element, e.g. '<br/>' vs '<br>'

All preexisting code should work as before.
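
A hedged sketch of the expanded role, following the subclassing
pattern described in the documentation -- here, overriding
attributes() to control attribute order:

    from bs4 import BeautifulSoup
    from bs4.formatter import HTMLFormatter

    class ReverseSortedAttributes(HTMLFormatter):
        def attributes(self, tag):
            # Emit attributes in reverse-alphabetical order.
            return sorted(tag.attrs.items(), reverse=True)

    soup = BeautifulSoup('<p a="1" z="2">hi</p>', 'html.parser')
    print(soup.p.decode(formatter=ReverseSortedAttributes()))
    # <p z="2" a="1">hi</p>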

* Added a new method to the API, Tag.smooth(), which consolidates
multiple adjacent NavigableString elements. [bug=1697296]
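
For example (a minimal sketch):

    from bs4 import BeautifulSoup

    soup = BeautifulSoup('<p>one</p>', 'html.parser')
    soup.p.append(' two')
    print(soup.p.contents)  # ['one', ' two'] -- two adjacent strings
    soup.smooth()
    print(soup.p.contents)  # ['one two'] -- consolidated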

* ' (which is valid in XML, XHTML, and HTML 5, but not HTML 4) is always
recognized as a named entity and converted to a single quote. [bug=1818721]

= 4.7.1 (20190106)

* Fixed a significant performance problem introduced in 4.7.0. [bug=1810617]

* Fixed an incorrectly raised exception when inserting a tag before or
after an identical tag. [bug=1810692]

* Beautiful Soup will no longer try to keep track of namespaces that
are not defined with a prefix; this can confuse soupsieve. [bug=1810680]

* Tried even harder to avoid the deprecation warning originally fixed in
4.6.1. [bug=1778909]

= 4.7.0 (20181231)

* Beautiful Soup's CSS Selector implementation has been replaced by a
dependency on Isaac Muse's SoupSieve project (the soupsieve package
on PyPI). The good news is that SoupSieve has a much more robust and
complete implementation of CSS selectors, resolving a large number
of longstanding issues. The bad news is that from this point onward,
SoupSieve must be installed if you want to use the select() method.

You don't have to change anything if you installed Beautiful Soup
through pip (SoupSieve will be automatically installed when you
upgrade Beautiful Soup) or if you don't use CSS selectors from
within Beautiful Soup.

SoupSieve documentation: https://facelessuser.github.io/soupsieve/
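
A quick sketch (assuming the soupsieve package is present, which pip
installs automatically as a dependency):

    from bs4 import BeautifulSoup

    soup = BeautifulSoup(
        '<div id="menu"><a class="item" href="/a">A</a></div>',
        'html.parser')
    # SoupSieve handles the full selector, including combinators:
    print(soup.select('div#menu > a.item[href]'))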

* Added the PageElement.extend() method, which works like list.append().
[bug=1514970]

* PageElement.insert_before() and insert_after() now take a variable
number of arguments. [bug=1514970]

* Fix a number of problems with the tree builder that caused
trees that were superficially okay, but which fell apart when bits
were extracted. Patch by Isaac Muse. [bug=1782928,1809910]

* Fixed a problem with the tree builder in which elements that
contained no content (such as empty comments and all-whitespace
elements) were not being treated as part of the tree. Patch by Isaac
Muse. [bug=1798699]

* Fixed a problem with multi-valued attributes where the value
contained whitespace. Thanks to Jens Svalgaard for the
fix. [bug=1787453]

* Clarified ambiguous license statements in the source code. Beautiful
Soup is released under the MIT license, and has been since 4.4.0.

* This file has been renamed from NEWS.txt to CHANGELOG.

= 4.6.3 (20180812)

* Exactly the same as 4.6.2. Re-released to make the README file
render properly on PyPI.

= 4.6.2 (20180812)

* Fix an exception when a custom formatter was asked to format a void
element. [bug=1784408]

= 4.6.1 (20180728)

* Stop data loss when encountering an empty numeric entity, and
possibly in other cases. Thanks to tos.kamiya for the fix. [bug=1698503]

* Preserve XML namespaces introduced inside an XML document, not just
the ones introduced at the top level. [bug=1718787]

* Added a new formatter, "html5", which represents void elements
as "<element>" rather than "<element/>". [bug=1716272]

* Fixed a problem where the html.parser tree builder interpreted
a string like "&foo " as the character entity "&foo;" [bug=1728706]

* Correctly handle invalid HTML numeric character entities like &#147;
which reference code points that are not Unicode code points. Note
that this is only fixed when Beautiful Soup is used with the
html.parser parser -- html5lib already worked and I couldn't fix it
with lxml. [bug=1782933]

* Improved the warning given when no parser is specified. [bug=1780571]

* When markup contains duplicate elements, a select() call that
includes multiple match clauses will match all relevant
elements. [bug=1770596]

* Fixed code that was causing deprecation warnings in recent Python 3
versions. Includes a patch from Ville Skyttä. [bug=1778909] [bug=1689496]

* Fixed a Windows crash in diagnose() when checking whether a long
markup string is a filename. [bug=1737121]

* Stopped HTMLParser from raising an exception in very rare cases of
bad markup. [bug=1708831]

* Fixed a bug where find_all() was not working when asked to find a
tag with a namespaced name in an XML document that was parsed as
HTML. [bug=1723783]

* You can get finer control over formatting by subclassing
bs4.element.Formatter and passing a Formatter instance into (e.g.)
encode(). [bug=1716272]

* You can pass a dictionary of `attrs` into
BeautifulSoup.new_tag. This makes it possible to create a tag with
an attribute like 'name' that would otherwise be masked by another
argument of new_tag. [bug=1779276]
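
For example (a minimal sketch; 'name' would otherwise collide with
new_tag's first positional argument):

    from bs4 import BeautifulSoup

    soup = BeautifulSoup('<form></form>', 'html.parser')
    tag = soup.new_tag('input', attrs={'name': 'email', 'type': 'text'})
    soup.form.append(tag)
    print(soup.form)  # <form><input name="email" type="text"/></form>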

* Clarified the deprecation warning when accessing tag.fooTag, to cover
the possibility that you might really have been looking for a tag
called 'fooTag'.

= 4.6.0 (20170507) =

* Added the `Tag.get_attribute_list` method, which acts like `Tag.get` for
getting the value of an attribute, but which always returns a list,
whether or not the attribute is a multi-value attribute. [bug=1678589]
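
For example (a minimal sketch):

    from bs4 import BeautifulSoup

    soup = BeautifulSoup('<p class="a b" id="x">t</p>', 'html.parser')
    print(soup.p.get('id'))                    # 'x'
    print(soup.p.get_attribute_list('id'))     # ['x']
    print(soup.p.get_attribute_list('class'))  # ['a', 'b']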

* It's now possible to use a tag's namespace prefix when searching,
e.g. soup.find('namespace:tag') [bug=1655332]

* Improved the handling of empty-element tags like <br> when using the
html.parser parser. [bug=1676935]

* HTML parsers treat all HTML4 and HTML5 empty element tags (aka void
element tags) correctly. [bug=1656909]

* Namespace prefix is preserved when an XML tag is copied. Thanks
to Vikas for a patch and test. [bug=1685172]

= 4.5.3 (20170102) =

* Fixed foster parenting when html5lib is the tree builder. Thanks to
Geoffrey Sneddon for a patch and test.

* Fixed yet another problem that caused the html5lib tree builder to
create a disconnected parse tree. [bug=1629825]

= 4.5.2 (20170102) =

* Apart from the version number, this release is identical to
4.5.3. Due to user error, it could not be completely uploaded to
PyPI. Use 4.5.3 instead.

= 4.5.1 (20160802) =

* Fixed a crash when passing Unicode markup that contained a
processing instruction into the lxml HTML parser on Python
3. [bug=1608048]

= 4.5.0 (20160719) =

* Beautiful Soup is no longer compatible with Python 2.6. This
actually happened a few releases ago, but it's now official.

* Beautiful Soup will now work with versions of html5lib greater than
0.99999999. [bug=1603299]

* If a search against each individual value of a multi-valued
attribute fails, the search will be run one final time against the
complete attribute value considered as a single string. That is, if
a tag has class="foo bar" and neither "foo" nor "bar" matches, but
"foo bar" does, the tag is now considered a match.

This happened in previous versions, but only when the value being
searched for was a string. Now it also works when that value is
a regular expression, a list of strings, etc. [bug=1476868]
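
A sketch of the widened fallback: neither 'foo' nor 'bar' matches this
pattern on its own, but the whole attribute value does.

    import re
    from bs4 import BeautifulSoup

    soup = BeautifulSoup('<p class="foo bar">t</p>', 'html.parser')
    print(soup.find_all('p', class_=re.compile(r'^foo bar$')))
    # [<p class="foo bar">t</p>]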

* Fixed a bug that deranged the tree when a whitespace element was
reparented into a tag that contained an identical whitespace
element. [bug=1505351]

* Added support for CSS selector values that contain quoted spaces,
such as tag[style="display: foo"]. [bug=1540588]

* Corrected handling of XML processing instructions. [bug=1504393]

* Corrected an encoding error that happened when a BeautifulSoup
object was copied. [bug=1554439]

* The contents of <textarea> tags will no longer be modified when the
tree is prettified. [bug=1555829]

* When a BeautifulSoup object is pickled but its tree builder cannot
be pickled, its .builder attribute is set to None instead of being
destroyed. This avoids a performance problem once the object is
unpickled. [bug=1523629]

* Specify the file and line number when warning about a
BeautifulSoup object being instantiated without a parser being
specified. [bug=1574647]

* The `limit` argument to `select()` now works correctly, though it's
not implemented very efficiently. [bug=1520530]

* Fixed a Python 3 ByteWarning when a URL was passed in as though it
were markup. Thanks to James Salter for a patch and
test. [bug=1533762]

* We don't run the check for a filename passed in as markup if the
'filename' contains a less-than character; the less-than character
indicates it's most likely a very small document. [bug=1577864]

= 4.4.1 (20150928) =

* Fixed a bug that deranged the tree when part of it was
removed. Thanks to Eric Weiser for the patch and John Wiseman for a
test. [bug=1481520]

* Fixed a parse bug with the html5lib tree-builder. Thanks to Roel
Kramer for the patch. [bug=1483781]

* Improved the implementation of CSS selector grouping. Thanks to
Orangain for the patch. [bug=1484543]

* Fixed the test_detect_utf8 test so that it works when chardet is
installed. [bug=1471359]

* Corrected the output of Declaration objects. [bug=1477847]


= 4.4.0 (20150703) =

Especially important changes:

* Added a warning when you instantiate a BeautifulSoup object without
explicitly naming a parser. [bug=1398866]

* __repr__ now returns an ASCII bytestring in Python 2, and a Unicode
string in Python 3, instead of a UTF8-encoded bytestring in both
versions. In Python 3, __str__ now returns a Unicode string instead
of a bytestring. [bug=1420131]

* The `text` argument to the find_* methods is now called `string`,
which is more accurate. `text` still works, but `string` is the
argument described in the documentation. `text` may eventually
change its meaning, but not for a very long time. [bug=1366856]

* Changed the way soup objects work under copy.copy(). Copying a
NavigableString or a Tag will give you a new NavigableString that's
equal to the old one but not connected to the parse tree. Patch by
Martijn Peters. [bug=1307490]

* Started using a standard MIT license. [bug=1294662]

* Added a Chinese translation of the documentation by Delong .w.

New features:

* Introduced the select_one() method, which uses a CSS selector but
only returns the first match, instead of a list of
matches. [bug=1349367]
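
For example (a minimal sketch):

    from bs4 import BeautifulSoup

    soup = BeautifulSoup('<ul><li>a</li><li>b</li></ul>', 'html.parser')
    print(soup.select_one('li'))  # <li>a</li>
    print(soup.select_one('em'))  # None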

* You can now create a Tag object without specifying a
TreeBuilder. Patch by Martijn Pieters. [bug=1307471]

* You can now create a NavigableString or a subclass just by invoking
the constructor. [bug=1294315]

* Added an `exclude_encodings` argument to UnicodeDammit and to the
Beautiful Soup constructor, which lets you prohibit the detection of
an encoding that you know is wrong. [bug=1469408]
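
A sketch adapted from the pattern in the documentation (the byte
string here is ambiguous text that several encodings can decode):

    from bs4 import BeautifulSoup

    data = b'<h1>\xed\xe5\xec\xf7</h1>'
    soup = BeautifulSoup(data, 'html.parser',
                         exclude_encodings=['ISO-8859-7'])
    print(soup.original_encoding)  # detector picks another candidate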

* The select() method now supports selector grouping. Patch by
Francisco Canas [bug=1191917]

Bug fixes:

* Fixed yet another problem that caused the html5lib tree builder to
create a disconnected parse tree. [bug=1237763]

* Force object_was_parsed() to keep the tree intact even when an element
from later in the document is moved into place. [bug=1430633]

* Fixed yet another bug that caused a disconnected tree when html5lib
copied an element from one part of the tree to another. [bug=1270611]

* Fixed a bug where Element.extract() could create an infinite loop in
the remaining tree.

* The select() method can now find tags whose names contain
dashes. Patch by Francisco Canas. [bug=1276211]

* The select() method can now find tags with attributes whose names
contain dashes. Patch by Marek Kapolka. [bug=1304007]

* Improved the lxml tree builder's handling of processing
instructions. [bug=1294645]

* Restored the helpful syntax error that happens when you try to
import the Python 2 edition of Beautiful Soup under Python
3. [bug=1213387]

* In Python 3.4 and above, set the new convert_charrefs argument to
the html.parser constructor to avoid a warning and future
failures. Patch by Stefano Revera. [bug=1375721]

* The warning when you pass in a filename or URL as markup will now be
displayed correctly even if the filename or URL is a Unicode
string. [bug=1268888]

* If the initial <html> tag contains a CDATA list attribute such as
'class', the html5lib tree builder will now turn its value into a
list, as it would with any other tag. [bug=1296481]

* Fixed an import error in Python 3.5 caused by the removal of the
HTMLParseError class. [bug=1420063]

* Improved docstring for encode_contents() and
decode_contents(). [bug=1441543]

* Fixed a crash in Unicode, Dammit's encoding detector when the name
of the encoding itself contained invalid bytes. [bug=1360913]

* Improved the exception raised when you call .unwrap() or
.replace_with() on an element that's not attached to a tree.

* Raise a NotImplementedError whenever an unsupported CSS pseudoclass
is used in select(). Previously some cases did not result in a
NotImplementedError.

* It's now possible to pickle a BeautifulSoup object no matter which
tree builder was used to create it. However, the only tree builder
that survives the pickling process is the HTMLParserTreeBuilder
('html.parser'). If you unpickle a BeautifulSoup object created with
some other tree builder, soup.builder will be None. [bug=1231545]
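
For example (a minimal sketch; with html.parser the builder itself
survives the round trip):

    import pickle
    from bs4 import BeautifulSoup

    soup = BeautifulSoup('<p>hi</p>', 'html.parser')
    restored = pickle.loads(pickle.dumps(soup))
    print(restored.p.string)  # hi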

= 4.3.2 (20131002) =

* Fixed a bug in which short Unicode input was improperly encoded to
@@ -1104,7 +331,7 @@ Bug fixes:
* Renamed Tag.nsprefix to Tag.prefix, for consistency with
NamespacedAttribute.

* Fixed a test failure that occurred on Python 3.x when chardet was
* Fixed a test failure that occured on Python 3.x when chardet was
installed.

* Made prettify() return Unicode by default, so it will look nice on
@@ -1138,7 +365,7 @@ Bug fixes:

* Restored compatibility with Python 2.6.

* The install process no longer installs docs or auxiliary text files.
* The install process no longer installs docs or auxillary text files.

* It's now possible to deepcopy a BeautifulSoup object created with
Python's built-in HTML parser.
@@ -1377,7 +604,7 @@ Added an import that makes BS work in Python 2.3.
Fixed a UnicodeDecodeError when unpickling documents that contain
non-ASCII characters.

Fixed a TypeError that occurred in some circumstances when a tag
Fixed a TypeError that occured in some circumstances when a tag
contained no text.

Jump through hoops to avoid the use of chardet, which can be extremely
@@ -1,99 +1,65 @@
"""Beautiful Soup Elixir and Tonic - "The Screen-Scraper's Friend".

"""Beautiful Soup
Elixir and Tonic
"The Screen-Scraper's Friend"
http://www.crummy.com/software/BeautifulSoup/

Beautiful Soup uses a pluggable XML or HTML parser to parse a
(possibly invalid) document into a tree representation. Beautiful Soup
provides methods and Pythonic idioms that make it easy to navigate,
search, and modify the parse tree.
provides provides methods and Pythonic idioms that make it easy to
navigate, search, and modify the parse tree.

Beautiful Soup works with Python 3.6 and up. It works better if lxml
Beautiful Soup works with Python 2.6 and up. It works better if lxml
and/or html5lib is installed.

For more than you ever wanted to know about Beautiful Soup, see the
documentation: http://www.crummy.com/software/BeautifulSoup/bs4/doc/
documentation:
http://www.crummy.com/software/BeautifulSoup/bs4/doc/
"""

__author__ = "Leonard Richardson (leonardr@segfault.org)"
__version__ = "4.12.3"
__copyright__ = "Copyright (c) 2004-2024 Leonard Richardson"
# Use of this source code is governed by the MIT license.
__version__ = "4.4.1"
__copyright__ = "Copyright (c) 2004-2015 Leonard Richardson"
__license__ = "MIT"

__all__ = ['BeautifulSoup']

from collections import Counter
import os
import re
import sys
import traceback
import warnings

# The very first thing we do is give a useful error if someone is
# running this code under Python 2.
if sys.version_info.major < 3:
raise ImportError('You are trying to use a Python 3-specific version of Beautiful Soup under Python 2. This will not work. The final version of Beautiful Soup to support Python 2 was 4.9.3.')

from .builder import (
builder_registry,
ParserRejectedMarkup,
XMLParsedAsHTMLWarning,
HTMLParserTreeBuilder
)
from .builder import builder_registry, ParserRejectedMarkup
from .dammit import UnicodeDammit
from .element import (
CData,
Comment,
CSS,
DEFAULT_OUTPUT_ENCODING,
Declaration,
Doctype,
NavigableString,
PageElement,
ProcessingInstruction,
PYTHON_SPECIFIC_ENCODINGS,
ResultSet,
Script,
Stylesheet,
SoupStrainer,
Tag,
TemplateString,
)

# Define some custom warnings.
class GuessedAtParserWarning(UserWarning):
    """The warning issued when BeautifulSoup has to guess what parser to
    use -- probably because no parser was specified in the constructor.
    """
# The very first thing we do is give a useful error if someone is
# running this code under Python 3 without converting it.
'You are trying to run the Python 2 version of Beautiful Soup under Python 3. This will not work.'<>'You need to convert the code, either by installing it (`python setup.py install`) or by running 2to3 (`2to3 -w bs4`).'

class MarkupResemblesLocatorWarning(UserWarning):
"""The warning issued when BeautifulSoup is given 'markup' that
actually looks like a resource locator -- a URL or a path to a file
on disk.
"""


class BeautifulSoup(Tag):
"""A data structure representing a parsed HTML or XML document.
"""
This class defines the basic interface called by the tree builders.

Most of the methods you'll call on a BeautifulSoup object are inherited from
PageElement or Tag.

Internally, this class defines the basic interface called by the
tree builders when converting an HTML/XML document into a data
structure. The interface abstracts away the differences between
parsers. To write a new tree builder, you'll need to understand
these methods as a whole.

These methods will be called by the BeautifulSoup constructor:
* reset()
* feed(markup)
These methods will be called by the parser:
reset()
feed(markup)

The tree builder may call these methods from its feed() implementation:
* handle_starttag(name, attrs) # See note about return value
* handle_endtag(name)
* handle_data(data) # Appends to the current data node
* endData(containerClass) # Ends the current data node
handle_starttag(name, attrs) # See note about return value
handle_endtag(name)
handle_data(data) # Appends to the current data node
endData(containerClass=NavigableString) # Ends the current data node

No matter how complicated the underlying parser is, you should be
able to build a tree using 'start tag' events, 'end tag' events,
@@ -103,77 +69,24 @@ class BeautifulSoup(Tag):
like HTML's <br> tag), call handle_starttag and then
handle_endtag.
"""

# Since BeautifulSoup subclasses Tag, it's possible to treat it as
# a Tag with a .name. This name makes it clear the BeautifulSoup
# object isn't a real markup tag.
ROOT_TAG_NAME = '[document]'

# If the end-user gives no indication which tree builder they
# want, look for one with these features.
DEFAULT_BUILDER_FEATURES = ['html', 'fast']

# A string containing all ASCII whitespace characters, used in
# endData() to detect data chunks that seem 'empty'.
ASCII_SPACES = '\x20\x0a\x09\x0c\x0d'

NO_PARSER_SPECIFIED_WARNING = "No parser was explicitly specified, so I'm using the best available %(markup_type)s parser for this system (\"%(parser)s\"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.\n\nThe code that caused this warning is on line %(line_number)s of the file %(filename)s. To get rid of this warning, pass the additional argument 'features=\"%(parser)s\"' to the BeautifulSoup constructor.\n"

NO_PARSER_SPECIFIED_WARNING = "No parser was explicitly specified, so I'm using the best available %(markup_type)s parser for this system (\"%(parser)s\"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.\n\nTo get rid of this warning, change this:\n\n BeautifulSoup([your markup])\n\nto this:\n\n BeautifulSoup([your markup], \"%(parser)s\")\n"

def __init__(self, markup="", features=None, builder=None,
parse_only=None, from_encoding=None, exclude_encodings=None,
element_classes=None, **kwargs):
"""Constructor.
**kwargs):
"""The Soup object is initialized as the 'root tag', and the
provided markup (which can be a string or a file-like object)
is fed into the underlying parser."""

:param markup: A string or a file-like object representing
markup to be parsed.

:param features: Desirable features of the parser to be
used. This may be the name of a specific parser ("lxml",
"lxml-xml", "html.parser", or "html5lib") or it may be the
type of markup to be used ("html", "html5", "xml"). It's
recommended that you name a specific parser, so that
Beautiful Soup gives you the same results across platforms
and virtual environments.

:param builder: A TreeBuilder subclass to instantiate (or
instance to use) instead of looking one up based on
`features`. You only need to use this if you've implemented a
custom TreeBuilder.

:param parse_only: A SoupStrainer. Only parts of the document
matching the SoupStrainer will be considered. This is useful
when parsing part of a document that would otherwise be too
large to fit into memory.

:param from_encoding: A string indicating the encoding of the
document to be parsed. Pass this in if Beautiful Soup is
guessing wrongly about the document's encoding.

:param exclude_encodings: A list of strings indicating
encodings known to be wrong. Pass this in if you don't know
the document's encoding but you know Beautiful Soup's guess is
wrong.

:param element_classes: A dictionary mapping BeautifulSoup
classes like Tag and NavigableString, to other classes you'd
like to be instantiated instead as the parse tree is
built. This is useful for subclassing Tag or NavigableString
to modify default behavior.

:param kwargs: For backwards compatibility purposes, the
constructor accepts certain keyword arguments used in
Beautiful Soup 3. None of these arguments do anything in
Beautiful Soup 4; they will result in a warning and then be
ignored.

Apart from this, any keyword arguments passed into the
BeautifulSoup constructor are propagated to the TreeBuilder
constructor. This makes it possible to configure a
TreeBuilder by passing in arguments, not just by saying which
one to use.
"""
if 'convertEntities' in kwargs:
del kwargs['convertEntities']
warnings.warn(
"BS4 does not respect the convertEntities argument to the "
"BeautifulSoup constructor. Entities are always converted "
@@ -212,10 +125,10 @@ class BeautifulSoup(Tag):
if old_name in kwargs:
warnings.warn(
'The "%s" argument to the BeautifulSoup constructor '
'has been renamed to "%s."' % (old_name, new_name),
DeprecationWarning, stacklevel=3
)
return kwargs.pop(old_name)
'has been renamed to "%s."' % (old_name, new_name))
value = kwargs[old_name]
del kwargs[old_name]
return value
return None

parse_only = parse_only or deprecated_argument(
@@ -224,23 +137,13 @@ class BeautifulSoup(Tag):
from_encoding = from_encoding or deprecated_argument(
"fromEncoding", "from_encoding")

if from_encoding and isinstance(markup, str):
warnings.warn("You provided Unicode markup but also provided a value for from_encoding. Your from_encoding will be ignored.")
from_encoding = None
if len(kwargs) > 0:
arg = list(kwargs.keys()).pop()
raise TypeError(
"__init__() got an unexpected keyword argument '%s'" % arg)

self.element_classes = element_classes or dict()

# We need this information to track whether or not the builder
# was specified well enough that we can omit the 'you need to
# specify a parser' warning.
original_builder = builder
original_features = features

if isinstance(builder, type):
# A builder class was passed in; it needs to be instantiated.
builder_class = builder
builder = None
elif builder is None:
if builder is None:
original_features = features
if isinstance(features, str):
features = [features]
if features is None or len(features) == 0:
@@ -251,227 +154,85 @@ class BeautifulSoup(Tag):
"Couldn't find a tree builder with the features you "
"requested: %s. Do you need to install a parser library?"
% ",".join(features))

# At this point either we have a TreeBuilder instance in
# builder, or we have a builder_class that we can instantiate
# with the remaining **kwargs.
if builder is None:
builder = builder_class(**kwargs)
if not original_builder and not (
original_features == builder.NAME or
original_features in builder.ALTERNATE_NAMES
) and markup:
# The user did not tell us which TreeBuilder to use,
# and we had to guess. Issue a warning.
builder = builder_class()
if not (original_features == builder.NAME or
original_features in builder.ALTERNATE_NAMES):
if builder.is_xml:
markup_type = "XML"
else:
markup_type = "HTML"
warnings.warn(self.NO_PARSER_SPECIFIED_WARNING % dict(
parser=builder.NAME,
markup_type=markup_type))

# This code adapted from warnings.py so that we get the same line
# of code as our warnings.warn() call gets, even if the answer is wrong
# (as it may be in a multithreading situation).
caller = None
try:
caller = sys._getframe(1)
except ValueError:
pass
if caller:
globals = caller.f_globals
line_number = caller.f_lineno
else:
globals = sys.__dict__
line_number= 1
filename = globals.get('__file__')
if filename:
fnl = filename.lower()
if fnl.endswith((".pyc", ".pyo")):
filename = filename[:-1]
if filename:
# If there is no filename at all, the user is most likely in a REPL,
# and the warning is not necessary.
values = dict(
filename=filename,
line_number=line_number,
parser=builder.NAME,
markup_type=markup_type
)
warnings.warn(
self.NO_PARSER_SPECIFIED_WARNING % values,
GuessedAtParserWarning, stacklevel=2
)
else:
if kwargs:
warnings.warn("Keyword arguments to the BeautifulSoup constructor will be ignored. These would normally be passed into the TreeBuilder constructor, but a TreeBuilder instance was passed in as `builder`.")

self.builder = builder
self.is_xml = builder.is_xml
self.known_xml = self.is_xml
self._namespaces = dict()
self.builder.soup = self

self.parse_only = parse_only

if hasattr(markup, 'read'): # It's a file-type object.
markup = markup.read()
elif len(markup) <= 256 and (
(isinstance(markup, bytes) and not b'<' in markup)
or (isinstance(markup, str) and not '<' in markup)
):
# Issue warnings for a couple beginner problems
elif len(markup) <= 256:
# Print out warnings for a couple beginner problems
# involving passing non-markup to Beautiful Soup.
# Beautiful Soup will still parse the input as markup,
# since that is sometimes the intended behavior.
if not self._markup_is_url(markup):
self._markup_resembles_filename(markup)
# just in case that's what the user really wants.
if (isinstance(markup, str)
and not os.path.supports_unicode_filenames):
possible_filename = markup.encode("utf8")
else:
possible_filename = markup
is_file = False
try:
is_file = os.path.exists(possible_filename)
except Exception as e:
# This is almost certainly a problem involving
# characters not valid in filenames on this
# system. Just let it go.
pass
if is_file:
if isinstance(markup, str):
markup = markup.encode("utf8")
warnings.warn(
'"%s" looks like a filename, not markup. You should probably open this file and pass the filehandle into Beautiful Soup.' % markup)
if markup[:5] == "http:" or markup[:6] == "https:":
# TODO: This is ugly but I couldn't get it to work in
# Python 3 otherwise.
if ((isinstance(markup, bytes) and not b' ' in markup)
or (isinstance(markup, str) and not ' ' in markup)):
if isinstance(markup, str):
markup = markup.encode("utf8")
warnings.warn(
'"%s" looks like a URL. Beautiful Soup is not an HTTP client. You should probably use an HTTP client to get the document behind the URL, and feed that document to Beautiful Soup.' % markup)

rejections = []
success = False
for (self.markup, self.original_encoding, self.declared_html_encoding,
self.contains_replacement_characters) in (
self.builder.prepare_markup(
markup, from_encoding, exclude_encodings=exclude_encodings)):
self.reset()
self.builder.initialize_soup(self)
try:
self._feed()
success = True
break
except ParserRejectedMarkup as e:
rejections.append(e)
except ParserRejectedMarkup:
pass

if not success:
other_exceptions = [str(e) for e in rejections]
raise ParserRejectedMarkup(
"The markup you provided was rejected by the parser. Trying a different parser or a different encoding may help.\n\nOriginal exception(s) from parser:\n " + "\n ".join(other_exceptions)
)

# Clear out the markup and remove the builder's circular
# reference to this object.
self.markup = None
self.builder.soup = None

def _clone(self):
"""Create a new BeautifulSoup object with the same TreeBuilder,
but not associated with any markup.
def __copy__(self):
return type(self)(self.encode(), builder=self.builder)

This is the first step of the deepcopy process.
"""
clone = type(self)("", None, self.builder)

# Keep track of the encoding of the original document,
# since we won't be parsing it again.
clone.original_encoding = self.original_encoding
return clone

def __getstate__(self):
# Frequently a tree builder can't be pickled.
d = dict(self.__dict__)
if 'builder' in d and d['builder'] is not None and not self.builder.picklable:
d['builder'] = type(self.builder)
# Store the contents as a Unicode string.
d['contents'] = []
d['markup'] = self.decode()

# If _most_recent_element is present, it's a Tag object left
# over from initial parse. It might not be picklable and we
# don't need it.
if '_most_recent_element' in d:
del d['_most_recent_element']
if 'builder' in d and not self.builder.picklable:
del d['builder']
return d

def __setstate__(self, state):
# If necessary, restore the TreeBuilder by looking it up.
self.__dict__ = state
if isinstance(self.builder, type):
self.builder = self.builder()
elif not self.builder:
# We don't know which builder was used to build this
# parse tree, so use a default we know is always available.
self.builder = HTMLParserTreeBuilder()
self.builder.soup = self
self.reset()
self._feed()
return state


@classmethod
def _decode_markup(cls, markup):
"""Ensure `markup` is bytes so it's safe to send into warnings.warn.

TODO: warnings.warn had this problem back in 2010 but it might not
anymore.
"""
if isinstance(markup, bytes):
decoded = markup.decode('utf-8', 'replace')
else:
decoded = markup
return decoded

@classmethod
def _markup_is_url(cls, markup):
"""Error-handling method to raise a warning if incoming markup looks
like a URL.

:param markup: A string.
:return: Whether or not the markup resembles a URL
closely enough to justify a warning.
"""
if isinstance(markup, bytes):
space = b' '
cant_start_with = (b"http:", b"https:")
elif isinstance(markup, str):
space = ' '
cant_start_with = ("http:", "https:")
else:
return False

if any(markup.startswith(prefix) for prefix in cant_start_with):
if not space in markup:
warnings.warn(
'The input looks more like a URL than markup. You may want to use'
' an HTTP client like requests to get the document behind'
' the URL, and feed that document to Beautiful Soup.',
MarkupResemblesLocatorWarning,
stacklevel=3
)
return True
return False

@classmethod
def _markup_resembles_filename(cls, markup):
"""Error-handling method to raise a warning if incoming markup
resembles a filename.

:param markup: A bytestring or string.
:return: Whether or not the markup resembles a filename
closely enough to justify a warning.
"""
path_characters = '/\\'
extensions = ['.html', '.htm', '.xml', '.xhtml', '.txt']
if isinstance(markup, bytes):
path_characters = path_characters.encode("utf8")
extensions = [x.encode('utf8') for x in extensions]
filelike = False
if any(x in markup for x in path_characters):
filelike = True
else:
lower = markup.lower()
if any(lower.endswith(ext) for ext in extensions):
filelike = True
if filelike:
warnings.warn(
'The input looks more like a filename than markup. You may'
' want to open this file and pass the filehandle into'
' Beautiful Soup.',
MarkupResemblesLocatorWarning, stacklevel=3
)
return True
return False

def _feed(self):
"""Internal method that parses previously set markup, creating a large
number of Tag and NavigableString objects.
"""
# Convert the document to Unicode.
self.builder.reset()

@@ -482,111 +243,48 @@ class BeautifulSoup(Tag):
self.popTag()

def reset(self):
"""Reset this object to a state as though it had never parsed any
markup.
"""
Tag.__init__(self, self, self.builder, self.ROOT_TAG_NAME)
self.hidden = 1
self.builder.reset()
self.current_data = []
self.currentTag = None
self.tagStack = []
self.open_tag_counter = Counter()
self.preserve_whitespace_tag_stack = []
self.string_container_stack = []
self._most_recent_element = None
self.pushTag(self)

def new_tag(self, name, namespace=None, nsprefix=None, attrs={},
sourceline=None, sourcepos=None, **kwattrs):
"""Create a new Tag associated with this BeautifulSoup object.
def new_tag(self, name, namespace=None, nsprefix=None, **attrs):
"""Create a new tag associated with this soup."""
return Tag(None, self.builder, name, namespace, nsprefix, attrs)

:param name: The name of the new Tag.
:param namespace: The URI of the new Tag's XML namespace, if any.
:param prefix: The prefix for the new Tag's XML namespace, if any.
:param attrs: A dictionary of this Tag's attribute values; can
be used instead of `kwattrs` for attributes like 'class'
that are reserved words in Python.
:param sourceline: The line number where this tag was
(purportedly) found in its source document.
:param sourcepos: The character position within `sourceline` where this
tag was (purportedly) found.
:param kwattrs: Keyword arguments for the new Tag's attribute values.
def new_string(self, s, subclass=NavigableString):
"""Create a new NavigableString associated with this soup."""
return subclass(s)

"""
kwattrs.update(attrs)
return self.element_classes.get(Tag, Tag)(
None, self.builder, name, namespace, nsprefix, kwattrs,
sourceline=sourceline, sourcepos=sourcepos
)

def string_container(self, base_class=None):
container = base_class or NavigableString

# There may be a general override of NavigableString.
container = self.element_classes.get(
container, container
)

# On top of that, we may be inside a tag that needs a special
# container class.
if self.string_container_stack and container is NavigableString:
container = self.builder.string_containers.get(
self.string_container_stack[-1].name, container
)
return container

def new_string(self, s, subclass=None):
"""Create a new NavigableString associated with this BeautifulSoup
object.
"""
container = self.string_container(subclass)
return container(s)

def insert_before(self, *args):
"""This method is part of the PageElement API, but `BeautifulSoup` doesn't implement
it because there is nothing before or after it in the parse tree.
"""
def insert_before(self, successor):
raise NotImplementedError("BeautifulSoup objects don't support insert_before().")

def insert_after(self, *args):
"""This method is part of the PageElement API, but `BeautifulSoup` doesn't implement
it because there is nothing before or after it in the parse tree.
"""
def insert_after(self, successor):
raise NotImplementedError("BeautifulSoup objects don't support insert_after().")

def popTag(self):
"""Internal method called by _popToTag when a tag is closed."""
tag = self.tagStack.pop()
if tag.name in self.open_tag_counter:
self.open_tag_counter[tag.name] -= 1
if self.preserve_whitespace_tag_stack and tag == self.preserve_whitespace_tag_stack[-1]:
self.preserve_whitespace_tag_stack.pop()
if self.string_container_stack and tag == self.string_container_stack[-1]:
self.string_container_stack.pop()
#print("Pop", tag.name)
#print "Pop", tag.name
if self.tagStack:
self.currentTag = self.tagStack[-1]
return self.currentTag

def pushTag(self, tag):
"""Internal method called by handle_starttag when a tag is opened."""
#print("Push", tag.name)
if self.currentTag is not None:
#print "Push", tag.name
if self.currentTag:
self.currentTag.contents.append(tag)
self.tagStack.append(tag)
self.currentTag = self.tagStack[-1]
if tag.name != self.ROOT_TAG_NAME:
self.open_tag_counter[tag.name] += 1
if tag.name in self.builder.preserve_whitespace_tags:
self.preserve_whitespace_tag_stack.append(tag)
if tag.name in self.builder.string_containers:
self.string_container_stack.append(tag)

def endData(self, containerClass=None):
"""Method called by the TreeBuilder when the end of a data segment
occurs.
"""
def endData(self, containerClass=NavigableString):
if self.current_data:
current_data = ''.join(self.current_data)
# If whitespace is not preserved, and this string contains
@@ -613,93 +311,61 @@ class BeautifulSoup(Tag):
not self.parse_only.search(current_data)):
return

containerClass = self.string_container(containerClass)
o = containerClass(current_data)
self.object_was_parsed(o)

def object_was_parsed(self, o, parent=None, most_recent_element=None):
"""Method called by the TreeBuilder to integrate an object into the parse tree."""
if parent is None:
parent = self.currentTag
if most_recent_element is not None:
previous_element = most_recent_element
else:
previous_element = self._most_recent_element
"""Add an object to the parse tree."""
parent = parent or self.currentTag
previous_element = most_recent_element or self._most_recent_element

next_element = previous_sibling = next_sibling = None
if isinstance(o, Tag):
next_element = o.next_element
next_sibling = o.next_sibling
previous_sibling = o.previous_sibling
if previous_element is None:
if not previous_element:
previous_element = o.previous_element

fix = parent.next_element is not None

o.setup(parent, previous_element, next_element, previous_sibling, next_sibling)

self._most_recent_element = o
parent.contents.append(o)

# Check if we are inserting into an already parsed node.
if fix:
self._linkage_fixer(parent)
if parent.next_sibling:
# This node is being inserted into an element that has
# already been parsed. Deal with any dangling references.
index = parent.contents.index(o)
if index == 0:
previous_element = parent
previous_sibling = None
else:
previous_element = previous_sibling = parent.contents[index-1]
if index == len(parent.contents)-1:
next_element = parent.next_sibling
next_sibling = None
else:
next_element = next_sibling = parent.contents[index+1]

def _linkage_fixer(self, el):
"""Make sure linkage of this fragment is sound."""

first = el.contents[0]
child = el.contents[-1]
descendant = child

if child is first and el.parent is not None:
# Parent should be linked to first child
el.next_element = child
# We are no longer linked to whatever this element is
prev_el = child.previous_element
if prev_el is not None and prev_el is not el:
prev_el.next_element = None
# First child should be linked to the parent, and no previous siblings.
child.previous_element = el
child.previous_sibling = None

# We have no sibling as we've been appended as the last.
child.next_sibling = None

# This index is a tag, dig deeper for a "last descendant"
if isinstance(child, Tag) and child.contents:
descendant = child._last_descendant(False)

# As the final step, link last descendant. It should be linked
# to the parent's next sibling (if found), else walk up the chain
# and find a parent with a sibling. It should have no next sibling.
descendant.next_element = None
descendant.next_sibling = None
target = el
while True:
if target is None:
break
elif target.next_sibling is not None:
descendant.next_element = target.next_sibling
target.next_sibling.previous_element = child
break
target = target.parent
o.previous_element = previous_element
if previous_element:
previous_element.next_element = o
o.next_element = next_element
if next_element:
next_element.previous_element = o
o.next_sibling = next_sibling
if next_sibling:
next_sibling.previous_sibling = o
o.previous_sibling = previous_sibling
if previous_sibling:
previous_sibling.next_sibling = o

def _popToTag(self, name, nsprefix=None, inclusivePop=True):
"""Pops the tag stack up to and including the most recent
instance of the given tag.

If there are no open tags with the given name, nothing will be
popped.

:param name: Pop up to the most recent tag with this name.
:param nsprefix: The namespace prefix that goes with `name`.
:param inclusivePop: It this is false, pops the tag stack up
to but *not* including the most recent instqance of the
given tag.

"""
#print("Popping to %s" % name)
instance of the given tag. If inclusivePop is false, pops the tag
stack up to but *not* including the most recent instqance of
the given tag."""
#print "Popping to %s" % name
if name == self.ROOT_TAG_NAME:
# The BeautifulSoup object itself can never be popped.
return
@@ -708,8 +374,6 @@ class BeautifulSoup(Tag):

stack_size = len(self.tagStack)
for i in range(stack_size - 1, 0, -1):
if not self.open_tag_counter.get(name):
break
t = self.tagStack[i]
if (name == t.name and nsprefix == t.prefix):
if inclusivePop:
@@ -719,26 +383,16 @@ class BeautifulSoup(Tag):

return most_recently_popped

def handle_starttag(self, name, namespace, nsprefix, attrs, sourceline=None,
sourcepos=None, namespaces=None):
"""Called by the tree builder when a new tag is encountered.
def handle_starttag(self, name, namespace, nsprefix, attrs):
"""Push a start tag on to the stack.

:param name: Name of the tag.
:param nsprefix: Namespace prefix for the tag.
:param attrs: A dictionary of attribute values.
:param sourceline: The line number where this tag was found in its
source document.
:param sourcepos: The character position within `sourceline` where this
tag was found.
:param namespaces: A dictionary of all namespace prefix mappings
currently in scope in the document.

If this method returns None, the tag was rejected by an active
SoupStrainer. You should proceed as if the tag had not occurred
If this method returns None, the tag was rejected by the
SoupStrainer. You should proceed as if the tag had not occured
in the document. For instance, if this was a self-closing tag,
don't call handle_endtag.
"""
# print("Start tag %s: %s" % (name, attrs))

# print "Start tag %s: %s" % (name, attrs)
self.endData()

if (self.parse_only and len(self.tagStack) <= 1
@@ -746,54 +400,34 @@ class BeautifulSoup(Tag):
or not self.parse_only.search_tag(name, attrs))):
return None

tag = self.element_classes.get(Tag, Tag)(
self, self.builder, name, namespace, nsprefix, attrs,
self.currentTag, self._most_recent_element,
sourceline=sourceline, sourcepos=sourcepos,
namespaces=namespaces
)
tag = Tag(self, self.builder, name, namespace, nsprefix, attrs,
self.currentTag, self._most_recent_element)
if tag is None:
return tag
if self._most_recent_element is not None:
if self._most_recent_element:
self._most_recent_element.next_element = tag
self._most_recent_element = tag
self.pushTag(tag)
return tag

def handle_endtag(self, name, nsprefix=None):
"""Called by the tree builder when an ending tag is encountered.

:param name: Name of the tag.
:param nsprefix: Namespace prefix for the tag.
"""
#print("End tag: " + name)
#print "End tag: " + name
self.endData()
self._popToTag(name, nsprefix)


def handle_data(self, data):
"""Called by the tree builder when a chunk of textual data is encountered."""
self.current_data.append(data)


def decode(self, pretty_print=False,
eventual_encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal", iterator=None):
"""Returns a string or Unicode representation of the parse tree
as an HTML or XML document.
formatter="minimal"):
"""Returns a string or Unicode representation of this document.
To get Unicode, pass None for encoding."""

:param pretty_print: If this is True, indentation will be used to
make the document more readable.
:param eventual_encoding: The encoding of the final document.
If this is None, the document will be a Unicode string.
"""
if self.is_xml:
# Print the XML declaration
encoding_part = ''
if eventual_encoding in PYTHON_SPECIFIC_ENCODINGS:
# This is a special Python encoding; it can't actually
# go into an XML document because it means nothing
# outside of Python.
eventual_encoding = None
if eventual_encoding != None:
if eventual_encoding is not None:
encoding_part = ' encoding="%s"' % eventual_encoding
prefix = '<?xml version="1.0"%s?>\n' % encoding_part
else:
@@ -803,9 +437,9 @@ class BeautifulSoup(Tag):
else:
indent_level = 0
return prefix + super(BeautifulSoup, self).decode(
indent_level, eventual_encoding, formatter, iterator)
indent_level, eventual_encoding, formatter)

# Aliases to make it easier to get started quickly, e.g. 'from bs4 import _soup'
# Alias to make it easier to type import: 'from bs4 import _soup'
_s = BeautifulSoup
_soup = BeautifulSoup
|
||||
|
||||
@@ -816,25 +450,19 @@ class BeautifulStoneSoup(BeautifulSoup):
|
||||
kwargs['features'] = 'xml'
|
||||
warnings.warn(
|
||||
'The BeautifulStoneSoup class is deprecated. Instead of using '
|
||||
'it, pass features="xml" into the BeautifulSoup constructor.',
|
||||
DeprecationWarning, stacklevel=2
|
||||
)
|
||||
'it, pass features="xml" into the BeautifulSoup constructor.')
|
||||
super(BeautifulStoneSoup, self).__init__(*args, **kwargs)
|
||||
|
||||
|
||||
class StopParsing(Exception):
|
||||
"""Exception raised by a TreeBuilder if it's unable to continue parsing."""
|
||||
pass
|
||||
|
||||
class FeatureNotFound(ValueError):
|
||||
"""Exception raised by the BeautifulSoup constructor if no parser with the
|
||||
requested features is found.
|
||||
"""
|
||||
pass
|
||||
|
||||
|
||||
#If this file is run as a script, act as an HTML pretty-printer.
|
||||
#By default, act as an HTML pretty-printer.
|
||||
if __name__ == '__main__':
|
||||
import sys
|
||||
soup = BeautifulSoup(sys.stdin)
|
||||
print((soup.prettify()))
|
||||
print(soup.prettify())
|
||||
|
||||
@@ -1,21 +1,11 @@
# Use of this source code is governed by the MIT license.
__license__ = "MIT"

from collections import defaultdict
import itertools
import re
import warnings
import sys
from bs4.element import (
    CharsetMetaAttributeValue,
    ContentMetaAttributeValue,
    RubyParenthesisString,
    RubyTextString,
    Stylesheet,
    Script,
    TemplateString,
    nonwhitespace_re
)
    whitespace_re
)

__all__ = [
    'HTMLTreeBuilder',
@@ -32,41 +22,20 @@ XML = 'xml'
HTML = 'html'
HTML_5 = 'html5'

class XMLParsedAsHTMLWarning(UserWarning):
    """The warning issued when an HTML parser is used to parse
    XML that is not XHTML.
    """
    MESSAGE = """It looks like you're parsing an XML document using an HTML parser. If this really is an HTML document (maybe it's XHTML?), you can ignore or filter this warning. If it's XML, you should know that using an XML parser will be more reliable. To parse this document as XML, make sure you have the lxml package installed, and pass the keyword argument `features="xml"` into the BeautifulSoup constructor."""


class TreeBuilderRegistry(object):
    """A way of looking up TreeBuilder subclasses by their name or by desired
    features.
    """

    def __init__(self):
        self.builders_for_feature = defaultdict(list)
        self.builders = []

    def register(self, treebuilder_class):
        """Register a treebuilder based on its advertised features.

        :param treebuilder_class: A subclass of Treebuilder. its .features
            attribute should list its features.
        """
        """Register a treebuilder based on its advertised features."""
        for feature in treebuilder_class.features:
            self.builders_for_feature[feature].insert(0, treebuilder_class)
        self.builders.insert(0, treebuilder_class)

    def lookup(self, *features):
        """Look up a TreeBuilder subclass with the desired features.

        :param features: A list of features to look for. If none are
            provided, the most recently registered TreeBuilder subclass
            will be used.
        :return: A TreeBuilder subclass, or None if there's no
            registered subclass with all the requested features.
        """
        if len(self.builders) == 0:
            # There are no builders at all.
            return None
@@ -109,7 +78,7 @@ class TreeBuilderRegistry(object):

builder_registry = TreeBuilderRegistry()

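The shared registry above can be exercised directly; which class answers a given feature set depends on what is installed in the environment (html.parser is always present):

    from bs4.builder import builder_registry

    builder_registry.lookup('html.parser')      # -> HTMLParserTreeBuilder
    builder_registry.lookup('xml', 'fast')      # -> lxml's XML builder, or None without lxml
    builder_registry.lookup('no-such-feature')  # -> None
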
class TreeBuilder(object):
    """Turn a textual document into a Beautiful Soup object tree."""
    """Turn a document into a Beautiful Soup object tree."""

    NAME = "[Unknown tree builder]"
    ALTERNATE_NAMES = []
@@ -117,89 +86,19 @@ class TreeBuilder(object):

    is_xml = False
    picklable = False
    preserve_whitespace_tags = set()
    empty_element_tags = None # A tag will be considered an empty-element
                              # tag when and only when it has no contents.


    # A value for these tag/attribute combinations is a space- or
    # comma-separated list of CDATA, rather than a single CDATA.
    DEFAULT_CDATA_LIST_ATTRIBUTES = defaultdict(list)
    cdata_list_attributes = {}

    # Whitespace should be preserved inside these tags.
    DEFAULT_PRESERVE_WHITESPACE_TAGS = set()

    # The textual contents of tags with these names should be
    # instantiated with some class other than NavigableString.
    DEFAULT_STRING_CONTAINERS = {}

    USE_DEFAULT = object()

    # Most parsers don't keep track of line numbers.
    TRACKS_LINE_NUMBERS = False

    def __init__(self, multi_valued_attributes=USE_DEFAULT,
                 preserve_whitespace_tags=USE_DEFAULT,
                 store_line_numbers=USE_DEFAULT,
                 string_containers=USE_DEFAULT,
                 ):
        """Constructor.

        :param multi_valued_attributes: If this is set to None, the
            TreeBuilder will not turn any values for attributes like
            'class' into lists. Setting this to a dictionary will
            customize this behavior; look at DEFAULT_CDATA_LIST_ATTRIBUTES
            for an example.

            Internally, these are called "CDATA list attributes", but that
            probably doesn't make sense to an end-user, so the argument name
            is `multi_valued_attributes`.

        :param preserve_whitespace_tags: A list of tags to treat
            the way <pre> tags are treated in HTML. Tags in this list
            are immune from pretty-printing; their contents will always be
            output as-is.

        :param string_containers: A dictionary mapping tag names to
            the classes that should be instantiated to contain the textual
            contents of those tags. The default is to use NavigableString
            for every tag, no matter what the name. You can override the
            default by changing DEFAULT_STRING_CONTAINERS.

        :param store_line_numbers: If the parser keeps track of the
            line numbers and positions of the original markup, that
            information will, by default, be stored in each corresponding
            `Tag` object. You can turn this off by passing
            store_line_numbers=False. If the parser you're using doesn't
            keep track of this information, then setting store_line_numbers=True
            will do nothing.
        """
    def __init__(self):
        self.soup = None
        if multi_valued_attributes is self.USE_DEFAULT:
            multi_valued_attributes = self.DEFAULT_CDATA_LIST_ATTRIBUTES
        self.cdata_list_attributes = multi_valued_attributes
        if preserve_whitespace_tags is self.USE_DEFAULT:
            preserve_whitespace_tags = self.DEFAULT_PRESERVE_WHITESPACE_TAGS
        self.preserve_whitespace_tags = preserve_whitespace_tags
        if store_line_numbers == self.USE_DEFAULT:
            store_line_numbers = self.TRACKS_LINE_NUMBERS
        self.store_line_numbers = store_line_numbers
        if string_containers == self.USE_DEFAULT:
            string_containers = self.DEFAULT_STRING_CONTAINERS
        self.string_containers = string_containers

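The constructor options being removed here are reachable from user code, since BeautifulSoup forwards unrecognized keyword arguments to the tree builder (a sketch, assuming the newer API shown above):

    from bs4 import BeautifulSoup

    soup = BeautifulSoup('<a class="foo bar">x</a>', 'html.parser',
                         multi_valued_attributes=None)
    soup.a['class']   # 'foo bar' -- kept as one string instead of a list
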
    def initialize_soup(self, soup):
        """The BeautifulSoup object has been initialized and is now
        being associated with the TreeBuilder.

        :param soup: A BeautifulSoup object.
        """
        self.soup = soup

    def reset(self):
        """Do any work necessary to reset the underlying parser
        for a new document.

        By default, this does nothing.
        """
        pass

    def can_be_empty_element(self, tag_name):
@@ -211,58 +110,24 @@ class TreeBuilder(object):
        For instance: an HTMLBuilder does not consider a <p> tag to be
        an empty-element tag (it's not in
        HTMLBuilder.empty_element_tags). This means an empty <p> tag
        will be presented as "<p></p>", not "<p/>" or "<p>".
        will be presented as "<p></p>", not "<p />".

        The default implementation has no opinion about which tags are
        empty-element tags, so a tag will be presented as an
        empty-element tag if and only if it has no children.
        "<foo></foo>" will become "<foo/>", and "<foo>bar</foo>" will
        empty-element tag if and only if it has no contents.
        "<foo></foo>" will become "<foo />", and "<foo>bar</foo>" will
        be left alone.

        :param tag_name: The name of a markup tag.
        """
        if self.empty_element_tags is None:
            return True
        return tag_name in self.empty_element_tags


    def feed(self, markup):
        """Run some incoming markup through some parsing process,
        populating the `BeautifulSoup` object in self.soup.

        This method is not implemented in TreeBuilder; it must be
        implemented in subclasses.

        :return: None.
        """
        raise NotImplementedError()

    def prepare_markup(self, markup, user_specified_encoding=None,
                       document_declared_encoding=None, exclude_encodings=None):
        """Run any preliminary steps necessary to make incoming markup
        acceptable to the parser.

        :param markup: Some markup -- probably a bytestring.
        :param user_specified_encoding: The user asked to try this encoding.
        :param document_declared_encoding: The markup itself claims to be
            in this encoding. NOTE: This argument is not used by the
            calling code and can probably be removed.
        :param exclude_encodings: The user asked _not_ to try any of
            these encodings.

        :yield: A series of 4-tuples:
            (markup, encoding, declared encoding,
             has undergone character replacement)

        Each 4-tuple represents a strategy for converting the
        document to Unicode and parsing it. Each strategy will be tried
        in turn.

        By default, the only strategy is to parse the markup
        as-is. See `LXMLTreeBuilderForXML` and
        `HTMLParserTreeBuilder` for implementations that take into
        account the quirks of particular parsers.
        """
        yield markup, None, None, False
                       document_declared_encoding=None):
        return markup, None, None, False

    def test_fragment_to_document(self, fragment):
        """Wrap an HTML fragment to make it look like a document.
@@ -274,36 +139,16 @@ class TreeBuilder(object):
        results against other HTML fragments.

        This method should not be used outside of tests.

        :param fragment: A string -- fragment of HTML.
        :return: A string -- a full HTML document.
        """
        return fragment

    def set_up_substitutions(self, tag):
        """Set up any substitutions that will need to be performed on
        a `Tag` when it's output as a string.

        By default, this does nothing. See `HTMLTreeBuilder` for a
        case where this is used.

        :param tag: A `Tag`
        :return: Whether or not a substitution was performed.
        """
        return False

    def _replace_cdata_list_attribute_values(self, tag_name, attrs):
        """When an attribute value is associated with a tag that can
        have multiple values for that attribute, convert the string
        value to a list of strings.
        """Replaces class="foo bar" with class=["foo", "bar"]

        Basically, replaces class="foo bar" with class=["foo", "bar"]

        NOTE: This method modifies its input in place.

        :param tag_name: The name of a tag.
        :param attrs: A dictionary containing the tag's attributes.
            Any appropriate attribute values will be modified in place.
        Modifies its input in place.
        """
        if not attrs:
            return attrs
@@ -318,7 +163,7 @@ class TreeBuilder(object):
                    # values. Split it into a list.
                    value = attrs[attr]
                    if isinstance(value, str):
                        values = nonwhitespace_re.findall(value)
                        values = whitespace_re.split(value)
                    else:
                        # html5lib sometimes calls setAttributes twice
                        # for the same tag when rearranging the parse
@@ -329,13 +174,9 @@ class TreeBuilder(object):
                        values = value
                    attrs[attr] = values
        return attrs

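The class="foo bar" splitting described above is visible in ordinary use; a short illustration:

    from bs4 import BeautifulSoup

    soup = BeautifulSoup('<a class="foo bar" id="x">y</a>', 'html.parser')
    soup.a['class']   # ['foo', 'bar'] -- 'class' is a multi-valued attribute
    soup.a['id']      # 'x' -- 'id' stays a single string
    str(soup.a)       # '<a class="foo bar" id="x">y</a>' -- re-joined on output
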
class SAXTreeBuilder(TreeBuilder):
    """A Beautiful Soup treebuilder that listens for SAX events.

    This is not currently used for anything, but it demonstrates
    how a simple TreeBuilder would work.
    """
class SAXTreeBuilder(TreeBuilder):
    """A Beautiful Soup treebuilder that listens for SAX events."""

    def feed(self, markup):
        raise NotImplementedError()
@@ -345,11 +186,11 @@ class SAXTreeBuilder(TreeBuilder):

    def startElement(self, name, attrs):
        attrs = dict((key[1], value) for key, value in list(attrs.items()))
        #print("Start %s, %r" % (name, attrs))
        #print "Start %s, %r" % (name, attrs)
        self.soup.handle_starttag(name, attrs)

    def endElement(self, name):
        #print("End %s" % name)
        #print "End %s" % name
        self.soup.handle_endtag(name)

    def startElementNS(self, nsTuple, nodeName, attrs):
@@ -386,44 +227,10 @@ class HTMLTreeBuilder(TreeBuilder):
    Such as which tags are empty-element tags.
    """

    empty_element_tags = set([
        # These are from HTML5.
        'area', 'base', 'br', 'col', 'embed', 'hr', 'img', 'input', 'keygen', 'link', 'menuitem', 'meta', 'param', 'source', 'track', 'wbr',

        # These are from earlier versions of HTML and are removed in HTML5.
        'basefont', 'bgsound', 'command', 'frame', 'image', 'isindex', 'nextid', 'spacer'
    ])
    preserve_whitespace_tags = set(['pre', 'textarea'])
    empty_element_tags = set(['br' , 'hr', 'input', 'img', 'meta',
                              'spacer', 'link', 'frame', 'base'])

    # The HTML standard defines these as block-level elements. Beautiful
    # Soup does not treat these elements differently from other elements,
    # but it may do so eventually, and this information is available if
    # you need to use it.
    block_elements = set(["address", "article", "aside", "blockquote", "canvas", "dd", "div", "dl", "dt", "fieldset", "figcaption", "figure", "footer", "form", "h1", "h2", "h3", "h4", "h5", "h6", "header", "hr", "li", "main", "nav", "noscript", "ol", "output", "p", "pre", "section", "table", "tfoot", "ul", "video"])

    # These HTML tags need special treatment so they can be
    # represented by a string class other than NavigableString.
    #
    # For some of these tags, it's because the HTML standard defines
    # an unusual content model for them. I made this list by going
    # through the HTML spec
    # (https://html.spec.whatwg.org/#metadata-content) and looking for
    # "metadata content" elements that can contain strings.
    #
    # The Ruby tags (<rt> and <rp>) are here despite being normal
    # "phrasing content" tags, because the content they contain is
    # qualitatively different from other text in the document, and it
    # can be useful to be able to distinguish it.
    #
    # TODO: Arguably <noscript> could go here but it seems
    # qualitatively different from the other tags.
    DEFAULT_STRING_CONTAINERS = {
        'rt' : RubyTextString,
        'rp' : RubyParenthesisString,
        'style': Stylesheet,
        'script': Script,
        'template': TemplateString,
    }

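Under the newer string-container mapping above, the strings inside these tags come back as dedicated NavigableString subclasses (a sketch; assumes a bs4 version that ships Stylesheet and Script):

    from bs4 import BeautifulSoup
    from bs4.element import Script, Stylesheet

    soup = BeautifulSoup('<style>p {}</style><script>f()</script>', 'html.parser')
    isinstance(soup.style.string, Stylesheet)   # True
    isinstance(soup.script.string, Script)      # True
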
    # The HTML standard defines these attributes as containing a
    # space-separated list of values, not a single value. That is,
    # class="foo bar" means that the 'class' attribute has two values,
@@ -431,7 +238,7 @@ class HTMLTreeBuilder(TreeBuilder):
    # encounter one of these attributes, we will parse its value into
    # a list of values if possible. Upon output, the list will be
    # converted back into a string.
    DEFAULT_CDATA_LIST_ATTRIBUTES = {
    cdata_list_attributes = {
        "*" : ['class', 'accesskey', 'dropzone'],
        "a" : ['rel', 'rev'],
        "link" : ['rel', 'rev'],
@@ -448,19 +255,7 @@ class HTMLTreeBuilder(TreeBuilder):
        "output" : ["for"],
    }

    DEFAULT_PRESERVE_WHITESPACE_TAGS = set(['pre', 'textarea'])

    def set_up_substitutions(self, tag):
        """Replace the declared encoding in a <meta> tag with a placeholder,
        to be substituted when the tag is output to a string.

        An HTML document may come in to Beautiful Soup as one
        encoding, but exit in a different encoding, and the <meta> tag
        needs to be changed to reflect this.

        :param tag: A `Tag`
        :return: Whether or not a substitution was performed.
        """
        # We are only interested in <meta> tags
        if tag.name != 'meta':
            return False
@@ -493,107 +288,10 @@ class HTMLTreeBuilder(TreeBuilder):

        return (meta_encoding is not None)

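The <meta> substitution above is what keeps the declared charset honest when a document is re-encoded; a minimal sketch:

    from bs4 import BeautifulSoup

    markup = b'<html><head><meta charset="ISO-8859-1"/></head><body>x</body></html>'
    soup = BeautifulSoup(markup, 'html.parser')
    soup.encode('utf-8')
    # The charset in the <meta> tag is rewritten to utf-8 on output,
    # because its value was stored as a CharsetMetaAttributeValue placeholder.
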
class DetectsXMLParsedAsHTML(object):
    """A mixin class for any class (a TreeBuilder, or some class used by a
    TreeBuilder) that's in a position to detect whether an XML
    document is being incorrectly parsed as HTML, and issue an
    appropriate warning.

    This requires being able to observe an incoming processing
    instruction that might be an XML declaration, and also able to
    observe tags as they're opened. If you can't do that for a given
    TreeBuilder, there's a less reliable implementation based on
    examining the raw markup.
    """

    # Regular expression for seeing if markup has an <html> tag.
    LOOKS_LIKE_HTML = re.compile("<[^ +]html", re.I)
    LOOKS_LIKE_HTML_B = re.compile(b"<[^ +]html", re.I)

    XML_PREFIX = '<?xml'
    XML_PREFIX_B = b'<?xml'

    @classmethod
    def warn_if_markup_looks_like_xml(cls, markup, stacklevel=3):
        """Perform a check on some markup to see if it looks like XML
        that's not XHTML. If so, issue a warning.

        This is much less reliable than doing the check while parsing,
        but some of the tree builders can't do that.

        :param stacklevel: The stacklevel of the code calling this
            function.

        :return: True if the markup looks like non-XHTML XML, False
            otherwise.

        """
        if isinstance(markup, bytes):
            prefix = cls.XML_PREFIX_B
            looks_like_html = cls.LOOKS_LIKE_HTML_B
        else:
            prefix = cls.XML_PREFIX
            looks_like_html = cls.LOOKS_LIKE_HTML

        if (markup is not None
            and markup.startswith(prefix)
            and not looks_like_html.search(markup[:500])
        ):
            cls._warn(stacklevel=stacklevel+2)
            return True
        return False

    @classmethod
    def _warn(cls, stacklevel=5):
        """Issue a warning about XML being parsed as HTML."""
        warnings.warn(
            XMLParsedAsHTMLWarning.MESSAGE, XMLParsedAsHTMLWarning,
            stacklevel=stacklevel
        )

    def _initialize_xml_detector(self):
        """Call this method before parsing a document."""
        self._first_processing_instruction = None
        self._root_tag = None

    def _document_might_be_xml(self, processing_instruction):
        """Call this method when encountering an XML declaration, or a
        "processing instruction" that might be an XML declaration.
        """
        if (self._first_processing_instruction is not None
            or self._root_tag is not None):
            # The document has already started. Don't bother checking
            # anymore.
            return

        self._first_processing_instruction = processing_instruction

        # We won't know until we encounter the first tag whether or
        # not this is actually a problem.

    def _root_tag_encountered(self, name):
        """Call this when you encounter the document's root tag.

        This is where we actually check whether an XML document is
        being incorrectly parsed as HTML, and issue the warning.
        """
        if self._root_tag is not None:
            # This method was incorrectly called multiple times. Do
            # nothing.
            return

        self._root_tag = name
        if (name != 'html' and self._first_processing_instruction is not None
            and self._first_processing_instruction.lower().startswith('xml ')):
            # We encountered an XML declaration and then a tag other
            # than 'html'. This is a reliable indicator that a
            # non-XHTML document is being parsed as XML.
            self._warn()


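A quick way to see the detector fire (a sketch using only the standard library parser; the XML declaration followed by a non-html root tag triggers the warning):

    import warnings
    from bs4 import BeautifulSoup

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        BeautifulSoup('<?xml version="1.0"?><rss><channel/></rss>', 'html.parser')
    caught[0].category   # XMLParsedAsHTMLWarning
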
def register_treebuilders_from(module):
    """Copy TreeBuilders from the given module into this module."""
    this_module = sys.modules[__name__]
    # I'm fairly sure this is not the best way to do this.
    this_module = sys.modules['bs4.builder']
    for name in module.__all__:
        obj = getattr(module, name)

@@ -604,22 +302,12 @@ def register_treebuilders_from(module):
            this_module.builder_registry.register(obj)

class ParserRejectedMarkup(Exception):
    """An Exception to be raised when the underlying parser simply
    refuses to parse the given markup.
    """
    def __init__(self, message_or_exception):
        """Explain why the parser rejected the given markup, either
        with a textual explanation or another exception.
        """
        if isinstance(message_or_exception, Exception):
            e = message_or_exception
            message_or_exception = "%s: %s" % (e.__class__.__name__, str(e))
        super(ParserRejectedMarkup, self).__init__(message_or_exception)

    pass

# Builders are registered in reverse order of priority, so that custom
# builder registrations will take precedence. In general, we want lxml
# to take precedence over html5lib, because it's faster. And we only
# want to use HTMLParser as a last resort.
# want to use HTMLParser as a last result.
from . import _htmlparser
register_treebuilders_from(_htmlparser)
try:

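The reverse-priority registration order can be observed through the registry (a sketch; the result depends on which optional parsers are installed):

    from bs4.builder import builder_registry

    # The generic 'html' feature resolves to the most capable installed
    # parser: 'lxml' if available, else 'html5lib', else 'html.parser'.
    builder_registry.lookup('html').NAME
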
@@ -1,14 +1,9 @@
# Use of this source code is governed by the MIT license.
__license__ = "MIT"

__all__ = [
    'HTML5TreeBuilder',
    ]

import warnings
import re
from bs4.builder import (
    DetectsXMLParsedAsHTML,
    PERMISSIVE,
    HTML,
    HTML_5,
@@ -16,13 +11,17 @@ from bs4.builder import (
    )
from bs4.element import (
    NamespacedAttribute,
    nonwhitespace_re,
    whitespace_re,
)
import html5lib
from html5lib.constants import (
    namespaces,
    prefixes,
)
try:
    # html5lib >= 0.99999999/1.0b9
    from html5lib.treebuilders import base as treebuildersbase
except ImportError:
    # html5lib <= 0.9999999/1.0b8
    from html5lib.treebuilders import _base as treebuildersbase
from html5lib.constants import namespaces

from bs4.element import (
    Comment,
    Doctype,
@@ -30,37 +29,13 @@ from bs4.element import (
    Tag,
    )

try:
    # Pre-0.99999999
    from html5lib.treebuilders import _base as treebuilder_base
    new_html5lib = False
except ImportError as e:
    # 0.99999999 and up
    from html5lib.treebuilders import base as treebuilder_base
    new_html5lib = True

class HTML5TreeBuilder(HTMLTreeBuilder):
    """Use html5lib to build a tree.

    Note that this TreeBuilder does not support some features common
    to HTML TreeBuilders. Some of these features could theoretically
    be implemented, but at the very least it's quite difficult,
    because html5lib moves the parse tree around as it's being built.

    * This TreeBuilder doesn't use different subclasses of NavigableString
      based on the name of the tag in which the string was found.

    * You can't use a SoupStrainer to parse only part of a document.
    """
    """Use html5lib to build a tree."""

    NAME = "html5lib"

    features = [NAME, PERMISSIVE, HTML_5, HTML]

    # html5lib can tell us which line number and position in the
    # original file is the source of an element.
    TRACKS_LINE_NUMBERS = True

    def prepare_markup(self, markup, user_specified_encoding,
                       document_declared_encoding=None, exclude_encodings=None):
        # Store the user-specified encoding for use later on.
@@ -70,56 +45,27 @@ class HTML5TreeBuilder(HTMLTreeBuilder):
        # ATM because the html5lib TreeBuilder doesn't use
        # UnicodeDammit.
        if exclude_encodings:
            warnings.warn(
                "You provided a value for exclude_encoding, but the html5lib tree builder doesn't support exclude_encoding.",
                stacklevel=3
            )

        # html5lib only parses HTML, so if it's given XML that's worth
        # noting.
        DetectsXMLParsedAsHTML.warn_if_markup_looks_like_xml(
            markup, stacklevel=3
        )

            warnings.warn("You provided a value for exclude_encoding, but the html5lib tree builder doesn't support exclude_encoding.")
        yield (markup, None, None, False)

    # These methods are defined by Beautiful Soup.
    def feed(self, markup):
        if self.soup.parse_only is not None:
            warnings.warn(
                "You provided a value for parse_only, but the html5lib tree builder doesn't support parse_only. The entire document will be parsed.",
                stacklevel=4
            )
            warnings.warn("You provided a value for parse_only, but the html5lib tree builder doesn't support parse_only. The entire document will be parsed.")
        parser = html5lib.HTMLParser(tree=self.create_treebuilder)
        self.underlying_builder.parser = parser
        extra_kwargs = dict()
        if not isinstance(markup, str):
            if new_html5lib:
                extra_kwargs['override_encoding'] = self.user_specified_encoding
            else:
                extra_kwargs['encoding'] = self.user_specified_encoding
        doc = parser.parse(markup, **extra_kwargs)

        doc = parser.parse(markup, encoding=self.user_specified_encoding)

        # Set the character encoding detected by the tokenizer.
        if isinstance(markup, str):
            # We need to special-case this because html5lib sets
            # charEncoding to UTF-8 if it gets Unicode input.
            doc.original_encoding = None
        else:
            original_encoding = parser.tokenizer.stream.charEncoding[0]
            if not isinstance(original_encoding, str):
                # In 0.99999999 and up, the encoding is an html5lib
                # Encoding object. We want to use a string for compatibility
                # with other tree builders.
                original_encoding = original_encoding.name
            doc.original_encoding = original_encoding
        self.underlying_builder.parser = None

        doc.original_encoding = parser.tokenizer.stream.charEncoding[0]

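Under the newer code path above, original_encoding reflects what html5lib's tokenizer sniffed for byte input and is None for Unicode input (a sketch; assumes the html5lib package is installed):

    from bs4 import BeautifulSoup  # requires the html5lib package

    markup = b'<html><head><meta charset="utf-8"></head><body><p>ok</p></body></html>'
    BeautifulSoup(markup, 'html5lib').original_encoding     # 'utf-8'
    BeautifulSoup('<p>ok</p>', 'html5lib').original_encoding  # None for str input
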
    def create_treebuilder(self, namespaceHTMLElements):
        self.underlying_builder = TreeBuilderForHtml5lib(
            namespaceHTMLElements, self.soup,
            store_line_numbers=self.store_line_numbers
        )
            self.soup, namespaceHTMLElements)
        return self.underlying_builder

    def test_fragment_to_document(self, fragment):
@@ -127,30 +73,12 @@ class HTML5TreeBuilder(HTMLTreeBuilder):
        return '<html><head></head><body>%s</body></html>' % fragment


class TreeBuilderForHtml5lib(treebuilder_base.TreeBuilder):

    def __init__(self, namespaceHTMLElements, soup=None,
                 store_line_numbers=True, **kwargs):
        if soup:
            self.soup = soup
        else:
            from bs4 import BeautifulSoup
            # TODO: Why is the parser 'html.parser' here? To avoid an
            # infinite loop?
            self.soup = BeautifulSoup(
                "", "html.parser", store_line_numbers=store_line_numbers,
                **kwargs
            )
            # TODO: What are **kwargs exactly? Should they be passed in
            # here in addition to/instead of being passed to the BeautifulSoup
            # constructor?
class TreeBuilderForHtml5lib(treebuildersbase.TreeBuilder):

    def __init__(self, soup, namespaceHTMLElements):
        self.soup = soup
        super(TreeBuilderForHtml5lib, self).__init__(namespaceHTMLElements)

        # This will be set later to an html5lib.html5parser.HTMLParser
        # object, which we can use to track the current line number.
        self.parser = None
        self.store_line_numbers = store_line_numbers

    def documentClass(self):
        self.soup.reset()
        return Element(self.soup, self.soup, None)
@@ -164,26 +92,14 @@ class TreeBuilderForHtml5lib(treebuilder_base.TreeBuilder):
        self.soup.object_was_parsed(doctype)

    def elementClass(self, name, namespace):
        kwargs = {}
        if self.parser and self.store_line_numbers:
            # This represents the point immediately after the end of the
            # tag. We don't know when the tag started, but we do know
            # where it ended -- the character just before this one.
            sourceline, sourcepos = self.parser.tokenizer.stream.position()
            kwargs['sourceline'] = sourceline
            kwargs['sourcepos'] = sourcepos-1
        tag = self.soup.new_tag(name, namespace, **kwargs)

        tag = self.soup.new_tag(name, namespace)
        return Element(tag, self.soup, namespace)

    def commentClass(self, data):
        return TextNode(Comment(data), self.soup)

    def fragmentClass(self):
        from bs4 import BeautifulSoup
        # TODO: Why is the parser 'html.parser' here? To avoid an
        # infinite loop?
        self.soup = BeautifulSoup("", "html.parser")
        self.soup = BeautifulSoup("")
        self.soup.name = "[document_fragment]"
        return Element(self.soup, self.soup, None)

@@ -195,57 +111,7 @@ class TreeBuilderForHtml5lib(treebuilder_base.TreeBuilder):
        return self.soup

    def getFragment(self):
        return treebuilder_base.TreeBuilder.getFragment(self).element

    def testSerializer(self, element):
        from bs4 import BeautifulSoup
        rv = []
        doctype_re = re.compile(r'^(.*?)(?: PUBLIC "(.*?)"(?: "(.*?)")?| SYSTEM "(.*?)")?$')

        def serializeElement(element, indent=0):
            if isinstance(element, BeautifulSoup):
                pass
            if isinstance(element, Doctype):
                m = doctype_re.match(element)
                if m:
                    name = m.group(1)
                    if m.lastindex > 1:
                        publicId = m.group(2) or ""
                        systemId = m.group(3) or m.group(4) or ""
                        rv.append("""|%s<!DOCTYPE %s "%s" "%s">""" %
                                  (' ' * indent, name, publicId, systemId))
                    else:
                        rv.append("|%s<!DOCTYPE %s>" % (' ' * indent, name))
                else:
                    rv.append("|%s<!DOCTYPE >" % (' ' * indent,))
            elif isinstance(element, Comment):
                rv.append("|%s<!-- %s -->" % (' ' * indent, element))
            elif isinstance(element, NavigableString):
                rv.append("|%s\"%s\"" % (' ' * indent, element))
            else:
                if element.namespace:
                    name = "%s %s" % (prefixes[element.namespace],
                                      element.name)
                else:
                    name = element.name
                rv.append("|%s<%s>" % (' ' * indent, name))
                if element.attrs:
                    attributes = []
                    for name, value in list(element.attrs.items()):
                        if isinstance(name, NamespacedAttribute):
                            name = "%s %s" % (prefixes[name.namespace], name.name)
                        if isinstance(value, list):
                            value = " ".join(value)
                        attributes.append((name, value))

                    for name, value in sorted(attributes):
                        rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))
                indent += 2
                for child in element.children:
                    serializeElement(child, indent)
        serializeElement(element, 0)

        return "\n".join(rv)
        return treebuildersbase.TreeBuilder.getFragment(self).element

class AttrList(object):
    def __init__(self, element):
@@ -256,14 +122,14 @@ class AttrList(object):
    def __setitem__(self, name, value):
        # If this attribute is a multi-valued attribute for this element,
        # turn its value into a list.
        list_attr = self.element.cdata_list_attributes or {}
        if (name in list_attr.get('*', [])
        list_attr = HTML5TreeBuilder.cdata_list_attributes
        if (name in list_attr['*']
            or (self.element.name in list_attr
                and name in list_attr.get(self.element.name, []))):
                and name in list_attr[self.element.name])):
            # A node that is being cloned may have already undergone
            # this procedure.
            if not isinstance(value, list):
                value = nonwhitespace_re.findall(value)
                value = whitespace_re.split(value)
            self.element[name] = value
    def items(self):
        return list(self.attrs.items())
@@ -277,9 +143,9 @@ class AttrList(object):
        return name in list(self.attrs.keys())


class Element(treebuilder_base.Node):
class Element(treebuildersbase.Node):
    def __init__(self, element, soup, namespace):
        treebuilder_base.Node.__init__(self, element.name)
        treebuildersbase.Node.__init__(self, element.name)
        self.element = element
        self.soup = soup
        self.namespace = namespace
@@ -298,15 +164,13 @@ class Element(treebuilder_base.Node):
            child = node
        elif node.element.__class__ == NavigableString:
            string_child = child = node.element
            node.parent = self
        else:
            child = node.element
            node.parent = self

        if not isinstance(child, str) and child.parent is not None:
            node.element.extract()

        if (string_child is not None and self.element.contents
        if (string_child and self.element.contents
            and self.element.contents[-1].__class__ == NavigableString):
            # We are appending a string onto another string.
            # TODO This has O(n^2) performance, for input like
@@ -339,12 +203,12 @@ class Element(treebuilder_base.Node):
                most_recent_element=most_recent_element)

    def getAttributes(self):
        if isinstance(self.element, Comment):
            return {}
        return AttrList(self.element)

    def setAttributes(self, attributes):

        if attributes is not None and len(attributes) > 0:

            converted_attributes = []
            for name, value in list(attributes.items()):
                if isinstance(name, tuple):
@@ -366,11 +230,11 @@ class Element(treebuilder_base.Node):
    attributes = property(getAttributes, setAttributes)

    def insertText(self, data, insertBefore=None):
        text = TextNode(self.soup.new_string(data), self.soup)
        if insertBefore:
            self.insertBefore(text, insertBefore)
            text = TextNode(self.soup.new_string(data), self.soup)
            self.insertBefore(data, insertBefore)
        else:
            self.appendChild(text)
            self.appendChild(data)

    def insertBefore(self, node, refNode):
        index = self.element.index(refNode.element)
@@ -389,10 +253,9 @@ class Element(treebuilder_base.Node):

    def reparentChildren(self, new_parent):
        """Move all of this tag's children into another tag."""
        # print("MOVE", self.element.contents)
        # print("FROM", self.element)
        # print("TO", new_parent.element)

        # print "MOVE", self.element.contents
        # print "FROM", self.element
        # print "TO", new_parent.element
        element = self.element
        new_parent_element = new_parent.element
        # Determine what this tag's next_element will be once all the children
@@ -411,35 +274,29 @@ class Element(treebuilder_base.Node):
        new_parents_last_descendant_next_element = new_parent_element.next_element

        to_append = element.contents
        append_after = new_parent_element.contents
        if len(to_append) > 0:
            # Set the first child's previous_element and previous_sibling
            # to elements within the new parent
            first_child = to_append[0]
            if new_parents_last_descendant is not None:
            if new_parents_last_descendant:
                first_child.previous_element = new_parents_last_descendant
            else:
                first_child.previous_element = new_parent_element
            first_child.previous_sibling = new_parents_last_child
            if new_parents_last_descendant is not None:
            if new_parents_last_descendant:
                new_parents_last_descendant.next_element = first_child
            else:
                new_parent_element.next_element = first_child
            if new_parents_last_child is not None:
            if new_parents_last_child:
                new_parents_last_child.next_sibling = first_child

            # Find the very last element being moved. It is now the
            # parent's last descendant. It has no .next_sibling and
            # its .next_element is whatever the previous last
            # descendant had.
            last_childs_last_descendant = to_append[-1]._last_descendant(False, True)

            last_childs_last_descendant.next_element = new_parents_last_descendant_next_element
            if new_parents_last_descendant_next_element is not None:
                # TODO: This code has no test coverage and I'm not sure
                # how to get html5lib to go through this path, but it's
                # just the other side of the previous line.
                new_parents_last_descendant_next_element.previous_element = last_childs_last_descendant
            last_childs_last_descendant.next_sibling = None
            # Fix the last child's next_element and next_sibling
            last_child = to_append[-1]
            last_child.next_element = new_parents_last_descendant_next_element
            if new_parents_last_descendant_next_element:
                new_parents_last_descendant_next_element.previous_element = last_child
            last_child.next_sibling = None

        for child in to_append:
            child.parent = new_parent_element
@@ -449,9 +306,9 @@ class Element(treebuilder_base.Node):
        element.contents = []
        element.next_element = final_next_element

        # print("DONE WITH MOVE")
        # print("FROM", self.element)
        # print("TO", new_parent_element)
        # print "DONE WITH MOVE"
        # print "FROM", self.element
        # print "TO", new_parent_element

    def cloneNode(self):
        tag = self.soup.new_tag(self.element.name, self.namespace)
@@ -464,7 +321,7 @@ class Element(treebuilder_base.Node):
        return self.element.contents

    def getNameTuple(self):
        if self.namespace == None:
        if self.namespace is None:
            return namespaces["html"], self.name
        else:
            return self.namespace, self.name
@@ -473,7 +330,7 @@ class Element(treebuilder_base.Node):

class TextNode(Element):
    def __init__(self, element, soup):
        treebuilder_base.Node.__init__(self, None)
        treebuildersbase.Node.__init__(self, None)
        self.element = element
        self.soup = soup


@@ -1,18 +1,35 @@
# encoding: utf-8
"""Use the HTMLParser library to parse HTML files that aren't too bad."""

# Use of this source code is governed by the MIT license.
__license__ = "MIT"

__all__ = [
    'HTMLParserTreeBuilder',
    ]

from html.parser import HTMLParser

try:
    from html.parser import HTMLParseError
except ImportError as e:
    # HTMLParseError is removed in Python 3.5. Since it can never be
    # thrown in 3.5, we can just define our own class as a placeholder.
    class HTMLParseError(Exception):
        pass

import sys
import warnings

# Starting in Python 3.2, the HTMLParser constructor takes a 'strict'
# argument, which we'd like to set to False. Unfortunately,
# http://bugs.python.org/issue13273 makes strict=True a better bet
# before Python 3.2.3.
#
# At the end of this file, we monkeypatch HTMLParser so that
# strict=True works well on Python 3.2.2.
major, minor, release = sys.version_info[:3]
CONSTRUCTOR_TAKES_STRICT = major == 3 and minor == 2 and release >= 3
CONSTRUCTOR_STRICT_IS_DEPRECATED = major == 3 and minor == 3
CONSTRUCTOR_TAKES_CONVERT_CHARREFS = major == 3 and minor >= 4


from bs4.element import (
    CData,
    Comment,
@@ -23,8 +40,6 @@ from bs4.element import (
from bs4.dammit import EntitySubstitution, UnicodeDammit

from bs4.builder import (
    DetectsXMLParsedAsHTML,
    ParserRejectedMarkup,
    HTML,
    HTMLTreeBuilder,
    STRICT,
@@ -33,84 +48,8 @@ from bs4.builder import (

HTMLPARSER = 'html.parser'

class BeautifulSoupHTMLParser(HTMLParser, DetectsXMLParsedAsHTML):
    """A subclass of the Python standard library's HTMLParser class, which
    listens for HTMLParser events and translates them into calls
    to Beautiful Soup's tree construction API.
    """

    # Strategies for handling duplicate attributes
    IGNORE = 'ignore'
    REPLACE = 'replace'

    def __init__(self, *args, **kwargs):
        """Constructor.

        :param on_duplicate_attribute: A strategy for what to do if a
            tag includes the same attribute more than once. Accepted
            values are: REPLACE (replace earlier values with later
            ones, the default), IGNORE (keep the earliest value
            encountered), or a callable. A callable must take three
            arguments: the dictionary of attributes already processed,
            the name of the duplicate attribute, and the most recent value
            encountered.
        """
        self.on_duplicate_attribute = kwargs.pop(
            'on_duplicate_attribute', self.REPLACE
        )
        HTMLParser.__init__(self, *args, **kwargs)

        # Keep a list of empty-element tags that were encountered
        # without an explicit closing tag. If we encounter a closing tag
        # of this type, we'll associate it with one of those entries.
        #
        # This isn't a stack because we don't care about the
        # order. It's a list of closing tags we've already handled and
        # will ignore, assuming they ever show up.
        self.already_closed_empty_element = []

        self._initialize_xml_detector()

    def error(self, message):
        # NOTE: This method is required so long as Python 3.9 is
        # supported. The corresponding code is removed from HTMLParser
        # in 3.5, but not removed from ParserBase until 3.10.
        # https://github.com/python/cpython/issues/76025
        #
        # The original implementation turned the error into a warning,
        # but in every case I discovered, this made HTMLParser
        # immediately crash with an error message that was less
        # helpful than the warning. The new implementation makes it
        # more clear that html.parser just can't parse this
        # markup. The 3.10 implementation does the same, though it
        # raises AssertionError rather than calling a method. (We
        # catch this error and wrap it in a ParserRejectedMarkup.)
        raise ParserRejectedMarkup(message)

    def handle_startendtag(self, name, attrs):
        """Handle an incoming empty-element tag.

        This is only called when the markup looks like <tag/>.

        :param name: Name of the tag.
        :param attrs: Dictionary of the tag's attributes.
        """
        # is_startend() tells handle_starttag not to close the tag
        # just because its name matches a known empty-element tag. We
        # know that this is an empty-element tag and we want to call
        # handle_endtag ourselves.
        tag = self.handle_starttag(name, attrs, handle_empty_element=False)
        self.handle_endtag(name)

    def handle_starttag(self, name, attrs, handle_empty_element=True):
        """Handle an opening tag, e.g. '<tag>'

        :param name: Name of the tag.
        :param attrs: Dictionary of the tag's attributes.
        :param handle_empty_element: True if this tag is known to be
            an empty-element tag (i.e. there is not expected to be any
            closing tag).
        """
class BeautifulSoupHTMLParser(HTMLParser):
    def handle_starttag(self, name, attrs):
        # XXX namespace
        attr_dict = {}
        for key, value in attrs:
@@ -118,78 +57,20 @@ class BeautifulSoupHTMLParser(HTMLParser, DetectsXMLParsedAsHTML):
            # for consistency with the other tree builders.
            if value is None:
                value = ''
            if key in attr_dict:
                # A single attribute shows up multiple times in this
                # tag. How to handle it depends on the
                # on_duplicate_attribute setting.
                on_dupe = self.on_duplicate_attribute
                if on_dupe == self.IGNORE:
                    pass
                elif on_dupe in (None, self.REPLACE):
                    attr_dict[key] = value
                else:
                    on_dupe(attr_dict, key, value)
            else:
                attr_dict[key] = value
            attr_dict[key] = value
            attrvalue = '""'
        #print("START", name)
        sourceline, sourcepos = self.getpos()
        tag = self.soup.handle_starttag(
            name, None, None, attr_dict, sourceline=sourceline,
            sourcepos=sourcepos
        )
        if tag and tag.is_empty_element and handle_empty_element:
            # Unlike other parsers, html.parser doesn't send separate end tag
            # events for empty-element tags. (It's handled in
            # handle_startendtag, but only if the original markup looked like
            # <tag/>.)
            #
            # So we need to call handle_endtag() ourselves. Since we
            # know the start event is identical to the end event, we
            # don't want handle_endtag() to cross off any previous end
            # events for tags of this name.
            self.handle_endtag(name, check_already_closed=False)
        self.soup.handle_starttag(name, None, None, attr_dict)

            # But we might encounter an explicit closing tag for this tag
            # later on. If so, we want to ignore it.
            self.already_closed_empty_element.append(name)
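
The on_duplicate_attribute strategies handled above are exposed through the BeautifulSoup constructor (a sketch; assumes a bs4 version that supports the option):

    from bs4 import BeautifulSoup

    markup = '<a class="first" class="second">x</a>'
    BeautifulSoup(markup, 'html.parser').a['class']
    # ['second'] -- REPLACE is the default strategy
    BeautifulSoup(markup, 'html.parser',
                  on_duplicate_attribute='ignore').a['class']
    # ['first'] -- IGNORE keeps the earliest value
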
    def handle_endtag(self, name):
        self.soup.handle_endtag(name)

        if self._root_tag is None:
            self._root_tag_encountered(name)

    def handle_endtag(self, name, check_already_closed=True):
        """Handle a closing tag, e.g. '</tag>'

        :param name: A tag name.
        :param check_already_closed: True if this tag is expected to
            be the closing portion of an empty-element tag,
            e.g. '<tag></tag>'.
        """
        #print("END", name)
        if check_already_closed and name in self.already_closed_empty_element:
            # This is a redundant end tag for an empty-element tag.
            # We've already called handle_endtag() for it, so just
            # check it off the list.
            #print("ALREADY CLOSED", name)
            self.already_closed_empty_element.remove(name)
        else:
            self.soup.handle_endtag(name)

    def handle_data(self, data):
        """Handle some textual data that shows up between tags."""
        self.soup.handle_data(data)

    def handle_charref(self, name):
        """Handle a numeric character reference by converting it to the
        corresponding Unicode character and treating it as textual
        data.

        :param name: Character number, possibly in hexadecimal.
        """
        # TODO: This was originally a workaround for a bug in
        # HTMLParser. (http://bugs.python.org/issue13633) The bug has
        # been fixed, but removing this code still makes some
        # Beautiful Soup tests fail. This needs investigation.
        # XXX workaround for a bug in HTMLParser. Remove this once
        # it's fixed in all supported versions.
        # http://bugs.python.org/issue13633
        if name.startswith('x'):
            real_name = int(name.lstrip('x'), 16)
        elif name.startswith('X'):
@@ -197,71 +78,37 @@ class BeautifulSoupHTMLParser(HTMLParser, DetectsXMLParsedAsHTML):
        else:
            real_name = int(name)

        data = None
        if real_name < 256:
            # HTML numeric entities are supposed to reference Unicode
            # code points, but sometimes they reference code points in
            # some other encoding (ahem, Windows-1252). E.g. &#147;
            # instead of &#8220; for LEFT DOUBLE QUOTATION MARK. This
            # code tries to detect this situation and compensate.
            for encoding in (self.soup.original_encoding, 'windows-1252'):
                if not encoding:
                    continue
                try:
                    data = bytearray([real_name]).decode(encoding)
                except UnicodeDecodeError as e:
                    pass
        if not data:
            try:
                data = chr(real_name)
            except (ValueError, OverflowError) as e:
                pass
        data = data or "\N{REPLACEMENT CHARACTER}"
        try:
            data = chr(real_name)
        except (ValueError, OverflowError) as e:
            data = "\N{REPLACEMENT CHARACTER}"

        self.handle_data(data)

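To see the Windows-1252 compensation in action (a sketch of the newer handler above; the escape sequences are spelled out to stay ASCII-safe):

    from bs4 import BeautifulSoup

    # A reference to a real Unicode code point passes straight through...
    BeautifulSoup('<p>&#8212;</p>', 'html.parser').p.string   # '\u2014' (em dash)
    # ...while a low code point such as &#151; is first tried against
    # windows-1252, where 0x97 is also the em dash.
    BeautifulSoup('<p>&#151;</p>', 'html.parser').p.string    # also '\u2014'
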
    def handle_entityref(self, name):
        """Handle a named entity reference by converting it to the
        corresponding Unicode character(s) and treating it as textual
        data.

        :param name: Name of the entity reference.
        """
        character = EntitySubstitution.HTML_ENTITY_TO_CHARACTER.get(name)
        if character is not None:
            data = character
        else:
            # If this were XML, it would be ambiguous whether "&foo"
            # was an character entity reference with a missing
            # semicolon or the literal string "&foo". Since this is
            # HTML, we have a complete list of all character entity references,
            # and this one wasn't found, so assume it's the literal string "&foo".
            data = "&%s" % name
            data = "&%s;" % name
        self.handle_data(data)

    def handle_comment(self, data):
        """Handle an HTML comment.

        :param data: The text of the comment.
        """
        self.soup.endData()
        self.soup.handle_data(data)
        self.soup.endData(Comment)

    def handle_decl(self, data):
        """Handle a DOCTYPE declaration.

        :param data: The text of the declaration.
        """
        self.soup.endData()
        data = data[len("DOCTYPE "):]
        if data.startswith("DOCTYPE "):
            data = data[len("DOCTYPE "):]
        elif data == 'DOCTYPE':
            # i.e. "<!DOCTYPE>"
            data = ''
        self.soup.handle_data(data)
        self.soup.endData(Doctype)

    def unknown_decl(self, data):
        """Handle a declaration of unknown type -- probably a CDATA block.

        :param data: The text of the declaration.
        """
        if data.upper().startswith('CDATA['):
            cls = CData
            data = data[len('CDATA['):]
@@ -272,116 +119,144 @@ class BeautifulSoupHTMLParser(HTMLParser, DetectsXMLParsedAsHTML):
        self.soup.endData(cls)

    def handle_pi(self, data):
        """Handle a processing instruction.

        :param data: The text of the instruction.
        """
        self.soup.endData()
        self.soup.handle_data(data)
        self._document_might_be_xml(data)
        self.soup.endData(ProcessingInstruction)


class HTMLParserTreeBuilder(HTMLTreeBuilder):
    """A Beautiful soup `TreeBuilder` that uses the `HTMLParser` parser,
    found in the Python standard library.
    """

    is_xml = False
    picklable = True
    NAME = HTMLPARSER
    features = [NAME, HTML, STRICT]

    # The html.parser knows which line number and position in the
    # original file is the source of an element.
    TRACKS_LINE_NUMBERS = True
    def __init__(self, *args, **kwargs):
        if CONSTRUCTOR_TAKES_STRICT and not CONSTRUCTOR_STRICT_IS_DEPRECATED:
            kwargs['strict'] = False
        if CONSTRUCTOR_TAKES_CONVERT_CHARREFS:
            kwargs['convert_charrefs'] = False
        self.parser_args = (args, kwargs)

    def __init__(self, parser_args=None, parser_kwargs=None, **kwargs):
        """Constructor.

        :param parser_args: Positional arguments to pass into
            the BeautifulSoupHTMLParser constructor, once it's
            invoked.
        :param parser_kwargs: Keyword arguments to pass into
            the BeautifulSoupHTMLParser constructor, once it's
            invoked.
        :param kwargs: Keyword arguments for the superclass constructor.
        """
        # Some keyword arguments will be pulled out of kwargs and placed
        # into parser_kwargs.
        extra_parser_kwargs = dict()
        for arg in ('on_duplicate_attribute',):
            if arg in kwargs:
                value = kwargs.pop(arg)
                extra_parser_kwargs[arg] = value
        super(HTMLParserTreeBuilder, self).__init__(**kwargs)
        parser_args = parser_args or []
        parser_kwargs = parser_kwargs or {}
        parser_kwargs.update(extra_parser_kwargs)
        parser_kwargs['convert_charrefs'] = False
        self.parser_args = (parser_args, parser_kwargs)

    def prepare_markup(self, markup, user_specified_encoding=None,
                       document_declared_encoding=None, exclude_encodings=None):

        """Run any preliminary steps necessary to make incoming markup
        acceptable to the parser.

        :param markup: Some markup -- probably a bytestring.
        :param user_specified_encoding: The user asked to try this encoding.
        :param document_declared_encoding: The markup itself claims to be
            in this encoding.
        :param exclude_encodings: The user asked _not_ to try any of
            these encodings.

        :yield: A series of 4-tuples:
            (markup, encoding, declared encoding,
             has undergone character replacement)

        Each 4-tuple represents a strategy for converting the
        document to Unicode and parsing it. Each strategy will be tried
        in turn.
        """
        :return: A 4-tuple (markup, original encoding, encoding
            declared within markup, whether any characters had to be
            replaced with REPLACEMENT CHARACTER).
        """
        if isinstance(markup, str):
            # Parse Unicode as-is.
            yield (markup, None, None, False)
            return

        # Ask UnicodeDammit to sniff the most likely encoding.

        # This was provided by the end-user; treat it as a known
        # definite encoding per the algorithm laid out in the HTML5
        # spec. (See the EncodingDetector class for details.)
        known_definite_encodings = [user_specified_encoding]

        # This was found in the document; treat it as a slightly lower-priority
        # user encoding.
        user_encodings = [document_declared_encoding]

        try_encodings = [user_specified_encoding, document_declared_encoding]
        dammit = UnicodeDammit(
            markup,
            known_definite_encodings=known_definite_encodings,
            user_encodings=user_encodings,
            is_html=True,
            exclude_encodings=exclude_encodings
        )
        dammit = UnicodeDammit(markup, try_encodings, is_html=True,
                               exclude_encodings=exclude_encodings)
        yield (dammit.markup, dammit.original_encoding,
               dammit.declared_html_encoding,
               dammit.contains_replacement_characters)

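UnicodeDammit, used above for encoding sniffing, can also be exercised on its own; a short sketch with a Latin-1 bytestring:

    from bs4 import UnicodeDammit

    dammit = UnicodeDammit(b'Sacr\xe9 bleu!', ['latin-1', 'iso-8859-1'])
    dammit.unicode_markup      # 'Sacr\xe9 bleu!' decoded to str
    dammit.original_encoding   # 'latin-1'
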
def feed(self, markup):
|
||||
"""Run some incoming markup through some parsing process,
|
||||
populating the `BeautifulSoup` object in self.soup.
|
||||
"""
|
||||
args, kwargs = self.parser_args
|
||||
parser = BeautifulSoupHTMLParser(*args, **kwargs)
|
||||
parser.soup = self.soup
|
||||
try:
|
||||
parser.feed(markup)
|
||||
parser.close()
|
||||
except AssertionError as e:
|
||||
# html.parser raises AssertionError in rare cases to
|
||||
# indicate a fatal problem with the markup, especially
|
||||
# when there's an error in the doctype declaration.
|
||||
raise ParserRejectedMarkup(e)
|
||||
parser.already_closed_empty_element = []
|
||||
except HTMLParseError as e:
|
||||
warnings.warn(RuntimeWarning(
|
||||
"Python's built-in HTMLParser cannot parse the given document. This is not a bug in Beautiful Soup. The best solution is to install an external parser (lxml or html5lib), and use Beautiful Soup with that parser. See http://www.crummy.com/software/BeautifulSoup/bs4/doc/#installing-a-parser for help."))
|
||||
raise e
|
||||
|
||||
# Patch 3.2 versions of HTMLParser earlier than 3.2.3 to use some
|
||||
# 3.2.3 code. This ensures they don't treat markup like <p></p> as a
|
||||
# string.
|
||||
#
|
||||
# XXX This code can be removed once most Python 3 users are on 3.2.3.
|
||||
if major == 3 and minor == 2 and not CONSTRUCTOR_TAKES_STRICT:
|
||||
import re
|
||||
attrfind_tolerant = re.compile(
|
||||
r'\s*((?<=[\'"\s])[^\s/>][^\s/=>]*)(\s*=+\s*'
|
||||
r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?')
|
||||
HTMLParserTreeBuilder.attrfind_tolerant = attrfind_tolerant
|
||||
|
||||
locatestarttagend = re.compile(r"""
|
||||
<[a-zA-Z][-.a-zA-Z0-9:_]* # tag name
|
||||
(?:\s+ # whitespace before attribute name
|
||||
(?:[a-zA-Z_][-.:a-zA-Z0-9_]* # attribute name
|
||||
(?:\s*=\s* # value indicator
|
||||
(?:'[^']*' # LITA-enclosed value
|
||||
|\"[^\"]*\" # LIT-enclosed value
|
||||
|[^'\">\s]+ # bare value
|
||||
)
|
||||
)?
|
||||
)
|
||||
)*
|
||||
\s* # trailing whitespace
|
||||
""", re.VERBOSE)
|
||||
BeautifulSoupHTMLParser.locatestarttagend = locatestarttagend
|
||||
|
||||
from html.parser import tagfind, attrfind
|
||||
|
||||
def parse_starttag(self, i):
|
||||
self.__starttag_text = None
|
||||
endpos = self.check_for_whole_start_tag(i)
|
||||
if endpos < 0:
|
||||
return endpos
|
||||
rawdata = self.rawdata
|
||||
self.__starttag_text = rawdata[i:endpos]
|
||||
|
||||
# Now parse the data between i+1 and j into a tag and attrs
|
||||
attrs = []
|
||||
match = tagfind.match(rawdata, i+1)
|
||||
assert match, 'unexpected call to parse_starttag()'
|
||||
k = match.end()
|
||||
self.lasttag = tag = rawdata[i+1:k].lower()
|
||||
while k < endpos:
|
||||
if self.strict:
|
||||
m = attrfind.match(rawdata, k)
|
||||
else:
|
||||
m = attrfind_tolerant.match(rawdata, k)
|
||||
if not m:
|
||||
break
|
||||
attrname, rest, attrvalue = m.group(1, 2, 3)
|
||||
if not rest:
|
||||
attrvalue = None
|
||||
elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
|
||||
attrvalue[:1] == '"' == attrvalue[-1:]:
|
||||
attrvalue = attrvalue[1:-1]
|
||||
if attrvalue:
|
||||
attrvalue = self.unescape(attrvalue)
|
||||
attrs.append((attrname.lower(), attrvalue))
|
||||
k = m.end()
|
||||
|
||||
end = rawdata[k:endpos].strip()
|
||||
if end not in (">", "/>"):
|
||||
lineno, offset = self.getpos()
|
||||
if "\n" in self.__starttag_text:
|
||||
lineno = lineno + self.__starttag_text.count("\n")
|
||||
offset = len(self.__starttag_text) \
|
||||
- self.__starttag_text.rfind("\n")
|
||||
else:
|
||||
offset = offset + len(self.__starttag_text)
|
||||
if self.strict:
|
||||
self.error("junk characters in start tag: %r"
|
||||
% (rawdata[k:endpos][:20],))
|
||||
self.handle_data(rawdata[i:endpos])
|
||||
return endpos
|
||||
if end.endswith('/>'):
|
||||
# XHTML-style empty tag: <span attr="value" />
|
||||
self.handle_startendtag(tag, attrs)
|
||||
else:
|
||||
self.handle_starttag(tag, attrs)
|
||||
if tag in self.CDATA_CONTENT_ELEMENTS:
|
||||
self.set_cdata_mode(tag)
|
||||
return endpos
|
||||
|
||||
def set_cdata_mode(self, elem):
|
||||
self.cdata_elem = elem.lower()
|
||||
self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)
|
||||
|
||||
BeautifulSoupHTMLParser.parse_starttag = parse_starttag
|
||||
BeautifulSoupHTMLParser.set_cdata_mode = set_cdata_mode
|
||||
|
||||
CONSTRUCTOR_TAKES_STRICT = True
|
||||
|
||||
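
Aside, not part of the diff above: feed() is the seam where html.parser's fatal AssertionErrors become ParserRejectedMarkup. A minimal sketch of what that buys a caller, assuming a bs4 install where the exception is importable from bs4.builder and lxml is present:

from bs4 import BeautifulSoup
from bs4.builder import ParserRejectedMarkup

def parse_with_fallback(markup):
    try:
        return BeautifulSoup(markup, "html.parser")
    except ParserRejectedMarkup:
        # html.parser hit a fatal problem (e.g. a broken doctype);
        # lxml recovers from most documents html.parser gives up on.
        return BeautifulSoup(markup, "lxml")
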
@@ -1,28 +1,19 @@
# Use of this source code is governed by the MIT license.
__license__ = "MIT"

__all__ = [
    'LXMLTreeBuilderForXML',
    'LXMLTreeBuilder',
    ]

try:
    from collections.abc import Callable # Python 3.6
except ImportError as e:
    from collections import Callable

from io import BytesIO
from io import StringIO
import collections
from lxml import etree
from bs4.element import (
    Comment,
    Doctype,
    NamespacedAttribute,
    ProcessingInstruction,
    XMLProcessingInstruction,
)
from bs4.builder import (
    DetectsXMLParsedAsHTML,
    FAST,
    HTML,
    HTMLTreeBuilder,
@@ -34,15 +25,10 @@ from bs4.dammit import EncodingDetector

LXML = 'lxml'

def _invert(d):
    "Invert a dictionary."
    return dict((v,k) for k, v in list(d.items()))

class LXMLTreeBuilderForXML(TreeBuilder):
    DEFAULT_PARSER_CLASS = etree.XMLParser

    is_xml = True
    processing_instruction_class = XMLProcessingInstruction

    NAME = "lxml-xml"
    ALTERNATE_NAMES = ["xml"]
@@ -54,79 +40,26 @@ class LXMLTreeBuilderForXML(TreeBuilder):

    # This namespace mapping is specified in the XML Namespace
    # standard.
    DEFAULT_NSMAPS = dict(xml='http://www.w3.org/XML/1998/namespace')
    DEFAULT_NSMAPS = {'http://www.w3.org/XML/1998/namespace' : "xml"}

    DEFAULT_NSMAPS_INVERTED = _invert(DEFAULT_NSMAPS)

    # NOTE: If we parsed Element objects and looked at .sourceline,
    # we'd be able to see the line numbers from the original document.
    # But instead we build an XMLParser or HTMLParser object to serve
    # as the target of parse messages, and those messages don't include
    # line numbers.
    # See: https://bugs.launchpad.net/lxml/+bug/1846906

    def initialize_soup(self, soup):
        """Let the BeautifulSoup object know about the standard namespace
        mapping.

        :param soup: A `BeautifulSoup`.
        """
        super(LXMLTreeBuilderForXML, self).initialize_soup(soup)
        self._register_namespaces(self.DEFAULT_NSMAPS)

    def _register_namespaces(self, mapping):
        """Let the BeautifulSoup object know about namespaces encountered
        while parsing the document.

        This might be useful later on when creating CSS selectors.

        This will track (almost) all namespaces, even ones that were
        only in scope for part of the document. If two namespaces have
        the same prefix, only the first one encountered will be
        tracked. Un-prefixed namespaces are not tracked.

        :param mapping: A dictionary mapping namespace prefixes to URIs.
        """
        for key, value in list(mapping.items()):
            # This is 'if key' and not 'if key is not None' because we
            # don't track un-prefixed namespaces. Soupselect will
            # treat an un-prefixed namespace as the default, which
            # causes confusion in some cases.
            if key and key not in self.soup._namespaces:
                # Let the BeautifulSoup object know about a new namespace.
                # If there are multiple namespaces defined with the same
                # prefix, the first one in the document takes precedence.
                self.soup._namespaces[key] = value

    def default_parser(self, encoding):
        """Find the default parser for the given encoding.

        :param encoding: A string.
        :return: Either a parser object or a class, which
            will be instantiated with default arguments.
        """
        # This can either return a parser object or a class, which
        # will be instantiated with default arguments.
        if self._default_parser is not None:
            return self._default_parser
        return etree.XMLParser(
            target=self, strip_cdata=False, recover=True, encoding=encoding)

    def parser_for(self, encoding):
        """Instantiate an appropriate parser for the given encoding.

        :param encoding: A string.
        :return: A parser object such as an `etree.XMLParser`.
        """
        # Use the default parser.
        parser = self.default_parser(encoding)

        if isinstance(parser, Callable):
        if isinstance(parser, collections.Callable):
            # Instantiate the parser with default arguments
            parser = parser(
                target=self, strip_cdata=False, recover=True, encoding=encoding
            )
            parser = parser(target=self, strip_cdata=False, encoding=encoding)
        return parser

    def __init__(self, parser=None, empty_element_tags=None, **kwargs):
    def __init__(self, parser=None, empty_element_tags=None):
        # TODO: Issue a warning if parser is present but not a
        # callable, since that means there's no way to create new
        # parsers for different encodings.
@@ -134,10 +67,8 @@ class LXMLTreeBuilderForXML(TreeBuilder):
        if empty_element_tags is not None:
            self.empty_element_tags = set(empty_element_tags)
        self.soup = None
        self.nsmaps = [self.DEFAULT_NSMAPS_INVERTED]
        self.active_namespace_prefixes = [dict(self.DEFAULT_NSMAPS)]
        super(LXMLTreeBuilderForXML, self).__init__(**kwargs)

        self.nsmaps = [self.DEFAULT_NSMAPS]

    def _getNsTag(self, tag):
        # Split the namespace URL out of a fully-qualified lxml tag
        # name. Copied from lxml's src/lxml/sax.py.
@@ -149,51 +80,16 @@ class LXMLTreeBuilderForXML(TreeBuilder):
    def prepare_markup(self, markup, user_specified_encoding=None,
                       exclude_encodings=None,
                       document_declared_encoding=None):
        """Run any preliminary steps necessary to make incoming markup
        acceptable to the parser.

        lxml really wants to get a bytestring and convert it to
        Unicode itself. So instead of using UnicodeDammit to convert
        the bytestring to Unicode using different encodings, this
        implementation uses EncodingDetector to iterate over the
        encodings, and tell lxml to try to parse the document as each
        one in turn.

        :param markup: Some markup -- hopefully a bytestring.
        :param user_specified_encoding: The user asked to try this encoding.
        :param document_declared_encoding: The markup itself claims to be
            in this encoding.
        :param exclude_encodings: The user asked _not_ to try any of
            these encodings.

        :yield: A series of 4-tuples:
        """
        :yield: A series of 4-tuples.
            (markup, encoding, declared encoding,
             has undergone character replacement)

        Each 4-tuple represents a strategy for converting the
        document to Unicode and parsing it. Each strategy will be tried
        in turn.
        Each 4-tuple represents a strategy for parsing the document.
        """
        is_html = not self.is_xml
        if is_html:
            self.processing_instruction_class = ProcessingInstruction
            # We're in HTML mode, so if we're given XML, that's worth
            # noting.
            DetectsXMLParsedAsHTML.warn_if_markup_looks_like_xml(
                markup, stacklevel=3
            )
        else:
            self.processing_instruction_class = XMLProcessingInstruction

        if isinstance(markup, str):
            # We were given Unicode. Maybe lxml can parse Unicode on
            # this system?

            # TODO: This is a workaround for
            # https://bugs.launchpad.net/lxml/+bug/1948551.
            # We can remove it once the upstream issue is fixed.
            if len(markup) > 0 and markup[0] == u'\N{BYTE ORDER MARK}':
                markup = markup[1:]
            yield markup, None, document_declared_encoding, False

        if isinstance(markup, str):
@@ -202,19 +98,14 @@ class LXMLTreeBuilderForXML(TreeBuilder):
            yield (markup.encode("utf8"), "utf8",
                   document_declared_encoding, False)

        # This was provided by the end-user; treat it as a known
        # definite encoding per the algorithm laid out in the HTML5
        # spec. (See the EncodingDetector class for details.)
        known_definite_encodings = [user_specified_encoding]

        # This was found in the document; treat it as a slightly lower-priority
        # user encoding.
        user_encodings = [document_declared_encoding]
        # Instead of using UnicodeDammit to convert the bytestring to
        # Unicode using different encodings, use EncodingDetector to
        # iterate over the encodings, and tell lxml to try to parse
        # the document as each one in turn.
        is_html = not self.is_xml
        try_encodings = [user_specified_encoding, document_declared_encoding]
        detector = EncodingDetector(
            markup, known_definite_encodings=known_definite_encodings,
            user_encodings=user_encodings, is_html=is_html,
            exclude_encodings=exclude_encodings
        )
            markup, try_encodings, is_html, exclude_encodings)
        for encoding in detector.encodings:
            yield (detector.markup, encoding, document_declared_encoding, False)
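
Aside, not part of the diff: the two sides of this hunk swap UnicodeDammit for a bare EncodingDetector loop. A minimal sketch of that loop on invented markup (known_definite_encodings and user_encodings exist only on the newer side of the diff; the older side takes a positional override_encodings list):

from bs4.dammit import EncodingDetector

data = b'<?xml version="1.0" encoding="iso-8859-1"?><doc>caf\xe9</doc>'
detector = EncodingDetector(data, known_definite_encodings=["utf-8"])
for encoding in detector.encodings:
    # lxml would be asked to parse the document once per candidate,
    # e.g. "utf-8" first, then "iso-8859-1" from the XML declaration.
    print(encoding)
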
@@ -237,45 +128,25 @@ class LXMLTreeBuilderForXML(TreeBuilder):
            self.parser.feed(data)
            self.parser.close()
        except (UnicodeDecodeError, LookupError, etree.ParserError) as e:
            raise ParserRejectedMarkup(e)
            raise ParserRejectedMarkup(str(e))

    def close(self):
        self.nsmaps = [self.DEFAULT_NSMAPS_INVERTED]
        self.nsmaps = [self.DEFAULT_NSMAPS]

    def start(self, name, attrs, nsmap={}):
        # Make sure attrs is a mutable dict--lxml may send an immutable dictproxy.
        attrs = dict(attrs)
        nsprefix = None
        # Invert each namespace map as it comes in.
        if len(nsmap) == 0 and len(self.nsmaps) > 1:
            # There are no new namespaces for this tag, but
            # non-default namespaces are in play, so we need a
            # separate tag stack to know when they end.
            self.nsmaps.append(None)
        if len(self.nsmaps) > 1:
            # There are no new namespaces for this tag, but
            # non-default namespaces are in play, so we need a
            # separate tag stack to know when they end.
            self.nsmaps.append(None)
        elif len(nsmap) > 0:
            # A new namespace mapping has come into play.

            # First, Let the BeautifulSoup object know about it.
            self._register_namespaces(nsmap)

            # Then, add it to our running list of inverted namespace
            # mappings.
            self.nsmaps.append(_invert(nsmap))

            # The currently active namespace prefixes have
            # changed. Calculate the new mapping so it can be stored
            # with all Tag objects created while these prefixes are in
            # scope.
            current_mapping = dict(self.active_namespace_prefixes[-1])
            current_mapping.update(nsmap)

            # We should not track un-prefixed namespaces as we can only hold one
            # and it will be recognized as the default namespace by soupsieve,
            # which may be confusing in some situations.
            if '' in current_mapping:
                del current_mapping['']
            self.active_namespace_prefixes.append(current_mapping)

            inverted_nsmap = dict((value, key) for key, value in list(nsmap.items()))
            self.nsmaps.append(inverted_nsmap)
            # Also treat the namespace mapping as a set of attributes on the
            # tag, so we can recreate it later.
            attrs = attrs.copy()
@@ -300,11 +171,8 @@ class LXMLTreeBuilderForXML(TreeBuilder):

        namespace, name = self._getNsTag(name)
        nsprefix = self._prefix_for_namespace(namespace)
        self.soup.handle_starttag(
            name, namespace, nsprefix, attrs,
            namespaces=self.active_namespace_prefixes[-1]
        )

        self.soup.handle_starttag(name, namespace, nsprefix, attrs)

    def _prefix_for_namespace(self, namespace):
        """Find the currently active prefix for the given namespace."""
        if namespace is None:
@@ -328,20 +196,13 @@ class LXMLTreeBuilderForXML(TreeBuilder):
        if len(self.nsmaps) > 1:
            # This tag, or one of its parents, introduced a namespace
            # mapping, so pop it off the stack.
            out_of_scope_nsmap = self.nsmaps.pop()
            self.nsmaps.pop()

            if out_of_scope_nsmap is not None:
                # This tag introduced a namespace mapping which is no
                # longer in scope. Recalculate the currently active
                # namespace prefixes.
                self.active_namespace_prefixes.pop()

    def pi(self, target, data):
        self.soup.endData()
        data = target + ' ' + data
        self.soup.handle_data(data)
        self.soup.endData(self.processing_instruction_class)

        self.soup.handle_data(target + ' ' + data)
        self.soup.endData(ProcessingInstruction)

    def data(self, content):
        self.soup.handle_data(content)

@@ -368,7 +229,6 @@ class LXMLTreeBuilder(HTMLTreeBuilder, LXMLTreeBuilderForXML):

    features = ALTERNATE_NAMES + [NAME, HTML, FAST, PERMISSIVE]
    is_xml = False
    processing_instruction_class = ProcessingInstruction

    def default_parser(self, encoding):
        return etree.HTMLParser
@@ -380,7 +240,7 @@ class LXMLTreeBuilder(HTMLTreeBuilder, LXMLTreeBuilderForXML):
            self.parser.feed(markup)
            self.parser.close()
        except (UnicodeDecodeError, LookupError, etree.ParserError) as e:
            raise ParserRejectedMarkup(e)
            raise ParserRejectedMarkup(str(e))


    def test_fragment_to_document(self, fragment):
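
Aside, not part of the diff: from user code the two builders above are selected purely by feature name, and both surface unrecoverable lxml errors as ParserRejectedMarkup. Illustrative only; assumes lxml is installed:

from bs4 import BeautifulSoup

html_soup = BeautifulSoup("<p>one<p>two", "lxml")            # LXMLTreeBuilder
xml_soup = BeautifulSoup("<root><item/></root>", "lxml-xml") # LXMLTreeBuilderForXML
# "xml" also works as an alternate name on both sides of the diff.
print(html_soup.find_all("p"))
print(xml_soup.root.item)
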
@@ -1,274 +0,0 @@
"""Integration code for CSS selectors using Soup Sieve (pypi: soupsieve)."""

# We don't use soupsieve
soupsieve = None


class CSS(object):
    """A proxy object against the soupsieve library, to simplify its
    CSS selector API.

    Acquire this object through the .css attribute on the
    BeautifulSoup object, or on the Tag you want to use as the
    starting point for a CSS selector.

    The main advantage of doing this is that the tag to be selected
    against doesn't need to be explicitly specified in the function
    calls, since it's already scoped to a tag.
    """

    def __init__(self, tag, api=soupsieve):
        """Constructor.

        You don't need to instantiate this class yourself; instead,
        access the .css attribute on the BeautifulSoup object, or on
        the Tag you want to use as the starting point for your CSS
        selector.

        :param tag: All CSS selectors will use this as their starting
        point.

        :param api: A plug-in replacement for the soupsieve module,
        designed mainly for use in tests.
        """
        if api is None:
            raise NotImplementedError(
                "Cannot execute CSS selectors because the soupsieve package is not installed."
            )
        self.api = api
        self.tag = tag

    def escape(self, ident):
        """Escape a CSS identifier.

        This is a simple wrapper around soupselect.escape(). See the
        documentation for that function for more information.
        """
        if soupsieve is None:
            raise NotImplementedError(
                "Cannot escape CSS identifiers because the soupsieve package is not installed."
            )
        return self.api.escape(ident)

    def _ns(self, ns, select):
        """Normalize a dictionary of namespaces."""
        if not isinstance(select, self.api.SoupSieve) and ns is None:
            # If the selector is a precompiled pattern, it already has
            # a namespace context compiled in, which cannot be
            # replaced.
            ns = self.tag._namespaces
        return ns

    def _rs(self, results):
        """Normalize a list of results to a ResultSet.

        A ResultSet is more consistent with the rest of Beautiful
        Soup's API, and ResultSet.__getattr__ has a helpful error
        message if you try to treat a list of results as a single
        result (a common mistake).
        """
        # Import here to avoid circular import
        from bs4.element import ResultSet
        return ResultSet(None, results)

    def compile(self, select, namespaces=None, flags=0, **kwargs):
        """Pre-compile a selector and return the compiled object.

        :param selector: A CSS selector.

        :param namespaces: A dictionary mapping namespace prefixes
        used in the CSS selector to namespace URIs. By default,
        Beautiful Soup will use the prefixes it encountered while
        parsing the document.

        :param flags: Flags to be passed into Soup Sieve's
        soupsieve.compile() method.

        :param kwargs: Keyword arguments to be passed into SoupSieve's
        soupsieve.compile() method.

        :return: A precompiled selector object.
        :rtype: soupsieve.SoupSieve
        """
        return self.api.compile(
            select, self._ns(namespaces, select), flags, **kwargs
        )

    def select_one(self, select, namespaces=None, flags=0, **kwargs):
        """Perform a CSS selection operation on the current Tag and return the
        first result.

        This uses the Soup Sieve library. For more information, see
        that library's documentation for the soupsieve.select_one()
        method.

        :param selector: A CSS selector.

        :param namespaces: A dictionary mapping namespace prefixes
        used in the CSS selector to namespace URIs. By default,
        Beautiful Soup will use the prefixes it encountered while
        parsing the document.

        :param flags: Flags to be passed into Soup Sieve's
        soupsieve.select_one() method.

        :param kwargs: Keyword arguments to be passed into SoupSieve's
        soupsieve.select_one() method.

        :return: A Tag, or None if the selector has no match.
        :rtype: bs4.element.Tag

        """
        return self.api.select_one(
            select, self.tag, self._ns(namespaces, select), flags, **kwargs
        )

    def select(self, select, namespaces=None, limit=0, flags=0, **kwargs):
        """Perform a CSS selection operation on the current Tag.

        This uses the Soup Sieve library. For more information, see
        that library's documentation for the soupsieve.select()
        method.

        :param selector: A string containing a CSS selector.

        :param namespaces: A dictionary mapping namespace prefixes
        used in the CSS selector to namespace URIs. By default,
        Beautiful Soup will pass in the prefixes it encountered while
        parsing the document.

        :param limit: After finding this number of results, stop looking.

        :param flags: Flags to be passed into Soup Sieve's
        soupsieve.select() method.

        :param kwargs: Keyword arguments to be passed into SoupSieve's
        soupsieve.select() method.

        :return: A ResultSet of Tag objects.
        :rtype: bs4.element.ResultSet

        """
        if limit is None:
            limit = 0

        return self._rs(
            self.api.select(
                select, self.tag, self._ns(namespaces, select), limit, flags,
                **kwargs
            )
        )

    def iselect(self, select, namespaces=None, limit=0, flags=0, **kwargs):
        """Perform a CSS selection operation on the current Tag.

        This uses the Soup Sieve library. For more information, see
        that library's documentation for the soupsieve.iselect()
        method. It is the same as select(), but it returns a generator
        instead of a list.

        :param selector: A string containing a CSS selector.

        :param namespaces: A dictionary mapping namespace prefixes
        used in the CSS selector to namespace URIs. By default,
        Beautiful Soup will pass in the prefixes it encountered while
        parsing the document.

        :param limit: After finding this number of results, stop looking.

        :param flags: Flags to be passed into Soup Sieve's
        soupsieve.iselect() method.

        :param kwargs: Keyword arguments to be passed into SoupSieve's
        soupsieve.iselect() method.

        :return: A generator
        :rtype: types.GeneratorType
        """
        return self.api.iselect(
            select, self.tag, self._ns(namespaces, select), limit, flags, **kwargs
        )

    def closest(self, select, namespaces=None, flags=0, **kwargs):
        """Find the Tag closest to this one that matches the given selector.

        This uses the Soup Sieve library. For more information, see
        that library's documentation for the soupsieve.closest()
        method.

        :param selector: A string containing a CSS selector.

        :param namespaces: A dictionary mapping namespace prefixes
        used in the CSS selector to namespace URIs. By default,
        Beautiful Soup will pass in the prefixes it encountered while
        parsing the document.

        :param flags: Flags to be passed into Soup Sieve's
        soupsieve.closest() method.

        :param kwargs: Keyword arguments to be passed into SoupSieve's
        soupsieve.closest() method.

        :return: A Tag, or None if there is no match.
        :rtype: bs4.Tag

        """
        return self.api.closest(
            select, self.tag, self._ns(namespaces, select), flags, **kwargs
        )

    def match(self, select, namespaces=None, flags=0, **kwargs):
        """Check whether this Tag matches the given CSS selector.

        This uses the Soup Sieve library. For more information, see
        that library's documentation for the soupsieve.match()
        method.

        :param select: A CSS selector.

        :param namespaces: A dictionary mapping namespace prefixes
        used in the CSS selector to namespace URIs. By default,
        Beautiful Soup will pass in the prefixes it encountered while
        parsing the document.

        :param flags: Flags to be passed into Soup Sieve's
        soupsieve.match() method.

        :param kwargs: Keyword arguments to be passed into SoupSieve's
        soupsieve.match() method.

        :return: True if this Tag matches the selector; False otherwise.
        :rtype: bool
        """
        return self.api.match(
            select, self.tag, self._ns(namespaces, select), flags, **kwargs
        )

    def filter(self, select, namespaces=None, flags=0, **kwargs):
        """Filter this Tag's direct children based on the given CSS selector.

        This uses the Soup Sieve library. It works the same way as
        passing this Tag into that library's soupsieve.filter()
        method. For more information, see the documentation for
        soupsieve.filter().

        :param namespaces: A dictionary mapping namespace prefixes
        used in the CSS selector to namespace URIs. By default,
        Beautiful Soup will pass in the prefixes it encountered while
        parsing the document.

        :param flags: Flags to be passed into Soup Sieve's
        soupsieve.filter() method.

        :param kwargs: Keyword arguments to be passed into SoupSieve's
        soupsieve.filter() method.

        :return: A ResultSet of Tag objects.
        :rtype: bs4.element.ResultSet

        """
        return self._rs(
            self.api.filter(
                select, self.tag, self._ns(namespaces, select), flags, **kwargs
            )
        )
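
Aside, not part of the diff: in this vendored copy soupsieve is stubbed to None, so the proxy raises NotImplementedError. On a stock bs4 4.12+ install with soupsieve present, the deleted class is reached like this (markup and selectors invented for illustration):

from bs4 import BeautifulSoup

soup = BeautifulSoup('<div id="main"><p class="x">hi</p></div>', "html.parser")
print(soup.css.select_one("div#main p.x").text)   # 'hi'
tag = soup.select_one("p")
print(tag.css.match("p.x"))                       # True
compiled = soup.css.compile("p.x")                # a soupsieve.SoupSieve
print(compiled.select(soup))                      # [<p class="x">hi</p>]
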
@@ -6,185 +6,61 @@ necessary. It is heavily based on code from Mark Pilgrim's Universal
Feed Parser. It works best on XML and HTML, but it does not rewrite the
XML or HTML to reflect a new encoding; that's the tree builder's job.
"""
# Use of this source code is governed by the MIT license.
__license__ = "MIT"

from html.entities import codepoint2name
from collections import defaultdict
import codecs
from html.entities import codepoint2name
import re
import logging
import string

# Import a library to autodetect character encodings. We'll support
# any of a number of libraries that all support the same API:
#
# * cchardet
# * chardet
# * charset-normalizer
chardet_module = None
# Import a library to autodetect character encodings.
chardet_type = None
try:
    # First try the fast C implementation.
    # PyPI package: cchardet
    import cchardet as chardet_module
    import cchardet
    def chardet_dammit(s):
        return cchardet.detect(s)['encoding']
except ImportError:
    try:
        # Fall back to the pure Python implementation
        # Debian package: python-chardet
        # PyPI package: chardet
        import chardet as chardet_module
        import chardet
        def chardet_dammit(s):
            return chardet.detect(s)['encoding']
        #import chardet.constants
        #chardet.constants._debug = 1
    except ImportError:
        try:
            # PyPI package: charset-normalizer
            import charset_normalizer as chardet_module
        except ImportError:
            # No chardet available.
            chardet_module = None

if chardet_module:
    def chardet_dammit(s):
        if isinstance(s, str):
            # No chardet available.
            def chardet_dammit(s):
                return None
            return chardet_module.detect(s)['encoding']
else:
    def chardet_dammit(s):
        return None

# Build bytestring and Unicode versions of regular expressions for finding
# a declared encoding inside an XML or HTML document.
xml_encoding = '^\\s*<\\?.*encoding=[\'"](.*?)[\'"].*\\?>'
html_meta = '<\\s*meta[^>]+charset\\s*=\\s*["\']?([^>]*?)[ /;\'">]'
encoding_res = dict()
encoding_res[bytes] = {
    'html' : re.compile(html_meta.encode("ascii"), re.I),
    'xml' : re.compile(xml_encoding.encode("ascii"), re.I),
}
encoding_res[str] = {
    'html' : re.compile(html_meta, re.I),
    'xml' : re.compile(xml_encoding, re.I)
}

from html.entities import html5
xml_encoding_re = re.compile(
    r'^<\?.*encoding=[\'"](.*?)[\'"].*\?>'.encode(), re.I)
html_meta_re = re.compile(
    r'<\s*meta[^>]+charset\s*=\s*["\']?([^>]*?)[ /;\'">]'.encode(), re.I)

class EntitySubstitution(object):
    """The ability to substitute XML or HTML entities for certain characters."""

    """Substitute XML or HTML entities for the corresponding characters."""

    def _populate_class_variables():
        """Initialize variables used by this class to manage the plethora of
        HTML5 named entities.

        This function returns a 3-tuple containing two dictionaries
        and a regular expression:

        unicode_to_name - A mapping of Unicode strings like "⦨" to
        entity names like "angmsdaa". When a single Unicode string has
        multiple entity names, we try to choose the most commonly-used
        name.

        name_to_unicode: A mapping of entity names like "angmsdaa" to
        Unicode strings like "⦨".

        named_entity_re: A regular expression matching (almost) any
        Unicode string that corresponds to an HTML5 named entity.
        """
        unicode_to_name = {}
        name_to_unicode = {}

        short_entities = set()
        long_entities_by_first_character = defaultdict(set)

        for name_with_semicolon, character in sorted(html5.items()):
            # "It is intentional, for legacy compatibility, that many
            # code points have multiple character reference names. For
            # example, some appear both with and without the trailing
            # semicolon, or with different capitalizations."
            # - https://html.spec.whatwg.org/multipage/named-characters.html#named-character-references
            #
            # The parsers are in charge of handling (or not) character
            # references with no trailing semicolon, so we remove the
            # semicolon whenever it appears.
            if name_with_semicolon.endswith(';'):
                name = name_with_semicolon[:-1]
            else:
                name = name_with_semicolon

            # When parsing HTML, we want to recognize any known named
            # entity and convert it to a sequence of Unicode
            # characters.
            if name not in name_to_unicode:
                name_to_unicode[name] = character

            # When _generating_ HTML, we want to recognize special
            # character sequences that _could_ be converted to named
            # entities.
            unicode_to_name[character] = name

            # We also need to build a regular expression that lets us
            # _find_ those characters in output strings so we can
            # replace them.
            #
            # This is tricky, for two reasons.

            if (len(character) == 1 and ord(character) < 128
                and character not in '<>&'):
                # First, it would be annoying to turn single ASCII
                # characters like "|" into named entities like
                # "&verbar;". The exceptions are <>&, which we _must_
                # turn into named entities to produce valid HTML.
                continue

            if len(character) > 1 and all(ord(x) < 128 for x in character):
                # We also do not want to turn _combinations_ of ASCII
                # characters like 'fj' into named entities like '&fjlig;',
                # though that's more debatable.
                continue

            # Second, some named entities have a Unicode value that's
            # a subset of the Unicode value for some _other_ named
            # entity. As an example, '\u2267' is &GreaterFullEqual;,
            # but '\u2267\u0338' is &NotGreaterFullEqual;. Our regular
            # expression needs to match the first two characters of
            # "\u2267\u0338foo", but only the first character of
            # "\u2267foo".
            #
            # In this step, we build two sets of characters that
            # _eventually_ need to go into the regular expression. But
            # we won't know exactly what the regular expression needs
            # to look like until we've gone through the entire list of
            # named entities.
            if len(character) == 1:
                short_entities.add(character)
            else:
                long_entities_by_first_character[character[0]].add(character)

        # Now that we've been through the entire list of entities, we
        # can create a regular expression that matches any of them.
        particles = set()
        for short in short_entities:
            long_versions = long_entities_by_first_character[short]
            if not long_versions:
                particles.add(short)
            else:
                ignore = "".join([x[1] for x in long_versions])
                # This finds, e.g. \u2267 but only if it is _not_
                # followed by \u0338.
                particles.add("%s(?![%s])" % (short, ignore))

        for long_entities in list(long_entities_by_first_character.values()):
            for long_entity in long_entities:
                particles.add(long_entity)

        re_definition = "(%s)" % "|".join(particles)

        # If an entity shows up in both html5 and codepoint2name, it's
        # likely that HTML5 gives it several different names, such as
        # 'rsquo' and 'rsquor'. When converting Unicode characters to
        # named entities, the codepoint2name name should take
        # precedence where possible, since that's the more easily
        # recognizable one.
        lookup = {}
        reverse_lookup = {}
        characters_for_re = []
        for codepoint, name in list(codepoint2name.items()):
            character = chr(codepoint)
            unicode_to_name[character] = name

        return unicode_to_name, name_to_unicode, re.compile(re_definition)
            if codepoint != 34:
                # There's no point in turning the quotation mark into
                # &quot;, unless it happens within an attribute value, which
                # is handled elsewhere.
                characters_for_re.append(character)
                lookup[character] = name
            # But we do want to turn &quot; into the quotation mark.
            reverse_lookup[name] = character
        re_definition = "[%s]" % "".join(characters_for_re)
        return lookup, reverse_lookup, re.compile(re_definition)
    (CHARACTER_TO_HTML_ENTITY, HTML_ENTITY_TO_CHARACTER,
     CHARACTER_TO_HTML_ENTITY_RE) = _populate_class_variables()
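
Aside, not part of the diff: a quick illustration of the three objects returned above. These particular values come from codepoint2name, so they hold on both sides of the diff:

from bs4.dammit import EntitySubstitution

print(EntitySubstitution.CHARACTER_TO_HTML_ENTITY["\u00e9"])  # 'eacute'
print(EntitySubstitution.HTML_ENTITY_TO_CHARACTER["eacute"])  # 'é'
m = EntitySubstitution.CHARACTER_TO_HTML_ENTITY_RE.search("caf\u00e9")
print(m.group(0))                                             # 'é'
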
@@ -196,23 +72,21 @@ class EntitySubstitution(object):
        ">": "gt",
        }

    BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|"
                                           "&(?!#\\d+;|#x[0-9a-fA-F]+;|\\w+;)"
                                           ")")
    BARE_AMPERSAND_OR_BRACKET = re.compile(r"([<>]|"
                                           r"&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)"
                                           r")")

    AMPERSAND_OR_BRACKET = re.compile("([<>&])")
    AMPERSAND_OR_BRACKET = re.compile(r"([<>&])")

    @classmethod
    def _substitute_html_entity(cls, matchobj):
        """Used with a regular expression to substitute the
        appropriate HTML entity for a special character string."""
        entity = cls.CHARACTER_TO_HTML_ENTITY.get(matchobj.group(0))
        return "&%s;" % entity

    @classmethod
    def _substitute_xml_entity(cls, matchobj):
        """Used with a regular expression to substitute the
        appropriate XML entity for a special character string."""
        appropriate XML entity for an XML special character."""
        entity = cls.CHARACTER_TO_XML_ENTITY[matchobj.group(0)]
        return "&%s;" % entity

@@ -307,8 +181,6 @@ class EntitySubstitution(object):
        containing a LATIN SMALL LETTER E WITH ACUTE, but replacing that
        character with "&eacute;" will make it more readable to some
        people.

        :param s: A Unicode string.
        """
        return cls.CHARACTER_TO_HTML_ENTITY_RE.sub(
            cls._substitute_html_entity, s)
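
Aside, not part of the diff: the class-level tables and helpers above are exercised through the public substitute_html() and substitute_xml() classmethods, which exist on both sides of the diff:

from bs4.dammit import EntitySubstitution

print(EntitySubstitution.substitute_html("caf\u00e9 & cr\u00e8me"))
# 'caf&eacute; &amp; cr&egrave;me'
print(EntitySubstitution.substitute_xml("1 < 2 & 3"))
# '1 &lt; 2 &amp; 3'
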
@@ -320,65 +192,23 @@ class EncodingDetector:
    Order of precedence:

    1. Encodings you specifically tell EncodingDetector to try first
    (the known_definite_encodings argument to the constructor).
    (the override_encodings argument to the constructor).

    2. An encoding determined by sniffing the document's byte-order mark.

    3. Encodings you specifically tell EncodingDetector to try if
    byte-order mark sniffing fails (the user_encodings argument to the
    constructor).

    4. An encoding declared within the bytestring itself, either in an
    2. An encoding declared within the bytestring itself, either in an
    XML declaration (if the bytestring is to be interpreted as an XML
    document), or in a <meta> tag (if the bytestring is to be
    interpreted as an HTML document.)

    5. An encoding detected through textual analysis by chardet,
    3. An encoding detected through textual analysis by chardet,
    cchardet, or a similar external library.

    4. UTF-8.

    5. Windows-1252.

    """
    def __init__(self, markup, known_definite_encodings=None,
                 is_html=False, exclude_encodings=None,
                 user_encodings=None, override_encodings=None):
        """Constructor.

        :param markup: Some markup in an unknown encoding.

        :param known_definite_encodings: When determining the encoding
        of `markup`, these encodings will be tried first, in
        order. In HTML terms, this corresponds to the "known
        definite encoding" step defined here:
        https://html.spec.whatwg.org/multipage/parsing.html#parsing-with-a-known-character-encoding

        :param user_encodings: These encodings will be tried after the
        `known_definite_encodings` have been tried and failed, and
        after an attempt to sniff the encoding by looking at a
        byte order mark has failed. In HTML terms, this
        corresponds to the step "user has explicitly instructed
        the user agent to override the document's character
        encoding", defined here:
        https://html.spec.whatwg.org/multipage/parsing.html#determining-the-character-encoding

        :param override_encodings: A deprecated alias for
        known_definite_encodings. Any encodings here will be tried
        immediately after the encodings in
        known_definite_encodings.

        :param is_html: If True, this markup is considered to be
        HTML. Otherwise it's assumed to be XML.

        :param exclude_encodings: These encodings will not be tried,
        even if they otherwise would be.

        """
        self.known_definite_encodings = list(known_definite_encodings or [])
        if override_encodings:
            self.known_definite_encodings += override_encodings
        self.user_encodings = user_encodings or []
    def __init__(self, markup, override_encodings=None, is_html=False,
                 exclude_encodings=None):
        self.override_encodings = override_encodings or []
        exclude_encodings = exclude_encodings or []
        self.exclude_encodings = set([x.lower() for x in exclude_encodings])
        self.chardet_encoding = None
@@ -389,12 +219,6 @@ class EncodingDetector:
        self.markup, self.sniffed_encoding = self.strip_byte_order_mark(markup)

    def _usable(self, encoding, tried):
        """Should we even bother to try this encoding?

        :param encoding: Name of an encoding.
        :param tried: Encodings that have already been tried. This will be modified
        as a side effect.
        """
        if encoding is not None:
            encoding = encoding.lower()
            if encoding in self.exclude_encodings:
@@ -406,14 +230,9 @@ class EncodingDetector:

    @property
    def encodings(self):
        """Yield a number of encodings that might work for this markup.

        :yield: A sequence of strings.
        """
        """Yield a number of encodings that might work for this markup."""
        tried = set()

        # First, try the known definite encodings
        for e in self.known_definite_encodings:
        for e in self.override_encodings:
            if self._usable(e, tried):
                yield e

@@ -422,12 +241,6 @@ class EncodingDetector:
        if self._usable(self.sniffed_encoding, tried):
            yield self.sniffed_encoding

        # Sniffing the byte-order mark did nothing; try the user
        # encodings.
        for e in self.user_encodings:
            if self._usable(e, tried):
                yield e

        # Look within the document for an XML or HTML encoding
        # declaration.
        if self.declared_encoding is None:
@@ -450,11 +263,7 @@ class EncodingDetector:

    @classmethod
    def strip_byte_order_mark(cls, data):
        """If a byte-order mark is present, strip it and return the encoding it implies.

        :param data: Some markup.
        :return: A 2-tuple (modified data, implied encoding)
        """
        """If a byte-order mark is present, strip it and return the encoding it implies."""
        encoding = None
        if isinstance(data, str):
            # Unicode data cannot have a byte-order mark.
@@ -486,36 +295,21 @@ class EncodingDetector:

        An HTML encoding is declared in a <meta> tag, hopefully near the
        beginning of the document.

        :param markup: Some markup.
        :param is_html: If True, this markup is considered to be HTML. Otherwise
        it's assumed to be XML.
        :param search_entire_document: Since an encoding is supposed to be declared near the beginning
        of the document, most of the time it's only necessary to search a few kilobytes of data.
        Set this to True to force this method to search the entire document.
        """
        if search_entire_document:
            xml_endpos = html_endpos = len(markup)
        else:
            xml_endpos = 1024
            html_endpos = max(2048, int(len(markup) * 0.05))

        if isinstance(markup, bytes):
            res = encoding_res[bytes]
        else:
            res = encoding_res[str]

        xml_re = res['xml']
        html_re = res['html']

        declared_encoding = None
        declared_encoding_match = xml_re.search(markup, endpos=xml_endpos)
        declared_encoding_match = xml_encoding_re.search(markup, endpos=xml_endpos)
        if not declared_encoding_match and is_html:
            declared_encoding_match = html_re.search(markup, endpos=html_endpos)
            declared_encoding_match = html_meta_re.search(markup, endpos=html_endpos)
        if declared_encoding_match is not None:
            declared_encoding = declared_encoding_match.groups()[0]
            declared_encoding = declared_encoding_match.groups()[0].decode(
                'ascii', 'replace')
        if declared_encoding:
            if isinstance(declared_encoding, bytes):
                declared_encoding = declared_encoding.decode('ascii', 'replace')
            return declared_encoding.lower()
        return None
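
Aside, not part of the diff: both classmethods above are usable standalone; byte strings invented for illustration:

from bs4.dammit import EncodingDetector

data, encoding = EncodingDetector.strip_byte_order_mark(b"\xef\xbb\xbfhello")
print(encoding)   # 'utf-8'
print(EncodingDetector.find_declared_encoding(
    b'<meta charset="shift-jis">', is_html=True))   # 'shift-jis'
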
@@ -538,53 +332,15 @@ class UnicodeDammit:
        "iso-8859-2",
        ]

    def __init__(self, markup, known_definite_encodings=[],
                 smart_quotes_to=None, is_html=False, exclude_encodings=[],
                 user_encodings=None, override_encodings=None
                 ):
        """Constructor.

        :param markup: A bytestring representing markup in an unknown encoding.

        :param known_definite_encodings: When determining the encoding
        of `markup`, these encodings will be tried first, in
        order. In HTML terms, this corresponds to the "known
        definite encoding" step defined here:
        https://html.spec.whatwg.org/multipage/parsing.html#parsing-with-a-known-character-encoding

        :param user_encodings: These encodings will be tried after the
        `known_definite_encodings` have been tried and failed, and
        after an attempt to sniff the encoding by looking at a
        byte order mark has failed. In HTML terms, this
        corresponds to the step "user has explicitly instructed
        the user agent to override the document's character
        encoding", defined here:
        https://html.spec.whatwg.org/multipage/parsing.html#determining-the-character-encoding

        :param override_encodings: A deprecated alias for
        known_definite_encodings. Any encodings here will be tried
        immediately after the encodings in
        known_definite_encodings.

        :param smart_quotes_to: By default, Microsoft smart quotes will, like all other characters, be converted
        to Unicode characters. Setting this to 'ascii' will convert them to ASCII quotes instead.
        Setting it to 'xml' will convert them to XML entity references, and setting it to 'html'
        will convert them to HTML entity references.
        :param is_html: If True, this markup is considered to be HTML. Otherwise
        it's assumed to be XML.
        :param exclude_encodings: These encodings will not be considered, even
        if the sniffing code thinks they might make sense.

        """
    def __init__(self, markup, override_encodings=[],
                 smart_quotes_to=None, is_html=False, exclude_encodings=[]):
        self.smart_quotes_to = smart_quotes_to
        self.tried_encodings = []
        self.contains_replacement_characters = False
        self.is_html = is_html
        self.log = logging.getLogger(__name__)

        self.detector = EncodingDetector(
            markup, known_definite_encodings, is_html, exclude_encodings,
            user_encodings, override_encodings
        )
            markup, override_encodings, is_html, exclude_encodings)

        # Short-circuit if the data is in Unicode to begin with.
        if isinstance(markup, str) or markup == '':
@@ -612,10 +368,9 @@ class UnicodeDammit:
            if encoding != "ascii":
                u = self._convert_from(encoding, "replace")
                if u is not None:
                    self.log.warning(
                    logging.warning(
                        "Some characters could not be decoded, and were "
                        "replaced with REPLACEMENT CHARACTER."
                    )
                        "replaced with REPLACEMENT CHARACTER.")
                    self.contains_replacement_characters = True
                    break

@@ -644,10 +399,6 @@ class UnicodeDammit:
        return sub

    def _convert_from(self, proposed, errors="strict"):
        """Attempt to convert the markup to the proposed encoding.

        :param proposed: The name of a character encoding.
        """
        proposed = self.find_codec(proposed)
        if not proposed or (proposed, errors) in self.tried_encodings:
            return None
@@ -662,40 +413,30 @@ class UnicodeDammit:
            markup = smart_quotes_compiled.sub(self._sub_ms_char, markup)

        try:
            #print("Trying to convert document to %s (errors=%s)" % (
            #    proposed, errors))
            #print "Trying to convert document to %s (errors=%s)" % (
            #    proposed, errors)
            u = self._to_unicode(markup, proposed, errors)
            self.markup = u
            self.original_encoding = proposed
        except Exception as e:
            #print("That didn't work!")
            #print(e)
            #print "That didn't work!"
            #print e
            return None
        #print("Correct encoding: %s" % proposed)
        #print "Correct encoding: %s" % proposed
        return self.markup

    def _to_unicode(self, data, encoding, errors="strict"):
        """Given a string and its encoding, decodes the string into Unicode.

        :param encoding: The name of an encoding.
        """
        '''Given a string and its encoding, decodes the string into Unicode.
        %encoding is a string recognized by encodings.aliases'''
        return str(data, encoding, errors)

    @property
    def declared_html_encoding(self):
        """If the markup is an HTML document, returns the encoding declared _within_
        the document.
        """
        if not self.is_html:
            return None
        return self.detector.declared_encoding

    def find_codec(self, charset):
        """Convert the name of a character set to a codec name.

        :param charset: The name of a character set.
        :return: The name of a codec.
        """
        value = (self._codec(self.CHARSET_ALIASES.get(charset, charset))
                 or (charset and self._codec(charset.replace("-", "")))
                 or (charset and self._codec(charset.replace("-", "_")))
@@ -985,7 +726,7 @@ class UnicodeDammit:
        0xde : b'\xc3\x9e', # Þ
        0xdf : b'\xc3\x9f', # ß
        0xe0 : b'\xc3\xa0', # à
        0xe1 : b'\xc3\xa1', # á
        0xe1 : b'\xa1', # á
        0xe2 : b'\xc3\xa2', # â
        0xe3 : b'\xc3\xa3', # ã
        0xe4 : b'\xc3\xa4', # ä
@@ -1034,16 +775,12 @@ class UnicodeDammit:
        Currently the only situation supported is Windows-1252 (or its
        subset ISO-8859-1), embedded in UTF-8.

        :param in_bytes: A bytestring that you suspect contains
        characters from multiple encodings. Note that this _must_
        be a bytestring. If you've already converted the document
        to Unicode, you're too late.
        :param main_encoding: The primary encoding of `in_bytes`.
        :param embedded_encoding: The encoding that was used to embed characters
        in the main document.
        :return: A bytestring in which `embedded_encoding`
        characters have been converted to their `main_encoding`
        equivalents.
        The input must be a bytestring. If you've already converted
        the document to Unicode, you're too late.

        The output is a bytestring in which `embedded_encoding`
        characters have been converted to their `main_encoding`
        equivalents.
        """
        if embedded_encoding.replace('_', '-').lower() not in (
            'windows-1252', 'windows_1252'):
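
Aside, not part of the diff: detwingle() is the documented fix for the mixed-encoding case described above. A sketch along the lines of the upstream docs (the document is invented; the constructor keyword differs between the two sides, as the hunks show):

from bs4 import UnicodeDammit

snowmen = "\N{SNOWMAN}" * 3
quote = "\N{LEFT DOUBLE QUOTATION MARK}Hi!\N{RIGHT DOUBLE QUOTATION MARK}"
doc = snowmen.encode("utf8") + quote.encode("windows_1252")

# Rewrite the embedded Windows-1252 bytes as UTF-8, then decode cleanly.
fixed = UnicodeDammit.detwingle(doc)
print(fixed.decode("utf8"))   # '☃☃☃“Hi!”'
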
@@ -1,10 +1,9 @@
"""Diagnostic functions, mainly for use when doing tech support."""

# Use of this source code is governed by the MIT license.
__license__ = "MIT"

import cProfile
from io import BytesIO
from io import StringIO
from html.parser import HTMLParser
import bs4
from bs4 import BeautifulSoup, __version__
@@ -20,13 +19,9 @@ import sys
import cProfile

def diagnose(data):
    """Diagnostic suite for isolating common problems.

    :param data: A string containing markup that needs to be explained.
    :return: None; diagnostics are printed to standard output.
    """
    print(("Diagnostic running on Beautiful Soup %s" % __version__))
    print(("Python version %s" % sys.version))
    """Diagnostic suite for isolating common problems."""
    print("Diagnostic running on Beautiful Soup %s" % __version__)
    print("Python version %s" % sys.version)

    basic_parsers = ["html.parser", "html5lib", "lxml"]
    for name in basic_parsers:
@@ -40,70 +35,61 @@ def diagnose(data):
                name))

    if 'lxml' in basic_parsers:
        basic_parsers.append("lxml-xml")
        basic_parsers.append(["lxml", "xml"])
        try:
            from lxml import etree
            print(("Found lxml version %s" % ".".join(map(str,etree.LXML_VERSION))))
            print("Found lxml version %s" % ".".join(map(str,etree.LXML_VERSION)))
        except ImportError as e:
            print(
            print (
                "lxml is not installed or couldn't be imported.")


    if 'html5lib' in basic_parsers:
        try:
            import html5lib
            print(("Found html5lib version %s" % html5lib.__version__))
            print("Found html5lib version %s" % html5lib.__version__)
        except ImportError as e:
            print(
            print (
                "html5lib is not installed or couldn't be imported.")

    if hasattr(data, 'read'):
        data = data.read()
    elif os.path.exists(data):
        print('"%s" looks like a filename. Reading data from the file.' % data)
        data = open(data).read()
    elif data.startswith("http:") or data.startswith("https:"):
        print('"%s" looks like a URL. Beautiful Soup is not an HTTP client.' % data)
        print("You need to use some other library to get the document behind the URL, and feed that document to Beautiful Soup.")
        return
    print()

    for parser in basic_parsers:
        print(("Trying to parse your markup with %s" % parser))
        print("Trying to parse your markup with %s" % parser)
        success = False
        try:
            soup = BeautifulSoup(data, features=parser)
            soup = BeautifulSoup(data, parser)
            success = True
        except Exception as e:
            print(("%s could not parse the markup." % parser))
            print("%s could not parse the markup." % parser)
            traceback.print_exc()
        if success:
            print(("Here's what %s did with the markup:" % parser))
            print((soup.prettify()))
            print("Here's what %s did with the markup:" % parser)
            print(soup.prettify())

        print(("-" * 80))
        print("-" * 80)

def lxml_trace(data, html=True, **kwargs):
    """Print out the lxml events that occur during parsing.

    This lets you see how lxml parses a document when no Beautiful
    Soup code is running. You can use this to determine whether
    an lxml-specific problem is in Beautiful Soup's lxml tree builders
    or in lxml itself.

    :param data: Some markup.
    :param html: If True, markup will be parsed with lxml's HTML parser.
        if False, lxml's XML parser will be used.
    Soup code is running.
    """
    from lxml import etree
    recover = kwargs.pop('recover', True)
    if isinstance(data, str):
        data = data.encode("utf8")
    reader = BytesIO(data)
    for event, element in etree.iterparse(
        reader, html=html, recover=recover, **kwargs
    ):
    for event, element in etree.iterparse(StringIO(data), html=html, **kwargs):
        print(("%s, %4s, %s" % (event, element.tag, element.text)))

class AnnouncingParser(HTMLParser):
    """Subclass of HTMLParser that announces parse events, without doing
    anything else.

    You can use this to get a picture of how html.parser sees a given
    document. The easiest way to do this is to call `htmlparser_trace`.
    """
    """Announces HTMLParser parse events, without doing anything else."""

    def _p(self, s):
        print(s)
@@ -140,8 +126,6 @@ def htmlparser_trace(data):

    This lets you see how HTMLParser parses a document when no
    Beautiful Soup code is running.

    :param data: Some markup.
    """
    parser = AnnouncingParser()
    parser.feed(data)
@@ -184,9 +168,9 @@ def rdoc(num_elements=1000):

def benchmark_parsers(num_elements=100000):
    """Very basic head-to-head performance benchmark."""
    print(("Comparative parser benchmark on Beautiful Soup %s" % __version__))
    print("Comparative parser benchmark on Beautiful Soup %s" % __version__)
    data = rdoc(num_elements)
    print(("Generated a large invalid HTML document (%d bytes)." % len(data)))
    print("Generated a large invalid HTML document (%d bytes)." % len(data))

    for parser in ["lxml", ["lxml", "html"], "html5lib", "html.parser"]:
        success = False
@@ -196,26 +180,26 @@ def benchmark_parsers(num_elements=100000):
            b = time.time()
            success = True
        except Exception as e:
            print(("%s could not parse the markup." % parser))
            print("%s could not parse the markup." % parser)
            traceback.print_exc()
        if success:
            print(("BS4+%s parsed the markup in %.2fs." % (parser, b-a)))
            print("BS4+%s parsed the markup in %.2fs." % (parser, b-a))

    from lxml import etree
    a = time.time()
    etree.HTML(data)
    b = time.time()
    print(("Raw lxml parsed the markup in %.2fs." % (b-a)))
    print("Raw lxml parsed the markup in %.2fs." % (b-a))

    import html5lib
    parser = html5lib.HTMLParser()
    a = time.time()
    parser.parse(data)
    b = time.time()
    print(("Raw html5lib parsed the markup in %.2fs." % (b-a)))
    print("Raw html5lib parsed the markup in %.2fs." % (b-a))

def profile(num_elements=100000, parser="lxml"):
    """Use Python's profiler on a randomly generated document."""

    filehandle = tempfile.NamedTemporaryFile()
    filename = filehandle.name

@@ -228,6 +212,5 @@ def profile(num_elements=100000, parser="lxml"):
    stats.sort_stats("cumulative")
    stats.print_stats('_html5lib|bs4', 50)

# If this file is run as a script, standard input is diagnosed.
if __name__ == '__main__':
    diagnose(sys.stdin.read())
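
Aside, not part of the diff: the module is driven either from stdin, as in the __main__ block above, or called directly. Illustrative only; note that lxml_trace()'s argument handling differs between the two sides of the hunk:

import bs4.diagnose

bs4.diagnose.diagnose("<p>Some <b>bad<p>markup")    # per-parser report
bs4.diagnose.lxml_trace("<p>one<p>two", html=True)  # raw lxml parse events
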
File diff suppressed because it is too large
@@ -1,185 +0,0 @@
from bs4.dammit import EntitySubstitution

class Formatter(EntitySubstitution):
    """Describes a strategy to use when outputting a parse tree to a string.

    Some parts of this strategy come from the distinction between
    HTML4, HTML5, and XML. Others are configurable by the user.

    Formatters are passed in as the `formatter` argument to methods
    like `PageElement.encode`. Most people won't need to think about
    formatters, and most people who need to think about them can pass
    in one of these predefined strings as `formatter` rather than
    making a new Formatter object:

    For HTML documents:
     * 'html' - HTML entity substitution for generic HTML documents. (default)
     * 'html5' - HTML entity substitution for HTML5 documents, as
       well as some optimizations in the way tags are rendered.
     * 'minimal' - Only make the substitutions necessary to guarantee
       valid HTML.
     * None - Do not perform any substitution. This will be faster
       but may result in invalid markup.

    For XML documents:
     * 'html' - Entity substitution for XHTML documents.
     * 'minimal' - Only make the substitutions necessary to guarantee
       valid XML. (default)
     * None - Do not perform any substitution. This will be faster
       but may result in invalid markup.
    """
# Registries of XML and HTML formatters.
|
||||
XML_FORMATTERS = {}
|
||||
HTML_FORMATTERS = {}
|
||||
|
||||
HTML = 'html'
|
||||
XML = 'xml'
|
||||
|
||||
HTML_DEFAULTS = dict(
|
||||
cdata_containing_tags=set(["script", "style"]),
|
||||
)
|
||||
|
||||
def _default(self, language, value, kwarg):
|
||||
if value is not None:
|
||||
return value
|
||||
if language == self.XML:
|
||||
return set()
|
||||
return self.HTML_DEFAULTS[kwarg]
|
||||
|
||||
def __init__(
|
||||
self, language=None, entity_substitution=None,
|
||||
void_element_close_prefix='/', cdata_containing_tags=None,
|
||||
empty_attributes_are_booleans=False, indent=1,
|
||||
):
|
||||
r"""Constructor.
|
||||
|
||||
:param language: This should be Formatter.XML if you are formatting
|
||||
XML markup and Formatter.HTML if you are formatting HTML markup.
|
||||
|
||||
:param entity_substitution: A function to call to replace special
|
||||
characters with XML/HTML entities. For examples, see
|
||||
bs4.dammit.EntitySubstitution.substitute_html and substitute_xml.
|
||||
:param void_element_close_prefix: By default, void elements
|
||||
are represented as <tag/> (XML rules) rather than <tag>
|
||||
(HTML rules). To get <tag>, pass in the empty string.
|
||||
:param cdata_containing_tags: The list of tags that are defined
|
||||
as containing CDATA in this dialect. For example, in HTML,
|
||||
<script> and <style> tags are defined as containing CDATA,
|
||||
and their contents should not be formatted.
|
||||
        :param empty_attributes_are_booleans: Render attributes whose value
            is the empty string as HTML-style boolean attributes.
            (Attributes whose value is None are always rendered this way.)

        :param indent: If indent is a non-negative integer or string,
            then the contents of elements will be indented
            appropriately when pretty-printing. An indent level of 0,
            negative, or "" will only insert newlines. Using a
            positive integer indent indents that many spaces per
            level. If indent is a string (such as "\t"), that string
            is used to indent each level. The default behavior is to
            indent one space per level.
        """
        self.language = language
        self.entity_substitution = entity_substitution
        self.void_element_close_prefix = void_element_close_prefix
        self.cdata_containing_tags = self._default(
            language, cdata_containing_tags, 'cdata_containing_tags'
        )
        self.empty_attributes_are_booleans = empty_attributes_are_booleans
        if indent is None:
            indent = 0
        if isinstance(indent, int):
            if indent < 0:
                indent = 0
            indent = ' ' * indent
        elif isinstance(indent, str):
            indent = indent
        else:
            indent = ' '
        self.indent = indent

    def substitute(self, ns):
        """Process a string that needs to undergo entity substitution.
        This may be a string encountered in an attribute value or as
        text.

        :param ns: A string.
        :return: A string with certain characters replaced by named
           or numeric entities.
        """
        if not self.entity_substitution:
            return ns
        from .element import NavigableString
        if (isinstance(ns, NavigableString)
            and ns.parent is not None
            and ns.parent.name in self.cdata_containing_tags):
            # Do nothing.
            return ns
        # Substitute.
        return self.entity_substitution(ns)

    def attribute_value(self, value):
        """Process the value of an attribute.

        :param ns: A string.
        :return: A string with certain characters replaced by named
           or numeric entities.
        """
        return self.substitute(value)

    def attributes(self, tag):
        """Reorder a tag's attributes however you want.

        By default, attributes are sorted alphabetically. This makes
        behavior consistent between Python 2 and Python 3, and preserves
        backwards compatibility with older versions of Beautiful Soup.

        If `empty_attributes_are_booleans` is True, then attributes whose
        values are set to the empty string will be treated as boolean
        attributes.
        """
        if tag.attrs is None:
            return []
        return sorted(
            (k, (None if self.empty_attributes_are_booleans and v == '' else v))
            for k, v in list(tag.attrs.items())
        )

class HTMLFormatter(Formatter):
    """A generic Formatter for HTML."""
    REGISTRY = {}
    def __init__(self, *args, **kwargs):
        super(HTMLFormatter, self).__init__(self.HTML, *args, **kwargs)


class XMLFormatter(Formatter):
    """A generic Formatter for XML."""
    REGISTRY = {}
    def __init__(self, *args, **kwargs):
        super(XMLFormatter, self).__init__(self.XML, *args, **kwargs)


# Set up aliases for the default formatters.
HTMLFormatter.REGISTRY['html'] = HTMLFormatter(
    entity_substitution=EntitySubstitution.substitute_html
)
HTMLFormatter.REGISTRY["html5"] = HTMLFormatter(
    entity_substitution=EntitySubstitution.substitute_html,
    void_element_close_prefix=None,
    empty_attributes_are_booleans=True,
)
HTMLFormatter.REGISTRY["minimal"] = HTMLFormatter(
    entity_substitution=EntitySubstitution.substitute_xml
)
HTMLFormatter.REGISTRY[None] = HTMLFormatter(
    entity_substitution=None
)
XMLFormatter.REGISTRY["html"] = XMLFormatter(
    entity_substitution=EntitySubstitution.substitute_html
)
XMLFormatter.REGISTRY["minimal"] = XMLFormatter(
    entity_substitution=EntitySubstitution.substitute_xml
)
XMLFormatter.REGISTRY[None] = Formatter(
    Formatter.XML, entity_substitution=None
)
686 bitbake/lib/bs4/testing.py Normal file
@@ -0,0 +1,686 @@
"""Helper classes for tests."""
|
||||
|
||||
__license__ = "MIT"
|
||||
|
||||
import pickle
|
||||
import copy
|
||||
import unittest
|
||||
from unittest import TestCase
|
||||
from bs4 import BeautifulSoup
|
||||
from bs4.element import (
|
||||
CharsetMetaAttributeValue,
|
||||
Comment,
|
||||
ContentMetaAttributeValue,
|
||||
Doctype,
|
||||
SoupStrainer,
|
||||
)
|
||||
|
||||
from bs4.builder._htmlparser import HTMLParserTreeBuilder
|
||||
default_builder = HTMLParserTreeBuilder
|
||||
|
||||
|
||||
class SoupTest(unittest.TestCase):
|
||||
|
||||
@property
|
||||
def default_builder(self):
|
||||
return default_builder()
|
||||
|
||||
def soup(self, markup, **kwargs):
|
||||
"""Build a Beautiful Soup object from markup."""
|
||||
builder = kwargs.pop('builder', self.default_builder)
|
||||
return BeautifulSoup(markup, builder=builder, **kwargs)
|
||||
|
||||
def document_for(self, markup):
|
||||
"""Turn an HTML fragment into a document.
|
||||
|
||||
The details depend on the builder.
|
||||
"""
|
||||
return self.default_builder.test_fragment_to_document(markup)
|
||||
|
||||
def assertSoupEquals(self, to_parse, compare_parsed_to=None):
|
||||
builder = self.default_builder
|
||||
obj = BeautifulSoup(to_parse, builder=builder)
|
||||
if compare_parsed_to is None:
|
||||
compare_parsed_to = to_parse
|
||||
|
||||
self.assertEqual(obj.decode(), self.document_for(compare_parsed_to))
|
||||
|
||||
def assertConnectedness(self, element):
|
||||
"""Ensure that next_element and previous_element are properly
|
||||
set for all descendants of the given element.
|
||||
"""
|
||||
earlier = None
|
||||
for e in element.descendants:
|
||||
if earlier:
|
||||
self.assertEqual(e, earlier.next_element)
|
||||
self.assertEqual(earlier, e.previous_element)
|
||||
earlier = e
|
||||
|
||||
class HTMLTreeBuilderSmokeTest(SoupTest):
|
||||
|
||||
"""A basic test of a treebuilder's competence.
|
||||
|
||||
Any HTML treebuilder, present or future, should be able to pass
|
||||
these tests. With invalid markup, there's room for interpretation,
|
||||
and different parsers can handle it differently. But with the
|
||||
markup in these tests, there's not much room for interpretation.
|
||||
"""
|
||||
|
||||
def test_pickle_and_unpickle_identity(self):
|
||||
# Pickling a tree, then unpickling it, yields a tree identical
|
||||
# to the original.
|
||||
tree = self.soup("<a><b>foo</a>")
|
||||
dumped = pickle.dumps(tree, 2)
|
||||
loaded = pickle.loads(dumped)
|
||||
self.assertEqual(loaded.__class__, BeautifulSoup)
|
||||
self.assertEqual(loaded.decode(), tree.decode())
|
||||
|
||||
def assertDoctypeHandled(self, doctype_fragment):
|
||||
"""Assert that a given doctype string is handled correctly."""
|
||||
doctype_str, soup = self._document_with_doctype(doctype_fragment)
|
||||
|
||||
# Make sure a Doctype object was created.
|
||||
doctype = soup.contents[0]
|
||||
self.assertEqual(doctype.__class__, Doctype)
|
||||
self.assertEqual(doctype, doctype_fragment)
|
||||
self.assertEqual(str(soup)[:len(doctype_str)], doctype_str)
|
||||
|
||||
# Make sure that the doctype was correctly associated with the
|
||||
# parse tree and that the rest of the document parsed.
|
||||
self.assertEqual(soup.p.contents[0], 'foo')
|
||||
|
||||
def _document_with_doctype(self, doctype_fragment):
|
||||
"""Generate and parse a document with the given doctype."""
|
||||
doctype = '<!DOCTYPE %s>' % doctype_fragment
|
||||
markup = doctype + '\n<p>foo</p>'
|
||||
soup = self.soup(markup)
|
||||
return doctype, soup
|
||||
|
||||
def test_normal_doctypes(self):
|
||||
"""Make sure normal, everyday HTML doctypes are handled correctly."""
|
||||
self.assertDoctypeHandled("html")
|
||||
self.assertDoctypeHandled(
|
||||
'html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"')
|
||||
|
||||
def test_empty_doctype(self):
|
||||
soup = self.soup("<!DOCTYPE>")
|
||||
doctype = soup.contents[0]
|
||||
self.assertEqual("", doctype.strip())
|
||||
|
||||
def test_public_doctype_with_url(self):
|
||||
doctype = 'html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"'
|
||||
self.assertDoctypeHandled(doctype)
|
||||
|
||||
def test_system_doctype(self):
|
||||
self.assertDoctypeHandled('foo SYSTEM "http://www.example.com/"')
|
||||
|
||||
def test_namespaced_system_doctype(self):
|
||||
# We can handle a namespaced doctype with a system ID.
|
||||
self.assertDoctypeHandled('xsl:stylesheet SYSTEM "htmlent.dtd"')
|
||||
|
||||
def test_namespaced_public_doctype(self):
|
||||
# Test a namespaced doctype with a public id.
|
||||
self.assertDoctypeHandled('xsl:stylesheet PUBLIC "htmlent.dtd"')
|
||||
|
||||
def test_real_xhtml_document(self):
|
||||
"""A real XHTML document should come out more or less the same as it went in."""
|
||||
markup = b"""<?xml version="1.0" encoding="utf-8"?>
|
||||
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN">
|
||||
<html xmlns="http://www.w3.org/1999/xhtml">
|
||||
<head><title>Hello.</title></head>
|
||||
<body>Goodbye.</body>
|
||||
</html>"""
|
||||
soup = self.soup(markup)
|
||||
self.assertEqual(
|
||||
soup.encode("utf-8").replace(b"\n", b""),
|
||||
markup.replace(b"\n", b""))
|
||||
|
||||
def test_processing_instruction(self):
|
||||
markup = b"""<?PITarget PIContent?>"""
|
||||
soup = self.soup(markup)
|
||||
self.assertEqual(markup, soup.encode("utf8"))
|
||||
|
||||
def test_deepcopy(self):
|
||||
"""Make sure you can copy the tree builder.
|
||||
|
||||
This is important because the builder is part of a
|
||||
BeautifulSoup object, and we want to be able to copy that.
|
||||
"""
|
||||
copy.deepcopy(self.default_builder)
|
||||
|
||||
def test_p_tag_is_never_empty_element(self):
|
||||
"""A <p> tag is never designated as an empty-element tag.
|
||||
|
||||
Even if the markup shows it as an empty-element tag, it
|
||||
shouldn't be presented that way.
|
||||
"""
|
||||
soup = self.soup("<p/>")
|
||||
self.assertFalse(soup.p.is_empty_element)
|
||||
self.assertEqual(str(soup.p), "<p></p>")
|
||||
|
||||
def test_unclosed_tags_get_closed(self):
|
||||
"""A tag that's not closed by the end of the document should be closed.
|
||||
|
||||
This applies to all tags except empty-element tags.
|
||||
"""
|
||||
self.assertSoupEquals("<p>", "<p></p>")
|
||||
self.assertSoupEquals("<b>", "<b></b>")
|
||||
|
||||
self.assertSoupEquals("<br>", "<br/>")
|
||||
|
||||
def test_br_is_always_empty_element_tag(self):
|
||||
"""A <br> tag is designated as an empty-element tag.
|
||||
|
||||
Some parsers treat <br></br> as one <br/> tag, some parsers as
|
||||
two tags, but it should always be an empty-element tag.
|
||||
"""
|
||||
soup = self.soup("<br></br>")
|
||||
self.assertTrue(soup.br.is_empty_element)
|
||||
self.assertEqual(str(soup.br), "<br/>")
|
||||
|
||||
def test_nested_formatting_elements(self):
|
||||
self.assertSoupEquals("<em><em></em></em>")
|
||||
|
||||
def test_double_head(self):
|
||||
html = '''<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<title>Ordinary HEAD element test</title>
|
||||
</head>
|
||||
<script type="text/javascript">
|
||||
alert("Help!");
|
||||
</script>
|
||||
<body>
|
||||
Hello, world!
|
||||
</body>
|
||||
</html>
|
||||
'''
|
||||
soup = self.soup(html)
|
||||
self.assertEqual("text/javascript", soup.find('script')['type'])
|
||||
|
||||
def test_comment(self):
|
||||
# Comments are represented as Comment objects.
|
||||
markup = "<p>foo<!--foobar-->baz</p>"
|
||||
self.assertSoupEquals(markup)
|
||||
|
||||
soup = self.soup(markup)
|
||||
comment = soup.find(text="foobar")
|
||||
self.assertEqual(comment.__class__, Comment)
|
||||
|
||||
# The comment is properly integrated into the tree.
|
||||
foo = soup.find(text="foo")
|
||||
self.assertEqual(comment, foo.next_element)
|
||||
baz = soup.find(text="baz")
|
||||
self.assertEqual(comment, baz.previous_element)
|
||||
|
||||
def test_preserved_whitespace_in_pre_and_textarea(self):
|
||||
"""Whitespace must be preserved in <pre> and <textarea> tags."""
|
||||
self.assertSoupEquals("<pre> </pre>")
|
||||
self.assertSoupEquals("<textarea> woo </textarea>")
|
||||
|
||||
def test_nested_inline_elements(self):
|
||||
"""Inline elements can be nested indefinitely."""
|
||||
b_tag = "<b>Inside a B tag</b>"
|
||||
self.assertSoupEquals(b_tag)
|
||||
|
||||
nested_b_tag = "<p>A <i>nested <b>tag</b></i></p>"
|
||||
self.assertSoupEquals(nested_b_tag)
|
||||
|
||||
        double_nested_b_tag = "<p>A <a>doubly <i>nested <b>tag</b></i></a></p>"
        self.assertSoupEquals(double_nested_b_tag)

    def test_nested_block_level_elements(self):
        """Block elements can be nested."""
        soup = self.soup('<blockquote><p><b>Foo</b></p></blockquote>')
        blockquote = soup.blockquote
        self.assertEqual(blockquote.p.b.string, 'Foo')
        self.assertEqual(blockquote.b.string, 'Foo')

    def test_correctly_nested_tables(self):
        """One table can go inside another one."""
        markup = ('<table id="1">'
                  '<tr>'
                  "<td>Here's another table:"
                  '<table id="2">'
                  '<tr><td>foo</td></tr>'
                  '</table></td>')

        self.assertSoupEquals(
            markup,
            '<table id="1"><tr><td>Here\'s another table:'
            '<table id="2"><tr><td>foo</td></tr></table>'
            '</td></tr></table>')

        self.assertSoupEquals(
            "<table><thead><tr><td>Foo</td></tr></thead>"
            "<tbody><tr><td>Bar</td></tr></tbody>"
            "<tfoot><tr><td>Baz</td></tr></tfoot></table>")

    def test_deeply_nested_multivalued_attribute(self):
        # html5lib can set the attributes of the same tag many times
        # as it rearranges the tree. This has caused problems with
        # multivalued attributes.
        markup = '<table><div><div class="css"></div></div></table>'
        soup = self.soup(markup)
        self.assertEqual(["css"], soup.div.div['class'])

    def test_multivalued_attribute_on_html(self):
        # html5lib uses a different API to set the attributes of the
        # <html> tag. This has caused problems with multivalued
        # attributes.
        markup = '<html class="a b"></html>'
        soup = self.soup(markup)
        self.assertEqual(["a", "b"], soup.html['class'])

    def test_angle_brackets_in_attribute_values_are_escaped(self):
        self.assertSoupEquals('<a b="<a>"></a>', '<a b="&lt;a&gt;"></a>')

    def test_entities_in_attributes_converted_to_unicode(self):
        expect = '<p id="pi\N{LATIN SMALL LETTER N WITH TILDE}ata"></p>'
        self.assertSoupEquals('<p id="pi&#241;ata"></p>', expect)
        self.assertSoupEquals('<p id="pi&#xf1;ata"></p>', expect)
        self.assertSoupEquals('<p id="pi&#Xf1;ata"></p>', expect)
        self.assertSoupEquals('<p id="pi&ntilde;ata"></p>', expect)

    def test_entities_in_text_converted_to_unicode(self):
        expect = '<p>pi\N{LATIN SMALL LETTER N WITH TILDE}ata</p>'
        self.assertSoupEquals("<p>pi&#241;ata</p>", expect)
        self.assertSoupEquals("<p>pi&#xf1;ata</p>", expect)
        self.assertSoupEquals("<p>pi&#Xf1;ata</p>", expect)
        self.assertSoupEquals("<p>pi&ntilde;ata</p>", expect)

    def test_quot_entity_converted_to_quotation_mark(self):
        self.assertSoupEquals("<p>I said &quot;good day!&quot;</p>",
                              '<p>I said "good day!"</p>')

    def test_out_of_range_entity(self):
        expect = "\N{REPLACEMENT CHARACTER}"
        self.assertSoupEquals("&#10000000000000;", expect)
        self.assertSoupEquals("&#x10000000000000;", expect)
        self.assertSoupEquals("&#1000000000;", expect)

    def test_multipart_strings(self):
        "Mostly to prevent a recurrence of a bug in the html5lib treebuilder."
        soup = self.soup("<html><h2>\nfoo</h2><p></p></html>")
        self.assertEqual("p", soup.h2.string.next_element.name)
        self.assertEqual("p", soup.p.name)
        self.assertConnectedness(soup)

    def test_head_tag_between_head_and_body(self):
        "Prevent recurrence of a bug in the html5lib treebuilder."
        content = """<html><head></head>
  <link></link>
  <body>foo</body>
</html>
"""
        soup = self.soup(content)
        self.assertNotEqual(None, soup.html.body)
        self.assertConnectedness(soup)

    def test_multiple_copies_of_a_tag(self):
        "Prevent recurrence of a bug in the html5lib treebuilder."
        content = """<!DOCTYPE html>
<html>
 <body>
   <article id="a" >
   <div><a href="1"></div>
    <footer>
     <a href="2"></a>
    </footer>
   </article>
  </body>
</html>
"""
        soup = self.soup(content)
        self.assertConnectedness(soup.article)

    def test_basic_namespaces(self):
        """Parsers don't need to *understand* namespaces, but at the
        very least they should not choke on namespaces or lose
        data."""

        markup = b'<html xmlns="http://www.w3.org/1999/xhtml" xmlns:mathml="http://www.w3.org/1998/Math/MathML" xmlns:svg="http://www.w3.org/2000/svg"><head></head><body><mathml:msqrt>4</mathml:msqrt><b svg:fill="red"></b></body></html>'
        soup = self.soup(markup)
        self.assertEqual(markup, soup.encode())
        html = soup.html
        self.assertEqual('http://www.w3.org/1999/xhtml', soup.html['xmlns'])
        self.assertEqual(
            'http://www.w3.org/1998/Math/MathML', soup.html['xmlns:mathml'])
        self.assertEqual(
            'http://www.w3.org/2000/svg', soup.html['xmlns:svg'])

    def test_multivalued_attribute_value_becomes_list(self):
        markup = b'<a class="foo bar">'
        soup = self.soup(markup)
        self.assertEqual(['foo', 'bar'], soup.a['class'])

    #
    # Generally speaking, tests below this point are more tests of
    # Beautiful Soup than tests of the tree builders. But parsers are
    # weird, so we run these tests separately for every tree builder
    # to detect any differences between them.
    #

    def test_can_parse_unicode_document(self):
        # A seemingly innocuous document... but it's in Unicode! And
        # it contains characters that can't be represented in the
        # encoding found in the declaration! The horror!
        markup = '<html><head><meta encoding="euc-jp"></head><body>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</body>'
        soup = self.soup(markup)
        self.assertEqual('Sacr\xe9 bleu!', soup.body.string)

    def test_soupstrainer(self):
        """Parsers should be able to work with SoupStrainers."""
        strainer = SoupStrainer("b")
        soup = self.soup("A <b>bold</b> <meta/> <i>statement</i>",
                         parse_only=strainer)
        self.assertEqual(soup.decode(), "<b>bold</b>")

    def test_single_quote_attribute_values_become_double_quotes(self):
        self.assertSoupEquals("<foo attr='bar'></foo>",
                              '<foo attr="bar"></foo>')

    def test_attribute_values_with_nested_quotes_are_left_alone(self):
        text = """<foo attr='bar "brawls" happen'>a</foo>"""
        self.assertSoupEquals(text)

    def test_attribute_values_with_double_nested_quotes_get_quoted(self):
        text = """<foo attr='bar "brawls" happen'>a</foo>"""
        soup = self.soup(text)
        soup.foo['attr'] = 'Brawls happen at "Bob\'s Bar"'
        self.assertSoupEquals(
            soup.foo.decode(),
            """<foo attr="Brawls happen at &quot;Bob\'s Bar&quot;">a</foo>""")

    def test_ampersand_in_attribute_value_gets_escaped(self):
        self.assertSoupEquals('<this is="really messed up &amp; stuff"></this>',
                              '<this is="really messed up &amp; stuff"></this>')

        self.assertSoupEquals(
            '<a href="http://example.org?a=1&b=2;3">foo</a>',
            '<a href="http://example.org?a=1&amp;b=2;3">foo</a>')

    def test_escaped_ampersand_in_attribute_value_is_left_alone(self):
        self.assertSoupEquals('<a href="http://example.org?a=1&amp;b=2;3"></a>')

    def test_entities_in_strings_converted_during_parsing(self):
        # Both XML and HTML entities are converted to Unicode characters
        # during parsing.
        text = "<p>&lt;&lt;sacr&eacute;&#32;bleu!&gt;&gt;</p>"
        expected = "<p>&lt;&lt;sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</p>"
        self.assertSoupEquals(text, expected)

    def test_smart_quotes_converted_on_the_way_in(self):
        # Microsoft smart quotes are converted to Unicode characters during
        # parsing.
        quote = b"<p>\x91Foo\x92</p>"
        soup = self.soup(quote)
        self.assertEqual(
            soup.p.string,
            "\N{LEFT SINGLE QUOTATION MARK}Foo\N{RIGHT SINGLE QUOTATION MARK}")

    def test_non_breaking_spaces_converted_on_the_way_in(self):
        soup = self.soup("<a>&nbsp;&nbsp;</a>")
        self.assertEqual(soup.a.string, "\N{NO-BREAK SPACE}" * 2)

    def test_entities_converted_on_the_way_out(self):
        text = "<p>&lt;&lt;sacr&eacute;&#32;bleu!&gt;&gt;</p>"
        expected = "<p>&lt;&lt;sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</p>".encode("utf-8")
        soup = self.soup(text)
        self.assertEqual(soup.p.encode("utf-8"), expected)

    def test_real_iso_latin_document(self):
        # Smoke test of interrelated functionality, using an
        # easy-to-understand document.

        # Here it is in Unicode. Note that it claims to be in ISO-Latin-1.
        unicode_html = '<html><head><meta content="text/html; charset=ISO-Latin-1" http-equiv="Content-type"/></head><body><p>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</p></body></html>'

        # That's because we're going to encode it into ISO-Latin-1, and use
        # that to test.
        iso_latin_html = unicode_html.encode("iso-8859-1")

        # Parse the ISO-Latin-1 HTML.
        soup = self.soup(iso_latin_html)
        # Encode it to UTF-8.
        result = soup.encode("utf-8")

        # What do we expect the result to look like? Well, it would
        # look like unicode_html, except that the META tag would say
        # UTF-8 instead of ISO-Latin-1.
        expected = unicode_html.replace("ISO-Latin-1", "utf-8")

        # And, of course, it would be in UTF-8, not Unicode.
        expected = expected.encode("utf-8")

        # Ta-da!
        self.assertEqual(result, expected)

    def test_real_shift_jis_document(self):
        # Smoke test to make sure the parser can handle a document in
        # Shift-JIS encoding, without choking.
        shift_jis_html = (
            b'<html><head></head><body><pre>'
            b'\x82\xb1\x82\xea\x82\xcdShift-JIS\x82\xc5\x83R\x81[\x83f'
            b'\x83B\x83\x93\x83O\x82\xb3\x82\xea\x82\xbd\x93\xfa\x96{\x8c'
            b'\xea\x82\xcc\x83t\x83@\x83C\x83\x8b\x82\xc5\x82\xb7\x81B'
            b'</pre></body></html>')
        unicode_html = shift_jis_html.decode("shift-jis")
        soup = self.soup(unicode_html)

        # Make sure the parse tree is correctly encoded to various
        # encodings.
        self.assertEqual(soup.encode("utf-8"), unicode_html.encode("utf-8"))
        self.assertEqual(soup.encode("euc_jp"), unicode_html.encode("euc_jp"))

    def test_real_hebrew_document(self):
        # A real-world test to make sure we can convert ISO-8859-8 (a
        # Hebrew encoding) to UTF-8.
        hebrew_document = b'<html><head><title>Hebrew (ISO 8859-8) in Visual Directionality</title></head><body><h1>Hebrew (ISO 8859-8) in Visual Directionality</h1>\xed\xe5\xec\xf9</body></html>'
        soup = self.soup(
            hebrew_document, from_encoding="iso8859-8")
        self.assertEqual(soup.original_encoding, 'iso8859-8')
        self.assertEqual(
            soup.encode('utf-8'),
            hebrew_document.decode("iso8859-8").encode("utf-8"))

    def test_meta_tag_reflects_current_encoding(self):
        # Here's the <meta> tag saying that a document is
        # encoded in Shift-JIS.
        meta_tag = ('<meta content="text/html; charset=x-sjis" '
                    'http-equiv="Content-type"/>')

        # Here's a document incorporating that meta tag.
        shift_jis_html = (
            '<html><head>\n%s\n'
            '<meta http-equiv="Content-language" content="ja"/>'
            '</head><body>Shift-JIS markup goes here.') % meta_tag
        soup = self.soup(shift_jis_html)

        # Parse the document, and the charset is seemingly unaffected.
        parsed_meta = soup.find('meta', {'http-equiv': 'Content-type'})
        content = parsed_meta['content']
        self.assertEqual('text/html; charset=x-sjis', content)

        # But that value is actually a ContentMetaAttributeValue object.
        self.assertTrue(isinstance(content, ContentMetaAttributeValue))

        # And it will take on a value that reflects its current
        # encoding.
        self.assertEqual('text/html; charset=utf8', content.encode("utf8"))

        # For the rest of the story, see TestSubstitutions in
        # test_tree.py.

    def test_html5_style_meta_tag_reflects_current_encoding(self):
        # Here's the <meta> tag saying that a document is
        # encoded in Shift-JIS.
        meta_tag = ('<meta id="encoding" charset="x-sjis" />')

        # Here's a document incorporating that meta tag.
        shift_jis_html = (
            '<html><head>\n%s\n'
            '<meta http-equiv="Content-language" content="ja"/>'
            '</head><body>Shift-JIS markup goes here.') % meta_tag
        soup = self.soup(shift_jis_html)

        # Parse the document, and the charset is seemingly unaffected.
        parsed_meta = soup.find('meta', id="encoding")
        charset = parsed_meta['charset']
        self.assertEqual('x-sjis', charset)

        # But that value is actually a CharsetMetaAttributeValue object.
        self.assertTrue(isinstance(charset, CharsetMetaAttributeValue))

        # And it will take on a value that reflects its current
        # encoding.
        self.assertEqual('utf8', charset.encode("utf8"))

    def test_tag_with_no_attributes_can_have_attributes_added(self):
        data = self.soup("<a>text</a>")
        data.a['foo'] = 'bar'
        self.assertEqual('<a foo="bar">text</a>', data.a.decode())

class XMLTreeBuilderSmokeTest(SoupTest):

    def test_pickle_and_unpickle_identity(self):
        # Pickling a tree, then unpickling it, yields a tree identical
        # to the original.
        tree = self.soup("<a><b>foo</a>")
        dumped = pickle.dumps(tree, 2)
        loaded = pickle.loads(dumped)
        self.assertEqual(loaded.__class__, BeautifulSoup)
        self.assertEqual(loaded.decode(), tree.decode())

    def test_docstring_generated(self):
        soup = self.soup("<root/>")
        self.assertEqual(
            soup.encode(), b'<?xml version="1.0" encoding="utf-8"?>\n<root/>')

    def test_xml_declaration(self):
        markup = b"""<?xml version="1.0" encoding="utf8"?>\n<foo/>"""
        soup = self.soup(markup)
        self.assertEqual(markup, soup.encode("utf8"))

    def test_real_xhtml_document(self):
        """A real XHTML document should come out *exactly* the same as it went in."""
        markup = b"""<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN">
<html xmlns="http://www.w3.org/1999/xhtml">
<head><title>Hello.</title></head>
<body>Goodbye.</body>
</html>"""
        soup = self.soup(markup)
        self.assertEqual(
            soup.encode("utf-8"), markup)

    def test_formatter_processes_script_tag_for_xml_documents(self):
        doc = """
  <script type="text/javascript">
  </script>
"""
        soup = BeautifulSoup(doc, "lxml-xml")
        # lxml would have stripped this while parsing, but we can add
        # it later.
        soup.script.string = 'console.log("< < hey > > ");'
        encoded = soup.encode()
        self.assertTrue(b"&lt; &lt; hey &gt; &gt;" in encoded)

    def test_can_parse_unicode_document(self):
        markup = '<?xml version="1.0" encoding="euc-jp"><root>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</root>'
        soup = self.soup(markup)
        self.assertEqual('Sacr\xe9 bleu!', soup.root.string)

    def test_popping_namespaced_tag(self):
        markup = '<rss xmlns:dc="foo"><dc:creator>b</dc:creator><dc:date>2012-07-02T20:33:42Z</dc:date><dc:rights>c</dc:rights><image>d</image></rss>'
        soup = self.soup(markup)
        self.assertEqual(
            str(soup.rss), markup)

    def test_docstring_includes_correct_encoding(self):
        soup = self.soup("<root/>")
        self.assertEqual(
            soup.encode("latin1"),
            b'<?xml version="1.0" encoding="latin1"?>\n<root/>')

    def test_large_xml_document(self):
        """A large XML document should come out the same as it went in."""
        markup = (b'<?xml version="1.0" encoding="utf-8"?>\n<root>'
                  + b'0' * (2**12)
                  + b'</root>')
        soup = self.soup(markup)
        self.assertEqual(soup.encode("utf-8"), markup)


    def test_tags_are_empty_element_if_and_only_if_they_are_empty(self):
        self.assertSoupEquals("<p>", "<p/>")
        self.assertSoupEquals("<p>foo</p>")

    def test_namespaces_are_preserved(self):
        markup = '<root xmlns:a="http://example.com/" xmlns:b="http://example.net/"><a:foo>This tag is in the a namespace</a:foo><b:foo>This tag is in the b namespace</b:foo></root>'
        soup = self.soup(markup)
        root = soup.root
        self.assertEqual("http://example.com/", root['xmlns:a'])
        self.assertEqual("http://example.net/", root['xmlns:b'])

    def test_closing_namespaced_tag(self):
        markup = '<p xmlns:dc="http://purl.org/dc/elements/1.1/"><dc:date>20010504</dc:date></p>'
        soup = self.soup(markup)
        self.assertEqual(str(soup.p), markup)

    def test_namespaced_attributes(self):
        markup = '<foo xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><bar xsi:schemaLocation="http://www.example.com"/></foo>'
        soup = self.soup(markup)
        self.assertEqual(str(soup.foo), markup)

    def test_namespaced_attributes_xml_namespace(self):
        markup = '<foo xml:lang="fr">bar</foo>'
        soup = self.soup(markup)
        self.assertEqual(str(soup.foo), markup)

class HTML5TreeBuilderSmokeTest(HTMLTreeBuilderSmokeTest):
    """Smoke test for a tree builder that supports HTML5."""

    def test_real_xhtml_document(self):
        # Since XHTML is not HTML5, HTML5 parsers are not tested to handle
        # XHTML documents in any particular way.
        pass

    def test_html_tags_have_namespace(self):
        markup = "<a>"
        soup = self.soup(markup)
        self.assertEqual("http://www.w3.org/1999/xhtml", soup.a.namespace)

    def test_svg_tags_have_namespace(self):
        markup = '<svg><circle/></svg>'
        soup = self.soup(markup)
        namespace = "http://www.w3.org/2000/svg"
        self.assertEqual(namespace, soup.svg.namespace)
        self.assertEqual(namespace, soup.circle.namespace)


    def test_mathml_tags_have_namespace(self):
        markup = '<math><msqrt>5</msqrt></math>'
        soup = self.soup(markup)
        namespace = 'http://www.w3.org/1998/Math/MathML'
        self.assertEqual(namespace, soup.math.namespace)
        self.assertEqual(namespace, soup.msqrt.namespace)

    def test_xml_declaration_becomes_comment(self):
        markup = '<?xml version="1.0" encoding="utf-8"?><html></html>'
        soup = self.soup(markup)
        self.assertTrue(isinstance(soup.contents[0], Comment))
        self.assertEqual(soup.contents[0], '?xml version="1.0" encoding="utf-8"?')
        self.assertEqual("html", soup.contents[0].next_element.name)

def skipIf(condition, reason):
    def nothing(test, *args, **kwargs):
        return None

    def decorator(test_item):
        if condition:
            return nothing
        else:
            return test_item

    return decorator
1 bitbake/lib/bs4/tests/__init__.py Normal file
@@ -0,0 +1 @@
"The beautifulsoup tests."
147 bitbake/lib/bs4/tests/test_builder_registry.py Normal file
@@ -0,0 +1,147 @@
"""Tests of the builder registry."""
|
||||
|
||||
import unittest
|
||||
import warnings
|
||||
|
||||
from bs4 import BeautifulSoup
|
||||
from bs4.builder import (
|
||||
builder_registry as registry,
|
||||
HTMLParserTreeBuilder,
|
||||
TreeBuilderRegistry,
|
||||
)
|
||||
|
||||
try:
|
||||
from bs4.builder import HTML5TreeBuilder
|
||||
HTML5LIB_PRESENT = True
|
||||
except ImportError:
|
||||
HTML5LIB_PRESENT = False
|
||||
|
||||
try:
|
||||
from bs4.builder import (
|
||||
LXMLTreeBuilderForXML,
|
||||
LXMLTreeBuilder,
|
||||
)
|
||||
LXML_PRESENT = True
|
||||
except ImportError:
|
||||
LXML_PRESENT = False
|
||||
|
||||
|
||||
class BuiltInRegistryTest(unittest.TestCase):
|
||||
"""Test the built-in registry with the default builders registered."""
|
||||
|
||||
def test_combination(self):
|
||||
if LXML_PRESENT:
|
||||
self.assertEqual(registry.lookup('fast', 'html'),
|
||||
LXMLTreeBuilder)
|
||||
|
||||
if LXML_PRESENT:
|
||||
self.assertEqual(registry.lookup('permissive', 'xml'),
|
||||
LXMLTreeBuilderForXML)
|
||||
self.assertEqual(registry.lookup('strict', 'html'),
|
||||
HTMLParserTreeBuilder)
|
||||
if HTML5LIB_PRESENT:
|
||||
self.assertEqual(registry.lookup('html5lib', 'html'),
|
||||
HTML5TreeBuilder)
|
||||
|
||||
def test_lookup_by_markup_type(self):
|
||||
if LXML_PRESENT:
|
||||
self.assertEqual(registry.lookup('html'), LXMLTreeBuilder)
|
||||
self.assertEqual(registry.lookup('xml'), LXMLTreeBuilderForXML)
|
||||
else:
|
||||
self.assertEqual(registry.lookup('xml'), None)
|
||||
if HTML5LIB_PRESENT:
|
||||
self.assertEqual(registry.lookup('html'), HTML5TreeBuilder)
|
||||
else:
|
||||
self.assertEqual(registry.lookup('html'), HTMLParserTreeBuilder)
|
||||
|
||||
def test_named_library(self):
|
||||
if LXML_PRESENT:
|
||||
self.assertEqual(registry.lookup('lxml', 'xml'),
|
||||
LXMLTreeBuilderForXML)
|
||||
self.assertEqual(registry.lookup('lxml', 'html'),
|
||||
LXMLTreeBuilder)
|
||||
if HTML5LIB_PRESENT:
|
||||
self.assertEqual(registry.lookup('html5lib'),
|
||||
HTML5TreeBuilder)
|
||||
|
||||
self.assertEqual(registry.lookup('html.parser'),
|
||||
HTMLParserTreeBuilder)
|
||||
|
||||
def test_beautifulsoup_constructor_does_lookup(self):
|
||||
|
||||
with warnings.catch_warnings(record=True) as w:
|
||||
# This will create a warning about not explicitly
|
||||
# specifying a parser, but we'll ignore it.
|
||||
|
||||
# You can pass in a string.
|
||||
BeautifulSoup("", features="html")
|
||||
# Or a list of strings.
|
||||
BeautifulSoup("", features=["html", "fast"])
|
||||
|
||||
# You'll get an exception if BS can't find an appropriate
|
||||
# builder.
|
||||
self.assertRaises(ValueError, BeautifulSoup,
|
||||
"", features="no-such-feature")
|
||||
|
||||
class RegistryTest(unittest.TestCase):
|
||||
"""Test the TreeBuilderRegistry class in general."""
|
||||
|
||||
def setUp(self):
|
||||
self.registry = TreeBuilderRegistry()
|
||||
|
||||
def builder_for_features(self, *feature_list):
|
||||
cls = type('Builder_' + '_'.join(feature_list),
|
||||
(object,), {'features' : feature_list})
|
||||
|
||||
self.registry.register(cls)
|
||||
return cls
|
||||
|
||||
def test_register_with_no_features(self):
|
||||
builder = self.builder_for_features()
|
||||
|
||||
# Since the builder advertises no features, you can't find it
|
||||
# by looking up features.
|
||||
self.assertEqual(self.registry.lookup('foo'), None)
|
||||
|
||||
# But you can find it by doing a lookup with no features, if
|
||||
# this happens to be the only registered builder.
|
||||
self.assertEqual(self.registry.lookup(), builder)
|
||||
|
||||
def test_register_with_features_makes_lookup_succeed(self):
|
||||
builder = self.builder_for_features('foo', 'bar')
|
||||
self.assertEqual(self.registry.lookup('foo'), builder)
|
||||
self.assertEqual(self.registry.lookup('bar'), builder)
|
||||
|
||||
def test_lookup_fails_when_no_builder_implements_feature(self):
|
||||
builder = self.builder_for_features('foo', 'bar')
|
||||
self.assertEqual(self.registry.lookup('baz'), None)
|
||||
|
||||
def test_lookup_gets_most_recent_registration_when_no_feature_specified(self):
|
||||
builder1 = self.builder_for_features('foo')
|
||||
builder2 = self.builder_for_features('bar')
|
||||
self.assertEqual(self.registry.lookup(), builder2)
|
||||
|
||||
def test_lookup_fails_when_no_tree_builders_registered(self):
|
||||
self.assertEqual(self.registry.lookup(), None)
|
||||
|
||||
def test_lookup_gets_most_recent_builder_supporting_all_features(self):
|
||||
has_one = self.builder_for_features('foo')
|
||||
has_the_other = self.builder_for_features('bar')
|
||||
has_both_early = self.builder_for_features('foo', 'bar', 'baz')
|
||||
has_both_late = self.builder_for_features('foo', 'bar', 'quux')
|
||||
lacks_one = self.builder_for_features('bar')
|
||||
has_the_other = self.builder_for_features('foo')
|
||||
|
||||
# There are two builders featuring 'foo' and 'bar', but
|
||||
# the one that also features 'quux' was registered later.
|
||||
self.assertEqual(self.registry.lookup('foo', 'bar'),
|
||||
has_both_late)
|
||||
|
||||
# There is only one builder featuring 'foo', 'bar', and 'baz'.
|
||||
self.assertEqual(self.registry.lookup('foo', 'bar', 'baz'),
|
||||
has_both_early)
|
||||
|
||||
def test_lookup_fails_when_cannot_reconcile_requested_features(self):
|
||||
builder1 = self.builder_for_features('foo', 'bar')
|
||||
builder2 = self.builder_for_features('foo', 'baz')
|
||||
self.assertEqual(self.registry.lookup('bar', 'baz'), None)
|
||||
32 bitbake/lib/bs4/tests/test_docs.py Normal file
@@ -0,0 +1,32 @@
"Test harness for doctests."
|
||||
|
||||
# pylint: disable-msg=E0611,W0142
|
||||
|
||||
__metaclass__ = type
|
||||
__all__ = [
|
||||
'additional_tests',
|
||||
]
|
||||
|
||||
import doctest
|
||||
#from pkg_resources import (
|
||||
# resource_filename, resource_exists, resource_listdir, cleanup_resources)
|
||||
|
||||
DOCTEST_FLAGS = (
|
||||
doctest.ELLIPSIS |
|
||||
doctest.NORMALIZE_WHITESPACE |
|
||||
doctest.REPORT_NDIFF)
|
||||
|
||||
# def additional_tests():
|
||||
# "Run the doc tests (README.txt and docs/*, if any exist)"
|
||||
# doctest_files = [
|
||||
# os.path.abspath(resource_filename('bs4', 'README.txt'))]
|
||||
# if resource_exists('bs4', 'docs'):
|
||||
# for name in resource_listdir('bs4', 'docs'):
|
||||
# if name.endswith('.txt'):
|
||||
# doctest_files.append(
|
||||
# os.path.abspath(
|
||||
# resource_filename('bs4', 'docs/%s' % name)))
|
||||
# kwargs = dict(module_relative=False, optionflags=DOCTEST_FLAGS)
|
||||
# atexit.register(cleanup_resources)
|
||||
# return unittest.TestSuite((
|
||||
# doctest.DocFileSuite(*doctest_files, **kwargs)))
|
||||
98 bitbake/lib/bs4/tests/test_html5lib.py Normal file
@@ -0,0 +1,98 @@
"""Tests to ensure that the html5lib tree builder generates good trees."""
|
||||
|
||||
import warnings
|
||||
|
||||
try:
|
||||
from bs4.builder import HTML5TreeBuilder
|
||||
HTML5LIB_PRESENT = True
|
||||
except ImportError as e:
|
||||
HTML5LIB_PRESENT = False
|
||||
from bs4.element import SoupStrainer
|
||||
from bs4.testing import (
|
||||
HTML5TreeBuilderSmokeTest,
|
||||
SoupTest,
|
||||
skipIf,
|
||||
)
|
||||
|
||||
@skipIf(
|
||||
not HTML5LIB_PRESENT,
|
||||
"html5lib seems not to be present, not testing its tree builder.")
|
||||
class HTML5LibBuilderSmokeTest(SoupTest, HTML5TreeBuilderSmokeTest):
|
||||
"""See ``HTML5TreeBuilderSmokeTest``."""
|
||||
|
||||
@property
|
||||
def default_builder(self):
|
||||
return HTML5TreeBuilder()
|
||||
|
||||
def test_soupstrainer(self):
|
||||
# The html5lib tree builder does not support SoupStrainers.
|
||||
strainer = SoupStrainer("b")
|
||||
markup = "<p>A <b>bold</b> statement.</p>"
|
||||
with warnings.catch_warnings(record=True) as w:
|
||||
soup = self.soup(markup, parse_only=strainer)
|
||||
self.assertEqual(
|
||||
soup.decode(), self.document_for(markup))
|
||||
|
||||
self.assertTrue(
|
||||
"the html5lib tree builder doesn't support parse_only" in
|
||||
str(w[0].message))
|
||||
|
||||
def test_correctly_nested_tables(self):
|
||||
"""html5lib inserts <tbody> tags where other parsers don't."""
|
||||
markup = ('<table id="1">'
|
||||
'<tr>'
|
||||
"<td>Here's another table:"
|
||||
'<table id="2">'
|
||||
'<tr><td>foo</td></tr>'
|
||||
'</table></td>')
|
||||
|
||||
self.assertSoupEquals(
|
||||
markup,
|
||||
'<table id="1"><tbody><tr><td>Here\'s another table:'
|
||||
'<table id="2"><tbody><tr><td>foo</td></tr></tbody></table>'
|
||||
'</td></tr></tbody></table>')
|
||||
|
||||
self.assertSoupEquals(
|
||||
"<table><thead><tr><td>Foo</td></tr></thead>"
|
||||
"<tbody><tr><td>Bar</td></tr></tbody>"
|
||||
"<tfoot><tr><td>Baz</td></tr></tfoot></table>")
|
||||
|
||||
def test_xml_declaration_followed_by_doctype(self):
|
||||
markup = '''<?xml version="1.0" encoding="utf-8"?>
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
</head>
|
||||
<body>
|
||||
<p>foo</p>
|
||||
</body>
|
||||
</html>'''
|
||||
soup = self.soup(markup)
|
||||
# Verify that we can reach the <p> tag; this means the tree is connected.
|
||||
self.assertEqual(b"<p>foo</p>", soup.p.encode())
|
||||
|
||||
def test_reparented_markup(self):
|
||||
markup = '<p><em>foo</p>\n<p>bar<a></a></em></p>'
|
||||
soup = self.soup(markup)
|
||||
self.assertEqual("<body><p><em>foo</em></p><em>\n</em><p><em>bar<a></a></em></p></body>", soup.body.decode())
|
||||
self.assertEqual(2, len(soup.find_all('p')))
|
||||
|
||||
|
||||
def test_reparented_markup_ends_with_whitespace(self):
|
||||
markup = '<p><em>foo</p>\n<p>bar<a></a></em></p>\n'
|
||||
soup = self.soup(markup)
|
||||
self.assertEqual("<body><p><em>foo</em></p><em>\n</em><p><em>bar<a></a></em></p>\n</body>", soup.body.decode())
|
||||
self.assertEqual(2, len(soup.find_all('p')))
|
||||
|
||||
def test_processing_instruction(self):
|
||||
"""Processing instructions become comments."""
|
||||
markup = b"""<?PITarget PIContent?>"""
|
||||
soup = self.soup(markup)
|
||||
assert str(soup).startswith("<!--?PITarget PIContent?-->")
|
||||
|
||||
def test_cloned_multivalue_node(self):
|
||||
markup = b"""<a class="my_class"><p></a>"""
|
||||
soup = self.soup(markup)
|
||||
a1, a2 = soup.find_all('a')
|
||||
self.assertEqual(a1, a2)
|
||||
assert a1 is not a2
|
||||
31 bitbake/lib/bs4/tests/test_htmlparser.py Normal file
@@ -0,0 +1,31 @@
"""Tests to ensure that the html.parser tree builder generates good
|
||||
trees."""
|
||||
|
||||
import pickle
|
||||
from bs4.testing import SoupTest, HTMLTreeBuilderSmokeTest
|
||||
from bs4.builder import HTMLParserTreeBuilder
|
||||
|
||||
class HTMLParserTreeBuilderSmokeTest(SoupTest, HTMLTreeBuilderSmokeTest):
|
||||
|
||||
@property
|
||||
def default_builder(self):
|
||||
return HTMLParserTreeBuilder()
|
||||
|
||||
def test_namespaced_system_doctype(self):
|
||||
# html.parser can't handle namespaced doctypes, so skip this one.
|
||||
pass
|
||||
|
||||
def test_namespaced_public_doctype(self):
|
||||
# html.parser can't handle namespaced doctypes, so skip this one.
|
||||
pass
|
||||
|
||||
def test_builder_is_pickled(self):
|
||||
"""Unlike most tree builders, HTMLParserTreeBuilder and will
|
||||
be restored after pickling.
        """
        tree = self.soup("<a><b>foo</a>")
        dumped = pickle.dumps(tree, 2)
        loaded = pickle.loads(dumped)
        self.assertTrue(isinstance(loaded.builder, type(tree.builder)))
70 bitbake/lib/bs4/tests/test_lxml.py Normal file
@@ -0,0 +1,70 @@
"""Tests to ensure that the lxml tree builder generates good trees."""
|
||||
|
||||
import warnings
|
||||
|
||||
try:
|
||||
import lxml.etree
|
||||
LXML_PRESENT = True
|
||||
LXML_VERSION = lxml.etree.LXML_VERSION
|
||||
except ImportError as e:
|
||||
LXML_PRESENT = False
|
||||
LXML_VERSION = (0,)
|
||||
|
||||
if LXML_PRESENT:
|
||||
from bs4.builder import LXMLTreeBuilder, LXMLTreeBuilderForXML
|
||||
|
||||
from bs4 import BeautifulStoneSoup
|
||||
from bs4.testing import skipIf
|
||||
from bs4.testing import (
|
||||
HTMLTreeBuilderSmokeTest,
|
||||
XMLTreeBuilderSmokeTest,
|
||||
SoupTest,
|
||||
skipIf,
|
||||
)
|
||||
|
||||
@skipIf(
|
||||
not LXML_PRESENT,
|
||||
"lxml seems not to be present, not testing its tree builder.")
|
||||
class LXMLTreeBuilderSmokeTest(SoupTest, HTMLTreeBuilderSmokeTest):
|
||||
"""See ``HTMLTreeBuilderSmokeTest``."""
|
||||
|
||||
@property
|
||||
def default_builder(self):
|
||||
return LXMLTreeBuilder()
|
||||
|
||||
def test_out_of_range_entity(self):
|
||||
self.assertSoupEquals(
|
||||
"<p>foo�bar</p>", "<p>foobar</p>")
|
||||
self.assertSoupEquals(
|
||||
"<p>foo�bar</p>", "<p>foobar</p>")
|
||||
self.assertSoupEquals(
|
||||
"<p>foo�bar</p>", "<p>foobar</p>")
|
||||
|
||||
# In lxml < 2.3.5, an empty doctype causes a segfault. Skip this
|
||||
# test if an old version of lxml is installed.
|
||||
|
||||
@skipIf(
|
||||
not LXML_PRESENT or LXML_VERSION < (2,3,5,0),
|
||||
"Skipping doctype test for old version of lxml to avoid segfault.")
|
||||
def test_empty_doctype(self):
|
||||
soup = self.soup("<!DOCTYPE>")
|
||||
doctype = soup.contents[0]
|
||||
self.assertEqual("", doctype.strip())
|
||||
|
||||
def test_beautifulstonesoup_is_xml_parser(self):
|
||||
# Make sure that the deprecated BSS class uses an xml builder
|
||||
# if one is installed.
|
||||
with warnings.catch_warnings(record=True) as w:
|
||||
soup = BeautifulStoneSoup("<b />")
|
||||
self.assertEqual("<b/>", str(soup.b))
|
||||
self.assertTrue("BeautifulStoneSoup class is deprecated" in str(w[0].message))
|
||||
|
||||
@skipIf(
|
||||
not LXML_PRESENT,
|
||||
"lxml seems not to be present, not testing its XML tree builder.")
|
||||
class LXMLXMLTreeBuilderSmokeTest(SoupTest, XMLTreeBuilderSmokeTest):
|
||||
"""See ``HTMLTreeBuilderSmokeTest``."""
|
||||
|
||||
@property
|
||||
def default_builder(self):
|
||||
return LXMLTreeBuilderForXML()
|
||||
479 bitbake/lib/bs4/tests/test_soup.py Normal file
@@ -0,0 +1,479 @@
# -*- coding: utf-8 -*-
"""Tests of Beautiful Soup as a whole."""

import logging
import unittest
import sys
import tempfile

from bs4 import BeautifulSoup
from bs4.element import (
    CharsetMetaAttributeValue,
    ContentMetaAttributeValue,
    SoupStrainer,
    NamespacedAttribute,
    )
import bs4.dammit
from bs4.dammit import (
    EntitySubstitution,
    UnicodeDammit,
    EncodingDetector,
)
from bs4.testing import (
    SoupTest,
    skipIf,
)
import warnings

try:
    from bs4.builder import LXMLTreeBuilder, LXMLTreeBuilderForXML
    LXML_PRESENT = True
except ImportError as e:
    LXML_PRESENT = False

PYTHON_2_PRE_2_7 = (sys.version_info < (2,7))
PYTHON_3_PRE_3_2 = (sys.version_info[0] == 3 and sys.version_info < (3,2))

class TestConstructor(SoupTest):

    def test_short_unicode_input(self):
        data = "<h1>éé</h1>"
        soup = self.soup(data)
        self.assertEqual("éé", soup.h1.string)

    def test_embedded_null(self):
        data = "<h1>foo\0bar</h1>"
        soup = self.soup(data)
        self.assertEqual("foo\0bar", soup.h1.string)

    def test_exclude_encodings(self):
        utf8_data = "Räksmörgås".encode("utf-8")
        soup = self.soup(utf8_data, exclude_encodings=["utf-8"])
        self.assertEqual("windows-1252", soup.original_encoding)


class TestWarnings(SoupTest):

    def _no_parser_specified(self, s, is_there=True):
        v = s.startswith(BeautifulSoup.NO_PARSER_SPECIFIED_WARNING[:80])
        self.assertTrue(v)

    def test_warning_if_no_parser_specified(self):
        with warnings.catch_warnings(record=True) as w:
            soup = self.soup("<a><b></b></a>")
        msg = str(w[0].message)
        self._no_parser_specified(msg)

    def test_warning_if_parser_specified_too_vague(self):
        with warnings.catch_warnings(record=True) as w:
            soup = self.soup("<a><b></b></a>", "html")
        msg = str(w[0].message)
        self._no_parser_specified(msg)

    def test_no_warning_if_explicit_parser_specified(self):
        with warnings.catch_warnings(record=True) as w:
            soup = self.soup("<a><b></b></a>", "html.parser")
        self.assertEqual([], w)

    def test_parseOnlyThese_renamed_to_parse_only(self):
        with warnings.catch_warnings(record=True) as w:
            soup = self.soup("<a><b></b></a>", parseOnlyThese=SoupStrainer("b"))
        msg = str(w[0].message)
        self.assertTrue("parseOnlyThese" in msg)
        self.assertTrue("parse_only" in msg)
        self.assertEqual(b"<b></b>", soup.encode())

    def test_fromEncoding_renamed_to_from_encoding(self):
        with warnings.catch_warnings(record=True) as w:
            utf8 = b"\xc3\xa9"
            soup = self.soup(utf8, fromEncoding="utf8")
        msg = str(w[0].message)
        self.assertTrue("fromEncoding" in msg)
        self.assertTrue("from_encoding" in msg)
        self.assertEqual("utf8", soup.original_encoding)

    def test_unrecognized_keyword_argument(self):
        self.assertRaises(
            TypeError, self.soup, "<a>", no_such_argument=True)

class TestWarnings(SoupTest):

    def test_disk_file_warning(self):
        filehandle = tempfile.NamedTemporaryFile()
        filename = filehandle.name
        try:
            with warnings.catch_warnings(record=True) as w:
                soup = self.soup(filename)
            msg = str(w[0].message)
            self.assertTrue("looks like a filename" in msg)
        finally:
            filehandle.close()

        # The file no longer exists, so Beautiful Soup will no longer issue the warning.
        with warnings.catch_warnings(record=True) as w:
            soup = self.soup(filename)
        self.assertEqual(0, len(w))

    def test_url_warning(self):
        with warnings.catch_warnings(record=True) as w:
            soup = self.soup("http://www.crummy.com/")
        msg = str(w[0].message)
        self.assertTrue("looks like a URL" in msg)

        with warnings.catch_warnings(record=True) as w:
            soup = self.soup("http://www.crummy.com/ is great")
        self.assertEqual(0, len(w))

class TestSelectiveParsing(SoupTest):

    def test_parse_with_soupstrainer(self):
        markup = "No<b>Yes</b><a>No<b>Yes <c>Yes</c></b>"
        strainer = SoupStrainer("b")
        soup = self.soup(markup, parse_only=strainer)
        self.assertEqual(soup.encode(), b"<b>Yes</b><b>Yes <c>Yes</c></b>")


class TestEntitySubstitution(unittest.TestCase):
    """Standalone tests of the EntitySubstitution class."""
    def setUp(self):
        self.sub = EntitySubstitution

    def test_simple_html_substitution(self):
        # Unicode characters corresponding to named HTML entities
        # are substituted, and no others.
        s = "foo\u2200\N{SNOWMAN}\u00f5bar"
        self.assertEqual(self.sub.substitute_html(s),
                         "foo&forall;\N{SNOWMAN}&otilde;bar")

    def test_smart_quote_substitution(self):
        # MS smart quotes are a common source of frustration, so we
        # give them a special test.
        quotes = b"\x91\x92foo\x93\x94"
        dammit = UnicodeDammit(quotes)
        self.assertEqual(self.sub.substitute_html(dammit.markup),
                         "&lsquo;&rsquo;foo&ldquo;&rdquo;")

    def test_xml_conversion_includes_no_quotes_if_make_quoted_attribute_is_false(self):
|
||||
s = 'Welcome to "my bar"'
|
||||
self.assertEqual(self.sub.substitute_xml(s, False), s)
|
||||
|
||||
def test_xml_attribute_quoting_normally_uses_double_quotes(self):
|
||||
self.assertEqual(self.sub.substitute_xml("Welcome", True),
|
||||
'"Welcome"')
|
||||
self.assertEqual(self.sub.substitute_xml("Bob's Bar", True),
|
||||
'"Bob\'s Bar"')
|
||||
|
||||
def test_xml_attribute_quoting_uses_single_quotes_when_value_contains_double_quotes(self):
|
||||
s = 'Welcome to "my bar"'
|
||||
self.assertEqual(self.sub.substitute_xml(s, True),
|
||||
"'Welcome to \"my bar\"'")
|
||||
|
||||
def test_xml_attribute_quoting_escapes_single_quotes_when_value_contains_both_single_and_double_quotes(self):
|
||||
s = 'Welcome to "Bob\'s Bar"'
|
||||
self.assertEqual(
|
||||
self.sub.substitute_xml(s, True),
|
||||
'"Welcome to "Bob\'s Bar""')
|
||||
|
||||
def test_xml_quotes_arent_escaped_when_value_is_not_being_quoted(self):
|
||||
quoted = 'Welcome to "Bob\'s Bar"'
|
||||
self.assertEqual(self.sub.substitute_xml(quoted), quoted)
|
||||
|
||||
def test_xml_quoting_handles_angle_brackets(self):
|
||||
self.assertEqual(
|
||||
self.sub.substitute_xml("foo<bar>"),
|
||||
"foo<bar>")
|
||||
|
||||
def test_xml_quoting_handles_ampersands(self):
|
||||
self.assertEqual(self.sub.substitute_xml("AT&T"), "AT&T")
|
||||
|
||||
def test_xml_quoting_including_ampersands_when_they_are_part_of_an_entity(self):
|
||||
self.assertEqual(
|
||||
self.sub.substitute_xml("ÁT&T"),
|
||||
"&Aacute;T&T")
|
||||
|
||||
def test_xml_quoting_ignoring_ampersands_when_they_are_part_of_an_entity(self):
|
||||
self.assertEqual(
|
||||
self.sub.substitute_xml_containing_entities("ÁT&T"),
|
||||
"ÁT&T")
|
||||
|
||||
def test_quotes_not_html_substituted(self):
|
||||
"""There's no need to do this except inside attribute values."""
|
||||
text = 'Bob\'s "bar"'
|
||||
self.assertEqual(self.sub.substitute_html(text), text)


class TestEncodingConversion(SoupTest):
    # Test Beautiful Soup's ability to decode and encode from various
    # encodings.

    def setUp(self):
        super(TestEncodingConversion, self).setUp()
        self.unicode_data = '<html><head><meta charset="utf-8"/></head><body><foo>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</foo></body></html>'
        self.utf8_data = self.unicode_data.encode("utf-8")
        # Just so you know what it looks like.
        self.assertEqual(
            self.utf8_data,
            b'<html><head><meta charset="utf-8"/></head><body><foo>Sacr\xc3\xa9 bleu!</foo></body></html>')

    def test_ascii_in_unicode_out(self):
        # ASCII input is converted to Unicode. The original_encoding
        # attribute is set to 'utf-8', a superset of ASCII.
        chardet = bs4.dammit.chardet_dammit
        logging.disable(logging.WARNING)
        try:
            def noop(str):
                return None
            # Disable chardet, which will realize that the ASCII is ASCII.
            bs4.dammit.chardet_dammit = noop
            ascii = b"<foo>a</foo>"
            soup_from_ascii = self.soup(ascii)
            unicode_output = soup_from_ascii.decode()
            self.assertTrue(isinstance(unicode_output, str))
            self.assertEqual(unicode_output, self.document_for(ascii.decode()))
            self.assertEqual(soup_from_ascii.original_encoding.lower(), "utf-8")
        finally:
            logging.disable(logging.NOTSET)
            bs4.dammit.chardet_dammit = chardet

    def test_unicode_in_unicode_out(self):
        # Unicode input is left alone. The original_encoding attribute
        # is not set.
        soup_from_unicode = self.soup(self.unicode_data)
        self.assertEqual(soup_from_unicode.decode(), self.unicode_data)
        self.assertEqual(soup_from_unicode.foo.string, 'Sacr\xe9 bleu!')
        self.assertEqual(soup_from_unicode.original_encoding, None)

    def test_utf8_in_unicode_out(self):
        # UTF-8 input is converted to Unicode. The original_encoding
        # attribute is set.
        soup_from_utf8 = self.soup(self.utf8_data)
        self.assertEqual(soup_from_utf8.decode(), self.unicode_data)
        self.assertEqual(soup_from_utf8.foo.string, 'Sacr\xe9 bleu!')

    def test_utf8_out(self):
        # The internal data structures can be encoded as UTF-8.
        soup_from_unicode = self.soup(self.unicode_data)
        self.assertEqual(soup_from_unicode.encode('utf-8'), self.utf8_data)

    @skipIf(
        PYTHON_2_PRE_2_7 or PYTHON_3_PRE_3_2,
        "Bad HTMLParser detected; skipping test of non-ASCII characters in attribute name.")
    def test_attribute_name_containing_unicode_characters(self):
        markup = '<div><a \N{SNOWMAN}="snowman"></a></div>'
        self.assertEqual(self.soup(markup).div.encode("utf8"), markup.encode("utf8"))

class TestUnicodeDammit(unittest.TestCase):
    """Standalone tests of UnicodeDammit."""

    def test_unicode_input(self):
        markup = "I'm already Unicode! \N{SNOWMAN}"
        dammit = UnicodeDammit(markup)
        self.assertEqual(dammit.unicode_markup, markup)

    def test_smart_quotes_to_unicode(self):
        markup = b"<foo>\x91\x92\x93\x94</foo>"
        dammit = UnicodeDammit(markup)
        self.assertEqual(
            dammit.unicode_markup, "<foo>\u2018\u2019\u201c\u201d</foo>")

    def test_smart_quotes_to_xml_entities(self):
        markup = b"<foo>\x91\x92\x93\x94</foo>"
        dammit = UnicodeDammit(markup, smart_quotes_to="xml")
        self.assertEqual(
            dammit.unicode_markup, "<foo>&#x2018;&#x2019;&#x201C;&#x201D;</foo>")

    def test_smart_quotes_to_html_entities(self):
        markup = b"<foo>\x91\x92\x93\x94</foo>"
        dammit = UnicodeDammit(markup, smart_quotes_to="html")
        self.assertEqual(
            dammit.unicode_markup, "<foo>&lsquo;&rsquo;&ldquo;&rdquo;</foo>")

    def test_smart_quotes_to_ascii(self):
        markup = b"<foo>\x91\x92\x93\x94</foo>"
        dammit = UnicodeDammit(markup, smart_quotes_to="ascii")
        self.assertEqual(
            dammit.unicode_markup, """<foo>''""</foo>""")

    def test_detect_utf8(self):
        utf8 = b"Sacr\xc3\xa9 bleu! \xe2\x98\x83"
        dammit = UnicodeDammit(utf8)
        self.assertEqual(dammit.original_encoding.lower(), 'utf-8')
        self.assertEqual(dammit.unicode_markup, 'Sacr\xe9 bleu! \N{SNOWMAN}')
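    # Illustrative only (not part of the original file): the round trip the
    # tests above rely on.
    #
    #   dammit = UnicodeDammit(b"Sacr\xc3\xa9 bleu!")
    #   dammit.original_encoding   # 'utf-8', detected from the bytes
    #   dammit.unicode_markup      # 'Sacré bleu!'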

    def test_convert_hebrew(self):
        hebrew = b"\xed\xe5\xec\xf9"
        dammit = UnicodeDammit(hebrew, ["iso-8859-8"])
        self.assertEqual(dammit.original_encoding.lower(), 'iso-8859-8')
        self.assertEqual(dammit.unicode_markup, '\u05dd\u05d5\u05dc\u05e9')

    def test_dont_see_smart_quotes_where_there_are_none(self):
        utf_8 = b"\343\202\261\343\203\274\343\202\277\343\202\244 Watch"
        dammit = UnicodeDammit(utf_8)
        self.assertEqual(dammit.original_encoding.lower(), 'utf-8')
        self.assertEqual(dammit.unicode_markup.encode("utf-8"), utf_8)

    def test_ignore_inappropriate_codecs(self):
        utf8_data = "Räksmörgås".encode("utf-8")
        dammit = UnicodeDammit(utf8_data, ["iso-8859-8"])
        self.assertEqual(dammit.original_encoding.lower(), 'utf-8')

    def test_ignore_invalid_codecs(self):
        utf8_data = "Räksmörgås".encode("utf-8")
        for bad_encoding in ['.utf8', '...', 'utF---16.!']:
            dammit = UnicodeDammit(utf8_data, [bad_encoding])
            self.assertEqual(dammit.original_encoding.lower(), 'utf-8')

    def test_exclude_encodings(self):
        # This is UTF-8.
        utf8_data = "Räksmörgås".encode("utf-8")

        # But if we exclude UTF-8 from consideration, the guess is
        # Windows-1252.
        dammit = UnicodeDammit(utf8_data, exclude_encodings=["utf-8"])
        self.assertEqual(dammit.original_encoding.lower(), 'windows-1252')

        # And if we exclude that, there is no valid guess at all.
        dammit = UnicodeDammit(
            utf8_data, exclude_encodings=["utf-8", "windows-1252"])
        self.assertEqual(dammit.original_encoding, None)

    def test_encoding_detector_replaces_junk_in_encoding_name_with_replacement_character(self):
        detected = EncodingDetector(
            b'<?xml version="1.0" encoding="UTF-\xdb" ?>')
        encodings = list(detected.encodings)
        assert 'utf-\N{REPLACEMENT CHARACTER}' in encodings

    def test_detect_html5_style_meta_tag(self):
        for data in (
            b'<html><meta charset="euc-jp" /></html>',
            b"<html><meta charset='euc-jp' /></html>",
            b"<html><meta charset=euc-jp /></html>",
            b"<html><meta charset=euc-jp/></html>"):
            dammit = UnicodeDammit(data, is_html=True)
            self.assertEqual(
                "euc-jp", dammit.original_encoding)

    def test_last_ditch_entity_replacement(self):
        # This is a UTF-8 document that contains bytestrings
        # completely incompatible with UTF-8 (ie. encoded with some other
        # encoding).
        #
        # Since there is no consistent encoding for the document,
        # Unicode, Dammit will eventually encode the document as UTF-8
        # and encode the incompatible characters as REPLACEMENT
        # CHARACTER.
        #
        # If chardet is installed, it will detect that the document
        # can be converted into ISO-8859-1 without errors. This happens
        # to be the wrong encoding, but it is a consistent encoding, so the
        # code we're testing here won't run.
        #
        # So we temporarily disable chardet if it's present.
        doc = b"""\357\273\277<?xml version="1.0" encoding="UTF-8"?>
<html><b>\330\250\330\252\330\261</b>
<i>\310\322\321\220\312\321\355\344</i></html>"""
        chardet = bs4.dammit.chardet_dammit
        logging.disable(logging.WARNING)
        try:
            def noop(str):
                return None
            bs4.dammit.chardet_dammit = noop
            dammit = UnicodeDammit(doc)
            self.assertEqual(True, dammit.contains_replacement_characters)
            self.assertTrue("\ufffd" in dammit.unicode_markup)

            soup = BeautifulSoup(doc, "html.parser")
            self.assertTrue(soup.contains_replacement_characters)
        finally:
            logging.disable(logging.NOTSET)
            bs4.dammit.chardet_dammit = chardet

    def test_byte_order_mark_removed(self):
        # A document written in UTF-16LE will have its byte order marker stripped.
        data = b'\xff\xfe<\x00a\x00>\x00\xe1\x00\xe9\x00<\x00/\x00a\x00>\x00'
        dammit = UnicodeDammit(data)
        self.assertEqual("<a>áé</a>", dammit.unicode_markup)
        self.assertEqual("utf-16le", dammit.original_encoding)

    def test_detwingle(self):
        # Here's a UTF8 document.
        utf8 = ("\N{SNOWMAN}" * 3).encode("utf8")

        # Here's a Windows-1252 document.
        windows_1252 = (
            "\N{LEFT DOUBLE QUOTATION MARK}Hi, I like Windows!"
            "\N{RIGHT DOUBLE QUOTATION MARK}").encode("windows_1252")

        # Through some unholy alchemy, they've been stuck together.
        doc = utf8 + windows_1252 + utf8

        # The document can't be turned into UTF-8:
        self.assertRaises(UnicodeDecodeError, doc.decode, "utf8")

        # Unicode, Dammit thinks the whole document is Windows-1252,
        # and decodes it into "â˜ƒâ˜ƒâ˜ƒ“Hi, I like Windows!”â˜ƒâ˜ƒâ˜ƒ"

        # But if we run it through fix_embedded_windows_1252, it's fixed:
        fixed = UnicodeDammit.detwingle(doc)
        self.assertEqual(
            "☃☃☃“Hi, I like Windows!”☃☃☃", fixed.decode("utf8"))

    def test_detwingle_ignores_multibyte_characters(self):
        # Each of these characters has a UTF-8 representation ending
        # in \x93. \x93 is a smart quote if interpreted as
        # Windows-1252. But our code knows to skip over multibyte
        # UTF-8 characters, so they'll survive the process unscathed.
        for tricky_unicode_char in (
            "\N{LATIN SMALL LIGATURE OE}", # 2-byte char '\xc5\x93'
            "\N{LATIN SUBSCRIPT SMALL LETTER X}", # 3-byte char '\xe2\x82\x93'
            "\xf0\x90\x90\x93", # This is a CJK character, not sure which one.
            ):
            input = tricky_unicode_char.encode("utf8")
            self.assertTrue(input.endswith(b'\x93'))
            output = UnicodeDammit.detwingle(input)
            self.assertEqual(output, input)
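    # For reference (illustrative, not part of the original file): detwingle()
    # rewrites Windows-1252 smart-quote bytes only when they are not part of a
    # valid multi-byte UTF-8 sequence:
    #
    #   UnicodeDammit.detwingle(b"\x93")             # -> b'\xe2\x80\x9c' (“)
    #   UnicodeDammit.detwingle("œ".encode("utf8"))  # unchanged, b'\xc5\x93'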

class TestNamedspacedAttribute(SoupTest):

    def test_name_may_be_none(self):
        a = NamespacedAttribute("xmlns", None)
        self.assertEqual(a, "xmlns")

    def test_attribute_is_equivalent_to_colon_separated_string(self):
        a = NamespacedAttribute("a", "b")
        self.assertEqual("a:b", a)

    def test_attributes_are_equivalent_if_prefix_and_name_identical(self):
        a = NamespacedAttribute("a", "b", "c")
        b = NamespacedAttribute("a", "b", "c")
        self.assertEqual(a, b)

        # The actual namespace is not considered.
        c = NamespacedAttribute("a", "b", None)
        self.assertEqual(a, c)

        # But name and prefix are important.
        d = NamespacedAttribute("a", "z", "c")
        self.assertNotEqual(a, d)

        e = NamespacedAttribute("z", "b", "c")
        self.assertNotEqual(a, e)

class TestAttributeValueWithCharsetSubstitution(unittest.TestCase):

    def test_charset_meta_attribute_value(self):
        value = CharsetMetaAttributeValue("euc-jp")
        self.assertEqual("euc-jp", value)
        self.assertEqual("euc-jp", value.original_value)
        self.assertEqual("utf8", value.encode("utf8"))

    def test_content_meta_attribute_value(self):
        value = ContentMetaAttributeValue("text/html; charset=euc-jp")
        self.assertEqual("text/html; charset=euc-jp", value)
        self.assertEqual("text/html; charset=euc-jp", value.original_value)
        self.assertEqual("text/html; charset=utf8", value.encode("utf8"))

bitbake/lib/bs4/tests/test_tree.py: new file, 2004 lines (diff suppressed because it is too large).
@@ -13,7 +13,6 @@ from bb.asyncrpc.client import parse_address, ADDR_TYPE_UNIX, ADDR_TYPE_WS

User = namedtuple("User", ("username", "permissions"))


def create_server(
    addr,
    dbname,
@@ -26,7 +25,6 @@ def create_server(
    anon_perms=None,
    admin_username=None,
    admin_password=None,
    reuseport=False,
):
    def sqlite_engine():
        from .sqlite import DatabaseEngine
@@ -62,9 +60,9 @@ def create_server(
        s.start_unix_server(*a)
    elif typ == ADDR_TYPE_WS:
        url = urlparse(a[0])
        s.start_websocket_server(url.hostname, url.port, reuseport=reuseport)
        s.start_websocket_server(url.hostname, url.port)
    else:
        s.start_tcp_server(*a, reuseport=reuseport)
        s.start_tcp_server(*a)

    return s

@@ -5,7 +5,6 @@

import logging
import socket
import asyncio
import bb.asyncrpc
import json
from . import create_async_client
@@ -14,66 +13,6 @@ from . import create_async_client

logger = logging.getLogger("hashserv.client")


class Batch(object):
    def __init__(self):
        self.done = False
        self.cond = asyncio.Condition()
        self.pending = []
        self.results = []
        self.sent_count = 0

    async def recv(self, socket):
        while True:
            async with self.cond:
                await self.cond.wait_for(lambda: self.pending or self.done)

                if not self.pending:
                    if self.done:
                        return
                    continue

            r = await socket.recv()
            self.results.append(r)

            async with self.cond:
                self.pending.pop(0)

    async def send(self, socket, msgs):
        try:
            # In the event of a restart due to a reconnect, all in-flight
            # messages need to be resent first to keep the result count in sync
            for m in self.pending:
                await socket.send(m)

            for m in msgs:
                # Add the message to the pending list before attempting to send
                # it so that if the send fails it will be retried
                async with self.cond:
                    self.pending.append(m)
                    self.cond.notify()
                    self.sent_count += 1

                await socket.send(m)

        finally:
            async with self.cond:
                self.done = True
                self.cond.notify()

    async def process(self, socket, msgs):
        await asyncio.gather(
            self.recv(socket),
            self.send(socket, msgs),
        )

        if len(self.results) != self.sent_count:
            raise ValueError(
                f"Received {len(self.results)} results, expected {self.sent_count}"
            )

        return self.results
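        # Descriptive note (added for clarity, not in the diff): process()
        # runs send() and recv() concurrently, so replies are consumed while
        # later queries are still being written; the pending list is what lets
        # a reconnect replay messages that were sent but never answered.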


class AsyncClient(bb.asyncrpc.AsyncClient):
    MODE_NORMAL = 0
    MODE_GET_STREAM = 1
@@ -97,52 +36,32 @@ class AsyncClient(bb.asyncrpc.AsyncClient):
        if become:
            await self.become_user(become)

    async def send_stream_batch(self, mode, msgs):
        """
        Does a "batch" process of stream messages. This sends the query
        messages as fast as possible, and simultaneously attempts to read the
        messages back. This helps to mitigate the effects of latency to the
        hash equivalence server by allowing multiple queries to be "in-flight"
        at once.

        The implementation does more complicated tracking using a count of sent
        messages so that `msgs` can be a generator function (i.e. its length is
        unknown).
        """

        b = Batch()

    async def send_stream(self, mode, msg):
        async def proc():
            nonlocal b

            await self._set_mode(mode)
            return await b.process(self.socket, msgs)
            await self.socket.send(msg)
            return await self.socket.recv()

        return await self._send_wrapper(proc)
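        # Illustrative sketch (not part of the diff): with the batched API on
        # the removed side, a caller can keep many queries in flight at once:
        #
        #   hashes = await client.get_unihash_batch(
        #       (method, taskhash) for method, taskhash in work)
        #
        # whereas each send_stream() call on the retained side is a single
        # blocking request/response round trip.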

    async def invoke(self, *args, skip_mode=False, **kwargs):
    async def invoke(self, *args, **kwargs):
        # It's OK if connection errors cause a failure here, because the mode
        # is also reset to normal on a new connection
        if not skip_mode:
            await self._set_mode(self.MODE_NORMAL)
        await self._set_mode(self.MODE_NORMAL)
        return await super().invoke(*args, **kwargs)

    async def _set_mode(self, new_mode):
        async def stream_to_normal():
            # Check if already in normal mode (e.g. due to a connection reset)
            if self.mode == self.MODE_NORMAL:
                return "ok"
            await self.socket.send("END")
            return await self.socket.recv()

        async def normal_to_stream(command):
            r = await self.invoke({command: None}, skip_mode=True)
            r = await self.invoke({command: None})
            if r != "ok":
                self.check_invoke_error(r)
                raise ConnectionError(
                    f"Unable to transition to stream mode: Bad response from server {r!r}"
                )

            self.logger.debug("Mode is now %s", command)

        if new_mode == self.mode:
@@ -170,15 +89,10 @@ class AsyncClient(bb.asyncrpc.AsyncClient):
        self.mode = new_mode

    async def get_unihash(self, method, taskhash):
        r = await self.get_unihash_batch([(method, taskhash)])
        return r[0]

    async def get_unihash_batch(self, args):
        result = await self.send_stream_batch(
            self.MODE_GET_STREAM,
            (f"{method} {taskhash}" for method, taskhash in args),
        )
        return [r if r else None for r in result]
        r = await self.send_stream(self.MODE_GET_STREAM, "%s %s" % (method, taskhash))
        if not r:
            return None
        return r

    async def report_unihash(self, taskhash, method, outhash, unihash, extra={}):
        m = extra.copy()
@@ -201,12 +115,8 @@ class AsyncClient(bb.asyncrpc.AsyncClient):
        )

    async def unihash_exists(self, unihash):
        r = await self.unihash_exists_batch([unihash])
        return r[0]

    async def unihash_exists_batch(self, unihashes):
        result = await self.send_stream_batch(self.MODE_EXIST_STREAM, unihashes)
        return [r == "true" for r in result]
        r = await self.send_stream(self.MODE_EXIST_STREAM, unihash)
        return r == "true"

    async def get_outhash(self, method, outhash, taskhash, with_unihash=True):
        return await self.invoke(
@@ -327,12 +237,10 @@ class Client(bb.asyncrpc.Client):
            "connect_tcp",
            "connect_websocket",
            "get_unihash",
            "get_unihash_batch",
            "report_unihash",
            "report_unihash_equiv",
            "get_taskhash",
            "unihash_exists",
            "unihash_exists_batch",
            "get_outhash",
            "get_stats",
            "reset_stats",
@@ -356,3 +264,83 @@ class Client(bb.asyncrpc.Client):

    def _get_async_client(self):
        return AsyncClient(self.username, self.password)


class ClientPool(bb.asyncrpc.ClientPool):
    def __init__(
        self,
        address,
        max_clients,
        *,
        username=None,
        password=None,
        become=None,
    ):
        super().__init__(max_clients)
        self.address = address
        self.username = username
        self.password = password
        self.become = become

    async def _new_client(self):
        client = await create_async_client(
            self.address,
            username=self.username,
            password=self.password,
        )
        if self.become:
            await client.become_user(self.become)
        return client

    def _run_key_tasks(self, queries, call):
        results = {key: None for key in queries.keys()}

        def make_task(key, args):
            async def task(client):
                nonlocal results
                unihash = await call(client, args)
                results[key] = unihash

            return task

        def gen_tasks():
            for key, args in queries.items():
                yield make_task(key, args)

        self.run_tasks(gen_tasks())
        return results

    def get_unihashes(self, queries):
        """
        Query multiple unihashes in parallel.

        The queries argument is a dictionary with arbitrary keys. Each value
        must be a tuple of (method, taskhash).

        Returns a dictionary with a corresponding key for each input key, and
        the value is the queried unihash (which might be None if the query
        failed).
        """

        async def call(client, args):
            method, taskhash = args
            return await client.get_unihash(method, taskhash)

        return self._run_key_tasks(queries, call)
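        # Illustrative only (the address and keys below are made up): the pool
        # is used as a context manager, mirroring the tests further down:
        #
        #   with ClientPool("unix:///tmp/hashserv.sock", 10) as pool:
        #       results = pool.get_unihashes({
        #           "taskA": (method, taskhash_a),
        #           "taskB": (method, taskhash_b),
        #       })
        #   # results == {"taskA": <unihash or None>, "taskB": ...}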

    def unihashes_exist(self, queries):
        """
        Query multiple unihash existence checks in parallel.

        The queries argument is a dictionary with arbitrary keys. Each value
        must be a unihash.

        Returns a dictionary with a corresponding key for each input key, and
        the value is True or False if the unihash is known by the server (or
        None if there was a failure).
        """

        async def call(client, unihash):
            return await client.unihash_exists(unihash)

        return self._run_key_tasks(queries, call)

@@ -8,6 +8,7 @@
from . import create_server, create_client
from .server import DEFAULT_ANON_PERMS, ALL_PERMISSIONS
from bb.asyncrpc import InvokeError
from .client import ClientPool
import hashlib
import logging
import multiprocessing
@@ -93,6 +94,9 @@ class HashEquivalenceTestSetup(object):
        return self.start_client(self.auth_server_address, user["username"], user["token"])

    def setUp(self):
        if sys.version_info < (3, 5, 0):
            self.skipTest('Python 3.5 or later required')

        self.temp_dir = tempfile.TemporaryDirectory(prefix='bb-hashserv')
        self.addCleanup(self.temp_dir.cleanup)

@@ -551,7 +555,8 @@ class HashEquivalenceCommonTests(object):
        # shares a taskhash with Task 2
        self.assertClientGetHash(self.client, taskhash2, unihash2)

    def test_get_unihash_batch(self):

    def test_client_pool_get_unihashes(self):
        TEST_INPUT = (
            # taskhash                                   outhash                                                            unihash
            ('8aa96fcffb5831b3c2c0cb75f0431e3f8b20554a', 'afe240a439959ce86f5e322f8c208e1fedefea9e813f2140c81af866cc9edf7e','218e57509998197d570e2c98512d0105985dffc9'),
@@ -568,27 +573,28 @@ class HashEquivalenceCommonTests(object):
            "6b6be7a84ab179b4240c4302518dc3f6",
        )

        for taskhash, outhash, unihash in TEST_INPUT:
            self.client.report_unihash(taskhash, self.METHOD, outhash, unihash)
        with ClientPool(self.server_address, 10) as client_pool:
            for taskhash, outhash, unihash in TEST_INPUT:
                self.client.report_unihash(taskhash, self.METHOD, outhash, unihash)

            query = {idx: (self.METHOD, data[0]) for idx, data in enumerate(TEST_INPUT)}
            for idx, taskhash in enumerate(EXTRA_QUERIES):
                query[idx + len(TEST_INPUT)] = (self.METHOD, taskhash)

        result = self.client.get_unihash_batch(
            [(self.METHOD, data[0]) for data in TEST_INPUT] +
            [(self.METHOD, e) for e in EXTRA_QUERIES]
        )
            result = client_pool.get_unihashes(query)

        self.assertListEqual(result, [
            "218e57509998197d570e2c98512d0105985dffc9",
            "218e57509998197d570e2c98512d0105985dffc9",
            "218e57509998197d570e2c98512d0105985dffc9",
            "3b5d3d83f07f259e9086fcb422c855286e18a57d",
            "f46d3fbb439bd9b921095da657a4de906510d2cd",
            "f46d3fbb439bd9b921095da657a4de906510d2cd",
            "05d2a63c81e32f0a36542ca677e8ad852365c538",
            None,
        ])
            self.assertDictEqual(result, {
                0: "218e57509998197d570e2c98512d0105985dffc9",
                1: "218e57509998197d570e2c98512d0105985dffc9",
                2: "218e57509998197d570e2c98512d0105985dffc9",
                3: "3b5d3d83f07f259e9086fcb422c855286e18a57d",
                4: "f46d3fbb439bd9b921095da657a4de906510d2cd",
                5: "f46d3fbb439bd9b921095da657a4de906510d2cd",
                6: "05d2a63c81e32f0a36542ca677e8ad852365c538",
                7: None,
            })

    def test_unihash_exists_batch(self):

    def test_client_pool_unihash_exists(self):
        TEST_INPUT = (
            # taskhash                                   outhash                                                            unihash
            ('8aa96fcffb5831b3c2c0cb75f0431e3f8b20554a', 'afe240a439959ce86f5e322f8c208e1fedefea9e813f2140c81af866cc9edf7e','218e57509998197d570e2c98512d0105985dffc9'),
@@ -608,24 +614,28 @@ class HashEquivalenceCommonTests(object):
        result_unihashes = set()


        for taskhash, outhash, unihash in TEST_INPUT:
            result = self.client.report_unihash(taskhash, self.METHOD, outhash, unihash)
            result_unihashes.add(result["unihash"])
        with ClientPool(self.server_address, 10) as client_pool:
            for taskhash, outhash, unihash in TEST_INPUT:
                result = self.client.report_unihash(taskhash, self.METHOD, outhash, unihash)
                result_unihashes.add(result["unihash"])

        query = []
        expected = []
            query = {}
            expected = {}

        for _, _, unihash in TEST_INPUT:
            query.append(unihash)
            expected.append(unihash in result_unihashes)
            for _, _, unihash in TEST_INPUT:
                idx = len(query)
                query[idx] = unihash
                expected[idx] = unihash in result_unihashes


        for unihash in EXTRA_QUERIES:
            query.append(unihash)
            expected.append(False)
            for unihash in EXTRA_QUERIES:
                idx = len(query)
                query[idx] = unihash
                expected[idx] = False

            result = client_pool.unihashes_exist(query)
            self.assertDictEqual(result, expected)

        result = self.client.unihash_exists_batch(query)
        self.assertListEqual(result, expected)

    def test_auth_read_perms(self):
        admin_client = self.start_auth_server()

@@ -4,92 +4,17 @@
# SPDX-License-Identifier: GPL-2.0-only
#

__version__ = "1.0.0"

__version__ = "2.0.0"
import os, time
import sys, logging

import logging
logger = logging.getLogger("BitBake.PRserv")
def init_logger(logfile, loglevel):
    numeric_level = getattr(logging, loglevel.upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError("Invalid log level: %s" % loglevel)
    FORMAT = "%(asctime)-15s %(message)s"
    logging.basicConfig(level=numeric_level, filename=logfile, format=FORMAT)

from bb.asyncrpc.client import parse_address, ADDR_TYPE_UNIX, ADDR_TYPE_WS

def create_server(addr, dbpath, upstream=None, read_only=False):
    from . import serv

    s = serv.PRServer(dbpath, upstream=upstream, read_only=read_only)
    host, port = addr.split(":")
    s.start_tcp_server(host, int(port))

    return s

def increase_revision(ver):
    """Take a revision string such as "1" or "1.2.3" or even a number and increase its last number
    This fails if the last number is not an integer"""

    fields = str(ver).split('.')
    last = fields[-1]

    try:
        val = int(last)
    except Exception as e:
        logger.critical("Unable to increase revision value %s: %s" % (ver, e))
        raise e

    return ".".join(fields[0:-1] + [ str(val + 1) ])

def _revision_greater_or_equal(rev1, rev2):
    """Compares x.y.z revision numbers, using integer comparison
    Returns True if rev1 is greater or equal to rev2"""

    fields1 = rev1.split(".")
    fields2 = rev2.split(".")
    l1 = len(fields1)
    l2 = len(fields2)

    for i in range(l1):
        val1 = int(fields1[i])
        if i < l2:
            val2 = int(fields2[i])
            if val2 < val1:
                return True
            elif val2 > val1:
                return False
        else:
            return True
    return True

def revision_smaller(rev1, rev2):
    """Compares x.y.z revision numbers, using integer comparison
    Returns True if rev1 is strictly smaller than rev2"""
    return not(_revision_greater_or_equal(rev1, rev2))

def revision_greater(rev1, rev2):
    """Compares x.y.z revision numbers, using integer comparison
    Returns True if rev1 is strictly greater than rev2"""
    return _revision_greater_or_equal(rev1, rev2) and (rev1 != rev2)
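
# Illustrative examples (added for clarity, not part of the diff) of how these
# helpers compare dotted revisions using integer, not string, comparison:
#
#   increase_revision("5")            # -> "6"
#   increase_revision("1.2.3")        # -> "1.2.4"
#   revision_greater("1.10", "1.9")   # -> True ("10" > "9" as integers)
#   revision_smaller("1.9", "1.10")   # -> True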

def create_client(addr):
    from . import client

    c = client.PRClient()

    try:
        (typ, a) = parse_address(addr)
        c.connect_tcp(*a)
        return c
    except Exception as e:
        c.close()
        raise e

async def create_async_client(addr):
    from . import client

    c = client.PRAsyncClient()

    try:
        (typ, a) = parse_address(addr)
        await c.connect_tcp(*a)
        return c

    except Exception as e:
        await c.close()
        raise e

class NotFoundError(Exception):
    pass

@@ -6,7 +6,6 @@

import logging
import bb.asyncrpc
from . import create_async_client

logger = logging.getLogger("BitBake.PRserv")

@@ -14,16 +13,16 @@ class PRAsyncClient(bb.asyncrpc.AsyncClient):
    def __init__(self):
        super().__init__("PRSERVICE", "1.0", logger)

    async def getPR(self, version, pkgarch, checksum, history=False):
    async def getPR(self, version, pkgarch, checksum):
        response = await self.invoke(
            {"get-pr": {"version": version, "pkgarch": pkgarch, "checksum": checksum, "history": history}}
            {"get-pr": {"version": version, "pkgarch": pkgarch, "checksum": checksum}}
        )
        if response:
            return response["value"]

    async def test_pr(self, version, pkgarch, checksum, history=False):
    async def test_pr(self, version, pkgarch, checksum):
        response = await self.invoke(
            {"test-pr": {"version": version, "pkgarch": pkgarch, "checksum": checksum, "history": history}}
            {"test-pr": {"version": version, "pkgarch": pkgarch, "checksum": checksum}}
        )
        if response:
            return response["value"]
@@ -49,9 +48,9 @@ class PRAsyncClient(bb.asyncrpc.AsyncClient):
        if response:
            return response["value"]

    async def export(self, version, pkgarch, checksum, colinfo, history=False):
    async def export(self, version, pkgarch, checksum, colinfo):
        response = await self.invoke(
            {"export": {"version": version, "pkgarch": pkgarch, "checksum": checksum, "colinfo": colinfo, "history": history}}
            {"export": {"version": version, "pkgarch": pkgarch, "checksum": checksum, "colinfo": colinfo}}
        )
        if response:
            return (response["metainfo"], response["datainfo"])
@@ -66,7 +65,7 @@ class PRAsyncClient(bb.asyncrpc.AsyncClient):
class PRClient(bb.asyncrpc.Client):
    def __init__(self):
        super().__init__()
        self._add_methods("getPR", "test_pr", "test_package", "max_package_pr", "importone", "export", "is_readonly")
        self._add_methods("getPR", "test_pr", "test_package", "importone", "export", "is_readonly")

    def _get_async_client(self):
        return PRAsyncClient()

@@ -8,13 +8,19 @@ import logging
import os.path
import errno
import prserv
import sqlite3
import time

from contextlib import closing
from . import increase_revision, revision_greater, revision_smaller
try:
    import sqlite3
except ImportError:
    from pysqlite2 import dbapi2 as sqlite3

logger = logging.getLogger("BitBake.PRserv")

sqlversion = sqlite3.sqlite_version_info
if sqlversion[0] < 3 or (sqlversion[0] == 3 and sqlversion[1] < 3):
    raise Exception("sqlite3 version 3.3.0 or later is required.")

#
# "No History" mode - for a given query tuple (version, pkgarch, checksum),
# the returned value will be the largest among all the values of the same
@@ -23,232 +29,287 @@ logger = logging.getLogger("BitBake.PRserv")
# "History" mode - Return a new higher value for previously unseen query
# tuple (version, pkgarch, checksum), otherwise return historical value.
# Value can decrement if returning to a previous build.
#
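# Illustrative example (added for clarity, not part of the diff): with
# (v, arch, A) -> 5 and (v, arch, B) -> 6 already stored, querying
# (v, arch, A) again yields 7 in "no history" mode (A is bumped above the
# package-wide maximum), while "history" mode simply returns the stored 5.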

class PRTable(object):
    def __init__(self, conn, table, read_only):
    def __init__(self, conn, table, nohist, read_only):
        self.conn = conn
        self.nohist = nohist
        self.read_only = read_only
        self.table = table
        self.dirty = False
        if nohist:
            self.table = "%s_nohist" % table
        else:
            self.table = "%s_hist" % table

        # Creating the table even if the server is read-only.
        # This avoids a race condition if a shared database
        # is accessed by a read-only server first.

        with closing(self.conn.cursor()) as cursor:
            cursor.execute("CREATE TABLE IF NOT EXISTS %s \
        if self.read_only:
            table_exists = self._execute(
                "SELECT count(*) FROM sqlite_master \
                WHERE type='table' AND name='%s'" % (self.table))
            if not table_exists:
                raise prserv.NotFoundError
        else:
            self._execute("CREATE TABLE IF NOT EXISTS %s \
                    (version TEXT NOT NULL, \
                    pkgarch TEXT NOT NULL, \
                    checksum TEXT NOT NULL, \
                    value TEXT, \
                    PRIMARY KEY (version, pkgarch, checksum, value));" % self.table)
                    value INTEGER, \
                    PRIMARY KEY (version, pkgarch, checksum));" % self.table)

    def _execute(self, *query):
        """Execute a query, waiting to acquire a lock if necessary"""
        start = time.time()
        end = start + 20
        while True:
            try:
                return self.conn.execute(*query)
            except sqlite3.OperationalError as exc:
                if "is locked" in str(exc) and end > time.time():
                    continue
                raise exc

    def sync(self):
        if not self.read_only:
            self.conn.commit()
            self._execute("BEGIN EXCLUSIVE TRANSACTION")

    def _extremum_value(self, rows, is_max):
        value = None

        for row in rows:
            current_value = row[0]
            if value is None:
                value = current_value
            else:
                if is_max:
                    is_new_extremum = revision_greater(current_value, value)
                else:
                    is_new_extremum = revision_smaller(current_value, value)
                if is_new_extremum:
                    value = current_value
        return value

    def _max_value(self, rows):
        return self._extremum_value(rows, True)

    def _min_value(self, rows):
        return self._extremum_value(rows, False)

    def sync_if_dirty(self):
        if self.dirty:
            self.sync()
            self.dirty = False

    def test_package(self, version, pkgarch):
        """Returns whether the specified package version is found in the database for the specified architecture"""

        # Just returns the value if found or None otherwise
        with closing(self.conn.cursor()) as cursor:
            data = cursor.execute("SELECT value FROM %s WHERE version=? AND pkgarch=?;" % self.table,
                                  (version, pkgarch))
            row = data.fetchone()
            if row is not None:
                return True
            else:
                return False

    def test_checksum_value(self, version, pkgarch, checksum, value):
        """Returns whether the specified value is found in the database for the specified package, architecture and checksum"""

        with closing(self.conn.cursor()) as cursor:
            data = cursor.execute("SELECT value FROM %s WHERE version=? AND pkgarch=? and checksum=? and value=?;" % self.table,
                                  (version, pkgarch, checksum, value))
            row = data.fetchone()
            if row is not None:
                return True
            else:
                return False
        data = self._execute("SELECT value FROM %s WHERE version=? AND pkgarch=?;" % self.table,
                             (version, pkgarch))
        row = data.fetchone()
        if row is not None:
            return True
        else:
            return False

    def test_value(self, version, pkgarch, value):
        """Returns whether the specified value is found in the database for the specified package and architecture"""

        # Just returns the value if found or None otherwise
        with closing(self.conn.cursor()) as cursor:
            data = cursor.execute("SELECT value FROM %s WHERE version=? AND pkgarch=? and value=?;" % self.table,
                                  (version, pkgarch, value))
            row = data.fetchone()
            if row is not None:
                return True
            else:
                return False
        data = self._execute("SELECT value FROM %s WHERE version=? AND pkgarch=? and value=?;" % self.table,
                             (version, pkgarch, value))
        row = data.fetchone()
        if row is not None:
            return True
        else:
            return False


    def find_package_max_value(self, version, pkgarch):
        """Returns the greatest value for (version, pkgarch), or None if not found. Doesn't create a new value"""

        with closing(self.conn.cursor()) as cursor:
            data = cursor.execute("SELECT value FROM %s where version=? AND pkgarch=?;" % (self.table),
                                  (version, pkgarch))
            rows = data.fetchall()
            value = self._max_value(rows)
            return value

    def find_value(self, version, pkgarch, checksum, history=False):
    def find_value(self, version, pkgarch, checksum):
        """Returns the value for the specified checksum if found or None otherwise."""

        if history:
            return self.find_min_value(version, pkgarch, checksum)
        data = self._execute("SELECT value FROM %s WHERE version=? AND pkgarch=? AND checksum=?;" % self.table,
                             (version, pkgarch, checksum))
        row = data.fetchone()
        if row is not None:
            return row[0]
        else:
            return self.find_max_value(version, pkgarch, checksum)
            return None

    def find_max_value(self, version, pkgarch):
        """Returns the greatest value for (version, pkgarch), or None if not found. Doesn't create a new value"""

    def _find_extremum_value(self, version, pkgarch, checksum, is_max):
        """Returns the maximum (if is_max is True) or minimum (if is_max is False) value
        for (version, pkgarch, checksum), or None if not found. Doesn't create a new value"""
        data = self._execute("SELECT max(value) FROM %s where version=? AND pkgarch=?;" % (self.table),
                             (version, pkgarch))
        row = data.fetchone()
        if row is not None:
            return row[0]
        else:
            return None

        with closing(self.conn.cursor()) as cursor:
            data = cursor.execute("SELECT value FROM %s where version=? AND pkgarch=? AND checksum=?;" % (self.table),
                                  (version, pkgarch, checksum))
            rows = data.fetchall()
            return self._extremum_value(rows, is_max)

    def _get_value_hist(self, version, pkgarch, checksum):
        data = self._execute("SELECT value FROM %s WHERE version=? AND pkgarch=? AND checksum=?;" % self.table,
                             (version, pkgarch, checksum))
        row = data.fetchone()
        if row is not None:
            return row[0]
        else:
            #no value found, try to insert
            if self.read_only:
                data = self._execute("SELECT ifnull(max(value)+1, 0) FROM %s where version=? AND pkgarch=?;" % (self.table),
                                     (version, pkgarch))
                row = data.fetchone()
                if row is not None:
                    return row[0]
                else:
                    return 0

    def find_max_value(self, version, pkgarch, checksum):
        return self._find_extremum_value(version, pkgarch, checksum, True)
            try:
                self._execute("INSERT INTO %s VALUES (?, ?, ?, (select ifnull(max(value)+1, 0) from %s where version=? AND pkgarch=?));"
                              % (self.table, self.table),
                              (version, pkgarch, checksum, version, pkgarch))
            except sqlite3.IntegrityError as exc:
                logger.error(str(exc))

    def find_min_value(self, version, pkgarch, checksum):
        return self._find_extremum_value(version, pkgarch, checksum, False)
            self.dirty = True

    def find_new_subvalue(self, version, pkgarch, base):
        """Take and increase the greatest "<base>.y" value for (version, pkgarch), or return "<base>.0" if not found.
        This doesn't store a new value."""

        with closing(self.conn.cursor()) as cursor:
            data = cursor.execute("SELECT value FROM %s where version=? AND pkgarch=? AND value LIKE '%s.%%';" % (self.table, base),
                                  (version, pkgarch))
            rows = data.fetchall()
            value = self._max_value(rows)

            if value is not None:
                return increase_revision(value)
            data = self._execute("SELECT value FROM %s WHERE version=? AND pkgarch=? AND checksum=?;" % self.table,
                                 (version, pkgarch, checksum))
            row = data.fetchone()
            if row is not None:
                return row[0]
            else:
                return base + ".0"
                raise prserv.NotFoundError

    def store_value(self, version, pkgarch, checksum, value):
        """Store value in the database"""

        if not self.read_only and not self.test_checksum_value(version, pkgarch, checksum, value):
            with closing(self.conn.cursor()) as cursor:
                cursor.execute("INSERT INTO %s VALUES (?, ?, ?, ?);" % (self.table),
                               (version, pkgarch, checksum, value))
            self.conn.commit()

    def _get_value(self, version, pkgarch, checksum, history):

        max_value = self.find_package_max_value(version, pkgarch)

        if max_value is None:
            # version, pkgarch completely unknown. Return initial value.
            return "0"

        value = self.find_value(version, pkgarch, checksum, history)

        if value is None:
            # version, pkgarch found but not checksum. Create a new value from the maximum one
            return increase_revision(max_value)

        if history:
            return value

        # "no history" mode - If the value is not the maximum value for the package, need to increase it.
        if max_value > value:
            return increase_revision(max_value)

    def _get_value_no_hist(self, version, pkgarch, checksum):
        data = self._execute("SELECT value FROM %s \
                            WHERE version=? AND pkgarch=? AND checksum=? AND \
                            value >= (select max(value) from %s where version=? AND pkgarch=?);"
                            % (self.table, self.table),
                            (version, pkgarch, checksum, version, pkgarch))
        row = data.fetchone()
        if row is not None:
            return row[0]
        else:
            return value
            #no value found, try to insert
            if self.read_only:
                data = self._execute("SELECT ifnull(max(value)+1, 0) FROM %s where version=? AND pkgarch=?;" % (self.table),
                                     (version, pkgarch))
                return data.fetchone()[0]

    def get_value(self, version, pkgarch, checksum, history):
        value = self._get_value(version, pkgarch, checksum, history)
        if not self.read_only:
            self.store_value(version, pkgarch, checksum, value)
        return value
            try:
                self._execute("INSERT OR REPLACE INTO %s VALUES (?, ?, ?, (select ifnull(max(value)+1, 0) from %s where version=? AND pkgarch=?));"
                              % (self.table, self.table),
                              (version, pkgarch, checksum, version, pkgarch))
            except sqlite3.IntegrityError as exc:
                logger.error(str(exc))
                self.conn.rollback()

            self.dirty = True

            data = self._execute("SELECT value FROM %s WHERE version=? AND pkgarch=? AND checksum=?;" % self.table,
                                 (version, pkgarch, checksum))
            row = data.fetchone()
            if row is not None:
                return row[0]
            else:
                raise prserv.NotFoundError

    def get_value(self, version, pkgarch, checksum):
        if self.nohist:
            return self._get_value_no_hist(version, pkgarch, checksum)
        else:
            return self._get_value_hist(version, pkgarch, checksum)

    def _import_hist(self, version, pkgarch, checksum, value):
        if self.read_only:
            return None

        val = None
        data = self._execute("SELECT value FROM %s WHERE version=? AND pkgarch=? AND checksum=?;" % self.table,
                             (version, pkgarch, checksum))
        row = data.fetchone()
        if row is not None:
            val = row[0]
        else:
            #no value found, try to insert
            try:
                self._execute("INSERT INTO %s VALUES (?, ?, ?, ?);" % (self.table),
                              (version, pkgarch, checksum, value))
            except sqlite3.IntegrityError as exc:
                logger.error(str(exc))

            self.dirty = True

            data = self._execute("SELECT value FROM %s WHERE version=? AND pkgarch=? AND checksum=?;" % self.table,
                                 (version, pkgarch, checksum))
            row = data.fetchone()
            if row is not None:
                val = row[0]
        return val

    def _import_no_hist(self, version, pkgarch, checksum, value):
        if self.read_only:
            return None

        try:
            #try to insert
            self._execute("INSERT INTO %s VALUES (?, ?, ?, ?);" % (self.table),
                          (version, pkgarch, checksum, value))
        except sqlite3.IntegrityError as exc:
            #already have the record, try to update
            try:
                self._execute("UPDATE %s SET value=? WHERE version=? AND pkgarch=? AND checksum=? AND value<?"
                              % (self.table),
                              (value, version, pkgarch, checksum, value))
            except sqlite3.IntegrityError as exc:
                logger.error(str(exc))

        self.dirty = True

        data = self._execute("SELECT value FROM %s WHERE version=? AND pkgarch=? AND checksum=? AND value>=?;" % self.table,
                             (version, pkgarch, checksum, value))
        row = data.fetchone()
        if row is not None:
            return row[0]
        else:
            return None

    def importone(self, version, pkgarch, checksum, value):
        self.store_value(version, pkgarch, checksum, value)
        return value
        if self.nohist:
            return self._import_no_hist(version, pkgarch, checksum, value)
        else:
            return self._import_hist(version, pkgarch, checksum, value)

    def export(self, version, pkgarch, checksum, colinfo, history=False):
    def export(self, version, pkgarch, checksum, colinfo):
        metainfo = {}
        with closing(self.conn.cursor()) as cursor:
            #column info
            if colinfo:
                metainfo["tbl_name"] = self.table
                metainfo["core_ver"] = prserv.__version__
                metainfo["col_info"] = []
                data = cursor.execute("PRAGMA table_info(%s);" % self.table)
                for row in data:
                    col = {}
                    col["name"] = row["name"]
                    col["type"] = row["type"]
                    col["notnull"] = row["notnull"]
                    col["dflt_value"] = row["dflt_value"]
                    col["pk"] = row["pk"]
                    metainfo["col_info"].append(col)

            #data info
            datainfo = []

            if history:
                sqlstmt = "SELECT * FROM %s as T1 WHERE 1=1 " % self.table
            else:
                sqlstmt = "SELECT T1.version, T1.pkgarch, T1.checksum, T1.value FROM %s as T1, \
                        (SELECT version, pkgarch, max(value) as maxvalue FROM %s GROUP BY version, pkgarch) as T2 \
                        WHERE T1.version=T2.version AND T1.pkgarch=T2.pkgarch AND T1.value=T2.maxvalue " % (self.table, self.table)
            sqlarg = []
            where = ""
            if version:
                where += "AND T1.version=? "
                sqlarg.append(str(version))
            if pkgarch:
                where += "AND T1.pkgarch=? "
                sqlarg.append(str(pkgarch))
            if checksum:
                where += "AND T1.checksum=? "
                sqlarg.append(str(checksum))

            sqlstmt += where + ";"

            if len(sqlarg):
                data = cursor.execute(sqlstmt, tuple(sqlarg))
            else:
                data = cursor.execute(sqlstmt)
        #column info
        if colinfo:
            metainfo["tbl_name"] = self.table
            metainfo["core_ver"] = prserv.__version__
            metainfo["col_info"] = []
            data = self._execute("PRAGMA table_info(%s);" % self.table)
            for row in data:
                if row["version"]:
                    col = {}
                    col["version"] = row["version"]
                    col["pkgarch"] = row["pkgarch"]
                    col["checksum"] = row["checksum"]
                    col["value"] = row["value"]
                    datainfo.append(col)
                col = {}
                col["name"] = row["name"]
                col["type"] = row["type"]
                col["notnull"] = row["notnull"]
                col["dflt_value"] = row["dflt_value"]
                col["pk"] = row["pk"]
                metainfo["col_info"].append(col)

        #data info
        datainfo = []

        if self.nohist:
            sqlstmt = "SELECT T1.version, T1.pkgarch, T1.checksum, T1.value FROM %s as T1, \
                    (SELECT version, pkgarch, max(value) as maxvalue FROM %s GROUP BY version, pkgarch) as T2 \
                    WHERE T1.version=T2.version AND T1.pkgarch=T2.pkgarch AND T1.value=T2.maxvalue " % (self.table, self.table)
        else:
            sqlstmt = "SELECT * FROM %s as T1 WHERE 1=1 " % self.table
        sqlarg = []
        where = ""
        if version:
            where += "AND T1.version=? "
            sqlarg.append(str(version))
        if pkgarch:
            where += "AND T1.pkgarch=? "
            sqlarg.append(str(pkgarch))
        if checksum:
            where += "AND T1.checksum=? "
            sqlarg.append(str(checksum))

        sqlstmt += where + ";"

        if len(sqlarg):
            data = self._execute(sqlstmt, tuple(sqlarg))
        else:
            data = self._execute(sqlstmt)
        for row in data:
            if row["version"]:
                col = {}
                col["version"] = row["version"]
                col["pkgarch"] = row["pkgarch"]
                col["checksum"] = row["checksum"]
                col["value"] = row["value"]
                datainfo.append(col)
        return (metainfo, datainfo)

    def dump_db(self, fd):
@@ -261,8 +322,9 @@ class PRTable(object):

class PRData(object):
    """Object representing the PR database"""
    def __init__(self, filename, read_only=False):
    def __init__(self, filename, nohist=True, read_only=False):
        self.filename = os.path.abspath(filename)
        self.nohist = nohist
        self.read_only = read_only
        #build directory hierarchy
        try:
@@ -272,15 +334,14 @@ class PRData(object):
            raise e
        uri = "file:%s%s" % (self.filename, "?mode=ro" if self.read_only else "")
        logger.debug("Opening PRServ database '%s'" % (uri))
        self.connection = sqlite3.connect(uri, uri=True)
        self.connection = sqlite3.connect(uri, uri=True, isolation_level="EXCLUSIVE", check_same_thread = False)
        self.connection.row_factory = sqlite3.Row
        self.connection.execute("PRAGMA synchronous = OFF;")
        self.connection.execute("PRAGMA journal_mode = WAL;")
        self.connection.commit()
        if not self.read_only:
            self.connection.execute("pragma synchronous = off;")
            self.connection.execute("PRAGMA journal_mode = MEMORY;")
        self._tables = {}

    def disconnect(self):
        self.connection.commit()
        self.connection.close()

    def __getitem__(self, tblname):
@@ -290,7 +351,7 @@ class PRData(object):
        if tblname in self._tables:
            return self._tables[tblname]
        else:
            tableobj = self._tables[tblname] = PRTable(self.connection, tblname, self.read_only)
            tableobj = self._tables[tblname] = PRTable(self.connection, tblname, self.nohist, self.read_only)
            return tableobj

    def __delitem__(self, tblname):
@@ -298,4 +359,3 @@
        del self._tables[tblname]
        logger.info("drop table %s" % (tblname))
        self.connection.execute("DROP TABLE IF EXISTS %s;" % tblname)
        self.connection.commit()

@@ -12,7 +12,6 @@ import sqlite3
import prserv
import prserv.db
import errno
from . import create_async_client, revision_smaller, increase_revision
import bb.asyncrpc

logger = logging.getLogger("BitBake.PRserv")

@@ -42,16 +41,18 @@ class PRServerClient(bb.asyncrpc.AsyncServerConnection):
        try:
            return await super().dispatch_message(msg)
        except:
            self.server.table.sync()
            raise
        else:
            self.server.table.sync_if_dirty()

    async def handle_test_pr(self, request):
        '''Finds the PR value corresponding to the request. If not found, returns None and doesn't insert a new value'''
        version = request["version"]
        pkgarch = request["pkgarch"]
        checksum = request["checksum"]
        history = request["history"]

        value = self.server.table.find_value(version, pkgarch, checksum, history)
        value = self.server.table.find_value(version, pkgarch, checksum)
        return {"value": value}

    async def handle_test_package(self, request):
@@ -67,110 +68,22 @@ class PRServerClient(bb.asyncrpc.AsyncServerConnection):
        version = request["version"]
        pkgarch = request["pkgarch"]

        value = self.server.table.find_package_max_value(version, pkgarch)
        value = self.server.table.find_max_value(version, pkgarch)
        return {"value": value}

    async def handle_get_pr(self, request):
        version = request["version"]
        pkgarch = request["pkgarch"]
        checksum = request["checksum"]
        history = request["history"]

        if self.upstream_client is None:
            value = self.server.table.get_value(version, pkgarch, checksum, history)
            return {"value": value}

        # We have an upstream server.
        # Check whether the local server already knows the requested configuration.
        # If the configuration is a new one, the generated value we will add will
        # depend on what's on the upstream server. That's why we're calling find_value()
        # instead of get_value() directly.

        value = self.server.table.find_value(version, pkgarch, checksum, history)
        upstream_max = await self.upstream_client.max_package_pr(version, pkgarch)

        if value is not None:

            # The configuration is already known locally.

            if history:
                value = self.server.table.get_value(version, pkgarch, checksum, history)
            else:
                existing_value = value
                # In "no history", we need to make sure the value doesn't decrease
                # and is at least greater than the maximum upstream value
                # and the maximum local value

                local_max = self.server.table.find_package_max_value(version, pkgarch)
                if revision_smaller(value, local_max):
                    value = increase_revision(local_max)

                if revision_smaller(value, upstream_max):
                    # Ask upstream whether it knows the checksum
                    upstream_value = await self.upstream_client.test_pr(version, pkgarch, checksum)
                    if upstream_value is None:
                        # Upstream doesn't have our checksum, let's create a new one
                        value = upstream_max + ".0"
                    else:
                        # Fine to take the same value as upstream
                        value = upstream_max

                if not value == existing_value and not self.server.read_only:
                    self.server.table.store_value(version, pkgarch, checksum, value)

            return {"value": value}

        # The configuration is a new one for the local server
        # Let's ask the upstream server whether it knows it

        known_upstream = await self.upstream_client.test_package(version, pkgarch)

        if not known_upstream:

            # The package is not known upstream, must be a local-only package
            # Let's compute the PR number using the local-only method

            value = self.server.table.get_value(version, pkgarch, checksum, history)
            return {"value": value}

        # The package is known upstream, let's ask the upstream server
        # whether it knows our new output hash

        value = await self.upstream_client.test_pr(version, pkgarch, checksum)

        if value is not None:

            # Upstream knows this output hash, let's store it and use it too.

            if not self.server.read_only:
                self.server.table.store_value(version, pkgarch, checksum, value)
            # If the local server is read only, won't be able to store the new
            # value in the database and will have to keep asking the upstream server
            return {"value": value}

        # The output hash doesn't exist upstream, get the most recent number from upstream (x)
        # Then, we want to have a new PR value for the local server: x.y

        upstream_max = await self.upstream_client.max_package_pr(version, pkgarch)
        # Here we know that the package is known upstream, so upstream_max can't be None
        subvalue = self.server.table.find_new_subvalue(version, pkgarch, upstream_max)

        if not self.server.read_only:
            self.server.table.store_value(version, pkgarch, checksum, subvalue)

        return {"value": subvalue}

async def process_requests(self):
    if self.server.upstream is not None:
        self.upstream_client = await create_async_client(self.server.upstream)
    else:
        self.upstream_client = None

    response = None
    try:
        await super().process_requests()
    finally:
        if self.upstream_client is not None:
            await self.upstream_client.close()
        value = self.server.table.get_value(version, pkgarch, checksum)
        response = {"value": value}
    except prserv.NotFoundError:
        self.logger.error("failure storing value in database for (%s, %s)", version, checksum)

    return response

async def handle_import_one(self, request):
    response = None
@@ -191,10 +104,9 @@ class PRServerClient(bb.asyncrpc.AsyncServerConnection):
    pkgarch = request["pkgarch"]
    checksum = request["checksum"]
    colinfo = request["colinfo"]
    history = request["history"]

    try:
        (metainfo, datainfo) = self.server.table.export(version, pkgarch, checksum, colinfo, history)
        (metainfo, datainfo) = self.server.table.export(version, pkgarch, checksum, colinfo)
    except sqlite3.Error as exc:
        self.logger.error(str(exc))
        metainfo = datainfo = None
@@ -205,12 +117,11 @@ class PRServerClient(bb.asyncrpc.AsyncServerConnection):
    return {"readonly": self.server.read_only}

class PRServer(bb.asyncrpc.AsyncServer):
    def __init__(self, dbfile, read_only=False, upstream=None):
    def __init__(self, dbfile, read_only=False):
        super().__init__(logger)
        self.dbfile = dbfile
        self.table = None
        self.read_only = read_only
        self.upstream = upstream

    def accept_client(self, socket):
        return PRServerClient(socket, self)
@@ -223,25 +134,27 @@ class PRServer(bb.asyncrpc.AsyncServer):
        self.logger.info("Started PRServer with DBfile: %s, Address: %s, PID: %s" %
                         (self.dbfile, self.address, str(os.getpid())))

        if self.upstream is not None:
            self.logger.info("And upstream PRServer: %s " % (self.upstream))

        return tasks

    async def stop(self):
        self.table.sync_if_dirty()
        self.db.disconnect()
        await super().stop()

    def signal_handler(self):
        super().signal_handler()
        if self.table:
            self.table.sync()

class PRServSingleton(object):
    def __init__(self, dbfile, logfile, host, port, upstream):
    def __init__(self, dbfile, logfile, host, port):
        self.dbfile = dbfile
        self.logfile = logfile
        self.host = host
        self.port = port
        self.upstream = upstream

    def start(self):
        self.prserv = PRServer(self.dbfile, upstream=self.upstream)
        self.prserv = PRServer(self.dbfile)
        self.prserv.start_tcp_server(socket.gethostbyname(self.host), self.port)
        self.process = self.prserv.serve_as_process(log_level=logging.WARNING)

@@ -320,7 +233,7 @@ def run_as_daemon(func, pidfile, logfile):
    os.remove(pidfile)
    os._exit(0)

def start_daemon(dbfile, host, port, logfile, read_only=False, upstream=None):
def start_daemon(dbfile, host, port, logfile, read_only=False):
    ip = socket.gethostbyname(host)
    pidfile = PIDPREFIX % (ip, port)
    try:
@@ -336,7 +249,7 @@ def start_daemon(dbfile, host, port, logfile, read_only=False, upstream=None):

    dbfile = os.path.abspath(dbfile)
    def daemon_main():
        server = PRServer(dbfile, read_only=read_only, upstream=upstream)
        server = PRServer(dbfile, read_only=read_only)
        server.start_tcp_server(ip, port)
        server.serve_forever()
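
As a usage illustration only (start_daemon() and its upstream parameter come from the hunk above; the paths and the upstream address are hypothetical):

    # Run a local PR server that chains to a site-wide upstream server.
    start_daemon("/srv/prserv/prserv.sqlite3", "localhost", 8585,
                 "/srv/prserv/prserv.log", read_only=False,
                 upstream="prserv.example.com:8585")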

@@ -423,9 +336,6 @@ def auto_start(d):

    host = host_params[0].strip().lower()
    port = int(host_params[1])

    upstream = d.getVar("PRSERV_UPSTREAM") or None

    if is_local_special(host, port):
        import bb.utils
        cachedir = (d.getVar("PERSISTENT_DIR") or d.getVar("CACHE"))
@@ -440,7 +350,7 @@ def auto_start(d):
    auto_shutdown()
    if not singleton:
        bb.utils.mkdirhier(cachedir)
        singleton = PRServSingleton(os.path.abspath(dbfile), os.path.abspath(logfile), host, port, upstream)
        singleton = PRServSingleton(os.path.abspath(dbfile), os.path.abspath(logfile), host, port)
        singleton.start()
    if singleton:
        host = singleton.host
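
For context, a hedged local.conf sketch of how this auto-start path is typically driven (PRSERV_HOST is the standard BitBake variable for a locally auto-started PR service; PRSERV_UPSTREAM is the variable read in the hunk above; the upstream address is made up):

    PRSERV_HOST = "localhost:0"
    PRSERV_UPSTREAM = "prserv.example.com:8585"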

@@ -1,388 +0,0 @@
#! /usr/bin/env python3
#
# Copyright (C) 2024 BitBake Contributors
#
# SPDX-License-Identifier: GPL-2.0-only
#

from . import create_server, create_client, increase_revision, revision_greater, revision_smaller, _revision_greater_or_equal
import prserv.db as db
from bb.asyncrpc import InvokeError
import logging
import os
import sys
import tempfile
import unittest
import socket
import subprocess
from pathlib import Path

THIS_DIR = Path(__file__).parent
BIN_DIR = THIS_DIR.parent.parent / "bin"

version = "dummy-1.0-r0"
pkgarch = "core2-64"
other_arch = "aarch64"

checksumX = "51bf8189dbe9ea81fa6dd89608bf19380c437a9cf12f6c6239887801ba4ab4f0"
checksum0 = "51bf8189dbe9ea81fa6dd89608bf19380c437a9cf12f6c6239887801ba4ab4a0"
checksum1 = "51bf8189dbe9ea81fa6dd89608bf19380c437a9cf12f6c6239887801ba4ab4a1"
checksum2 = "51bf8189dbe9ea81fa6dd89608bf19380c437a9cf12f6c6239887801ba4ab4a2"
checksum3 = "51bf8189dbe9ea81fa6dd89608bf19380c437a9cf12f6c6239887801ba4ab4a3"
checksum4 = "51bf8189dbe9ea81fa6dd89608bf19380c437a9cf12f6c6239887801ba4ab4a4"
checksum5 = "51bf8189dbe9ea81fa6dd89608bf19380c437a9cf12f6c6239887801ba4ab4a5"
checksum6 = "51bf8189dbe9ea81fa6dd89608bf19380c437a9cf12f6c6239887801ba4ab4a6"
checksum7 = "51bf8189dbe9ea81fa6dd89608bf19380c437a9cf12f6c6239887801ba4ab4a7"
checksum8 = "51bf8189dbe9ea81fa6dd89608bf19380c437a9cf12f6c6239887801ba4ab4a8"
checksum9 = "51bf8189dbe9ea81fa6dd89608bf19380c437a9cf12f6c6239887801ba4ab4a9"
checksum10 = "51bf8189dbe9ea81fa6dd89608bf19380c437a9cf12f6c6239887801ba4ab4aa"

def server_prefunc(server, name):
    logging.basicConfig(level=logging.DEBUG, filename='prserv-%s.log' % name, filemode='w',
                        format='%(levelname)s %(filename)s:%(lineno)d %(message)s')
    server.logger.debug("Running server %s" % name)
    sys.stdout = open('prserv-stdout-%s.log' % name, 'w')
    sys.stderr = sys.stdout

class PRTestSetup(object):

    def start_server(self, name, dbfile, upstream=None, read_only=False, prefunc=server_prefunc):

        def cleanup_server(server):
            if server.process.exitcode is not None:
                return
            server.process.terminate()
            server.process.join()

        server = create_server(socket.gethostbyname("localhost") + ":0",
                               dbfile,
                               upstream=upstream,
                               read_only=read_only)

        server.serve_as_process(prefunc=prefunc, args=(name,))
        self.addCleanup(cleanup_server, server)

        return server

    def start_client(self, server_address):
        def cleanup_client(client):
            client.close()

        client = create_client(server_address)
        self.addCleanup(cleanup_client, client)

        return client

class FunctionTests(unittest.TestCase):

    def setUp(self):
        self.temp_dir = tempfile.TemporaryDirectory(prefix='bb-prserv')
        self.addCleanup(self.temp_dir.cleanup)

    def test_increase_revision(self):
        self.assertEqual(increase_revision("1"), "2")
        self.assertEqual(increase_revision("1.0"), "1.1")
        self.assertEqual(increase_revision("1.1.1"), "1.1.2")
        self.assertEqual(increase_revision("1.1.1.3"), "1.1.1.4")
        self.assertEqual(increase_revision("9"), "10")
        self.assertEqual(increase_revision("1.9"), "1.10")
        self.assertRaises(ValueError, increase_revision, "1.a")
        self.assertRaises(ValueError, increase_revision, "1.")
        self.assertRaises(ValueError, increase_revision, "")

    def test_revision_greater_or_equal(self):
        self.assertTrue(_revision_greater_or_equal("2", "2"))
        self.assertTrue(_revision_greater_or_equal("2", "1"))
        self.assertTrue(_revision_greater_or_equal("10", "2"))
        self.assertTrue(_revision_greater_or_equal("1.10", "1.2"))
        self.assertFalse(_revision_greater_or_equal("1.2", "1.10"))
        self.assertTrue(_revision_greater_or_equal("1.10", "1"))
        self.assertTrue(_revision_greater_or_equal("1.10.1", "1.10"))
        self.assertFalse(_revision_greater_or_equal("1.10.1", "1.10.2"))
        self.assertTrue(_revision_greater_or_equal("1.10.1", "1.10.1"))
        self.assertTrue(_revision_greater_or_equal("1.10.1", "1"))
        self.assertTrue(revision_greater("1.20", "1.3"))
        self.assertTrue(revision_smaller("1.3", "1.20"))

    # DB tests

    def test_db(self):
        dbfile = os.path.join(self.temp_dir.name, "testtable.sqlite3")

        self.db = db.PRData(dbfile)
        self.table = self.db["PRMAIN"]

        self.table.store_value(version, pkgarch, checksum0, "0")
        self.table.store_value(version, pkgarch, checksum1, "1")
        # "No history" mode supports multiple PRs for the same checksum
        self.table.store_value(version, pkgarch, checksum0, "2")
        self.table.store_value(version, pkgarch, checksum2, "1.0")

        self.assertTrue(self.table.test_package(version, pkgarch))
        self.assertFalse(self.table.test_package(version, other_arch))

        self.assertTrue(self.table.test_value(version, pkgarch, "0"))
        self.assertTrue(self.table.test_value(version, pkgarch, "1"))
        self.assertTrue(self.table.test_value(version, pkgarch, "2"))

        self.assertEqual(self.table.find_package_max_value(version, pkgarch), "2")

        self.assertEqual(self.table.find_min_value(version, pkgarch, checksum0), "0")
        self.assertEqual(self.table.find_max_value(version, pkgarch, checksum0), "2")

        # Test history modes
        self.assertEqual(self.table.find_value(version, pkgarch, checksum0, True), "0")
        self.assertEqual(self.table.find_value(version, pkgarch, checksum0, False), "2")

        self.assertEqual(self.table.find_new_subvalue(version, pkgarch, "3"), "3.0")
        self.assertEqual(self.table.find_new_subvalue(version, pkgarch, "1"), "1.1")

        # Revision comparison tests
        self.table.store_value(version, pkgarch, checksum1, "1.3")
        self.table.store_value(version, pkgarch, checksum1, "1.20")
        self.assertEqual(self.table.find_min_value(version, pkgarch, checksum1), "1")
        self.assertEqual(self.table.find_max_value(version, pkgarch, checksum1), "1.20")

class PRBasicTests(PRTestSetup, unittest.TestCase):

    def setUp(self):
        self.temp_dir = tempfile.TemporaryDirectory(prefix='bb-prserv')
        self.addCleanup(self.temp_dir.cleanup)

        dbfile = os.path.join(self.temp_dir.name, "prtest-basic.sqlite3")

        self.server1 = self.start_server("basic", dbfile)
        self.client1 = self.start_client(self.server1.address)

    def test_basic(self):

        # Checks on a non-existing configuration

        result = self.client1.test_pr(version, pkgarch, checksum0)
        self.assertIsNone(result, "test_pr should return 'None' for a non existing PR")

        result = self.client1.test_package(version, pkgarch)
        self.assertFalse(result, "test_package should return 'False' for a non existing PR")

        result = self.client1.max_package_pr(version, pkgarch)
        self.assertIsNone(result, "max_package_pr should return 'None' for a non existing PR")

        # Add a first configuration

        result = self.client1.getPR(version, pkgarch, checksum0)
        self.assertEqual(result, "0", "getPR: initial PR of a package should be '0'")

        result = self.client1.test_pr(version, pkgarch, checksum0)
        self.assertEqual(result, "0", "test_pr should return '0' here, matching the result of getPR")

        result = self.client1.test_package(version, pkgarch)
        self.assertTrue(result, "test_package should return 'True' for an existing PR")

        result = self.client1.max_package_pr(version, pkgarch)
        self.assertEqual(result, "0", "max_package_pr should return '0' in the current test series")

        # Check that the same request gets the same value

        result = self.client1.getPR(version, pkgarch, checksum0)
        self.assertEqual(result, "0", "getPR: asking for the same PR a second time in a row should return the same value.")

        # Add new configurations

        result = self.client1.getPR(version, pkgarch, checksum1)
        self.assertEqual(result, "1", "getPR: second PR of a package should be '1'")

        result = self.client1.test_pr(version, pkgarch, checksum1)
        self.assertEqual(result, "1", "test_pr should return '1' here, matching the result of getPR")

        result = self.client1.max_package_pr(version, pkgarch)
        self.assertEqual(result, "1", "max_package_pr should return '1' in the current test series")

        result = self.client1.getPR(version, pkgarch, checksum2)
        self.assertEqual(result, "2", "getPR: third PR of a package should be '2'")

        result = self.client1.test_pr(version, pkgarch, checksum2)
        self.assertEqual(result, "2", "test_pr should return '2' here, matching the result of getPR")

        result = self.client1.max_package_pr(version, pkgarch)
        self.assertEqual(result, "2", "max_package_pr should return '2' in the current test series")

        result = self.client1.getPR(version, pkgarch, checksum3)
        self.assertEqual(result, "3", "getPR: fourth PR of a package should be '3'")

        result = self.client1.test_pr(version, pkgarch, checksum3)
        self.assertEqual(result, "3", "test_pr should return '3' here, matching the result of getPR")

        result = self.client1.max_package_pr(version, pkgarch)
        self.assertEqual(result, "3", "max_package_pr should return '3' in the current test series")

        # Ask again for the first configuration

        result = self.client1.getPR(version, pkgarch, checksum0)
        self.assertEqual(result, "4", "getPR: should return '4' in this configuration")

        # Ask again with explicit "no history" mode

        result = self.client1.getPR(version, pkgarch, checksum0, False)
        self.assertEqual(result, "4", "getPR: should return '4' in this configuration")

        # Ask again with explicit "history" mode. This should return the first recorded PR for checksum0

        result = self.client1.getPR(version, pkgarch, checksum0, True)
        self.assertEqual(result, "0", "getPR: should return '0' in this configuration")

        # Check again that another pkgarch resets the counters

        result = self.client1.test_pr(version, other_arch, checksum0)
        self.assertIsNone(result, "test_pr should return 'None' for a non existing PR")

        result = self.client1.test_package(version, other_arch)
        self.assertFalse(result, "test_package should return 'False' for a non existing PR")

        result = self.client1.max_package_pr(version, other_arch)
        self.assertIsNone(result, "max_package_pr should return 'None' for a non existing PR")

        # Now add the configuration

        result = self.client1.getPR(version, other_arch, checksum0)
        self.assertEqual(result, "0", "getPR: initial PR of a package should be '0'")

        result = self.client1.test_pr(version, other_arch, checksum0)
        self.assertEqual(result, "0", "test_pr should return '0' here, matching the result of getPR")

        result = self.client1.test_package(version, other_arch)
        self.assertTrue(result, "test_package should return 'True' for an existing PR")

        result = self.client1.max_package_pr(version, other_arch)
        self.assertEqual(result, "0", "max_package_pr should return '0' in the current test series")

        result = self.client1.is_readonly()
        self.assertFalse(result, "Server should not be described as 'read-only'")

class PRUpstreamTests(PRTestSetup, unittest.TestCase):

    def setUp(self):

        self.temp_dir = tempfile.TemporaryDirectory(prefix='bb-prserv')
        self.addCleanup(self.temp_dir.cleanup)

        dbfile2 = os.path.join(self.temp_dir.name, "prtest-upstream2.sqlite3")
        self.server2 = self.start_server("upstream2", dbfile2)
        self.client2 = self.start_client(self.server2.address)

        dbfile1 = os.path.join(self.temp_dir.name, "prtest-upstream1.sqlite3")
        self.server1 = self.start_server("upstream1", dbfile1, upstream=self.server2.address)
        self.client1 = self.start_client(self.server1.address)

        dbfile0 = os.path.join(self.temp_dir.name, "prtest-local.sqlite3")
        self.server0 = self.start_server("local", dbfile0, upstream=self.server1.address)
        self.client0 = self.start_client(self.server0.address)
        self.shared_db = dbfile0

    def test_upstream_and_readonly(self):

        # For identical checksums, all servers should return the same PR

        result = self.client2.getPR(version, pkgarch, checksum0)
        self.assertEqual(result, "0", "getPR: initial PR of a package should be '0'")

        result = self.client1.getPR(version, pkgarch, checksum0)
        self.assertEqual(result, "0", "getPR: initial PR of a package should be '0' (same as upstream)")

        result = self.client0.getPR(version, pkgarch, checksum0)
        self.assertEqual(result, "0", "getPR: initial PR of a package should be '0' (same as upstream)")

        # Now introduce new checksums on server1 for the same version

        result = self.client1.getPR(version, pkgarch, checksum1)
        self.assertEqual(result, "0.0", "getPR: first PR of a package which has a different checksum upstream should be '0.0'")

        result = self.client1.getPR(version, pkgarch, checksum2)
        self.assertEqual(result, "0.1", "getPR: second PR of a package that has a different checksum upstream should be '0.1'")

        # Now introduce checksums on server0 for the same version

        result = self.client1.getPR(version, pkgarch, checksum1)
        self.assertEqual(result, "0.2", "getPR: can't decrease for known PR")

        result = self.client1.getPR(version, pkgarch, checksum2)
        self.assertEqual(result, "0.3")

        result = self.client1.max_package_pr(version, pkgarch)
        self.assertEqual(result, "0.3")

        result = self.client0.getPR(version, pkgarch, checksum3)
        self.assertEqual(result, "0.3.0", "getPR: first PR of a package that doesn't exist upstream should be '0.3.0'")

        result = self.client0.getPR(version, pkgarch, checksum4)
        self.assertEqual(result, "0.3.1", "getPR: second PR of a package that doesn't exist upstream should be '0.3.1'")

        result = self.client0.getPR(version, pkgarch, checksum3)
        self.assertEqual(result, "0.3.2")

        # More upstream updates
        # Here, we assume no communication between server2 and server0. server2 only impacts server0
        # after impacting server1

        self.assertEqual(self.client2.getPR(version, pkgarch, checksum5), "1")
        self.assertEqual(self.client1.getPR(version, pkgarch, checksum6), "1.0")
        self.assertEqual(self.client1.getPR(version, pkgarch, checksum7), "1.1")
        self.assertEqual(self.client0.getPR(version, pkgarch, checksum8), "1.1.0")
        self.assertEqual(self.client0.getPR(version, pkgarch, checksum9), "1.1.1")

        # "history" mode tests

        self.assertEqual(self.client2.getPR(version, pkgarch, checksum0, True), "0")
        self.assertEqual(self.client1.getPR(version, pkgarch, checksum2, True), "0.1")
        self.assertEqual(self.client0.getPR(version, pkgarch, checksum3, True), "0.3.0")

        # More "no history" mode tests

        self.assertEqual(self.client2.getPR(version, pkgarch, checksum0), "2")
        self.assertEqual(self.client1.getPR(version, pkgarch, checksum0), "2") # Same as upstream
        self.assertEqual(self.client0.getPR(version, pkgarch, checksum0), "2") # Same as upstream
        self.assertEqual(self.client1.getPR(version, pkgarch, checksum7), "3") # This could be surprising, but since the previous revision was "2", increasing it yields "3".
        # We don't know how many upstream servers we have
        # Start a read-only server with server1 as upstream
        self.server_ro = self.start_server("local-ro", self.shared_db, upstream=self.server1.address, read_only=True)
        self.client_ro = self.start_client(self.server_ro.address)

        self.assertTrue(self.client_ro.is_readonly(), "Database should be described as 'read-only'")

        # Checks on non-existing configurations
        self.assertIsNone(self.client_ro.test_pr(version, pkgarch, checksumX))
        self.assertFalse(self.client_ro.test_package("unknown", pkgarch))

        # Look up existing configurations
        self.assertEqual(self.client_ro.getPR(version, pkgarch, checksum0), "3") # "no history" mode
        self.assertEqual(self.client_ro.getPR(version, pkgarch, checksum0, True), "0") # "history" mode
        self.assertEqual(self.client_ro.getPR(version, pkgarch, checksum3), "3")
        self.assertEqual(self.client_ro.getPR(version, pkgarch, checksum3, True), "0.3.0")
        self.assertEqual(self.client_ro.max_package_pr(version, pkgarch), "2") # normal, as "3" was never saved

        # Try to insert a new value. Here, this one is known upstream.
        self.assertEqual(self.client_ro.getPR(version, pkgarch, checksum7), "3")
        # Try to insert a completely new value. As the max upstream value is already "3", it should be "3.0"
        self.assertEqual(self.client_ro.getPR(version, pkgarch, checksum10), "3.0")
        # Same with another value which only exists in the upstream's upstream server.
        # This time, as the upstream server doesn't know it, it will ask its upstream server. So that's a known one.
        self.assertEqual(self.client_ro.getPR(version, pkgarch, checksum9), "3")

class ScriptTests(unittest.TestCase):

    def setUp(self):

        self.temp_dir = tempfile.TemporaryDirectory(prefix='bb-prserv')
        self.addCleanup(self.temp_dir.cleanup)
        self.dbfile = os.path.join(self.temp_dir.name, "prtest.sqlite3")

    def test_1_start_bitbake_prserv(self):
        try:
            subprocess.check_call([BIN_DIR / "bitbake-prserv", "--start", "-f", self.dbfile])
        except subprocess.CalledProcessError as e:
            self.fail("Failed to start bitbake-prserv: %s" % e.returncode)

    def test_2_stop_bitbake_prserv(self):
        try:
            subprocess.check_call([BIN_DIR / "bitbake-prserv", "--stop"])
        except subprocess.CalledProcessError as e:
            self.fail("Failed to stop bitbake-prserv: %s" % e.returncode)
@@ -9,7 +9,7 @@
#
# Edit the 'current_releases' table for each new release cycle
#
# Usage: ./get_fixtures --all
# Usage: ./get_fixtures all
#

import os
@@ -35,22 +35,19 @@ verbose = False
# [Codename, Yocto Project Version, Release Date, Current Version, Support Level, Poky Version, BitBake branch]
current_releases = [
    # Release slot #1
    ['Scarthgap','5.0','April 2024','5.0.0 (April 2024)','Long Term Support (until April 2028)','','2.8'],
    ['Kirkstone','4.0','April 2022','4.0.8 (March 2023)','Stable - Long Term Support (until Apr. 2024)','','2.0'],
    # Release slot #2 'local'
    ['HEAD','HEAD','','Local Yocto Project','HEAD','','HEAD'],
    # Release slot #3 'master'
    ['Master','master','','Yocto Project master','master','','master'],
    # Release slot #4
    ['Styhead','5.1','November 2024','5.1.0 (November 2024)','Support for 7 months (until May 2025)','','2.10'],
    # ['Nanbield','4.3','November 2023','4.3.0 (November 2023)','Support for 7 months (until May 2024)','','2.6'],
    # ['Mickledore','4.2','April 2023','4.2.0 (April 2023)','Support for 7 months (until October 2023)','','2.4'],
    ['Mickledore','4.2','April 2023','4.2.0 (April 2023)','Support for 7 months (until October 2023)','','2.4'],
    # ['Langdale','4.1','October 2022','4.1.2 (January 2023)','Support for 7 months (until May 2023)','','2.2'],
    ['Kirkstone','4.0','April 2022','4.0.8 (March 2023)','Stable - Long Term Support (until Apr. 2024)','','2.0'],
    # ['Honister','3.4','October 2021','3.4.2 (February 2022)','Support for 7 months (until May 2022)','26.0','1.52'],
    # ['Hardknott','3.3','April 2021','3.3.5 (March 2022)','Stable - Support for 13 months (until Apr. 2022)','25.0','1.50'],
    # ['Gatesgarth','3.2','Oct 2020','3.2.4 (May 2021)','EOL','24.0','1.48'],
    # Optional Release slot #5
    #['Dunfell','3.1','April 2020','3.1.23 (February 2023)','Stable - Long Term Support (until Apr. 2024)','23.0','1.46'],
    ['Dunfell','3.1','April 2020','3.1.23 (February 2023)','Stable - Long Term Support (until Apr. 2024)','23.0','1.46'],
]

default_poky_layers = [

@@ -8,9 +8,9 @@

  <!-- Bitbake versions which correspond to the metadata release -->
  <object model="orm.bitbakeversion" pk="1">
    <field type="CharField" name="name">scarthgap</field>
    <field type="CharField" name="name">kirkstone</field>
    <field type="CharField" name="giturl">git://git.openembedded.org/bitbake</field>
    <field type="CharField" name="branch">2.8</field>
    <field type="CharField" name="branch">2.0</field>
  </object>
  <object model="orm.bitbakeversion" pk="2">
    <field type="CharField" name="name">HEAD</field>
@@ -23,23 +23,23 @@
    <field type="CharField" name="branch">master</field>
  </object>
  <object model="orm.bitbakeversion" pk="4">
    <field type="CharField" name="name">styhead</field>
    <field type="CharField" name="name">mickledore</field>
    <field type="CharField" name="giturl">git://git.openembedded.org/bitbake</field>
    <field type="CharField" name="branch">2.10</field>
    <field type="CharField" name="branch">2.4</field>
  </object>
  <object model="orm.bitbakeversion" pk="5">
    <field type="CharField" name="name">kirkstone</field>
    <field type="CharField" name="name">dunfell</field>
    <field type="CharField" name="giturl">git://git.openembedded.org/bitbake</field>
    <field type="CharField" name="branch">2.0</field>
    <field type="CharField" name="branch">1.46</field>
  </object>

  <!-- Releases available -->
  <object model="orm.release" pk="1">
    <field type="CharField" name="name">scarthgap</field>
    <field type="CharField" name="description">Openembedded Scarthgap</field>
    <field type="CharField" name="name">kirkstone</field>
    <field type="CharField" name="description">Openembedded Kirkstone</field>
    <field rel="ManyToOneRel" to="orm.bitbakeversion" name="bitbake_version">1</field>
    <field type="CharField" name="branch_name">scarthgap</field>
    <field type="TextField" name="helptext">Toaster will run your builds using the tip of the <a href=\"https://cgit.openembedded.org/openembedded-core/log/?h=scarthgap\">OpenEmbedded Scarthgap</a> branch.</field>
    <field type="CharField" name="branch_name">kirkstone</field>
    <field type="TextField" name="helptext">Toaster will run your builds using the tip of the <a href=\"https://cgit.openembedded.org/openembedded-core/log/?h=kirkstone\">OpenEmbedded Kirkstone</a> branch.</field>
  </object>
  <object model="orm.release" pk="2">
    <field type="CharField" name="name">local</field>
@@ -56,18 +56,18 @@
    <field type="TextField" name="helptext">Toaster will run your builds using the tip of the <a href=\"https://cgit.openembedded.org/openembedded-core/log/\">OpenEmbedded master</a> branch.</field>
  </object>
  <object model="orm.release" pk="4">
    <field type="CharField" name="name">styhead</field>
    <field type="CharField" name="description">Openembedded Styhead</field>
    <field type="CharField" name="name">mickledore</field>
    <field type="CharField" name="description">Openembedded Mickledore</field>
    <field rel="ManyToOneRel" to="orm.bitbakeversion" name="bitbake_version">4</field>
    <field type="CharField" name="branch_name">styhead</field>
    <field type="TextField" name="helptext">Toaster will run your builds using the tip of the <a href=\"https://cgit.openembedded.org/openembedded-core/log/?h=styhead\">OpenEmbedded Styhead</a> branch.</field>
    <field type="CharField" name="branch_name">mickledore</field>
    <field type="TextField" name="helptext">Toaster will run your builds using the tip of the <a href=\"https://cgit.openembedded.org/openembedded-core/log/?h=mickledore\">OpenEmbedded Mickledore</a> branch.</field>
  </object>
  <object model="orm.release" pk="5">
    <field type="CharField" name="name">kirkstone</field>
    <field type="CharField" name="description">Openembedded Kirkstone</field>
    <field type="CharField" name="name">dunfell</field>
    <field type="CharField" name="description">Openembedded Dunfell</field>
    <field rel="ManyToOneRel" to="orm.bitbakeversion" name="bitbake_version">5</field>
    <field type="CharField" name="branch_name">kirkstone</field>
    <field type="TextField" name="helptext">Toaster will run your builds using the tip of the <a href=\"https://cgit.openembedded.org/openembedded-core/log/?h=kirkstone\">OpenEmbedded Kirkstone</a> branch.</field>
    <field type="CharField" name="branch_name">dunfell</field>
    <field type="TextField" name="helptext">Toaster will run your builds using the tip of the <a href=\"https://cgit.openembedded.org/openembedded-core/log/?h=dunfell\">OpenEmbedded Dunfell</a> branch.</field>
  </object>

  <!-- Default layers for each release -->

@@ -8,9 +8,9 @@

  <!-- Bitbake versions which correspond to the metadata release -->
  <object model="orm.bitbakeversion" pk="1">
    <field type="CharField" name="name">scarthgap</field>
    <field type="CharField" name="name">kirkstone</field>
    <field type="CharField" name="giturl">git://git.yoctoproject.org/poky</field>
    <field type="CharField" name="branch">scarthgap</field>
    <field type="CharField" name="branch">kirkstone</field>
    <field type="CharField" name="dirpath">bitbake</field>
  </object>
  <object model="orm.bitbakeversion" pk="2">
@@ -26,26 +26,26 @@
    <field type="CharField" name="dirpath">bitbake</field>
  </object>
  <object model="orm.bitbakeversion" pk="4">
    <field type="CharField" name="name">styhead</field>
    <field type="CharField" name="name">mickledore</field>
    <field type="CharField" name="giturl">git://git.yoctoproject.org/poky</field>
    <field type="CharField" name="branch">styhead</field>
    <field type="CharField" name="branch">mickledore</field>
    <field type="CharField" name="dirpath">bitbake</field>
  </object>
  <object model="orm.bitbakeversion" pk="5">
    <field type="CharField" name="name">kirkstone</field>
    <field type="CharField" name="name">dunfell</field>
    <field type="CharField" name="giturl">git://git.yoctoproject.org/poky</field>
    <field type="CharField" name="branch">kirkstone</field>
    <field type="CharField" name="branch">dunfell</field>
    <field type="CharField" name="dirpath">bitbake</field>
  </object>


  <!-- Releases available -->
  <object model="orm.release" pk="1">
    <field type="CharField" name="name">scarthgap</field>
    <field type="CharField" name="description">Yocto Project 5.0 "Scarthgap"</field>
    <field type="CharField" name="name">kirkstone</field>
    <field type="CharField" name="description">Yocto Project 4.0 "Kirkstone"</field>
    <field rel="ManyToOneRel" to="orm.bitbakeversion" name="bitbake_version">1</field>
    <field type="CharField" name="branch_name">scarthgap</field>
    <field type="TextField" name="helptext">Toaster will run your builds using the tip of the <a href="https://git.yoctoproject.org/cgit/cgit.cgi/poky/log/?h=scarthgap">Yocto Project Scarthgap branch</a>.</field>
    <field type="CharField" name="branch_name">kirkstone</field>
    <field type="TextField" name="helptext">Toaster will run your builds using the tip of the <a href="https://git.yoctoproject.org/cgit/cgit.cgi/poky/log/?h=kirkstone">Yocto Project Kirkstone branch</a>.</field>
  </object>
  <object model="orm.release" pk="2">
    <field type="CharField" name="name">local</field>
@@ -62,18 +62,18 @@
    <field type="TextField" name="helptext">Toaster will run your builds using the tip of the <a href="https://git.yoctoproject.org/cgit/cgit.cgi/poky/log/">Yocto Project Master branch</a>.</field>
  </object>
  <object model="orm.release" pk="4">
    <field type="CharField" name="name">styhead</field>
    <field type="CharField" name="description">Yocto Project 5.1 "Styhead"</field>
    <field type="CharField" name="name">mickledore</field>
    <field type="CharField" name="description">Yocto Project 4.2 "Mickledore"</field>
    <field rel="ManyToOneRel" to="orm.bitbakeversion" name="bitbake_version">4</field>
    <field type="CharField" name="branch_name">styhead</field>
    <field type="TextField" name="helptext">Toaster will run your builds using the tip of the <a href="https://git.yoctoproject.org/cgit/cgit.cgi/poky/log/?h=styhead">Yocto Project Styhead branch</a>.</field>
    <field type="CharField" name="branch_name">mickledore</field>
    <field type="TextField" name="helptext">Toaster will run your builds using the tip of the <a href="https://git.yoctoproject.org/cgit/cgit.cgi/poky/log/?h=mickledore">Yocto Project Mickledore branch</a>.</field>
  </object>
  <object model="orm.release" pk="5">
    <field type="CharField" name="name">kirkstone</field>
    <field type="CharField" name="description">Yocto Project 4.0 "Kirkstone"</field>
    <field type="CharField" name="name">dunfell</field>
    <field type="CharField" name="description">Yocto Project 3.1 "Dunfell"</field>
    <field rel="ManyToOneRel" to="orm.bitbakeversion" name="bitbake_version">5</field>
    <field type="CharField" name="branch_name">kirkstone</field>
    <field type="TextField" name="helptext">Toaster will run your builds using the tip of the <a href="https://git.yoctoproject.org/cgit/cgit.cgi/poky/log/?h=kirkstone">Yocto Project Kirkstone branch</a>.</field>
    <field type="CharField" name="branch_name">dunfell</field>
    <field type="TextField" name="helptext">Toaster will run your builds using the tip of the <a href="https://git.yoctoproject.org/cgit/cgit.cgi/poky/log/?h=dunfell">Yocto Project Dunfell branch</a>.</field>
  </object>

  <!-- Default project layers for each release -->
@@ -155,7 +155,7 @@
    <field rel="ManyToOneRel" to="orm.layer" name="layer">1</field>
    <field type="IntegerField" name="layer_source">0</field>
    <field rel="ManyToOneRel" to="orm.release" name="release">1</field>
    <field type="CharField" name="branch">scarthgap</field>
    <field type="CharField" name="branch">kirkstone</field>
    <field type="CharField" name="dirpath">meta</field>
  </object>
  <object model="orm.layer_version" pk="2">
@@ -177,14 +177,14 @@
    <field rel="ManyToOneRel" to="orm.layer" name="layer">1</field>
    <field type="IntegerField" name="layer_source">0</field>
    <field rel="ManyToOneRel" to="orm.release" name="release">4</field>
    <field type="CharField" name="branch">styhead</field>
    <field type="CharField" name="branch">mickledore</field>
    <field type="CharField" name="dirpath">meta</field>
  </object>
  <object model="orm.layer_version" pk="5">
    <field rel="ManyToOneRel" to="orm.layer" name="layer">1</field>
    <field type="IntegerField" name="layer_source">0</field>
    <field rel="ManyToOneRel" to="orm.release" name="release">5</field>
    <field type="CharField" name="branch">kirkstone</field>
    <field type="CharField" name="branch">dunfell</field>
    <field type="CharField" name="dirpath">meta</field>
  </object>

@@ -200,7 +200,7 @@
    <field rel="ManyToOneRel" to="orm.layer" name="layer">2</field>
    <field type="IntegerField" name="layer_source">0</field>
    <field rel="ManyToOneRel" to="orm.release" name="release">1</field>
    <field type="CharField" name="branch">scarthgap</field>
    <field type="CharField" name="branch">kirkstone</field>
    <field type="CharField" name="dirpath">meta-poky</field>
  </object>
  <object model="orm.layer_version" pk="7">
@@ -222,14 +222,14 @@
    <field rel="ManyToOneRel" to="orm.layer" name="layer">2</field>
    <field type="IntegerField" name="layer_source">0</field>
    <field rel="ManyToOneRel" to="orm.release" name="release">4</field>
    <field type="CharField" name="branch">styhead</field>
    <field type="CharField" name="branch">mickledore</field>
    <field type="CharField" name="dirpath">meta-poky</field>
  </object>
  <object model="orm.layer_version" pk="10">
    <field rel="ManyToOneRel" to="orm.layer" name="layer">2</field>
    <field type="IntegerField" name="layer_source">0</field>
    <field rel="ManyToOneRel" to="orm.release" name="release">5</field>
    <field type="CharField" name="branch">kirkstone</field>
    <field type="CharField" name="branch">dunfell</field>
    <field type="CharField" name="dirpath">meta-poky</field>
  </object>

@@ -245,7 +245,7 @@
    <field rel="ManyToOneRel" to="orm.layer" name="layer">3</field>
    <field type="IntegerField" name="layer_source">0</field>
    <field rel="ManyToOneRel" to="orm.release" name="release">1</field>
    <field type="CharField" name="branch">scarthgap</field>
    <field type="CharField" name="branch">kirkstone</field>
    <field type="CharField" name="dirpath">meta-yocto-bsp</field>
  </object>
  <object model="orm.layer_version" pk="12">
@@ -267,14 +267,14 @@
    <field rel="ManyToOneRel" to="orm.layer" name="layer">3</field>
    <field type="IntegerField" name="layer_source">0</field>
    <field rel="ManyToOneRel" to="orm.release" name="release">4</field>
    <field type="CharField" name="branch">styhead</field>
    <field type="CharField" name="branch">mickledore</field>
    <field type="CharField" name="dirpath">meta-yocto-bsp</field>
  </object>
  <object model="orm.layer_version" pk="15">
    <field rel="ManyToOneRel" to="orm.layer" name="layer">3</field>
    <field type="IntegerField" name="layer_source">0</field>
    <field rel="ManyToOneRel" to="orm.release" name="release">5</field>
    <field type="CharField" name="branch">kirkstone</field>
    <field type="CharField" name="branch">dunfell</field>
    <field type="CharField" name="dirpath">meta-yocto-bsp</field>
  </object>
</django-objects>

@@ -88,29 +88,13 @@ class TestCreateNewProject(SeleniumFunctionalTestCase):
            False,
        )

    def test_create_new_project_scarthgap(self):
        """ Test create new project using:
          - Project Name: Any string
          - Release: Yocto Project 5.0 "Scarthgap" (option value: 1)
          - Merge Toaster settings: True
        """
        release = '1'
        release_title = 'Yocto Project 5.0 "Scarthgap"'
        project_name = 'projectscarthgap'
        self._create_test_new_project(
            project_name,
            release,
            release_title,
            True,
        )

    def test_create_new_project_kirkstone(self):
        """ Test create new project using:
          - Project Name: Any string
          - Release: Yocto Project 4.0 "Kirkstone" (option value: 4)
          - Release: Yocto Project 4.0 "Kirkstone" (option value: 1)
          - Merge Toaster settings: True
        """
        release = '4'
        release = '1'
        release_title = 'Yocto Project 4.0 "Kirkstone"'
        project_name = 'projectkirkstone'
        self._create_test_new_project(
@@ -120,6 +104,22 @@ class TestCreateNewProject(SeleniumFunctionalTestCase):
            True,
        )

    def test_create_new_project_dunfell(self):
        """ Test create new project using:
          - Project Name: Any string
          - Release: Yocto Project 3.1 "Dunfell" (option value: 5)
          - Merge Toaster settings: False
        """
        release = '5'
        release_title = 'Yocto Project 3.1 "Dunfell"'
        project_name = 'projectdunfell'
        self._create_test_new_project(
            project_name,
            release,
            release_title,
            False,
        )

    def test_create_new_project_local(self):
        """ Test create new project using:
          - Project Name: Any string

@@ -708,7 +708,7 @@ class TestProjectPage(SeleniumFunctionalTestCase):
        - Check layer summary
        - Check layer description
        """
        url = reverse("layerdetails", args=(TestProjectPage.project_id, 7))
        url = reverse("layerdetails", args=(TestProjectPage.project_id, 8))
        self.get(url)
        self.wait_until_visible('.page-header')
        # check title is displayed

@@ -3,18 +3,17 @@

# You can set these variables from the command line, and also
# from the environment for the first two.
SPHINXOPTS ?= -W --keep-going -j auto
SPHINXBUILD ?= sphinx-build
SPHINXOPTS ?= -W --keep-going -j auto
SPHINXBUILD ?= sphinx-build
# Release notes are excluded because they contain contributor names and commit messages which can't be modified
VALEOPTS ?= --no-wrap --glob '!migration-guides/release-notes-*.rst'
SOURCEDIR = .
VALEDOCS ?= $(SOURCEDIR)
SPHINXLINTDOCS ?= $(SOURCEDIR)
IMAGEDIRS = */svg
BUILDDIR = _build
DESTDIR = final
SVG2PNG = rsvg-convert
SVG2PDF = rsvg-convert
VALEOPTS ?= --no-wrap --glob '!migration-guides/release-notes-*.rst'
VALEDOCS ?= .
SOURCEDIR = .
IMAGEDIRS = */svg
BUILDDIR = _build
DESTDIR = final
SVG2PNG = inkscape
SVG2PDF = inkscape

ifeq ($(shell if which $(SPHINXBUILD) >/dev/null 2>&1; then echo 1; else echo 0; fi),0)
$(error "The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed")
@@ -26,12 +25,10 @@ help:

.PHONY: all help Makefile clean stylecheck publish epub latexpdf

publish: Makefile epub latexpdf html singlehtml
publish: Makefile html singlehtml
	rm -rf $(BUILDDIR)/$(DESTDIR)/
	mkdir -p $(BUILDDIR)/$(DESTDIR)/
	cp -r $(BUILDDIR)/html/* $(BUILDDIR)/$(DESTDIR)/
	mkdir -p $(BUILDDIR)/$(DESTDIR)/_static
	cp $(BUILDDIR)/epub/TheYoctoProject.epub $(BUILDDIR)/latex/theyoctoproject.pdf $(BUILDDIR)/$(DESTDIR)/_static/
	cp $(BUILDDIR)/singlehtml/index.html $(BUILDDIR)/$(DESTDIR)/singleindex.html
	sed -i -e 's@index.html#@singleindex.html#@g' $(BUILDDIR)/$(DESTDIR)/singleindex.html

@@ -43,11 +40,11 @@ PNGs := $(foreach dir, $(IMAGEDIRS), $(patsubst %.svg,%.png,$(wildcard $(SOURCEDIR)/$(dir)/*.svg)))

# Pattern rule for converting SVG to PDF
%.pdf : %.svg
	$(SVG2PDF) --format=Pdf --output=$@ $<
	$(SVG2PDF) --export-filename=$@ $<

# Pattern rule for converting SVG to PNG
%.png : %.svg
	$(SVG2PNG) --format=Png --output=$@ $<
	$(SVG2PNG) --export-filename=$@ $<

clean:
	@rm -rf $(BUILDDIR) $(PNGs) $(PDFs) poky.yaml sphinx-static/switchers.js releases.rst
@@ -57,18 +54,15 @@ stylecheck:
	vale $(VALEOPTS) $(VALEDOCS)

sphinx-lint:
	sphinx-lint $(SPHINXLINTDOCS)
	sphinx-lint $(SOURCEDIR)

epub: $(PNGs)
	$(SOURCEDIR)/set_versions.py
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

# Note: we need to pass buf_size here (which is also configurable from
# texmf.cnf), to avoid the following error:
# Unable to read an entire line---bufsize=200000. Please increase buf_size in texmf.cnf.
latexpdf: $(PDFs)
	$(SOURCEDIR)/set_versions.py
	buf_size=10000000 $(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

all: html epub latexpdf

@@ -108,9 +108,32 @@ generated with DocBook.
How to build the Yocto Project documentation
============================================

To build the documentation, you need Sphinx and a few other packages,
which depend on your host GNU/Linux distribution. Such packages are listed on
https://docs.yoctoproject.org/dev/ref-manual/system-requirements.html#required-packages-for-the-build-host
Sphinx is written in Python. While it might work with Python2, for
obvious reasons, we will only support building the Yocto Project
documentation with Python3.

Sphinx might be available in your Linux distribution's package repositories;
however, it is not recommended to use distro packages, as they might be
old versions, especially if you are using an LTS version of your
distro. The recommended method to install the latest versions of Sphinx
and of its required dependencies is to use the Python Package Index (pip).

To install all required packages, run:

   $ pip3 install sphinx sphinx_rtd_theme pyyaml

To make sure you always have the latest versions of such packages, you
should regularly run the same command with an added "--upgrade" option:

   $ pip3 install --upgrade sphinx sphinx_rtd_theme pyyaml

Also install the "inkscape" package from your distribution.
Inkscape is needed to convert SVG graphics to PNG (for EPUB
export) and to PDF (for PDF export).

Additionally, install the "fncychap.sty" TeX font if you want to build PDFs. Debian
and Ubuntu have it in the "texlive-latex-extra" package, while RedHat distributions
and OpenSUSE have it in the "texlive-fncychap" package, for example.

To build the documentation locally, run:

@@ -142,15 +165,7 @@ To run Vale:

   $ make stylecheck

Style checking the whole documentation might take some time and generate a
lot of warnings/errors, thus one can run Vale on a subset of files or
directories:

   $ make stylecheck VALEDOCS=<file>
   $ make stylecheck VALEDOCS="<file1> <file2>"
   $ make stylecheck VALEDOCS=<dir>

Lint checking the Yocto Project documentation
Link checking the Yocto Project documentation
=============================================

To fix errors which are not reported by Sphinx itself,
@@ -164,14 +179,6 @@ To run sphinx-lint:

   $ make sphinx-lint

Lint checking the whole documentation might take some time and generate a
lot of warnings/errors, thus one can run sphinx-lint on a subset of files
or directories:

   $ make sphinx-lint SPHINXLINTDOCS=<file>
   $ make sphinx-lint SPHINXLINTDOCS="<file1> <file2>"
   $ make sphinx-lint SPHINXLINTDOCS=<dir>

Sphinx theme and CSS customization
==================================

@@ -78,7 +78,7 @@ You must install essential host packages on your build host. The
following command installs the host packages based on an Ubuntu
distribution::

   $ sudo apt install &UBUNTU_DEBIAN_HOST_PACKAGES_ESSENTIAL;
   $ sudo apt install &UBUNTU_HOST_PACKAGES_ESSENTIAL;

.. note::

@@ -251,17 +251,11 @@ an entire Linux distribution, including the toolchain, from source.
   To use such mirrors, uncomment the below lines in your ``conf/local.conf``
   file in the :term:`Build Directory`::

      BB_HASHSERVE_UPSTREAM = "wss://hashserv.yoctoproject.org/ws"
      BB_HASHSERVE_UPSTREAM = "hashserv.yocto.io:8687"
      SSTATE_MIRRORS ?= "file://.* http://cdn.jsdelivr.net/yocto/sstate/all/PATH;downloadfilename=PATH"
      BB_HASHSERVE = "auto"
      BB_SIGNATURE_HANDLER = "OEEquivHash"

   The hash equivalence server needs the websockets python module version 9.1
   or later. Debian GNU/Linux 12 (Bookworm) and later, Fedora, CentOS Stream
   9 and later, and Ubuntu 22.04 (LTS) and later all have a recent enough
   package. Other supported distributions need to get the module from somewhere
   other than their package feed, e.g. via ``pip``.

#. **Start the Build:** Continue with the following command to build an OS
   image for the target, which is ``core-image-sato`` in this example:

@@ -90,9 +90,8 @@ rst_prolog = """

# external links and substitutions
extlinks = {
    'bitbake_git': ('https://git.openembedded.org/bitbake%s', None),
    'cve': ('https://nvd.nist.gov/vuln/detail/CVE-%s', 'CVE-%s'),
    'cve_mitre': ('https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-%s', 'CVE-%s'),
    'cve_nist': ('https://nvd.nist.gov/vuln/detail/CVE-%s', 'CVE-%s'),
    'yocto_home': ('https://www.yoctoproject.org%s', None),
    'yocto_wiki': ('https://wiki.yoctoproject.org/wiki%s', None),
    'yocto_dl': ('https://downloads.yoctoproject.org%s', None),
@@ -136,7 +135,6 @@ except ImportError:
    sys.exit(1)

html_logo = 'sphinx-static/YoctoProject_Logo_RGB.jpg'
html_favicon = 'sphinx-static/favicon.ico'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
@@ -160,14 +158,9 @@ html_last_updated_fmt = '%b %d, %Y'
# Remove the trailing 'dot' in section numbers
html_secnumber_suffix = " "

# We need XeTeX to process special unicode characters; sometimes the contributor
# list from the release notes contains them.
# See https://docs.readthedocs.io/en/stable/guides/pdf-non-ascii-languages.html.
latex_engine = 'xelatex'
latex_use_xindy = False
latex_elements = {
    'passoptionstopackages': '\\PassOptionsToPackage{bookmarksdepth=5}{hyperref}',
    'preamble': '\\usepackage[UTF8]{ctex}\n\\setcounter{tocdepth}{2}',
    'preamble': '\\setcounter{tocdepth}{2}',
}

# Make the EPUB builder prefer PNG to SVG because of issues rendering Inkscape SVG

@@ -47,10 +47,10 @@ Debian policy closely.

When a recipe references a git revision that does not correspond to a released
version of software (e.g. is not a tagged version), the :term:`PV` variable
should include the sign ``+``, so :term:`bitbake` automatically includes package
version information during the packaging phase::
should include the Git revision using the following to make the
version clear::

   PV = "<version>+git"
   PV = "<version>+git${SRCPV}"

In this case, ``<version>`` should be the most recently released version of the
software from the current source revision (``git describe`` can be useful for
@@ -395,7 +395,7 @@ one CVE is fixed, separate them using spaces.

CVE Examples
------------

This should be the header of a patch that fixes :cve_nist:`2015-8370` in GRUB2::
This should be the header of a patch that fixes :cve:`2015-8370` in GRUB2::

   grub2: Fix CVE-2015-8370

@@ -409,15 +409,3 @@ This should be the header of patch that fixes :cve_nist:`2015-8370` in GRUB2::
   Upstream-Status: Backport [http://git.savannah.gnu.org/cgit/grub.git/commit/?id=451d80e52d851432e109771bb8febafca7a5f1f2]
   CVE: CVE-2015-8370
   Signed-off-by: Joe Developer <joe.developer@example.com>

Patch format
============

By default, patches created with ``git format-patch`` have a `Git` version signature at the end.
To avoid having a `Git` signature at the end of generated or updated patches,
you can use `Git` configuration settings::

   git config --global format.signature ""

.. note::
   Patches generated or updated by ``devtool`` are created with no signature.

@@ -65,13 +65,6 @@ use to identify your commits::

   git config --global user.name "Ada Lovelace"
   git config --global user.email "ada.lovelace@gmail.com"

By default, Git adds a signature line at the end of patches containing the Git
version. We suggest removing it, as it doesn't add useful information.

Remove it with the following command::

   git config --global format.signature ""

Clone the Git repository for the component to modify
----------------------------------------------------

@@ -490,7 +483,7 @@ typical usage of ``git send-email``::

   git send-email --to <mailing-list-address> *.patch

Then, review each subject line and list of recipients carefully, and then
allow the command to send each message.
and then allow the command to send each message.

You will see that ``git send-email`` will automatically copy the people listed
in any commit tags such as ``Signed-off-by`` or ``Reported-by``.
@@ -832,52 +825,3 @@ Other layers may have similar testing branches but there is no formal
requirement or standard for these so please check the documentation for the
layers you are contributing to.

Acceptance of AI Generated Code
===============================

The Yocto Project and OpenEmbedded follow the guidance of the Linux Foundation
in regards to the use of generative AI tools. See:
https://www.linuxfoundation.org/legal/generative-ai.

All of the existing guidelines in this document are expected to be followed,
including in the :doc:`recipe-style-guide`, and contributing the changes with
additional requirements to the items in section
:ref:`contributor-guide/submit-changes:Implement and commit changes`.

All AI Generated Code must be labeled as such in the commit message,
prior to your ``Signed-off-by`` line. It is also strongly recommended
that any patches or code within the commit also have a comment or other
indication that this code was AI generated.

For example, here is a properly formatted commit message::

   component: Add the ability to ...

   AI-Generated: Uses GitHub Copilot

   Signed-off-by: Your Name <your.name@domain>

The ``Signed-off-by`` line must be written by you, and not the AI helper.
As a reminder, when contributing a change, your ``Signed-off-by`` line is
required and the stipulations in the `Developer's Statement of Origin
1.1 <https://developercertificate.org/>`__ still apply.

Additionally, you must stipulate that AI contributions conform to the Linux
Foundation policy, specifically:

#. Contributors should ensure that the terms and conditions of the generative AI
   tool do not place any contractual restrictions on how the tool's output can
   be used that are inconsistent with the project's open source software
   license, the project's intellectual property policies, or the Open Source
   Definition.

#. If any pre-existing copyrighted materials (including pre-existing open
   source code) authored or owned by third parties are included in the AI tool's
   output, prior to contributing such output to the project, the Contributor
   should confirm that they have permission from the third party
   owners -- such as in the form of an open source license or public domain
   declaration that complies with the project's licensing policies -- to use and
   modify such pre-existing materials and contribute them to the project.
   Additionally, the contributor should provide notice and attribution of such
   third party rights, along with information about the applicable license
   terms, with their contribution.


@@ -1,129 +0,0 @@
.. SPDX-License-Identifier: CC-BY-SA-2.0-UK

Locking and Unlocking Recipes Using ``bblock``
**********************************************

By design, the OpenEmbedded build system builds everything from scratch
unless BitBake determines that specific tasks do not require rebuilding.
At startup, it computes a signature for all tasks, based on the task's input.
Then, it compares these signatures with the ones from the sstate cache (if they
exist). Any changes cause the task to rerun.

During development, changes might trigger BitBake to rebuild certain
recipes, even when we know they do not require rebuilding at that stage.
For example, modifying a recipe can lead to rebuilding its native
counterpart, which might prove unnecessary. Editing the ``python3`` recipe,
for instance, can prompt BitBake to rebuild ``python3-native`` along with any
recipes that depend on it.
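
To understand why BitBake wants to re-run a task, you can compare the task's
two most recent signatures; for example (a quick check, assuming the
corresponding sigdata files are still available)::

   $ bitbake-diffsigs -t python3-native do_configure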

To prevent this, use ``bblock`` to lock specific tasks or recipes to
specific signatures, forcing BitBake to use the sstate cache for them.

.. warning::

   Use ``bblock`` only during the development phase.

   Forcing BitBake to use the sstate cache, regardless of input changes, means
   the recipe metadata no longer directly reflects the output. Use this feature
   with caution. If you do not understand why signatures change, see the section
   on :yocto_wiki:`understanding what changed </TipsAndTricks/Understanding_what_changed_(diffsigs_etc)>`.

Locking tasks and recipes
-------------------------

To lock a recipe, use::

   $ bblock recipe

You can also use a space-separated list of recipes to lock multiple recipes::

   $ bblock recipe1 recipe2

Locking a recipe means locking all tasks of the recipe. If you need to
lock only particular tasks, use the ``-t`` option with a comma-separated
list of tasks::

   $ bblock -t task1,task2 recipe

Unlocking tasks and recipes
---------------------------

To unlock a recipe, use the ``-r`` option::

   $ bblock -r recipe

You can also use a space-separated list of recipes to unlock multiple recipes::

   $ bblock -r recipe1 recipe2

Unlocking a recipe means unlocking all tasks of the recipe. If you need to
unlock only particular tasks, use the ``-t`` option with a comma-separated
list of tasks::

   $ bblock -r -t task1,task2 recipe

To unlock all recipes, do not specify any recipe::

   $ bblock -r

Configuration file
------------------

``bblock`` will dump the signatures in the ``build/conf/bblock.conf`` file,
included by default in :oe_git:`meta/conf/bitbake.conf </openembedded-core/tree/meta/conf/bitbake.conf>`.

To dump the file, use the ``-d`` option::

   $ bblock -d

Locking mechanism
-----------------

``bblock`` computes the signature(s) of the task(s) and sets the following
three variables: :term:`SIGGEN_LOCKEDSIGS`, :term:`SIGGEN_LOCKEDSIGS_TYPES`
and :term:`SIGGEN_LOCKEDSIGS_TASKSIG_CHECK`.

In particular, ``bblock`` sets::

   SIGGEN_LOCKEDSIGS_TASKSIG_CHECK = "info"
   SIGGEN_LOCKEDSIGS_TYPES += "${PACKAGE_ARCHS}"

   SIGGEN_LOCKEDSIGS_<package_arch> += "<recipe>:<task>:<signature>"

This produces architecture-specific locks and reminds the user that some tasks
have locked signatures.

Example
-------

When working on the ``python3`` recipe, we can lock ``python3-native`` with
the following::

   $ bblock python3-native
   $ bblock -d
   # Generated by bblock
   SIGGEN_LOCKEDSIGS_TASKSIG_CHECK = "info"
   SIGGEN_LOCKEDSIGS_TYPES += "${PACKAGE_ARCHS}"

   SIGGEN_LOCKEDSIGS_x86_64 += "python3-native:do_patch:865859c27e603ba42025b7bb766c3cd4c0f477e4962cfd39128c0619d695fce7"
   SIGGEN_LOCKEDSIGS_x86_64 += "python3-native:do_populate_sysroot:f8fa5d3194cef638416000252b959e86d0a19f6b7898e1f56b643c588cdd8605"
   SIGGEN_LOCKEDSIGS_x86_64 += "python3-native:do_prepare_recipe_sysroot:fe295ac505d9d1143313424b201c6f3f2a0a90da40a13a905b86b874705f226a"
   SIGGEN_LOCKEDSIGS_x86_64 += "python3-native:do_fetch:1b6e4728fee631bc7a8a7006855c5b8182a8224579e32e3d0a2db77c26459f25"
   SIGGEN_LOCKEDSIGS_x86_64 += "python3-native:do_unpack:2ad74d6f865ef75c35c0e6bbe3f9a90923a6b2c62c18a3ddef514ea31fbc588f"
   SIGGEN_LOCKEDSIGS_x86_64 += "python3-native:do_deploy_source_date_epoch:15f89b8483c1ad7507480f337619bb98c26e231227785eb3543db163593e7b42"
   SIGGEN_LOCKEDSIGS_x86_64 += "python3-native:do_configure:7960c13d23270fdb12b3a7c426ce1da0d2f5c7cf5e5d3f5bdce5fa330eb7d482"
   SIGGEN_LOCKEDSIGS_x86_64 += "python3-native:do_compile:012e1d4a63f1a78fc2143bd90d704dbcf5865c5257d6272aa7540ec1cd3063d9"
   SIGGEN_LOCKEDSIGS_x86_64 += "python3-native:do_install:d3401cc2afa4c996beb154beaad3e45fa0272b9c56fb86e9db14ec3544c68f9d"
   SIGGEN_LOCKEDSIGS_x86_64 += "python3-native:do_build:fa88bb7afb9046c0417c24a3fa98a058653805a8b00eda2c2d7fea68fc42f882"
   SIGGEN_LOCKEDSIGS_x86_64 += "python3-native:do_collect_spdx_deps:cc9c53ba7c495567e9a38ec4801830c425c0d1f895aa2fc66930a2edd510d9b4"
   SIGGEN_LOCKEDSIGS_x86_64 += "python3-native:do_create_spdx:766a1d09368438b7b5a1a8e2a8f823b2b731db44b57e67d8b3196de91966f9c5"
   SIGGEN_LOCKEDSIGS_x86_64 += "python3-native:do_create_package_spdx:46f80faeab25575e9977ba3bf14c819489c3d489432ae5145255635108c21020"
   SIGGEN_LOCKEDSIGS_x86_64 += "python3-native:do_recipe_qa:cb960cdb074e7944e894958db58f3dc2a0436ecf87c247feb3e095e214fec0e4"
   SIGGEN_LOCKEDSIGS_x86_64 += "python3-native:do_populate_lic:15657441621ee83f15c2e650e7edbb036870b56f55e72e046c6142da3c5783fd"
   SIGGEN_LOCKEDSIGS_x86_64 += "python3-native:do_create_manifest:24f0abbec221d27bbb2909b6e846288b12cab419f1faf9f5006ed80423d37e28"
   SIGGEN_LOCKEDSIGS_x86_64 += "python3-native:do_addto_recipe_sysroot:bcb6a1905f113128de3f88d702b706befd6a786267c045ee82532759a7c214d7"

@@ -1,13 +1,13 @@
.. SPDX-License-Identifier: CC-BY-SA-2.0-UK

Flashing Images Using `bmaptool`
********************************
Flashing Images Using ``bmaptool``
**********************************

A fast and easy way to flash an image to a bootable device is to use
`bmaptool`, which is integrated into the OpenEmbedded build system.
`bmaptool` is a generic tool that creates a file's block map (bmap) and
bmaptool, which is integrated into the OpenEmbedded build system.
bmaptool is a generic tool that creates a file's block map (bmap) and
then uses that map to copy the file. As compared to traditional tools
such as `dd` or `cp`, `bmaptool` can copy (or flash) large files like raw
such as dd or cp, bmaptool can copy (or flash) large files like raw
system image files much faster.

.. note::

@@ -20,13 +20,13 @@ system image files much faster.

        $ sudo apt install bmap-tools

   - If you are unable to install the ``bmap-tools`` package, you will
     need to build `bmaptool` before using it. Use the following command::
     need to build bmaptool before using it. Use the following command::

        $ bitbake bmaptool-native -caddto_recipe_sysroot
        $ bitbake bmaptool-native

Following is an example that shows how to flash a Wic image. Realize
that while this example uses a Wic image, you can use `bmaptool` to flash
any type of image. Use these steps to flash an image using `bmaptool`:
that while this example uses a Wic image, you can use bmaptool to flash
any type of image. Use these steps to flash an image using bmaptool:

#. *Update your local.conf File:* You need to have the following set
   in your ``local.conf`` file before building your image::

@@ -39,17 +39,18 @@ any type of image. Use these steps to flash an image using `bmaptool`:

      $ bitbake image

#. *Flash the Device:* Flash the device with the image by using `bmaptool`
#. *Flash the Device:* Flash the device with the image by using bmaptool
   depending on your particular setup. The following commands assume the
   image resides in the :term:`Build Directory`'s ``deploy/images/`` area:

   - If you installed the package for `bmaptool`, you can directly run::
   - If you have write access to the media, use this command form::

        $ sudo bmaptool copy build-directory/tmp/deploy/images/machine/image.wic /dev/sdX
        $ oe-run-native bmaptool-native bmaptool copy build-directory/tmp/deploy/images/machine/image.wic /dev/sdX

   - Otherwise, if you built `bmaptool` with BitBake, run::
   - If you do not have write access to the media, set your permissions
     first and then use the same command form::

        $ sudo chmod a+w /dev/sdX # get write access to the media, needed only once after booting
        $ sudo chmod 666 /dev/sdX
        $ oe-run-native bmaptool-native bmaptool copy build-directory/tmp/deploy/images/machine/image.wic /dev/sdX

For help on the ``bmaptool`` command, use the following command::

@@ -280,9 +280,7 @@ Follow these steps to create an :term:`Initramfs` image:

#. *Create the Initramfs Image Recipe:* You can reference the
   ``core-image-minimal-initramfs.bb`` recipe found in the
   ``meta/recipes-core`` directory of the :term:`Source Directory`
   as an example from which to work. The ``core-image-minimal-initramfs`` recipe
   is based on the :ref:`initramfs-framework <dev-manual/building:Customizing an
   Initramfs using \`\`initramfs-framework\`\`>` recipe described below.
   as an example from which to work.

#. *Decide if You Need to Bundle the Initramfs Image Into the Kernel
   Image:* If you want the :term:`Initramfs` image that is built to be bundled

@@ -310,86 +308,6 @@ Follow these steps to create an :term:`Initramfs` image:

   and bundled with the kernel image if you used the
   :term:`INITRAMFS_IMAGE_BUNDLE` variable described earlier.
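
As a reminder, bundling is driven by your configuration. A minimal
``local.conf`` sketch could look like this (the image name below is just the
usual example)::

   INITRAMFS_IMAGE = "core-image-minimal-initramfs"
   INITRAMFS_IMAGE_BUNDLE = "1"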

Customizing an Initramfs using ``initramfs-framework``
------------------------------------------------------

The ``core-image-minimal-initramfs.bb`` recipe found in
:oe_git:`meta/recipes-core/images
</openembedded-core/tree/meta/recipes-core/images>` uses the
:oe_git:`initramfs-framework_1.0.bb
</openembedded-core/tree/meta/recipes-core/initrdscripts/initramfs-framework_1.0.bb>`
recipe as its base component. The goal of the ``initramfs-framework`` recipe is
to provide the building blocks to build a customized :term:`Initramfs`.

The ``initramfs-framework`` recipe relies on shell initialization scripts
defined in :oe_git:`meta/recipes-core/initrdscripts/initramfs-framework
</openembedded-core/tree/meta/recipes-core/initrdscripts/initramfs-framework>`. Since some of
these scripts do not apply to all use cases, the ``initramfs-framework`` recipe
defines different packages:

- ``initramfs-framework-base``: this package installs the basic components of
  an :term:`Initramfs`, such as the ``init`` script or the ``/dev/console``
  character special file. As this package is a runtime dependency of all
  modules listed below, it is automatically pulled in when one of the modules
  is installed in the image.
- ``initramfs-module-exec``: support for execution of applications.
- ``initramfs-module-mdev``: support for `mdev
  <https://wiki.gentoo.org/wiki/Mdev>`__.
- ``initramfs-module-udev``: support for :wikipedia:`Udev <Udev>`.
- ``initramfs-module-e2fs``: support for :wikipedia:`ext4/ext3/ext2
  <Extended_file_system>` filesystems.
- ``initramfs-module-nfsrootfs``: support for locating and mounting the root
  partition via :wikipedia:`NFS <Network_File_System>`.
- ``initramfs-module-rootfs``: support for locating and mounting the root
  partition.
- ``initramfs-module-debug``: dynamic debug support.
- ``initramfs-module-lvm``: :wikipedia:`LVM <Logical_volume_management>` rootfs support.
- ``initramfs-module-overlayroot``: support for mounting a read-write overlay
  on top of a read-only root filesystem.

In addition to the packages defined by the ``initramfs-framework`` recipe
itself, the following packages are defined by the recipes present in
:oe_git:`meta/recipes-core/initrdscripts </openembedded-core/tree/meta/recipes-core/initrdscripts>`:

- ``initramfs-module-install``: module to create and install a partition layout
  on a selected block device.
- ``initramfs-module-install-efi``: module to create and install an EFI
  partition layout on a selected block device.
- ``initramfs-module-setup-live``: module to start a shell in the
  :term:`Initramfs` if ``root=/dev/ram0`` is passed in the `Kernel command-line
  <https://www.kernel.org/doc/html/latest/admin-guide/kernel-parameters.html>`__
  or the ``root=`` parameter was not passed.

To customize the :term:`Initramfs`, you can add or remove packages listed
earlier from the :term:`PACKAGE_INSTALL` variable with a :ref:`bbappend
<dev-manual/layers:Appending Other Layers Metadata With Your Layer>` on the
``core-image-minimal-initramfs`` recipe, or create a custom recipe for the
:term:`Initramfs` taking ``core-image-minimal-initramfs`` as an example. A
bbappend along these lines is sketched below.
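
For instance, adding LVM support to the default :term:`Initramfs` image could
be done with a one-line ``core-image-minimal-initramfs.bbappend`` in your own
layer (a sketch, not the only way to do it)::

   PACKAGE_INSTALL:append = " initramfs-module-lvm"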

Custom scripts can be added to the :term:`Initramfs` by writing your own
recipes. The recipes are conventionally named ``initramfs-module-<module name>``
where ``<module name>`` is the name of the module. The recipe should set its
:term:`RDEPENDS` package-specific variables to include
``initramfs-framework-base`` and the other packages on which the module depends
at runtime.

The recipe must install shell initialization scripts in :term:`${D} <D>`\
``/init.d`` and must follow the ``<number>-<script name>`` naming scheme where:

- ``<number>`` is a *two-digit* number that affects the execution order of the
  script compared to others. For example, the script ``80-setup-live`` would be
  executed after ``01-udev`` because 80 is greater than 01.

  This number having two digits is important here, as the scripts are executed
  alphabetically. For example, the script ``10-script`` would be executed
  before the script ``8-script``, because ``1`` sorts before ``8``.
  Therefore, the script should be named ``08-script``.

- ``<script name>`` is the script name, which you can choose freely.

If two scripts use the same ``<number>``, they are sorted alphabetically based
on ``<script name>``. A minimal recipe following these conventions is sketched
below.
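
Here is a minimal sketch of what such a module recipe could look like; the
module name, script name and license details below are all hypothetical::

   # initramfs-module-mymodule_0.1.bb (hypothetical example)
   SUMMARY = "Custom initramfs module"
   LICENSE = "MIT"
   LIC_FILES_CHKSUM = "file://${COMMON_LICENSE_DIR}/MIT;md5=0835ade698e0bcf8506ecda2f7b4f302"

   SRC_URI = "file://90-mymodule.sh"

   # Pull in the basic Initramfs components at runtime.
   RDEPENDS:${PN} = "initramfs-framework-base"

   do_install() {
       # Install the script in /init.d, following the <number>-<script name> scheme.
       install -d ${D}/init.d
       install -m 0755 ${WORKDIR}/90-mymodule.sh ${D}/init.d/90-mymodule
   }

   FILES:${PN} = "/init.d"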

Bundling an Initramfs Image From a Separate Multiconfig
-------------------------------------------------------

@@ -80,14 +80,15 @@ recipe that are enabled with :term:`IMAGE_FEATURES`. The value of
:term:`EXTRA_IMAGE_FEATURES` is added to :term:`IMAGE_FEATURES` within
``meta/conf/bitbake.conf``.

To illustrate how you can use these variables to modify your image, consider an
example that selects the SSH server. The Yocto Project ships with two SSH
servers you can use with your images: Dropbear and OpenSSH. Dropbear is a
minimal SSH server appropriate for resource-constrained environments, while
OpenSSH is a well-known standard SSH server implementation. By default, the
``core-image-sato`` image is configured to use Dropbear. The
``core-image-full-cmdline`` image includes OpenSSH. The ``core-image-minimal``
image does not contain an SSH server.
To illustrate how you can use these variables to modify your image,
consider an example that selects the SSH server. The Yocto Project ships
with two SSH servers you can use with your images: Dropbear and OpenSSH.
Dropbear is a minimal SSH server appropriate for resource-constrained
environments, while OpenSSH is a well-known standard SSH server
implementation. By default, the ``core-image-sato`` image is configured
to use Dropbear. The ``core-image-full-cmdline`` and ``core-image-lsb``
images both include OpenSSH. The ``core-image-minimal`` image does not
contain an SSH server.
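
For example, adding the standard ``ssh-server-openssh`` image feature in your
``local.conf`` is enough to pull OpenSSH into an image that does not ship an
SSH server by default::

   EXTRA_IMAGE_FEATURES += "ssh-server-openssh"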

You can customize your image and change these defaults. Edit the
:term:`IMAGE_FEATURES` variable in your recipe or use the

@@ -12,13 +12,10 @@ revision number for changes. Currently, you can do this with Apache
Subversion (SVN), Git, and Bazaar (BZR) repositories.

To enable this behavior, the :term:`PV` of
the recipe needs to include a ``+`` sign in its assignment.
Here is an example::
the recipe needs to reference
:term:`SRCPV`. Here is an example::

   PV = "1.2.3+git"

:term:`BitBake` later includes the source control information in :term:`PKGV`
during the packaging phase.
   PV = "1.2.3+git${SRCPV}"

Then, you can add the following to your
``local.conf``::

@@ -39,6 +39,7 @@ Yocto Project Development Tasks Manual

   external-scm
   read-only-rootfs
   build-quality
   runtime-testing
   debugging
   licenses
   security-subjects

@@ -47,6 +48,5 @@ Yocto Project Development Tasks Manual

   error-reporting-tool
   wayland
   qemu
   bblock

.. include:: /boilerplate.rst