diff --git a/.ci/test.py b/.ci/test.py index 8cd3e4545..a4f5ee742 100644 --- a/.ci/test.py +++ b/.ci/test.py @@ -8,24 +8,23 @@ from itertools import chain from pathlib import Path -from utils import (Plugin, configure_git, enumerate_plugins) +from utils import Plugin, configure_git, enumerate_plugins logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) global_dependencies = [ - 'pytest', - 'pytest-xdist', - 'pytest-timeout', + "pytest", + "pytest-xdist", + "pytest-timeout", ] -pip_opts = ['-qq'] +pip_opts = ["-qq"] def prepare_env(p: Plugin, directory: Path, env: dict, workflow: str) -> bool: - """ Returns whether we can run at all. Raises error if preparing failed. - """ - subprocess.check_call(['python3', '-m', 'venv', '--clear', directory]) - os.environ['PATH'] += f":{directory}" + """Returns whether we can run at all. Raises error if preparing failed.""" + subprocess.check_call(["python3", "-m", "venv", "--clear", directory]) + os.environ["PATH"] += f":{directory}" if p.framework == "pip": return prepare_env_pip(p, directory, workflow) @@ -40,21 +39,21 @@ def prepare_env(p: Plugin, directory: Path, env: dict, workflow: str) -> bool: def prepare_env_poetry(p: Plugin, directory: Path) -> bool: logging.info("Installing a new poetry virtualenv") - pip3 = directory / 'bin' / 'pip3' - poetry = directory / 'bin' / 'poetry' - python3 = directory / 'bin' / 'python3' + pip3 = directory / "bin" / "pip3" + poetry = directory / "bin" / "poetry" + python3 = directory / "bin" / "python3" - subprocess.check_call(['which', 'python3']) + subprocess.check_call(["which", "python3"]) - subprocess.check_call([ - pip3, 'install', '-U', *pip_opts, 'pip', 'wheel', 'poetry' - ], cwd=p.path.parent) + subprocess.check_call( + [pip3, "install", "-U", *pip_opts, "pip", "wheel", "poetry"], cwd=p.path.parent + ) # Install pytest (eventually we'd want plugin authors to include # it in their requirements-dev.txt, but for now let's help them a # bit). 
subprocess.check_call( - [pip3, 'install', '-U', '-qq', *global_dependencies], + [pip3, "install", "-U", "-qq", *global_dependencies], stderr=subprocess.STDOUT, ) @@ -64,23 +63,41 @@ def prepare_env_poetry(p: Plugin, directory: Path) -> bool: logging.info(f"Using poetry at {poetry} ({python3}) to run tests in {workdir}") # Now we can proceed with the actual implementation - logging.info(f"Exporting poetry {poetry} dependencies from {p.details['pyproject']}") - subprocess.check_call([ - poetry, 'export', '--with=dev', '--without-hashes', '-f', 'requirements.txt', - '--output', 'requirements.txt' - ], cwd=workdir) + logging.info( + f"Exporting poetry {poetry} dependencies from {p.details['pyproject']}" + ) + subprocess.check_call( + [ + poetry, + "export", + "--with=dev", + "--without-hashes", + "-f", + "requirements.txt", + "--output", + "requirements.txt", + ], + cwd=workdir, + ) - subprocess.check_call([ - pip3, 'install', *pip_opts, '-r', str(workdir) + "/requirements.txt", - ], stderr=subprocess.STDOUT) + subprocess.check_call( + [ + pip3, + "install", + *pip_opts, + "-r", + str(workdir) + "/requirements.txt", + ], + stderr=subprocess.STDOUT, + ) - subprocess.check_call([pip3, 'freeze']) + subprocess.check_call([pip3, "freeze"]) return True def prepare_env_pip(p: Plugin, directory: Path, workflow: str) -> bool: print("Installing a new pip virtualenv") - pip_path = directory / 'bin' / 'pip3' + pip_path = directory / "bin" / "pip3" if workflow == "nightly": install_dev_pyln_testing(pip_path) @@ -90,24 +107,24 @@ def prepare_env_pip(p: Plugin, directory: Path, workflow: str) -> bool: # Now install all the requirements print(f"Installing requirements from {p.details['requirements']}") subprocess.check_call( - [pip_path, 'install', *pip_opts, '-r', p.details['requirements']], + [pip_path, "install", *pip_opts, "-r", p.details["requirements"]], stderr=subprocess.STDOUT, ) - if p.details['devrequirements'].exists(): + if p.details["devrequirements"].exists(): print(f"Installing requirements from {p.details['devrequirements']}") subprocess.check_call( - [pip_path, 'install', *pip_opts, '-r', p.details['devrequirements']], + [pip_path, "install", *pip_opts, "-r", p.details["devrequirements"]], stderr=subprocess.STDOUT, ) - subprocess.check_call([pip_path, 'freeze']) + subprocess.check_call([pip_path, "freeze"]) return True def prepare_generic(p: Plugin, directory: Path, env: dict, workflow: str) -> bool: print("Installing a new generic virtualenv") - pip_path = directory / 'bin' / 'pip3' + pip_path = directory / "bin" / "pip3" if workflow == "nightly": install_dev_pyln_testing(pip_path) @@ -115,49 +132,51 @@ def prepare_generic(p: Plugin, directory: Path, env: dict, workflow: str) -> boo install_pyln_testing(pip_path) # Now install all the requirements - if p.details['requirements'].exists(): + if p.details["requirements"].exists(): print(f"Installing requirements from {p.details['requirements']}") subprocess.check_call( - [pip_path, 'install', '-U', *pip_opts, '-r', p.details['requirements']], + [pip_path, "install", "-U", *pip_opts, "-r", p.details["requirements"]], stderr=subprocess.STDOUT, ) - if p.details['setup'].exists(): + if p.details["setup"].exists(): print(f"Running setup script from {p.details['setup']}") subprocess.check_call( - ['bash', p.details['setup'], f'TEST_DIR={directory}'], + ["bash", p.details["setup"], f"TEST_DIR={directory}"], env=env, stderr=subprocess.STDOUT, ) - subprocess.check_call([pip_path, 'freeze']) + subprocess.check_call([pip_path, "freeze"]) return True def 
install_pyln_testing(pip_path): # Many plugins only implicitly depend on pyln-testing, so let's help them - cln_path = os.environ['CLN_PATH'] + cln_path = os.environ["CLN_PATH"] # Install pytest (eventually we'd want plugin authors to include # it in their requirements-dev.txt, but for now let's help them a # bit). subprocess.check_call( - [pip_path, 'install', *pip_opts, *global_dependencies], + [pip_path, "install", *pip_opts, *global_dependencies], stderr=subprocess.STDOUT, ) subprocess.check_call( - [pip_path, 'install', '-U', *pip_opts, 'pip', 'wheel'], + [pip_path, "install", "-U", *pip_opts, "pip", "wheel"], stderr=subprocess.STDOUT, ) subprocess.check_call( [ - pip_path, 'install', *pip_opts, + pip_path, + "install", + *pip_opts, cln_path + "/contrib/pyln-client", cln_path + "/contrib/pyln-testing", "MarkupSafe>=2.0", - 'itsdangerous>=2.0' + "itsdangerous>=2.0", ], stderr=subprocess.STDOUT, ) @@ -165,11 +184,18 @@ def install_pyln_testing(pip_path): def install_dev_pyln_testing(pip_path): # Many plugins only implicitly depend on pyln-testing, so let's help them - cln_path = os.environ['CLN_PATH'] + cln_path = os.environ["CLN_PATH"] - subprocess.check_call([ - pip_path, 'install', *pip_opts, '-r', cln_path + "/requirements.txt", - ], stderr=subprocess.STDOUT) + subprocess.check_call( + [ + pip_path, + "install", + *pip_opts, + "-r", + cln_path + "/requirements.txt", + ], + stderr=subprocess.STDOUT, + ) def run_one(p: Plugin, workflow: str) -> bool: @@ -179,24 +205,30 @@ def run_one(p: Plugin, workflow: str) -> bool: print("No test files found, skipping plugin {p.name}".format(p=p)) return True - print("Found {ctestfiles} test files, creating virtualenv and running tests".format(ctestfiles=len(p.testfiles))) + print( + "Found {ctestfiles} test files, creating virtualenv and running tests".format( + ctestfiles=len(p.testfiles) + ) + ) print("::group::{p.name}".format(p=p)) # Create a virtual env vdir = tempfile.TemporaryDirectory() vpath = Path(vdir.name) - bin_path = vpath / 'bin' - pytest_path = vpath / 'bin' / 'pytest' + bin_path = vpath / "bin" + pytest_path = vpath / "bin" / "pytest" env = os.environ.copy() - env.update({ - # Need to customize PATH so lightningd can find the correct python3 - 'PATH': "{}:{}".format(bin_path, os.environ['PATH']), - # Some plugins require a valid locale to be set - 'LC_ALL': 'C.UTF-8', - 'LANG': 'C.UTF-8', - }) + env.update( + { + # Need to customize PATH so lightningd can find the correct python3 + "PATH": "{}:{}".format(bin_path, os.environ["PATH"]), + # Some plugins require a valid locale to be set + "LC_ALL": "C.UTF-8", + "LANG": "C.UTF-8", + } + ) try: if not prepare_env(p, vpath, env, workflow): @@ -211,11 +243,11 @@ def run_one(p: Plugin, workflow: str) -> bool: cmd = [ str(pytest_path), - '-vvv', - '--timeout=600', - '--timeout-method=thread', - '--color=yes', - '-n=5', + "-vvv", + "--timeout=600", + "--timeout-method=thread", + "--color=yes", + "-n=5", ] logging.info(f"Running `{' '.join(cmd)}` in directory {p.path.resolve()}") @@ -254,11 +286,11 @@ def push_gather_data(data: dict, workflow: str, python_version: str): subprocess.run(["git", "checkout", "badges"]) filenames_to_add = [] for plugin_name, result in data.items(): - filename = write_gather_data_file( - plugin_name, result, workflow, python_version - ) + filename = write_gather_data_file(plugin_name, result, workflow, python_version) filenames_to_add.append(filename) - output = subprocess.check_output(list(chain(["git", "add", "-v"], filenames_to_add))).decode("utf-8") + output = 
subprocess.check_output( + list(chain(["git", "add", "-v"], filenames_to_add)) + ).decode("utf-8") print(f"output from git add: {output}") if output != "": output = subprocess.check_output( @@ -272,9 +304,9 @@ def push_gather_data(data: dict, workflow: str, python_version: str): print(f"output from git commit: {output}") for _ in range(10): subprocess.run(["git", "pull", "--rebase"]) - output = subprocess.run(["git", "push", "origin", "badges"], - capture_output=True, - text=True) + output = subprocess.run( + ["git", "push", "origin", "badges"], capture_output=True, text=True + ) if output.returncode == 0: print("Push successful") break @@ -287,7 +319,9 @@ def push_gather_data(data: dict, workflow: str, python_version: str): print("Done.") -def write_gather_data_file(plugin_name: str, result, workflow: str, python_version: str) -> str: +def write_gather_data_file( + plugin_name: str, result, workflow: str, python_version: str +) -> str: _dir = f".badges/gather_data/{workflow}/{plugin_name}" filename = os.path.join(_dir, f"python{python_version}.txt") os.makedirs(_dir, exist_ok=True) @@ -307,11 +341,11 @@ def gather_old_failures(old_failures: list, workflow: str): directory = ".badges" for filename in os.listdir(directory): - if filename.endswith(f'_{workflow}.json'): + if filename.endswith(f"_{workflow}.json"): file_path = os.path.join(directory, filename) - plugin_name = filename.rsplit(f'_{workflow}.json', 1)[0] + plugin_name = filename.rsplit(f"_{workflow}.json", 1)[0] - with open(file_path, 'r') as file: + with open(file_path, "r") as file: data = json.load(file) if data["color"] == "red": old_failures.append(plugin_name) @@ -319,19 +353,26 @@ def gather_old_failures(old_failures: list, workflow: str): print(f"Old failures: {old_failures}") print("Done.") -def run_all(workflow: str, python_version: str, update_badges: bool, plugin_names: list): - root_path = subprocess.check_output([ - 'git', - 'rev-parse', - '--show-toplevel' - ]).decode('ASCII').strip() + +def run_all( + workflow: str, python_version: str, update_badges: bool, plugin_names: list +): + root_path = ( + subprocess.check_output(["git", "rev-parse", "--show-toplevel"]) + .decode("ASCII") + .strip() + ) root = Path(root_path) plugins = list(enumerate_plugins(root)) if plugin_names != []: plugins = [p for p in plugins if p.name in plugin_names] - print("Testing the following plugins: {names}".format(names=[p.name for p in plugins])) + print( + "Testing the following plugins: {names}".format( + names=[p.name for p in plugins] + ) + ) else: print("Testing all plugins in {root}".format(root=root)) @@ -343,7 +384,9 @@ def run_all(workflow: str, python_version: str, update_badges: bool, plugin_name gather_old_failures(old_failures, workflow) if update_badges: - push_gather_data(collect_gather_data(results, success), workflow, python_version) + push_gather_data( + collect_gather_data(results, success), workflow, python_version + ) if not success: print("The following tests failed:") @@ -361,10 +404,14 @@ def run_all(workflow: str, python_version: str, update_badges: bool, plugin_name if __name__ == "__main__": import argparse - parser = argparse.ArgumentParser(description='Plugins test script') + parser = argparse.ArgumentParser(description="Plugins test script") parser.add_argument("workflow", type=str, help="Name of the GitHub workflow") parser.add_argument("python_version", type=str, help="Python version") - parser.add_argument("--update-badges", action='store_true', help="Whether badges data should be updated") + 
parser.add_argument( + "--update-badges", + action="store_true", + help="Whether badges data should be updated", + ) parser.add_argument("plugins", nargs="*", default=[], help="List of plugins") args = parser.parse_args() diff --git a/.ci/update_badges.py b/.ci/update_badges.py index de9d463b2..89ca7acb0 100644 --- a/.ci/update_badges.py +++ b/.ci/update_badges.py @@ -7,7 +7,9 @@ from utils import configure_git, enumerate_plugins -def update_and_commit_badge(plugin_name: str, passed: bool, workflow: str, has_tests: bool) -> bool: +def update_and_commit_badge( + plugin_name: str, passed: bool, workflow: str, has_tests: bool +) -> bool: json_data = {"schemaVersion": 1, "label": "", "message": "✔", "color": "green"} if not passed: json_data.update({"message": "✗", "color": "red"}) @@ -34,16 +36,18 @@ def update_and_commit_badge(plugin_name: str, passed: bool, workflow: str, has_t def cleanup_old_results(plugin_name: str, file: Path) -> bool: os.remove(file) - print(f"Removed deprecated result {file.name} for {plugin_name}, we no longer test for this version!") + print( + f"Removed deprecated result {file.name} for {plugin_name}, we no longer test for this version!" + ) subprocess.run(["git", "add", "-v", file]) subprocess.run( - [ - "git", - "commit", - "-m", - f'Remove deprecated result {file.name}', - ] - ) + [ + "git", + "commit", + "-m", + f"Remove deprecated result {file.name}", + ] + ) return True @@ -96,9 +100,9 @@ def push_badges_data(workflow: str, python_versions_tested: list): if any_changes: for _ in range(10): subprocess.run(["git", "pull", "--rebase"]) - output = subprocess.run(["git", "push", "origin", "badges"], - capture_output=True, - text=True) + output = subprocess.run( + ["git", "push", "origin", "badges"], capture_output=True, text=True + ) if output.returncode == 0: print("Push successful") break @@ -117,7 +121,11 @@ def push_badges_data(workflow: str, python_versions_tested: list): parser = argparse.ArgumentParser(description="Plugins completion script") parser.add_argument("workflow", type=str, help="Name of the GitHub workflow") parser.add_argument( - "python_versions_tested", nargs="*", type=str, default=[], help="Python versions tested" + "python_versions_tested", + nargs="*", + type=str, + default=[], + help="Python versions tested", ) args = parser.parse_args() diff --git a/.ci/utils.py b/.ci/utils.py index 6cd56f83b..ba7c32ad8 100644 --- a/.ci/utils.py +++ b/.ci/utils.py @@ -36,7 +36,15 @@ def get_testfiles(p: Path) -> List[PosixPath]: test_files = [] for x in p.iterdir(): if x.is_dir() and x.name == "tests": - test_files.extend([y for y in x.iterdir() if y.is_file() and y.name.startswith("test_") and y.name.endswith(".py")]) + test_files.extend( + [ + y + for y in x.iterdir() + if y.is_file() + and y.name.startswith("test_") + and y.name.endswith(".py") + ] + ) elif x.is_file() and x.name.startswith("test_") and x.name.endswith(".py"): test_files.append(x) return test_files diff --git a/backup/backend.py b/backup/backend.py index 47fe274d7..de85e6534 100644 --- a/backup/backend.py +++ b/backup/backend.py @@ -11,7 +11,7 @@ # This is used by the plugin from time to time to allow the backend to compress # the changelog and forms a new basis for the backup. # If `Change` contains a snapshot and a transaction, they apply in that order. 
-Change = namedtuple('Change', ['version', 'snapshot', 'transaction']) +Change = namedtuple("Change", ["version", "snapshot", "transaction"]) class Backend(object): @@ -37,14 +37,11 @@ def add_change(self, change: Change) -> bool: raise NotImplementedError def initialize(self) -> bool: - """Set up any resources needed by this backend. - - """ + """Set up any resources needed by this backend.""" raise NotImplementedError def stream_changes(self) -> Iterator[Change]: - """Retrieve changes from the backend in order to perform a restore. - """ + """Retrieve changes from the backend in order to perform a restore.""" raise NotImplementedError def rewind(self) -> bool: @@ -63,8 +60,7 @@ def rewind(self) -> bool: raise NotImplementedError def compact(self): - """Apply some incremental changes to the snapshot to reduce our size. - """ + """Apply some incremental changes to the snapshot to reduce our size.""" raise NotImplementedError def _db_open(self, dest: str) -> sqlite3.Connection: @@ -75,7 +71,7 @@ def _db_open(self, dest: str) -> sqlite3.Connection: def _restore_snapshot(self, snapshot: bytes, dest: str): if os.path.exists(dest): os.unlink(dest) - with open(dest, 'wb') as f: + with open(dest, "wb") as f: f.write(snapshot) self.db = self._db_open(dest) @@ -87,8 +83,12 @@ def _rewrite_stmt(self, stmt: str) -> str: re-inserts the space. """ - stmt = re.sub(r'reserved_til=([0-9]+)WHERE', r'reserved_til=\1 WHERE', stmt) - stmt = re.sub(r'peer_id=([0-9]+)WHERE channels.id=', r'peer_id=\1 WHERE channels.id=', stmt) + stmt = re.sub(r"reserved_til=([0-9]+)WHERE", r"reserved_til=\1 WHERE", stmt) + stmt = re.sub( + r"peer_id=([0-9]+)WHERE channels.id=", + r"peer_id=\1 WHERE channels.id=", + stmt, + ) return stmt def _restore_transaction(self, tx: Iterator[str]): @@ -109,9 +109,7 @@ def restore(self, dest: str, remove_existing: bool = False): if os.path.exists(dest): if not remove_existing: raise ValueError( - "Destination for backup restore exists: {dest}".format( - dest=dest - ) + "Destination for backup restore exists: {dest}".format(dest=dest) ) os.unlink(dest) diff --git a/backup/backends.py b/backup/backends.py index e59a92c03..668807233 100644 --- a/backup/backends.py +++ b/backup/backends.py @@ -1,4 +1,5 @@ -'''Create a backend instance based on URI scheme dispatch.''' +"""Create a backend instance based on URI scheme dispatch.""" + from typing import Type from urllib.parse import urlparse @@ -8,10 +9,9 @@ def resolve_backend_class(backend_url): - backend_map: Mapping[str, Type[Backend]] = { - 'file': FileBackend, - 'socket': SocketBackend, + "file": FileBackend, + "socket": SocketBackend, } p = urlparse(backend_url) backend_cl = backend_map.get(p.scheme, None) @@ -21,13 +21,19 @@ def resolve_backend_class(backend_url): def get_backend(destination, create=False, require_init=False): backend_cl = resolve_backend_class(destination) if backend_cl is None: - raise ValueError("No backend implementation found for {destination}".format( - destination=destination, - )) + raise ValueError( + "No backend implementation found for {destination}".format( + destination=destination, + ) + ) backend = backend_cl(destination, create=create) initialized = backend.initialize() if require_init and not initialized: - kill("Could not initialize the backup {}, please use 'backup-cli' to initialize the backup first.".format(destination)) + kill( + "Could not initialize the backup {}, please use 'backup-cli' to initialize the backup first.".format( + destination + ) + ) assert backend.version is not None assert 
backend.prev_version is not None return backend diff --git a/backup/backup.py b/backup/backup.py index 4874ff308..d26ce137a 100755 --- a/backup/backup.py +++ b/backup/backup.py @@ -17,7 +17,7 @@ handler = logging.StreamHandler(sys.stdout) handler.setLevel(logging.DEBUG) -formatter = logging.Formatter('%(message)s') +formatter = logging.Formatter("%(message)s") handler.setFormatter(formatter) root.addHandler(handler) @@ -37,9 +37,11 @@ def check_first_write(plugin, data_version): """ backend = plugin.backend - logging.info("Comparing backup version {} versus first write version {}".format( - backend.version, data_version - )) + logging.info( + "Comparing backup version {} versus first write version {}".format( + backend.version, data_version + ) + ) if backend.version == data_version - 1: logging.info("Versions match up") @@ -50,13 +52,15 @@ def check_first_write(plugin, data_version): return True elif backend.prev_version > data_version - 1: - kill("Core-Lightning seems to have lost some state (failed restore?). Emergency shutdown.") + kill( + "Core-Lightning seems to have lost some state (failed restore?). Emergency shutdown." + ) else: kill("Backup is out of date, we cannot continue safely. Emergency shutdown.") -@plugin.hook('db_write') +@plugin.hook("db_write") def on_db_write(writes, data_version, plugin, **kwargs): change = Change(data_version, None, writes) if not plugin.initialized: @@ -85,14 +89,14 @@ def compact(plugin): @plugin.init() def on_init(options, **kwargs): - dest = options.get('backup-destination', 'null') - if dest != 'null': + dest = options.get("backup-destination", "null") + if dest != "null": plugin.log( "The `--backup-destination` option is deprecated and will be " "removed in future versions of the backup plugin. Please remove " "it from your configuration. The destination is now determined by " "the `backup.lock` file in the lightning directory", - level="warn" + level="warn", ) # IMPORTANT NOTE @@ -109,12 +113,9 @@ def kill(message: str): # Search for lightningd in my ancestor processes: procs = [p for p in psutil.Process(os.getpid()).parents()] for p in procs: - if p.name() != 'lightningd': + if p.name() != "lightningd": continue - plugin.log("Killing process {name} ({pid})".format( - name=p.name(), - pid=p.pid - )) + plugin.log("Killing process {name} ({pid})".format(name=p.name(), pid=p.pid)) p.kill() # Sleep forever, just in case the master doesn't die on us... @@ -123,8 +124,9 @@ def kill(message: str): plugin.add_option( - 'backup-destination', None, - 'UNUSED. Kept for backward compatibility only. Please update your configuration to remove this option.' + "backup-destination", + None, + "UNUSED. Kept for backward compatibility only. 
Please update your configuration to remove this option.", ) @@ -135,10 +137,10 @@ def kill(message: str): kill("Could not find backup.lock in the lightning-dir") try: - d = json.load(open("backup.lock", 'r')) - destination = d['backend_url'] + d = json.load(open("backup.lock", "r")) + destination = d["backend_url"] plugin.backend = get_backend(destination, require_init=True) plugin.run() except Exception: - logging.exception('Exception while initializing backup plugin') - kill('Exception while initializing plugin, terminating lightningd') + logging.exception("Exception while initializing backup plugin") + kill("Exception while initializing plugin, terminating lightningd") diff --git a/backup/filebackend.py b/backup/filebackend.py index 22e25deb7..2195e3189 100644 --- a/backup/filebackend.py +++ b/backup/filebackend.py @@ -18,9 +18,13 @@ def __init__(self, destination: str, create: bool): self.url = urlparse(self.destination) if os.path.exists(self.url.path) and create: - raise ValueError("Attempted to create a FileBackend, but file already exists.") + raise ValueError( + "Attempted to create a FileBackend, but file already exists." + ) if not os.path.exists(self.url.path) and not create: - raise ValueError("Attempted to open a FileBackend but file doesn't already exists, use `backup-cli init` to initialize it first.") + raise ValueError( + "Attempted to open a FileBackend but the file doesn't exist yet, use `backup-cli init` to initialize it first." + ) if create: # Initialize a new backup file self.version, self.prev_version = 0, 0 @@ -32,12 +36,18 @@ def initialize(self) -> bool: return self.read_metadata() def write_metadata(self): - blob = struct.pack("!IIQIQQ", 0x01, self.version, self.offsets[0], - self.prev_version, self.offsets[1], - self.version_count) + blob = struct.pack( + "!IIQIQQ", + 0x01, + self.version, + self.offsets[0], + self.prev_version, + self.offsets[1], + self.version_count, + ) # Pad the header - blob += b'\x00' * (512 - len(blob)) + blob += b"\x00" * (512 - len(blob)) mode = "rb+" if os.path.exists(self.url.path) else "wb+" with open(self.url.path, mode) as f: @@ -46,31 +56,41 @@ def read_metadata(self): - with open(self.url.path, 'rb') as f: + with open(self.url.path, "rb") as f: blob = f.read(512) if len(blob) != 512: - logging.warn("Corrupt FileBackend header, expected 512 bytes, got {} bytes".format(len(blob))) + logging.warn( + "Corrupt FileBackend header, expected 512 bytes, got {} bytes".format( + len(blob) + ) + ) return False - file_version, = struct.unpack_from("!I", blob) + (file_version,) = struct.unpack_from("!I", blob) if file_version != 1: logging.warn("Unknown FileBackend version {}".format(file_version)) return False - self.version, self.offsets[0], self.prev_version, self.offsets[1], self.version_count, = struct.unpack_from("!IQIQQ", blob, offset=4) + ( + self.version, + self.offsets[0], + self.prev_version, + self.offsets[1], + self.version_count, + ) = struct.unpack_from("!IQIQQ", blob, offset=4) return True def add_change(self, entry: Change) -> bool: - typ = b'\x01' if entry.snapshot is None else b'\x02' - if typ == b'\x01': - payload = b'\x00'.join([t.encode('UTF-8') for t in entry.transaction]) - elif typ == b'\x02': + typ = b"\x01" if entry.snapshot is None else b"\x02" + if typ == b"\x01": + payload = b"\x00".join([t.encode("UTF-8") for t in entry.transaction]) + elif typ == b"\x02": payload = entry.snapshot length = struct.pack("!I", len(payload)) version = struct.pack("!I", entry.version) - with 
open(self.url.path, 'ab') as f: + with open(self.url.path, "ab") as f: f.seek(self.offsets[0]) f.write(length) f.write(version) @@ -100,7 +120,7 @@ def rewind(self): def stream_changes(self) -> Iterator[Change]: self.read_metadata() version = -1 - with open(self.url.path, 'rb') as f: + with open(self.url.path, "rb") as f: # Skip the header f.seek(512) while version < self.version: @@ -110,7 +130,7 @@ yield Change( version=version, snapshot=None, - transaction=[t.decode('UTF-8') for t in payload.split(b'\x00')] + transaction=[t.decode("UTF-8") for t in payload.split(b"\x00")], ) elif typ == 2: yield Change(version=version, snapshot=payload, transaction=None) @@ -118,7 +138,11 @@ raise ValueError("Unknown FileBackend entry type {}".format(typ)) if version != self.version: - raise ValueError("Versions do not match up: restored version {}, backend version {}".format(version, self.version)) + raise ValueError( + "Versions do not match up: restored version {}, backend version {}".format( + version, self.version + ) + ) assert version == self.version def compact(self): @@ -137,9 +161,9 @@ snapshotpath = os.path.join(tmp.name, "lightningd.sqlite3") stats = { - 'before': { - 'backupsize': os.stat(self.url.path).st_size, - 'version_count': self.version_count, + "before": { + "backupsize": os.stat(self.url.path).st_size, + "version_count": self.version_count, }, } @@ -178,13 +202,14 @@ snapshot = Change( version=change.version - 1, - snapshot=open(snapshotpath, 'rb').read(), - transaction=None + snapshot=open(snapshotpath, "rb").read(), + transaction=None, + ) + print( + "Adding initial snapshot with {} bytes for version {}".format( + len(snapshot.snapshot), snapshot.version + ) ) - print("Adding intial snapshot with {} bytes for version {}".format( - len(snapshot.snapshot), - snapshot.version - )) clone.add_change(snapshot) assert clone.version == change.version - 1 @@ -194,15 +219,17 @@ assert self.version == clone.version assert self.prev_version == clone.prev_version - stats['after'] = { - 'version_count': clone.version_count, - 'backupsize': os.stat(clonepath).st_size, + stats["after"] = { + "version_count": clone.version_count, + "backupsize": os.stat(clonepath).st_size, } - print("Compacted {} changes, saving {} bytes, swapping backups".format( - stats['before']['version_count'] - stats['after']['version_count'], - stats['before']['backupsize'] - stats['after']['backupsize'], - )) + print( + "Compacted {} changes, saving {} bytes, swapping backups".format( + stats["before"]["version_count"] - stats["after"]["version_count"], + stats["before"]["backupsize"] - stats["after"]["backupsize"], + ) + ) shutil.move(clonepath, self.url.path) # Re-initialize ourselves so we have the correct metadata diff --git a/backup/protocol.py b/backup/protocol.py index 8157074d2..3ad7828a1 100644 --- a/backup/protocol.py +++ b/backup/protocol.py @@ -1,6 +1,7 @@ -''' +""" Socket-based remote backup protocol. This is used to create a connection to a backup backend, and send it incremental database updates. 
-''' +""" + import socket import struct from typing import Tuple @@ -19,56 +20,59 @@ class PacketType: NACK = 0x07 METADATA = 0x08 DONE = 0x09 - COMPACT = 0x0a - COMPACT_RES = 0x0b + COMPACT = 0x0A + COMPACT_RES = 0x0B PKT_CHANGE_TYPES = {PacketType.CHANGE, PacketType.SNAPSHOT} def recvall(sock: socket.socket, n: int) -> bytearray: - '''Receive exactly n bytes from a socket.''' + """Receive exactly n bytes from a socket.""" buf = bytearray(n) view = memoryview(buf) ptr = 0 while ptr < n: count = sock.recv_into(view[ptr:]) if count == 0: - raise IOError('Premature end of stream') + raise IOError("Premature end of stream") ptr += count return buf def send_packet(sock: socket.socket, typ: int, payload: bytes) -> None: - sock.sendall(struct.pack('!BI', typ, len(payload))) + sock.sendall(struct.pack("!BI", typ, len(payload))) sock.sendall(payload) def recv_packet(sock: socket.socket) -> Tuple[int, bytes]: - (typ, length) = struct.unpack('!BI', recvall(sock, 5)) + (typ, length) = struct.unpack("!BI", recvall(sock, 5)) payload = recvall(sock, length) return (typ, payload) def change_from_packet(typ, payload): - '''Convert a network packet to a Change object.''' + """Convert a network packet to a Change object.""" if typ == PacketType.CHANGE: - (version, ) = struct.unpack('!I', payload[0:4]) + (version,) = struct.unpack("!I", payload[0:4]) payload = zlib.decompress(payload[4:]) - return Change(version=version, snapshot=None, - transaction=[t.decode('UTF-8') for t in payload.split(b'\x00')]) + return Change( + version=version, + snapshot=None, + transaction=[t.decode("UTF-8") for t in payload.split(b"\x00")], + ) elif typ == PacketType.SNAPSHOT: - (version, ) = struct.unpack('!I', payload[0:4]) + (version,) = struct.unpack("!I", payload[0:4]) payload = zlib.decompress(payload[4:]) return Change(version=version, snapshot=payload, transaction=None) - raise ValueError('Not a change (typ {})'.format(typ)) + raise ValueError("Not a change (typ {})".format(typ)) def packet_from_change(entry): - '''Convert a Change object to a network packet.''' + """Convert a Change object to a network packet.""" if entry.snapshot is None: typ = PacketType.CHANGE - payload = b'\x00'.join([t.encode('UTF-8') for t in entry.transaction]) + payload = b"\x00".join([t.encode("UTF-8") for t in entry.transaction]) else: typ = PacketType.SNAPSHOT payload = entry.snapshot diff --git a/backup/server.py b/backup/server.py index 82391f363..e0b9cba44 100644 --- a/backup/server.py +++ b/backup/server.py @@ -6,7 +6,14 @@ from typing import Tuple from backend import Backend -from protocol import PacketType, PKT_CHANGE_TYPES, change_from_packet, packet_from_change, send_packet, recv_packet +from protocol import ( + PacketType, + PKT_CHANGE_TYPES, + change_from_packet, + packet_from_change, + send_packet, + recv_packet, +) class SystemdHandler(logging.Handler): @@ -19,7 +26,7 @@ class SystemdHandler(logging.Handler): # NOTICE <5> logging.INFO: "<6>", logging.DEBUG: "<7>", - logging.NOTSET: "<7>" + logging.NOTSET: "<7>", } def __init__(self, stream=sys.stdout): @@ -39,12 +46,12 @@ def setup_server_logging(mode, level): root_logger = logging.getLogger() root_logger.setLevel(level.upper()) mode = mode.lower() - if mode == 'systemd': + if mode == "systemd": # replace handler with systemd one root_logger.handlers = [] root_logger.addHandler(SystemdHandler()) else: - assert mode == 'plain' + assert mode == "plain" class SocketServer: @@ -63,67 +70,77 @@ def _recv_packet(self) -> Tuple[int, bytes]: def _handle_conn(self, conn) -> None: # Can only 
handle one connection at a time - logging.info('Servicing incoming connection') + logging.info("Servicing incoming connection") self.sock = conn while True: try: (typ, payload) = self._recv_packet() except IOError: - logging.info('Connection closed') + logging.info("Connection closed") break if typ in PKT_CHANGE_TYPES: change = change_from_packet(typ, payload) if typ == PacketType.CHANGE: - logging.debug('Received CHANGE {}'.format(change.version)) + logging.debug("Received CHANGE {}".format(change.version)) else: - logging.info('Received SNAPSHOT {}'.format(change.version)) + logging.info("Received SNAPSHOT {}".format(change.version)) self.backend.add_change(change) - self._send_packet(PacketType.ACK, struct.pack("!I", self.backend.version)) + self._send_packet( + PacketType.ACK, struct.pack("!I", self.backend.version) + ) elif typ == PacketType.REWIND: - logging.info('Received REWIND') - to_version, = struct.unpack('!I', payload) + logging.info("Received REWIND") + (to_version,) = struct.unpack("!I", payload) if to_version != self.backend.prev_version: - logging.info('Cannot rewind to version {}'.format(to_version)) - self._send_packet(PacketType.NACK, struct.pack("!I", self.backend.version)) + logging.info("Cannot rewind to version {}".format(to_version)) + self._send_packet( + PacketType.NACK, struct.pack("!I", self.backend.version) + ) else: self.backend.rewind() - self._send_packet(PacketType.ACK, struct.pack("!I", self.backend.version)) + self._send_packet( + PacketType.ACK, struct.pack("!I", self.backend.version) + ) elif typ == PacketType.REQ_METADATA: - logging.debug('Received REQ_METADATA') - blob = struct.pack("!IIIQ", 0x01, self.backend.version, - self.backend.prev_version, - self.backend.version_count) + logging.debug("Received REQ_METADATA") + blob = struct.pack( + "!IIIQ", + 0x01, + self.backend.version, + self.backend.prev_version, + self.backend.version_count, + ) self._send_packet(PacketType.METADATA, blob) elif typ == PacketType.RESTORE: - logging.info('Received RESTORE') + logging.info("Received RESTORE") for change in self.backend.stream_changes(): (typ, payload) = packet_from_change(change) self._send_packet(typ, payload) - self._send_packet(PacketType.DONE, b'') + self._send_packet(PacketType.DONE, b"") elif typ == PacketType.COMPACT: - logging.info('Received COMPACT') + logging.info("Received COMPACT") stats = self.backend.compact() self._send_packet(PacketType.COMPACT_RES, json.dumps(stats).encode()) elif typ == PacketType.ACK: - logging.debug('Received ACK') + logging.debug("Received ACK") elif typ == PacketType.NACK: - logging.debug('Received NACK') + logging.debug("Received NACK") elif typ == PacketType.METADATA: - logging.debug('Received METADATA') + logging.debug("Received METADATA") elif typ == PacketType.COMPACT_RES: - logging.debug('Received COMPACT_RES') + logging.debug("Received COMPACT_RES") else: - raise Exception('Unknown or unexpected packet type {}'.format(typ)) + raise Exception("Unknown or unexpected packet type {}".format(typ)) self.conn = None def run(self) -> None: self.bind.listen(1) - logging.info('Waiting for connection on {}'.format(self.addr)) + logging.info("Waiting for connection on {}".format(self.addr)) while True: conn, _ = self.bind.accept() try: self._handle_conn(conn) except Exception: - logging.exception('Got exception') + logging.exception("Got exception") finally: conn.close() diff --git a/backup/socketbackend.py b/backup/socketbackend.py index 535acc3ed..30d84f058 100644 --- a/backup/socketbackend.py +++ 
b/backup/socketbackend.py @@ -9,7 +9,14 @@ from urllib.parse import urlparse, parse_qs from backend import Backend, Change -from protocol import PacketType, PKT_CHANGE_TYPES, change_from_packet, packet_from_change, send_packet, recv_packet +from protocol import ( + PacketType, + PKT_CHANGE_TYPES, + change_from_packet, + packet_from_change, + send_packet, + recv_packet, +) # Total number of reconnection tries RECONNECT_TRIES = 5 @@ -20,8 +27,8 @@ # Scale delay factor after each failure RECONNECT_DELAY_BACKOFF = 1.5 -HostPortInfo = namedtuple('HostPortInfo', ['host', 'port', 'addrtype']) -SocketURLInfo = namedtuple('SocketURLInfo', ['target', 'proxytype', 'proxytarget']) +HostPortInfo = namedtuple("HostPortInfo", ["host", "port", "addrtype"]) +SocketURLInfo = namedtuple("SocketURLInfo", ["target", "proxytype", "proxytarget"]) # Network address type. @@ -31,6 +38,7 @@ class AddrType: IPv6 = 1 NAME = 2 + # Proxy type. Only SOCKS5 supported at the moment as this is sufficient for Tor. @@ -40,23 +48,23 @@ class ProxyType: def parse_host_port(path: str) -> HostPortInfo: - '''Parse a host:port pair.''' - if path.startswith('['): # bracketed IPv6 address - eidx = path.find(']') + """Parse a host:port pair.""" + if path.startswith("["): # bracketed IPv6 address + eidx = path.find("]") if eidx == -1: - raise ValueError('Unterminated bracketed host address.') + raise ValueError("Unterminated bracketed host address.") host = path[1:eidx] addrtype = AddrType.IPv6 eidx += 1 - if eidx >= len(path) or path[eidx] != ':': - raise ValueError('Port number missing.') + if eidx >= len(path) or path[eidx] != ":": + raise ValueError("Port number missing.") eidx += 1 else: - eidx = path.find(':') + eidx = path.find(":") if eidx == -1: - raise ValueError('Port number missing.') + raise ValueError("Port number missing.") host = path[0:eidx] - if re.match(r'\d+\.\d+\.\d+\.\d+$', host): # matches IPv4 address format + if re.match(r"\d+\.\d+\.\d+\.\d+$", host): # matches IPv4 address format addrtype = AddrType.IPv4 else: addrtype = AddrType.NAME @@ -65,16 +73,16 @@ def parse_host_port(path: str) -> HostPortInfo: try: port = int(path[eidx:]) except ValueError: - raise ValueError('Invalid port number') + raise ValueError("Invalid port number") return HostPortInfo(host=host, port=port, addrtype=addrtype) def parse_socket_url(destination: str) -> SocketURLInfo: - '''Parse a socket: URL to extract the information contained in it.''' + """Parse a socket: URL to extract the information contained in it.""" url = urlparse(destination) - if url.scheme != 'socket': - raise ValueError('Scheme for socket backend must be socket:...') + if url.scheme != "socket": + raise ValueError("Scheme for socket backend must be socket:...") target = parse_host_port(url.path) @@ -83,19 +91,19 @@ def parse_socket_url(destination: str) -> SocketURLInfo: # parse query parameters # reject unknown parameters (currently all of them) qs = parse_qs(url.query) - for (key, values) in qs.items(): - if key == 'proxy': # proxy=socks5:127.0.0.1:9050 + for key, values in qs.items(): + if key == "proxy": # proxy=socks5:127.0.0.1:9050 if len(values) != 1: - raise ValueError('Proxy can only have one value') + raise ValueError("Proxy can only have one value") - (ptype, ptarget) = values[0].split(':', 1) - if ptype != 'socks5': - raise ValueError('Unknown proxy type ' + ptype) + (ptype, ptarget) = values[0].split(":", 1) + if ptype != "socks5": + raise ValueError("Unknown proxy type " + ptype) proxytype = ProxyType.SOCKS5 proxytarget = parse_host_port(ptarget) else: 
- raise ValueError('Unknown query string parameter ' + key) + raise ValueError("Unknown query string parameter " + key) return SocketURLInfo(target=target, proxytype=proxytype, proxytarget=proxytarget) @@ -117,14 +125,23 @@ def connect(self): else: assert self.url.proxytype == ProxyType.SOCKS5 import socks - self.sock = socks.socksocket() - self.sock.set_proxy(socks.SOCKS5, self.url.proxytarget.host, self.url.proxytarget.port) - logging.info('Connecting to {}:{} (addrtype {}, proxytype {}, proxytarget {})...'.format( - self.url.target.host, self.url.target.port, self.url.target.addrtype, - self.url.proxytype, self.url.proxytarget)) + self.sock = socks.socksocket() + self.sock.set_proxy( + socks.SOCKS5, self.url.proxytarget.host, self.url.proxytarget.port + ) + + logging.info( + "Connecting to {}:{} (addrtype {}, proxytype {}, proxytarget {})...".format( + self.url.target.host, + self.url.target.port, + self.url.target.addrtype, + self.url.proxytype, + self.url.proxytarget, + ) + ) self.sock.connect((self.url.target.host, self.url.target.port)) - logging.info('Connected to {}'.format(self.destination)) + logging.info("Connected to {}".format(self.destination)) def _send_packet(self, typ: int, payload: bytes) -> None: send_packet(self.sock, typ, payload) @@ -133,21 +150,25 @@ def _recv_packet(self) -> Tuple[int, bytes]: return recv_packet(self.sock) def initialize(self) -> bool: - ''' + """ Initialize socket backend by request current metadata from server. - ''' - logging.info('Initializing backend') + """ + logging.info("Initializing backend") self._request_metadata() - logging.info('Initialized SocketBackend: protocol={}, version={}, prev_version={}, version_count={}'.format( - self.protocol, self.version, self.prev_version, self.version_count - )) + logging.info( + "Initialized SocketBackend: protocol={}, version={}, prev_version={}, version_count={}".format( + self.protocol, self.version, self.prev_version, self.version_count + ) + ) return True def _request_metadata(self) -> None: - self._send_packet(PacketType.REQ_METADATA, b'') + self._send_packet(PacketType.REQ_METADATA, b"") (typ, payload) = self._recv_packet() assert typ == PacketType.METADATA - self.protocol, self.version, self.prev_version, self.version_count = struct.unpack("!IIIQ", payload) + self.protocol, self.version, self.prev_version, self.version_count = ( + struct.unpack("!IIIQ", payload) + ) def add_change(self, entry: Change) -> bool: typ, payload = packet_from_change(entry) @@ -172,7 +193,11 @@ def add_change(self, entry: Change) -> bool: # that on the server side. Then we retry. pass else: - raise Exception('Unexpected backup version {} after reconnect'.format(self.version)) + raise Exception( + "Unexpected backup version {} after reconnect".format( + self.version + ) + ) self._send_packet(typ, payload) # Wait for change to be acknowledged before continuing. 
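# The hunk that follows reformats the reconnect loop in SocketBackend.add_change,
# which retries a lost connection with exponentially growing delays between
# attempts. A minimal standalone sketch of that retry-with-backoff pattern,
# assuming the constants shown earlier (RECONNECT_TRIES = 5,
# RECONNECT_DELAY_BACKOFF = 1.5); INITIAL_DELAY and the do_send/reconnect
# callables are illustrative stand-ins, not names from the plugin:
import time

RECONNECT_TRIES = 5  # total number of reconnection tries (from the source)
RECONNECT_DELAY_BACKOFF = 1.5  # scale delay factor after each failure (from the source)
INITIAL_DELAY = 1.0  # hypothetical starting delay in seconds


def send_with_retry(do_send, reconnect):
    retry, delay = 0, INITIAL_DELAY
    while True:
        try:
            do_send()
            return True
        except IOError:
            if retry == RECONNECT_TRIES:
                # Give up after the configured number of attempts.
                raise IOError("Connection was lost while sending change")
            retry += 1
            time.sleep(delay)
            delay *= RECONNECT_DELAY_BACKOFF
            reconnect()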
@@ -184,11 +209,19 @@ def add_change(self, entry: Change) -> bool: break if retry == RECONNECT_TRIES: - logging.error('Connection was lost while sending change (giving up after {} retries)'.format(retry)) - raise IOError('Connection was lost while sending change') + logging.error( + "Connection was lost while sending change (giving up after {} retries)".format( + retry + ) + ) + raise IOError("Connection was lost while sending change") retry += 1 - logging.warning('Connection was lost while sending change (retry {} of {}, will try again after {} seconds)'.format(retry, RECONNECT_TRIES, retry_delay)) + logging.warning( + "Connection was lost while sending change (retry {} of {}, will try again after {} seconds)".format( + retry, RECONNECT_TRIES, retry_delay + ) + ) time.sleep(retry_delay) retry_delay *= RECONNECT_DELAY_BACKOFF need_connect = True @@ -198,7 +231,7 @@ def add_change(self, entry: Change) -> bool: return True def rewind(self) -> bool: - '''Rewind to previous version.''' + """Rewind to previous version.""" version = struct.pack("!I", self.prev_version) self._send_packet(PacketType.REWIND, version) # Wait for change to be acknowledged before continuing. @@ -207,7 +240,7 @@ def rewind(self) -> bool: return True def stream_changes(self) -> Iterator[Change]: - self._send_packet(PacketType.RESTORE, b'') + self._send_packet(PacketType.RESTORE, b"") version = -1 while True: (typ, payload) = self._recv_packet() @@ -221,11 +254,15 @@ def stream_changes(self) -> Iterator[Change]: raise ValueError("Unknown entry type {}".format(typ)) if version != self.version: - raise ValueError("Versions do not match up: restored version {}, backend version {}".format(version, self.version)) + raise ValueError( + "Versions do not match up: restored version {}, backend version {}".format( + version, self.version + ) + ) assert version == self.version def compact(self): - self._send_packet(PacketType.COMPACT, b'') + self._send_packet(PacketType.COMPACT, b"") (typ, payload) = self._recv_packet() assert typ == PacketType.COMPACT_RES return json.loads(payload.decode()) diff --git a/backup/test_backup.py b/backup/test_backup.py index 1509d1995..fb75d1e4c 100644 --- a/backup/test_backup.py +++ b/backup/test_backup.py @@ -18,42 +18,41 @@ def test_start(node_factory, directory): - bpath = os.path.join(directory, 'lightning-1', 'regtest') - bdest = 'file://' + os.path.join(bpath, 'backup.dbak') + bpath = os.path.join(directory, "lightning-1", "regtest") + bdest = "file://" + os.path.join(bpath, "backup.dbak") os.makedirs(bpath) subprocess.check_call([cli_path, "init", "--lightning-dir", bpath, bdest]) opts = { - 'plugin': plugin_path, - 'allow-deprecated-apis': deprecated_apis, + "plugin": plugin_path, + "allow-deprecated-apis": deprecated_apis, } l1 = node_factory.get_node(options=opts, cleandir=False) - plugins = [os.path.basename(p['name']) for p in l1.rpc.plugin("list")['plugins']] + plugins = [os.path.basename(p["name"]) for p in l1.rpc.plugin("list")["plugins"]] assert "backup.py" in plugins # Restart the node a couple of times, to check that we can resume normally for i in range(5): l1.restart() - plugins = [os.path.basename(p['name']) for p in l1.rpc.plugin("list")['plugins']] + plugins = [ + os.path.basename(p["name"]) for p in l1.rpc.plugin("list")["plugins"] + ] assert "backup.py" in plugins def test_start_no_init(node_factory, directory): - """The plugin should refuse to start if we haven't initialized the backup - """ - bpath = os.path.join(directory, 'lightning-1', 'regtest') + """The plugin should 
refuse to start if we haven't initialized the backup""" + bpath = os.path.join(directory, "lightning-1", "regtest") os.makedirs(bpath) opts = { - 'plugin': plugin_path, + "plugin": plugin_path, } - l1 = node_factory.get_node( - options=opts, cleandir=False, may_fail=True, start=False - ) + l1 = node_factory.get_node(options=opts, cleandir=False, may_fail=True, start=False) with pytest.raises(TimeoutError): # The way we detect a failure to start is when start() is running # into timeout looking for 'Server started with public key'. l1.start() - assert l1.daemon.is_in_log(r'Could not find backup.lock in the lightning-dir') + assert l1.daemon.is_in_log(r"Could not find backup.lock in the lightning-dir") def test_init_not_empty(node_factory, directory): @@ -61,20 +60,20 @@ def test_init_not_empty(node_factory, directory): backup-cli init should start the backup with an initial snapshot. """ - bpath = os.path.join(directory, 'lightning-1', 'regtest') - bdest = 'file://' + os.path.join(bpath, 'backup.dbak') + bpath = os.path.join(directory, "lightning-1", "regtest") + bdest = "file://" + os.path.join(bpath, "backup.dbak") l1 = node_factory.get_node() l1.stop() out = subprocess.check_output([cli_path, "init", "--lightning-dir", bpath, bdest]) - assert b'Found an existing database' in out - assert b'Successfully written initial snapshot' in out + assert b"Found an existing database" in out + assert b"Successfully written initial snapshot" in out # Now restart and add the plugin - l1.daemon.opts['plugin'] = plugin_path - l1.daemon.opts['allow-deprecated-apis'] = deprecated_apis + l1.daemon.opts["plugin"] = plugin_path + l1.daemon.opts["allow-deprecated-apis"] = deprecated_apis l1.start() - assert l1.daemon.is_in_log(r'plugin-backup.py: Versions match up') + assert l1.daemon.is_in_log(r"plugin-backup.py: Versions match up") @flaky @@ -88,13 +87,13 @@ def test_tx_abort(node_factory, directory): inbetween the hook call and the DB transaction. """ - bpath = os.path.join(directory, 'lightning-1', 'regtest') - bdest = 'file://' + os.path.join(bpath, 'backup.dbak') + bpath = os.path.join(directory, "lightning-1", "regtest") + bdest = "file://" + os.path.join(bpath, "backup.dbak") os.makedirs(bpath) subprocess.check_call([cli_path, "init", "--lightning-dir", bpath, bdest]) opts = { - 'plugin': plugin_path, - 'allow-deprecated-apis': deprecated_apis, + "plugin": plugin_path, + "allow-deprecated-apis": deprecated_apis, } l1 = node_factory.get_node(options=opts, cleandir=False) l1.stop() @@ -107,7 +106,7 @@ def test_tx_abort(node_factory, directory): print(l1.db.query("SELECT * FROM vars;")) l1.restart() - assert l1.daemon.is_in_log(r'Last changes not applied') + assert l1.daemon.is_in_log(r"Last changes not applied") @flaky @@ -118,13 +117,13 @@ def test_failing_restore(node_factory, directory): in the database back to n-2, which is non-recoverable. 
""" - bpath = os.path.join(directory, 'lightning-1', 'regtest') - bdest = 'file://' + os.path.join(bpath, 'backup.dbak') + bpath = os.path.join(directory, "lightning-1", "regtest") + bdest = "file://" + os.path.join(bpath, "backup.dbak") os.makedirs(bpath) subprocess.check_call([cli_path, "init", "--lightning-dir", bpath, bdest]) opts = { - 'plugin': plugin_path, - 'allow-deprecated-apis': deprecated_apis, + "plugin": plugin_path, + "allow-deprecated-apis": deprecated_apis, } def section(comment): @@ -144,25 +143,23 @@ def section(comment): l1.daemon.proc.wait() section("Verifying the node died with an error") - assert l1.daemon.is_in_log(r'lost some state') is not None + assert l1.daemon.is_in_log(r"lost some state") is not None def test_intermittent_backup(node_factory, directory): - """Simulate intermittent use of the backup, or an old file backup. - - """ - bpath = os.path.join(directory, 'lightning-1', 'regtest') - bdest = 'file://' + os.path.join(bpath, 'backup.dbak') + """Simulate intermittent use of the backup, or an old file backup.""" + bpath = os.path.join(directory, "lightning-1", "regtest") + bdest = "file://" + os.path.join(bpath, "backup.dbak") os.makedirs(bpath) subprocess.check_call([cli_path, "init", "--lightning-dir", bpath, bdest]) opts = { - 'plugin': plugin_path, - 'allow-deprecated-apis': deprecated_apis, + "plugin": plugin_path, + "allow-deprecated-apis": deprecated_apis, } l1 = node_factory.get_node(options=opts, cleandir=False, may_fail=True) # Now start without the plugin. This should work fine. - del l1.daemon.opts['plugin'] + del l1.daemon.opts["plugin"] l1.restart() # Now restart adding the plugin again, and it should fail due to gaps in @@ -173,33 +170,33 @@ def test_intermittent_backup(node_factory, directory): l1.start() l1.daemon.proc.wait() - assert l1.daemon.is_in_log(r'Backup is out of date') is not None + assert l1.daemon.is_in_log(r"Backup is out of date") is not None def test_restore(node_factory, directory): - bpath = os.path.join(directory, 'lightning-1', 'regtest') - bdest = 'file://' + os.path.join(bpath, 'backup.dbak') + bpath = os.path.join(directory, "lightning-1", "regtest") + bdest = "file://" + os.path.join(bpath, "backup.dbak") os.makedirs(bpath) subprocess.check_call([cli_path, "init", "--lightning-dir", bpath, bdest]) opts = { - 'plugin': plugin_path, - 'allow-deprecated-apis': deprecated_apis, + "plugin": plugin_path, + "allow-deprecated-apis": deprecated_apis, } l1 = node_factory.get_node(options=opts, cleandir=False) l1.stop() - rdest = os.path.join(bpath, 'lightningd.sqlite.restore') + rdest = os.path.join(bpath, "lightningd.sqlite.restore") subprocess.check_call([cli_path, "restore", bdest, rdest]) def test_restore_dir(node_factory, directory): - bpath = os.path.join(directory, 'lightning-1', 'regtest') - bdest = 'file://' + os.path.join(bpath, 'backup.dbak') + bpath = os.path.join(directory, "lightning-1", "regtest") + bdest = "file://" + os.path.join(bpath, "backup.dbak") os.makedirs(bpath) subprocess.check_call([cli_path, "init", "--lightning-dir", bpath, bdest]) opts = { - 'plugin': plugin_path, - 'allow-deprecated-apis': deprecated_apis, + "plugin": plugin_path, + "allow-deprecated-apis": deprecated_apis, } l1 = node_factory.get_node(options=opts, cleandir=False) l1.stop() @@ -214,19 +211,21 @@ def test_restore_dir(node_factory, directory): def test_warning(directory, node_factory): - bpath = os.path.join(directory, 'lightning-1', 'regtest') - bdest = 'file://' + os.path.join(bpath, 'backup.dbak') + bpath = 
os.path.join(directory, "lightning-1", "regtest")
+    bdest = "file://" + os.path.join(bpath, "backup.dbak")
     os.makedirs(bpath)
     subprocess.check_call([cli_path, "init", "--lightning-dir", bpath, bdest])

     opts = {
-        'plugin': plugin_path,
-        'allow-deprecated-apis': deprecated_apis,
-        'backup-destination': 'somewhere/over/the/rainbox',
+        "plugin": plugin_path,
+        "allow-deprecated-apis": deprecated_apis,
+        "backup-destination": "somewhere/over/the/rainbox",
     }
     l1 = node_factory.get_node(options=opts, cleandir=False)
     l1.stop()

-    assert l1.daemon.is_in_log(r'The `--backup-destination` option is deprecated and will be removed in future versions of the backup plugin.')
+    assert l1.daemon.is_in_log(
+        r"The `--backup-destination` option is deprecated and will be removed in future versions of the backup plugin."
+    )


 class DummyBackend(Backend):
@@ -237,8 +236,8 @@ def __init__(self):
 def test_rewrite():
     tests = [
         (
-            r'UPDATE outputs SET status=123, reserved_til=1891733WHERE prev_out_tx=1 AND prev_out_index=2',
-            r'UPDATE outputs SET status=123, reserved_til=1891733 WHERE prev_out_tx=1 AND prev_out_index=2',
+            r"UPDATE outputs SET status=123, reserved_til=1891733WHERE prev_out_tx=1 AND prev_out_index=2",
+            r"UPDATE outputs SET status=123, reserved_til=1891733 WHERE prev_out_tx=1 AND prev_out_index=2",
        ),
     ]
@@ -249,21 +248,22 @@ def test_rewrite():


 def test_restore_pre_4090(directory):
-    """The prev-4090-backup.dbak contains faulty expansions, fix em.
-    """
-    bdest = 'file://' + os.path.join(os.path.dirname(__file__), 'tests', 'pre-4090-backup.dbak')
-    rdest = os.path.join(directory, 'lightningd.sqlite.restore')
+    """The pre-4090-backup.dbak contains faulty expansions, fix em."""
+    bdest = "file://" + os.path.join(
+        os.path.dirname(__file__), "tests", "pre-4090-backup.dbak"
+    )
+    rdest = os.path.join(directory, "lightningd.sqlite.restore")
     subprocess.check_call([cli_path, "restore", bdest, rdest])


 def test_compact(bitcoind, directory, node_factory):
-    bpath = os.path.join(directory, 'lightning-1', 'regtest')
-    bdest = 'file://' + os.path.join(bpath, 'backup.dbak')
+    bpath = os.path.join(directory, "lightning-1", "regtest")
+    bdest = "file://" + os.path.join(bpath, "backup.dbak")
     os.makedirs(bpath)
     subprocess.check_call([cli_path, "init", "--lightning-dir", bpath, bdest])

     opts = {
-        'plugin': plugin_path,
-        'allow-deprecated-apis': deprecated_apis,
+        "plugin": plugin_path,
+        "allow-deprecated-apis": deprecated_apis,
     }
     l1 = node_factory.get_node(options=opts, cleandir=False)
     l1.rpc.backup_compact()
@@ -283,53 +283,57 @@ def test_compact(bitcoind, directory, node_factory):
 def test_parse_socket_url():
     with pytest.raises(ValueError):
         # fail: invalid url scheme
-        socketbackend.parse_socket_url('none')
+        socketbackend.parse_socket_url("none")
         # fail: no port number
-        socketbackend.parse_socket_url('socket:127.0.0.1')
-        socketbackend.parse_socket_url('socket:127.0.0.1:')
+        socketbackend.parse_socket_url("socket:127.0.0.1")
+        socketbackend.parse_socket_url("socket:127.0.0.1:")
         # fail: unbracketed IPv6
-        socketbackend.parse_socket_url('socket:::1:1234')
+        socketbackend.parse_socket_url("socket:::1:1234")
         # fail: no port number IPv6
-        socketbackend.parse_socket_url('socket:[::1]')
-        socketbackend.parse_socket_url('socket:[::1]:')
+        socketbackend.parse_socket_url("socket:[::1]")
+        socketbackend.parse_socket_url("socket:[::1]:")
         # fail: invalid port number
-        socketbackend.parse_socket_url('socket:127.0.0.1:12bla')
+        socketbackend.parse_socket_url("socket:127.0.0.1:12bla")
         # fail: unrecognized query string key
-        socketbackend.parse_socket_url('socket:127.0.0.1:1234?dummy=value')
+        socketbackend.parse_socket_url("socket:127.0.0.1:1234?dummy=value")
         # fail: incomplete proxy spec
-        socketbackend.parse_socket_url('socket:127.0.0.1:1234?proxy=socks5')
-        socketbackend.parse_socket_url('socket:127.0.0.1:1234?proxy=socks5:')
-        socketbackend.parse_socket_url('socket:127.0.0.1:1234?proxy=socks5:127.0.0.1:')
+        socketbackend.parse_socket_url("socket:127.0.0.1:1234?proxy=socks5")
+        socketbackend.parse_socket_url("socket:127.0.0.1:1234?proxy=socks5:")
+        socketbackend.parse_socket_url("socket:127.0.0.1:1234?proxy=socks5:127.0.0.1:")
         # fail: unknown proxy scheme
-        socketbackend.parse_socket_url('socket:127.0.0.1:1234?proxy=socks6:127.0.0.1:9050')
+        socketbackend.parse_socket_url(
+            "socket:127.0.0.1:1234?proxy=socks6:127.0.0.1:9050"
+        )
     # IPv4
-    s = socketbackend.parse_socket_url('socket:127.0.0.1:1234')
-    assert s.target.host == '127.0.0.1'
+    s = socketbackend.parse_socket_url("socket:127.0.0.1:1234")
+    assert s.target.host == "127.0.0.1"
     assert s.target.port == 1234
     assert s.target.addrtype == socketbackend.AddrType.IPv4
     assert s.proxytype == socketbackend.ProxyType.DIRECT
     # IPv6
-    s = socketbackend.parse_socket_url('socket:[::1]:1235')
-    assert s.target.host == '::1'
+    s = socketbackend.parse_socket_url("socket:[::1]:1235")
+    assert s.target.host == "::1"
     assert s.target.port == 1235
     assert s.target.addrtype == socketbackend.AddrType.IPv6
     assert s.proxytype == socketbackend.ProxyType.DIRECT
     # Hostname
-    s = socketbackend.parse_socket_url('socket:backup.local:1236')
-    assert s.target.host == 'backup.local'
+    s = socketbackend.parse_socket_url("socket:backup.local:1236")
+    assert s.target.host == "backup.local"
     assert s.target.port == 1236
     assert s.target.addrtype == socketbackend.AddrType.NAME
     assert s.proxytype == socketbackend.ProxyType.DIRECT
     # Tor
-    s = socketbackend.parse_socket_url('socket:backupserver.onion:1234?proxy=socks5:127.0.0.1:9050')
-    assert s.target.host == 'backupserver.onion'
+    s = socketbackend.parse_socket_url(
+        "socket:backupserver.onion:1234?proxy=socks5:127.0.0.1:9050"
+    )
+    assert s.target.host == "backupserver.onion"
     assert s.target.port == 1234
     assert s.target.addrtype == socketbackend.AddrType.NAME
     assert s.proxytype == socketbackend.ProxyType.SOCKS5
-    assert s.proxytarget.host == '127.0.0.1'
+    assert s.proxytarget.host == "127.0.0.1"
     assert s.proxytarget.port == 9050
     assert s.proxytarget.addrtype == socketbackend.AddrType.IPv4
diff --git a/clearnet/clearnet.py b/clearnet/clearnet.py
index 85cada76f..b61bc7b75 100755
--- a/clearnet/clearnet.py
+++ b/clearnet/clearnet.py
@@ -1,29 +1,30 @@
 #!/usr/bin/env python3
 import socket
 from contextlib import closing
+
 from pyln.client import Plugin, RpcError

 plugin = Plugin()


 def get_address_type(addrstr: str):
-    """ I know this can be more sophisticated, but works """
+    """I know this can be more sophisticated, but it works"""
     if ".onion:" in addrstr:
-        return 'tor'
+        return "tor"
     if addrstr[0].isdigit():
-        return 'ipv4'
+        return "ipv4"
     if addrstr.startswith("["):
-        return 'ipv6'
-    return 'dns'
+        return "ipv6"
+    return "dns"


 # taken from:
 # https://stackoverflow.com/questions/19196105/how-to-check-if-a-network-port-is-open
 def check_socket(host: str, port: int, timeout: float = None):
-    """ Checks if a socket can be opened to a host """
-    if host.count('.') == 3:
+    """Checks if a socket can be opened to a host"""
+    if host.count(".") == 3:
         proto = socket.AF_INET
-    if host.count(':') > 1:
+    if host.count(":") > 1:
         proto = socket.AF_INET6
     with closing(socket.socket(proto, socket.SOCK_STREAM)) as sock:
         if timeout is not None:
@@ -35,30 +36,30 @@ def check_socket(host: str, port: int, timeout: float = None):


 def clearnet_pid(peer: dict, messages: list):
-    peer_id = peer['id']
-    if not peer['connected']:
+    peer_id = peer["id"]
+    if not peer["connected"]:
         messages += [f"Peer is not connected: {peer_id}"]
         return False
-    if get_address_type(peer['netaddr'][0]) != 'tor':
+    if get_address_type(peer["netaddr"][0]) != "tor":
         messages += [f"Already connected via clearnet: {peer_id}"]
         return True

     # let's check what gossip knows about this peer
-    nodes = plugin.rpc.listnodes(peer_id)['nodes']
+    nodes = plugin.rpc.listnodes(peer_id)["nodes"]
     if len(nodes) == 0:
         messages += [f"Error: No gossip for: {peer_id}"]
         return
-    addrs = [a for a in nodes[0]['addresses'] if not a['type'].startswith("tor")]
+    addrs = [a for a in nodes[0]["addresses"] if not a["type"].startswith("tor")]
     if len(addrs) == 0:
         messages += [f"Error: No clearnet addresses known for: {peer_id}"]
         return

     # now check addrs for open ports
     for addr in addrs:
-        if addr['type'] == 'dns':
+        if addr["type"] == "dns":
             messages += [f"TODO: DNS lookups for: {addr['address']}"]
             continue
-        if check_socket(addr['address'], addr['port'], 2.0):
+        if check_socket(addr["address"], addr["port"], 2.0):
             # disconnect
             result = plugin.rpc.disconnect(peer_id, True)
             if len(result) != 0:
@@ -67,18 +68,24 @@ def clearnet_pid(peer: dict, messages: list):

             # try clearnet connection
             try:
-                result = plugin.rpc.connect(peer_id, addr['address'], addr['port'])
-                newtype = result['address']['type']
-                if not newtype.startswith('tor'):
-                    messages += [f"Established clearnet connection for: {peer_id} with {newtype}"]
+                result = plugin.rpc.connect(peer_id, addr["address"], addr["port"])
+                newtype = result["address"]["type"]
+                if not newtype.startswith("tor"):
+                    messages += [
+                        f"Established clearnet connection for: {peer_id} with {newtype}"
+                    ]
                     return True
             except RpcError:  # we got a connection error, try reconnect
-                messages += [f"Error: Connection failed for: {peer_id} with {addr['type']}"]
+                messages += [
+                    f"Error: Connection failed for: {peer_id} with {addr['type']}"
+                ]
                 try:
                     result = plugin.rpc.connect(peer_id)  # without address
-                    newtype = result['address']['type']
-                    if not newtype.startswith('tor'):
-                        messages += [f"Established clearnet connection for: {peer_id} with {newtype}"]
+                    newtype = result["address"]["type"]
+                    if not newtype.startswith("tor"):
+                        messages += [
+                            f"Established clearnet connection for: {peer_id} with {newtype}"
+                        ]
                         return True
                 except RpcError:  # we got a reconnection error
                     messages += [f"Error: Reconnection failed for: {peer_id}"]
@@ -90,13 +97,13 @@ def clearnet_pid(peer: dict, messages: list):

 @plugin.method("clearnet")
 def clearnet(plugin: Plugin, peer_id: str = None):
-    """ Enforce a clearnet connection on all peers or a given `peer_id`."""
+    """Enforce a clearnet connection on all peers or a given `peer_id`."""
     if peer_id is None:
-        peers = plugin.rpc.listpeers(peer_id)['peers']
+        peers = plugin.rpc.listpeers(peer_id)["peers"]
     else:
         if not isinstance(peer_id, str) or len(peer_id) != 66:
             return f"Error: Invalid peer_id: {peer_id}"
-        peers = plugin.rpc.listpeers(peer_id)['peers']
+        peers = plugin.rpc.listpeers(peer_id)["peers"]
         if len(peers) == 0:
             return f"Error: peer not found: {peer_id}"
@@ -108,7 +115,7 @@ def clearnet(plugin: Plugin, peer_id: str = None):

 @plugin.init()
 def init(options: dict, configuration: dict, plugin: Plugin, **kwargs):
-    plugin.log(f"clearnet enforcer plugin initialized")
+    plugin.log("clearnet enforcer plugin initialized")

 plugin.run()
diff --git a/clearnet/test_clearnet.py b/clearnet/test_clearnet.py
index 9c7468861..b0e32c5ba 100644
--- a/clearnet/test_clearnet.py
+++ b/clearnet/test_clearnet.py
@@ -17,6 +17,6 @@ def test_clearnet_starts(node_factory):

 def test_clearnet_runs(node_factory):
-    pluginopt = {'plugin': plugin_path}
+    pluginopt = {"plugin": plugin_path}
     l1, l2 = node_factory.line_graph(2, opts=pluginopt)
     l1.rpc.clearnet()
diff --git a/currencyrate/currencyrate.py b/currencyrate/currencyrate.py
index 1135e69b2..31fe22320 100755
--- a/currencyrate/currencyrate.py
+++ b/currencyrate/currencyrate.py
@@ -10,29 +10,33 @@

 plugin = Plugin()

-Source = namedtuple('Source', ['name', 'urlformat', 'replymembers'])
+Source = namedtuple("Source", ["name", "urlformat", "replymembers"])

 sources = [
     # e.g. {"high": "18502.56", "last": "17970.41", "timestamp": "1607650787", "bid": "17961.87", "vwap": "18223.42", "volume": "7055.63066541", "low": "17815.92", "ask": "17970.41", "open": "18250.30"}
-    Source('bitstamp',
-           'https://www.bitstamp.net/api/v2/ticker/btc{currency_lc}/',
-           ['last']),
+    Source(
+        "bitstamp", "https://www.bitstamp.net/api/v2/ticker/btc{currency_lc}/", ["last"]
+    ),
     # e.g. {"bitcoin":{"usd":17885.84}}
-    Source('coingecko',
-           'https://api.coingecko.com/api/v3/simple/price?ids=bitcoin&vs_currencies={currency_lc}',
-           ['bitcoin', '{currency_lc}']),
+    Source(
+        "coingecko",
+        "https://api.coingecko.com/api/v3/simple/price?ids=bitcoin&vs_currencies={currency_lc}",
+        ["bitcoin", "{currency_lc}"],
+    ),
     # e.g. {"time":{"updated":"Dec 16, 2020 00:58:00 UTC","updatedISO":"2020-12-16T00:58:00+00:00","updateduk":"Dec 16, 2020 at 00:58 GMT"},"disclaimer":"This data was produced from the CoinDesk Bitcoin Price Index (USD). Non-USD currency data converted using hourly conversion rate from openexchangerates.org","bpi":{"USD":{"code":"USD","rate":"19,395.1400","description":"United States Dollar","rate_float":19395.14},"AUD":{"code":"AUD","rate":"25,663.5329","description":"Australian Dollar","rate_float":25663.5329}}}
-    Source('coindesk',
-           'https://api.coindesk.com/v1/bpi/currentprice/{currency}.json',
-           ['bpi', '{currency}', 'rate_float']),
+    Source(
+        "coindesk",
+        "https://api.coindesk.com/v1/bpi/currentprice/{currency}.json",
+        ["bpi", "{currency}", "rate_float"],
+    ),
     # e.g. {"data":{"base":"BTC","currency":"USD","amount":"19414.63"}}
-    Source('coinbase',
-           'https://api.coinbase.com/v2/prices/spot?currency={currency}',
-           ['data', 'amount']),
+    Source(
+        "coinbase",
+        "https://api.coinbase.com/v2/prices/spot?currency={currency}",
+        ["data", "amount"],
+    ),
     # e.g. { "USD" : {"15m" : 6650.3, "last" : 6650.3, "buy" : 6650.3, "sell" : 6650.3, "symbol" : "$"}, "AUD" : {"15m" : 10857.19, "last" : 10857.19, "buy" : 10857.19, "sell" : 10857.19, "symbol" : "$"},...
-    Source('blockchain.info',
-           'https://blockchain.info/ticker',
-           ['{currency}', 'last']),
+    Source("blockchain.info", "https://blockchain.info/ticker", ["{currency}", "last"]),
 ]
@@ -52,8 +56,8 @@ def requests_retry_session(
         status_forcelist=status_forcelist,
     )
     adapter = HTTPAdapter(max_retries=retry)
-    session.mount('http://', adapter)
-    session.mount('https://', adapter)
+    session.mount("http://", adapter)
+    session.mount("https://", adapter)
     return session
@@ -62,36 +66,43 @@ def get_currencyrate(plugin, currency, urlformat, replymembers):
     # Workaround: retry up to 5 times with a delay
     currency_lc = currency.lower()
     url = urlformat.format(currency_lc=currency_lc, currency=currency)
-    r = requests_retry_session(retries=5, status_forcelist=[404]).get(url, proxies=plugin.proxies)
+    r = requests_retry_session(retries=5, status_forcelist=[404]).get(
+        url, proxies=plugin.proxies
+    )
     if r.status_code != 200:
-        plugin.log(level='info', message='{}: bad response {}'.format(url, r.status_code))
+        plugin.log(
+            level="info", message="{}: bad response {}".format(url, r.status_code)
+        )
         return None
     json = r.json()
     for m in replymembers:
         expanded = m.format(currency_lc=currency_lc, currency=currency)
         if expanded not in json:
-            plugin.log(level='debug', message='{}: {} not in {}'.format(url, expanded, json))
+            plugin.log(
+                level="debug", message="{}: {} not in {}".format(url, expanded, json)
+            )
             return None
         json = json[expanded]

     try:
         return Millisatoshi(int(10**11 / float(json)))
     except Exception:
-        plugin.log(level='info', message='{}: could not convert {} to msat'.format(url, json))
+        plugin.log(
+            level="info", message="{}: could not convert {} to msat".format(url, json)
+        )
         return None


 def set_proxies(plugin):
     config = plugin.rpc.listconfigs()
-    if 'always-use-proxy' in config and config['always-use-proxy']:
-        paddr = config['proxy']
+    if "always-use-proxy" in config and config["always-use-proxy"]:
+        paddr = config["proxy"]
         # Default port is 9050
-        if ':' not in paddr:
-            paddr += ':9050'
-        plugin.proxies = {'https': 'socks5h://' + paddr,
-                          'http': 'socks5h://' + paddr}
+        if ":" not in paddr:
+            paddr += ":9050"
+        plugin.proxies = {"https": "socks5h://" + paddr, "http": "socks5h://" + paddr}
     else:
         plugin.proxies = None
@@ -129,31 +140,37 @@ def currencyconvert(plugin, amount, currency):
 def init(options, configuration, plugin):
     set_proxies(plugin)

-    sourceopts = options['add-source']
+    sourceopts = options["add-source"]
     # Prior to 0.9.3, 'multi' was unsupported.
     if type(sourceopts) is not list:
         sourceopts = [sourceopts]
-    if sourceopts != ['']:
+    if sourceopts != [""]:
         for s in sourceopts:
-            parts = s.split(',')
+            parts = s.split(",")
             sources.append(Source(parts[0], parts[1], parts[2:]))

-    disableopts = options['disable-source']
+    disableopts = options["disable-source"]
     # Prior to 0.9.3, 'multi' was unsupported.
     if type(disableopts) is not list:
         disableopts = [disableopts]
-    if disableopts != ['']:
+    if disableopts != [""]:
         for s in sources[:]:
             if s.name in disableopts:
                 sources.remove(s)

 # As a bad example: binance,https://api.binance.com/api/v3/ticker/price?symbol=BTC{currency}T,price
-plugin.add_option(name='add-source', default='', description='Add source name,urlformat,resultmembers...')
-plugin.add_option(name='disable-source', default='', description='Disable source by name')
+plugin.add_option(
+    name="add-source",
+    default="",
+    description="Add source name,urlformat,resultmembers...",
+)
+plugin.add_option(
+    name="disable-source", default="", description="Disable source by name"
+)
 # This has an effect only for recent pyln versions (0.9.3+).
-plugin.options['add-source']['multi'] = True
-plugin.options['disable-source']['multi'] = True
+plugin.options["add-source"]["multi"] = True
+plugin.options["disable-source"]["multi"] = True

 plugin.run()
diff --git a/currencyrate/test_currencyrate.py b/currencyrate/test_currencyrate.py
index 1b4b76397..86a72c8c2 100644
--- a/currencyrate/test_currencyrate.py
+++ b/currencyrate/test_currencyrate.py
@@ -34,9 +34,7 @@ def test_currencyrate(node_factory):
         "disable-source": "bitstamp",
     }
     l1 = node_factory.get_node(options=opts)
-    plugins = [
-        os.path.basename(p["name"]) for p in l1.rpc.plugin("list")["plugins"]
-    ]
+    plugins = [os.path.basename(p["name"]) for p in l1.rpc.plugin("list")["plugins"]]
     assert "currencyrate.py" in plugins

     rates = l1.rpc.call("currencyrates", ["USD"])
diff --git a/datastore/datastore-plugin.py b/datastore/datastore-plugin.py
index 2df1bf364..ead3974c2 100755
--- a/datastore/datastore-plugin.py
+++ b/datastore/datastore-plugin.py
@@ -2,6 +2,7 @@
 """This does the actual datastore work, if the main plugin says there's
 no datastore support.  We can't even load this if there's real datastore
 support.
 """
+
 from pyln.client import Plugin, RpcException
 from collections import namedtuple
 import os
@@ -18,7 +19,7 @@ DATASTORE_UPDATE_NO_CHILDREN = 1206

 plugin = Plugin()

-Entry = namedtuple('Entry', ['generation', 'data'])
+Entry = namedtuple("Entry", ["generation", "data"])


 # A singleton to most commands turns into a [].
@@ -30,11 +31,11 @@ def normalize_key(key: Union[Sequence[str], str]) -> List[str]:

 # We turn list into nul-separated hexbytes for storage (shelve needs all keys to be strings)
 def key_to_hex(key: Sequence[str]) -> str:
-    return b'\0'.join([bytes(k, encoding='utf8') for k in key]).hex()
+    return b"\0".join([bytes(k, encoding="utf8") for k in key]).hex()


 def hex_to_key(hexstr: str) -> List[str]:
-    return [b.decode() for b in bytes.fromhex(hexstr).split(b'\0')]
+    return [b.decode() for b in bytes.fromhex(hexstr).split(b"\0")]


 def datastore_entry(key: Sequence[str], entry: Optional[Entry]):
@@ -43,18 +44,18 @@ def datastore_entry(key: Sequence[str], entry: Optional[Entry]):
     if isinstance(key, str):
         key = [key]
-    ret = {'key': key}
+    ret = {"key": key}

     if entry is not None:
         # Entry may be a simple tuple; convert
         entry = Entry(*entry)
-        ret['generation'] = entry.generation
-        ret['hex'] = entry.data.hex()
+        ret["generation"] = entry.generation
+        ret["hex"] = entry.data.hex()

         # FFS, Python3 seems happy with \0 in UTF-8.
         if 0 not in entry.data:
             try:
-                ret['string'] = entry.data.decode('utf8')
+                ret["string"] = entry.data.decode("utf8")
             except UnicodeDecodeError:
                 pass
     return ret
@@ -63,7 +64,7 @@ def datastore_entry(key: Sequence[str], entry: Optional[Entry]):
 @plugin.method("datastore")
 def datastore(plugin, key, string=None, hex=None, mode="must-create", generation=None):
     """Add/modify a {key} and {hex}/{string} data to the data store,
-optionally insisting it be {generation}"""
+    optionally insisting it be {generation}"""
     key = normalize_key(key)
     khex = key_to_hex(key)
@@ -78,29 +79,23 @@ def datastore(plugin, key, string=None, hex=None, mode="must-create", generation

     if mode == "must-create":
         if khex in plugin.datastore:
-            raise RpcException("already exists",
-                               DATASTORE_UPDATE_ALREADY_EXISTS)
+            raise RpcException("already exists", DATASTORE_UPDATE_ALREADY_EXISTS)
     elif mode == "must-replace":
         if khex not in plugin.datastore:
-            raise RpcException("does not exist",
-                               DATASTORE_UPDATE_DOES_NOT_EXIST)
+            raise RpcException("does not exist", DATASTORE_UPDATE_DOES_NOT_EXIST)
     elif mode == "create-or-replace":
         if generation is not None:
-            raise RpcException("generation only valid with"
-                               " must-create/must-replace")
+            raise RpcException("generation only valid with must-create/must-replace")
         pass
     elif mode == "must-append":
         if generation is not None:
-            raise RpcException("generation only valid with"
-                               " must-create/must-replace")
+            raise RpcException("generation only valid with must-create/must-replace")
         if khex not in plugin.datastore:
-            raise RpcException("does not exist",
-                               DATASTORE_UPDATE_DOES_NOT_EXIST)
+            raise RpcException("does not exist", DATASTORE_UPDATE_DOES_NOT_EXIST)
         data = plugin.datastore[khex].data + data
     elif mode == "create-or-append":
         if generation is not None:
-            raise RpcException("generation only valid with"
-                               " must-create/must-replace")
+            raise RpcException("generation only valid with must-create/must-replace")
         data = plugin.datastore.get(khex, Entry(0, bytes())).data + data
     else:
         raise RpcException("invalid mode")
@@ -109,22 +104,24 @@ def datastore(plugin, key, string=None, hex=None, mode="must-create", generation
     parent = [key[0]]
     for i in range(1, len(key)):
         if key_to_hex(parent) in plugin.datastore:
-            raise RpcException("Parent key [{}] exists".format(','.join(parent)),
-                               DATASTORE_UPDATE_NO_CHILDREN)
+            raise RpcException(
+                "Parent key [{}] exists".format(",".join(parent)),
+                DATASTORE_UPDATE_NO_CHILDREN,
+            )
         parent += [key[i]]

     if khex in plugin.datastore:
         entry = plugin.datastore[khex]
         if generation is not None:
             if entry.generation != generation:
-                raise RpcException("generation is different",
-                                   DATASTORE_UPDATE_WRONG_GENERATION)
+                raise RpcException(
+                    "generation is different", DATASTORE_UPDATE_WRONG_GENERATION
+                )
         gen = entry.generation + 1
     else:
         # Make sure child doesn't exist (grossly inefficient)
-        if any([hex_to_key(k)[:len(key)] == key for k in plugin.datastore]):
-            raise RpcException("Key has children",
-                               DATASTORE_UPDATE_HAS_CHILDREN)
+        if any([hex_to_key(k)[: len(key)] == key for k in plugin.datastore]):
+            raise RpcException("Key has children", DATASTORE_UPDATE_HAS_CHILDREN)
         gen = 0

     plugin.datastore[khex] = Entry(gen, data)
@@ -143,8 +140,7 @@ def deldatastore(plugin, key, generation=None):
     entry = plugin.datastore[khex]

     if generation is not None and entry.generation != generation:
-        raise RpcException("generation is different",
-                           DATASTORE_DEL_WRONG_GENERATION)
+        raise RpcException("generation is different", DATASTORE_DEL_WRONG_GENERATION)

     ret = datastore_entry(key, entry)
     del plugin.datastore[khex]
@@ -160,39 +156,39 @@ def listdatastore(plugin, key=[]):
     prev = None
     for khex, e in sorted(plugin.datastore.items()):
         k = hex_to_key(khex)
-        if k[:len(key)] != key:
+        if k[: len(key)] != key:
             continue
         # Don't print sub-children
         if len(k) > len(key) + 1:
-            if prev is None or k[:len(key)+1] != prev:
-                prev = k[:len(key)+1]
+            if prev is None or k[: len(key) + 1] != prev:
+                prev = k[: len(key) + 1]
                 ret += [datastore_entry(prev, None)]
         else:
             ret += [datastore_entry(k, e)]

-    return {'datastore': ret}
+    return {"datastore": ret}


 def upgrade_store(plugin):
     """Initial version of this plugin had no generation numbers"""
     try:
-        oldstore = shelve.open('datastore.dat', 'r')
+        oldstore = shelve.open("datastore.dat", "r")
     except:
         return
-    plugin.log("Upgrading store to have generation numbers", level='unusual')
-    datastore = shelve.open('datastore_v1.dat', 'c')
+    plugin.log("Upgrading store to have generation numbers", level="unusual")
+    datastore = shelve.open("datastore_v1.dat", "c")
     for k, d in oldstore.items():
         datastore[key_to_hex([k])] = Entry(0, d)
     oldstore.close()
     datastore.close()
-    os.unlink('datastore.dat')
+    os.unlink("datastore.dat")


 @plugin.init()
 def init(options, configuration, plugin):
     upgrade_store(plugin)
-    plugin.datastore = shelve.open('datastore_v1.dat')
+    plugin.datastore = shelve.open("datastore_v1.dat")

 plugin.run()
diff --git a/datastore/datastore.py b/datastore/datastore.py
index 8d3cbbbf3..a30ecda7a 100755
--- a/datastore/datastore.py
+++ b/datastore/datastore.py
@@ -10,36 +10,38 @@

 def unload_store(plugin):
     """When we have a real store, we transfer our contents into it"""
     try:
-        datastore = shelve.open('datastore_v1.dat', 'r')
+        datastore = shelve.open("datastore_v1.dat", "r")
     except:
         return
-    plugin.log("Emptying store into main store (resetting generations!)", level='unusual')
+    plugin.log(
+        "Emptying store into main store (resetting generations!)", level="unusual"
+    )
     for k, (g, data) in datastore.items():
         try:
             plugin.rpc.datastore(key=[k], hex=data.hex())
         except RpcError as e:
-            plugin.log("Failed to put {} into store: {}".format(k, e),
-                       level='broken')
+            plugin.log("Failed to put {} into store: {}".format(k, e), level="broken")
     datastore.close()
-    plugin.log("Erasing our store", level='unusual')
-    os.unlink('datastore_v1.dat')
+    plugin.log("Erasing our store", level="unusual")
+    os.unlink("datastore_v1.dat")


 @plugin.init()
 def init(options, configuration, plugin):
     # If we have real datastore commands, don't load plugin.
     try:
-        plugin.rpc.help('datastore')
+        plugin.rpc.help("datastore")
         unload_store(plugin)
-        return {'disable': 'there is a real datastore command'}
+        return {"disable": "there is a real datastore command"}
     except RpcError:
         pass

     # Start up real plugin now
-    plugin.rpc.plugin_start(os.path.join(os.path.dirname(__file__),
-                                         "datastore-plugin.py"))
-    return {'disable': 'no builtin-datastore: plugin loaded'}
+    plugin.rpc.plugin_start(
+        os.path.join(os.path.dirname(__file__), "datastore-plugin.py")
+    )
+    return {"disable": "no builtin-datastore: plugin loaded"}

 plugin.run()
diff --git a/datastore/test_datastore.py b/datastore/test_datastore.py
index d44a23056..d4e8aa527 100644
--- a/datastore/test_datastore.py
+++ b/datastore/test_datastore.py
@@ -11,119 +11,142 @@

 # Test taken from lightning/tests/test_misc.py
 def test_datastore(node_factory):
-    l1 = node_factory.get_node(options={'plugin': plugin_path})
+    l1 = node_factory.get_node(options={"plugin": plugin_path})

     time.sleep(5)

     # Starts empty
-    assert l1.rpc.listdatastore() == {'datastore': []}
-    assert l1.rpc.listdatastore('somekey') == {'datastore': []}
+    assert l1.rpc.listdatastore() == {"datastore": []}
+    assert l1.rpc.listdatastore("somekey") == {"datastore": []}

     # Add entries.
-    somedata = b'somedata'.hex()
-    somedata_expect = {'key': ['somekey'],
-                       'generation': 0,
-                       'hex': somedata,
-                       'string': 'somedata'}
-    assert l1.rpc.datastore(key='somekey', hex=somedata) == somedata_expect
-
-    assert l1.rpc.listdatastore() == {'datastore': [somedata_expect]}
-    assert l1.rpc.listdatastore('somekey') == {'datastore': [somedata_expect]}
-    assert l1.rpc.listdatastore('otherkey') == {'datastore': []}
+    somedata = b"somedata".hex()
+    somedata_expect = {
+        "key": ["somekey"],
+        "generation": 0,
+        "hex": somedata,
+        "string": "somedata",
+    }
+    assert l1.rpc.datastore(key="somekey", hex=somedata) == somedata_expect
+
+    assert l1.rpc.listdatastore() == {"datastore": [somedata_expect]}
+    assert l1.rpc.listdatastore("somekey") == {"datastore": [somedata_expect]}
+    assert l1.rpc.listdatastore("otherkey") == {"datastore": []}

     # Cannot add by default.
-    with pytest.raises(RpcError, match='already exists'):
-        l1.rpc.datastore(key='somekey', hex=somedata)
+    with pytest.raises(RpcError, match="already exists"):
+        l1.rpc.datastore(key="somekey", hex=somedata)

-    with pytest.raises(RpcError, match='already exists'):
-        l1.rpc.datastore(key='somekey', hex=somedata, mode="must-create")
+    with pytest.raises(RpcError, match="already exists"):
+        l1.rpc.datastore(key="somekey", hex=somedata, mode="must-create")

     # But can insist on replace.
-    l1.rpc.datastore(key='somekey', hex=somedata[:-4], mode="must-replace")
-    assert only_one(l1.rpc.listdatastore('somekey')['datastore'])['hex'] == somedata[:-4]
+    l1.rpc.datastore(key="somekey", hex=somedata[:-4], mode="must-replace")
+    assert (
+        only_one(l1.rpc.listdatastore("somekey")["datastore"])["hex"] == somedata[:-4]
+    )

     # And append works.
-    l1.rpc.datastore(key='somekey', hex=somedata[-4:-2], mode="must-append")
-    assert only_one(l1.rpc.listdatastore('somekey')['datastore'])['hex'] == somedata[:-2]
-    l1.rpc.datastore(key='somekey', hex=somedata[-2:], mode="create-or-append")
-    assert only_one(l1.rpc.listdatastore('somekey')['datastore'])['hex'] == somedata
+    l1.rpc.datastore(key="somekey", hex=somedata[-4:-2], mode="must-append")
+    assert (
+        only_one(l1.rpc.listdatastore("somekey")["datastore"])["hex"] == somedata[:-2]
+    )
+    l1.rpc.datastore(key="somekey", hex=somedata[-2:], mode="create-or-append")
+    assert only_one(l1.rpc.listdatastore("somekey")["datastore"])["hex"] == somedata

     # Generation will have increased due to three ops above.
-    somedata_expect['generation'] += 3
-    assert l1.rpc.listdatastore() == {'datastore': [somedata_expect]}
+    somedata_expect["generation"] += 3
+    assert l1.rpc.listdatastore() == {"datastore": [somedata_expect]}

     # Can't replace or append non-existing records if we say not to
-    with pytest.raises(RpcError, match='does not exist'):
-        l1.rpc.datastore(key='otherkey', hex=somedata, mode="must-replace")
-
-    with pytest.raises(RpcError, match='does not exist'):
-        l1.rpc.datastore(key='otherkey', hex=somedata, mode="must-append")
-
-    otherdata = b'otherdata'.hex()
-    otherdata_expect = {'key': ['otherkey'],
-                        'generation': 0,
-                        'hex': otherdata,
-                        'string': 'otherdata'}
-    assert l1.rpc.datastore(key='otherkey', string='otherdata', mode="create-or-append") == otherdata_expect
-
-    assert l1.rpc.listdatastore('somekey') == {'datastore': [somedata_expect]}
-    assert l1.rpc.listdatastore('otherkey') == {'datastore': [otherdata_expect]}
-    assert l1.rpc.listdatastore('badkey') == {'datastore': []}
+    with pytest.raises(RpcError, match="does not exist"):
+        l1.rpc.datastore(key="otherkey", hex=somedata, mode="must-replace")
+
+    with pytest.raises(RpcError, match="does not exist"):
+        l1.rpc.datastore(key="otherkey", hex=somedata, mode="must-append")
+
+    otherdata = b"otherdata".hex()
+    otherdata_expect = {
+        "key": ["otherkey"],
+        "generation": 0,
+        "hex": otherdata,
+        "string": "otherdata",
+    }
+    assert (
+        l1.rpc.datastore(key="otherkey", string="otherdata", mode="create-or-append")
+        == otherdata_expect
+    )
+
+    assert l1.rpc.listdatastore("somekey") == {"datastore": [somedata_expect]}
+    assert l1.rpc.listdatastore("otherkey") == {"datastore": [otherdata_expect]}
+    assert l1.rpc.listdatastore("badkey") == {"datastore": []}

     ds = l1.rpc.listdatastore()
     # Order is undefined!
-    assert (ds == {'datastore': [somedata_expect, otherdata_expect]}
-            or ds == {'datastore': [otherdata_expect, somedata_expect]})
+    assert ds == {"datastore": [somedata_expect, otherdata_expect]} or ds == {
+        "datastore": [otherdata_expect, somedata_expect]
+    }

-    assert l1.rpc.deldatastore('somekey') == somedata_expect
-    assert l1.rpc.listdatastore() == {'datastore': [otherdata_expect]}
-    assert l1.rpc.listdatastore('somekey') == {'datastore': []}
-    assert l1.rpc.listdatastore('otherkey') == {'datastore': [otherdata_expect]}
-    assert l1.rpc.listdatastore('badkey') == {'datastore': []}
-    assert l1.rpc.listdatastore() == {'datastore': [otherdata_expect]}
+    assert l1.rpc.deldatastore("somekey") == somedata_expect
+    assert l1.rpc.listdatastore() == {"datastore": [otherdata_expect]}
+    assert l1.rpc.listdatastore("somekey") == {"datastore": []}
+    assert l1.rpc.listdatastore("otherkey") == {"datastore": [otherdata_expect]}
+    assert l1.rpc.listdatastore("badkey") == {"datastore": []}
+    assert l1.rpc.listdatastore() == {"datastore": [otherdata_expect]}

     # if it's not a string, won't print
-    badstring_expect = {'key': ['badstring'],
-                        'generation': 0,
-                        'hex': '00'}
-    assert l1.rpc.datastore(key='badstring', hex='00') == badstring_expect
-    assert l1.rpc.listdatastore('badstring') == {'datastore': [badstring_expect]}
-    assert l1.rpc.deldatastore('badstring') == badstring_expect
+    badstring_expect = {"key": ["badstring"], "generation": 0, "hex": "00"}
+    assert l1.rpc.datastore(key="badstring", hex="00") == badstring_expect
+    assert l1.rpc.listdatastore("badstring") == {"datastore": [badstring_expect]}
+    assert l1.rpc.deldatastore("badstring") == badstring_expect

     # It's persistent
     l1.restart()

-    assert l1.rpc.listdatastore() == {'datastore': [otherdata_expect]}
+    assert l1.rpc.listdatastore() == {"datastore": [otherdata_expect]}

     # We can insist generation match on update.
-    with pytest.raises(RpcError, match='generation is different'):
-        l1.rpc.datastore(key='otherkey', hex='00', mode='must-replace',
-                         generation=otherdata_expect['generation'] + 1)
-
-    otherdata_expect['generation'] += 1
-    otherdata_expect['string'] += 'a'
-    otherdata_expect['hex'] += '61'
-    assert (l1.rpc.datastore(key='otherkey', string='otherdataa',
-                             mode='must-replace',
-                             generation=otherdata_expect['generation'] - 1)
-            == otherdata_expect)
-    assert l1.rpc.listdatastore() == {'datastore': [otherdata_expect]}
+    with pytest.raises(RpcError, match="generation is different"):
+        l1.rpc.datastore(
+            key="otherkey",
+            hex="00",
+            mode="must-replace",
+            generation=otherdata_expect["generation"] + 1,
+        )
+
+    otherdata_expect["generation"] += 1
+    otherdata_expect["string"] += "a"
+    otherdata_expect["hex"] += "61"
+    assert (
+        l1.rpc.datastore(
+            key="otherkey",
+            string="otherdataa",
+            mode="must-replace",
+            generation=otherdata_expect["generation"] - 1,
+        )
+        == otherdata_expect
+    )
+    assert l1.rpc.listdatastore() == {"datastore": [otherdata_expect]}

     # We can insist generation match on delete.
-    with pytest.raises(RpcError, match='generation is different'):
-        l1.rpc.deldatastore(key='otherkey',
-                            generation=otherdata_expect['generation'] + 1)
+    with pytest.raises(RpcError, match="generation is different"):
+        l1.rpc.deldatastore(
+            key="otherkey", generation=otherdata_expect["generation"] + 1
+        )

-    assert (l1.rpc.deldatastore(key='otherkey',
-                                generation=otherdata_expect['generation'])
-            == otherdata_expect)
-    assert l1.rpc.listdatastore() == {'datastore': []}
+    assert (
+        l1.rpc.deldatastore(key="otherkey", generation=otherdata_expect["generation"])
+        == otherdata_expect
+    )
+    assert l1.rpc.listdatastore() == {"datastore": []}


 def test_upgrade(node_factory):
     l1 = node_factory.get_node()
-    datastore = shelve.open(os.path.join(l1.daemon.lightning_dir, 'regtest', 'datastore.dat'), 'c')
-    datastore['foo'] = b'foodata'
-    datastore['bar'] = b'bardata'
+    datastore = shelve.open(
+        os.path.join(l1.daemon.lightning_dir, "regtest", "datastore.dat"), "c"
+    )
+    datastore["foo"] = b"foodata"
+    datastore["bar"] = b"bardata"
     datastore.close()

     # This "fails" because it unloads itself.
@@ -133,83 +156,108 @@ def test_upgrade(node_factory):
         pass

     # There's no upgrade if there's a real datastore.
-    if l1.daemon.is_in_log('there is a real datastore command'):
+    if l1.daemon.is_in_log("there is a real datastore command"):
         return

-    l1.daemon.wait_for_log('Upgrading store to have generation numbers')
-    wait_for(lambda: not os.path.exists(os.path.join(l1.daemon.lightning_dir,
-                                                     'regtest',
-                                                     'datastore.dat')))
+    l1.daemon.wait_for_log("Upgrading store to have generation numbers")
+    wait_for(
+        lambda: not os.path.exists(
+            os.path.join(l1.daemon.lightning_dir, "regtest", "datastore.dat")
+        )
+    )

-    vals = l1.rpc.listdatastore()['datastore']
-    assert vals == [{'key': ['bar'],
-                     'generation': 0,
-                     'hex': b'bardata'.hex(),
-                     'string': 'bardata'},
-                    {'key': ['foo'],
-                     'generation': 0,
-                     'hex': b'foodata'.hex(),
-                     'string': 'foodata'}]
+    vals = l1.rpc.listdatastore()["datastore"]
+    assert vals == [
+        {"key": ["bar"], "generation": 0, "hex": b"bardata".hex(), "string": "bardata"},
+        {"key": ["foo"], "generation": 0, "hex": b"foodata".hex(), "string": "foodata"},
+    ]


 def test_datastore_keylist(node_factory):
-    l1 = node_factory.get_node(options={'plugin': plugin_path})
+    l1 = node_factory.get_node(options={"plugin": plugin_path})

     time.sleep(5)

     # Starts empty
-    assert l1.rpc.listdatastore() == {'datastore': []}
-    assert l1.rpc.listdatastore(['a']) == {'datastore': []}
-    assert l1.rpc.listdatastore(['a', 'b']) == {'datastore': []}
+    assert l1.rpc.listdatastore() == {"datastore": []}
+    assert l1.rpc.listdatastore(["a"]) == {"datastore": []}
+    assert l1.rpc.listdatastore(["a", "b"]) == {"datastore": []}

     # Cannot add child to existing!
-    l1.rpc.datastore(key='a', string='aval')
-    with pytest.raises(RpcError, match=r'1206.*Parent key \[a\] exists'):
-        l1.rpc.datastore(key=['a', 'b'], string='abval',
-                         mode='create-or-replace')
+    l1.rpc.datastore(key="a", string="aval")
+    with pytest.raises(RpcError, match=r"1206.*Parent key \[a\] exists"):
+        l1.rpc.datastore(key=["a", "b"], string="abval", mode="create-or-replace")

     # Listing subkey gives DNE.
-    assert l1.rpc.listdatastore(['a', 'b']) == {'datastore': []}
-    l1.rpc.deldatastore(key=['a'])
+    assert l1.rpc.listdatastore(["a", "b"]) == {"datastore": []}
+    l1.rpc.deldatastore(key=["a"])

     # Create child key.
-    l1.rpc.datastore(key=['a', 'b'], string='abval')
-    assert l1.rpc.listdatastore() == {'datastore': [{'key': ['a']}]}
-    assert l1.rpc.listdatastore(key=['a']) == {'datastore': [{'key': ['a', 'b'],
-                                                              'generation': 0,
-                                                              'string': 'abval',
-                                                              'hex': b'abval'.hex()}]}
+    l1.rpc.datastore(key=["a", "b"], string="abval")
+    assert l1.rpc.listdatastore() == {"datastore": [{"key": ["a"]}]}
+    assert l1.rpc.listdatastore(key=["a"]) == {
+        "datastore": [
+            {
+                "key": ["a", "b"],
+                "generation": 0,
+                "string": "abval",
+                "hex": b"abval".hex(),
+            }
+        ]
+    }

     # Cannot create key over that
-    with pytest.raises(RpcError, match='has children'):
-        l1.rpc.datastore(key='a', string='aval', mode='create-or-replace')
+    with pytest.raises(RpcError, match="has children"):
+        l1.rpc.datastore(key="a", string="aval", mode="create-or-replace")

     # Can create another key.
-    l1.rpc.datastore(key=['a', 'b2'], string='ab2val')
-    assert l1.rpc.listdatastore() == {'datastore': [{'key': ['a']}]}
-    assert l1.rpc.listdatastore(key=['a']) == {'datastore': [{'key': ['a', 'b'],
-                                                              'string': 'abval',
-                                                              'generation': 0,
-                                                              'hex': b'abval'.hex()},
-                                                             {'key': ['a', 'b2'],
-                                                              'string': 'ab2val',
-                                                              'generation': 0,
-                                                              'hex': b'ab2val'.hex()}]}
+    l1.rpc.datastore(key=["a", "b2"], string="ab2val")
+    assert l1.rpc.listdatastore() == {"datastore": [{"key": ["a"]}]}
+    assert l1.rpc.listdatastore(key=["a"]) == {
+        "datastore": [
+            {
+                "key": ["a", "b"],
+                "string": "abval",
+                "generation": 0,
+                "hex": b"abval".hex(),
+            },
+            {
+                "key": ["a", "b2"],
+                "string": "ab2val",
+                "generation": 0,
+                "hex": b"ab2val".hex(),
+            },
+        ]
+    }

     # Can create subkey.
-    l1.rpc.datastore(key=['a', 'b3', 'c'], string='ab2val')
-    assert l1.rpc.listdatastore() == {'datastore': [{'key': ['a']}]}
-    assert l1.rpc.listdatastore(key=['a']) == {'datastore': [{'key': ['a', 'b'],
-                                                              'string': 'abval',
-                                                              'generation': 0,
-                                                              'hex': b'abval'.hex()},
-                                                             {'key': ['a', 'b2'],
-                                                              'string': 'ab2val',
-                                                              'generation': 0,
-                                                              'hex': b'ab2val'.hex()},
-                                                             {'key': ['a', 'b3']}]}
+    l1.rpc.datastore(key=["a", "b3", "c"], string="ab2val")
+    assert l1.rpc.listdatastore() == {"datastore": [{"key": ["a"]}]}
+    assert l1.rpc.listdatastore(key=["a"]) == {
+        "datastore": [
+            {
+                "key": ["a", "b"],
+                "string": "abval",
+                "generation": 0,
+                "hex": b"abval".hex(),
+            },
+            {
+                "key": ["a", "b2"],
+                "string": "ab2val",
+                "generation": 0,
+                "hex": b"ab2val".hex(),
+            },
+            {"key": ["a", "b3"]},
+        ]
+    }

     # Can update subkey
-    l1.rpc.datastore(key=['a', 'b3', 'c'], string='2', mode='must-append')
-    assert l1.rpc.listdatastore(key=['a', 'b3', 'c']) == {'datastore': [{'key': ['a', 'b3', 'c'],
-                                                                         'string': 'ab2val2',
-                                                                         'generation': 1,
-                                                                         'hex': b'ab2val2'.hex()}]}
+    l1.rpc.datastore(key=["a", "b3", "c"], string="2", mode="must-append")
+    assert l1.rpc.listdatastore(key=["a", "b3", "c"]) == {
+        "datastore": [
+            {
+                "key": ["a", "b3", "c"],
+                "string": "ab2val2",
+                "generation": 1,
+                "hex": b"ab2val2".hex(),
+            }
+        ]
+    }
diff --git a/donations/donations.py b/donations/donations.py
index f20a19917..e4551fbfb 100755
--- a/donations/donations.py
+++ b/donations/donations.py
@@ -1,5 +1,5 @@
 #!/usr/bin/env python3
-""" A small donation service so that users can request ln invoices
+"""A small donation service so that users can request ln invoices

 This plugin spins up a small flask server that provides a form to users
 who wish to donate some money to the owner of the lightning
@@ -15,6 +15,7 @@

 LICENSE: MIT / APACHE
 """
+
 import base64
 import multiprocessing
 import qrcode
@@ -34,11 +35,14 @@


 class DonationForm(FlaskForm):
-    """Form for donations """
-    amount = IntegerField("Enter how many Satoshis you want to donate!",
-                          validators=[DataRequired(), NumberRange(min=1, max=16666666)])
+    """Form for donations"""
+
+    amount = IntegerField(
+        "Enter how many Satoshis you want to donate!",
+        validators=[DataRequired(), NumberRange(min=1, max=16666666)],
+    )
     description = StringField("Leave a comment (displayed publicly)")
-    submit = SubmitField('Donate')
+    submit = SubmitField("Donate")


 def make_base64_qr_code(bolt11):
@@ -94,18 +98,26 @@ def donation_form():
             donations.append((ts, satoshis, description))

     if b11 is not None:
-        return render_template("donation.html", donations=sorted(donations, reverse=True), form=form, bolt11=b11, qr=qr, label=label)
+        return render_template(
+            "donation.html",
+            donations=sorted(donations, reverse=True),
+            form=form,
+            bolt11=b11,
+            qr=qr,
+            label=label,
+        )
     else:
-        return render_template("donation.html", donations=sorted(donations, reverse=True), form=form)
+        return render_template(
+            "donation.html", donations=sorted(donations, reverse=True), form=form
+        )


 def worker(port):
     app = Flask(__name__)
     # FIXME: use hexlified hsm secret or something else
-    app.config['SECRET_KEY'] = 'you-will-never-guess-this'
-    app.add_url_rule('/donation', 'donation',
-                     donation_form, methods=["GET", "POST"])
-    app.add_url_rule('/is_invoice_paid/