ArchiveBox/archivebox/misc/system.py

__package__ = 'archivebox.misc'
import os
import signal
import sys
import shutil
from json import dump
from pathlib import Path
from typing import Optional, Union, Set, Tuple
from subprocess import _mswindows, PIPE, Popen, CalledProcessError, CompletedProcess, TimeoutExpired
from crontab import CronTab
from atomicwrites import atomic_write as lib_atomic_write
from archivebox.config.common import STORAGE_CONFIG
from archivebox.misc.util import enforce_types, ExtendedEncoder


def run(cmd, *args, input=None, capture_output=True, timeout=None, check=False, text=False, start_new_session=True, **kwargs):
"""Patched of subprocess.run to kill forked child subprocesses and fix blocking io making timeout=innefective
Mostly copied from https://github.com/python/cpython/blob/master/Lib/subprocess.py
"""
cmd = [str(arg) for arg in cmd]
if input is not None:
if kwargs.get('stdin') is not None:
raise ValueError('stdin and input arguments may not both be used.')
kwargs['stdin'] = PIPE
if capture_output:
if ('stdout' in kwargs) or ('stderr' in kwargs):
raise ValueError('stdout and stderr arguments may not be used with capture_output.')
kwargs['stdout'] = PIPE
kwargs['stderr'] = PIPE
pgid = None
try:
if isinstance(cmd, (list, tuple)) and cmd[0].endswith('.py'):
PYTHON_BINARY = sys.executable
cmd = (PYTHON_BINARY, *cmd)
with Popen(cmd, *args, start_new_session=start_new_session, text=text, **kwargs) as process:
pgid = os.getpgid(process.pid)
try:
stdout, stderr = process.communicate(input, timeout=timeout)
except TimeoutExpired as exc:
process.kill()
if _mswindows:
# Windows accumulates the output in a single blocking
# read() call run on child threads, with the timeout
# being done in a join() on those threads. communicate()
# _after_ kill() is required to collect that and add it
# to the exception.
exc.stdout, exc.stderr = process.communicate()
else:
# POSIX _communicate already populated the output so
# far into the TimeoutExpired exception.
process.wait()
raise
except: # Including KeyboardInterrupt, communicate handled that.
process.kill()
# We don't call process.wait() as .__exit__ does that for us.
raise
retcode = process.poll()
if check and retcode:
raise CalledProcessError(retcode, process.args,
output=stdout, stderr=stderr)
finally:
# force kill any straggler subprocesses that were forked from the main proc
try:
os.killpg(pgid, signal.SIGINT)
except Exception:
pass
return CompletedProcess(process.args, retcode, stdout, stderr)
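

# Illustrative usage sketch for run(): the command, timeout, and check values below are
# placeholder examples (assumes a POSIX environment with `echo` on PATH), not anything
# ArchiveBox itself invokes here.
def _example_run_usage():
    # capture_output=True is the default, so stdout/stderr come back on the
    # CompletedProcess (as bytes unless text=True is passed through to Popen)
    result = run(['echo', 'hello'], timeout=10, text=True)
    print(result.returncode)      # 0 on success
    print(result.stdout.strip())  # 'hello'

    # check=True raises CalledProcessError on a non-zero exit code; timeout= raises
    # TimeoutExpired, and any forked children are cleaned up by the killpg in finally
    return run([sys.executable, '--version'], timeout=30, check=True, text=True)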


@enforce_types
def atomic_write(path: Union[Path, str], contents: Union[dict, str, bytes], overwrite: bool=True) -> None:
"""Safe atomic write to filesystem by writing to temp file + atomic rename"""
mode = 'wb+' if isinstance(contents, bytes) else 'w'
encoding = None if isinstance(contents, bytes) else 'utf-8' # enforce utf-8 on all text writes
# print('\n> Atomic Write:', mode, path, len(contents), f'overwrite={overwrite}')
try:
with lib_atomic_write(path, mode=mode, overwrite=overwrite, encoding=encoding) as f:
if isinstance(contents, dict):
dump(contents, f, indent=4, sort_keys=True, cls=ExtendedEncoder)
elif isinstance(contents, (bytes, str)):
f.write(contents)
except OSError as e:
if STORAGE_CONFIG.ENFORCE_ATOMIC_WRITES:
print(f"[X] OSError: Failed to write {path} with fcntl.F_FULLFSYNC. ({e})")
print(" You can store the archive/ subfolder on a hard drive or network share that doesn't support support syncronous writes,")
print(" but the main folder containing the index.sqlite3 and ArchiveBox.conf files must be on a filesystem that supports FSYNC.")
raise SystemExit(1)
# retry the write without forcing FSYNC (aka atomic mode)
with open(path, mode=mode, encoding=encoding) as f:
if isinstance(contents, dict):
dump(contents, f, indent=4, sort_keys=True, cls=ExtendedEncoder)
elif isinstance(contents, (bytes, str)):
f.write(contents)
# set file permissions
os.chmod(path, int(STORAGE_CONFIG.OUTPUT_PERMISSIONS, base=8))
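

# Illustrative usage sketch for atomic_write() (the paths below are throwaway temp-dir
# examples, not real ArchiveBox output paths): dict contents are serialized to JSON via
# ExtendedEncoder, str/bytes contents are written verbatim.
def _example_atomic_write_usage():
    import tempfile
    tmpdir = Path(tempfile.mkdtemp())

    # dict contents -> indented, key-sorted JSON
    atomic_write(tmpdir / 'index.json', {'url': 'https://example.com', 'num_links': 1})

    # str contents are forced to utf-8, bytes are written as-is
    atomic_write(tmpdir / 'notes.txt', 'plain text contents\n')
    atomic_write(tmpdir / 'blob.bin', b'\x00\x01\x02')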


@enforce_types
def chmod_file(path: str, cwd: str='') -> None:
"""chmod -R <permissions> <cwd>/<path>"""
root = Path(cwd or os.getcwd()) / path
if not root.exists():
raise Exception('Failed to chmod: {} does not exist (did the previous step fail?)'.format(path))
if not root.is_dir():
# path is just a plain file
os.chmod(root, int(STORAGE_CONFIG.OUTPUT_PERMISSIONS, base=8))
else:
        for subpath in root.glob('**/*'):
if subpath.is_dir():
# directories need execute permissions to be able to list contents
os.chmod(subpath, int(STORAGE_CONFIG.DIR_OUTPUT_PERMISSIONS, base=8))
else:
os.chmod(subpath, int(STORAGE_CONFIG.OUTPUT_PERMISSIONS, base=8))
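

# Illustrative usage sketch for chmod_file() (the snapshot dir name is a placeholder):
# recursively applies STORAGE_CONFIG.OUTPUT_PERMISSIONS to files and
# STORAGE_CONFIG.DIR_OUTPUT_PERMISSIONS to directories underneath <cwd>/<path>.
def _example_chmod_file_usage(snapshot_dir: str = 'archive/1234567890'):
    chmod_file(snapshot_dir, cwd=str(Path.cwd()))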


@enforce_types
def copy_and_overwrite(from_path: Union[str, Path], to_path: Union[str, Path]):
"""copy a given file or directory to a given path, overwriting the destination"""
if Path(from_path).is_dir():
shutil.rmtree(to_path, ignore_errors=True)
shutil.copytree(from_path, to_path)
else:
with open(from_path, 'rb') as src:
contents = src.read()
atomic_write(to_path, contents)
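

# Illustrative usage sketch for copy_and_overwrite() (source/destination paths are
# placeholders): directories are replaced wholesale via rmtree + copytree, while single
# files are re-written through atomic_write().
def _example_copy_and_overwrite_usage():
    copy_and_overwrite('archive/1234567890', 'archive/1234567890.bak')
    copy_and_overwrite('ArchiveBox.conf', 'ArchiveBox.conf.bak')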


@enforce_types
def get_dir_size(path: Union[str, Path], recursive: bool=True, pattern: Optional[str]=None) -> Tuple[int, int, int]:
"""get the total disk size of a given directory, optionally summing up
recursively and limiting to a given filter list
"""
num_bytes, num_dirs, num_files = 0, 0, 0
try:
for entry in os.scandir(path):
if (pattern is not None) and (pattern not in entry.path):
continue
if entry.is_dir(follow_symlinks=False):
if not recursive:
continue
num_dirs += 1
bytes_inside, dirs_inside, files_inside = get_dir_size(entry.path)
num_bytes += bytes_inside
num_dirs += dirs_inside
num_files += files_inside
else:
num_bytes += entry.stat(follow_symlinks=False).st_size
num_files += 1
except OSError:
# e.g. FileNameTooLong or other error while trying to read dir
pass
return num_bytes, num_dirs, num_files
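

# Illustrative usage sketch for get_dir_size() (directory and pattern values are
# placeholders): note that pattern= is a plain substring match against each entry's
# path, not a glob, and the recursive calls do not re-apply it.
def _example_get_dir_size_usage():
    num_bytes, num_dirs, num_files = get_dir_size('archive', recursive=True)
    print(f'{num_files} files in {num_dirs} dirs, {num_bytes / 1024 / 1024:.1f} MB total')

    warc_bytes, _, _ = get_dir_size('archive', pattern='warc')
    return num_bytes, warc_bytes

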
CRON_COMMENT = 'archivebox_schedule'


@enforce_types
def dedupe_cron_jobs(cron: CronTab) -> CronTab:
deduped: Set[Tuple[str, str]] = set()
for job in list(cron):
unique_tuple = (str(job.slices), str(job.command))
if unique_tuple not in deduped:
deduped.add(unique_tuple)
cron.remove(job)
for schedule, command in deduped:
job = cron.new(command=command, comment=CRON_COMMENT)
job.setall(schedule)
job.enable()
return cron
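

# Illustrative usage sketch for dedupe_cron_jobs() (the schedule and command are
# placeholder examples; assumes the python-crontab package can read and write the
# current user's crontab on this system).
def _example_dedupe_cron_jobs_usage():
    cron = CronTab(user=True)

    job = cron.new(command='archivebox add --depth=1 https://example.com/feed.xml', comment=CRON_COMMENT)
    job.setall('0 0 * * *')

    # collapse duplicate (schedule, command) pairs so only one enabled job per unique pair remains
    cron = dedupe_cron_jobs(cron)
    cron.write()
    return cron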


class suppress_output(object):
"""
A context manager for doing a "deep suppression" of stdout and stderr in
Python, i.e. will suppress all print, even if the print originates in a
compiled C/Fortran sub-function.
This will not suppress raised exceptions, since exceptions are printed
to stderr just before a script exits, and after the context manager has
exited (at least, I think that is why it lets exceptions through).
    with suppress_output():
rogue_function()
"""
def __init__(self, stdout=True, stderr=True):
# Open a pair of null files
# Save the actual stdout (1) and stderr (2) file descriptors.
self.stdout, self.stderr = stdout, stderr
if stdout:
self.null_stdout = os.open(os.devnull, os.O_RDWR)
self.real_stdout = os.dup(1)
if stderr:
self.null_stderr = os.open(os.devnull, os.O_RDWR)
self.real_stderr = os.dup(2)
def __enter__(self):
# Assign the null pointers to stdout and stderr.
if self.stdout:
os.dup2(self.null_stdout, 1)
if self.stderr:
os.dup2(self.null_stderr, 2)
def __exit__(self, *_):
# Re-assign the real stdout/stderr back to (1) and (2)
if self.stdout:
os.dup2(self.real_stdout, 1)
os.close(self.null_stdout)
if self.stderr:
os.dup2(self.real_stderr, 2)
os.close(self.null_stderr)
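

# Illustrative usage sketch for suppress_output() (the prints are placeholders): because
# file descriptors 1 and 2 are redirected to /dev/null, output from compiled C extensions
# and child processes is silenced too, not just Python-level prints.
def _example_suppress_output_usage():
    with suppress_output(stdout=True, stderr=True):
        print('this never reaches the terminal')
    print('stdout/stderr are restored once the context manager exits')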