context manager and subsystem work

Author: John Lancaster
Date: 2024-10-20 20:56:18 +00:00
parent 9121d1ef04
commit 8064a73e9f
3 changed files with 206 additions and 19 deletions


@@ -0,0 +1,153 @@
import asyncio
import functools
import logging
import signal
from concurrent.futures import ThreadPoolExecutor
from contextlib import ExitStack, contextmanager
from threading import Event, Lock
from time import sleep
from typing import Any, Callable

logger = logging.getLogger()


def handler(signum, frame):
    # Scratch signal handler; not registered anywhere in this module.
    print('Signal handler called with signal', signum)
    raise OSError("Couldn't open device!")


class AppDaemonRunContext:
    """Owns the event loop and thread pool for a run of AppDaemon.

    Entering the context sets up the loop, signal handlers, and executor;
    exiting unwinds them in reverse order via the ExitStack.
    """

    _stack: ExitStack
    loop: asyncio.AbstractEventLoop
    executor: ThreadPoolExecutor
    stop_event: Event
    shutdown_lock: Lock

    def __init__(self):
        self._stack = ExitStack()
        self.stop_event = Event()
        self.shutdown_lock = Lock()

    def __enter__(self):
        self.loop = self._stack.enter_context(self.asyncio_context())
        logger.debug("Entered asyncio context")

        # Bind each signal into its own lambda default so every handler
        # schedules shutdown() with the signal that actually fired.
        signals = (signal.SIGHUP, signal.SIGTERM, signal.SIGINT)
        for s in signals:
            self.loop.add_signal_handler(s, lambda s=s: self.loop.create_task(self.shutdown(s)))
        # self.loop.add_signal_handler(signal.SIGINT, self.handle_signal, signal.SIGINT)
        # self.loop.add_signal_handler(signal.SIGTERM, self.handle_signal)

        # self.executor = self._stack.enter_context(ThreadPoolExecutor(max_workers=5))
        self.executor = self._stack.enter_context(self.thread_context())
        logger.debug("Entered threadpool context")
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self._stack.__exit__(exc_type, exc_value, traceback)
        logger.debug("Exited context")

    async def shutdown(self, signal):
        """Cleanup tasks tied to the service's shutdown.

        https://www.roguelynn.com/words/asyncio-graceful-shutdowns/
        """
        logger.info(f"Received exit signal {signal.name}...")
        if not self.stop_event.is_set():
            logger.debug('Setting stop event')
            self.stop_event.set()

            # Cancel every task except the one running this coroutine.
            tasks = [
                t for t in asyncio.all_tasks()
                if t is not asyncio.current_task()
            ]
            for task in tasks:
                # logger.debug(f"Waiting on task to finish: {task.get_coro().__qualname__}")
                logger.debug(f"Cancelling {task.get_name()}: {task.get_coro().__qualname__}")
                task.cancel()

            logger.debug('Waiting for tasks to finish...')
            try:
                await asyncio.gather(*tasks, return_exceptions=True)
            except asyncio.CancelledError as e:
                logger.debug(f'Cancelled: {e}')

            logger.debug("Stopping event loop in context shutdown")
            self.loop.stop()
        else:
            logger.warning('Already started shutdown')

    @contextmanager
    def asyncio_context(self):
        try:
            # No loop is running yet in the main thread, so this creates one.
            loop = asyncio.get_event_loop()
            yield loop
        finally:
            loop.close()
            if loop.is_closed():
                logger.debug("Closed the event loop.")

    @contextmanager
    def thread_context(self):
        with ThreadPoolExecutor(max_workers=5) as executor:
            yield executor
        if executor._shutdown:
            logger.debug('Shut down the ThreadPoolExecutor')

    async def run_in_executor(
        self,
        func: Callable,
        *args,
        timeout: float | None = None,
        **kwargs,
    ) -> Any:
        """Run the sync function using the ThreadPoolExecutor and await the result."""
        timeout = timeout or 10.0
        coro = self.loop.run_in_executor(
            self.executor,
            functools.partial(func, **kwargs),
            *args,
        )
        try:
            return await asyncio.wait_for(coro, timeout)
        except asyncio.TimeoutError:
            logger.warning('Timed out')


if __name__ == "__main__":
    import logging
    import random
    from uuid import uuid4

    logging.basicConfig(level="DEBUG", format="{levelname:<8} {message}", style="{")

    def dummy_function(delay: float):
        id_ = uuid4().hex[:4]
        logger.info(f'{id_} sleeping for {delay:.1f}s')
        sleep(delay)
        logger.info(f'{id_} Done')

    async def async_dummy_function(delay: float):
        id_ = uuid4().hex[:4]
        logger.info(f'{id_} async sleeping for {delay:.1f}s')
        await asyncio.sleep(delay)
        logger.info(f'{id_} Done async')

    with AppDaemonRunContext() as cm:
        for _ in range(3):
            logger.info('Submitting random dummy_functions')
            cm.executor.submit(dummy_function, random.random() * 10.0)
            cm.loop.create_task(async_dummy_function(random.random() * 5.0))
        try:
            logger.info('Running until complete')
            cm.loop.run_until_complete(asyncio.gather(*asyncio.all_tasks(cm.loop)))
        except asyncio.CancelledError:
            logger.error('Cancelled')

    if cm.loop.is_closed():
        logger.debug('Loop is closed')
    if cm.executor._shutdown:
        logger.debug('Executor is shut down')
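
Note that the __main__ demo above drives the thread pool through cm.executor.submit directly and never calls run_in_executor. A minimal sketch of how that helper could be used, reusing sleep and logger from the module above; blocking_io and offload_example are illustrative names, not part of this commit:

# Sketch only (not part of this commit): offload a blocking call through the
# context's thread pool and await it with a timeout.
def blocking_io(delay: float) -> str:
    sleep(delay)
    return f'slept {delay:.1f}s'

async def offload_example(cm: AppDaemonRunContext) -> None:
    # run_in_executor returns None if the call does not finish within the timeout.
    result = await cm.run_in_executor(blocking_io, 0.5, timeout=2.0)
    if result is not None:
        logger.info(result)

with AppDaemonRunContext() as cm:
    cm.loop.run_until_complete(offload_example(cm))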

appdaemon/subsystem.py (new file, 146 lines added)

@@ -0,0 +1,146 @@
import asyncio
import logging
from dataclasses import dataclass, field
from logging import Logger, getLogger
from threading import Event, RLock
from typing import Callable, Coroutine

from appdaemon.models import AppDaemonConfig
from appdaemon.utils import read_config_file
from context_manager import AppDaemonRunContext


@dataclass
class ADSubsystem:
    """Base class for AppDaemon subsystems.

    Registers its start() with the parent AppDaemon and watches the shared
    stop event so every subsystem winds down together.
    """

    AD: "AppDaemon"
    stop: Event
    lock: RLock = field(default_factory=RLock)
    logger: Logger = field(init=False)

    def __post_init__(self) -> None:
        name = f'_{self.__class__.__name__.lower()}'
        self.logger = getLogger(f'AppDaemon.{name}')
        self.AD.starts.append(self.start)

    @property
    def stopping(self) -> bool:
        return self.stop.is_set()

    def start(self):
        raise NotImplementedError('Need to implement start for subsystem')


@dataclass
class Utility(ADSubsystem):
    loop_rate: float = 1.0

    def start(self):
        self.AD.create_task(self.loop(), 'Utility loop')

    async def loop(self):
        while not self.stopping:
            self.logger.info('Looping...')
            await asyncio.sleep(self.loop_rate)
        self.logger.debug('Stopped utility loop')


@dataclass
class Plugin(ADSubsystem):
    state: dict[str, int] = field(default_factory=dict)
    update_rate: float = 10.0

    def __post_init__(self) -> None:
        super().__post_init__()
        self.state['update_count'] = 0

    def start(self):
        self.AD.create_task(self.periodic_self_update(), 'Periodic plugin update')

    async def periodic_self_update(self):
        while not self.stopping:
            # Guard shared state with the subsystem lock before mutating it.
            with self.lock:
                self.state['update_count'] += 1
                self.logger.info(f'Updated self: {self.state["update_count"]}')
            await asyncio.sleep(self.update_rate)
        self.logger.debug('Stopped plugin updates')


@dataclass
class AppDaemon:
    cfg: AppDaemonConfig
    context: AppDaemonRunContext
    utility: Utility = field(init=False)
    plugins: dict[str, Plugin] = field(default_factory=dict)
    starts: list[Callable] = field(default_factory=list)

    def __post_init__(self) -> None:
        # Subsystems share the context's stop event so one signal stops them all.
        self.utility = Utility(self, self.context.stop_event)
        self.plugins['dummy'] = Plugin(self, self.context.stop_event)

    def create_task(self, coro: Coroutine, name: str | None = None):
        return self.context.loop.create_task(coro, name=name)

    def start(self):
        for start in self.starts:
            start()


@dataclass
class ADMain:
    config_file: str
    cfg: AppDaemonConfig = field(init=False)

    def __post_init__(self) -> None:
        raw_cfg = read_config_file(self.config_file)
        self.cfg = AppDaemonConfig(
            config_file=self.config_file,
            **raw_cfg['appdaemon']
        )

    def run(self):
        with AppDaemonRunContext() as cm:
            ad = AppDaemon(self.cfg, cm)
            ad.start()
            cm.loop.run_forever()


if __name__ == '__main__':
    import logging.config

    from rich.console import Console
    from rich.highlighter import NullHighlighter

    console = Console()
    logging.config.dictConfig(
        {
            'version': 1,
            'disable_existing_loggers': False,
            'formatters': {'basic': {'style': '{', 'format': '{message}'}},
            'handlers': {
                'rich': {
                    '()': 'rich.logging.RichHandler',
                    'formatter': 'basic',
                    'console': console,
                    'highlighter': NullHighlighter(),
                    'markup': True,
                }
            },
            'root': {'level': 'DEBUG', 'handlers': ['rich']},
        }
    )

    main = ADMain('/conf/ad-test/conf/appdaemon.yaml')
    main.run()

    # config_file = '/conf/ad-test/conf/appdaemon.yaml'
    # raw_cfg = read_config_file(config_file)
    # cfg = AppDaemonConfig(
    #     config_file=config_file,
    #     **raw_cfg['appdaemon']
    # )
    # with AppDaemonRunContext() as cm:
    #     ad = AppDaemon(cfg, cm)
    #     # ad.start()
    #     # cm.loop.run_forever()
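
subsystem.py imports AppDaemonConfig and read_config_file from appdaemon.models and appdaemon.utils, neither of which is part of this diff. A minimal, hypothetical stand-in for both, only so the module can be exercised in isolation; the project's real implementations will differ:

# Sketch only (not part of this commit): stand-ins for the two imports this
# diff does not include. Assumes a YAML config file and PyYAML installed.
from typing import Any

import yaml


def read_config_file(path: str) -> dict[str, Any]:
    # Parse the YAML config file into a plain dict.
    with open(path) as f:
        return yaml.safe_load(f)


class AppDaemonConfig:
    # Permissive stand-in: stores the config file path plus whatever keys
    # appear under the 'appdaemon:' section as attributes.
    def __init__(self, config_file: str, **settings: Any) -> None:
        self.config_file = config_file
        for key, value in settings.items():
            setattr(self, key, value)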