2021-03-05 18:03:23 +08:00
|
|
|
import importlib
|
|
|
|
import os
|
|
|
|
import signal
|
|
|
|
import time
|
|
|
|
import subprocess
|
|
|
|
from abc import ABC, abstractmethod
|
|
|
|
from multiprocessing import Process
|
|
|
|
|
|
|
|
from setproctitle import setproctitle # pylint: disable=no-name-in-module
|
|
|
|
|
|
|
|
import cereal.messaging as messaging
|
|
|
|
import selfdrive.crash as crash
|
|
|
|
from common.basedir import BASEDIR
|
|
|
|
from common.params import Params
|
2021-03-09 11:17:46 +08:00
|
|
|
from common.realtime import sec_since_boot
|
2021-03-05 18:03:23 +08:00
|
|
|
from selfdrive.swaglog import cloudlog
|
|
|
|
from selfdrive.hardware import HARDWARE
|
|
|
|
from cereal import log
|
|
|
|
|
2021-03-09 11:17:46 +08:00
|
|
|
WATCHDOG_FN = "/dev/shm/wd_"
|
|
|
|
ENABLE_WATCHDOG = os.getenv("NO_WATCHDOG") is None
|
|
|
|
|
2021-03-05 18:03:23 +08:00
|
|
|
|
|
|
|
def launcher(proc):
  """Entry point for a forked python child: import module `proc` and run its main()."""
  try:
    # bring in the module that implements the process
    module = importlib.import_module(proc)

    # give the child a recognizable name in ps/top
    setproctitle(proc)

    # the zmq context can't be shared across a fork; build a fresh one
    messaging.context = messaging.Context()

    # hand control over to the process's entry point
    module.main()
  except KeyboardInterrupt:
    cloudlog.warning("child %s got SIGINT" % proc)
  except Exception:
    # can't install the crash handler because sys.excepthook doesn't play nice
    # with threads, so catch it here.
    crash.capture_exception()
    raise
|
|
|
|
|
|
|
|
def nativelauncher(pargs, cwd):
  """Replace the current (forked) process image with the native command `pargs`, run from `cwd`."""
  # switch to the requested working directory first
  os.chdir(cwd)

  # exec never returns: the child becomes the native binary
  program = pargs[0]
  os.execvp(program, pargs)
|
|
|
|
|
|
|
|
def join_process(process, timeout):
  """Wait up to `timeout` seconds for `process` to exit.

  Process.join(timeout) can hang due to a python 3 bug
  (https://bugs.python.org/issue28382), so poll the exitcode instead.
  """
  deadline = time.monotonic() + timeout
  while process.exitcode is None and time.monotonic() < deadline:
    time.sleep(0.001)
|
|
|
|
|
|
|
|
class ManagerProcess(ABC):
  """Base class for a process supervised by the manager.

  Subclasses implement prepare() and start(); the shared stop/signal/watchdog
  machinery lives here.
  """

  # Class-level defaults; subclasses override these per instance.
  unkillable = False  # if True, failure to die escalates to a device reboot
  daemon = False
  sigkill = False  # if True, stop() sends SIGKILL instead of SIGINT
  proc = None  # multiprocessing.Process handle; None when not running
  enabled = True
  name = ""

  # Watchdog state: the child is expected to write a timestamp (presumably
  # nanoseconds, given the 1e9 divide below — TODO confirm against writer) to
  # WATCHDOG_FN + pid; check_watchdog() restarts the process when it goes stale.
  last_watchdog_time = 0
  watchdog_max_dt = None  # max allowed staleness in seconds; None disables the watchdog
  watchdog_seen = False  # becomes True once a fresh watchdog update was observed

  @abstractmethod
  def prepare(self):
    """Do any pre-start work (e.g. pre-importing the module)."""
    pass

  @abstractmethod
  def start(self):
    """Start the underlying process."""
    pass

  def restart(self):
    """Stop and immediately restart the process."""
    self.stop()
    self.start()

  def check_watchdog(self, started):
    """Restart the process if its watchdog timestamp is stale.

    Restarts only while offroad (started is False), only when ENABLE_WATCHDOG,
    and only after at least one fresh update has been seen since start.
    """
    if self.watchdog_max_dt is None or self.proc is None:
      return

    try:
      fn = WATCHDOG_FN + str(self.proc.pid)
      self.last_watchdog_time = int(open(fn).read())
    except Exception:
      # the file may not exist yet (process just started) — keep the last value
      pass

    dt = sec_since_boot() - self.last_watchdog_time / 1e9

    if dt > self.watchdog_max_dt:
      # Only restart while offroad for now
      if self.watchdog_seen and ENABLE_WATCHDOG and (not started):
        cloudlog.error(f"Watchdog timeout for {self.name} (exitcode {self.proc.exitcode}) restarting")
        self.restart()
    else:
      self.watchdog_seen = True

  def stop(self, retry=True):
    """Stop the process, escalating SIGINT/SIGKILL -> SIGKILL (or reboot when
    unkillable). Returns the exitcode, or None if it is still alive."""
    if self.proc is None:
      return

    cloudlog.info(f"killing {self.name}")

    if self.proc.exitcode is None:
      sig = signal.SIGKILL if self.sigkill else signal.SIGINT
      self.signal(sig)

      join_process(self.proc, 5)

      # If process failed to die send SIGKILL or reboot
      if self.proc.exitcode is None and retry:
        if self.unkillable:
          cloudlog.critical(f"unkillable process {self.name} failed to exit! rebooting in 15 if it doesn't die")
          join_process(self.proc, 15)

          if self.proc.exitcode is None:
            cloudlog.critical(f"unkillable process {self.name} failed to die!")
            # leave a breadcrumb on disk before forcing a device reboot
            os.system("date >> /data/unkillable_reboot")
            os.sync()
            HARDWARE.reboot()
            raise RuntimeError
        else:
          cloudlog.info(f"killing {self.name} with SIGKILL")
          self.signal(signal.SIGKILL)
          self.proc.join()

    ret = self.proc.exitcode
    cloudlog.info(f"{self.name} is dead with {ret}")

    # only forget the handle once the process has actually exited
    if self.proc.exitcode is not None:
      self.proc = None

    return ret

  def signal(self, sig):
    """Send `sig` to the process, unless it isn't running or already exited."""
    if self.proc is None:
      return

    # Don't signal if already exited
    if self.proc.exitcode is not None and self.proc.pid is not None:
      return

    cloudlog.info(f"sending signal {sig} to {self.name}")
    os.kill(self.proc.pid, sig)

  def get_process_state_msg(self):
    """Build a cereal ManagerState.ProcessState message for this process."""
    state = log.ManagerState.ProcessState.new_message()
    state.name = self.name
    if self.proc:
      state.running = self.proc.is_alive()
      state.pid = self.proc.pid or 0
      state.exitCode = self.proc.exitcode or 0
    return state
|
|
|
|
|
|
|
|
class NativeProcess(ManagerProcess):
  """A managed process that chdirs and execs a native binary via nativelauncher()."""

  def __init__(self, name, cwd, cmdline, enabled=True, persistent=False, driverview=False, unkillable=False, sigkill=False, watchdog_max_dt=None):
    self.name = name
    self.cwd = cwd          # working directory, relative to BASEDIR
    self.cmdline = cmdline  # argv list for the native binary
    self.enabled = enabled
    self.persistent = persistent
    self.driverview = driverview
    self.unkillable = unkillable
    self.sigkill = sigkill
    self.watchdog_max_dt = watchdog_max_dt

  def prepare(self):
    # nothing to do ahead of time for native processes
    pass

  def start(self):
    """Fork a child that execs the native binary; no-op when already running."""
    if self.proc is not None:
      return

    cloudlog.info("starting process %s" % self.name)
    run_dir = os.path.join(BASEDIR, self.cwd)
    child = Process(name=self.name, target=nativelauncher, args=(self.cmdline, run_dir))
    child.start()
    self.proc = child
    self.watchdog_seen = False
|
|
|
|
|
|
|
|
|
class PythonProcess(ManagerProcess):
  """A managed process that runs a python module's main() in a forked child."""

  def __init__(self, name, module, enabled=True, persistent=False, driverview=False, unkillable=False, sigkill=False, watchdog_max_dt=None):
    self.name = name
    self.module = module  # dotted module path handed to launcher()
    self.enabled = enabled
    self.persistent = persistent
    self.driverview = driverview
    self.unkillable = unkillable
    self.sigkill = sigkill
    self.watchdog_max_dt = watchdog_max_dt

  def prepare(self):
    """Pre-import the module in the manager so the forked child starts fast."""
    if not self.enabled:
      return
    cloudlog.info("preimporting %s" % self.module)
    importlib.import_module(self.module)

  def start(self):
    """Fork a child that runs the module's main(); no-op when already running."""
    if self.proc is not None:
      return

    cloudlog.info("starting python %s" % self.module)
    child = Process(name=self.name, target=launcher, args=(self.module,))
    child.start()
    self.proc = child
    self.watchdog_seen = False
|
|
|
|
|
|
|
|
|
class DaemonProcess(ManagerProcess):
  """Python process that has to stay running across manager restarts.

  This is used for athena so you don't lose SSH access when restarting manager.
  stop() is deliberately a no-op so the daemon outlives the manager."""

  def __init__(self, name, module, param_name, enabled=True):
    self.name = name
    self.module = module
    # params key holding the pid of a possibly already-running daemon
    self.param_name = param_name
    self.enabled = enabled
    self.persistent = True

  def prepare(self):
    pass

  def start(self):
    """Start the daemon unless an instance from a previous manager run is still alive."""
    params = Params()
    pid = params.get(self.param_name, encoding='utf-8')

    if pid is not None:
      try:
        # signal 0 only checks that the pid exists
        os.kill(int(pid), 0)
        with open(f'/proc/{pid}/cmdline') as f:
          if self.module in f.read():
            # daemon is running
            return
      except (OSError, ValueError):
        # OSError: process is dead or /proc entry unreadable (covers FileNotFoundError)
        # ValueError: stored pid param is not an integer
        pass

    cloudlog.info("starting daemon %s" % self.name)
    # DEVNULL instead of open('/dev/null') so no file handles leak in the manager
    proc = subprocess.Popen(['python', '-m', self.module],  # pylint: disable=subprocess-popen-preexec-fn
                            stdin=subprocess.DEVNULL,
                            stdout=subprocess.DEVNULL,
                            stderr=subprocess.DEVNULL,
                            preexec_fn=os.setpgrp)  # detach from manager's process group

    params.put(self.param_name, str(proc.pid))

  def stop(self, retry=True):
    # no-op: the daemon must survive manager restarts
    pass
|
|
|
|
|
|
|
|
def ensure_running(procs, started, driverview=False, not_run=None):
  """Reconcile every process in `procs` with the desired state.

  A process runs when it isn't blocked (`not_run`), is enabled, and is either
  persistent, a driverview process while driverview is active, or onroad
  (`started`). Everything else is stopped. Each process's watchdog is checked
  afterwards.
  """
  blocked = [] if not_run is None else not_run

  # TODO: can we do this in parallel?
  for p in procs:
    should_run = (p.name not in blocked) and p.enabled and \
                 (p.persistent or (p.driverview and driverview) or started)
    if should_run:
      p.start()
    else:
      p.stop()

    p.check_watchdog(started)