clearpilot: initial commit of full source
This commit is contained in:
0
selfdrive/manager/__init__.py
Executable file
0
selfdrive/manager/__init__.py
Executable file
90
selfdrive/manager/build.py
Executable file
90
selfdrive/manager/build.py
Executable file
@@ -0,0 +1,90 @@
|
||||
#!/usr/bin/env python3
|
||||
import os
|
||||
import subprocess
|
||||
from pathlib import Path
|
||||
|
||||
# NOTE: Do NOT import anything here that needs be built (e.g. params)
|
||||
from openpilot.common.basedir import BASEDIR
|
||||
from openpilot.common.spinner import Spinner
|
||||
from openpilot.common.text_window import TextWindow
|
||||
from openpilot.system.hardware import AGNOS
|
||||
from openpilot.common.swaglog import cloudlog, add_file_handler
|
||||
from openpilot.system.version import is_dirty
|
||||
|
||||
MAX_CACHE_SIZE = 4e9 if "CI" in os.environ else 2e9
|
||||
CACHE_DIR = Path("/data/scons_cache" if AGNOS else "/tmp/scons_cache")
|
||||
|
||||
TOTAL_SCONS_NODES = 2560
|
||||
MAX_BUILD_PROGRESS = 100
|
||||
|
||||
def build(spinner: Spinner, dirty: bool = False, minimal: bool = False) -> None:
  """Run scons to build openpilot, driving `spinner` with build progress.

  Retries with progressively less parallelism when an attempt fails (running
  with all cores can exhaust memory).  On persistent failure the compile
  output is logged via cloudlog, shown in a TextWindow (outside CI), and the
  process exits with status 1.  On success, touches the `prebuilt` marker and
  trims the scons cache to MAX_CACHE_SIZE.

  Args:
    spinner: progress UI fed from scons' "progress: N" stderr lines.
    dirty: not referenced in this function body.  # NOTE(review): confirm callers rely on it
    minimal: pass --minimal to scons for a reduced build target set.
  """
  env = os.environ.copy()
  env['SCONS_PROGRESS'] = "1"  # makes scons emit "progress: N" lines on stderr
  nproc = os.cpu_count()
  if nproc is None:
    # cpu_count() can return None; fall back to a conservative parallelism
    nproc = 2

  extra_args = ["--minimal"] if minimal else []

  # building with all cores can result in using too
  # much memory, so retry with less parallelism
  compile_output: list[bytes] = []
  for n in (nproc, nproc/2, 1):
    compile_output.clear()
    scons: subprocess.Popen = subprocess.Popen(["scons", f"-j{int(n)}", "--cache-populate", *extra_args], cwd=BASEDIR, env=env, stderr=subprocess.PIPE)
    assert scons.stderr is not None

    # Read progress from stderr and update spinner
    while scons.poll() is None:
      try:
        line = scons.stderr.readline()
        # NOTE(review): readline() on a pipe returns b'' at EOF, never None,
        # so this guard appears to be dead code; the len(line) check below
        # already skips empty lines.
        if line is None:
          continue
        line = line.rstrip()

        prefix = b'progress: '
        if line.startswith(prefix):
          # progress lines carry the current scons node count
          i = int(line[len(prefix):])
          spinner.update_progress(MAX_BUILD_PROGRESS * min(1., i / TOTAL_SCONS_NODES), 100.)
        elif len(line):
          # anything else is real compiler output: keep it for error reporting
          compile_output.append(line)
          print(line.decode('utf8', 'replace'))
      except Exception:
        # best-effort progress parsing; never let a parse error kill the build loop
        pass

    if scons.returncode == 0:
      # mark the tree as prebuilt so subsequent boots skip the build
      Path('/data/openpilot/prebuilt').touch()
      break

  if scons.returncode != 0:
    # Read remaining output
    if scons.stderr is not None:
      compile_output += scons.stderr.read().split(b'\n')

    # Build failed log errors
    error_s = b"\n".join(compile_output).decode('utf8', 'replace')
    add_file_handler(cloudlog)
    cloudlog.error("scons build failed\n" + error_s)

    # Show TextWindow
    spinner.close()
    if not os.getenv("CI"):
      with TextWindow("openpilot failed to build\n \n" + error_s) as t:
        t.wait_for_exit()
    exit(1)

  # enforce max cache size: evict oldest-modified cache files first until
  # the total drops under MAX_CACHE_SIZE
  cache_files = [f for f in CACHE_DIR.rglob('*') if f.is_file()]
  cache_files.sort(key=lambda f: f.stat().st_mtime)
  cache_size = sum(f.stat().st_size for f in cache_files)
  for f in cache_files:
    if cache_size < MAX_CACHE_SIZE:
      break
    cache_size -= f.stat().st_size
    f.unlink()
|
||||
|
||||
|
||||
if __name__ == "__main__":
  # Standalone entry point: build with a visible spinner, starting at 0%.
  spinner = Spinner()
  spinner.update_progress(0, 100)
  # minimal builds on AGNOS (device) hardware; full build elsewhere
  build(spinner, is_dirty(), minimal = AGNOS)
|
||||
67
selfdrive/manager/helpers.py
Executable file
67
selfdrive/manager/helpers.py
Executable file
@@ -0,0 +1,67 @@
|
||||
import errno
|
||||
import fcntl
|
||||
import os
|
||||
import sys
|
||||
import pathlib
|
||||
import shutil
|
||||
import signal
|
||||
import subprocess
|
||||
import tempfile
|
||||
import threading
|
||||
|
||||
from openpilot.common.basedir import BASEDIR
|
||||
from openpilot.common.params import Params
|
||||
|
||||
def unblock_stdout() -> None:
  """Fork into a pty so the child's stdout can never block it.

  The child (pid 0 from forkpty) returns immediately and continues as the
  real program.  The parent makes its own stdout non-blocking, forwards the
  child's pty output to stdout, relays SIGINT/SIGTERM down to the child, and
  finally exits with the child's exit status.
  """
  # get a non-blocking stdout
  child_pid, child_pty = os.forkpty()
  if child_pid != 0:  # parent

    # child is in its own process group, manually pass kill signals
    signal.signal(signal.SIGINT, lambda signum, frame: os.kill(child_pid, signal.SIGINT))
    signal.signal(signal.SIGTERM, lambda signum, frame: os.kill(child_pid, signal.SIGTERM))

    # make the parent's stdout non-blocking so a stalled consumer can't wedge us
    fcntl.fcntl(sys.stdout, fcntl.F_SETFL, fcntl.fcntl(sys.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)

    # pump child pty -> parent stdout until the child side closes
    while True:
      try:
        dat = os.read(child_pty, 4096)
      except OSError as e:
        # EIO is how Linux reports the pty slave side having exited
        if e.errno == errno.EIO:
          break
        continue

      if not dat:
        break

      try:
        sys.stdout.write(dat.decode('utf8'))
      except (OSError, UnicodeDecodeError):
        # non-blocking stdout may raise EAGAIN; partial utf8 reads may fail to
        # decode — drop the chunk rather than crash the forwarder
        pass

    # os.wait() returns a tuple with the pid and a 16 bit value
    # whose low byte is the signal number and whose high byte is the exit status
    exit_status = os.wait()[1] >> 8
    os._exit(exit_status)
|
||||
|
||||
|
||||
def write_onroad_params(started, params):
  """Mirror the driving state into params as two complementary booleans."""
  offroad = not started
  params.put_bool("IsOnroad", started)
  params.put_bool("IsOffroad", offroad)
|
||||
|
||||
|
||||
def save_bootlog() -> None:
  """Snapshot current params and record a bootlog in a background thread.

  The params directory is copied to a temp dir first so the bootlog captures
  the state at boot even if params change while ./bootlog runs.  The temp dir
  is removed by the worker thread when the bootlog process finishes.
  """
  # copy current params
  tmp = tempfile.mkdtemp()
  params_dirname = pathlib.Path(Params().get_param_path()).name
  params_dir = os.path.join(tmp, params_dirname)
  shutil.copytree(Params().get_param_path(), params_dir, dirs_exist_ok=True)

  def fn(tmpdir):
    # run loggerd's bootlog binary against the snapshot, then clean up
    env = os.environ.copy()
    env['PARAMS_COPY_PATH'] = tmpdir
    subprocess.call("./bootlog", cwd=os.path.join(BASEDIR, "system/loggerd"), env=env)
    shutil.rmtree(tmpdir)
  # daemon thread: don't block process exit on a slow/stuck bootlog
  t = threading.Thread(target=fn, args=(tmp, ))
  t.daemon = True
  t.start()
|
||||
497
selfdrive/manager/manager.py
Executable file
497
selfdrive/manager/manager.py
Executable file
@@ -0,0 +1,497 @@
|
||||
#!/usr/bin/env python3
|
||||
import datetime
|
||||
import os
|
||||
import signal
|
||||
import subprocess
|
||||
import sys
|
||||
import threading
|
||||
import time
|
||||
import traceback
|
||||
|
||||
from cereal import log
|
||||
import cereal.messaging as messaging
|
||||
import openpilot.selfdrive.sentry as sentry
|
||||
from openpilot.common.params import Params, ParamKeyType
|
||||
from openpilot.common.text_window import TextWindow
|
||||
from openpilot.common.time import system_time_valid
|
||||
from openpilot.system.hardware import HARDWARE, PC
|
||||
from openpilot.selfdrive.manager.helpers import unblock_stdout, write_onroad_params, save_bootlog
|
||||
from openpilot.selfdrive.manager.process import ensure_running
|
||||
from openpilot.selfdrive.manager.process_config import managed_processes
|
||||
from openpilot.selfdrive.athena.registration import register, UNREGISTERED_DONGLE_ID
|
||||
from openpilot.common.swaglog import cloudlog, add_file_handler
|
||||
from openpilot.system.version import is_dirty, get_commit, get_version, get_origin, get_short_branch, \
|
||||
get_normalized_origin, terms_version, training_version, \
|
||||
is_tested_branch, is_release_branch, get_commit_date
|
||||
|
||||
from openpilot.selfdrive.frogpilot.controls.lib.frogpilot_functions import FrogPilotFunctions
|
||||
from openpilot.selfdrive.frogpilot.controls.lib.model_manager import DEFAULT_MODEL, DEFAULT_MODEL_NAME, delete_deprecated_models
|
||||
|
||||
|
||||
def frogpilot_boot_functions(frogpilot_functions):
  """Run one-time FrogPilot boot chores: prune deprecated models, wait for a
  valid system clock, then back up the FrogPilot install and toggle settings.

  Each backup failure is reported and aborts the remaining chores; any other
  error is caught and printed so boot never crashes on this path.
  """
  try:
    delete_deprecated_models()

    # Backups want sane timestamps, so block until the clock is valid.
    while not system_time_valid():
      print("Waiting for system time to become valid...")
      time.sleep(1)

    backups = (
      ("FrogPilot", frogpilot_functions.backup_frogpilot),
      ("toggles", frogpilot_functions.backup_toggles),
    )
    for label, backup in backups:
      try:
        backup()
      except subprocess.CalledProcessError as e:
        print(f"Failed to backup {label}. Error: {e}")
        return

  except Exception as e:
    print(f"An unexpected error occurred: {e}")
|
||||
|
||||
def manager_init(frogpilot_functions) -> None:
  """One-time manager startup.

  Kicks off FrogPilot boot chores in the background, saves a bootlog, resets
  transient params, seeds unset params from defaults (or persistent storage),
  records version metadata, registers the device for a dongle id, and
  preimports every managed process.

  Raises:
    Exception: if device registration fails (no dongle id obtained).
  """
  # FrogPilot boot work (model cleanup, backups) runs concurrently so it
  # doesn't delay manager startup.
  frogpilot_boot = threading.Thread(target=frogpilot_boot_functions, args=(frogpilot_functions,))
  frogpilot_boot.start()

  save_bootlog()

  params = Params()
  # persistent copy that survives reinstalls; used below to restore values
  params_storage = Params("/persist/params")
  params.clear_all(ParamKeyType.CLEAR_ON_MANAGER_START)
  params.clear_all(ParamKeyType.CLEAR_ON_ONROAD_TRANSITION)
  params.clear_all(ParamKeyType.CLEAR_ON_OFFROAD_TRANSITION)
  if is_release_branch():
    params.clear_all(ParamKeyType.DEVELOPMENT_ONLY)

  # (key, default value) pairs — only written when the key is currently unset
  default_params: list[tuple[str, str | bytes]] = [
    ("CarParamsPersistent", ""),
    ("CompletedTrainingVersion", "0"),
    ("DisengageOnAccelerator", "0"),
    ("ExperimentalLongitudinalEnabled", "1"),
    ("GsmMetered", "1"),
    ("HasAcceptedTerms", "0"),
    ("IsLdwEnabled", "0"),
    ("IsMetric", "0"),
    ("LanguageSetting", "main_en"),
    ("NavSettingLeftSide", "0"),
    ("NavSettingTime24h", "0"),
    ("OpenpilotEnabledToggle", "1"),
    ("RecordFront", "0"),
    ("LongitudinalPersonality", str(log.LongitudinalPersonality.standard)),

    # Default FrogPilot parameters
    ("AccelerationPath", "1"),
    ("AccelerationProfile", "2"),
    ("AdjacentPath", "0"),
    ("AdjacentPathMetrics", "0"),
    ("AggressiveAcceleration", "1"),
    ("AggressiveFollow", "1.25"),
    ("AggressiveJerk", "0.5"),
    ("AlertVolumeControl", "0"),
    ("AlwaysOnLateral", "1"),
    ("AlwaysOnLateralMain", "0"),
    ("AMapKey1", ""),
    ("AMapKey2", ""),
    ("AutomaticUpdates", "0"),
    ("BlindSpotPath", "1"),
    ("CameraView", "2"),
    ("CarMake", ""),
    ("CarModel", ""),
    ("CECurves", "1"),
    ("CENavigation", "1"),
    ("CENavigationIntersections", "1"),
    ("CENavigationLead", "1"),
    ("CENavigationTurns", "1"),
    ("CESignal", "1"),
    ("CESlowerLead", "1"),
    ("CESpeed", "0"),
    ("CESpeedLead", "0"),
    ("CEStopLights", "1"),
    ("CEStopLightsLead", "0"),
    ("Compass", "1"),
    ("ConditionalExperimental", "1"),
    ("CrosstrekTorque", "1"),
    ("CurveSensitivity", "100"),
    ("CustomAlerts", "1"),
    ("CustomColors", "1"),
    ("CustomCruise", "1"),
    ("CustomCruiseLong", "5"),
    ("CustomIcons", "1"),
    ("CustomPaths", "1"),
    ("CustomPersonalities", "1"),
    ("CustomSignals", "1"),
    ("CustomSounds", "1"),
    ("CustomTheme", "1"),
    ("CustomUI", "1"),
    ("CydiaTune", "0"),
    ("DecelerationProfile", "1"),
    ("DeveloperUI", "0"),
    ("DeviceManagement", "1"),
    ("DeviceShutdown", "9"),
    ("DisableMTSCSmoothing", "0"),
    ("DisableOnroadUploads", "0"),
    ("DisableOpenpilotLongitudinal", "0"),
    ("DisableVTSCSmoothing", "0"),
    ("DisengageVolume", "100"),
    ("DragonPilotTune", "0"),
    ("DriverCamera", "0"),
    ("DynamicPathWidth", "0"),
    ("EngageVolume", "100"),
    ("EVTable", "1"),
    ("ExperimentalModeActivation", "1"),
    ("ExperimentalModeViaDistance", "1"),
    ("ExperimentalModeViaLKAS", "1"),
    ("ExperimentalModeViaTap", "0"),
    ("Fahrenheit", "0"),
    ("ForceAutoTune", "1"),
    ("ForceFingerprint", "0"),
    ("ForceMPHDashboard", "0"),
    ("FPSCounter", "0"),
    ("FrogPilotDrives", "0"),
    ("FrogPilotKilometers", "0"),
    ("FrogPilotMinutes", "0"),
    ("FrogsGoMooTune", "1"),
    ("FullMap", "0"),
    ("GasRegenCmd", "0"),
    ("GMapKey", ""),
    ("GoatScream", "1"),
    ("GreenLightAlert", "0"),
    ("HideAlerts", "0"),
    ("HideAOLStatusBar", "0"),
    ("HideCEMStatusBar", "0"),
    ("HideLeadMarker", "0"),
    ("HideMapIcon", "0"),
    ("HideMaxSpeed", "0"),
    ("HideSpeed", "0"),
    ("HideSpeedUI", "0"),
    ("HideUIElements", "0"),
    ("HigherBitrate", "0"),
    ("HolidayThemes", "1"),
    ("IncreaseThermalLimits", "0"),
    ("LaneChangeTime", "0"),
    ("LaneDetectionWidth", "60"),
    ("LaneLinesWidth", "4"),
    ("LateralTune", "1"),
    ("LeadDepartingAlert", "0"),
    ("LeadDetectionThreshold", "35"),
    ("LeadInfo", "0"),
    ("LockDoors", "1"),
    ("LongitudinalTune", "1"),
    ("LongPitch", "1"),
    ("LoudBlindspotAlert", "0"),
    ("LowVoltageShutdown", "11.8"),
    ("MapsSelected", ""),
    ("MapboxPublicKey", ""),
    ("MapboxSecretKey", ""),
    ("MapStyle", "0"),
    ("MTSCAggressiveness", "100"),
    ("MTSCCurvatureCheck", "0"),
    ("Model", DEFAULT_MODEL),
    ("ModelName", DEFAULT_MODEL_NAME),
    ("ModelSelector", "1"),
    ("ModelUI", "1"),
    ("MTSCEnabled", "1"),
    ("NNFF", "1"),
    ("NNFFLite", "1"),
    ("NoLogging", "0"),
    ("NoUploads", "0"),
    ("NudgelessLaneChange", "1"),
    ("NumericalTemp", "0"),
    ("OfflineMode", "1"),
    ("Offset1", "5"),
    ("Offset2", "5"),
    ("Offset3", "5"),
    ("Offset4", "10"),
    ("OneLaneChange", "1"),
    ("OnroadDistanceButton", "0"),
    ("PathEdgeWidth", "20"),
    ("PathWidth", "61"),
    ("PauseAOLOnBrake", "0"),
    ("PauseLateralOnSignal", "0"),
    ("PedalsOnUI", "1"),
    ("PreferredSchedule", "0"),
    ("PromptVolume", "100"),
    ("PromptDistractedVolume", "100"),
    ("QOLControls", "1"),
    ("QOLVisuals", "1"),
    ("RandomEvents", "0"),
    ("RefuseVolume", "100"),
    ("RelaxedFollow", "1.75"),
    ("RelaxedJerk", "1.0"),
    ("ReverseCruise", "0"),
    ("ReverseCruiseUI", "1"),
    ("RoadEdgesWidth", "2"),
    ("RoadNameUI", "1"),
    ("RotatingWheel", "1"),
    ("ScreenBrightness", "101"),
    ("ScreenBrightnessOnroad", "101"),
    ("ScreenManagement", "1"),
    ("ScreenRecorder", "1"),
    ("ScreenTimeout", "30"),
    ("ScreenTimeoutOnroad", "30"),
    ("SearchInput", "0"),
    ("SetSpeedLimit", "0"),
    ("SetSpeedOffset", "0"),
    ("ShowCPU", "0"),
    ("ShowGPU", "0"),
    ("ShowIP", "0"),
    ("ShowJerk", "1"),
    ("ShowMemoryUsage", "0"),
    ("ShowSLCOffset", "1"),
    ("ShowSLCOffsetUI", "1"),
    ("ShowStorageLeft", "0"),
    ("ShowStorageUsed", "0"),
    ("ShowTuning", "1"),
    ("Sidebar", "0"),
    ("SLCConfirmation", "1"),
    ("SLCConfirmationLower", "1"),
    ("SLCConfirmationHigher", "1"),
    ("SLCFallback", "2"),
    ("SLCLookaheadHigher", "5"),
    ("SLCLookaheadLower", "5"),
    ("SLCOverride", "1"),
    ("SLCPriority1", "Dashboard"),
    ("SLCPriority2", "Offline Maps"),
    ("SLCPriority3", "Navigation"),
    ("SmoothBraking", "1"),
    ("SmoothBrakingFarLead", "0"),
    ("SmoothBrakingJerk", "0"),
    ("SNGHack", "1"),
    ("SpeedLimitChangedAlert", "1"),
    ("SpeedLimitController", "1"),
    ("StandardFollow", "1.45"),
    ("StandardJerk", "1.0"),
    ("StandbyMode", "0"),
    ("SteerRatio", "0"),
    ("StockTune", "0"),
    ("StoppingDistance", "0"),
    ("TacoTune", "1"),
    ("ToyotaDoors", "0"),
    ("TrafficFollow", "0.5"),
    ("TrafficJerk", "1"),
    ("TrafficMode", "0"),
    ("TurnAggressiveness", "100"),
    ("TurnDesires", "0"),
    ("UnlimitedLength", "1"),
    ("UnlockDoors", "1"),
    ("UseSI", "1"),
    ("UseVienna", "0"),
    ("VisionTurnControl", "1"),
    ("WarningSoftVolume", "100"),
    ("WarningImmediateVolume", "100"),
    ("WheelIcon", "3"),
    ("WheelSpeed", "0")
  ]
  if not PC:
    # NOTE(review): datetime.utcnow() is deprecated since Python 3.12;
    # datetime.now(datetime.UTC) is the modern equivalent — confirm before changing.
    default_params.append(("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')))

  if params.get_bool("RecordFrontLock"):
    params.put_bool("RecordFront", True)

  # set unset params: prefer the persisted value over the shipped default;
  # already-set params are mirrored back into persistent storage
  for k, v in default_params:
    if params.get(k) is None:
      if params_storage.get(k) is None:
        params.put(k, v)
      else:
        params.put(k, params_storage.get(k))
    else:
      params_storage.put(k, params.get(k))

  # Create folders needed for msgq
  try:
    os.mkdir("/dev/shm")
  except FileExistsError:
    pass
  except PermissionError:
    print("WARNING: failed to make /dev/shm")

  # set version params
  params.put("Version", get_version())
  params.put("TermsVersion", terms_version)
  params.put("TrainingVersion", training_version)
  params.put("GitCommit", get_commit())
  params.put("GitCommitDate", get_commit_date())
  params.put("GitBranch", get_short_branch())
  params.put("GitRemote", get_origin())
  params.put_bool("IsTestedBranch", is_tested_branch())
  params.put_bool("IsReleaseBranch", is_release_branch())

  # set dongle id
  reg_res = register(show_spinner=True)
  if reg_res:
    dongle_id = reg_res
  else:
    serial = params.get("HardwareSerial")
    # NOTE(review): broad Exception — a dedicated error type would let callers
    # distinguish registration failure from other startup errors.
    raise Exception(f"Registration failed for device {serial}")
  os.environ['DONGLE_ID'] = dongle_id  # Needed for swaglog
  os.environ['GIT_ORIGIN'] = get_normalized_origin()  # Needed for swaglog
  os.environ['GIT_BRANCH'] = get_short_branch()  # Needed for swaglog
  os.environ['GIT_COMMIT'] = get_commit()  # Needed for swaglog

  if not is_dirty():
    os.environ['CLEAN'] = '1'

  # init logging
  sentry.init(sentry.SentryProject.SELFDRIVE)
  cloudlog.bind_global(dongle_id=dongle_id,
                       version=get_version(),
                       origin=get_normalized_origin(),
                       branch=get_short_branch(),
                       commit=get_commit(),
                       dirty=is_dirty(),
                       device=HARDWARE.get_device_type())

  # preimport all processes
  for p in managed_processes.values():
    p.prepare()
|
||||
|
||||
|
||||
def manager_cleanup() -> None:
  """Tear down every managed process: signal them all, then reap them all."""
  # First pass is non-blocking so every process receives its stop signal
  # promptly; the second pass blocks until each one has actually died.
  for blocking in (False, True):
    for proc in managed_processes.values():
      proc.stop(block=blocking)

  cloudlog.info("everything is dead")
|
||||
|
||||
|
||||
def manager_thread(frogpilot_functions) -> None:
  """Main supervision loop: keep managed processes in the right state.

  Polls deviceState, reacts to onroad/offroad transitions (clearing the
  corresponding params), keeps processes running via ensure_running, publishes
  managerState, and exits when DoUninstall/DoShutdown/DoReboot is set.
  """
  cloudlog.bind(daemon="manager")
  cloudlog.info("manager start")
  cloudlog.info({"environ": os.environ})

  params = Params()
  # shared-memory params used for transient, offroad-cleared state
  params_memory = Params("/dev/shm/params")

  # processes that should never be started in this session
  ignore: list[str] = []
  if params.get("DongleId", encoding='utf8') in (None, UNREGISTERED_DONGLE_ID):
    # no registered dongle id: skip cloud-facing daemons
    ignore += ["manage_athenad", "uploader"]
  if os.getenv("NOBOARD") is not None:
    ignore.append("pandad")
  # BLOCK env var: comma-separated extra process names to skip
  ignore += [x for x in os.getenv("BLOCK", "").split(",") if len(x) > 0]

  sm = messaging.SubMaster(['deviceState', 'carParams'], poll='deviceState')
  pm = messaging.PubMaster(['managerState'])

  write_onroad_params(False, params)
  ensure_running(managed_processes.values(), False, params=params, CP=sm['carParams'], not_run=ignore)

  started_prev = False

  while True:
    sm.update(1000)

    # a crash marker left by sentry indicates openpilot crashed last drive
    openpilot_crashed = os.path.isfile(os.path.join(sentry.CRASHES_DIR, 'error.txt'))
    if openpilot_crashed:
      frogpilot_functions.delete_logs()

    started = sm['deviceState'].started

    if started and not started_prev:
      # going onroad: reset per-drive params and clear the crash marker
      params.clear_all(ParamKeyType.CLEAR_ON_ONROAD_TRANSITION)

      if openpilot_crashed:
        os.remove(os.path.join(sentry.CRASHES_DIR, 'error.txt'))

    elif not started and started_prev:
      # going offroad: clear both persistent and shared-memory transition params
      params.clear_all(ParamKeyType.CLEAR_ON_OFFROAD_TRANSITION)
      params_memory.clear_all(ParamKeyType.CLEAR_ON_OFFROAD_TRANSITION)

    # update onroad params, which drives boardd's safety setter thread
    if started != started_prev:
      write_onroad_params(started, params)

    started_prev = started

    ensure_running(managed_processes.values(), started, params=params, CP=sm['carParams'], not_run=ignore)

    # status line: green names for alive processes, red for dead ones
    running = ' '.join("{}{}\u001b[0m".format("\u001b[32m" if p.proc.is_alive() else "\u001b[31m", p.name)
                       for p in managed_processes.values() if p.proc)
    print(running)
    cloudlog.debug(running)

    # send managerState
    msg = messaging.new_message('managerState', valid=True)
    msg.managerState.processes = [p.get_process_state_msg() for p in managed_processes.values()]
    pm.send('managerState', msg)

    # Exit main loop when uninstall/shutdown/reboot is needed
    shutdown = False
    for param in ("DoUninstall", "DoShutdown", "DoReboot"):
      if params.get_bool(param):
        shutdown = True
        params.put("LastManagerExitReason", f"{param} {datetime.datetime.now()}")
        cloudlog.warning(f"Shutting down manager - {param} set")

    if shutdown:
      break
|
||||
|
||||
|
||||
def main() -> None:
  """Manager entry point: set up FrogPilot, initialize, run the supervision
  loop, then act on any uninstall/reboot/shutdown request left in params."""
  frogpilot_functions = FrogPilotFunctions()

  try:
    frogpilot_functions.setup_frogpilot()
  except subprocess.CalledProcessError as e:
    # setup failure aborts startup entirely
    print(f"Failed to setup FrogPilot. Error: {e}")
    return

  manager_init(frogpilot_functions)
  if os.getenv("PREPAREONLY") is not None:
    # PREPAREONLY: init/preimport only, don't run the manager loop
    return

  # SystemExit on sigterm
  signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))

  try:
    manager_thread(frogpilot_functions)
  except Exception:
    traceback.print_exc()
    sentry.capture_exception()
  finally:
    # always stop all managed processes, even on crash or SIGTERM
    manager_cleanup()

  # honor whichever exit request ended the manager loop
  params = Params()
  if params.get_bool("DoUninstall"):
    cloudlog.warning("uninstalling")
    frogpilot_functions.uninstall_frogpilot()
  elif params.get_bool("DoReboot"):
    cloudlog.warning("reboot")
    HARDWARE.reboot()
  elif params.get_bool("DoShutdown"):
    cloudlog.warning("shutdown")
    HARDWARE.shutdown()
|
||||
|
||||
|
||||
if __name__ == "__main__":
  # fork into a pty first so a blocked stdout can never stall the manager
  unblock_stdout()

  try:
    main()
  except KeyboardInterrupt:
    print("got CTRL-C, exiting")
  except Exception:
    # log the startup failure to a file, best-effort stop the UI,
    # then show the error on screen before re-raising
    add_file_handler(cloudlog)
    cloudlog.exception("Manager failed to start")

    try:
      managed_processes['ui'].stop()
    except Exception:
      pass

    # Show last 3 lines of traceback
    error = traceback.format_exc(-3)
    error = "Manager failed to start\n\n" + error
    with TextWindow(error) as t:
      t.wait_for_exit()

    raise

  # manual exit because we are forked
  sys.exit(0)
|
||||
519
selfdrive/manager/manager_wip.txt
Normal file
519
selfdrive/manager/manager_wip.txt
Normal file
@@ -0,0 +1,519 @@
|
||||
#!/usr/bin/env python3
|
||||
import datetime
|
||||
import os
|
||||
import signal
|
||||
import subprocess
|
||||
import sys
|
||||
import threading
|
||||
import time
|
||||
import traceback
|
||||
|
||||
from cereal import log
|
||||
import cereal.messaging as messaging
|
||||
import openpilot.selfdrive.sentry as sentry
|
||||
from openpilot.common.params import Params, ParamKeyType
|
||||
from openpilot.common.text_window import TextWindow
|
||||
from openpilot.common.time import system_time_valid
|
||||
from openpilot.system.hardware import HARDWARE, PC
|
||||
from openpilot.selfdrive.manager.helpers import unblock_stdout, write_onroad_params, save_bootlog
|
||||
from openpilot.selfdrive.manager.process import ensure_running
|
||||
from openpilot.selfdrive.manager.process_config import managed_processes
|
||||
from openpilot.selfdrive.athena.registration import register, UNREGISTERED_DONGLE_ID
|
||||
from openpilot.common.swaglog import cloudlog, add_file_handler
|
||||
from openpilot.system.version import is_dirty, get_commit, get_version, get_origin, get_short_branch, \
|
||||
get_normalized_origin, terms_version, training_version, \
|
||||
is_tested_branch, is_release_branch, get_commit_date
|
||||
|
||||
from openpilot.selfdrive.frogpilot.controls.lib.frogpilot_functions import FrogPilotFunctions
|
||||
from openpilot.selfdrive.frogpilot.controls.lib.model_manager import DEFAULT_MODEL, DEFAULT_MODEL_NAME, delete_deprecated_models
|
||||
|
||||
|
||||
def frogpilot_boot_functions(frogpilot_functions):
|
||||
try:
|
||||
delete_deprecated_models()
|
||||
|
||||
while not system_time_valid():
|
||||
print("Waiting for system time to become valid...")
|
||||
time.sleep(1)
|
||||
|
||||
try:
|
||||
frogpilot_functions.backup_frogpilot()
|
||||
except subprocess.CalledProcessError as e:
|
||||
print(f"Failed to backup FrogPilot. Error: {e}")
|
||||
return
|
||||
|
||||
try:
|
||||
frogpilot_functions.backup_toggles()
|
||||
except subprocess.CalledProcessError as e:
|
||||
print(f"Failed to backup toggles. Error: {e}")
|
||||
return
|
||||
|
||||
except Exception as e:
|
||||
print(f"An unexpected error occurred: {e}")
|
||||
|
||||
def manager_init(frogpilot_functions) -> None:
|
||||
timestamp = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
|
||||
log_dir = f"/data/log2/{timestamp}"
|
||||
os.makedirs(log_dir, exist_ok=True)
|
||||
|
||||
frogpilot_boot = threading.Thread(target=frogpilot_boot_functions, args=(frogpilot_functions,))
|
||||
frogpilot_boot.start()
|
||||
|
||||
save_bootlog()
|
||||
|
||||
params = Params()
|
||||
params_storage = Params("/persist/params")
|
||||
params.clear_all(ParamKeyType.CLEAR_ON_MANAGER_START)
|
||||
params.clear_all(ParamKeyType.CLEAR_ON_ONROAD_TRANSITION)
|
||||
params.clear_all(ParamKeyType.CLEAR_ON_OFFROAD_TRANSITION)
|
||||
if is_release_branch():
|
||||
params.clear_all(ParamKeyType.DEVELOPMENT_ONLY)
|
||||
|
||||
default_params: list[tuple[str, str | bytes]] = [
|
||||
("CarParamsPersistent", ""),
|
||||
("CompletedTrainingVersion", "0"),
|
||||
("DisengageOnAccelerator", "0"),
|
||||
("ExperimentalLongitudinalEnabled", "1"),
|
||||
("GsmMetered", "1"),
|
||||
("HasAcceptedTerms", "0"),
|
||||
("IsLdwEnabled", "0"),
|
||||
("IsMetric", "0"),
|
||||
("LanguageSetting", "main_en"),
|
||||
("NavSettingLeftSide", "0"),
|
||||
("NavSettingTime24h", "0"),
|
||||
("OpenpilotEnabledToggle", "1"),
|
||||
("RecordFront", "0"),
|
||||
("LongitudinalPersonality", str(log.LongitudinalPersonality.standard)),
|
||||
|
||||
# Default FrogPilot parameters
|
||||
("AccelerationPath", "1"),
|
||||
("AccelerationProfile", "2"),
|
||||
("AdjacentPath", "0"),
|
||||
("AdjacentPathMetrics", "0"),
|
||||
("AggressiveAcceleration", "1"),
|
||||
("AggressiveFollow", "1.25"),
|
||||
("AggressiveJerk", "0.5"),
|
||||
("AlertVolumeControl", "0"),
|
||||
("AlwaysOnLateral", "1"),
|
||||
("AlwaysOnLateralMain", "0"),
|
||||
("AMapKey1", ""),
|
||||
("AMapKey2", ""),
|
||||
("AutomaticUpdates", "0"),
|
||||
("BlindSpotPath", "1"),
|
||||
("CameraView", "2"),
|
||||
("CarMake", ""),
|
||||
("CarModel", ""),
|
||||
("CECurves", "1"),
|
||||
("CENavigation", "1"),
|
||||
("CENavigationIntersections", "1"),
|
||||
("CENavigationLead", "1"),
|
||||
("CENavigationTurns", "1"),
|
||||
("CESignal", "1"),
|
||||
("CESlowerLead", "1"),
|
||||
("CESpeed", "0"),
|
||||
("CESpeedLead", "0"),
|
||||
("CEStopLights", "1"),
|
||||
("CEStopLightsLead", "0"),
|
||||
("Compass", "1"),
|
||||
("ConditionalExperimental", "1"),
|
||||
("CrosstrekTorque", "1"),
|
||||
("CurveSensitivity", "100"),
|
||||
("CustomAlerts", "1"),
|
||||
("CustomColors", "1"),
|
||||
("CustomCruise", "1"),
|
||||
("CustomCruiseLong", "5"),
|
||||
("CustomIcons", "1"),
|
||||
("CustomPaths", "1"),
|
||||
("CustomPersonalities", "1"),
|
||||
("CustomSignals", "1"),
|
||||
("CustomSounds", "1"),
|
||||
("CustomTheme", "1"),
|
||||
("CustomUI", "1"),
|
||||
("CydiaTune", "0"),
|
||||
("DecelerationProfile", "1"),
|
||||
("DeveloperUI", "0"),
|
||||
("DeviceManagement", "1"),
|
||||
("DeviceShutdown", "9"),
|
||||
("DisableMTSCSmoothing", "0"),
|
||||
("DisableOnroadUploads", "0"),
|
||||
("DisableOpenpilotLongitudinal", "0"),
|
||||
("DisableVTSCSmoothing", "0"),
|
||||
("DisengageVolume", "100"),
|
||||
("DragonPilotTune", "0"),
|
||||
("DriverCamera", "0"),
|
||||
("DynamicPathWidth", "0"),
|
||||
("EngageVolume", "100"),
|
||||
("EVTable", "1"),
|
||||
("ExperimentalModeActivation", "1"),
|
||||
("ExperimentalModeViaDistance", "1"),
|
||||
("ExperimentalModeViaLKAS", "1"),
|
||||
("ExperimentalModeViaTap", "0"),
|
||||
("Fahrenheit", "0"),
|
||||
("ForceAutoTune", "1"),
|
||||
("ForceFingerprint", "0"),
|
||||
("ForceMPHDashboard", "0"),
|
||||
("FPSCounter", "0"),
|
||||
("FrogsGoMooTune", "1"),
|
||||
("FullMap", "0"),
|
||||
("GasRegenCmd", "0"),
|
||||
("GMapKey", ""),
|
||||
("GoatScream", "1"),
|
||||
("GreenLightAlert", "0"),
|
||||
("HideAlerts", "0"),
|
||||
("HideAOLStatusBar", "0"),
|
||||
("HideCEMStatusBar", "0"),
|
||||
("HideLeadMarker", "0"),
|
||||
("HideMapIcon", "0"),
|
||||
("HideMaxSpeed", "0"),
|
||||
("HideSpeed", "0"),
|
||||
("HideSpeedUI", "0"),
|
||||
("HideUIElements", "0"),
|
||||
("HigherBitrate", "0"),
|
||||
("HolidayThemes", "1"),
|
||||
("IncreaseThermalLimits", "0"),
|
||||
("LaneChangeTime", "0"),
|
||||
("LaneDetectionWidth", "60"),
|
||||
("LaneLinesWidth", "2"),
|
||||
("LateralTune", "1"),
|
||||
("LeadDepartingAlert", "0"),
|
||||
("LeadDetectionThreshold", "35"),
|
||||
("LeadInfo", "0"),
|
||||
("LockDoors", "1"),
|
||||
("LongitudinalTune", "1"),
|
||||
("LongPitch", "1"),
|
||||
("LoudBlindspotAlert", "0"),
|
||||
("LowVoltageShutdown", "11.8"),
|
||||
("kiV1", "0.60"),
|
||||
("kiV2", "0.45"),
|
||||
("kiV3", "0.30"),
|
||||
("kiV4", "0.15"),
|
||||
("kpV1", "1.50"),
|
||||
("kpV2", "1.00"),
|
||||
("kpV3", "0.75"),
|
||||
("kpV4", "0.50"),
|
||||
("MapsSelected", ""),
|
||||
("MapboxPublicKey", ""),
|
||||
("MapboxSecretKey", ""),
|
||||
("MapStyle", "0"),
|
||||
("MTSCAggressiveness", "100"),
|
||||
("MTSCCurvatureCheck", "0"),
|
||||
("Model", DEFAULT_MODEL),
|
||||
("ModelName", DEFAULT_MODEL_NAME),
|
||||
("ModelSelector", "1"),
|
||||
("ModelUI", "1"),
|
||||
("MTSCEnabled", "1"),
|
||||
("NNFF", "1"),
|
||||
("NNFFLite", "1"),
|
||||
("NoLogging", "0"),
|
||||
("NoUploads", "0"),
|
||||
("NudgelessLaneChange", "1"),
|
||||
("NumericalTemp", "0"),
|
||||
("OfflineMode", "1"),
|
||||
("Offset1", "5"),
|
||||
("Offset2", "5"),
|
||||
("Offset3", "5"),
|
||||
("Offset4", "10"),
|
||||
("OneLaneChange", "1"),
|
||||
("OnroadDistanceButton", "0"),
|
||||
("PathEdgeWidth", "20"),
|
||||
("PathWidth", "61"),
|
||||
("PauseAOLOnBrake", "0"),
|
||||
("PauseLateralOnSignal", "0"),
|
||||
("PedalsOnUI", "1"),
|
||||
("PreferredSchedule", "0"),
|
||||
("PromptVolume", "100"),
|
||||
("PromptDistractedVolume", "100"),
|
||||
("QOLControls", "1"),
|
||||
("QOLVisuals", "1"),
|
||||
("RandomEvents", "0"),
|
||||
("RefuseVolume", "100"),
|
||||
("RelaxedFollow", "1.75"),
|
||||
("RelaxedJerk", "1.0"),
|
||||
("ReverseCruise", "0"),
|
||||
("ReverseCruiseUI", "1"),
|
||||
("RoadEdgesWidth", "2"),
|
||||
("RoadNameUI", "1"),
|
||||
("RotatingWheel", "1"),
|
||||
("ScreenBrightness", "101"),
|
||||
("ScreenBrightnessOnroad", "101"),
|
||||
("ScreenManagement", "1"),
|
||||
("ScreenRecorder", "1"),
|
||||
("ScreenTimeout", "30"),
|
||||
("ScreenTimeoutOnroad", "30"),
|
||||
("SearchInput", "0"),
|
||||
("SetSpeedLimit", "0"),
|
||||
("SetSpeedOffset", "0"),
|
||||
("ShowCPU", "0"),
|
||||
("ShowGPU", "0"),
|
||||
("ShowIP", "0"),
|
||||
("ShowJerk", "1"),
|
||||
("ShowMemoryUsage", "0"),
|
||||
("ShowSLCOffset", "1"),
|
||||
("ShowSLCOffsetUI", "1"),
|
||||
("ShowStorageLeft", "0"),
|
||||
("ShowStorageUsed", "0"),
|
||||
("ShowTuning", "1"),
|
||||
("Sidebar", "0"),
|
||||
("SLCConfirmation", "1"),
|
||||
("SLCConfirmationLower", "1"),
|
||||
("SLCConfirmationHigher", "1"),
|
||||
("SLCFallback", "2"),
|
||||
("SLCLookaheadHigher", "5"),
|
||||
("SLCLookaheadLower", "5"),
|
||||
("SLCOverride", "1"),
|
||||
("SLCPriority1", "Dashboard"),
|
||||
("SLCPriority2", "Offline Maps"),
|
||||
("SLCPriority3", "Navigation"),
|
||||
("SmoothBraking", "1"),
|
||||
("SmoothBrakingFarLead", "0"),
|
||||
("SmoothBrakingJerk", "0"),
|
||||
("SNGHack", "1"),
|
||||
("SpeedLimitChangedAlert", "1"),
|
||||
("SpeedLimitController", "1"),
|
||||
("StandardFollow", "1.45"),
|
||||
("StandardJerk", "1.0"),
|
||||
("StandbyMode", "0"),
|
||||
("SteerRatio", "0"),
|
||||
("StockTune", "0"),
|
||||
("StoppingDistance", "0"),
|
||||
("TacoTune", "1"),
|
||||
("ToyotaDoors", "0"),
|
||||
("TrafficFollow", "0.5"),
|
||||
("TrafficJerk", "1"),
|
||||
("TrafficMode", "0"),
|
||||
("Tuning", "1"),
|
||||
("TurnAggressiveness", "100"),
|
||||
("TurnDesires", "0"),
|
||||
("UnlimitedLength", "1"),
|
||||
("UnlockDoors", "1"),
|
||||
("UseSI", "1"),
|
||||
("UseVienna", "0"),
|
||||
("VisionTurnControl", "1"),
|
||||
("WarningSoftVolume", "100"),
|
||||
("WarningImmediateVolume", "100"),
|
||||
("WheelIcon", "3"),
|
||||
("WheelSpeed", "0")
|
||||
]
|
||||
if not PC:
|
||||
default_params.append(("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')))
|
||||
|
||||
if params.get_bool("RecordFrontLock"):
|
||||
params.put_bool("RecordFront", True)
|
||||
|
||||
# set unset params
|
||||
for k, v in default_params:
|
||||
if params.get(k) is None:
|
||||
if params_storage.get(k) is None:
|
||||
params.put(k, v)
|
||||
else:
|
||||
params.put(k, params_storage.get(k))
|
||||
else:
|
||||
params_storage.put(k, params.get(k))
|
||||
|
||||
# Create folders needed for msgq
|
||||
try:
|
||||
os.mkdir("/dev/shm")
|
||||
except FileExistsError:
|
||||
pass
|
||||
except PermissionError:
|
||||
print("WARNING: failed to make /dev/shm")
|
||||
|
||||
# set version params
|
||||
params.put("Version", get_version())
|
||||
params.put("TermsVersion", terms_version)
|
||||
params.put("TrainingVersion", training_version)
|
||||
params.put("GitCommit", get_commit())
|
||||
params.put("GitCommitDate", get_commit_date())
|
||||
params.put("GitBranch", get_short_branch())
|
||||
params.put("GitRemote", get_origin())
|
||||
params.put_bool("IsTestedBranch", is_tested_branch())
|
||||
params.put_bool("IsReleaseBranch", is_release_branch())
|
||||
|
||||
# set dongle id
|
||||
reg_res = register(show_spinner=True)
|
||||
if reg_res:
|
||||
dongle_id = reg_res
|
||||
else:
|
||||
serial = params.get("HardwareSerial")
|
||||
raise Exception(f"Registration failed for device {serial}")
|
||||
os.environ['DONGLE_ID'] = dongle_id # Needed for swaglog
|
||||
os.environ['GIT_ORIGIN'] = get_normalized_origin() # Needed for swaglog
|
||||
os.environ['GIT_BRANCH'] = get_short_branch() # Needed for swaglog
|
||||
os.environ['GIT_COMMIT'] = get_commit() # Needed for swaglog
|
||||
|
||||
if not is_dirty():
|
||||
os.environ['CLEAN'] = '1'
|
||||
|
||||
# init logging
|
||||
# sentry.init(sentry.SentryProject.SELFDRIVE)
|
||||
# cloudlog.bind_global(dongle_id=dongle_id,
|
||||
# version=get_version(),
|
||||
# origin=get_normalized_origin(),
|
||||
# branch=get_short_branch(),
|
||||
# commit=get_commit(),
|
||||
# dirty=is_dirty(),
|
||||
# device=HARDWARE.get_device_type())
|
||||
|
||||
# preimport all processes
|
||||
for p in managed_processes.values():
|
||||
p.prepare()
|
||||
|
||||
return log_dir
|
||||
|
||||
|
||||
def manager_cleanup() -> None:
  """Stop every managed process.

  Two passes over the same process set: the first pass is non-blocking so all
  children receive their stop signal and shut down in parallel; the second
  pass blocks until each one has actually exited.
  """
  for block in (False, True):
    for proc in managed_processes.values():
      proc.stop(block=block)

  cloudlog.info("everything is dead")
|
||||
|
||||
# Last printed process-status line; module-level so it persists across calls
# and we only print/log when the set of alive processes changes.
last_running = ""


def manager_thread(frogpilot_functions, log_dir) -> None:
  """Main supervision loop: keep managed processes matched to onroad state.

  Runs until one of the DoUninstall/DoShutdown/DoReboot/DoSoftReboot params is
  set, publishing managerState each iteration. `log_dir` is forwarded to
  ensure_running() so child process logs land in one per-boot directory.
  """
  global last_running

  cloudlog.bind(daemon="manager")
  cloudlog.info("manager start")
  cloudlog.info({"environ": os.environ})

  params = Params()
  # separate params instance backed by shared memory (cleared on reboot)
  params_memory = Params("/dev/shm/params")

  # processes to never start this session
  ignore: list[str] = []
  if params.get("DongleId", encoding='utf8') in (None, UNREGISTERED_DONGLE_ID):
    # unregistered devices can't talk to the backend
    ignore += ["manage_athenad", "uploader"]
  if os.getenv("NOBOARD") is not None:
    ignore.append("pandad")
  # BLOCK env var: comma-separated extra process names to skip
  ignore += [x for x in os.getenv("BLOCK", "").split(",") if len(x) > 0]

  sm = messaging.SubMaster(['deviceState', 'carParams'], poll='deviceState')
  pm = messaging.PubMaster(['managerState'])

  # start in the offroad state
  write_onroad_params(False, params)
  ensure_running(managed_processes.values(), False, params=params, CP=sm['carParams'], not_run=ignore, log_dir=log_dir)

  started_prev = False

  while True:
    sm.update(1000)

    # a crash marker file is written by the sentry/crash handler
    openpilot_crashed = os.path.isfile(os.path.join(sentry.CRASHES_DIR, 'error.txt'))
    if openpilot_crashed:
      frogpilot_functions.delete_logs()

    started = sm['deviceState'].started

    if started and not started_prev:
      # offroad -> onroad transition
      params.clear_all(ParamKeyType.CLEAR_ON_ONROAD_TRANSITION)

      if openpilot_crashed:
        os.remove(os.path.join(sentry.CRASHES_DIR, 'error.txt'))

    elif not started and started_prev:
      # onroad -> offroad transition
      params.clear_all(ParamKeyType.CLEAR_ON_OFFROAD_TRANSITION)
      params_memory.clear_all(ParamKeyType.CLEAR_ON_OFFROAD_TRANSITION)

    # update onroad params, which drives boardd's safety setter thread
    if started != started_prev:
      write_onroad_params(started, params)

    started_prev = started

    ensure_running(managed_processes.values(), started, params=params, CP=sm['carParams'], not_run=ignore, log_dir=log_dir)

    # ANSI-colored status line: green for alive processes, red for dead ones
    running = ' '.join("{}{}\u001b[0m".format("\u001b[32m" if p.proc.is_alive() else "\u001b[31m", p.name)
                       for p in managed_processes.values() if p.proc)

    # clearpilot: only print/log the status line when it changes
    if (running != last_running):
      print(running)
      cloudlog.debug(running)
      last_running = running

    # send managerState
    msg = messaging.new_message('managerState', valid=True)
    msg.managerState.processes = [p.get_process_state_msg() for p in managed_processes.values()]
    pm.send('managerState', msg)

    # Exit main loop when uninstall/shutdown/reboot is needed
    shutdown = False
    for param in ("DoUninstall", "DoShutdown", "DoReboot", "DoSoftReboot"):
      if params.get_bool(param):
        shutdown = True
        # record why/when manager exited; main() acts on the param after cleanup
        params.put("LastManagerExitReason", f"{param} {datetime.datetime.now()}")
        cloudlog.warning(f"Shutting down manager - {param} set")

    if shutdown:
      break
|
||||
|
||||
|
||||
def main() -> None:
  """Entry point: set up FrogPilot, init params/registration, run the
  supervision loop, then honor any uninstall/reboot/shutdown request."""
  frogpilot_functions = FrogPilotFunctions()

  try:
    frogpilot_functions.setup_frogpilot()
  except subprocess.CalledProcessError as e:
    # setup is a hard prerequisite; bail out instead of running half-configured
    print(f"Failed to setup FrogPilot. Error: {e}")
    return

  log_dir = manager_init(frogpilot_functions)
  if os.getenv("PREPAREONLY") is not None:
    # CI/build hook: just preimport everything, don't start processes
    return

  # SystemExit on sigterm
  signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))

  try:
    manager_thread(frogpilot_functions, log_dir)
  except Exception:
    traceback.print_exc()
    sentry.capture_exception()
  finally:
    # always tear down children, even if the loop crashed
    manager_cleanup()

  # act on whichever exit-reason param caused manager_thread() to return
  params = Params()
  if params.get_bool("DoUninstall"):
    cloudlog.warning("uninstalling")
    frogpilot_functions.uninstall_frogpilot()
  elif params.get_bool("DoReboot"):
    cloudlog.warning("reboot")
    HARDWARE.reboot()
  elif params.get_bool("DoSoftReboot"):
    cloudlog.warning("softreboot")
    HARDWARE.soft_reboot()
  elif params.get_bool("DoShutdown"):
    cloudlog.warning("shutdown")
    HARDWARE.shutdown()
|
||||
|
||||
|
||||
if __name__ == "__main__":
  unblock_stdout()

  try:
    main()
  except KeyboardInterrupt:
    print("got CTRL-C, exiting")
  except Exception:
    # persist the failure to a log file, then surface it on the device screen
    add_file_handler(cloudlog)
    cloudlog.exception("Manager failed to start")

    # best effort: stop the UI so the error window below is visible
    try:
      managed_processes['ui'].stop()
    except Exception:
      pass

    # Show last 3 lines of traceback
    error = traceback.format_exc(-3)
    error = "Manager failed to start\n\n" + error
    with TextWindow(error) as t:
      t.wait_for_exit()

    raise

  # manual exit because we are forked
  sys.exit(0)
|
||||
309
selfdrive/manager/process.py
Executable file
309
selfdrive/manager/process.py
Executable file
@@ -0,0 +1,309 @@
|
||||
import importlib
|
||||
import os
|
||||
import signal
|
||||
import struct
|
||||
import datetime
|
||||
import time
|
||||
import subprocess
|
||||
from collections.abc import Callable, ValuesView
|
||||
from abc import ABC, abstractmethod
|
||||
from multiprocessing import Process
|
||||
|
||||
from setproctitle import setproctitle
|
||||
|
||||
from cereal import car, log
|
||||
import cereal.messaging as messaging
|
||||
import openpilot.selfdrive.sentry as sentry
|
||||
from openpilot.common.basedir import BASEDIR
|
||||
from openpilot.common.params import Params
|
||||
from openpilot.common.swaglog import cloudlog
|
||||
|
||||
# Prefix for per-process watchdog files in shared memory; children append their
# pid and periodically write a nanosecond timestamp that check_watchdog() reads.
WATCHDOG_FN = "/dev/shm/wd_"
# Watchdog restarts can be disabled for debugging via the NO_WATCHDOG env var.
ENABLE_WATCHDOG = os.getenv("NO_WATCHDOG") is None

# Per-boot log directory for child process output, named by manager start time.
# Created eagerly at import so every launcher can open its log file.
timestamp = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
_log_dir = f"/data/log2/{timestamp}"
os.makedirs(_log_dir, exist_ok=True)
|
||||
|
||||
|
||||
|
||||
def launcher(proc: str, name: str) -> None:
  """Run a Python managed process in this (forked) child.

  proc: dotted module path with a main() entry point.
  name: daemon name used for the process title and log/sentry tags.
  """
  try:
    # import the process
    mod = importlib.import_module(proc)

    # rename the process
    setproctitle(proc)

    # create new context since we forked
    messaging.context = messaging.Context()

    # add daemon name tag to logs
    cloudlog.bind(daemon=name)
    sentry.set_tag("daemon", name)

    # exec the process
    mod.main()
  except KeyboardInterrupt:
    cloudlog.warning(f"child {proc} got SIGINT")
  except Exception:
    # can't install the crash handler because sys.excepthook doesn't play nice
    # with threads, so catch it here.
    sentry.capture_exception()
    raise
|
||||
|
||||
|
||||
def nativelauncher(pargs: list[str], cwd: str, name: str) -> None:
  """Replace this (forked) child with a native binary.

  pargs: argv list (pargs[0] is the executable, resolved via PATH/cwd).
  cwd: directory to chdir into first so relative resources resolve.
  name: exported as MANAGER_DAEMON for the child to identify itself.
  """
  os.environ['MANAGER_DAEMON'] = name

  # exec the process (does not return on success)
  os.chdir(cwd)
  os.execvp(pargs[0], pargs)
|
||||
|
||||
def join_process(process: Process, timeout: float) -> None:
  """Wait up to *timeout* seconds for *process* to exit.

  Process.join(timeout) can hang due to a python 3 bug
  (https://bugs.python.org/issue28382), so poll the exitcode instead.
  """
  deadline = time.monotonic() + timeout
  while process.exitcode is None and time.monotonic() < deadline:
    time.sleep(0.001)
|
||||
|
||||
|
||||
class ManagerProcess(ABC):
|
||||
daemon = False
|
||||
sigkill = False
|
||||
should_run: Callable[[bool, Params, car.CarParams], bool]
|
||||
proc: Process | None = None
|
||||
enabled = True
|
||||
name = ""
|
||||
|
||||
last_watchdog_time = 0
|
||||
watchdog_max_dt: int | None = None
|
||||
watchdog_seen = False
|
||||
shutting_down = False
|
||||
|
||||
@abstractmethod
|
||||
def prepare(self) -> None:
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def start(self) -> None:
|
||||
pass
|
||||
|
||||
def restart(self) -> None:
|
||||
self.stop(sig=signal.SIGKILL)
|
||||
self.start()
|
||||
|
||||
def check_watchdog(self, started: bool) -> None:
|
||||
if self.watchdog_max_dt is None or self.proc is None:
|
||||
return
|
||||
|
||||
try:
|
||||
fn = WATCHDOG_FN + str(self.proc.pid)
|
||||
with open(fn, "rb") as f:
|
||||
# TODO: why can't pylint find struct.unpack?
|
||||
self.last_watchdog_time = struct.unpack('Q', f.read())[0]
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
dt = time.monotonic() - self.last_watchdog_time / 1e9
|
||||
|
||||
if dt > self.watchdog_max_dt:
|
||||
if (self.watchdog_seen or self.always_watchdog and self.proc.exitcode is not None) and ENABLE_WATCHDOG:
|
||||
cloudlog.error(f"Watchdog timeout for {self.name} (exitcode {self.proc.exitcode}) restarting ({started=})")
|
||||
self.restart()
|
||||
else:
|
||||
self.watchdog_seen = True
|
||||
|
||||
def stop(self, retry: bool = True, block: bool = True, sig: signal.Signals = None) -> int | None:
|
||||
if self.proc is None:
|
||||
return None
|
||||
|
||||
if self.proc.exitcode is None:
|
||||
if not self.shutting_down:
|
||||
cloudlog.info(f"killing {self.name}")
|
||||
if sig is None:
|
||||
sig = signal.SIGKILL if self.sigkill else signal.SIGINT
|
||||
self.signal(sig)
|
||||
self.shutting_down = True
|
||||
|
||||
if not block:
|
||||
return None
|
||||
|
||||
join_process(self.proc, 5)
|
||||
|
||||
# If process failed to die send SIGKILL
|
||||
if self.proc.exitcode is None and retry:
|
||||
cloudlog.info(f"killing {self.name} with SIGKILL")
|
||||
self.signal(signal.SIGKILL)
|
||||
self.proc.join()
|
||||
|
||||
ret = self.proc.exitcode
|
||||
cloudlog.info(f"{self.name} is dead with {ret}")
|
||||
|
||||
if self.proc.exitcode is not None:
|
||||
self.shutting_down = False
|
||||
self.proc = None
|
||||
|
||||
return ret
|
||||
|
||||
def signal(self, sig: int) -> None:
|
||||
if self.proc is None:
|
||||
return
|
||||
|
||||
# Don't signal if already exited
|
||||
if self.proc.exitcode is not None and self.proc.pid is not None:
|
||||
return
|
||||
|
||||
# Can't signal if we don't have a pid
|
||||
if self.proc.pid is None:
|
||||
return
|
||||
|
||||
cloudlog.info(f"sending signal {sig} to {self.name}")
|
||||
os.kill(self.proc.pid, sig)
|
||||
|
||||
def get_process_state_msg(self):
|
||||
state = log.ManagerState.ProcessState.new_message()
|
||||
state.name = self.name
|
||||
if self.proc:
|
||||
state.running = self.proc.is_alive()
|
||||
state.shouldBeRunning = self.proc is not None and not self.shutting_down
|
||||
state.pid = self.proc.pid or 0
|
||||
state.exitCode = self.proc.exitcode or 0
|
||||
return state
|
||||
|
||||
|
||||
class NativeProcess(ManagerProcess):
  """A compiled (non-Python) managed process, exec'd from its build directory."""

  def __init__(self, name, cwd, cmdline, should_run, enabled=True, sigkill=False, watchdog_max_dt=None, always_watchdog=False):
    self.name = name
    self.cwd = cwd            # path relative to BASEDIR
    self.cmdline = cmdline    # argv list, e.g. ["./camerad"]
    self.should_run = should_run
    self.enabled = enabled
    self.sigkill = sigkill
    self.watchdog_max_dt = watchdog_max_dt
    self.launcher = nativelauncher
    self.always_watchdog = always_watchdog

  def prepare(self) -> None:
    # nothing to pre-import for a native binary
    pass

  def start(self) -> None:
    # In case we only tried a non blocking stop we need to stop it before restarting
    if self.shutting_down:
      self.stop()

    if self.proc is not None:
      return

    # (removed an unused `log_path` local and its `global _log_dir`: dead code
    # left over from a WIP logging scheme — nativelauncher never received it)
    cwd = os.path.join(BASEDIR, self.cwd)
    cloudlog.info(f"starting process {self.name}")
    self.proc = Process(name=self.name, target=self.launcher, args=(self.cmdline, cwd, self.name))
    self.proc.start()
    self.watchdog_seen = False
    self.shutting_down = False
|
||||
|
||||
|
||||
class PythonProcess(ManagerProcess):
  """A managed process implemented as a Python module with a main() entry point."""

  def __init__(self, name, module, should_run, enabled=True, sigkill=False, watchdog_max_dt=None, always_watchdog=False):
    self.name = name
    self.module = module      # dotted module path passed to launcher()
    self.should_run = should_run
    self.enabled = enabled
    self.sigkill = sigkill
    self.watchdog_max_dt = watchdog_max_dt
    self.launcher = launcher
    # New keyword (default False keeps old behavior) for parity with
    # NativeProcess: ManagerProcess.check_watchdog() reads this attribute, and
    # without it a PythonProcess with watchdog_max_dt set raised AttributeError.
    self.always_watchdog = always_watchdog

  def prepare(self) -> None:
    if self.enabled:
      cloudlog.info(f"preimporting {self.module}")
      importlib.import_module(self.module)

  def start(self) -> None:
    # In case we only tried a non blocking stop we need to stop it before restarting
    if self.shutting_down:
      self.stop()

    if self.proc is not None:
      return

    # (removed an unused `log_path` local and its `global _log_dir`: dead code
    # left over from a WIP logging scheme — launcher never received it)
    cloudlog.info(f"starting python {self.module}")
    self.proc = Process(name=self.name, target=self.launcher, args=(self.module, self.name))
    self.proc.start()
    self.watchdog_seen = False
    self.shutting_down = False
|
||||
|
||||
|
||||
class DaemonProcess(ManagerProcess):
  """Python process that has to stay running across manager restart.

  This is used for athena so you don't lose SSH access when restarting manager.
  The child's pid is persisted in a param so a new manager can re-adopt it.
  """
  def __init__(self, name, module, param_name, enabled=True):
    self.name = name
    self.module = module
    self.param_name = param_name  # param that stores the daemon's pid
    self.enabled = enabled
    self.params = None

  @staticmethod
  def should_run(started, params, CP):
    return True

  def prepare(self) -> None:
    pass

  def start(self) -> None:
    if self.params is None:
      self.params = Params()

    log_path = _log_dir + "/" + self.name + ".log"

    # if the stored pid is alive and running our module, adopt it instead of respawning
    pid = self.params.get(self.param_name, encoding='utf-8')
    if pid is not None:
      try:
        os.kill(int(pid), 0)
        with open(f'/proc/{pid}/cmdline') as f:
          if self.module in f.read():
            # daemon is running
            return
      except (OSError, ValueError):
        # OSError: process is dead (FileNotFoundError is a subclass of OSError,
        # so listing it separately was redundant).
        # ValueError: the stored pid wasn't a valid integer.
        pass

    cloudlog.info(f"starting daemon {self.name}")
    # The with-block closes only the parent's copy of the log fd; the child
    # keeps its own dup. DEVNULL replaces open('/dev/null'), which leaked an
    # fd in the parent on every start.
    with open(log_path, 'a') as log_file:
      proc = subprocess.Popen(['python', '-m', self.module],
                              stdin=subprocess.DEVNULL,
                              stdout=log_file,
                              stderr=subprocess.STDOUT,
                              preexec_fn=os.setpgrp)

    self.params.put(self.param_name, str(proc.pid))

  def stop(self, retry=True, block=True, sig=None) -> None:
    # deliberately never stopped: the daemon must survive manager restarts
    pass
|
||||
|
||||
|
||||
def ensure_running(procs: "ValuesView[ManagerProcess]", started: bool, params=None, CP: "car.CarParams" = None,
                   not_run: "list[str] | None" = None, log_dir: "str | None" = None) -> "list[ManagerProcess]":
  """Start/stop processes to match the desired state; returns the running set.

  A process runs when it is enabled, not in *not_run*, and its should_run
  predicate is true for the current (started, params, CP); everything else
  gets a non-blocking stop. Watchdogs are checked for every process.

  log_dir: optional override for the child-process log directory. manager_thread
  calls this function with log_dir=..., which previously raised TypeError
  because the parameter was missing from this signature.
  """
  global _log_dir
  if log_dir is not None:
    _log_dir = log_dir

  if not_run is None:
    not_run = []

  running = []
  for p in procs:
    if p.enabled and p.name not in not_run and p.should_run(started, params, CP):
      running.append(p)
    else:
      p.stop(block=False)

    p.check_watchdog(started)

  for p in running:
    p.start()

  return running
|
||||
106
selfdrive/manager/process_config.py
Executable file
106
selfdrive/manager/process_config.py
Executable file
@@ -0,0 +1,106 @@
|
||||
import os
|
||||
|
||||
from cereal import car
|
||||
from openpilot.common.params import Params
|
||||
from openpilot.system.hardware import PC, TICI
|
||||
from openpilot.selfdrive.manager.process import PythonProcess, NativeProcess, DaemonProcess
|
||||
|
||||
|
||||
# USE_WEBCAM env var: substitute a webcam for the device cameras when running on PC
WEBCAM = os.getenv("USE_WEBCAM") is not None
|
||||
|
||||
def driverview(started: bool, params: Params, CP: car.CarParams) -> bool:
  """Run while onroad, or whenever the driver-camera preview is enabled."""
  if started:
    return True
  return params.get_bool("IsDriverViewEnabled")
|
||||
|
||||
def notcar(started: bool, params: Params, CP: car.CarParams) -> bool:
  """Run only onroad on non-car platforms (e.g. comma body)."""
  if not started:
    return False
  return CP.notCar
|
||||
|
||||
def iscar(started: bool, params: Params, CP: car.CarParams) -> bool:
  """Run only onroad on actual-car platforms."""
  if not started:
    return False
  return not CP.notCar
|
||||
|
||||
def logging(started, params, CP: car.CarParams) -> bool:
  """Log while onroad, unless logging is disabled on a non-car platform."""
  # Logging is suppressed only when this is not a car AND the user opted out.
  disable_ok = CP.notCar and params.get_bool("DisableLogging")
  return started and not disable_ok
|
||||
|
||||
def ublox_available() -> bool:
  """True when the ublox serial device exists and quectel GPS isn't forced."""
  has_ublox_tty = os.path.exists('/dev/ttyHS0')
  quectel_forced = os.path.exists('/persist/comma/use-quectel-gps')
  return has_ublox_tty and not quectel_forced
|
||||
|
||||
def ublox(started, params, CP: car.CarParams) -> bool:
  """Run ublox GPS procs when the hardware is present; cache availability in params."""
  available = ublox_available()
  # keep the UbloxAvailable param in sync so the UI/other readers see it
  if params.get_bool("UbloxAvailable") != available:
    params.put_bool("UbloxAvailable", available)
  return available
|
||||
|
||||
def qcomgps(started, params, CP: car.CarParams) -> bool:
  """Run the qualcomm GPS path exactly when ublox hardware is unavailable."""
  return not ublox_available()
|
||||
|
||||
def always_run(started, params, CP: car.CarParams) -> bool:
  """should_run predicate for processes that run regardless of onroad state."""
  return True
|
||||
|
||||
def only_onroad(started: bool, params, CP: car.CarParams) -> bool:
  """should_run predicate: run exactly while the device is onroad."""
  return started
|
||||
|
||||
def only_offroad(started, params, CP: car.CarParams) -> bool:
  """should_run predicate: run exactly while the device is offroad."""
  if started:
    return False
  return True
|
||||
|
||||
# FrogPilot functions
|
||||
def allow_logging(started, params, CP: car.CarParams) -> bool:
  """Like logging(), but also honors the FrogPilot 'NoLogging' device setting."""
  user_disabled = params.get_bool("DeviceManagement") and params.get_bool("NoLogging")
  if user_disabled:
    return False
  return logging(started, params, CP)
|
||||
|
||||
def allow_uploads(started, params, CP: car.CarParams) -> bool:
  """Uploads are allowed unless the FrogPilot 'NoUploads' device setting blocks them."""
  uploads_blocked = params.get_bool("DeviceManagement") and params.get_bool("NoUploads")
  return not uploads_blocked
|
||||
|
||||
# Master list of all processes manager can supervise. Each entry pairs a
# process with its should_run predicate; `enabled=False` keeps it defined but
# never started. Commented-out entries are intentionally disabled in this fork.
procs = [
  DaemonProcess("manage_athenad", "selfdrive.athena.manage_athenad", "AthenadPid"),

  NativeProcess("camerad", "system/camerad", ["./camerad"], driverview),
  NativeProcess("logcatd", "system/logcatd", ["./logcatd"], allow_logging),
  NativeProcess("proclogd", "system/proclogd", ["./proclogd"], allow_logging),
  PythonProcess("logmessaged", "system.logmessaged", allow_logging),
  PythonProcess("micd", "system.micd", iscar),
  PythonProcess("timed", "system.timed", always_run, enabled=not PC),

  PythonProcess("dmonitoringmodeld", "selfdrive.modeld.dmonitoringmodeld", driverview, enabled=(not PC or WEBCAM)),
  NativeProcess("encoderd", "system/loggerd", ["./encoderd"], allow_logging),
  NativeProcess("stream_encoderd", "system/loggerd", ["./encoderd", "--stream"], notcar),
  NativeProcess("loggerd", "system/loggerd", ["./loggerd"], allow_logging),
  NativeProcess("modeld", "selfdrive/modeld", ["./modeld"], only_onroad),
  #NativeProcess("mapsd", "selfdrive/navd", ["./mapsd"], only_onroad),
  #PythonProcess("navmodeld", "selfdrive.modeld.navmodeld", only_onroad),
  NativeProcess("sensord", "system/sensord", ["./sensord"], only_onroad, enabled=not PC),
  # NOTE(review): always_watchdog is passed the *function* only_offroad here; since
  # check_watchdog() only truth-tests the attribute, a function is always truthy,
  # i.e. this behaves like always_watchdog=True — confirm that's the intent.
  NativeProcess("ui", "selfdrive/ui", ["./ui"], always_run, watchdog_max_dt=(5 if not PC else None), always_watchdog=only_offroad),
  PythonProcess("soundd", "selfdrive.ui.soundd", only_onroad),
  NativeProcess("locationd", "selfdrive/locationd", ["./locationd"], only_onroad),
  NativeProcess("boardd", "selfdrive/boardd", ["./boardd"], always_run, enabled=False),
  PythonProcess("calibrationd", "selfdrive.locationd.calibrationd", only_onroad),
  PythonProcess("torqued", "selfdrive.locationd.torqued", only_onroad),
  PythonProcess("controlsd", "selfdrive.controls.controlsd", only_onroad),
  PythonProcess("deleter", "system.loggerd.deleter", always_run),
  PythonProcess("dmonitoringd", "selfdrive.monitoring.dmonitoringd", driverview, enabled=(not PC or WEBCAM)),
  # PythonProcess("qcomgpsd", "system.qcomgpsd.qcomgpsd", qcomgps, enabled=TICI), # Fixme
  # PythonProcess("ugpsd", "system.ugpsd", only_onroad, enabled=TICI),
  #PythonProcess("navd", "selfdrive.navd.navd", only_onroad),
  PythonProcess("pandad", "selfdrive.boardd.pandad", always_run),
  PythonProcess("paramsd", "selfdrive.locationd.paramsd", only_onroad),
  NativeProcess("ubloxd", "system/ubloxd", ["./ubloxd"], ublox, enabled=TICI),
  PythonProcess("pigeond", "system.ubloxd.pigeond", ublox, enabled=TICI),
  PythonProcess("plannerd", "selfdrive.controls.plannerd", only_onroad),
  PythonProcess("radard", "selfdrive.controls.radard", only_onroad),
  PythonProcess("thermald", "selfdrive.thermald.thermald", always_run),
  PythonProcess("tombstoned", "selfdrive.tombstoned", always_run, enabled=not PC),
  # PythonProcess("updated", "selfdrive.updated.updated", always_run, enabled=not PC),
  # PythonProcess("uploader", "system.loggerd.uploader", allow_uploads),
  PythonProcess("statsd", "selfdrive.statsd", allow_logging),

  # debug procs
  NativeProcess("bridge", "cereal/messaging", ["./bridge"], notcar),
  PythonProcess("webrtcd", "system.webrtc.webrtcd", notcar),
  PythonProcess("webjoystick", "tools.bodyteleop.web", notcar),

  # FrogPilot processes
  PythonProcess("fleet_manager", "selfdrive.frogpilot.fleetmanager.fleet_manager", always_run),
  PythonProcess("frogpilot_process", "selfdrive.frogpilot.frogpilot_process", always_run),
]

# Lookup by process name; this is the interface the manager module uses.
managed_processes = {p.name: p for p in procs}
|
||||
251
selfdrive/manager/process_wip.txt
Normal file
251
selfdrive/manager/process_wip.txt
Normal file
@@ -0,0 +1,251 @@
|
||||
import importlib
|
||||
import os
|
||||
import signal
|
||||
import struct
|
||||
import time
|
||||
import subprocess
|
||||
from collections.abc import Callable, ValuesView
|
||||
from abc import ABC, abstractmethod
|
||||
from multiprocessing import Process
|
||||
|
||||
from setproctitle import setproctitle
|
||||
|
||||
from cereal import car, log
|
||||
import cereal.messaging as messaging
|
||||
import openpilot.selfdrive.sentry as sentry
|
||||
from openpilot.common.basedir import BASEDIR
|
||||
from openpilot.common.params import Params
|
||||
from openpilot.common.swaglog import cloudlog
|
||||
|
||||
# (WIP scratch file) watchdog file prefix in shared memory
WATCHDOG_FN = "/dev/shm/wd_"
ENABLE_WATCHDOG = os.getenv("NO_WATCHDOG") is None
# NOTE: unconditionally overrides the env-var setting above — the watchdog is
# fully disabled in this WIP version.
ENABLE_WATCHDOG = False # Fixme
# Child-process log directory; set by ensure_running() before processes start.
_log_dir = ""
|
||||
|
||||
def nativelauncher(pargs: list[str], cwd: str, name: str, log_path: str) -> None:
  """(WIP) Run a native binary, teeing its combined stdout/stderr to *log_path*
  and to our own stdout; blocks until the child exits."""
  os.environ['MANAGER_DAEMON'] = name
  with open(log_path, 'a') as log_file:
    # chdir so the binary resolves its relative resources
    os.chdir(cwd)
    proc = subprocess.Popen(pargs, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1, universal_newlines=True)
    log_file.write("Started "+name)
    for line in proc.stdout:
      print(line, end='')
      log_file.write(line)
    proc.wait()
|
||||
|
||||
def launcher(proc: str, name: str, log_path: str) -> None:
  """(WIP) Run a python module as a `python -m` subprocess forever, restarting
  it whenever it exits; tee its output to *log_path* and our stdout.

  iter(int, 1) never yields the sentinel (int() == 0 != 1), so this loops
  indefinitely.
  """
  for _ in iter(int, 1):
    try:
      # NOTE(review): the imported module object is unused — the module is
      # actually executed below via `python -m`; confirm the import is only
      # wanted for its validation side effect.
      mod = importlib.import_module(proc)
      setproctitle(proc)
      # create new context since we forked
      messaging.context = messaging.Context()
      cloudlog.bind(daemon=name)
      sentry.set_tag("daemon", name)
      # NOTE(review): `proc` (the module-name str) is rebound to the Popen
      # object here — on the next loop iteration import_module(proc) receives
      # a Popen, which would raise.
      with open(log_path, 'a') as log_file, subprocess.Popen(['python', '-m', proc], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=1, universal_newlines=True) as proc:
        log_file.write("Started "+name)
        for line in proc.stdout:
          print(line, end='')
          log_file.write(line)
        proc.wait()
    except Exception as e:
      print ("Fatal: "+name)
      print (e)
      sentry.capture_exception()
|
||||
|
||||
def join_process(process: Process, timeout: float) -> None:
  """(WIP) Poll *process* until it exits or *timeout* seconds elapse
  (Process.join(timeout) can hang — https://bugs.python.org/issue28382)."""
  t = time.monotonic()
  while time.monotonic() - t < timeout and process.exitcode is None:
    time.sleep(0.001)
|
||||
|
||||
class ManagerProcess(ABC):
  """(WIP scratch version) Base class for a manager-supervised process."""
  daemon = False
  sigkill = False
  should_run: Callable[[bool, Params, car.CarParams], bool]
  proc: Process | None = None
  enabled = True
  name = ""
  last_watchdog_time = 0        # ns timestamp last read from the watchdog file
  watchdog_max_dt: int | None = None  # seconds without a pet before restart; None disables
  watchdog_seen = False
  shutting_down = False

  @abstractmethod
  def prepare(self) -> None:
    pass

  @abstractmethod
  def start(self) -> None:
    pass

  def restart(self) -> None:
    # only stop first if the process has already exited; then (re)start
    if self.proc is not None and self.proc.exitcode is not None:
      self.stop(sig=signal.SIGKILL, block=False)
    self.start()

  def check_watchdog(self, started: bool) -> None:
    """Restart the process if it stopped petting its watchdog file."""
    if self.watchdog_max_dt is None or self.proc is None:
      return
    try:
      fn = WATCHDOG_FN + str(self.proc.pid)
      with open(fn, "rb") as f:
        self.last_watchdog_time = struct.unpack('Q', f.read())[0]
    except Exception:
      # watchdog file may not exist yet; keep the previous timestamp
      pass
    dt = time.monotonic() - self.last_watchdog_time / 1e9
    if dt > self.watchdog_max_dt:
      # NOTE(review): self.always_watchdog has no class-level default here —
      # subclasses that don't set it would raise AttributeError on this path.
      if (self.watchdog_seen or self.always_watchdog and self.proc.exitcode is not None) and ENABLE_WATCHDOG:
        cloudlog.error(f"Watchdog timeout for {self.name} (exitcode {self.proc.exitcode}) restarting ({started=})")
        self.restart()
    else:
      self.watchdog_seen = True

  def stop(self, retry: bool = True, block: bool = True, sig: signal.Signals = None) -> int | None:
    """Stop the process; returns its exit code (None if not running or non-blocking)."""
    if self.proc is None:
      return None
    if self.proc.exitcode is None:
      if not self.shutting_down:
        cloudlog.info(f"killing {self.name}")
        if sig is None:
          sig = signal.SIGKILL if self.sigkill else signal.SIGINT
        self.signal(sig)
        self.shutting_down = True
      if not block:
        return None
      join_process(self.proc, 5)
      # if the process failed to die, escalate to SIGKILL
      if self.proc.exitcode is None and retry:
        cloudlog.info(f"killing {self.name} with SIGKILL")
        self.signal(signal.SIGKILL)
        self.proc.join()
    ret = self.proc.exitcode
    cloudlog.info(f"{self.name} is dead with {ret}")
    if self.proc.exitcode is not None:
      self.shutting_down = False
      self.proc = None
    return ret

  def signal(self, sig: int) -> None:
    """Send *sig* to the process if it is alive and has a pid."""
    if self.proc is None or self.proc.exitcode is not None or self.proc.pid is None:
      return
    cloudlog.info(f"sending signal {sig} to {self.name}")
    os.kill(self.proc.pid, sig)

  def get_process_state_msg(self):
    """Build a cereal ManagerState.ProcessState entry for this process."""
    state = log.ManagerState.ProcessState.new_message()
    state.name = self.name
    if self.proc:
      state.running = self.proc.is_alive()
      state.shouldBeRunning = self.proc is not None and not self.shutting_down
      state.pid = self.proc.pid or 0
      state.exitCode = self.proc.exitcode or 0
    return state
|
||||
|
||||
|
||||
class NativeProcess(ManagerProcess):
  """(WIP) A compiled managed process, run via nativelauncher with log teeing."""
  def __init__(self, name, cwd, cmdline, should_run, enabled=True, sigkill=False, watchdog_max_dt=None, always_watchdog=False):
    self.name = name
    self.cwd = cwd            # path relative to BASEDIR
    self.cmdline = cmdline    # argv list
    self.should_run = should_run
    self.enabled = enabled
    self.sigkill = sigkill
    self.watchdog_max_dt = watchdog_max_dt
    self.launcher = nativelauncher
    self.always_watchdog = always_watchdog

  def prepare(self) -> None:
    pass

  def start(self) -> None:
    global _log_dir
    log_path = _log_dir+"/"+self.name+".log"
    if self.shutting_down or self.proc is not None:
      return
    # NOTE(review): unlike the non-WIP version, watchdog_seen/shutting_down
    # are not reset here — confirm whether that's intentional.
    self.proc = Process(target=nativelauncher, args=(self.cmdline, os.path.join(BASEDIR, self.cwd), self.name, log_path))
    self.proc.start()
|
||||
|
||||
class PythonProcess(ManagerProcess):
  """(WIP) A managed process run as a python module via launcher() with log teeing."""
  def __init__(self, name, module, should_run, enabled=True, sigkill=False, watchdog_max_dt=None):
    self.name = name
    self.module = module
    self.should_run = should_run
    self.enabled = enabled
    self.sigkill = sigkill
    self.watchdog_max_dt = watchdog_max_dt
    self.launcher = launcher

  def prepare(self) -> None:
    if self.enabled:
      cloudlog.info(f"preimporting {self.module}")
      importlib.import_module(self.module)

  def start(self) -> None:
    global _log_dir
    log_path = _log_dir+"/"+self.name+".log"
    if self.shutting_down or self.proc is not None:
      return
    self.proc = Process(name=self.name, target=launcher, args=(self.module, self.name, log_path))
    self.proc.start()
    self.watchdog_seen = False
    self.shutting_down = False
|
||||
|
||||
class DaemonProcess(ManagerProcess):
  """Python process that has to stay running across manager restart.
  This is used for athena so you don't lose SSH access when restarting manager."""
  def __init__(self, name, module, param_name, enabled=True):
    self.name = name
    self.module = module
    self.param_name = param_name  # param that stores the daemon's pid
    self.enabled = enabled
    self.params = None

  @staticmethod
  def should_run(started, params, CP):
    return True

  def prepare(self) -> None:
    pass

  def start(self) -> None:
    global _log_dir
    log_path = _log_dir+"/"+self.name+".log"
    if self.params is None:
      self.params = Params()

    # if the stored pid is still alive, adopt it instead of respawning
    pid = self.params.get(self.param_name, encoding='utf-8')
    if pid is not None:
      try:
        os.kill(int(pid), 0)
        return # Process is already running
      except OSError:
        pass # Process not running, continue to start it

    cloudlog.info(f"starting daemon {self.name}")
    # NOTE(review): self.proc is a subprocess.Popen here, while the base class
    # annotates it as multiprocessing.Process — confirm consumers handle both.
    self.proc = subprocess.Popen(['python', '-m', self.module],
                                 stdin=open('/dev/null'),
                                 stdout=open(log_path, 'a'),
                                 stderr=subprocess.STDOUT,
                                 preexec_fn=os.setpgrp)
    self.params.put(self.param_name, str(self.proc.pid))

  def stop(self, retry=True, block=True, sig=None) -> None:
    # deliberately never stopped: must survive manager restarts
    pass
|
||||
|
||||
|
||||
def ensure_running(procs: ValuesView[ManagerProcess], started: bool, params=None, CP: car.CarParams=None, not_run: list[str] | None=None, log_dir: str = None) -> list[ManagerProcess]:
  """(WIP) Start/stop processes to match desired state; returns the running set.

  log_dir is stored in the module-level _log_dir so launchers know where to
  write child logs.
  """
  global _log_dir
  # NOTE(review): assigned unconditionally — a None log_dir clobbers any
  # previously-set directory.
  _log_dir = log_dir
  if not_run is None:
    not_run = []

  running = []
  for p in procs:
    if p.enabled and p.name not in not_run and p.should_run(started, params, CP):
      # restart if never started or already exited
      if p.proc is None or (hasattr(p.proc, 'exitcode') and p.proc.exitcode is not None):
        p.start()
      running.append(p)
    else:
      p.stop(block=False)

    p.check_watchdog(started)

  return running
|
||||
350
selfdrive/manager/process_wip2.txt
Normal file
350
selfdrive/manager/process_wip2.txt
Normal file
@@ -0,0 +1,350 @@
|
||||
import importlib
|
||||
import os
|
||||
import signal
|
||||
import struct
|
||||
import datetime
|
||||
import time
|
||||
import subprocess
|
||||
from collections.abc import Callable, ValuesView
|
||||
from abc import ABC, abstractmethod
|
||||
from multiprocessing import Process
|
||||
|
||||
from setproctitle import setproctitle
|
||||
|
||||
from cereal import car, log
|
||||
import cereal.messaging as messaging
|
||||
import openpilot.selfdrive.sentry as sentry
|
||||
from openpilot.common.basedir import BASEDIR
|
||||
from openpilot.common.params import Params
|
||||
from openpilot.common.swaglog import cloudlog
|
||||
|
||||
# (WIP scratch file 2) watchdog file prefix in shared memory
WATCHDOG_FN = "/dev/shm/wd_"
ENABLE_WATCHDOG = os.getenv("NO_WATCHDOG") is None

# Per-boot child-process log directory, created at import time.
timestamp = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
_log_dir = f"/data/log2/{timestamp}"
os.makedirs(_log_dir, exist_ok=True)
|
||||
|
||||
# def launcher(proc: str, name: str, log_path: str) -> None:
|
||||
# try:
|
||||
# # import the process
|
||||
# mod = importlib.import_module(proc)
|
||||
|
||||
# global _log_dir
|
||||
# log_path = os.path.join(_log_dir, f"{name}.log")
|
||||
|
||||
# # rename the process
|
||||
# setproctitle(proc)
|
||||
|
||||
# # create new context since we forked
|
||||
# messaging.context = messaging.Context()
|
||||
|
||||
# # add daemon name tag to logs
|
||||
# cloudlog.bind(daemon=name)
|
||||
# sentry.set_tag("daemon", name)
|
||||
|
||||
# # exec the process
|
||||
# mod.main()
|
||||
# except KeyboardInterrupt:
|
||||
# cloudlog.warning(f"child {proc} got SIGINT")
|
||||
# except Exception as e:
|
||||
# # can't install the crash handler because sys.excepthook doesn't play nice
|
||||
# # with threads, so catch it here.
|
||||
# with open(log_path, 'a') as file: file.write(str(e)+"\n")
|
||||
# sentry.capture_exception()
|
||||
# raise
|
||||
|
||||
|
||||
def launcher(proc: str, name: str) -> None:
  """Import and run a python daemon module, teeing its output to a log file.

  Args:
    proc: dotted module path of the daemon to run.
    name: short daemon name, used for the process title, log tags,
          and the log filename.
  """
  # Compute the log path before anything that can raise: previously this
  # was assigned inside the try after import_module(), so a failed import
  # left log_path unbound and the except handler itself raised NameError.
  log_path = os.path.join(_log_dir, f"{name}.log")

  try:
    # Import the process module
    mod = importlib.import_module(proc)

    # Rename the process
    setproctitle(name)

    # Create new context since we forked
    messaging.context = messaging.Context()

    # Add daemon name tag to logs
    cloudlog.bind(daemon=name)
    sentry.set_tag("daemon", name)

    # Run the module as a child, piping stdout+stderr through tee so the
    # output lands both on the console and in the per-daemon log file.
    command = f"bash -c 'python -m {proc} 2>&1 | tee {log_path}'"
    subprocess.run(command, shell=True, executable='/bin/bash', cwd=os.path.dirname(mod.__file__))

  except KeyboardInterrupt:
    cloudlog.warning(f"child {proc} got SIGINT")
  except Exception as e:
    # can't install the crash handler because sys.excepthook doesn't play
    # nice with threads, so persist and report the exception here
    with open(log_path, 'a') as file:
      file.write(str(e) + "\n")
    sentry.capture_exception()
    raise
|
||||
|
||||
def nativelauncher(pargs: list[str], cwd: str, name: str) -> None:
  """Run a native (compiled) daemon, teeing its output to a log file.

  Args:
    pargs: argv of the native process (program and arguments).
    cwd: working directory to run the process in.
    name: daemon name, exported as MANAGER_DAEMON and used for the log filename.
  """
  os.environ['MANAGER_DAEMON'] = name

  log_path = os.path.join(_log_dir, f"{name}.log")

  # Quote each argument and the log path so paths containing spaces or
  # shell metacharacters can't break (or inject into) the pipeline.
  # subprocess.run(shell=True) already wraps the string in /bin/bash -c,
  # so the previous extra `bash -c "..."` layer was a redundant nested shell.
  command = f"{shlex.join(pargs)} 2>&1 | tee {shlex.quote(log_path)}"

  subprocess.run(command, shell=True, cwd=cwd, executable='/bin/bash')
|
||||
|
||||
|
||||
def join_process(process: Process, timeout: float) -> None:
  """Wait up to *timeout* seconds for *process* to exit.

  Process.join(timeout) can hang due to a python 3 bug
  (https://bugs.python.org/issue28382), so poll the exitcode instead.
  """
  deadline = time.monotonic() + timeout
  while process.exitcode is None and time.monotonic() < deadline:
    time.sleep(0.001)
|
||||
|
||||
|
||||
class ManagerProcess(ABC):
  """Base class for a process supervised by manager.

  Subclasses implement prepare() and start(); this base provides the
  shared stop/signal/watchdog machinery and process-state reporting.
  """
  daemon = False
  sigkill = False  # if True, stop() defaults to SIGKILL instead of SIGINT
  should_run: Callable[[bool, Params, car.CarParams], bool]
  proc: Process | None = None
  enabled = True
  name = ""

  # Allow a watchdog restart even before the watchdog file has been seen,
  # provided the process has exited. Default False; previously this
  # attribute existed only on NativeProcess, so check_watchdog raised
  # AttributeError for any other subclass with watchdog_max_dt set.
  always_watchdog = False

  last_watchdog_time = 0  # nanoseconds (converted with /1e9 below)
  watchdog_max_dt: int | None = None  # seconds; None disables the watchdog
  watchdog_seen = False
  shutting_down = False

  @abstractmethod
  def prepare(self) -> None:
    pass

  @abstractmethod
  def start(self) -> None:
    pass

  def restart(self) -> None:
    """Hard-restart the process: SIGKILL, then start again."""
    self.stop(sig=signal.SIGKILL)
    self.start()

  def check_watchdog(self, started: bool) -> None:
    """Restart the process if its watchdog timestamp has gone stale."""
    if self.watchdog_max_dt is None or self.proc is None:
      return

    try:
      fn = WATCHDOG_FN + str(self.proc.pid)
      with open(fn, "rb") as f:
        # TODO: why can't pylint find struct.unpack?
        self.last_watchdog_time = struct.unpack('Q', f.read())[0]
    except Exception:
      # the watchdog file may not exist yet; keep the previous timestamp
      pass

    dt = time.monotonic() - self.last_watchdog_time / 1e9

    if dt > self.watchdog_max_dt:
      # `or` binds looser than `and`: restart if the watchdog file was seen
      # at least once, or (for always_watchdog processes) once it has exited.
      if (self.watchdog_seen or self.always_watchdog and self.proc.exitcode is not None) and ENABLE_WATCHDOG:
        cloudlog.error(f"Watchdog timeout for {self.name} (exitcode {self.proc.exitcode}) restarting ({started=})")
        self.restart()
    else:
      self.watchdog_seen = True

  def stop(self, retry: bool = True, block: bool = True, sig: signal.Signals | None = None) -> int | None:
    """Stop the process.

    Args:
      retry: escalate to SIGKILL if the process doesn't die within 5s.
      block: wait for the process to exit; if False just signal and return.
      sig: signal to send; defaults to SIGKILL/SIGINT based on self.sigkill.

    Returns:
      The process exitcode, or None if it wasn't running / hasn't exited.
    """
    if self.proc is None:
      return None

    if self.proc.exitcode is None:
      if not self.shutting_down:
        cloudlog.info(f"killing {self.name}")
        if sig is None:
          sig = signal.SIGKILL if self.sigkill else signal.SIGINT
        self.signal(sig)
        self.shutting_down = True

      if not block:
        return None

      join_process(self.proc, 5)

      # If process failed to die send SIGKILL
      if self.proc.exitcode is None and retry:
        cloudlog.info(f"killing {self.name} with SIGKILL")
        self.signal(signal.SIGKILL)
        self.proc.join()

    ret = self.proc.exitcode
    cloudlog.info(f"{self.name} is dead with {ret}")

    if self.proc.exitcode is not None:
      self.shutting_down = False
      self.proc = None

    return ret

  def signal(self, sig: int) -> None:
    """Send *sig* to the process, if it is still alive and has a pid."""
    if self.proc is None:
      return

    # Don't signal if already exited
    if self.proc.exitcode is not None and self.proc.pid is not None:
      return

    # Can't signal if we don't have a pid
    if self.proc.pid is None:
      return

    cloudlog.info(f"sending signal {sig} to {self.name}")
    os.kill(self.proc.pid, sig)

  def get_process_state_msg(self):
    """Build a log.ManagerState.ProcessState message describing this process."""
    state = log.ManagerState.ProcessState.new_message()
    state.name = self.name
    if self.proc:
      state.running = self.proc.is_alive()
      state.shouldBeRunning = self.proc is not None and not self.shutting_down
      state.pid = self.proc.pid or 0
      state.exitCode = self.proc.exitcode or 0
    return state
|
||||
|
||||
|
||||
class NativeProcess(ManagerProcess):
  """A native (compiled) managed process, run via nativelauncher()."""

  def __init__(self, name, cwd, cmdline, should_run, enabled=True, sigkill=False, watchdog_max_dt=None, always_watchdog=False):
    self.name = name
    self.cwd = cwd  # relative to BASEDIR
    self.cmdline = cmdline  # argv of the native process
    self.should_run = should_run
    self.enabled = enabled
    self.sigkill = sigkill
    self.watchdog_max_dt = watchdog_max_dt
    self.launcher = nativelauncher
    self.always_watchdog = always_watchdog

  def prepare(self) -> None:
    # nothing to pre-import for native processes
    pass

  def start(self) -> None:
    # In case we only tried a non blocking stop we need to stop it before restarting
    if self.shutting_down:
      self.stop()

    if self.proc is not None:
      return

    # (removed a dead `log_path` local + `global _log_dir`: the launcher
    # computes the log path itself, so they were never used here)
    cwd = os.path.join(BASEDIR, self.cwd)
    cloudlog.info(f"starting process {self.name}")
    self.proc = Process(name=self.name, target=self.launcher, args=(self.cmdline, cwd, self.name))
    self.proc.start()
    self.watchdog_seen = False
    self.shutting_down = False
|
||||
|
||||
|
||||
class PythonProcess(ManagerProcess):
  """A managed python process, run via launcher() in a forked Process."""

  def __init__(self, name, module, should_run, enabled=True, sigkill=False, watchdog_max_dt=None, always_watchdog=False):
    self.name = name
    self.module = module  # dotted module path passed to launcher()
    self.should_run = should_run
    self.enabled = enabled
    self.sigkill = sigkill
    self.watchdog_max_dt = watchdog_max_dt
    self.launcher = launcher
    # mirror NativeProcess: check_watchdog() reads this attribute, which was
    # previously never set for python processes (AttributeError with a
    # watchdog configured). Defaults to False, so callers are unaffected.
    self.always_watchdog = always_watchdog

  def prepare(self) -> None:
    # Pre-import the module in the manager so forks start fast and
    # import errors surface early.
    if self.enabled:
      cloudlog.info(f"preimporting {self.module}")
      importlib.import_module(self.module)

  def start(self) -> None:
    # In case we only tried a non blocking stop we need to stop it before restarting
    if self.shutting_down:
      self.stop()

    if self.proc is not None:
      return

    # (removed a dead `log_path` local + `global _log_dir`: the launcher
    # computes the log path itself, so they were never used here)
    cloudlog.info(f"starting python {self.module}")
    self.proc = Process(name=self.name, target=self.launcher, args=(self.module, self.name))
    self.proc.start()
    self.watchdog_seen = False
    self.shutting_down = False
|
||||
|
||||
|
||||
class DaemonProcess(ManagerProcess):
  """Python process that has to stay running across manager restart.
  This is used for athena so you don't lose SSH access when restarting manager."""

  def __init__(self, name, module, param_name, enabled=True):
    self.name = name
    self.module = module  # dotted module path, run with `python -m`
    self.param_name = param_name  # param that stores the running daemon's pid
    self.enabled = enabled
    self.params = None  # lazily constructed in start()

  @staticmethod
  def should_run(started, params, CP):
    # daemons run unconditionally
    return True

  def prepare(self) -> None:
    pass

  def start(self) -> None:
    if self.params is None:
      self.params = Params()

    log_path = os.path.join(_log_dir, f"{self.name}.log")

    # If the pid stored in params points at a live process running our
    # module, the daemon survived a manager restart — leave it alone.
    pid = self.params.get(self.param_name, encoding='utf-8')
    if pid is not None:
      try:
        os.kill(int(pid), 0)
        with open(f'/proc/{pid}/cmdline') as f:
          if self.module in f.read():
            # daemon is running
            return
      except (OSError, FileNotFoundError):
        # process is dead
        pass

    cloudlog.info(f"starting daemon {self.name}")
    # Use DEVNULL for stdin and close our log-file handle after Popen: the
    # previous bare open() calls leaked both file objects in the manager
    # (the child keeps its inherited fds, so closing ours is safe).
    with open(log_path, 'a') as log_file:
      proc = subprocess.Popen(['python', '-m', self.module],
                              stdin=subprocess.DEVNULL,
                              stdout=log_file,
                              stderr=subprocess.STDOUT,
                              preexec_fn=os.setpgrp)  # new process group: survives manager's signals

    self.params.put(self.param_name, str(proc.pid))

  def stop(self, retry=True, block=True, sig=None) -> None:
    # deliberately a no-op: the daemon must outlive manager restarts
    pass
|
||||
|
||||
|
||||
def ensure_running(procs: ValuesView[ManagerProcess], started: bool, params=None, CP: car.CarParams=None,
                   not_run: list[str] | None=None) -> list[ManagerProcess]:
  """Start every process that should currently run and stop the rest.

  Args:
    procs: the managed processes to reconcile.
    started: whether openpilot is engaged/onroad; forwarded to should_run.
    params: Params handle forwarded to should_run.
    CP: CarParams forwarded to should_run.
    not_run: names of processes that must not be started.

  Returns:
    The processes that should be running (each has been start()ed).
  """
  blacklist = [] if not_run is None else not_run

  to_run: list[ManagerProcess] = []
  for proc in procs:
    wanted = proc.enabled and proc.name not in blacklist and proc.should_run(started, params, CP)
    if wanted:
      to_run.append(proc)
    else:
      proc.stop(block=False)

    proc.check_watchdog(started)

  for proc in to_run:
    proc.start()

  return to_run
|
||||
0
selfdrive/manager/test/__init__.py
Executable file
0
selfdrive/manager/test/__init__.py
Executable file
82
selfdrive/manager/test/test_manager.py
Executable file
82
selfdrive/manager/test/test_manager.py
Executable file
@@ -0,0 +1,82 @@
|
||||
#!/usr/bin/env python3
|
||||
import os
|
||||
import pytest
|
||||
import signal
|
||||
import time
|
||||
import unittest
|
||||
|
||||
from parameterized import parameterized
|
||||
|
||||
from cereal import car
|
||||
from openpilot.common.params import Params
|
||||
import openpilot.selfdrive.manager.manager as manager
|
||||
from openpilot.selfdrive.manager.process import ensure_running
|
||||
from openpilot.selfdrive.manager.process_config import managed_processes
|
||||
from openpilot.system.hardware import HARDWARE
|
||||
|
||||
# stub out uploads while the tests run
os.environ['FAKEUPLOAD'] = "1"

MAX_STARTUP_TIME = 3  # seconds; upper bound asserted by test_startup_time
# processes that these tests must never start
BLACKLIST_PROCS = ['manage_athenad', 'pandad', 'pigeond']
|
||||
|
||||
|
||||
@pytest.mark.tici
|
||||
class TestManager(unittest.TestCase):
|
||||
def setUp(self):
|
||||
HARDWARE.set_power_save(False)
|
||||
|
||||
# ensure clean CarParams
|
||||
params = Params()
|
||||
params.clear_all()
|
||||
|
||||
def tearDown(self):
|
||||
manager.manager_cleanup()
|
||||
|
||||
def test_manager_prepare(self):
|
||||
os.environ['PREPAREONLY'] = '1'
|
||||
manager.main()
|
||||
|
||||
def test_blacklisted_procs(self):
|
||||
# TODO: ensure there are blacklisted procs until we have a dedicated test
|
||||
self.assertTrue(len(BLACKLIST_PROCS), "No blacklisted procs to test not_run")
|
||||
|
||||
@parameterized.expand([(i,) for i in range(10)])
|
||||
def test_startup_time(self, index):
|
||||
start = time.monotonic()
|
||||
os.environ['PREPAREONLY'] = '1'
|
||||
manager.main()
|
||||
t = time.monotonic() - start
|
||||
assert t < MAX_STARTUP_TIME, f"startup took {t}s, expected <{MAX_STARTUP_TIME}s"
|
||||
|
||||
@unittest.skip("this test is flaky the way it's currently written, should be moved to test_onroad")
|
||||
def test_clean_exit(self):
|
||||
"""
|
||||
Ensure all processes exit cleanly when stopped.
|
||||
"""
|
||||
HARDWARE.set_power_save(False)
|
||||
manager.manager_init()
|
||||
|
||||
CP = car.CarParams.new_message()
|
||||
procs = ensure_running(managed_processes.values(), True, Params(), CP, not_run=BLACKLIST_PROCS)
|
||||
|
||||
time.sleep(10)
|
||||
|
||||
for p in procs:
|
||||
with self.subTest(proc=p.name):
|
||||
state = p.get_process_state_msg()
|
||||
self.assertTrue(state.running, f"{p.name} not running")
|
||||
exit_code = p.stop(retry=False)
|
||||
|
||||
self.assertNotIn(p.name, BLACKLIST_PROCS, f"{p.name} was started")
|
||||
|
||||
self.assertTrue(exit_code is not None, f"{p.name} failed to exit")
|
||||
|
||||
# TODO: interrupted blocking read exits with 1 in cereal. use a more unique return code
|
||||
exit_codes = [0, 1]
|
||||
if p.sigkill:
|
||||
exit_codes = [-signal.SIGKILL]
|
||||
self.assertIn(exit_code, exit_codes, f"{p.name} died with {exit_code}")
|
||||
|
||||
|
||||
# allow running this test file directly, outside of pytest
if __name__ == "__main__":
  unittest.main()
|
||||
Reference in New Issue
Block a user