clearpilot: initial commit of full source
This commit is contained in:
15
selfdrive/monitoring/README.md
Executable file
15
selfdrive/monitoring/README.md
Executable file
@@ -0,0 +1,15 @@
|
||||
# driver monitoring (DM)
|
||||
|
||||
Uploading driver-facing camera footage is opt-in, but it is encouraged to opt-in to improve the DM model. You can always change your preference using the "Record and Upload Driver Camera" toggle.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
Before creating a bug report, go through these troubleshooting steps.
|
||||
|
||||
* Ensure the driver-facing camera has a good view of the driver in normal driving positions.
|
||||
* This can be checked in Settings -> Device -> Preview Driver Camera (when car is off).
|
||||
* If the camera can't see the driver, the device should be re-mounted.
|
||||
|
||||
## Bug report
|
||||
|
||||
In order for us to look into DM bug reports, we'll need the driver-facing camera footage. If you don't normally have this enabled, simply enable the toggle for a single drive. Also ensure the "Upload Raw Logs" toggle is enabled before going for a drive.
|
||||
90
selfdrive/monitoring/dmonitoringd.py
Executable file
90
selfdrive/monitoring/dmonitoringd.py
Executable file
@@ -0,0 +1,90 @@
|
||||
#!/usr/bin/env python3
|
||||
import gc
|
||||
|
||||
import cereal.messaging as messaging
|
||||
from cereal import car
|
||||
from openpilot.common.params import Params
|
||||
from openpilot.common.realtime import set_realtime_priority
|
||||
from openpilot.selfdrive.controls.lib.events import Events
|
||||
from openpilot.selfdrive.monitoring.driver_monitor import DriverStatus
|
||||
|
||||
|
||||
def dmonitoringd_thread():
  """Driver-monitoring daemon loop.

  Consumes driverStateV2 model output (polled, 10Hz per the comment below),
  feeds the DriverStatus state machine, and publishes a
  driverMonitoringState packet each iteration.
  """
  # Disable GC: collection pauses would add jitter to this soft-realtime loop.
  gc.disable()
  set_realtime_priority(2)

  params = Params()
  pm = messaging.PubMaster(['driverMonitoringState'])
  sm = messaging.SubMaster(['driverStateV2', 'liveCalibration', 'carState', 'controlsState', 'modelV2'], poll='driverStateV2')

  # Seed RHD (right-hand-drive) detection from the value saved on previous drives.
  driver_status = DriverStatus(rhd_saved=params.get_bool("IsRhdDetected"))

  v_cruise_last = 0
  driver_engaged = False

  # 10Hz <- dmonitoringmodeld
  while True:
    sm.update()
    if not sm.updated['driverStateV2']:
      continue

    # Get interaction: a button press, cruise-speed change, steering input,
    # or gas input all count as the driver interacting with the car.
    if sm.updated['carState']:
      v_cruise = sm['carState'].cruiseState.speed
      driver_engaged = len(sm['carState'].buttonEvents) > 0 or \
                       v_cruise != v_cruise_last or \
                       sm['carState'].steeringPressed or \
                       sm['carState'].gasPressed
      v_cruise_last = v_cruise

    if sm.updated['modelV2']:
      driver_status.set_policy(sm['modelV2'], sm['carState'].vEgo)

    # Get data from dmonitoringmodeld
    events = Events()

    # Only update internal state when all inputs are valid and calibration exists.
    if sm.all_checks() and len(sm['liveCalibration'].rpyCalib):
      driver_status.update_states(sm['driverStateV2'], sm['liveCalibration'].rpyCalib, sm['carState'].vEgo, sm['controlsState'].enabled)

    # Block engaging after max number of distractions
    if driver_status.terminal_alert_cnt >= driver_status.settings._MAX_TERMINAL_ALERTS or \
       driver_status.terminal_time >= driver_status.settings._MAX_TERMINAL_DURATION:
      events.add(car.CarEvent.EventName.tooDistracted)

    # Update events from driver state
    driver_status.update_events(events, driver_engaged, sm['controlsState'].enabled, sm['carState'].standstill)

    # build driverMonitoringState packet
    dat = messaging.new_message('driverMonitoringState', valid=sm.all_checks())
    dat.driverMonitoringState = {
      "events": events.to_msg(),
      "faceDetected": driver_status.face_detected,
      "isDistracted": driver_status.driver_distracted,
      # distracted_types holds bit-flag ints, so the sum encodes the set of types
      "distractedType": sum(driver_status.distracted_types),
      "awarenessStatus": driver_status.awareness,
      "posePitchOffset": driver_status.pose.pitch_offseter.filtered_stat.mean(),
      "posePitchValidCount": driver_status.pose.pitch_offseter.filtered_stat.n,
      "poseYawOffset": driver_status.pose.yaw_offseter.filtered_stat.mean(),
      "poseYawValidCount": driver_status.pose.yaw_offseter.filtered_stat.n,
      "stepChange": driver_status.step_change,
      "awarenessActive": driver_status.awareness_active,
      "awarenessPassive": driver_status.awareness_passive,
      "isLowStd": driver_status.pose.low_std,
      "hiStdCount": driver_status.hi_stds,
      "isActiveMode": driver_status.active_monitoring_mode,
      "isRHD": driver_status.wheel_on_right,
    }
    pm.send('driverMonitoringState', dat)

    # save rhd virtual toggle every 5 mins
    # (6000 frames; only persist once the learner has converged and the
    # current wheel side agrees with the learned side)
    if (sm['driverStateV2'].frameId % 6000 == 0 and
        driver_status.wheelpos_learner.filtered_stat.n > driver_status.settings._WHEELPOS_FILTER_MIN_COUNT and
        driver_status.wheel_on_right == (driver_status.wheelpos_learner.filtered_stat.M > driver_status.settings._WHEELPOS_THRESHOLD)):
      params.put_bool_nonblocking("IsRhdDetected", driver_status.wheel_on_right)
|
||||
|
||||
def main():
  """Entry point: run the driver-monitoring loop (blocks forever)."""
  dmonitoringd_thread()


if __name__ == '__main__':
  main()
|
||||
362
selfdrive/monitoring/driver_monitor.py
Executable file
362
selfdrive/monitoring/driver_monitor.py
Executable file
@@ -0,0 +1,362 @@
|
||||
from math import atan2
|
||||
|
||||
from cereal import car
|
||||
from openpilot.common.numpy_fast import interp
|
||||
from openpilot.common.realtime import DT_DMON
|
||||
from openpilot.common.filter_simple import FirstOrderFilter
|
||||
from openpilot.common.stat_live import RunningStatFilter
|
||||
from openpilot.common.transformations.camera import DEVICE_CAMERAS
|
||||
|
||||
EventName = car.CarEvent.EventName
|
||||
|
||||
# ******************************************************************************************
|
||||
# NOTE: To fork maintainers.
|
||||
# Disabling or nerfing safety features will get you and your users banned from our servers.
|
||||
# We recommend that you do not change these numbers from the defaults.
|
||||
# ******************************************************************************************
|
||||
|
||||
class DRIVER_MONITOR_SETTINGS():
  """Tunable thresholds and timers for the driver-monitoring state machine.

  These values are safety-relevant (see the maintainer warning above this
  class). All time-based counts are expressed in DM model frames of
  ``_DT_DMON`` seconds each.

  NOTE(review): the commented-out block below preserves the regulatory
  reference values; the active values beneath it are substantially more
  permissive (e.g. 30s vs 11s active-monitoring timeout) — confirm this
  divergence is intentional given the fork warning above.
  """
  def __init__(self):
    self._DT_DMON = DT_DMON
    # ref (page15-16): https://eur-lex.europa.eu/legal-content/EN/TXT/PDF/?uri=CELEX:42018X1947&rid=2
    # self._AWARENESS_TIME = 30.  # passive wheeltouch total timeout
    # self._AWARENESS_PRE_TIME_TILL_TERMINAL = 15.
    # self._AWARENESS_PROMPT_TIME_TILL_TERMINAL = 6.
    # self._DISTRACTED_TIME = 11.  # active monitoring total timeout
    # self._DISTRACTED_PRE_TIME_TILL_TERMINAL = 8.
    # self._DISTRACTED_PROMPT_TIME_TILL_TERMINAL = 6.

    # Temp.
    # Goals:
    # - Temp only if daytime and privledged
    # - Continuously lower speed if in crit state
    self._AWARENESS_TIME = 30.  # passive wheeltouch total timeout
    self._AWARENESS_PRE_TIME_TILL_TERMINAL = 20.
    self._AWARENESS_PROMPT_TIME_TILL_TERMINAL = 14.
    self._DISTRACTED_TIME = 30.  # active monitoring total timeout
    self._DISTRACTED_PRE_TIME_TILL_TERMINAL = 24.
    self._DISTRACTED_PROMPT_TIME_TILL_TERMINAL = 14.

    # model-output probability thresholds
    self._FACE_THRESHOLD = 0.7
    self._EYE_THRESHOLD = 0.65
    self._SG_THRESHOLD = 0.9   # sunglasses probability
    self._BLINK_THRESHOLD = 0.865

    # end-to-end (readyProb/notReadyProb) distraction thresholds
    self._EE_THRESH11 = 0.241
    self._EE_THRESH12 = 4.7
    self._EE_MAX_OFFSET1 = 0.06
    self._EE_MIN_OFFSET1 = 0.025
    self._EE_THRESH21 = 0.01
    self._EE_THRESH22 = 0.35

    # head-pose thresholds [rad]; SLACK/STRICT are interpolated by set_policy()
    self._POSE_PITCH_THRESHOLD = 0.3133
    self._POSE_PITCH_THRESHOLD_SLACK = 0.3237
    self._POSE_PITCH_THRESHOLD_STRICT = self._POSE_PITCH_THRESHOLD
    self._POSE_YAW_THRESHOLD = 0.4020
    self._POSE_YAW_THRESHOLD_SLACK = 0.5042
    self._POSE_YAW_THRESHOLD_STRICT = self._POSE_YAW_THRESHOLD
    self._PITCH_NATURAL_OFFSET = 0.029  # initial value before offset is learned
    self._PITCH_NATURAL_THRESHOLD = 0.449
    self._YAW_NATURAL_OFFSET = 0.097  # initial value before offset is learned
    self._PITCH_MAX_OFFSET = 0.124
    self._PITCH_MIN_OFFSET = -0.0881
    self._YAW_MAX_OFFSET = 0.289
    self._YAW_MIN_OFFSET = -0.0246

    self._POSESTD_THRESHOLD = 0.3
    self._HI_STD_FALLBACK_TIME = int(10 / self._DT_DMON)  # fall back to wheel touch if model is uncertain for 10s
    self._DISTRACTED_FILTER_TS = 0.25  # 0.6Hz

    self._POSE_CALIB_MIN_SPEED = 13  # 30 mph
    self._POSE_OFFSET_MIN_COUNT = int(60 / self._DT_DMON)  # valid data counts before calibration completes, 1min cumulative
    self._POSE_OFFSET_MAX_COUNT = int(360 / self._DT_DMON)  # stop deweighting new data after 6 min, aka "short term memory"

    self._WHEELPOS_CALIB_MIN_SPEED = 11
    self._WHEELPOS_THRESHOLD = 0.5
    self._WHEELPOS_FILTER_MIN_COUNT = int(15 / self._DT_DMON)  # allow 15 seconds to converge wheel side

    self._RECOVERY_FACTOR_MAX = 5.  # relative to minus step change
    self._RECOVERY_FACTOR_MIN = 1.25  # relative to minus step change

    self._MAX_TERMINAL_ALERTS = 3  # not allowed to engage after 3 terminal alerts
    self._MAX_TERMINAL_DURATION = int(30 / self._DT_DMON)  # not allowed to engage after 30s of terminal alerts
|
||||
|
||||
|
||||
# TODO: get these live
# model output refers to center of undistorted+leveled image
EFL = 598.0  # nominal focal length [px] of the intrinsic matrix K (used as atan2 denominator below)
cam = DEVICE_CAMERAS[("tici", "ar0231")]  # corrected image has same size as raw
W, H = (cam.dcam.width, cam.dcam.height)  # corrected image has same size as raw
|
||||
|
||||
class DistractedType:
  """Distraction categories.

  Values are bit flags (powers of two), so multiple simultaneous types can
  be reported losslessly as ``sum(distracted_types)`` in the
  driverMonitoringState packet.
  """
  NOT_DISTRACTED = 0
  DISTRACTED_POSE = 1
  DISTRACTED_BLINK = 2
  DISTRACTED_E2E = 4
|
||||
|
||||
def face_orientation_from_net(angles_desc, pos_desc, rpy_calib):
  """Convert raw model face angles and position to calibrated device-frame angles.

  The output angles are in device frame: from the driver's perspective,
  pitch is up and yaw is right. Returns (roll, pitch, yaw) in radians.
  """
  pitch_net, yaw_net, roll_net = angles_desc

  # pos_desc is normalized around the image center; map to pixel coordinates,
  # then compute the focal angle of the face center off the optical axis
  face_px = (pos_desc[0] + 0.5) * W
  face_py = (pos_desc[1] + 0.5) * H
  yaw_focal_angle = atan2(face_px - W // 2, EFL)
  pitch_focal_angle = atan2(face_py - H // 2, EFL)

  # combine network angle with focal angle, then subtract mount calibration
  # (no calib for roll)
  pitch = pitch_net + pitch_focal_angle - rpy_calib[1]
  yaw = -yaw_net + yaw_focal_angle - rpy_calib[2]
  return roll_net, pitch, yaw
|
||||
|
||||
class DriverPose():
  """Latest head-pose estimate plus the long-running pose-offset learners."""
  def __init__(self, max_trackable):
    # current pose angles [rad] and their model-reported standard deviations
    self.yaw = 0.
    self.pitch = 0.
    self.roll = 0.
    self.yaw_std = 0.
    self.pitch_std = 0.
    self.roll_std = 0.
    # running filters that learn the driver's natural pitch/yaw offsets
    self.pitch_offseter = RunningStatFilter(max_trackable=max_trackable)
    self.yaw_offseter = RunningStatFilter(max_trackable=max_trackable)
    # True while model pose uncertainty is below _POSESTD_THRESHOLD
    self.low_std = True
    # threshold scale factors, adjusted by DriverStatus.set_policy()
    self.cfactor_pitch = 1.
    self.cfactor_yaw = 1.
|
||||
|
||||
class DriverBlink():
  """Latest per-eye blink probabilities from the DM model."""
  def __init__(self):
    # nothing observed yet: both eyes start at zero blink probability
    self.left_blink, self.right_blink = 0., 0.
|
||||
|
||||
class DriverStatus():
  """Driver-attention state machine.

  Tracks head pose, blinks, and end-to-end model signals to maintain an
  ``awareness`` level in [0, 1] (with a small overshoot floor of -0.1).
  Awareness decays while the driver appears distracted or invisible and
  recovers while attentive; crossing ``threshold_pre`` /
  ``threshold_prompt`` / 0 triggers green / orange / red alerts
  respectively via :meth:`update_events`.
  """
  def __init__(self, rhd_saved=False, settings=None):
    # rhd_saved: persisted right-hand-drive detection from previous drives
    if settings is None:
      settings = DRIVER_MONITOR_SETTINGS()
    # init policy settings
    self.settings = settings

    # init driver status
    self.wheelpos_learner = RunningStatFilter()
    self.pose = DriverPose(self.settings._POSE_OFFSET_MAX_COUNT)
    self.pose_calibrated = False
    self.blink = DriverBlink()
    # end-to-end model values: eev1 <- notReadyProb[0], eev2 <- readyProb[0]
    self.eev1 = 0.
    self.eev2 = 1.
    self.ee1_offseter = RunningStatFilter(max_trackable=self.settings._POSE_OFFSET_MAX_COUNT)
    self.ee2_offseter = RunningStatFilter(max_trackable=self.settings._POSE_OFFSET_MAX_COUNT)
    self.ee1_calibrated = False
    self.ee2_calibrated = False

    # awareness levels; active/passive are swapped in and out by _set_timers
    self.awareness = 1.
    self.awareness_active = 1.
    self.awareness_passive = 1.
    self.distracted_types = []
    self.driver_distracted = False
    self.driver_distraction_filter = FirstOrderFilter(0., self.settings._DISTRACTED_FILTER_TS, self.settings._DT_DMON)
    self.wheel_on_right = False
    self.wheel_on_right_last = None
    self.wheel_on_right_default = rhd_saved
    self.face_detected = False
    # NOTE(review): terminal_alert_cnt / terminal_time are never incremented
    # anywhere in this class (the increments in update_events are commented
    # out), so the tooDistracted engagement block in dmonitoringd can never
    # trigger — confirm this is intentional.
    self.terminal_alert_cnt = 0
    self.terminal_time = 0
    self.step_change = 0.
    self.active_monitoring_mode = True
    self.is_model_uncertain = False
    self.hi_stds = 0
    self.threshold_pre = self.settings._DISTRACTED_PRE_TIME_TILL_TERMINAL / self.settings._DISTRACTED_TIME
    self.threshold_prompt = self.settings._DISTRACTED_PROMPT_TIME_TILL_TERMINAL / self.settings._DISTRACTED_TIME

    self._set_timers(active_monitoring=True)

  def _reset_awareness(self):
    """Restore all awareness levels to fully attentive."""
    self.awareness = 1.
    self.awareness_active = 1.
    self.awareness_passive = 1.

  def _set_timers(self, active_monitoring):
    """Switch between active (face visible) and passive timing policies.

    active_monitoring: True when the face is detected and the model is
    confident; False falls back to wheel-touch (passive) timing.
    """
    # past the orange alert in active mode: freeze policy switching so the
    # driver can't reset the countdown by dodging the camera
    if self.active_monitoring_mode and self.awareness <= self.threshold_prompt:
      if active_monitoring:
        self.step_change = self.settings._DT_DMON / self.settings._DISTRACTED_TIME
      else:
        self.step_change = 0.
      return  # no exploit after orange alert
    elif self.awareness <= 0.:
      # terminal (red) state: nothing to re-time until disengagement resets us
      return

    if active_monitoring:
      # when falling back from passive mode to active mode, reset awareness to avoid false alert
      if not self.active_monitoring_mode:
        self.awareness_passive = self.awareness
        self.awareness = self.awareness_active

      self.threshold_pre = self.settings._DISTRACTED_PRE_TIME_TILL_TERMINAL / self.settings._DISTRACTED_TIME
      self.threshold_prompt = self.settings._DISTRACTED_PROMPT_TIME_TILL_TERMINAL / self.settings._DISTRACTED_TIME
      self.step_change = self.settings._DT_DMON / self.settings._DISTRACTED_TIME
      self.active_monitoring_mode = True
    else:
      # entering passive mode: stash the active level, restore the passive one
      if self.active_monitoring_mode:
        self.awareness_active = self.awareness
        self.awareness = self.awareness_passive

      self.threshold_pre = self.settings._AWARENESS_PRE_TIME_TILL_TERMINAL / self.settings._AWARENESS_TIME
      self.threshold_prompt = self.settings._AWARENESS_PROMPT_TIME_TILL_TERMINAL / self.settings._AWARENESS_TIME
      self.step_change = self.settings._DT_DMON / self.settings._AWARENESS_TIME
      self.active_monitoring_mode = False

  def _get_distracted_types(self):
    """Return the list of DistractedType flags currently firing."""
    distracted_types = []

    # pose check: compare against learned offsets once calibrated,
    # otherwise against the fixed natural offsets
    if not self.pose_calibrated:
      pitch_error = self.pose.pitch - self.settings._PITCH_NATURAL_OFFSET
      yaw_error = self.pose.yaw - self.settings._YAW_NATURAL_OFFSET
    else:
      pitch_error = self.pose.pitch - min(max(self.pose.pitch_offseter.filtered_stat.mean(),
                                              self.settings._PITCH_MIN_OFFSET), self.settings._PITCH_MAX_OFFSET)
      yaw_error = self.pose.yaw - min(max(self.pose.yaw_offseter.filtered_stat.mean(),
                                          self.settings._YAW_MIN_OFFSET), self.settings._YAW_MAX_OFFSET)
    pitch_error = 0 if pitch_error > 0 else abs(pitch_error)  # no positive pitch limit
    yaw_error = abs(yaw_error)
    if pitch_error > (self.settings._POSE_PITCH_THRESHOLD*self.pose.cfactor_pitch if self.pose_calibrated else self.settings._PITCH_NATURAL_THRESHOLD) or \
       yaw_error > self.settings._POSE_YAW_THRESHOLD*self.pose.cfactor_yaw:
      distracted_types.append(DistractedType.DISTRACTED_POSE)

    # both eyes averaged against the blink threshold
    if (self.blink.left_blink + self.blink.right_blink)*0.5 > self.settings._BLINK_THRESHOLD:
      distracted_types.append(DistractedType.DISTRACTED_BLINK)

    # end-to-end "not ready" signal, against a learned or fixed threshold
    if self.ee1_calibrated:
      ee1_dist = self.eev1 > max(min(self.ee1_offseter.filtered_stat.M, self.settings._EE_MAX_OFFSET1), self.settings._EE_MIN_OFFSET1) \
                             * self.settings._EE_THRESH12
    else:
      ee1_dist = self.eev1 > self.settings._EE_THRESH11
    # if self.ee2_calibrated:
    #   ee2_dist = self.eev2 < self.ee2_offseter.filtered_stat.M * self.settings._EE_THRESH22
    # else:
    #   ee2_dist = self.eev2 < self.settings._EE_THRESH21
    if ee1_dist:
      distracted_types.append(DistractedType.DISTRACTED_E2E)

    return distracted_types

  def set_policy(self, model_data, car_speed):
    """Tighten or relax pose thresholds based on predicted brake disengage risk."""
    bp = model_data.meta.disengagePredictions.brakeDisengageProbs[0]  # brake disengage prob in next 2s
    # speed-dependent normalizer: most permissive near 16 m/s, floored at 0.2
    k1 = max(-0.00156*((car_speed-16)**2)+0.6, 0.2)
    bp_normal = max(min(bp / k1, 0.5), 0)
    # interpolate between SLACK (low risk) and STRICT (high risk) thresholds
    self.pose.cfactor_pitch = interp(bp_normal, [0, 0.5],
                                     [self.settings._POSE_PITCH_THRESHOLD_SLACK,
                                      self.settings._POSE_PITCH_THRESHOLD_STRICT]) / self.settings._POSE_PITCH_THRESHOLD
    self.pose.cfactor_yaw = interp(bp_normal, [0, 0.5],
                                   [self.settings._POSE_YAW_THRESHOLD_SLACK,
                                    self.settings._POSE_YAW_THRESHOLD_STRICT]) / self.settings._POSE_YAW_THRESHOLD

  def update_states(self, driver_state, cal_rpy, car_speed, op_engaged):
    """Ingest one driverStateV2 frame and refresh pose/blink/distraction state.

    driver_state: driverStateV2 message; cal_rpy: mount calibration angles;
    car_speed: vEgo [m/s]; op_engaged: whether openpilot is engaged.
    """
    rhd_pred = driver_state.wheelOnRightProb
    # calibrates only when there's movement and either face detected
    if car_speed > self.settings._WHEELPOS_CALIB_MIN_SPEED and (driver_state.leftDriverData.faceProb > self.settings._FACE_THRESHOLD or
                                                                driver_state.rightDriverData.faceProb > self.settings._FACE_THRESHOLD):
      self.wheelpos_learner.push_and_update(rhd_pred)
    if self.wheelpos_learner.filtered_stat.n > self.settings._WHEELPOS_FILTER_MIN_COUNT:
      self.wheel_on_right = self.wheelpos_learner.filtered_stat.M > self.settings._WHEELPOS_THRESHOLD
    else:
      self.wheel_on_right = self.wheel_on_right_default  # use default/saved if calibration is unfinished
    # make sure no switching when engaged
    if op_engaged and self.wheel_on_right_last is not None and self.wheel_on_right_last != self.wheel_on_right:
      self.wheel_on_right = self.wheel_on_right_last
    driver_data = driver_state.rightDriverData if self.wheel_on_right else driver_state.leftDriverData
    # bail out if the model produced empty arrays for any required field
    if not all(len(x) > 0 for x in (driver_data.faceOrientation, driver_data.facePosition,
                                    driver_data.faceOrientationStd, driver_data.facePositionStd,
                                    driver_data.readyProb, driver_data.notReadyProb)):
      return

    self.face_detected = driver_data.faceProb > self.settings._FACE_THRESHOLD
    self.pose.roll, self.pose.pitch, self.pose.yaw = face_orientation_from_net(driver_data.faceOrientation, driver_data.facePosition, cal_rpy)
    if self.wheel_on_right:
      # mirror yaw for right-hand-drive so thresholds stay symmetric
      self.pose.yaw *= -1
    self.wheel_on_right_last = self.wheel_on_right
    self.pose.pitch_std = driver_data.faceOrientationStd[0]
    self.pose.yaw_std = driver_data.faceOrientationStd[1]
    model_std_max = max(self.pose.pitch_std, self.pose.yaw_std)
    self.pose.low_std = model_std_max < self.settings._POSESTD_THRESHOLD
    # blink probs are only trusted when the eye is visible and no sunglasses
    self.blink.left_blink = driver_data.leftBlinkProb * (driver_data.leftEyeProb > self.settings._EYE_THRESHOLD) \
                            * (driver_data.sunglassesProb < self.settings._SG_THRESHOLD)
    self.blink.right_blink = driver_data.rightBlinkProb * (driver_data.rightEyeProb > self.settings._EYE_THRESHOLD) \
                             * (driver_data.sunglassesProb < self.settings._SG_THRESHOLD)
    self.eev1 = driver_data.notReadyProb[0]
    self.eev2 = driver_data.readyProb[0]

    self.distracted_types = self._get_distracted_types()
    self.driver_distracted = (DistractedType.DISTRACTED_E2E in self.distracted_types or DistractedType.DISTRACTED_POSE in self.distracted_types
                              or DistractedType.DISTRACTED_BLINK in self.distracted_types) \
                             and driver_data.faceProb > self.settings._FACE_THRESHOLD and self.pose.low_std
    self.driver_distraction_filter.update(self.driver_distracted)

    # update offseter
    # only update when driver is actively driving the car above a certain speed
    if self.face_detected and car_speed > self.settings._POSE_CALIB_MIN_SPEED and self.pose.low_std and (not op_engaged or not self.driver_distracted):
      self.pose.pitch_offseter.push_and_update(self.pose.pitch)
      self.pose.yaw_offseter.push_and_update(self.pose.yaw)
      self.ee1_offseter.push_and_update(self.eev1)
      self.ee2_offseter.push_and_update(self.eev2)

    self.pose_calibrated = self.pose.pitch_offseter.filtered_stat.n > self.settings._POSE_OFFSET_MIN_COUNT and \
                           self.pose.yaw_offseter.filtered_stat.n > self.settings._POSE_OFFSET_MIN_COUNT
    self.ee1_calibrated = self.ee1_offseter.filtered_stat.n > self.settings._POSE_OFFSET_MIN_COUNT
    self.ee2_calibrated = self.ee2_offseter.filtered_stat.n > self.settings._POSE_OFFSET_MIN_COUNT

    self.is_model_uncertain = self.hi_stds > self.settings._HI_STD_FALLBACK_TIME
    self._set_timers(self.face_detected and not self.is_model_uncertain)
    # count consecutive high-uncertainty frames; reset on a confident frame
    if self.face_detected and not self.pose.low_std and not self.driver_distracted:
      self.hi_stds += 1
    elif self.face_detected and self.pose.low_std:
      self.hi_stds = 0

  def update_events(self, events, driver_engaged, ctrl_active, standstill):
    """Decay or recover awareness and append the appropriate alert events.

    driver_engaged: driver interacted with the car this frame;
    ctrl_active: openpilot control is active; standstill: car is stopped.
    """
    # self.awareness > 0
    # NOTE(review): upstream gates these resets on awareness > 0 (so only
    # disengagement clears a red alert); that guard is absent here — verify
    # intent against the commented conditions.
    if (driver_engaged and not self.active_monitoring_mode) or not ctrl_active:  # reset only when on disengagement if red reached
      self._reset_awareness()
      return

    driver_attentive = self.driver_distraction_filter.x < 0.37
    awareness_prev = self.awareness

    # and self.awareness > 0
    if (driver_attentive and self.face_detected and self.pose.low_std ):
      if driver_engaged:
        self._reset_awareness()
        return
      # only restore awareness when paying attention and alert is not red
      self.awareness = min(self.awareness + ((self.settings._RECOVERY_FACTOR_MAX-self.settings._RECOVERY_FACTOR_MIN)*
                                             (1.-self.awareness)+self.settings._RECOVERY_FACTOR_MIN)*self.step_change, 1.)
      if self.awareness == 1.:
        self.awareness_passive = min(self.awareness_passive + self.step_change, 1.)
      # don't display alert banner when awareness is recovering and has cleared orange
      if self.awareness > self.threshold_prompt:
        return

    # at standstill, stop decaying once the next step would cross into orange
    standstill_exemption = standstill and self.awareness - self.step_change <= self.threshold_prompt
    certainly_distracted = self.driver_distraction_filter.x > 0.63 and self.driver_distracted and self.face_detected
    maybe_distracted = self.hi_stds > self.settings._HI_STD_FALLBACK_TIME or not self.face_detected
    if certainly_distracted or maybe_distracted:
      # should always be counting if distracted unless at standstill and reaching orange
      if not standstill_exemption:
        self.awareness = max(self.awareness - self.step_change, -0.1)

    alert = None
    if self.awareness <= 0.:
      # terminal red alert: disengagement required
      alert = EventName.driverDistracted if self.active_monitoring_mode else EventName.driverUnresponsive
      # self.terminal_time += 1
      # if awareness_prev > 0.:
      #   self.terminal_alert_cnt += 1
    # elif self.awareness <= self.threshold_prompt:
    if self.awareness <= self.threshold_prompt:
      # prompt orange alert
      # NOTE(review): because this is `if` rather than the commented `elif`,
      # the orange alert overwrites the red one whenever awareness <= 0 —
      # confirm this downgrade is intentional.
      alert = EventName.promptDriverDistracted if self.active_monitoring_mode else EventName.promptDriverUnresponsive
    elif self.awareness <= self.threshold_pre:
      # pre green alert
      alert = EventName.preDriverDistracted if self.active_monitoring_mode else EventName.preDriverUnresponsive

    if alert is not None:
      events.add(alert)
|
||||
214
selfdrive/monitoring/test_monitoring.py
Executable file
214
selfdrive/monitoring/test_monitoring.py
Executable file
@@ -0,0 +1,214 @@
|
||||
#!/usr/bin/env python3
|
||||
import unittest
|
||||
import numpy as np
|
||||
|
||||
from cereal import car, log
|
||||
from openpilot.common.realtime import DT_DMON
|
||||
from openpilot.selfdrive.controls.lib.events import Events
|
||||
from openpilot.selfdrive.monitoring.driver_monitor import DriverStatus, DRIVER_MONITOR_SETTINGS
|
||||
|
||||
EventName = car.CarEvent.EventName
dm_settings = DRIVER_MONITOR_SETTINGS()

# simulated drive length and the times (in seconds) at which a continuously
# distracted / invisible driver should cross the orange and red thresholds
# (+1s margin so the assertions sample past the transition)
TEST_TIMESPAN = 120  # seconds
DISTRACTED_SECONDS_TO_ORANGE = dm_settings._DISTRACTED_TIME - dm_settings._DISTRACTED_PROMPT_TIME_TILL_TERMINAL + 1
DISTRACTED_SECONDS_TO_RED = dm_settings._DISTRACTED_TIME + 1
INVISIBLE_SECONDS_TO_ORANGE = dm_settings._AWARENESS_TIME - dm_settings._AWARENESS_PROMPT_TIME_TILL_TERMINAL + 1
INVISIBLE_SECONDS_TO_RED = dm_settings._AWARENESS_TIME + 1
|
||||
|
||||
def make_msg(face_detected, distracted=False, model_uncertain=False):
  """Build a synthetic DriverStateV2 message for tests.

  Only the left-driver (LHD) fields are populated. The flags drive the
  relevant probabilities: face_detected -> faceProb, distracted -> blink
  probs, model_uncertain -> orientation/position stds (may also be a float
  to set an explicit uncertainty level).
  """
  msg = log.DriverStateV2.new_message()
  driver = msg.leftDriverData
  driver.faceOrientation = [0., 0., 0.]
  driver.facePosition = [0., 0.]
  driver.faceProb = 1. * face_detected
  driver.leftEyeProb = 1.
  driver.rightEyeProb = 1.
  driver.leftBlinkProb = 1. * distracted
  driver.rightBlinkProb = 1. * distracted
  driver.faceOrientationStd = [1.*model_uncertain, 1.*model_uncertain, 1.*model_uncertain]
  driver.facePositionStd = [1.*model_uncertain, 1.*model_uncertain]
  # TODO: test both separately when e2e is used
  driver.readyProb = [0., 0., 0., 0.]
  driver.notReadyProb = [0., 0.]
  return msg
|
||||
|
||||
|
||||
# driver state from neural net, 10Hz
msg_NO_FACE_DETECTED = make_msg(False)
msg_ATTENTIVE = make_msg(True)
msg_DISTRACTED = make_msg(True, distracted=True)
msg_ATTENTIVE_UNCERTAIN = make_msg(True, model_uncertain=True)
msg_DISTRACTED_UNCERTAIN = make_msg(True, distracted=True, model_uncertain=True)
# model_uncertain takes a float here: std set to 1.5x the trust threshold
msg_DISTRACTED_BUT_SOMEHOW_UNCERTAIN = make_msg(True, distracted=True, model_uncertain=dm_settings._POSESTD_THRESHOLD*1.5)

# driver interaction with car
car_interaction_DETECTED = True
car_interaction_NOT_DETECTED = False

# some common state vectors, one entry per DM frame for the whole test span
always_no_face = [msg_NO_FACE_DETECTED] * int(TEST_TIMESPAN / DT_DMON)
always_attentive = [msg_ATTENTIVE] * int(TEST_TIMESPAN / DT_DMON)
always_distracted = [msg_DISTRACTED] * int(TEST_TIMESPAN / DT_DMON)
always_true = [True] * int(TEST_TIMESPAN / DT_DMON)
always_false = [False] * int(TEST_TIMESPAN / DT_DMON)
|
||||
|
||||
# TODO: this only tests DriverStatus
|
||||
class TestMonitoring(unittest.TestCase):
|
||||
def _run_seq(self, msgs, interaction, engaged, standstill):
|
||||
DS = DriverStatus()
|
||||
events = []
|
||||
for idx in range(len(msgs)):
|
||||
e = Events()
|
||||
DS.update_states(msgs[idx], [0, 0, 0], 0, engaged[idx])
|
||||
# cal_rpy and car_speed don't matter here
|
||||
|
||||
# evaluate events at 10Hz for tests
|
||||
DS.update_events(e, interaction[idx], engaged[idx], standstill[idx])
|
||||
events.append(e)
|
||||
assert len(events) == len(msgs), f"got {len(events)} for {len(msgs)} driverState input msgs"
|
||||
return events, DS
|
||||
|
||||
def _assert_no_events(self, events):
|
||||
self.assertTrue(all(not len(e) for e in events))
|
||||
|
||||
# engaged, driver is attentive all the time
|
||||
def test_fully_aware_driver(self):
|
||||
events, _ = self._run_seq(always_attentive, always_false, always_true, always_false)
|
||||
self._assert_no_events(events)
|
||||
|
||||
# engaged, driver is distracted and does nothing
|
||||
def test_fully_distracted_driver(self):
|
||||
events, d_status = self._run_seq(always_distracted, always_false, always_true, always_false)
|
||||
self.assertEqual(len(events[int((d_status.settings._DISTRACTED_TIME-d_status.settings._DISTRACTED_PRE_TIME_TILL_TERMINAL)/2/DT_DMON)]), 0)
|
||||
self.assertEqual(events[int((d_status.settings._DISTRACTED_TIME-d_status.settings._DISTRACTED_PRE_TIME_TILL_TERMINAL +
|
||||
((d_status.settings._DISTRACTED_PRE_TIME_TILL_TERMINAL-d_status.settings._DISTRACTED_PROMPT_TIME_TILL_TERMINAL)/2))/DT_DMON)].names[0],
|
||||
EventName.preDriverDistracted)
|
||||
self.assertEqual(events[int((d_status.settings._DISTRACTED_TIME-d_status.settings._DISTRACTED_PROMPT_TIME_TILL_TERMINAL +
|
||||
((d_status.settings._DISTRACTED_PROMPT_TIME_TILL_TERMINAL)/2))/DT_DMON)].names[0], EventName.promptDriverDistracted)
|
||||
self.assertEqual(events[int((d_status.settings._DISTRACTED_TIME +
|
||||
((TEST_TIMESPAN-10-d_status.settings._DISTRACTED_TIME)/2))/DT_DMON)].names[0], EventName.driverDistracted)
|
||||
self.assertIs(type(d_status.awareness), float)
|
||||
|
||||
# engaged, no face detected the whole time, no action
|
||||
def test_fully_invisible_driver(self):
|
||||
events, d_status = self._run_seq(always_no_face, always_false, always_true, always_false)
|
||||
self.assertTrue(len(events[int((d_status.settings._AWARENESS_TIME-d_status.settings._AWARENESS_PRE_TIME_TILL_TERMINAL)/2/DT_DMON)]) == 0)
|
||||
self.assertEqual(events[int((d_status.settings._AWARENESS_TIME-d_status.settings._AWARENESS_PRE_TIME_TILL_TERMINAL +
|
||||
((d_status.settings._AWARENESS_PRE_TIME_TILL_TERMINAL-d_status.settings._AWARENESS_PROMPT_TIME_TILL_TERMINAL)/2))/DT_DMON)].names[0],
|
||||
EventName.preDriverUnresponsive)
|
||||
self.assertEqual(events[int((d_status.settings._AWARENESS_TIME-d_status.settings._AWARENESS_PROMPT_TIME_TILL_TERMINAL +
|
||||
((d_status.settings._AWARENESS_PROMPT_TIME_TILL_TERMINAL)/2))/DT_DMON)].names[0], EventName.promptDriverUnresponsive)
|
||||
self.assertEqual(events[int((d_status.settings._AWARENESS_TIME +
|
||||
((TEST_TIMESPAN-10-d_status.settings._AWARENESS_TIME)/2))/DT_DMON)].names[0], EventName.driverUnresponsive)
|
||||
|
||||
# engaged, down to orange, driver pays attention, back to normal; then down to orange, driver touches wheel
|
||||
# - should have short orange recovery time and no green afterwards; wheel touch only recovers when paying attention
|
||||
def test_normal_driver(self):
|
||||
ds_vector = [msg_DISTRACTED] * int(DISTRACTED_SECONDS_TO_ORANGE/DT_DMON) + \
|
||||
[msg_ATTENTIVE] * int(DISTRACTED_SECONDS_TO_ORANGE/DT_DMON) + \
|
||||
[msg_DISTRACTED] * int((DISTRACTED_SECONDS_TO_ORANGE+2)/DT_DMON) + \
|
||||
[msg_ATTENTIVE] * (int(TEST_TIMESPAN/DT_DMON)-int((DISTRACTED_SECONDS_TO_ORANGE*3+2)/DT_DMON))
|
||||
interaction_vector = [car_interaction_NOT_DETECTED] * int(DISTRACTED_SECONDS_TO_ORANGE*3/DT_DMON) + \
|
||||
[car_interaction_DETECTED] * (int(TEST_TIMESPAN/DT_DMON)-int(DISTRACTED_SECONDS_TO_ORANGE*3/DT_DMON))
|
||||
events, _ = self._run_seq(ds_vector, interaction_vector, always_true, always_false)
|
||||
self.assertEqual(len(events[int(DISTRACTED_SECONDS_TO_ORANGE*0.5/DT_DMON)]), 0)
|
||||
self.assertEqual(events[int((DISTRACTED_SECONDS_TO_ORANGE-0.1)/DT_DMON)].names[0], EventName.promptDriverDistracted)
|
||||
self.assertEqual(len(events[int(DISTRACTED_SECONDS_TO_ORANGE*1.5/DT_DMON)]), 0)
|
||||
self.assertEqual(events[int((DISTRACTED_SECONDS_TO_ORANGE*3-0.1)/DT_DMON)].names[0], EventName.promptDriverDistracted)
|
||||
self.assertEqual(events[int((DISTRACTED_SECONDS_TO_ORANGE*3+0.1)/DT_DMON)].names[0], EventName.promptDriverDistracted)
|
||||
self.assertEqual(len(events[int((DISTRACTED_SECONDS_TO_ORANGE*3+2.5)/DT_DMON)]), 0)
|
||||
|
||||
# engaged, down to orange, driver dodges camera, then comes back still distracted, down to red, \
# driver dodges, and then touches wheel to no avail, disengages and reengages
# - should have orange/red alert remain after disappearance, and only disengaging clears red
def test_biggest_comma_fan(self):
  """Camera dodging does not clear orange/red; neither does a wheel touch — only a disengage clears red."""
  _invisible_time = 2  # seconds

  def frames(t):
    # convert a timestamp in seconds to a frame count/index
    return int(t/DT_DMON)

  ds_vector = always_distracted[:]
  interaction_vector = always_false[:]
  op_vector = always_true[:]
  # dodge the camera right as orange is reached
  ds_vector[frames(DISTRACTED_SECONDS_TO_ORANGE):frames(DISTRACTED_SECONDS_TO_ORANGE+_invisible_time)] \
                      = [msg_NO_FACE_DETECTED] * frames(_invisible_time)
  # dodge again after red
  ds_vector[frames(DISTRACTED_SECONDS_TO_RED+_invisible_time):frames(DISTRACTED_SECONDS_TO_RED+2*_invisible_time)] \
                      = [msg_NO_FACE_DETECTED] * frames(_invisible_time)
  # one-second wheel touch while at red
  interaction_vector[frames(DISTRACTED_SECONDS_TO_RED+2*_invisible_time+0.5):frames(DISTRACTED_SECONDS_TO_RED+2*_invisible_time+1.5)] \
                      = [True] * frames(1)
  # disengage for half a second
  op_vector[frames(DISTRACTED_SECONDS_TO_RED+2*_invisible_time+2.5):frames(DISTRACTED_SECONDS_TO_RED+2*_invisible_time+3)] \
                      = [False] * frames(0.5)
  events, _ = self._run_seq(ds_vector, interaction_vector, op_vector, always_false)
  # orange alert persists while the face is hidden
  self.assertEqual(events[frames(DISTRACTED_SECONDS_TO_ORANGE+0.5*_invisible_time)].names[0], EventName.promptDriverDistracted)
  # red alert persists through the second disappearance
  self.assertEqual(events[frames(DISTRACTED_SECONDS_TO_RED+1.5*_invisible_time)].names[0], EventName.driverDistracted)
  # wheel touch does not clear red
  self.assertEqual(events[frames(DISTRACTED_SECONDS_TO_RED+2*_invisible_time+1.5)].names[0], EventName.driverDistracted)
  # only disengaging clears the red alert
  self.assertEqual(len(events[frames(DISTRACTED_SECONDS_TO_RED+2*_invisible_time+3.5)]), 0)
|
||||
# engaged, invisible driver, down to orange, driver touches wheel; then down to orange again, driver appears
# - both actions should clear the alert, but momentary appearance should not
def test_sometimes_transparent_commuter(self):
  """A wheel touch or a sustained reappearance clears the unresponsive alert; a momentary one does not."""
  _visible_time = np.random.choice([0.5, 10])  # seconds; randomly pick the short or the long reappearance
  ds_vector = always_no_face[:]*2
  interaction_vector = always_false[:]*2
  # driver becomes visible for _visible_time seconds during the second countdown
  reappear_start = int((2*INVISIBLE_SECONDS_TO_ORANGE+1)/DT_DMON)
  ds_vector[reappear_start:int((2*INVISIBLE_SECONDS_TO_ORANGE+1+_visible_time)/DT_DMON)] = \
                  [msg_ATTENTIVE] * int(_visible_time/DT_DMON)
  # one-second wheel touch clears the first unresponsive prompt
  interaction_vector[int((INVISIBLE_SECONDS_TO_ORANGE)/DT_DMON):int((INVISIBLE_SECONDS_TO_ORANGE+1)/DT_DMON)] = [True] * int(1/DT_DMON)
  events, _ = self._run_seq(ds_vector, interaction_vector, 2*always_true, 2*always_false)
  # quiet halfway to orange
  self.assertEqual(len(events[int(INVISIBLE_SECONDS_TO_ORANGE*0.5/DT_DMON)]), 0)
  # prompt just before orange
  self.assertEqual(events[int((INVISIBLE_SECONDS_TO_ORANGE-0.1)/DT_DMON)].names[0], EventName.promptDriverUnresponsive)
  # wheel touch clears it
  self.assertEqual(len(events[int((INVISIBLE_SECONDS_TO_ORANGE+0.1)/DT_DMON)]), 0)
  # prompt again just before the second orange, regardless of reappearance length
  self.assertEqual(events[int((INVISIBLE_SECONDS_TO_ORANGE*2+1-0.1)/DT_DMON)].names[0], EventName.promptDriverUnresponsive)
  if _visible_time == 0.5:
    # momentary appearance does not clear the alert; countdown resumes at the pre level
    self.assertEqual(events[int((INVISIBLE_SECONDS_TO_ORANGE*2+1+0.1+_visible_time)/DT_DMON)].names[0], EventName.preDriverUnresponsive)
  else:
    # a sustained appearance fully clears the alert
    self.assertEqual(len(events[int((INVISIBLE_SECONDS_TO_ORANGE*2+1+0.1+_visible_time)/DT_DMON)]), 0)
|
||||
# engaged, invisible driver, down to red, driver appears and then touches wheel, then disengages/reengages
# - only disengage will clear the alert
def test_last_second_responder(self):
  """Once at red, neither reappearing nor touching the wheel clears the alert; only disengaging does."""
  _visible_time = 2  # seconds
  ds_vector = always_no_face[:]
  interaction_vector = always_false[:]
  op_vector = always_true[:]
  # driver appears for _visible_time seconds right at red
  ds_vector[int(INVISIBLE_SECONDS_TO_RED/DT_DMON):int((INVISIBLE_SECONDS_TO_RED+_visible_time)/DT_DMON)] = [msg_ATTENTIVE] * int(_visible_time/DT_DMON)
  # one-second wheel touch after reappearing
  interaction_vector[int((INVISIBLE_SECONDS_TO_RED+_visible_time)/DT_DMON):int((INVISIBLE_SECONDS_TO_RED+_visible_time+1)/DT_DMON)] = [True] * int(1/DT_DMON)
  # disengage for half a second starting at +_visible_time+1.
  # BUGFIX: the slice end was `..._visible_time+0.5`, which is *before* the start;
  # assigning to an inverted list slice inserts the frames and grows op_vector
  # instead of replacing the intended 0.5 s window. End must be `+1+0.5`.
  op_vector[int((INVISIBLE_SECONDS_TO_RED+_visible_time+1)/DT_DMON):int((INVISIBLE_SECONDS_TO_RED+_visible_time+1+0.5)/DT_DMON)] = [False] * int(0.5/DT_DMON)
  events, _ = self._run_seq(ds_vector, interaction_vector, op_vector, always_false)
  # quiet halfway to orange
  self.assertTrue(len(events[int(INVISIBLE_SECONDS_TO_ORANGE*0.5/DT_DMON)]) == 0)
  # prompt just before orange
  self.assertEqual(events[int((INVISIBLE_SECONDS_TO_ORANGE-0.1)/DT_DMON)].names[0], EventName.promptDriverUnresponsive)
  # terminal alert just before red
  self.assertEqual(events[int((INVISIBLE_SECONDS_TO_RED-0.1)/DT_DMON)].names[0], EventName.driverUnresponsive)
  # reappearing does not clear red
  self.assertEqual(events[int((INVISIBLE_SECONDS_TO_RED+0.5*_visible_time)/DT_DMON)].names[0], EventName.driverUnresponsive)
  # wheel touch does not clear red either
  self.assertEqual(events[int((INVISIBLE_SECONDS_TO_RED+_visible_time+0.5)/DT_DMON)].names[0], EventName.driverUnresponsive)
  # disengaging clears the alert
  self.assertTrue(len(events[int((INVISIBLE_SECONDS_TO_RED+_visible_time+1+0.1)/DT_DMON)]) == 0)
|
||||
# disengaged, always distracted driver
# - dm should stay quiet when not engaged
def test_pure_dashcam_user(self):
  """With openpilot never engaged, no DM events are ever emitted, distraction notwithstanding."""
  events, _ = self._run_seq(always_distracted, always_false, always_false, always_false)
  # every frame's event list must be empty
  self.assertEqual(sum(len(evt) for evt in events), 0)
|
||||
# engaged, car stops at traffic light, down to orange, no action, then car starts moving
# - should only reach green when stopped, but continues counting down on launch
def test_long_traffic_light_victim(self):
  """At standstill the countdown is stretched (stays at the pre level); launching resumes the normal escalation."""
  _redlight_time = 60  # seconds
  # standstill for the first _redlight_time seconds, moving afterwards
  standstill_vector = always_true[:]
  standstill_vector[int(_redlight_time/DT_DMON):] = [False] * int((TEST_TIMESPAN-_redlight_time)/DT_DMON)
  events, d_status = self._run_seq(always_distracted, always_false, always_true, standstill_vector)
  # pre alert appears on the standstill-stretched schedule
  pre_alert_frame = int((d_status.settings._DISTRACTED_TIME-d_status.settings._DISTRACTED_PRE_TIME_TILL_TERMINAL+1)/DT_DMON)
  self.assertEqual(events[pre_alert_frame].names[0], EventName.preDriverDistracted)
  # still only the pre alert just before launch
  self.assertEqual(events[int((_redlight_time-0.1)/DT_DMON)].names[0], EventName.preDriverDistracted)
  # once moving, the countdown resumes and escalates to the prompt
  self.assertEqual(events[int((_redlight_time+0.5)/DT_DMON)].names[0], EventName.promptDriverDistracted)
|
||||
# engaged, model is somehow uncertain and driver is distracted
# - should fall back to wheel touch after uncertain alert
def test_somehow_indecisive_model(self):
  """High-uncertainty model output falls back to the unresponsive (wheel-touch) escalation path."""
  ds_vector = [msg_DISTRACTED_BUT_SOMEHOW_UNCERTAIN] * int(TEST_TIMESPAN/DT_DMON)
  interaction_vector = always_false[:]
  events, d_status = self._run_seq(ds_vector, interaction_vector, always_true, always_false)
  # after the high-std fallback delay, the unresponsive escalation follows the usual offsets
  fallback_delay = DT_DMON*d_status.settings._HI_STD_FALLBACK_TIME
  self.assertIn(EventName.preDriverUnresponsive,
                events[int((INVISIBLE_SECONDS_TO_ORANGE-1+fallback_delay-0.1)/DT_DMON)].names)
  self.assertIn(EventName.promptDriverUnresponsive,
                events[int((INVISIBLE_SECONDS_TO_ORANGE-1+fallback_delay+0.1)/DT_DMON)].names)
  self.assertIn(EventName.driverUnresponsive,
                events[int((INVISIBLE_SECONDS_TO_RED-1+fallback_delay+0.1)/DT_DMON)].names)
||||
|
||||
|
||||
if __name__ == "__main__":
  # allow running this test module directly with the stdlib unittest runner
  unittest.main()
|
||||
Reference in New Issue
Block a user