Update1206, pp model, anglecontrol, fix DM (#234)

This commit is contained in:
carrot
2025-12-06 15:07:46 +09:00
committed by GitHub
parent 8776533f03
commit 81f8f4d434
27 changed files with 230 additions and 160 deletions

View File

@@ -1,3 +1,9 @@
Carrot2-v9 (2025-12-06)
========================
* PP(planplus) model
* fix DM
* fix angle steering torque (HKG car)
Carrot2-v9 (2025-12-03)
========================
* ST model

View File

@@ -3,15 +3,13 @@ import subprocess
import sys
import sysconfig
import platform
import shlex
import numpy as np
import SCons.Errors
SCons.Warnings.warningAsException(True)
# pending upstream fix - https://github.com/SCons/scons/issues/4461
#SetOption('warn', 'all')
TICI = os.path.isfile('/TICI')
AGNOS = TICI

View File

@@ -2208,13 +2208,10 @@ struct Joystick {
struct DriverStateV2 {
frameId @0 :UInt32;
modelExecutionTime @1 :Float32;
dspExecutionTimeDEPRECATED @2 :Float32;
gpuExecutionTime @8 :Float32;
rawPredictions @3 :Data;
poorVisionProb @4 :Float32;
wheelOnRightProb @5 :Float32;
leftDriverData @6 :DriverData;
rightDriverData @7 :DriverData;
@@ -2229,10 +2226,14 @@ struct DriverStateV2 {
leftBlinkProb @7 :Float32;
rightBlinkProb @8 :Float32;
sunglassesProb @9 :Float32;
occludedProb @10 :Float32;
readyProb @11 :List(Float32);
notReadyProb @12 :List(Float32);
phoneProb @13 :Float32;
notReadyProbDEPRECATED @12 :List(Float32);
occludedProbDEPRECATED @10 :Float32;
readyProbDEPRECATED @11 :List(Float32);
}
dspExecutionTimeDEPRECATED @2 :Float32;
poorVisionProbDEPRECATED @4 :Float32;
}
struct DriverStateDEPRECATED @0xb83c6cc593ed0a00 {
@@ -2284,6 +2285,9 @@ struct DriverMonitoringState @0xb83cda094a1da284 {
hiStdCount @14 :UInt32;
isActiveMode @16 :Bool;
isRHD @4 :Bool;
uncertainCount @19 :UInt32;
phoneProbOffset @20 :Float32;
phoneProbValidCount @21 :UInt32;
isPreviewDEPRECATED @15 :Bool;
rhdCheckedDEPRECATED @5 :Bool;

View File

@@ -34,6 +34,7 @@ inline static std::unordered_map<std::string, uint32_t> keys = {
{"DoReboot", CLEAR_ON_MANAGER_START},
{"DoShutdown", CLEAR_ON_MANAGER_START},
{"DoUninstall", CLEAR_ON_MANAGER_START},
{"DriverTooDistracted", CLEAR_ON_MANAGER_START | CLEAR_ON_ONROAD_TRANSITION},
{"AlphaLongitudinalEnabled", PERSISTENT | DEVELOPMENT_ONLY},
{"ExperimentalMode", PERSISTENT},
{"ExperimentalModeConfirmed", PERSISTENT},

View File

@@ -414,7 +414,6 @@ struct CarControl {
jerk @9: Float32; # m/s^3
aTarget @10: Float32; # m/s^2
yStd @11: Float32;
enum LongControlState @0xe40f3a917d908282{
off @0;

View File

@@ -87,7 +87,7 @@ class CarController(CarControllerBase):
self.apply_angle_last = 0
self.lkas_max_torque = 0
self.angle_max_torque = 240
self.angle_max_torque = 250
self.canfd_debug = 0
self.MainMode_ACC_trigger = 0
@@ -178,24 +178,7 @@ class CarController(CarControllerBase):
self.apply_angle_last = actuators.steeringAngleDeg
self.lkas_max_torque = self.lkas_max_torque = max(self.lkas_max_torque - 20, 25)
else:
if hud_control.modelDesire in [1,2]:
base_max_torque = self.angle_max_torque
else:
curv = abs(actuators.curvature)
y_std = actuators.yStd
#curvature_threshold = np.interp(y_std, [0.0, 0.2], [0.5, 0.006])
curvature_threshold = np.interp(y_std, [0.0, 0.1], [0.5, 0.006])
curve_scale = np.clip(curv / curvature_threshold, 0.0, 1.0)
torque_pts = [
(1 - curve_scale) * self.angle_max_torque + curve_scale * 25,
(1 - curve_scale) * self.angle_max_torque + curve_scale * 50,
self.angle_max_torque
]
#base_max_torque = np.interp(CS.out.vEgo * CV.MS_TO_KPH, [0, 30, 60], torque_pts)
base_max_torque = np.interp(CS.out.vEgo * CV.MS_TO_KPH, [0, 20, 30], torque_pts)
target_torque = np.interp(abs(actuators.curvature), [0.0, 0.003, 0.006], [0.5 * base_max_torque, 0.75 * base_max_torque, base_max_torque])
target_torque = self.angle_max_torque
max_steering_tq = self.params.STEER_DRIVER_ALLOWANCE * 0.7
rate_ratio = max(20, max_steering_tq - abs(CS.out.steeringTorque)) / max_steering_tq

View File

@@ -452,7 +452,14 @@ class CarState(CarStateBase):
ret.brakeLights = ret.brakePressed or cp.vl["TCS"]["BrakeLight"] == 1
ret.steeringRateDeg = cp.vl["STEERING_SENSORS"]["STEERING_RATE"]
# steering angle deg값이 이상함. mdps값이 더 신뢰가 가는듯.. torque steering 차량도 확인해야함.
#ret.steeringAngleDeg = cp.vl["STEERING_SENSORS"]["STEERING_ANGLE"] * -1
if self.CP.flags & HyundaiFlags.ANGLE_CONTROL:
ret.steeringAngleDeg = cp.vl["MDPS"]["STEERING_ANGLE_2"] * -1
else:
ret.steeringAngleDeg = cp.vl["STEERING_SENSORS"]["STEERING_ANGLE"] * -1
ret.steeringTorque = cp.vl["MDPS"]["STEERING_COL_TORQUE"]
ret.steeringTorqueEps = cp.vl["MDPS"]["STEERING_OUT_TORQUE"]
ret.steeringPressed = self.update_steering_pressed(abs(ret.steeringTorque) > self.params.STEER_THRESHOLD, 5)

View File

@@ -59,8 +59,6 @@ class Controls:
self.pose_calibrator = PoseCalibrator()
self.calibrated_pose: Pose | None = None
self.yStd = 0.0
self.side_state = {
"left": {"main": {"dRel": None, "lat": None}, "sub": {"dRel": None, "lat": None}},
"right": {"main": {"dRel": None, "lat": None}, "sub": {"dRel": None, "lat": None}},
@@ -153,12 +151,6 @@ class Controls:
if steer_actuator_delay == 0.0:
steer_actuator_delay = self.sm['liveDelay'].lateralDelay
if len(model_v2.position.yStd) > 0:
yStd = np.interp(steer_actuator_delay + lat_smooth_seconds, ModelConstants.T_IDXS, model_v2.position.yStd)
self.yStd = yStd * 0.02 + self.yStd * 0.98
else:
self.yStd = 0.0
if not CC.latActive:
new_desired_curvature = self.curvature
elif self.lanefull_mode_enabled:
@@ -184,7 +176,6 @@ class Controls:
model_data=self.sm['modelV2'])
actuators.torque = float(steer)
actuators.steeringAngleDeg = float(steeringAngleDeg)
actuators.yStd = float(self.yStd)
# Ensure no NaNs/Infs
for p in ACTUATOR_FIELDS:
attr = getattr(actuators, p)

View File

@@ -11,11 +11,6 @@ class LatControlAngle(LatControl):
def __init__(self, CP, CI):
super().__init__(CP, CI)
self.sat_check_min_speed = 5.
self.angle_steers_des = 0.0
#print(CP.carFingerprint, "using LatControlAngle")
#self.factor = 0.5 if CP.carFingerprint in ["HYUNDAI_IONIQ_5_PE"] else 1.0
#self.factor = 0.5
#print("Angle factor", self.factor)
def update(self, active, CS, VM, params, steer_limited_by_controls, desired_curvature, CC, curvature_limited, model_data=None):
angle_log = log.ControlsState.LateralAngleState.new_message()
@@ -26,7 +21,7 @@ class LatControlAngle(LatControl):
else:
angle_log.active = True
angle_steers_des = math.degrees(VM.get_steer_from_curvature(-desired_curvature, CS.vEgo, params.roll))
angle_steers_des += params.angleOffsetDeg #* self.factor
angle_steers_des += params.angleOffsetDeg
angle_control_saturated = abs(angle_steers_des - CS.steeringAngleDeg) > STEER_ANGLE_SATURATION_THRESHOLD
angle_log.saturated = bool(self._check_saturation(angle_control_saturated, CS, False, curvature_limited))

View File

@@ -247,9 +247,7 @@ def retrieve_initial_vehicle_params(params: Params, CP: car.CarParams, replay: b
if debug and len(initial_filter_std) != 0:
p_initial = np.diag(initial_filter_std)
#steer_ratio, stiffness_factor, angle_offset_deg = lp.steerRatio, lp.stiffnessFactor, lp.angleOffsetAverageDeg
#steer_ratio, stiffness_factor, angle_offset_deg = lp.steerRatio, lp.stiffnessFactor, lp.angleOffsetDeg
steer_ratio, stiffness_factor = lp.steerRatio, lp.stiffnessFactor
steer_ratio, stiffness_factor, angle_offset_deg = lp.steerRatio, lp.stiffnessFactor, lp.angleOffsetAverageDeg
retrieve_success = True
except Exception as e:
cloudlog.error(f"Failed to retrieve initial values: {e}")
@@ -300,7 +298,7 @@ def main():
bearing = gps.bearingDeg
lat = gps.latitude
lon = gps.longitude
params_memory.put("LastGPSPosition", json.dumps({"latitude": lat, "longitude": lon, "bearing": bearing}))
params_memory.put_nonblocking("LastGPSPosition", json.dumps({"latitude": lat, "longitude": lon, "bearing": bearing}))
if sm.updated['livePose']:

View File

@@ -45,11 +45,11 @@ POLICY_METADATA_PATH = Path(__file__).parent / 'models/driving_policy_metadata.p
LAT_SMOOTH_SECONDS = 0.13
LONG_SMOOTH_SECONDS = 0.3
MIN_LAT_CONTROL_SPEED = 0.3
RECOVERY_POWER = 1.0 # The higher this number the more aggressively the model will recover to lanecenter, too high and it will ping-pong
def get_action_from_model(model_output: dict[str, np.ndarray], prev_action: log.ModelDataV2.Action,
lat_action_t: float, long_action_t: float, v_ego: float, lat_smooth_seconds: float, vEgoStopping: float) -> log.ModelDataV2.Action:
plan = model_output['plan'][0]
plan = model_output['plan'][0] + RECOVERY_POWER*model_output['planplus'][0]
desired_accel, should_stop, _, desired_velocity_now = get_accel_from_plan(plan[:,Plan.VELOCITY][:,0],
plan[:,Plan.ACCELERATION][:,0],
ModelConstants.T_IDXS,

View File

@@ -113,6 +113,7 @@ class Parser:
plan_mhp = self.is_mhp(outs, 'plan', ModelConstants.IDX_N * ModelConstants.PLAN_WIDTH)
plan_in_N, plan_out_N = (ModelConstants.PLAN_MHP_N, ModelConstants.PLAN_MHP_SELECTION) if plan_mhp else (0, 0)
self.parse_mdn('plan', outs, in_N=plan_in_N, out_N=plan_out_N, out_shape=(ModelConstants.IDX_N, ModelConstants.PLAN_WIDTH))
self.parse_mdn('planplus', outs, in_N=plan_in_N, out_N=plan_out_N, out_shape=(ModelConstants.IDX_N, ModelConstants.PLAN_WIDTH))
self.parse_categorical_crossentropy('desire_state', outs, out_shape=(ModelConstants.DESIRE_PRED_WIDTH,))
return outs

View File

@@ -13,6 +13,7 @@ def dmonitoringd_thread():
sm = messaging.SubMaster(['driverStateV2', 'liveCalibration', 'carState', 'selfdriveState', 'modelV2'], poll='driverStateV2')
DM = DriverMonitoring(rhd_saved=params.get_bool("IsRhdDetected"), always_on=params.get_bool("AlwaysOnDM"))
demo_mode=False
# 20Hz <- dmonitoringmodeld
while True:
@@ -22,8 +23,10 @@ def dmonitoringd_thread():
continue
valid = sm.all_checks()
if valid:
DM.run_step(sm)
if demo_mode and sm.valid['driverStateV2']:
DM.run_step(sm, demo=demo_mode)
elif valid:
DM.run_step(sm, demo=demo_mode)
# publish
dat = DM.get_state_packet(valid=valid)
@@ -32,11 +35,12 @@ def dmonitoringd_thread():
# load live always-on toggle
if sm['driverStateV2'].frameId % 40 == 1:
DM.always_on = params.get_bool("AlwaysOnDM")
demo_mode = params.get_bool("IsDriverViewEnabled")
# save rhd virtual toggle every 5 mins
if (sm['driverStateV2'].frameId % 6000 == 0 and
DM.wheelpos_learner.filtered_stat.n > DM.settings._WHEELPOS_FILTER_MIN_COUNT and
DM.wheel_on_right == (DM.wheelpos_learner.filtered_stat.M > DM.settings._WHEELPOS_THRESHOLD)):
if (sm['driverStateV2'].frameId % 6000 == 0 and not demo_mode and
DM.wheelpos.prob_offseter.filtered_stat.n > DM.settings._WHEELPOS_FILTER_MIN_COUNT and
DM.wheel_on_right == (DM.wheelpos.prob_offseter.filtered_stat.M > DM.settings._WHEELPOS_THRESHOLD)):
params.put_bool_nonblocking("IsRhdDetected", DM.wheel_on_right)
def main():

View File

@@ -4,10 +4,13 @@ import numpy as np
from cereal import car, log
import cereal.messaging as messaging
from openpilot.selfdrive.selfdrived.events import Events
from openpilot.selfdrive.selfdrived.alertmanager import set_offroad_alert
from openpilot.common.realtime import DT_DMON
from openpilot.common.filter_simple import FirstOrderFilter
from openpilot.common.params import Params
from openpilot.common.stat_live import RunningStatFilter
from openpilot.common.transformations.camera import DEVICE_CAMERAS
from openpilot.system.hardware import HARDWARE
EventName = log.OnroadEvent.EventName
@@ -18,7 +21,7 @@ EventName = log.OnroadEvent.EventName
# ******************************************************************************************
class DRIVER_MONITOR_SETTINGS:
def __init__(self):
def __init__(self, device_type):
self._DT_DMON = DT_DMON
# ref (page15-16): https://eur-lex.europa.eu/legal-content/EN/TXT/PDF/?uri=CELEX:42018X1947&rid=2
self._AWARENESS_TIME = 30. # passive wheeltouch total timeout
@@ -33,12 +36,10 @@ class DRIVER_MONITOR_SETTINGS:
self._SG_THRESHOLD = 0.9
self._BLINK_THRESHOLD = 0.865
self._EE_THRESH11 = 0.4
self._EE_THRESH12 = 15.0
self._EE_MAX_OFFSET1 = 0.06
self._EE_MIN_OFFSET1 = 0.025
self._EE_THRESH21 = 0.01
self._EE_THRESH22 = 0.35
self._PHONE_THRESH = 0.75 if device_type == 'mici' else 0.4
self._PHONE_THRESH2 = 15.0
self._PHONE_MAX_OFFSET = 0.06
self._PHONE_MIN_OFFSET = 0.025
self._POSE_PITCH_THRESHOLD = 0.3133
self._POSE_PITCH_THRESHOLD_SLACK = 0.3237
@@ -54,6 +55,9 @@ class DRIVER_MONITOR_SETTINGS:
self._YAW_MAX_OFFSET = 0.289
self._YAW_MIN_OFFSET = -0.0246
self._DCAM_UNCERTAIN_ALERT_THRESHOLD = 0.1
self._DCAM_UNCERTAIN_ALERT_COUNT = int(60 / self._DT_DMON)
self._DCAM_UNCERTAIN_RESET_COUNT = int(20 / self._DT_DMON)
self._POSESTD_THRESHOLD = 0.3
self._HI_STD_FALLBACK_TIME = int(10 / self._DT_DMON) # fall back to wheel touch if model is uncertain for 10s
self._DISTRACTED_FILTER_TS = 0.25 # 0.6Hz
@@ -77,7 +81,7 @@ class DistractedType:
NOT_DISTRACTED = 0
DISTRACTED_POSE = 1 << 0
DISTRACTED_BLINK = 1 << 1
DISTRACTED_E2E = 1 << 2
DISTRACTED_PHONE = 1 << 2
class DriverPose:
def __init__(self, max_trackable):
@@ -94,6 +98,12 @@ class DriverPose:
self.cfactor_pitch = 1.
self.cfactor_yaw = 1.
class DriverProb:
def __init__(self, max_trackable):
self.prob = 0.
self.prob_offseter = RunningStatFilter(max_trackable=max_trackable)
self.prob_calibrated = False
class DriverBlink:
def __init__(self):
self.left = 0.
@@ -126,21 +136,14 @@ def face_orientation_from_net(angles_desc, pos_desc, rpy_calib):
class DriverMonitoring:
def __init__(self, rhd_saved=False, settings=None, always_on=False):
if settings is None:
settings = DRIVER_MONITOR_SETTINGS()
# init policy settings
self.settings = settings
self.settings = settings if settings is not None else DRIVER_MONITOR_SETTINGS(device_type=HARDWARE.get_device_type())
# init driver status
self.wheelpos_learner = RunningStatFilter()
self.wheelpos = DriverProb(-1)
self.pose = DriverPose(self.settings._POSE_OFFSET_MAX_COUNT)
self.phone = DriverProb(self.settings._POSE_OFFSET_MAX_COUNT)
self.blink = DriverBlink()
self.eev1 = 0.
self.eev2 = 1.
self.ee1_offseter = RunningStatFilter(max_trackable=self.settings._POSE_OFFSET_MAX_COUNT)
self.ee2_offseter = RunningStatFilter(max_trackable=self.settings._POSE_OFFSET_MAX_COUNT)
self.ee1_calibrated = False
self.ee2_calibrated = False
self.always_on = always_on
self.distracted_types = []
@@ -158,6 +161,12 @@ class DriverMonitoring:
self.hi_stds = 0
self.threshold_pre = self.settings._DISTRACTED_PRE_TIME_TILL_TERMINAL / self.settings._DISTRACTED_TIME
self.threshold_prompt = self.settings._DISTRACTED_PROMPT_TIME_TILL_TERMINAL / self.settings._DISTRACTED_TIME
self.dcam_uncertain_cnt = 0
self.dcam_uncertain_alerted = False # once per drive
self.dcam_reset_cnt = 0
self.params = Params()
self.too_distracted = self.params.get_bool("DriverTooDistracted")
self._reset_awareness()
self._set_timers(active_monitoring=True)
@@ -201,8 +210,8 @@ class DriverMonitoring:
self.step_change = self.settings._DT_DMON / self.settings._AWARENESS_TIME
self.active_monitoring_mode = False
def _set_policy(self, model_data, car_speed):
bp = model_data.meta.disengagePredictions.brakeDisengageProbs[0] # brake disengage prob in next 2s
def _set_policy(self, brake_disengage_prob, car_speed):
bp = brake_disengage_prob
k1 = max(-0.00156*((car_speed-16)**2)+0.6, 0.2)
bp_normal = max(min(bp / k1, 0.5),0)
self.pose.cfactor_pitch = np.interp(bp_normal, [0, 0.5],
@@ -232,33 +241,35 @@ class DriverMonitoring:
if (self.blink.left + self.blink.right)*0.5 > self.settings._BLINK_THRESHOLD:
distracted_types.append(DistractedType.DISTRACTED_BLINK)
if self.ee1_calibrated:
ee1_dist = self.eev1 > max(min(self.ee1_offseter.filtered_stat.M, self.settings._EE_MAX_OFFSET1), self.settings._EE_MIN_OFFSET1) \
* self.settings._EE_THRESH12
if self.phone.prob_calibrated:
using_phone = self.phone.prob > max(min(self.phone.prob_offseter.filtered_stat.M, self.settings._PHONE_MAX_OFFSET), self.settings._PHONE_MIN_OFFSET) \
* self.settings._PHONE_THRESH2
else:
ee1_dist = self.eev1 > self.settings._EE_THRESH11
if ee1_dist:
distracted_types.append(DistractedType.DISTRACTED_E2E)
using_phone = self.phone.prob > self.settings._PHONE_THRESH
if using_phone:
distracted_types.append(DistractedType.DISTRACTED_PHONE)
return distracted_types
def _update_states(self, driver_state, cal_rpy, car_speed, op_engaged):
def _update_states(self, driver_state, cal_rpy, car_speed, op_engaged, standstill, demo_mode=False):
rhd_pred = driver_state.wheelOnRightProb
# calibrates only when there's movement and either face detected
if car_speed > self.settings._WHEELPOS_CALIB_MIN_SPEED and (driver_state.leftDriverData.faceProb > self.settings._FACE_THRESHOLD or
driver_state.rightDriverData.faceProb > self.settings._FACE_THRESHOLD):
self.wheelpos_learner.push_and_update(rhd_pred)
if self.wheelpos_learner.filtered_stat.n > self.settings._WHEELPOS_FILTER_MIN_COUNT:
self.wheel_on_right = self.wheelpos_learner.filtered_stat.M > self.settings._WHEELPOS_THRESHOLD
self.wheelpos.prob_offseter.push_and_update(rhd_pred)
self.wheelpos.prob_calibrated = self.wheelpos.prob_offseter.filtered_stat.n > self.settings._WHEELPOS_FILTER_MIN_COUNT
if self.wheelpos.prob_calibrated or demo_mode:
self.wheel_on_right = self.wheelpos.prob_offseter.filtered_stat.M > self.settings._WHEELPOS_THRESHOLD
else:
self.wheel_on_right = self.wheel_on_right_default # use default/saved if calibration is unfinished
# make sure no switching when engaged
if op_engaged and self.wheel_on_right_last is not None and self.wheel_on_right_last != self.wheel_on_right:
if op_engaged and self.wheel_on_right_last is not None and self.wheel_on_right_last != self.wheel_on_right and not demo_mode:
self.wheel_on_right = self.wheel_on_right_last
driver_data = driver_state.rightDriverData if self.wheel_on_right else driver_state.leftDriverData
if not all(len(x) > 0 for x in (driver_data.faceOrientation, driver_data.facePosition,
driver_data.faceOrientationStd, driver_data.facePositionStd,
driver_data.readyProb, driver_data.notReadyProb)):
driver_data.faceOrientationStd, driver_data.facePositionStd)):
return
self.face_detected = driver_data.faceProb > self.settings._FACE_THRESHOLD
@@ -274,11 +285,11 @@ class DriverMonitoring:
* (driver_data.sunglassesProb < self.settings._SG_THRESHOLD)
self.blink.right = driver_data.rightBlinkProb * (driver_data.rightEyeProb > self.settings._EYE_THRESHOLD) \
* (driver_data.sunglassesProb < self.settings._SG_THRESHOLD)
self.eev1 = driver_data.notReadyProb[0]
self.eev2 = driver_data.readyProb[0]
self.phone.prob = driver_data.phoneProb
self.distracted_types = self._get_distracted_types()
self.driver_distracted = (DistractedType.DISTRACTED_E2E in self.distracted_types or DistractedType.DISTRACTED_POSE in self.distracted_types
self.driver_distracted = (DistractedType.DISTRACTED_PHONE in self.distracted_types
or DistractedType.DISTRACTED_POSE in self.distracted_types
or DistractedType.DISTRACTED_BLINK in self.distracted_types) \
and driver_data.faceProb > self.settings._FACE_THRESHOLD and self.pose.low_std
self.driver_distraction_filter.update(self.driver_distracted)
@@ -288,13 +299,21 @@ class DriverMonitoring:
if self.face_detected and car_speed > self.settings._POSE_CALIB_MIN_SPEED and self.pose.low_std and (not op_engaged or not self.driver_distracted):
self.pose.pitch_offseter.push_and_update(self.pose.pitch)
self.pose.yaw_offseter.push_and_update(self.pose.yaw)
self.ee1_offseter.push_and_update(self.eev1)
self.ee2_offseter.push_and_update(self.eev2)
self.phone.prob_offseter.push_and_update(self.phone.prob)
self.pose.calibrated = self.pose.pitch_offseter.filtered_stat.n > self.settings._POSE_OFFSET_MIN_COUNT and \
self.pose.yaw_offseter.filtered_stat.n > self.settings._POSE_OFFSET_MIN_COUNT
self.ee1_calibrated = self.ee1_offseter.filtered_stat.n > self.settings._POSE_OFFSET_MIN_COUNT
self.ee2_calibrated = self.ee2_offseter.filtered_stat.n > self.settings._POSE_OFFSET_MIN_COUNT
self.phone.prob_calibrated = self.phone.prob_offseter.filtered_stat.n > self.settings._POSE_OFFSET_MIN_COUNT
if self.face_detected and not self.driver_distracted:
if model_std_max > self.settings._DCAM_UNCERTAIN_ALERT_THRESHOLD:
if not standstill:
self.dcam_uncertain_cnt += 1
self.dcam_reset_cnt = 0
else:
self.dcam_reset_cnt += 1
if self.dcam_reset_cnt > self.settings._DCAM_UNCERTAIN_RESET_COUNT:
self.dcam_uncertain_cnt = 0
self.is_model_uncertain = self.hi_stds > self.settings._HI_STD_FALLBACK_TIME
self._set_timers(self.face_detected and not self.is_model_uncertain)
@@ -305,10 +324,15 @@ class DriverMonitoring:
def _update_events(self, driver_engaged, op_engaged, standstill, wrong_gear, car_speed):
self._reset_events()
# Block engaging after max number of distrations or when alert active
# Block engaging until ignition cycle after max number or time of distractions
if self.terminal_alert_cnt >= self.settings._MAX_TERMINAL_ALERTS or \
self.terminal_time >= self.settings._MAX_TERMINAL_DURATION or \
self.always_on and self.awareness <= self.threshold_prompt:
self.terminal_time >= self.settings._MAX_TERMINAL_DURATION:
if not self.too_distracted:
self.params.put_bool_nonblocking("DriverTooDistracted", True)
self.too_distracted = True
# Always-on distraction lockout is temporary
if self.too_distracted or (self.always_on and self.awareness <= self.threshold_prompt):
self.current_events.add(EventName.tooDistracted)
always_on_valid = self.always_on and not wrong_gear
@@ -367,6 +391,10 @@ class DriverMonitoring:
if alert is not None:
self.current_events.add(alert)
if self.dcam_uncertain_cnt > self.settings._DCAM_UNCERTAIN_ALERT_COUNT and not self.dcam_uncertain_alerted:
set_offroad_alert("Offroad_DriverMonitoringUncertain", True)
self.dcam_uncertain_alerted = True
def get_state_packet(self, valid=True):
# build driverMonitoringState packet
@@ -381,6 +409,8 @@ class DriverMonitoring:
"posePitchValidCount": self.pose.pitch_offseter.filtered_stat.n,
"poseYawOffset": self.pose.yaw_offseter.filtered_stat.mean(),
"poseYawValidCount": self.pose.yaw_offseter.filtered_stat.n,
"phoneProbOffset": self.phone.prob_offseter.filtered_stat.mean(),
"phoneProbValidCount": self.phone.prob_offseter.filtered_stat.n,
"stepChange": self.step_change,
"awarenessActive": self.awareness_active,
"awarenessPassive": self.awareness_passive,
@@ -388,29 +418,47 @@ class DriverMonitoring:
"hiStdCount": self.hi_stds,
"isActiveMode": self.active_monitoring_mode,
"isRHD": self.wheel_on_right,
"uncertainCount": self.dcam_uncertain_cnt,
}
return dat
def run_step(self, sm):
# Set strictness
def run_step(self, sm, demo=False):
if demo:
highway_speed = 30
enabled = True
wrong_gear = False
standstill = False
driver_engaged = False
brake_disengage_prob = 1.0
rpyCalib = [0., 0., 0.]
else:
highway_speed = sm['carState'].vEgo
enabled = sm['selfdriveState'].enabled
wrong_gear = sm['carState'].gearShifter not in (car.CarState.GearShifter.drive, car.CarState.GearShifter.low)
standstill = sm['carState'].standstill
driver_engaged = sm['carState'].steeringPressed or sm['carState'].gasPressed
brake_disengage_prob = sm['modelV2'].meta.disengagePredictions.brakeDisengageProbs[0] # brake disengage prob in next 2s
rpyCalib = sm['liveCalibration'].rpyCalib
self._set_policy(
model_data=sm['modelV2'],
car_speed=sm['carState'].vEgo
brake_disengage_prob=brake_disengage_prob,
car_speed=highway_speed,
)
# Parse data from dmonitoringmodeld
self._update_states(
driver_state=sm['driverStateV2'],
cal_rpy=sm['liveCalibration'].rpyCalib,
car_speed=sm['carState'].vEgo,
op_engaged=sm['selfdriveState'].enabled
cal_rpy=rpyCalib,
car_speed=highway_speed,
op_engaged=enabled,
standstill=standstill,
demo_mode=demo,
)
# Update distraction events
self._update_events(
driver_engaged=sm['carState'].steeringPressed or sm['carState'].gasPressed,
op_engaged=sm['selfdriveState'].enabled,
standstill=sm['carState'].standstill,
wrong_gear=sm['carState'].gearShifter in [car.CarState.GearShifter.reverse, car.CarState.GearShifter.park],
car_speed=sm['carState'].vEgo
driver_engaged=driver_engaged,
op_engaged=enabled,
standstill=standstill,
wrong_gear=wrong_gear,
car_speed=highway_speed
)

View File

@@ -3,9 +3,10 @@ import numpy as np
from cereal import log
from openpilot.common.realtime import DT_DMON
from openpilot.selfdrive.monitoring.helpers import DriverMonitoring, DRIVER_MONITOR_SETTINGS
from openpilot.system.hardware import HARDWARE
EventName = log.OnroadEvent.EventName
dm_settings = DRIVER_MONITOR_SETTINGS()
dm_settings = DRIVER_MONITOR_SETTINGS(device_type=HARDWARE.get_device_type())
TEST_TIMESPAN = 120 # seconds
DISTRACTED_SECONDS_TO_ORANGE = dm_settings._DISTRACTED_TIME - dm_settings._DISTRACTED_PROMPT_TIME_TILL_TERMINAL + 1
@@ -25,8 +26,7 @@ def make_msg(face_detected, distracted=False, model_uncertain=False):
ds.leftDriverData.faceOrientationStd = [1.*model_uncertain, 1.*model_uncertain, 1.*model_uncertain]
ds.leftDriverData.facePositionStd = [1.*model_uncertain, 1.*model_uncertain]
# TODO: test both separately when e2e is used
ds.leftDriverData.readyProb = [0., 0., 0., 0.]
ds.leftDriverData.notReadyProb = [0., 0.]
ds.leftDriverData.phoneProb = 0.
return ds
@@ -54,7 +54,7 @@ class TestMonitoring:
DM = DriverMonitoring()
events = []
for idx in range(len(msgs)):
DM._update_states(msgs[idx], [0, 0, 0], 0, engaged[idx])
DM._update_states(msgs[idx], [0, 0, 0], 0, engaged[idx], standstill[idx])
# cal_rpy and car_speed don't matter here
# evaluate events at 10Hz for tests

View File

@@ -362,7 +362,8 @@ class SelfdriveD:
if not REPLAY:
# Check for mismatch between openpilot and car's PCM
cruise_mismatch = CS.cruiseState.enabled and (not self.enabled or not self.CP.pcmCruise)
#cruise_mismatch = CS.cruiseState.enabled and (not self.enabled or not self.CP.pcmCruise)
cruise_mismatch = CS.cruiseState.enabled and not self.enabled
self.cruise_mismatch_counter = self.cruise_mismatch_counter + 1 if cruise_mismatch else 0
if self.cruise_mismatch_counter > int(6. / DT_CTRL):
self.events.add(EventName.cruiseMismatch)

View File

@@ -26,9 +26,6 @@ void CameraBuf::init(cl_device_id device_id, cl_context context, SpectraCamera *
LOGD("allocated %d CL buffers", frame_buf_count);
}
out_img_width = sensor->frame_width;
out_img_height = sensor->hdr_offset > 0 ? (sensor->frame_height - sensor->hdr_offset) / 2 : sensor->frame_height;
// the encoder HW tells us the size it wants after setting it up.
// TODO: VENUS_BUFFER_SIZE should give the size, but it's too small. dependent on encoder settings?
size_t nv12_size = (out_img_width <= 1344 ? 2900 : 2346)*cam->stride;

View File

@@ -32,7 +32,7 @@ public:
VisionBuf *cur_yuv_buf;
VisionBuf *cur_camera_buf;
std::unique_ptr<VisionBuf[]> camera_bufs_raw;
int out_img_width, out_img_height;
uint32_t out_img_width, out_img_height;
CameraBuf() = default;
~CameraBuf();

View File

@@ -50,7 +50,7 @@ public:
Rect ae_xywh = {};
float measured_grey_fraction = 0;
float target_grey_fraction = 0.3;
float target_grey_fraction = 0.125;
float fl_pix = 0;
std::unique_ptr<PubMaster> pm;
@@ -73,7 +73,7 @@ void CameraState::init(VisionIpcServer *v, cl_device_id device_id, cl_context ct
if (!camera.enabled) return;
fl_pix = camera.cc.focal_len / camera.sensor->pixel_size_mm;
fl_pix = camera.cc.focal_len / camera.sensor->pixel_size_mm / camera.sensor->out_scale;
set_exposure_rect();
dc_gain_weight = camera.sensor->dc_gain_min_weight;
@@ -89,7 +89,7 @@ void CameraState::set_exposure_rect() {
// set areas for each camera, shouldn't be changed
std::vector<std::pair<Rect, float>> ae_targets = {
// (Rect, F)
std::make_pair((Rect){96, 250, 1734, 524}, 567.0), // wide
std::make_pair((Rect){96, 400, 1734, 524}, 567.0), // wide
std::make_pair((Rect){96, 160, 1734, 986}, 2648.0), // road
std::make_pair((Rect){96, 242, 1736, 906}, 567.0) // driver
};
@@ -107,10 +107,10 @@ void CameraState::set_exposure_rect() {
float fl_ref = ae_target.second;
ae_xywh = (Rect){
std::max(0, camera.buf.out_img_width / 2 - (int)(fl_pix / fl_ref * xywh_ref.w / 2)),
std::max(0, camera.buf.out_img_height / 2 - (int)(fl_pix / fl_ref * (h_ref / 2 - xywh_ref.y))),
std::min((int)(fl_pix / fl_ref * xywh_ref.w), camera.buf.out_img_width / 2 + (int)(fl_pix / fl_ref * xywh_ref.w / 2)),
std::min((int)(fl_pix / fl_ref * xywh_ref.h), camera.buf.out_img_height / 2 + (int)(fl_pix / fl_ref * (h_ref / 2 - xywh_ref.y)))
std::max(0, (int)camera.buf.out_img_width / 2 - (int)(fl_pix / fl_ref * xywh_ref.w / 2)),
std::max(0, (int)camera.buf.out_img_height / 2 - (int)(fl_pix / fl_ref * (h_ref / 2 - xywh_ref.y))),
std::min((int)(fl_pix / fl_ref * xywh_ref.w), (int)camera.buf.out_img_width / 2 + (int)(fl_pix / fl_ref * xywh_ref.w / 2)),
std::min((int)(fl_pix / fl_ref * xywh_ref.h), (int)camera.buf.out_img_height / 2 + (int)(fl_pix / fl_ref * (h_ref / 2 - xywh_ref.y)))
};
}

View File

@@ -13,7 +13,7 @@ typedef enum {
ISP_BPS_PROCESSED, // fully processed image through the BPS
} SpectraOutputType;
// For the comma 3/3X three camera platform
// For the comma 3X three camera platform
struct CameraConfig {
int camera_num;

View File

@@ -105,7 +105,7 @@ int build_update(uint8_t *dst, const CameraConfig cam, const SensorInfo *s, std:
}
int build_initial_config(uint8_t *dst, const CameraConfig cam, const SensorInfo *s, std::vector<uint32_t> &patches) {
int build_initial_config(uint8_t *dst, const CameraConfig cam, const SensorInfo *s, std::vector<uint32_t> &patches, uint32_t out_width, uint32_t out_height) {
uint8_t *start = dst;
// start with the every frame config
@@ -185,12 +185,12 @@ int build_initial_config(uint8_t *dst, const CameraConfig cam, const SensorInfo
// output size/scaling
dst += write_cont(dst, 0xa3c, {
0x00000003,
((s->frame_width - 1) << 16) | (s->frame_width - 1),
((out_width - 1) << 16) | (s->frame_width - 1),
0x30036666,
0x00000000,
0x00000000,
s->frame_width - 1,
((s->frame_height - 1) << 16) | (s->frame_height - 1),
((out_height - 1) << 16) | (s->frame_height - 1),
0x30036666,
0x00000000,
0x00000000,
@@ -198,12 +198,12 @@ int build_initial_config(uint8_t *dst, const CameraConfig cam, const SensorInfo
});
dst += write_cont(dst, 0xa68, {
0x00000003,
((s->frame_width/2 - 1) << 16) | (s->frame_width - 1),
((out_width / 2 - 1) << 16) | (s->frame_width - 1),
0x3006cccc,
0x00000000,
0x00000000,
s->frame_width - 1,
((s->frame_height/2 - 1) << 16) | (s->frame_height - 1),
((out_height / 2 - 1) << 16) | (s->frame_height - 1),
0x3006cccc,
0x00000000,
0x00000000,
@@ -212,12 +212,12 @@ int build_initial_config(uint8_t *dst, const CameraConfig cam, const SensorInfo
// cropping
dst += write_cont(dst, 0xe10, {
s->frame_height - 1,
s->frame_width - 1,
out_height - 1,
out_width - 1,
});
dst += write_cont(dst, 0xe30, {
s->frame_height/2 - 1,
s->frame_width - 1,
out_height / 2 - 1,
out_width - 1,
});
dst += write_cont(dst, 0xe18, {
0x0ff00000,

View File

@@ -281,18 +281,21 @@ void SpectraCamera::camera_open(VisionIpcServer *v, cl_device_id device_id, cl_c
if (!enabled) return;
buf.out_img_width = sensor->frame_width / sensor->out_scale;
buf.out_img_height = (sensor->hdr_offset > 0 ? (sensor->frame_height - sensor->hdr_offset) / 2 : sensor->frame_height) / sensor->out_scale;
// size is driven by all the HW that handles frames,
// the video encoder has certain alignment requirements in this case
stride = VENUS_Y_STRIDE(COLOR_FMT_NV12, sensor->frame_width);
y_height = VENUS_Y_SCANLINES(COLOR_FMT_NV12, sensor->frame_height);
uv_height = VENUS_UV_SCANLINES(COLOR_FMT_NV12, sensor->frame_height);
stride = VENUS_Y_STRIDE(COLOR_FMT_NV12, buf.out_img_width);
y_height = VENUS_Y_SCANLINES(COLOR_FMT_NV12, buf.out_img_height);
uv_height = VENUS_UV_SCANLINES(COLOR_FMT_NV12, buf.out_img_height);
uv_offset = stride*y_height;
yuv_size = uv_offset + stride*uv_height;
if (cc.output_type != ISP_RAW_OUTPUT) {
uv_offset = ALIGNED_SIZE(uv_offset, 0x1000);
yuv_size = uv_offset + ALIGNED_SIZE(stride*uv_height, 0x1000);
}
assert(stride == VENUS_UV_STRIDE(COLOR_FMT_NV12, sensor->frame_width));
assert(stride == VENUS_UV_STRIDE(COLOR_FMT_NV12, buf.out_img_width));
assert(y_height/2 == uv_height);
open = true;
@@ -645,14 +648,14 @@ void SpectraCamera::config_bps(int idx, int request_id) {
io_cfg[1].mem_handle[0] = buf_handle_yuv[idx];
io_cfg[1].mem_handle[1] = buf_handle_yuv[idx];
io_cfg[1].planes[0] = (struct cam_plane_cfg){
.width = sensor->frame_width,
.height = sensor->frame_height,
.width = buf.out_img_width,
.height = buf.out_img_height,
.plane_stride = stride,
.slice_height = y_height,
};
io_cfg[1].planes[1] = (struct cam_plane_cfg){
.width = sensor->frame_width,
.height = sensor->frame_height/2,
.width = buf.out_img_width,
.height = buf.out_img_height / 2,
.plane_stride = stride,
.slice_height = uv_height,
};
@@ -737,7 +740,7 @@ void SpectraCamera::config_ife(int idx, int request_id, bool init) {
bool is_raw = cc.output_type != ISP_IFE_PROCESSED;
if (!is_raw) {
if (init) {
buf_desc[0].length = build_initial_config((unsigned char*)ife_cmd.ptr + buf_desc[0].offset, cc, sensor.get(), patches);
buf_desc[0].length = build_initial_config((unsigned char*)ife_cmd.ptr + buf_desc[0].offset, cc, sensor.get(), patches, buf.out_img_width, buf.out_img_height);
} else {
buf_desc[0].length = build_update((unsigned char*)ife_cmd.ptr + buf_desc[0].offset, cc, sensor.get(), patches);
}
@@ -844,14 +847,14 @@ void SpectraCamera::config_ife(int idx, int request_id, bool init) {
io_cfg[0].mem_handle[0] = buf_handle_yuv[idx];
io_cfg[0].mem_handle[1] = buf_handle_yuv[idx];
io_cfg[0].planes[0] = (struct cam_plane_cfg){
.width = sensor->frame_width,
.height = sensor->frame_height,
.width = buf.out_img_width,
.height = buf.out_img_height,
.plane_stride = stride,
.slice_height = y_height,
};
io_cfg[0].planes[1] = (struct cam_plane_cfg){
.width = sensor->frame_width,
.height = sensor->frame_height/2,
.width = buf.out_img_width,
.height = buf.out_img_height / 2,
.plane_stride = stride,
.slice_height = uv_height,
};
@@ -993,6 +996,9 @@ bool SpectraCamera::openSensor() {
LOGD("-- Probing sensor %d", cc.camera_num);
auto init_sensor_lambda = [this](SensorInfo *s) {
if (s->image_sensor == cereal::FrameData::ImageSensor::OS04C10 && cc.output_type == ISP_IFE_PROCESSED) {
((OS04C10*)s)->ife_downscale_configure();
}
sensor.reset(s);
return (sensors_init() == 0);
};
@@ -1065,8 +1071,8 @@ void SpectraCamera::configISP() {
.data[0] = (struct cam_isp_out_port_info){
.res_type = CAM_ISP_IFE_OUT_RES_FULL,
.format = CAM_FORMAT_NV12,
.width = sensor->frame_width,
.height = sensor->frame_height + sensor->extra_height,
.width = buf.out_img_width,
.height = buf.out_img_height + sensor->extra_height,
.comp_grp_id = 0x0, .split_point = 0x0, .secure_mode = 0x0,
},
};
@@ -1141,8 +1147,8 @@ void SpectraCamera::configICP() {
},
.out_res[0] = (struct cam_icp_res_info){
.format = 0x3, // YUV420NV12
.width = sensor->frame_width,
.height = sensor->frame_height,
.width = buf.out_img_width,
.height = buf.out_img_height,
.fps = 20,
},
};

View File

@@ -20,6 +20,17 @@ const uint32_t os04c10_analog_gains_reg[] = {
} // namespace
void OS04C10::ife_downscale_configure() {
  // Reconfigure this sensor for its full-resolution readout mode and let the
  // IFE do a 2x downscale (out_scale is the divisor applied to frame_width /
  // frame_height when sizing the output buffers).
  out_scale = 2;

  // Full-resolution mode geometry and optics.
  frame_width = 2688;
  frame_height = 1520;
  pixel_size_mm = 0.002;
  exposure_time_max = 2352;

  // Append the mode-specific register overrides after the base init sequence
  // so they win on writeout.
  for (const auto &payload : ife_downscale_override_array_os04c10) {
    init_reg_array.push_back(payload);
  }
}
OS04C10::OS04C10() {
image_sensor = cereal::FrameData::ImageSensor::OS04C10;
bayer_pattern = CAM_ISP_PATTERN_BAYER_BGBGBG;

View File

@@ -88,8 +88,6 @@ const struct i2c_random_wr_payload init_array_os04c10[] = {
{0x37c7, 0xa8},
{0x37da, 0x11},
{0x381f, 0x08},
// {0x3829, 0x03},
// {0x3832, 0x00},
{0x3881, 0x00},
{0x3888, 0x04},
{0x388b, 0x00},
@@ -332,3 +330,23 @@ const struct i2c_random_wr_payload init_array_os04c10[] = {
{0x5104, 0x08}, {0x5105, 0xd6},
{0x5144, 0x08}, {0x5145, 0xd6},
};
// I2C register overrides appended to the sensor init sequence by
// OS04C10::ife_downscale_configure() to select the full-resolution readout
// mode named in the comment below. Register meanings follow the OmniVision
// OS04C10 datasheet; per-line notes are best-effort — confirm against the
// datasheet before changing values.
const struct i2c_random_wr_payload ife_downscale_override_array_os04c10[] = {
// OS04C10_AA_00_02_17_wAO_2688x1524_MIPI728Mbps_Linear12bit_20FPS_4Lane_MCLK24MHz
{0x3c8c, 0x40},
{0x3714, 0x24},
{0x37c2, 0x04},
{0x3662, 0x10},
{0x37d9, 0x08},
{0x4041, 0x07},
{0x4008, 0x02},
{0x4009, 0x0d},
{0x3808, 0x0a}, {0x3809, 0x80}, // presumably output width: 0x0a80 = 2688, matches ife_downscale_configure()
{0x380a, 0x05}, {0x380b, 0xf0}, // presumably output height: 0x05f0 = 1520, matches ife_downscale_configure()
{0x3814, 0x01},
{0x3816, 0x01},
{0x380c, 0x08}, {0x380d, 0x5c}, // HTS
{0x380e, 0x09}, {0x380f, 0x38}, // VTS
{0x3820, 0xb0},
{0x3821, 0x00},
};

View File

@@ -29,6 +29,7 @@ public:
uint32_t frame_stride;
uint32_t frame_offset = 0;
uint32_t extra_height = 0;
int out_scale = 1;
int registers_offset = -1;
int stats_offset = -1;
int hdr_offset = -1;
@@ -109,6 +110,7 @@ public:
class OS04C10 : public SensorInfo {
public:
OS04C10();
void ife_downscale_configure();
std::vector<i2c_random_wr_payload> getExposureRegisters(int exposure_time, int new_exp_g, bool dc_gain_enabled) const override;
float getExposureScore(float desired_ev, int exp_t, int exp_g_idx, float exp_gain, int gain_idx) const override;
int getSlaveAddress(int port) const override;