locationd: timing spikes resilience (#34080)
* Locationd scenario for timing spike * Add test for consistent timing spike * Resilience to bad timing * Test update * Refactor test * fix comment * Decay based on frequency * Fix * Update comment * Only for critical services * Fix tests
This commit is contained in:
@@ -8,6 +8,7 @@ from enum import Enum
|
||||
from collections import defaultdict
|
||||
|
||||
from cereal import log, messaging
|
||||
from cereal.services import SERVICE_LIST
|
||||
from openpilot.common.transformations.orientation import rot_from_euler
|
||||
from openpilot.common.realtime import config_realtime_process
|
||||
from openpilot.common.params import Params
|
||||
@@ -23,8 +24,10 @@ MIN_STD_SANITY_CHECK = 1e-5 # m or rad
|
||||
MAX_FILTER_REWIND_TIME = 0.8 # s
|
||||
MAX_SENSOR_TIME_DIFF = 0.1 # s
|
||||
YAWRATE_CROSS_ERR_CHECK_FACTOR = 30
|
||||
INPUT_INVALID_THRESHOLD = 0.5
|
||||
INPUT_INVALID_DECAY = 0.9993 # ~10 secs to resume after a bad input
|
||||
INPUT_INVALID_THRESHOLD = 0.5 # 0 bad inputs ignored
|
||||
TIMING_INVALID_THRESHOLD = 2.5 # 2 bad timings ignored
|
||||
INPUT_INVALID_DECAY = 0.9993 # ~10 secs to resume after exceeding allowed bad inputs by one (at 100hz)
|
||||
TIMING_INVALID_DECAY = 0.9990 # ~2 secs to resume after exceeding allowed bad timings by one (at 100hz)
|
||||
POSENET_STD_INITIAL_VALUE = 10.0
|
||||
POSENET_STD_HIST_HALF = 20
|
||||
|
||||
@@ -265,10 +268,13 @@ def main():
|
||||
estimator = LocationEstimator(DEBUG)
|
||||
|
||||
filter_initialized = False
|
||||
critcal_services = ["accelerometer", "gyroscope", "liveCalibration", "cameraOdometry"]
|
||||
observation_timing_invalid = False
|
||||
critcal_services = ["accelerometer", "gyroscope", "cameraOdometry"]
|
||||
observation_timing_invalid = defaultdict(int)
|
||||
observation_input_invalid = defaultdict(int)
|
||||
|
||||
input_invalid_decay = {s: INPUT_INVALID_DECAY ** (100. / SERVICE_LIST[s].frequency) for s in critcal_services}
|
||||
timing_invalid_decay = {s: TIMING_INVALID_DECAY ** (100. / SERVICE_LIST[s].frequency) for s in critcal_services}
|
||||
|
||||
initial_pose = params.get("LocationFilterInitialState")
|
||||
if initial_pose is not None:
|
||||
initial_pose = json.loads(initial_pose)
|
||||
@@ -282,8 +288,6 @@ def main():
|
||||
acc_msgs, gyro_msgs = (messaging.drain_sock(sock) for sock in sensor_sockets)
|
||||
|
||||
if filter_initialized:
|
||||
observation_timing_invalid = False
|
||||
|
||||
msgs = []
|
||||
for msg in acc_msgs + gyro_msgs:
|
||||
t, valid, which, data = msg.logMonoTime, msg.valid, msg.which(), getattr(msg, msg.which())
|
||||
@@ -298,18 +302,23 @@ def main():
|
||||
if valid:
|
||||
t = log_mono_time * 1e-9
|
||||
res = estimator.handle_log(t, which, msg)
|
||||
if which not in critcal_services:
|
||||
continue
|
||||
|
||||
if res == HandleLogResult.TIMING_INVALID:
|
||||
observation_timing_invalid = True
|
||||
observation_timing_invalid[which] += 1
|
||||
elif res == HandleLogResult.INPUT_INVALID:
|
||||
observation_input_invalid[which] += 1
|
||||
else:
|
||||
observation_input_invalid[which] *= INPUT_INVALID_DECAY
|
||||
observation_input_invalid[which] *= input_invalid_decay[which]
|
||||
observation_timing_invalid[which] *= timing_invalid_decay[which]
|
||||
else:
|
||||
filter_initialized = sm.all_checks() and sensor_all_checks(acc_msgs, gyro_msgs, sensor_valid, sensor_recv_time, sensor_alive, SIMULATION)
|
||||
|
||||
if sm.updated["cameraOdometry"]:
|
||||
critical_service_inputs_valid = all(observation_input_invalid[s] < INPUT_INVALID_THRESHOLD for s in critcal_services)
|
||||
inputs_valid = sm.all_valid() and critical_service_inputs_valid and not observation_timing_invalid
|
||||
critical_service_timing_valid = all(observation_timing_invalid[s] < TIMING_INVALID_THRESHOLD for s in critcal_services)
|
||||
inputs_valid = sm.all_valid() and critical_service_inputs_valid and critical_service_timing_valid
|
||||
sensors_valid = sensor_all_checks(acc_msgs, gyro_msgs, sensor_valid, sensor_recv_time, sensor_alive, SIMULATION)
|
||||
|
||||
msg = estimator.get_msg(sensors_valid, inputs_valid, filter_initialized)
|
||||
|
||||
@@ -17,6 +17,7 @@ SELECT_COMPARE_FIELDS = {
|
||||
'sensors_flag': ['sensorsOK'],
|
||||
}
|
||||
JUNK_IDX = 100
|
||||
CONSISTENT_SPIKES_COUNT = 10
|
||||
|
||||
|
||||
class Scenario(Enum):
|
||||
@@ -25,6 +26,8 @@ class Scenario(Enum):
|
||||
GYRO_SPIKE_MIDWAY = 'gyro_spike_midway'
|
||||
ACCEL_OFF = 'accel_off'
|
||||
ACCEL_SPIKE_MIDWAY = 'accel_spike_midway'
|
||||
SENSOR_TIMING_SPIKE_MIDWAY = 'timing_spikes'
|
||||
SENSOR_TIMING_CONSISTENT_SPIKES = 'timing_consistent_spikes'
|
||||
|
||||
|
||||
def get_select_fields_data(logs):
|
||||
@@ -43,6 +46,17 @@ def get_select_fields_data(logs):
|
||||
return data
|
||||
|
||||
|
||||
def modify_logs_midway(logs, which, count, fn):
  """Return `logs` with up to `count` consecutive messages of type `which` mutated.

  Starting at the midpoint of the messages whose `.which()` equals `which`,
  each selected message is converted to a builder, passed to `fn` for in-place
  modification, and converted back to a reader. Messages of other types are
  untouched. The result is re-sorted by `logMonoTime` so the stream stays in
  chronological order.

  Args:
    logs: iterable of capnp log messages (each has .which(), .as_builder(),
          .logMonoTime) — assumed capnp readers; TODO confirm against callers.
    which: message type name to target (e.g. 'accelerometer').
    count: number of consecutive midpoint messages to modify; silently
           truncated if it runs past the end of the matching messages.
    fn: callable applied to each message builder to mutate it.

  Returns:
    A new, time-sorted list containing the modified and unmodified messages.
  """
  matched = [m for m in logs if m.which() == which]
  others = [m for m in logs if m.which() != which]
  mid = len(matched) // 2
  # min() keeps the original slice-truncation semantics when count overruns.
  for idx in range(mid, min(mid + count, len(matched))):
    builder = matched[idx].as_builder()
    fn(builder)
    matched[idx] = builder.as_reader()
  return sorted(others + matched, key=lambda m: m.logMonoTime)
|
||||
|
||||
|
||||
def run_scenarios(scenario, logs):
|
||||
if scenario == Scenario.BASE:
|
||||
pass
|
||||
@@ -51,23 +65,23 @@ def run_scenarios(scenario, logs):
|
||||
logs = sorted([x for x in logs if x.which() != 'gyroscope'], key=lambda x: x.logMonoTime)
|
||||
|
||||
elif scenario == Scenario.GYRO_SPIKE_MIDWAY:
|
||||
non_gyro = [x for x in logs if x.which() not in 'gyroscope']
|
||||
gyro = [x for x in logs if x.which() in 'gyroscope']
|
||||
temp = gyro[len(gyro) // 2].as_builder()
|
||||
temp.gyroscope.gyroUncalibrated.v[0] += 3.0
|
||||
gyro[len(gyro) // 2] = temp.as_reader()
|
||||
logs = sorted(non_gyro + gyro, key=lambda x: x.logMonoTime)
|
||||
def gyro_spike(msg):
|
||||
msg.gyroscope.gyroUncalibrated.v[0] += 3.0
|
||||
logs = modify_logs_midway(logs, 'gyroscope', 1, gyro_spike)
|
||||
|
||||
elif scenario == Scenario.ACCEL_OFF:
|
||||
logs = sorted([x for x in logs if x.which() != 'accelerometer'], key=lambda x: x.logMonoTime)
|
||||
|
||||
elif scenario == Scenario.ACCEL_SPIKE_MIDWAY:
|
||||
non_accel = [x for x in logs if x.which() not in 'accelerometer']
|
||||
accel = [x for x in logs if x.which() in 'accelerometer']
|
||||
temp = accel[len(accel) // 2].as_builder()
|
||||
temp.accelerometer.acceleration.v[0] += 10.0
|
||||
accel[len(accel) // 2] = temp.as_reader()
|
||||
logs = sorted(non_accel + accel, key=lambda x: x.logMonoTime)
|
||||
def acc_spike(msg):
|
||||
msg.accelerometer.acceleration.v[0] += 10.0
|
||||
logs = modify_logs_midway(logs, 'accelerometer', 1, acc_spike)
|
||||
|
||||
elif scenario == Scenario.SENSOR_TIMING_SPIKE_MIDWAY or scenario == Scenario.SENSOR_TIMING_CONSISTENT_SPIKES:
|
||||
def timing_spike(msg):
|
||||
msg.accelerometer.timestamp -= int(0.150 * 1e9)
|
||||
count = 1 if scenario == Scenario.SENSOR_TIMING_SPIKE_MIDWAY else CONSISTENT_SPIKES_COUNT
|
||||
logs = modify_logs_midway(logs, 'accelerometer', count, timing_spike)
|
||||
|
||||
replayed_logs = replay_process_with_name(name='locationd', lr=logs)
|
||||
return get_select_fields_data(logs), get_select_fields_data(replayed_logs)
|
||||
@@ -122,7 +136,7 @@ class TestLocationdScenarios:
|
||||
assert np.allclose(orig_data['yaw_rate'], replayed_data['yaw_rate'], atol=np.radians(0.35))
|
||||
assert np.allclose(orig_data['roll'], replayed_data['roll'], atol=np.radians(0.55))
|
||||
assert np.diff(replayed_data['inputs_flag'])[499] == -1.0
|
||||
assert np.diff(replayed_data['inputs_flag'])[696] == 1.0
|
||||
assert np.diff(replayed_data['inputs_flag'])[704] == 1.0
|
||||
|
||||
def test_accel_off(self):
|
||||
"""
|
||||
@@ -146,3 +160,21 @@ class TestLocationdScenarios:
|
||||
orig_data, replayed_data = run_scenarios(Scenario.ACCEL_SPIKE_MIDWAY, self.logs)
|
||||
assert np.allclose(orig_data['yaw_rate'], replayed_data['yaw_rate'], atol=np.radians(0.35))
|
||||
assert np.allclose(orig_data['roll'], replayed_data['roll'], atol=np.radians(0.55))
|
||||
|
||||
def test_single_timing_spike(self):
|
||||
"""
|
||||
Test: timing of 150ms off for the single accelerometer message in the middle of the segment
|
||||
Expected Result: the message is ignored, and inputsOK is False for that time
|
||||
"""
|
||||
orig_data, replayed_data = run_scenarios(Scenario.SENSOR_TIMING_SPIKE_MIDWAY, self.logs)
|
||||
assert np.all(replayed_data['inputs_flag'] == orig_data['inputs_flag'])
|
||||
assert np.all(replayed_data['sensors_flag'] == orig_data['sensors_flag'])
|
||||
|
||||
def test_consistent_timing_spikes(self):
|
||||
"""
|
||||
Test: consistent timing spikes for N accelerometer messages in the middle of the segment
|
||||
Expected Result: inputsOK becomes False after N of bad measurements
|
||||
"""
|
||||
orig_data, replayed_data = run_scenarios(Scenario.SENSOR_TIMING_CONSISTENT_SPIKES, self.logs)
|
||||
assert np.diff(replayed_data['inputs_flag'])[500] == -1.0
|
||||
assert np.diff(replayed_data['inputs_flag'])[787] == 1.0
|
||||
|
||||
Reference in New Issue
Block a user