Mirror of https://github.com/sunnypilot/sunnypilot.git
Add laikadOffline subtest to process replay. (#24995)
* Add subtests to process replay. Adds laikadOffline subtest
* Update cpp.
* Update ref
* Update ref again
* Update ref again
* update ref
* Fix disabling fetching orbits
* Add proc name to event exception
* update ref
* Update setup_env
* Fix offline test and update refs
@@ -1,5 +1,6 @@
 #!/usr/bin/env python3
 import json
+import math
 import os
 import time
 from collections import defaultdict
@@ -29,11 +30,12 @@ CACHE_VERSION = 0.1
 
 
 class Laikad:
-  def __init__(self, valid_const=("GPS", "GLONASS"), auto_update=False, valid_ephem_types=(EphemerisType.ULTRA_RAPID_ORBIT, EphemerisType.NAV),
+  def __init__(self, valid_const=("GPS", "GLONASS"), auto_fetch_orbits=True, auto_update=False, valid_ephem_types=(EphemerisType.ULTRA_RAPID_ORBIT, EphemerisType.NAV),
                save_ephemeris=False, last_known_position=None):
     self.astro_dog = AstroDog(valid_const=valid_const, auto_update=auto_update, valid_ephem_types=valid_ephem_types, clear_old_ephemeris=True)
     self.gnss_kf = GNSSKalman(GENERATED_DIR, cython=True)
 
+    self.auto_fetch_orbits = auto_fetch_orbits
     self.orbit_fetch_executor: Optional[ProcessPoolExecutor] = None
     self.orbit_fetch_future: Optional[Future] = None
 
@@ -41,6 +43,7 @@ class Laikad:
     self.last_cached_t = None
     self.save_ephemeris = save_ephemeris
     self.load_cache()
+
     self.posfix_functions = {constellation: get_posfix_sympy_fun(constellation) for constellation in (ConstellationId.GPS, ConstellationId.GLONASS)}
     self.last_pos_fix = last_known_position if last_known_position is not None else []
     self.last_pos_residual = []
@@ -85,7 +88,8 @@ class Laikad:
     report = ublox_msg.measurementReport
     if report.gpsWeek > 0:
       latest_msg_t = GPSTime(report.gpsWeek, report.rcvTow)
-      self.fetch_orbits(latest_msg_t + SECS_IN_MIN, block)
+      if self.auto_fetch_orbits:
+        self.fetch_orbits(latest_msg_t + SECS_IN_MIN, block)
 
     new_meas = read_raw_ublox(report)
     processed_measurements = process_measurements(new_meas, self.astro_dog)
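
The gate above is what the new offline subtest relies on: a Laikad built with auto_fetch_orbits=False never schedules an orbit download, even when valid measurement reports arrive. A minimal standalone sketch of that gating pattern (a stub class, not the real Laikad):

class OrbitFetchStub:
  def __init__(self, auto_fetch_orbits=True):
    self.auto_fetch_orbits = auto_fetch_orbits
    self.fetch_count = 0

  def fetch_orbits(self, t):
    # stands in for the real download / ProcessPoolExecutor work
    self.fetch_count += 1

  def process_report(self, t):
    if self.auto_fetch_orbits:  # same gate as in process_ublox_msg above
      self.fetch_orbits(t)

online, offline = OrbitFetchStub(), OrbitFetchStub(auto_fetch_orbits=False)
online.process_report(0.0)
offline.process_report(0.0)
assert (online.fetch_count, offline.fetch_count) == (1, 0)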
@@ -146,8 +150,8 @@ class Laikad:
 
   def kf_valid(self, t: float) -> List[bool]:
     filter_time = self.gnss_kf.filter.get_filter_time()
-    return [filter_time is not None,
-            filter_time is not None and abs(t - filter_time) < MAX_TIME_GAP,
+    return [not math.isnan(filter_time),
+            abs(t - filter_time) < MAX_TIME_GAP,
             all(np.isfinite(self.gnss_kf.x[GStates.ECEF_POS]))]
 
   def init_gnss_localizer(self, est_pos):
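
The rewritten check assumes gnss_kf.filter.get_filter_time() reports NaN (rather than None) while the filter is uninitialized, so a single isnan test replaces the two None guards; hence the new `import math` at the top of the file. A small standalone sketch of the same predicate with stub inputs (the MAX_TIME_GAP value below is illustrative only):

import math
import numpy as np

MAX_TIME_GAP = 10  # illustrative only

def kf_valid(filter_time, t, ecef_pos):
  # NaN filter time means the filter has never been initialized
  return [not math.isnan(filter_time),
          abs(t - filter_time) < MAX_TIME_GAP,
          all(np.isfinite(ecef_pos))]

print(kf_valid(float("nan"), 100.0, np.zeros(3)))  # [False, False, True]
print(kf_valid(95.0, 100.0, np.zeros(3)))          # [True, True, True]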
@@ -275,7 +279,8 @@ def main(sm=None, pm=None):
 
   replay = "REPLAY" in os.environ
   # todo get last_known_position
-  laikad = Laikad(save_ephemeris=not replay)
+  use_internet = "LAIKAD_NO_INTERNET" not in os.environ
+  laikad = Laikad(save_ephemeris=not replay, auto_fetch_orbits=use_internet)
   while True:
     sm.update()
 
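
This is the runtime switch the replay uses: setting LAIKAD_NO_INTERNET in the environment makes main() build Laikad with auto_fetch_orbits=False. A tiny sketch of the toggle itself:

import os

os.environ["LAIKAD_NO_INTERNET"] = "1"  # what the offline ProcessConfig below sets
use_internet = "LAIKAD_NO_INTERNET" not in os.environ
assert use_internet is False  # Laikad would then be built with auto_fetch_orbits=False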
@@ -27,14 +27,14 @@ TIMEOUT = 15
 PROC_REPLAY_DIR = os.path.dirname(os.path.abspath(__file__))
 FAKEDATA = os.path.join(PROC_REPLAY_DIR, "fakedata/")
 
-ProcessConfig = namedtuple('ProcessConfig', ['proc_name', 'pub_sub', 'ignore', 'init_callback', 'should_recv_callback', 'tolerance', 'fake_pubsubmaster', 'submaster_config'], defaults=({},))
+ProcessConfig = namedtuple('ProcessConfig', ['proc_name', 'pub_sub', 'ignore', 'init_callback', 'should_recv_callback', 'tolerance', 'fake_pubsubmaster', 'submaster_config', 'environ', 'subtest_name'], defaults=({}, {}, ""))
 
 
 def wait_for_event(evt):
   if not evt.wait(TIMEOUT):
     if threading.currentThread().getName() == "MainThread":
       # tested process likely died. don't let test just hang
-      raise Exception("Timeout reached. Tested process likely crashed.")
+      raise Exception(f"Timeout reached. Tested process {os.environ['PROC_NAME']} likely crashed.")
     else:
       # done testing this process, let it die
       sys.exit(0)
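
Since namedtuple defaults attach to the rightmost fields, every existing ProcessConfig that omits the two new fields keeps working: submaster_config, environ and subtest_name default to {}, {} and "" respectively. A quick check of that behavior with a reduced field list (not the full ProcessConfig above):

from collections import namedtuple

PC = namedtuple('PC', ['proc_name', 'submaster_config', 'environ', 'subtest_name'],
                defaults=({}, {}, ""))

cfg = PC(proc_name="laikad")
print(cfg.submaster_config, cfg.environ, cfg.subtest_name)  # {} {} ''

One caveat of this pattern is that the default dicts are shared between instances, which is harmless here because the configs never mutate them.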
@@ -190,6 +190,7 @@ def get_car_params(msgs, fsm, can_sock, fingerprint):
     _, CP = get_car(can, sendcan)
   Params().put("CarParams", CP.to_bytes())
 
+
 def controlsd_rcv_callback(msg, CP, cfg, fsm):
   # no sendcan until controlsd is initialized
   socks = [s for s in cfg.pub_sub[msg.which()] if
@@ -198,6 +199,7 @@ def controlsd_rcv_callback(msg, CP, cfg, fsm):
     socks.remove("sendcan")
   return socks, len(socks) > 0
 
+
 def radar_rcv_callback(msg, CP, cfg, fsm):
   if msg.which() != "can":
     return [], False
@@ -240,7 +242,7 @@ def laika_rcv_callback(msg, CP, cfg, fsm):
   if msg.ubloxGnss.which() == "measurementReport":
     return ["gnssMeasurements"], True
   else:
-    return [], False
+    return [], True
 
 
 CONFIGS = [
@@ -345,6 +347,19 @@ CONFIGS = [
     tolerance=None,
     fake_pubsubmaster=False,
   ),
+  ProcessConfig(
+    proc_name="laikad",
+    subtest_name="Offline",
+    pub_sub={
+      "ubloxGnss": ["gnssMeasurements"],
+    },
+    ignore=["logMonoTime"],
+    init_callback=get_car_params,
+    should_recv_callback=laika_rcv_callback,
+    tolerance=NUMPY_TOLERANCE,
+    fake_pubsubmaster=True,
+    environ={"LAIKAD_NO_INTERNET": "1"},
+  ),
   ProcessConfig(
     proc_name="laikad",
     pub_sub={
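
CONFIGS now carries two laikad entries, so any test that filters by proc_name runs laikad twice: once as the plain subtest and once as "Offline" with LAIKAD_NO_INTERNET set. A hedged sketch of listing them, assuming the usual module path for this file:

from selfdrive.test.process_replay.process_replay import CONFIGS  # path assumed

for c in CONFIGS:
  if c.proc_name == "laikad":
    # e.g. "laikadOffline {'LAIKAD_NO_INTERNET': '1'}" and "laikad {}"
    print(c.proc_name + c.subtest_name, c.environ)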
@@ -366,7 +381,8 @@ def replay_process(cfg, lr, fingerprint=None):
   else:
     return cpp_replay_process(cfg, lr, fingerprint)
 
-def setup_env(simulation=False, CP=None):
+
+def setup_env(simulation=False, CP=None, cfg=None):
   params = Params()
   params.clear_all()
   params.put_bool("OpenpilotEnabledToggle", True)
@@ -380,6 +396,16 @@ def setup_env(simulation=False, CP=None):
   os.environ['SKIP_FW_QUERY'] = ""
   os.environ['FINGERPRINT'] = ""
 
+  if cfg is not None:
+    # Clear all custom processConfig environment variables
+    for cfg in CONFIGS:
+      for k, _ in cfg.environ.items():
+        if k in os.environ:
+          del os.environ[k]
+
+    os.environ.update(cfg.environ)
+    os.environ['PROC_NAME'] = cfg.proc_name
+
   if simulation:
     os.environ["SIMULATION"] = "1"
   elif "SIMULATION" in os.environ:
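
The inserted block first deletes every variable that any config's environ could have set, then applies only the current config's overrides, so one subtest cannot leak environment state into the next. A standalone sketch of that clear-then-apply pattern (hypothetical helper and flag names, not the code above):

import os

ALL_OVERRIDES = [{"LAIKAD_NO_INTERNET": "1"}, {"SOME_OTHER_FLAG": "1"}]  # hypothetical

def apply_environ(overrides):
  # clear anything a previous config may have left behind...
  for o in ALL_OVERRIDES:
    for k in o:
      os.environ.pop(k, None)
  # ...then set only what this config asks for
  os.environ.update(overrides)

apply_environ({"LAIKAD_NO_INTERNET": "1"})
assert os.environ["LAIKAD_NO_INTERNET"] == "1"
apply_environ({})
assert "LAIKAD_NO_INTERNET" not in os.environ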
@@ -396,6 +422,7 @@ def setup_env(simulation=False, CP=None):
     os.environ['SKIP_FW_QUERY'] = "1"
     os.environ['FINGERPRINT'] = CP.carFingerprint
 
+
 def python_replay_process(cfg, lr, fingerprint=None):
   sub_sockets = [s for _, sub in cfg.pub_sub.items() for s in sub]
   pub_sockets = [s for s in cfg.pub_sub.keys() if s != 'can']
@@ -413,10 +440,10 @@ def python_replay_process(cfg, lr, fingerprint=None):
   if fingerprint is not None:
     os.environ['SKIP_FW_QUERY'] = "1"
     os.environ['FINGERPRINT'] = fingerprint
-    setup_env()
+    setup_env(cfg=cfg)
   else:
     CP = [m for m in lr if m.which() == 'carParams'][0].carParams
-    setup_env(CP=CP)
+    setup_env(CP=CP, cfg=cfg)
 
   assert(type(managed_processes[cfg.proc_name]) is PythonProcess)
   managed_processes[cfg.proc_name].prepare()
@@ -477,7 +504,7 @@ def cpp_replay_process(cfg, lr, fingerprint=None):
   log_msgs = []
 
   # We need to fake SubMaster alive since we can't inject a fake clock
-  setup_env(simulation=True)
+  setup_env(simulation=True, cfg=cfg)
 
   managed_processes[cfg.proc_name].prepare()
   managed_processes[cfg.proc_name].start()
@@ -1 +1 @@
-a0b5ce7b2e0b9c073e51ac8908402d53e1d99722
+a9adebff7ce27d6233d443217a30337b761898ee
@@ -200,11 +200,11 @@ if __name__ == "__main__":
       if cfg.proc_name not in tested_procs:
         continue
 
-      cur_log_fn = os.path.join(FAKEDATA, f"{segment}_{cfg.proc_name}_{cur_commit}.bz2")
+      cur_log_fn = os.path.join(FAKEDATA, f"{segment}_{cfg.proc_name}{cfg.subtest_name}_{cur_commit}.bz2")
       if args.update_refs:  # reference logs will not exist if routes were just regenerated
         ref_log_path = get_url(*segment.rsplit("--", 1))
       else:
-        ref_log_fn = os.path.join(FAKEDATA, f"{segment}_{cfg.proc_name}_{ref_commit}.bz2")
+        ref_log_fn = os.path.join(FAKEDATA, f"{segment}_{cfg.proc_name}{cfg.subtest_name}_{ref_commit}.bz2")
         ref_log_path = ref_log_fn if os.path.exists(ref_log_fn) else BASE_URL + os.path.basename(ref_log_fn)
 
       dat = None if args.upload_only else log_data[segment]
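
Folding subtest_name into the file names keeps the two laikad runs from clobbering each other's logs: the offline run reads and writes *_laikadOffline_<commit>.bz2 while the plain run keeps *_laikad_<commit>.bz2. A quick illustration with a made-up segment name (the commit abbreviates the new ref above):

import os

FAKEDATA = "fakedata/"
segment, commit = "ffffffffffffffff|2022-06-01--00-00-00--0", "a9adebf"  # segment is made up
for subtest_name in ("", "Offline"):
  print(os.path.join(FAKEDATA, f"{segment}_laikad{subtest_name}_{commit}.bz2"))
# fakedata/ffffffffffffffff|2022-06-01--00-00-00--0_laikad_a9adebf.bz2
# fakedata/ffffffffffffffff|2022-06-01--00-00-00--0_laikadOffline_a9adebf.bz2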