modeld: configurable camera offset (#1614)

* modeld: configurable camera offset

Negative values shear the image to the left, moving the model's center to the right.

Positive values shear the image to the right, moving the model's center to the left.

A minimal sketch of the shear math is included after the changed-files summary below.

* modeld: camera offset class

* verify zero offset I @ A = A

* slithered and slunked

* Update params_metadata.json

* wait

* Update model_renderer.py

* Update model_renderer.py

* requested changes

* stricter

* Update model_renderer.py

* more

* return default

* Update params_metadata.json

* final

---------

Co-authored-by: Jason Wen <haibin.wen3@gmail.com>
James Vecellio-Grant
2026-01-01 12:23:01 -07:00
committed by GitHub
parent 19a7d1d5d7
commit a30fc9bcd2
9 changed files with 162 additions and 11 deletions
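For reference, here is a minimal, illustrative sketch of the shear described in the commit message. It mirrors the apply_camera_offset helper added in this commit; the identity warp matrix, the cy value, and the 1.22 m camera height are illustrative stand-ins, not values taken from the diff.

import numpy as np

def shear_for_offset(offset_m, height_m, cy):
  # Build the 3x3 shear that virtually shifts the camera sideways by offset_m meters,
  # matching CameraOffsetHelper.apply_camera_offset below.
  shear = np.eye(3, dtype=np.float32)
  shear[0, 1] = offset_m / height_m          # horizontal shear proportional to offset / camera height
  shear[0, 2] = -offset_m / height_m * cy    # re-center the shear around the principal point row cy
  return shear

warp = np.eye(3, dtype=np.float32)  # stand-in for a get_warp_matrix(...) result
cy = 604.0                          # illustrative principal-point y; the real value comes from the camera intrinsics

sheared_left = shear_for_offset(-0.2, 1.22, cy) @ warp   # negative offset: image sheared left, model center moves right
sheared_right = shear_for_offset(0.2, 1.22, cy) @ warp   # positive offset: image sheared right, model center moves left
print(sheared_left[0], sheared_right[0])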


@@ -221,6 +221,7 @@ inline static std::unordered_map<std::string, ParamKeyAttributes> keys = {
{"BlindSpot", {PERSISTENT | BACKUP, BOOL, "0"}},
// sunnypilot model params
{"CameraOffset", {PERSISTENT | BACKUP, FLOAT, "0.0"}},
{"LagdToggle", {PERSISTENT | BACKUP, BOOL, "1"}},
{"LagdToggleDelay", {PERSISTENT | BACKUP, FLOAT, "0.2"}},
{"LagdValueCache", {PERSISTENT, FLOAT, "0.2"}},


@@ -80,6 +80,9 @@ class ModelRenderer(Widget):
self._transform_dirty = True
self._clip_region = None
self._counter = -1
self._camera_offset = ui_state.params.get("CameraOffset", return_default=True) if ui_state.active_bundle else 0.0
self._exp_gradient = Gradient(
start=(0.0, 1.0), # Bottom of path
end=(0.0, 0.0), # Top of path
@@ -99,6 +102,10 @@ class ModelRenderer(Widget):
def _render(self, rect: rl.Rectangle):
sm = ui_state.sm
if self._counter % 180 == 0: # This runs at 60fps, so we query every 3 seconds
self._camera_offset = ui_state.params.get("CameraOffset", return_default=True) if ui_state.active_bundle else 0.0
self._counter += 1
self._torque_filter.update(-ui_state.sm['carOutput'].actuatorsOutput.torque)
# Check if data is up-to-date
@@ -150,13 +157,13 @@ class ModelRenderer(Widget):
def _update_raw_points(self, model):
"""Update raw 3D points from model data"""
self._path.raw_points = np.array([model.position.x, model.position.y, model.position.z], dtype=np.float32).T
self._path.raw_points = np.array([model.position.x, np.array(model.position.y) + self._camera_offset, model.position.z], dtype=np.float32).T
for i, lane_line in enumerate(model.laneLines):
self._lane_lines[i].raw_points = np.array([lane_line.x, lane_line.y, lane_line.z], dtype=np.float32).T
self._lane_lines[i].raw_points = np.array([lane_line.x, np.array(lane_line.y) + self._camera_offset, lane_line.z], dtype=np.float32).T
for i, road_edge in enumerate(model.roadEdges):
self._road_edges[i].raw_points = np.array([road_edge.x, road_edge.y, road_edge.z], dtype=np.float32).T
self._road_edges[i].raw_points = np.array([road_edge.x, np.array(road_edge.y) + self._camera_offset, road_edge.z], dtype=np.float32).T
self._lane_line_probs = np.array(model.laneLineProbs, dtype=np.float32)
self._road_edge_stds = np.array(model.roadEdgeStds, dtype=np.float32)
@@ -174,7 +181,7 @@ class ModelRenderer(Widget):
# Get z-coordinate from path at the lead vehicle position
z = self._path.raw_points[idx, 2] if idx < len(self._path.raw_points) else 0.0
point = self._map_to_screen(d_rel, -y_rel, z + self._path_offset_z)
point = self._map_to_screen(d_rel, -y_rel + self._camera_offset, z + self._path_offset_z)
if point:
self._lead_vehicles[i] = self._update_lead_vehicle(d_rel, v_rel, point, self._rect)


@@ -56,7 +56,8 @@ class ModelRenderer(Widget, ChevronMetrics, ModelRendererSP):
self._road_edge_stds = np.zeros(2, dtype=np.float32)
self._lead_vehicles = [LeadVehicle(), LeadVehicle()]
self._path_offset_z = HEIGHT_INIT[0]
self._counter = -1
self._camera_offset = ui_state.params.get("CameraOffset", return_default=True) if ui_state.active_bundle else 0.0
# Initialize ModelPoints objects
self._path = ModelPoints()
self._lane_lines = [ModelPoints() for _ in range(4)]
@@ -103,6 +104,10 @@ class ModelRenderer(Widget, ChevronMetrics, ModelRendererSP):
live_calib = sm['liveCalibration']
self._path_offset_z = live_calib.height[0] if live_calib.height else HEIGHT_INIT[0]
if self._counter % 60 == 0:
self._camera_offset = ui_state.params.get("CameraOffset", return_default=True) if ui_state.active_bundle else 0.0
self._counter += 1
if sm.updated['carParams']:
self._longitudinal_control = sm['carParams'].openpilotLongitudinalControl
@@ -136,13 +141,13 @@ class ModelRenderer(Widget, ChevronMetrics, ModelRendererSP):
def _update_raw_points(self, model):
"""Update raw 3D points from model data"""
self._path.raw_points = np.array([model.position.x, model.position.y, model.position.z], dtype=np.float32).T
self._path.raw_points = np.array([model.position.x, np.array(model.position.y) + self._camera_offset, model.position.z], dtype=np.float32).T
for i, lane_line in enumerate(model.laneLines):
self._lane_lines[i].raw_points = np.array([lane_line.x, lane_line.y, lane_line.z], dtype=np.float32).T
self._lane_lines[i].raw_points = np.array([lane_line.x, np.array(lane_line.y) + self._camera_offset, lane_line.z], dtype=np.float32).T
for i, road_edge in enumerate(model.roadEdges):
self._road_edges[i].raw_points = np.array([road_edge.x, road_edge.y, road_edge.z], dtype=np.float32).T
self._road_edges[i].raw_points = np.array([road_edge.x, np.array(road_edge.y) + self._camera_offset, road_edge.z], dtype=np.float32).T
self._lane_line_probs = np.array(model.laneLineProbs, dtype=np.float32)
self._road_edge_stds = np.array(model.roadEdgeStds, dtype=np.float32)
@@ -160,7 +165,7 @@ class ModelRenderer(Widget, ChevronMetrics, ModelRendererSP):
# Get z-coordinate from path at the lead vehicle position
z = self._path.raw_points[idx, 2] if idx < len(self._path.raw_points) else 0.0
point = self._map_to_screen(d_rel, -y_rel, z + self._path_offset_z)
point = self._map_to_screen(d_rel, -y_rel + self._camera_offset, z + self._path_offset_z)
if point:
self._lead_vehicles[i] = self._update_lead_vehicle(d_rel, v_rel, point, self._rect)


@@ -73,6 +73,7 @@ class UIStateSP:
self.developer_ui = self.params.get("DevUIInfo")
self.rainbow_path = self.params.get_bool("RainbowMode")
self.chevron_metrics = self.params.get("ChevronInfo")
self.active_bundle = self.params.get("ModelManager_ActiveBundle")
class DeviceSP:


@@ -24,6 +24,7 @@ from openpilot.sunnypilot.livedelay.helpers import get_lat_delay
from openpilot.sunnypilot.modeld.runners import ModelRunner, Runtime
from openpilot.sunnypilot.modeld.parse_model_outputs import Parser
from openpilot.sunnypilot.modeld.fill_model_msg import fill_model_msg, fill_pose_msg, PublishState
from openpilot.sunnypilot.modeld_v2.camera_offset_helper import CameraOffsetHelper
from openpilot.sunnypilot.modeld.constants import ModelConstants, Plan
from openpilot.sunnypilot.models.helpers import get_active_bundle, get_model_path, load_metadata, prepare_inputs, load_meta_constants
from openpilot.sunnypilot.modeld.models.commonmodel_pyx import ModelFrame, CLContext
@@ -195,6 +196,7 @@ def main(demo=False):
buf_main, buf_extra = None, None
meta_main = FrameMeta()
meta_extra = FrameMeta()
camera_offset_helper = CameraOffsetHelper()
if demo:
@@ -250,12 +252,14 @@ def main(demo=False):
frame_id = sm["roadCameraState"].frameId
if sm.frame % 60 == 0:
model.lat_delay = get_lat_delay(params, sm["liveDelay"].lateralDelay)
camera_offset_helper.set_offset(params.get("CameraOffset", return_default=True))
lat_delay = model.lat_delay + model.LAT_SMOOTH_SECONDS
if sm.updated["liveCalibration"] and sm.seen['roadCameraState'] and sm.seen['deviceState']:
device_from_calib_euler = np.array(sm["liveCalibration"].rpyCalib, dtype=np.float32)
dc = DEVICE_CAMERAS[(str(sm['deviceState'].deviceType), str(sm['roadCameraState'].sensor))]
model_transform_main = get_warp_matrix(device_from_calib_euler, dc.ecam.intrinsics if main_wide_camera else dc.fcam.intrinsics, False).astype(np.float32)
model_transform_extra = get_warp_matrix(device_from_calib_euler, dc.ecam.intrinsics, True).astype(np.float32)
model_transform_main, model_transform_extra = camera_offset_helper.update(model_transform_main, model_transform_extra, sm, main_wide_camera)
live_calib_seen = True
traffic_convention = np.zeros(2)


@@ -0,0 +1,39 @@
"""
Copyright (c) 2021-, Haibin Wen, sunnypilot, and a number of other contributors.
This file is part of sunnypilot and is licensed under the MIT License.
See the LICENSE.md file in the root directory for more details.
"""
import numpy as np
from openpilot.common.transformations.camera import DEVICE_CAMERAS
class CameraOffsetHelper:
def __init__(self):
self.camera_offset = 0.0
self.actual_camera_offset = 0.0
@staticmethod
def apply_camera_offset(model_transform, intrinsics, height, offset_param):
cy = intrinsics[1, 2]
shear = np.eye(3, dtype=np.float32)
shear[0, 1] = offset_param / height
shear[0, 2] = -offset_param / height * cy
model_transform = (shear @ model_transform).astype(np.float32)
return model_transform
def set_offset(self, offset):
self.camera_offset = offset
def update(self, model_transform_main, model_transform_extra, sm, main_wide_camera):
self.actual_camera_offset = (0.9 * self.actual_camera_offset) + (0.1 * self.camera_offset)
dc = DEVICE_CAMERAS[(str(sm['deviceState'].deviceType), str(sm['roadCameraState'].sensor))]
height = sm["liveCalibration"].height[0] if sm['liveCalibration'].height else 1.22
intrinsics_main = dc.ecam.intrinsics if main_wide_camera else dc.fcam.intrinsics
model_transform_main = self.apply_camera_offset(model_transform_main, intrinsics_main, height, self.actual_camera_offset)
intrinsics_extra = dc.ecam.intrinsics
model_transform_extra = self.apply_camera_offset(model_transform_extra, intrinsics_extra, height, self.actual_camera_offset)
return model_transform_main, model_transform_extra
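A quick sanity sketch of this helper's behavior (the numbers mirror the expectations in the tests added further below): the requested offset is low-pass filtered with a 0.9/0.1 exponential moving average, and a zero offset leaves the warp matrices untouched because the shear collapses to the identity, which is the "verify zero offset I @ A = A" check from the commit list.

import numpy as np

# EMA smoothing: starting from 0.0 and targeting 0.2, the first two updates land on 0.02 and 0.038.
offset_target = 0.2
smoothed = 0.0
for expected in (0.02, 0.038):
  smoothed = 0.9 * smoothed + 0.1 * offset_target   # same update rule as CameraOffsetHelper.update
  assert np.isclose(smoothed, expected)

# Zero offset: shear[0, 1] and shear[0, 2] are both 0, so the shear is the identity and I @ A == A.
A = np.arange(9, dtype=np.float32).reshape(3, 3)    # arbitrary stand-in for a warp matrix
shear = np.eye(3, dtype=np.float32)
np.testing.assert_array_equal(shear @ A, A)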


@@ -21,6 +21,7 @@ from openpilot.sunnypilot.modeld_v2.fill_model_msg import fill_model_msg, fill_p
from openpilot.sunnypilot.modeld_v2.constants import Plan
from openpilot.sunnypilot.modeld_v2.models.commonmodel_pyx import DrivingModelFrame, CLContext
from openpilot.sunnypilot.modeld_v2.meta_helper import load_meta_constants
from openpilot.sunnypilot.modeld_v2.camera_offset_helper import CameraOffsetHelper
from openpilot.sunnypilot.livedelay.helpers import get_lat_delay
from openpilot.sunnypilot.modeld.modeld_base import ModelStateBase
@@ -230,6 +231,7 @@ def main(demo=False):
buf_main, buf_extra = None, None
meta_main = FrameMeta()
meta_extra = FrameMeta()
camera_offset_helper = CameraOffsetHelper()
if demo:
@@ -285,13 +287,14 @@ def main(demo=False):
if sm.frame % 60 == 0:
model.lat_delay = get_lat_delay(params, sm["liveDelay"].lateralDelay)
model.PLANPLUS_CONTROL = params.get("PlanplusControl", return_default=True)
camera_offset_helper.set_offset(params.get("CameraOffset", return_default=True))
lat_delay = model.lat_delay + model.LAT_SMOOTH_SECONDS
if sm.updated["liveCalibration"] and sm.seen['roadCameraState'] and sm.seen['deviceState']:
device_from_calib_euler = np.array(sm["liveCalibration"].rpyCalib, dtype=np.float32)
dc = DEVICE_CAMERAS[(str(sm['deviceState'].deviceType), str(sm['roadCameraState'].sensor))]
model_transform_main = get_warp_matrix(device_from_calib_euler, dc.ecam.intrinsics if main_wide_camera else dc.fcam.intrinsics,
False).astype(np.float32)
model_transform_main = get_warp_matrix(device_from_calib_euler, dc.ecam.intrinsics if main_wide_camera else dc.fcam.intrinsics, False).astype(np.float32)
model_transform_extra = get_warp_matrix(device_from_calib_euler, dc.ecam.intrinsics, True).astype(np.float32)
model_transform_main, model_transform_extra = camera_offset_helper.update(model_transform_main, model_transform_extra, sm, main_wide_camera)
live_calib_seen = True
traffic_convention = np.zeros(2)


@@ -0,0 +1,84 @@
"""
Copyright (c) 2021-, Haibin Wen, sunnypilot, and a number of other contributors.
This file is part of sunnypilot and is licensed under the MIT License.
See the LICENSE.md file in the root directory for more details.
"""
import numpy as np
from openpilot.common.transformations.camera import DEVICE_CAMERAS
from openpilot.common.transformations.model import get_warp_matrix
from openpilot.sunnypilot.modeld_v2.camera_offset_helper import CameraOffsetHelper
class MockStruct:
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
def __getitem__(self, item):
return getattr(self, item)
class TestCameraOffset:
def setup_method(self):
self.camera_offset = CameraOffsetHelper()
self.dc = DEVICE_CAMERAS[('mici', 'os04c10')]
def test_smoothing(self):
self.camera_offset.set_offset(0.2)
sm = MockStruct(
deviceState=MockStruct(deviceType='mici'),
roadCameraState=MockStruct(sensor='os04c10'),
liveCalibration=MockStruct(rpyCalib=[0.0, 0.0, 0.0], height=[1.22])
)
intrinsics_main = self.dc.fcam.intrinsics
intrinsics_extra = self.dc.ecam.intrinsics
device_from_calib_euler = np.array([0.0, 0.0, 0.0], dtype=np.float32)
main_transform = get_warp_matrix(device_from_calib_euler, intrinsics_main, False).astype(np.float32)
extra_transform = get_warp_matrix(device_from_calib_euler, intrinsics_extra, True).astype(np.float32)
self.camera_offset.update(main_transform, extra_transform, sm, False)
np.testing.assert_almost_equal(self.camera_offset.actual_camera_offset, 0.02)
self.camera_offset.update(main_transform, extra_transform, sm, False)
np.testing.assert_almost_equal(self.camera_offset.actual_camera_offset, 0.038)
def test_camera_offset_(self):
intrinsics = self.dc.fcam.intrinsics
transform = np.eye(3, dtype=np.float32)
height = 1.22
offset = 0.1
cy = intrinsics[1, 2]
expected_shear = np.eye(3, dtype=np.float32)
expected_shear[0, 1] = offset / height
expected_shear[0, 2] = -offset / height * cy
result = CameraOffsetHelper.apply_camera_offset(transform, intrinsics, height, offset)
np.testing.assert_array_almost_equal(result, expected_shear)
def test_update(self):
sm = MockStruct(
deviceState=MockStruct(deviceType='mici'),
roadCameraState=MockStruct(sensor='os04c10'),
liveCalibration=MockStruct(rpyCalib=[0.0, 0.0, 0.0], height=[1.22])
)
intrinsics_main = self.dc.fcam.intrinsics
intrinsics_extra = self.dc.ecam.intrinsics
device_from_calib_euler = np.array([0.0, 0.0, 0.0], dtype=np.float32)
main_transform = get_warp_matrix(device_from_calib_euler, intrinsics_main, False).astype(np.float32)
extra_transform = get_warp_matrix(device_from_calib_euler, intrinsics_extra, True).astype(np.float32)
self.camera_offset.set_offset(0.0) # test default offset doesn't change transformation
main_out, extra_out = self.camera_offset.update(main_transform, extra_transform, sm, False)
np.testing.assert_array_equal(main_out, main_transform)
np.testing.assert_array_equal(extra_out, extra_transform)
self.camera_offset.set_offset(0.2) # test valid offset changes transformation
main_out, extra_out = self.camera_offset.update(main_transform, extra_transform, sm, False)
assert not np.array_equal(main_out, main_transform)
assert not np.array_equal(extra_out, extra_transform)
assert main_out[0, 1] != 0.0
assert main_out[0, 2] != 0.0


@@ -121,6 +121,13 @@
"title": "Camera Debug Exp Time",
"description": ""
},
"CameraOffset": {
"title": "Adjust Camera Offset on non Default Model",
"description": "Adjust this to center the car in its lane by virtually shifting the camera's perspective. Negative Values: Shears the image to the left, moving the model's center to the Right; Positive Values: Shears the image to the right, moving the model's center to the Left. Note: these values are in meters.",
"min": -0.35,
"max": 0.35,
"step": 0.01
},
"CarBatteryCapacity": {
"title": "Car Battery Capacity",
"description": ""