Merge branch 'master-new' into mads-new

Jason Wen
2024-12-17 07:09:01 -05:00
135 changed files with 955 additions and 483 deletions

View File

@@ -8,7 +8,7 @@ assignees: ''
**Checklist**
-- [ ] added entry to CAR in selfdrive/car/*/values.py and ran `selfdrive/opcar/docs.py` to generate new docs
+- [ ] added entry to CAR in selfdrive/car/*/values.py and ran `selfdrive/car/docs.py` to generate new docs
- [ ] test route added to [routes.py](https://github.com/commaai/openpilot/blob/master/selfdrive/car/tests/routes.py)
- [ ] route with openpilot:
- [ ] route with stock system:

View File

@@ -44,7 +44,7 @@ Explain how you tested this bug fix.
**Checklist**
-- [ ] added entry to CAR in selfdrive/car/*/values.py and ran `selfdrive/opcar/docs.py` to generate new docs
+- [ ] added entry to CAR in selfdrive/car/*/values.py and ran `selfdrive/car/docs.py` to generate new docs
- [ ] test route added to [routes.py](https://github.com/commaai/openpilot/blob/master/selfdrive/car/tests/routes.py)
- [ ] route with openpilot:
- [ ] route with stock system:

View File

@@ -15,7 +15,3 @@ jobs:
uses: sunnypilot/sunnypilot/.github/workflows/selfdrive_tests.yaml@master
with:
run_number: ${{ inputs.run_number }}
-tools_tests:
-uses: sunnypilot/sunnypilot/.github/workflows/tools_tests.yaml@master
-with:
-run_number: ${{ inputs.run_number }}

View File

@@ -18,7 +18,7 @@ concurrency:
jobs:
docs:
name: build docs
-runs-on: ubuntu-latest
+runs-on: ubuntu-24.04
steps:
- uses: commaai/timeout@v1

View File

@@ -239,7 +239,7 @@ jobs:
uses: actions/cache@v4
with:
path: .ci_cache/comma_download_cache
-key: car_models-${{ hashFiles('selfdrive/car/tests/test_models.py', 'selfdrive/car/tests/routes.py') }}-${{ matrix.job }}
+key: car_models-${{ hashFiles('selfdrive/car/tests/test_models.py', 'opendbc/car/tests/routes.py') }}-${{ matrix.job }}
- name: Build openpilot
run: ${{ env.RUN }} "scons -j$(nproc)"
- name: Test car models
@@ -317,6 +317,7 @@ jobs:
runs-on:
- ${{ ((github.repository == 'commaai/openpilot') && ((github.event_name != 'pull_request') || (github.event.pull_request.head.repo.full_name == 'commaai/openpilot'))) && 'namespace-profile-amd64-8x16' || 'ubuntu-24.04' }}
- ${{ ((github.repository == 'commaai/openpilot') && ((github.event_name != 'pull_request') || (github.event.pull_request.head.repo.full_name == 'commaai/openpilot'))) && 'namespace-experiments:docker.builds.local-cache=separate' || 'ubuntu-24.04' }}
if: (github.repository == 'commaai/openpilot') && ((github.event_name != 'pull_request') || (github.event.pull_request.head.repo.full_name == 'commaai/openpilot'))
steps:
- uses: actions/checkout@v4
with:

View File

@@ -1,9 +1,9 @@
FROM ghcr.io/commaai/openpilot-base:latest
-ENV PYTHONUNBUFFERED 1
+ENV PYTHONUNBUFFERED=1
-ENV OPENPILOT_PATH /home/batman/openpilot
-ENV PYTHONPATH ${OPENPILOT_PATH}:${PYTHONPATH}
+ENV OPENPILOT_PATH=/home/batman/openpilot
+ENV PYTHONPATH=${OPENPILOT_PATH}:${PYTHONPATH}
RUN mkdir -p ${OPENPILOT_PATH}
WORKDIR ${OPENPILOT_PATH}

View File

@@ -1,6 +1,6 @@
FROM ubuntu:24.04
-ENV PYTHONUNBUFFERED 1
+ENV PYTHONUNBUFFERED=1
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && \
@@ -8,9 +8,9 @@ RUN apt-get update && \
rm -rf /var/lib/apt/lists/*
RUN sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && locale-gen
-ENV LANG en_US.UTF-8
-ENV LANGUAGE en_US:en
-ENV LC_ALL en_US.UTF-8
+ENV LANG=en_US.UTF-8
+ENV LANGUAGE=en_US:en
+ENV LC_ALL=en_US.UTF-8
COPY tools/install_ubuntu_dependencies.sh /tmp/tools/
RUN /tmp/tools/install_ubuntu_dependencies.sh && \
@@ -55,9 +55,9 @@ RUN mkdir -p /tmp/opencl-driver-intel && \
cd / && \
rm -rf /tmp/opencl-driver-intel
-ENV NVIDIA_VISIBLE_DEVICES all
-ENV NVIDIA_DRIVER_CAPABILITIES graphics,utility,compute
-ENV QTWEBENGINE_DISABLE_SANDBOX 1
+ENV NVIDIA_VISIBLE_DEVICES=all
+ENV NVIDIA_DRIVER_CAPABILITIES=graphics,utility,compute
+ENV QTWEBENGINE_DISABLE_SANDBOX=1
RUN dbus-uuidgen > /etc/machine-id
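Note: both Dockerfiles above move from the legacy whitespace-separated `ENV key value` syntax to `ENV key=value`. The behavior is identical; recent Docker builds simply emit a `LegacyKeyValueFormat` warning for the old form.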

9
Jenkinsfile vendored
View File

@@ -79,6 +79,10 @@ def deviceStage(String stageName, String deviceType, List extra_env, def steps)
return
}
if (isReplay()) {
error("REPLAYING TESTS IS NOT ALLOWED. FIX THEM INSTEAD.")
}
def extra = extra_env.collect { "export ${it}" }.join('\n');
def branch = env.BRANCH_NAME ?: 'master';
def gitDiff = sh returnStdout: true, script: 'curl -s -H "Authorization: Bearer ${GITHUB_COMMENTS_TOKEN}" https://api.github.com/repos/commaai/openpilot/compare/master...${GIT_BRANCH} | jq .files[].filename || echo "/"', label: 'Getting changes'
@@ -123,6 +127,11 @@ def hasPathChanged(String gitDiff, List<String> paths) {
return false
}
def isReplay() {
def replayClass = "org.jenkinsci.plugins.workflow.cps.replay.ReplayCause"
return currentBuild.rawBuild.getCauses().any{ cause -> cause.toString().contains(replayClass) }
}
def setupCredentials() {
withCredentials([
string(credentialsId: 'azure_token', variable: 'AZURE_TOKEN'),

View File

@@ -38,7 +38,8 @@ Quick start: `bash <(curl -fsSL openpilot.comma.ai)`
</tr>
</table>
-To start using openpilot in a car
+Using openpilot in a car
------
To use openpilot in a car, you need four things:
@@ -49,6 +50,14 @@ To use openpilot in a car, you need four things:
We have detailed instructions for [how to install the harness and device in a car](https://comma.ai/setup). Note that it's possible to run openpilot on [other hardware](https://blog.comma.ai/self-driving-car-for-free/), although it's not plug-and-play.
### Branches
| branch | URL | description |
|------------------|----------------------------------------|-------------------------------------------------------------------------------------|
| `release3` | openpilot.comma.ai | This is openpilot's release branch. |
| `release3-staging` | openpilot-test.comma.ai | This is the staging branch for releases. Use it to get new releases slightly early. |
| `nightly` | openpilot-nightly.comma.ai | This is the bleeding edge development branch. Do not expect this to be stable. |
| `nightly-dev` | installer.comma.ai/commaai/nightly-dev | Same as nightly, but includes experimental development features for some cars. |
To start developing openpilot
------

View File

@@ -2516,6 +2516,14 @@ struct Microphone {
filteredSoundPressureWeightedDb @2 :Float32;
}
struct Touch {
sec @0 :Int64;
usec @1 :Int64;
type @2 :UInt8;
code @3 :Int32;
value @4 :Int32;
}
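The new `Touch` struct mirrors the Linux kernel's `struct input_event` layout (seconds/microseconds timestamp, event type, code, value), so raw evdev touchscreen events can be logged verbatim and decoded later (see the touch replay tool further down).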
struct Event {
logMonoTime @0 :UInt64; # nanoseconds
valid @67 :Bool = true;
@@ -2596,6 +2604,9 @@ struct Event {
logMessage @18 :Text;
errorLogMessage @85 :Text;
# touch frame
touch @135 :List(Touch);
# navigation
navInstruction @82 :NavInstruction;
navRoute @83 :NavRoute;

View File

@@ -22,6 +22,7 @@ _services: dict[str, tuple] = {
"temperatureSensor2": (True, 2., 200),
"gpsNMEA": (True, 9.),
"deviceState": (True, 2., 1),
"touch": (True, 20., 1),
"can": (True, 100., 2053), # decimation gives ~3 msgs in a full segment
"controlsState": (True, 100., 10),
"selfdriveState": (True, 100., 10),

View File

@@ -1,7 +1,7 @@
import numpy as np
from openpilot.common.transformations.orientation import rot_from_euler
from openpilot.common.transformations.camera import get_view_frame_from_calib_frame, view_frame_from_device_frame
from openpilot.common.transformations.camera import get_view_frame_from_calib_frame, view_frame_from_device_frame, _ar_ox_fisheye
# segnet
SEGNET_SIZE = (512, 384)
@@ -39,6 +39,13 @@ sbigmodel_intrinsics = np.array([
[0.0, sbigmodel_fl, 0.5 * (256 + MEDMODEL_CY)],
[0.0, 0.0, 1.0]])
DM_INPUT_SIZE = (1440, 960)
dmonitoringmodel_fl = _ar_ox_fisheye.focal_length
dmonitoringmodel_intrinsics = np.array([
[dmonitoringmodel_fl, 0.0, DM_INPUT_SIZE[0]/2],
[0.0, dmonitoringmodel_fl, DM_INPUT_SIZE[1]/2 - (_ar_ox_fisheye.height - DM_INPUT_SIZE[1])/2],
[0.0, 0.0, 1.0]])
bigmodel_frame_from_calib_frame = np.dot(bigmodel_intrinsics,
get_view_frame_from_calib_frame(0, 0, 0, 0))
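The new driver-monitoring intrinsics are a standard pinhole camera matrix; restating what the code above builds, with $(W, H) = (1440, 960)$ and $H_{cam}$ the full fisheye image height:

$$K_{dm} = \begin{pmatrix} f & 0 & W/2 \\ 0 & f & \tfrac{H}{2} - \tfrac{H_{cam} - H}{2} \\ 0 & 0 & 1 \end{pmatrix}$$

The shifted principal point $c_y$ accounts for the vertical crop from the full sensor height down to the model's 960-pixel input.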

View File

@@ -103,7 +103,7 @@ A supported vehicle is one that just works when you install a comma device. All
|Hyundai|Ioniq Plug-in Hybrid 2020-22|All|openpilot available[<sup>1</sup>](#footnotes)|0 mph|0 mph|[![star](assets/icon-star-full.svg)](##)|[![star](assets/icon-star-full.svg)](##)|<details><summary>Parts</summary><sub>- 1 Hyundai H connector<br>- 1 RJ45 cable (7 ft)<br>- 1 comma 3X<br>- 1 comma power v2<br>- 1 harness box<br>- 1 mount<br>- 1 right angle OBD-C cable (1.5 ft)<br><a href="https://comma.ai/shop/comma-3x.html?make=Hyundai&model=Ioniq Plug-in Hybrid 2020-22">Buy Here</a></sub></details>||
|Hyundai|Kona 2020|Smart Cruise Control (SCC)|openpilot available[<sup>1</sup>](#footnotes)|6 mph|0 mph|[![star](assets/icon-star-full.svg)](##)|[![star](assets/icon-star-empty.svg)](##)|<details><summary>Parts</summary><sub>- 1 Hyundai B connector<br>- 1 RJ45 cable (7 ft)<br>- 1 comma 3X<br>- 1 comma power v2<br>- 1 harness box<br>- 1 mount<br>- 1 right angle OBD-C cable (1.5 ft)<br><a href="https://comma.ai/shop/comma-3x.html?make=Hyundai&model=Kona 2020">Buy Here</a></sub></details>||
|Hyundai|Kona Electric 2018-21|Smart Cruise Control (SCC)|openpilot available[<sup>1</sup>](#footnotes)|0 mph|0 mph|[![star](assets/icon-star-full.svg)](##)|[![star](assets/icon-star-full.svg)](##)|<details><summary>Parts</summary><sub>- 1 Hyundai G connector<br>- 1 RJ45 cable (7 ft)<br>- 1 comma 3X<br>- 1 comma power v2<br>- 1 harness box<br>- 1 mount<br>- 1 right angle OBD-C cable (1.5 ft)<br><a href="https://comma.ai/shop/comma-3x.html?make=Hyundai&model=Kona Electric 2018-21">Buy Here</a></sub></details>||
|Hyundai|Kona Electric 2022-23|Smart Cruise Control (SCC)|Stock|0 mph|0 mph|[![star](assets/icon-star-full.svg)](##)|[![star](assets/icon-star-full.svg)](##)|<details><summary>Parts</summary><sub>- 1 Hyundai O connector<br>- 1 RJ45 cable (7 ft)<br>- 1 comma 3X<br>- 1 comma power v2<br>- 1 harness box<br>- 1 mount<br>- 1 right angle OBD-C cable (1.5 ft)<br><a href="https://comma.ai/shop/comma-3x.html?make=Hyundai&model=Kona Electric 2022-23">Buy Here</a></sub></details>||
|Hyundai|Kona Electric 2022-23|Smart Cruise Control (SCC)|openpilot available[<sup>1</sup>](#footnotes)|0 mph|0 mph|[![star](assets/icon-star-full.svg)](##)|[![star](assets/icon-star-full.svg)](##)|<details><summary>Parts</summary><sub>- 1 Hyundai O connector<br>- 1 RJ45 cable (7 ft)<br>- 1 comma 3X<br>- 1 comma power v2<br>- 1 harness box<br>- 1 mount<br>- 1 right angle OBD-C cable (1.5 ft)<br><a href="https://comma.ai/shop/comma-3x.html?make=Hyundai&model=Kona Electric 2022-23">Buy Here</a></sub></details>||
|Hyundai|Kona Electric (with HDA II, Korea only) 2023[<sup>5</sup>](#footnotes)|Smart Cruise Control (SCC)|Stock|0 mph|0 mph|[![star](assets/icon-star-full.svg)](##)|[![star](assets/icon-star-full.svg)](##)|<details><summary>Parts</summary><sub>- 1 Hyundai R connector<br>- 1 RJ45 cable (7 ft)<br>- 1 comma 3X<br>- 1 comma power v2<br>- 1 harness box<br>- 1 mount<br>- 1 right angle OBD-C cable (1.5 ft)<br><a href="https://comma.ai/shop/comma-3x.html?make=Hyundai&model=Kona Electric (with HDA II, Korea only) 2023">Buy Here</a></sub></details>|<a href="https://www.youtube.com/watch?v=U2fOCmcQ8hw" target="_blank"><img height="18px" src="assets/icon-youtube.svg"></img></a>|
|Hyundai|Kona Hybrid 2020|Smart Cruise Control (SCC)|openpilot available[<sup>1</sup>](#footnotes)|0 mph|0 mph|[![star](assets/icon-star-full.svg)](##)|[![star](assets/icon-star-full.svg)](##)|<details><summary>Parts</summary><sub>- 1 Hyundai I connector<br>- 1 RJ45 cable (7 ft)<br>- 1 comma 3X<br>- 1 comma power v2<br>- 1 harness box<br>- 1 mount<br>- 1 right angle OBD-C cable (1.5 ft)<br><a href="https://comma.ai/shop/comma-3x.html?make=Hyundai&model=Kona Hybrid 2020">Buy Here</a></sub></details>||
|Hyundai|Palisade 2020-22|All|openpilot available[<sup>1</sup>](#footnotes)|0 mph|0 mph|[![star](assets/icon-star-full.svg)](##)|[![star](assets/icon-star-full.svg)](##)|<details><summary>Parts</summary><sub>- 1 Hyundai H connector<br>- 1 RJ45 cable (7 ft)<br>- 1 comma 3X<br>- 1 comma power v2<br>- 1 harness box<br>- 1 mount<br>- 1 right angle OBD-C cable (1.5 ft)<br><a href="https://comma.ai/shop/comma-3x.html?make=Hyundai&model=Palisade 2020-22">Buy Here</a></sub></details>|<a href="https://youtu.be/TAnDqjF4fDY?t=456" target="_blank"><img height="18px" src="assets/icon-youtube.svg"></img></a>|

44
docs/css/tooltip.css Normal file
View File

@@ -0,0 +1,44 @@
[data-tooltip] {
position: relative;
display: inline-block;
border-bottom: 1px dotted black;
}
[data-tooltip] .tooltip-content {
width: max-content;
max-width: 25em;
position: absolute;
top: 100%;
left: 50%;
transform: translateX(-50%);
background-color: white;
color: #404040;
box-shadow: 0 4px 14px 0 rgba(0,0,0,.2), 0 0 0 1px rgba(0,0,0,.05);
padding: 10px;
font: 14px/1.5 Lato, proxima-nova, Helvetica Neue, Arial, sans-serif;
text-decoration: none;
opacity: 0;
visibility: hidden;
transition: opacity 0.1s, visibility 0s;
z-index: 1000;
pointer-events: none; /* Prevent accidental interaction */
}
[data-tooltip]:hover .tooltip-content {
opacity: 1;
visibility: visible;
pointer-events: auto; /* Allow interaction when visible */
}
.tooltip-content .tooltip-glossary-link {
display: inline-block;
margin-top: 8px;
font-size: 12px;
color: #007bff;
text-decoration: none;
}
.tooltip-content .tooltip-glossary-link:hover {
color: #0056b3;
text-decoration: underline;
}
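This stylesheet styles the `<span data-tooltip>` / `<span class='tooltip-content'>` markup generated by `format_tooltip_html()` in the new `docs/hooks/glossary.py` hook shown below.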

0
docs/glossary.toml Normal file
View File

68
docs/hooks/glossary.py Normal file
View File

@@ -0,0 +1,68 @@
import re
import tomllib
def load_glossary(file_path="docs/glossary.toml"):
with open(file_path, "rb") as f:
glossary_data = tomllib.load(f)
return glossary_data.get("glossary", {})
def generate_anchor_id(name):
return name.replace(" ", "-").replace("_", "-").lower()
def format_markdown_term(name, definition):
anchor_id = generate_anchor_id(name)
markdown = f"* [**{name.replace('_', ' ').title()}**](#{anchor_id})"
if definition.get("abbreviation"):
markdown += f" *({definition['abbreviation']})*"
if definition.get("description"):
markdown += f": {definition['description']}\n"
return markdown
def glossary_markdown(vocabulary):
markdown = ""
for category, terms in vocabulary.items():
markdown += f"## {category.replace('_', ' ').title()}\n\n"
for name, definition in terms.items():
markdown += format_markdown_term(name, definition)
return markdown
def format_tooltip_html(term_key, definition, html):
display_term = term_key.replace("_", " ").title()
clean_description = re.sub(r"\[(.+)]\(.+\)", r"\1", definition["description"])
glossary_link = (
f"<a href='/concepts/glossary#{term_key}' class='tooltip-glossary-link' title='View in glossary'>Glossary🔗</a>"
)
return re.sub(
re.escape(display_term),
lambda match: f"<span data-tooltip>{match.group(0)}<span class='tooltip-content'>{clean_description} {glossary_link}</span></span>",
html,
flags=re.IGNORECASE,
)
def apply_tooltip(_term_key, _definition, pattern, html):
return re.sub(
pattern,
lambda match: format_tooltip_html(_term_key, _definition, match.group(0)),
html,
flags=re.IGNORECASE,
)
def tooltip_html(vocabulary, html):
for _category, terms in vocabulary.items():
for term_key, definition in terms.items():
if definition.get("description"):
pattern = rf"(?<!\w){re.escape(term_key.replace('_', ' ').title())}(?![^<]*<\/a>)(?!\([^)]*\))"
html = apply_tooltip(term_key, definition, pattern, html)
return html
# Page Hooks
def on_page_markdown(markdown, **kwargs):
glossary = load_glossary()
return markdown.replace("{{GLOSSARY_DEFINITIONS}}", glossary_markdown(glossary))
def on_page_content(html, **kwargs):
if kwargs.get("page").title == "Glossary":
return html
glossary = load_glossary()
return tooltip_html(glossary, html)
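The `docs/glossary.toml` contents are not shown in this diff view. Based on how `load_glossary()` and `glossary_markdown()` traverse the data, `tomllib.load()` is expected to produce a structure like the following sketch (the category, term, and text are illustrative, not from the diff):

```python
glossary_data = {
    "glossary": {                      # top-level table read by load_glossary()
        "general": {                   # category, rendered as an "## " heading
            "lead_car": {              # term key; underscores shown as spaces
                "abbreviation": "LC",  # optional, rendered in italics
                "description": "The vehicle ahead that openpilot follows.",
            },
        },
    },
}
```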

View File

@@ -7,7 +7,7 @@ export OPENBLAS_NUM_THREADS=1
export VECLIB_MAXIMUM_THREADS=1
if [ -z "$AGNOS_VERSION" ]; then
export AGNOS_VERSION="11.3"
export AGNOS_VERSION="11.4"
fi
export STAGING_ROOT="/data/safe_staging"

View File

@@ -8,6 +8,10 @@ strict: true
docs_dir: docs
site_dir: docs_site/
hooks:
- docs/hooks/glossary.py
extra_css:
- css/tooltip.css
theme:
name: readthedocs
navigation_depth: 3

2
panda

Submodule panda updated: 3ae376ecfe...45301bf15c

View File

@@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f7565541b4e6213221174839b9b2b67397ced0b9807ea56413989fd37325b3b6
size 4908

View File

@@ -148,7 +148,8 @@ class CarSpecificEvents:
# To avoid re-engaging when openpilot cancels, check user engagement intention via buttons
# Main button also can trigger an engagement on these cars
self.cruise_buttons.append(any(ev.type in HYUNDAI_ENABLE_BUTTONS for ev in CS.buttonEvents))
-events = self.create_common_events(CS, CS_prev, pcm_enable=self.CP.pcmCruise, allow_enable=any(self.cruise_buttons))
+events = self.create_common_events(CS, CS_prev, extra_gears=(GearShifter.sport, GearShifter.manumatic),
+pcm_enable=self.CP.pcmCruise, allow_enable=any(self.cruise_buttons))
# low speed steer alert hysteresis logic (only for cars with steer cut off above 10 m/s)
if CS.vEgo < (self.CP.minSteerSpeed + 2.) and self.CP.minSteerSpeed > 10.:

View File

@@ -17,7 +17,7 @@ class TestCarDocs:
with open(CARS_MD_OUT) as f:
current_cars_md = f.read()
-assert generated_cars_md == current_cars_md, "Run selfdrive/opcar/docs.py to update the compatibility documentation"
+assert generated_cars_md == current_cars_md, "Run selfdrive/car/docs.py to update the compatibility documentation"
def test_docs_diff(self):
dump_path = os.path.join(BASEDIR, "selfdrive", "car", "tests", "cars_dump")

View File

@@ -5,12 +5,15 @@ from openpilot.common.realtime import DT_CTRL
MIN_SPEED = 1.0
CONTROL_N = 17
CAR_ROTATION_RADIUS = 0.0
# A curvature of 0.2 (1/m) is a 5 m turn radius, smaller than most cars can achieve
MAX_CURVATURE = 0.2
# EU guidelines
MAX_LATERAL_JERK = 5.0
MAX_VEL_ERR = 5.0
def clip_curvature(v_ego, prev_curvature, new_curvature):
new_curvature = clip(new_curvature, -MAX_CURVATURE, MAX_CURVATURE)
v_ego = max(MIN_SPEED, v_ego)
max_curvature_rate = MAX_LATERAL_JERK / (v_ego**2) # inexact calculation, check https://github.com/commaai/openpilot/pull/24755
safe_desired_curvature = clip(new_curvature,
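Spelling out the limit the new code enforces: lateral acceleration is $a = v^2 \kappa$, so at constant speed the lateral jerk is $j = v^2 \dot{\kappa}$, giving

$$\dot{\kappa}_{max} = \frac{j_{max}}{v_{ego}^2}$$

with $j_{max}$ = MAX_LATERAL_JERK = 5.0 m/s³, which is exactly `max_curvature_rate`. The "inexact" caveat in the comment is that differentiating $v^2\kappa$ also yields a $2 v \dot{v} \kappa$ term, which this calculation drops.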

48
selfdrive/debug/touch_replay.py Executable file
View File

@@ -0,0 +1,48 @@
#!/usr/bin/env python3
import argparse
import numpy as np
import matplotlib.pyplot as plt
from openpilot.tools.lib.logreader import LogReader
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--width', default=2160, type=int)
parser.add_argument('--height', default=1080, type=int)
parser.add_argument('--route', default='rlog', type=str)
args = parser.parse_args()
w = args.width
h = args.height
route = args.route
fingers = [[-1, -1]] * 5
touch_points = []
current_slot = 0
lr = list(LogReader(route))
for msg in lr:
if msg.which() == 'touch':
for event in msg.touch:
if event.type == 3 and event.code == 47:
current_slot = event.value
elif event.type == 3 and event.code == 57 and event.value == -1:
fingers[current_slot] = [-1, -1]
elif event.type == 3 and event.code == 53:
fingers[current_slot][1] = h - (h - event.value)
if fingers[current_slot][0] != -1:
touch_points.append(fingers[current_slot].copy())
elif event.type == 3 and event.code == 54:
fingers[current_slot][0] = w - event.value
if fingers[current_slot][1] != -1:
touch_points.append(fingers[current_slot].copy())
unique_points, counts = np.unique(touch_points, axis=0, return_counts=True)
plt.figure(figsize=(10, 3))
plt.scatter(unique_points[:, 0], unique_points[:, 1], c=counts, s=counts * 20, edgecolors='red')
plt.colorbar()
plt.title(f'Touches for {route}')
plt.grid(True)
plt.show()
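For reference, the magic numbers the script matches are the standard Linux evdev multitouch constants from `input-event-codes.h`:

```python
EV_ABS = 3               # absolute-axis event type
ABS_MT_SLOT = 47         # selects which finger slot later events apply to
ABS_MT_POSITION_X = 53   # X coordinate of the active slot
ABS_MT_POSITION_Y = 54   # Y coordinate of the active slot
ABS_MT_TRACKING_ID = 57  # a value of -1 means the finger was lifted
```

Also note that `h - (h - event.value)` reduces to `event.value`; it appears to be kept only for symmetry with the `w - event.value` flip on the other axis.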

View File

@@ -13,13 +13,13 @@ from cereal.messaging import PubMaster, SubMaster
from msgq.visionipc import VisionIpcClient, VisionStreamType, VisionBuf
from openpilot.common.swaglog import cloudlog
from openpilot.common.realtime import set_realtime_priority
from openpilot.common.transformations.model import dmonitoringmodel_intrinsics
from openpilot.common.transformations.camera import _ar_ox_fisheye, _os_fisheye
from openpilot.selfdrive.modeld.models.commonmodel_pyx import CLContext, MonitoringModelFrame
from openpilot.selfdrive.modeld.runners import ModelRunner, Runtime
from openpilot.selfdrive.modeld.models.commonmodel_pyx import CLContext
from openpilot.selfdrive.modeld.parse_model_outputs import sigmoid
CALIB_LEN = 3
MODEL_WIDTH = 1440
MODEL_HEIGHT = 960
FEATURE_LEN = 512
OUTPUT_SIZE = 84 + FEATURE_LEN
@@ -62,25 +62,21 @@ class ModelState:
def __init__(self, cl_ctx):
assert ctypes.sizeof(DMonitoringModelResult) == OUTPUT_SIZE * ctypes.sizeof(ctypes.c_float)
self.frame = MonitoringModelFrame(cl_ctx)
self.output = np.zeros(OUTPUT_SIZE, dtype=np.float32)
self.inputs = {
'input_img': np.zeros(MODEL_HEIGHT * MODEL_WIDTH, dtype=np.uint8),
'calib': np.zeros(CALIB_LEN, dtype=np.float32)}
self.model = ModelRunner(MODEL_PATHS, self.output, Runtime.GPU, False, cl_ctx)
self.model.addInput("input_img", None)
self.model.addInput("calib", self.inputs['calib'])
-def run(self, buf:VisionBuf, calib:np.ndarray) -> tuple[np.ndarray, float]:
+def run(self, buf:VisionBuf, calib:np.ndarray, transform:np.ndarray) -> tuple[np.ndarray, float]:
self.inputs['calib'][:] = calib
v_offset = buf.height - MODEL_HEIGHT
h_offset = (buf.width - MODEL_WIDTH) // 2
buf_data = buf.data.reshape(-1, buf.stride)
input_data = self.inputs['input_img'].reshape(MODEL_HEIGHT, MODEL_WIDTH)
input_data[:] = buf_data[v_offset:v_offset+MODEL_HEIGHT, h_offset:h_offset+MODEL_WIDTH]
self.model.setInputBuffer("input_img", self.frame.prepare(buf, transform.flatten(), None).view(np.float32))
self.model.setInputBuffer("input_img", self.inputs['input_img'].view(np.float32))
t1 = time.perf_counter()
self.model.execute()
t2 = time.perf_counter()
@@ -137,18 +133,23 @@ def main():
pm = PubMaster(["driverStateV2"])
calib = np.zeros(CALIB_LEN, dtype=np.float32)
model_transform = None
while True:
buf = vipc_client.recv()
if buf is None:
continue
if model_transform is None:
cam = _os_fisheye if buf.width == _os_fisheye.width else _ar_ox_fisheye
model_transform = np.linalg.inv(np.dot(dmonitoringmodel_intrinsics, np.linalg.inv(cam.intrinsics))).astype(np.float32)
sm.update(0)
if sm.updated["liveCalibration"]:
calib[:] = np.array(sm["liveCalibration"].rpyCalib)
t1 = time.perf_counter()
-model_output, gpu_execution_time = model.run(buf, calib)
+model_output, gpu_execution_time = model.run(buf, calib, model_transform)
t2 = time.perf_counter()
pm.send("driverStateV2", get_driverstate_packet(model_output, vipc_client.frame_id, vipc_client.timestamp_sof, t2 - t1, gpu_execution_time))

View File

@@ -3,11 +3,22 @@ import capnp
import numpy as np
from cereal import log
from openpilot.selfdrive.modeld.constants import ModelConstants, Plan, Meta
from openpilot.selfdrive.controls.lib.drive_helpers import MIN_SPEED
SEND_RAW_PRED = os.getenv('SEND_RAW_PRED')
ConfidenceClass = log.ModelDataV2.ConfidenceClass
def curv_from_psis(psi_target, psi_rate, vego, delay):
vego = np.clip(vego, MIN_SPEED, np.inf)
curv_from_psi = psi_target / (vego * delay)  # vego is clamped above, so this cannot divide by zero
return 2*curv_from_psi - psi_rate / vego
def get_curvature_from_plan(plan, vego, delay):
psi_target = np.interp(delay, ModelConstants.T_IDXS, plan[:, Plan.T_FROM_CURRENT_EULER][:, 2])
psi_rate = plan[:, Plan.ORIENTATION_RATE][0, 2]
return curv_from_psis(psi_target, psi_rate, vego, delay)
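A sanity-check derivation of `curv_from_psis`, consistent with its arithmetic: over the actuator delay $\Delta t$ the heading changes by $\psi = v \int \kappa\,dt$. If curvature ramps linearly from the current $\kappa_0 \approx \dot{\psi}/v$ to the commanded $\kappa_d$, then

$$\psi_{target} = v\,\Delta t\,\frac{\kappa_0 + \kappa_d}{2} \;\;\Rightarrow\;\; \kappa_d = \frac{2\,\psi_{target}}{v\,\Delta t} - \frac{\dot{\psi}}{v}$$

which is exactly `2*curv_from_psi - psi_rate / vego`.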
class PublishState:
def __init__(self):
self.disengage_buffer = np.zeros(ModelConstants.CONFIDENCE_BUFFER_LEN*ModelConstants.DISENGAGE_WIDTH, dtype=np.float32)
@@ -55,14 +66,17 @@ def fill_lane_line_meta(builder, lane_lines, lane_line_probs):
builder.rightProb = lane_line_probs[2]
def fill_model_msg(base_msg: capnp._DynamicStructBuilder, extended_msg: capnp._DynamicStructBuilder,
-net_output_data: dict[str, np.ndarray], publish_state: PublishState,
-vipc_frame_id: int, vipc_frame_id_extra: int, frame_id: int, frame_drop: float,
-timestamp_eof: int, model_execution_time: float, valid: bool) -> None:
+net_output_data: dict[str, np.ndarray], v_ego: float, delay: float,
+publish_state: PublishState, vipc_frame_id: int, vipc_frame_id_extra: int,
+frame_id: int, frame_drop: float, timestamp_eof: int, model_execution_time: float,
+valid: bool) -> None:
frame_age = frame_id - vipc_frame_id if frame_id > vipc_frame_id else 0
frame_drop_perc = frame_drop * 100
extended_msg.valid = valid
base_msg.valid = valid
desired_curv = float(get_curvature_from_plan(net_output_data['plan'][0], v_ego, delay))
driving_model_data = base_msg.drivingModelData
driving_model_data.frameId = vipc_frame_id
@@ -71,7 +85,7 @@ def fill_model_msg(base_msg: capnp._DynamicStructBuilder, extended_msg: capnp._D
driving_model_data.modelExecutionTime = model_execution_time
action = driving_model_data.action
-action.desiredCurvature = float(net_output_data['desired_curvature'][0,0])
+action.desiredCurvature = desired_curv
modelV2 = extended_msg.modelV2
modelV2.frameId = vipc_frame_id
@@ -106,7 +120,7 @@ def fill_model_msg(base_msg: capnp._DynamicStructBuilder, extended_msg: capnp._D
# lateral planning
action = modelV2.action
-action.desiredCurvature = float(net_output_data['desired_curvature'][0,0])
+action.desiredCurvature = desired_curv
# times at X_IDXS according to model plan
PLAN_T_IDXS = [np.nan] * ModelConstants.IDX_N

View File

@@ -22,7 +22,7 @@ from openpilot.selfdrive.modeld.runners import ModelRunner, Runtime
from openpilot.selfdrive.modeld.parse_model_outputs import Parser
from openpilot.selfdrive.modeld.fill_model_msg import fill_model_msg, fill_pose_msg, PublishState
from openpilot.selfdrive.modeld.constants import ModelConstants
-from openpilot.selfdrive.modeld.models.commonmodel_pyx import ModelFrame, CLContext
+from openpilot.selfdrive.modeld.models.commonmodel_pyx import DrivingModelFrame, CLContext
PROCESS_NAME = "selfdrive.modeld.modeld"
SEND_RAW_PRED = os.getenv('SEND_RAW_PRED')
@@ -44,27 +44,24 @@ class FrameMeta:
self.frame_id, self.timestamp_sof, self.timestamp_eof = vipc.frame_id, vipc.timestamp_sof, vipc.timestamp_eof
class ModelState:
-frame: ModelFrame
-wide_frame: ModelFrame
+frame: DrivingModelFrame
+wide_frame: DrivingModelFrame
inputs: dict[str, np.ndarray]
output: np.ndarray
prev_desire: np.ndarray # for tracking the rising edge of the pulse
model: ModelRunner
def __init__(self, context: CLContext):
-self.frame = ModelFrame(context)
-self.wide_frame = ModelFrame(context)
+self.frame = DrivingModelFrame(context)
+self.wide_frame = DrivingModelFrame(context)
self.prev_desire = np.zeros(ModelConstants.DESIRE_LEN, dtype=np.float32)
self.full_features_20Hz = np.zeros((ModelConstants.FULL_HISTORY_BUFFER_LEN, ModelConstants.FEATURE_LEN), dtype=np.float32)
self.desire_20Hz = np.zeros((ModelConstants.FULL_HISTORY_BUFFER_LEN + 1, ModelConstants.DESIRE_LEN), dtype=np.float32)
self.prev_desired_curv_20hz = np.zeros((ModelConstants.FULL_HISTORY_BUFFER_LEN + 1, ModelConstants.PREV_DESIRED_CURV_LEN), dtype=np.float32)
# img buffers are managed in openCL transform code
self.inputs = {
'desire': np.zeros(ModelConstants.DESIRE_LEN * (ModelConstants.HISTORY_BUFFER_LEN+1), dtype=np.float32),
'traffic_convention': np.zeros(ModelConstants.TRAFFIC_CONVENTION_LEN, dtype=np.float32),
'lateral_control_params': np.zeros(ModelConstants.LATERAL_CONTROL_PARAMS_LEN, dtype=np.float32),
'prev_desired_curv': np.zeros(ModelConstants.PREV_DESIRED_CURV_LEN * (ModelConstants.HISTORY_BUFFER_LEN+1), dtype=np.float32),
'features_buffer': np.zeros(ModelConstants.HISTORY_BUFFER_LEN * ModelConstants.FEATURE_LEN, dtype=np.float32),
}
@@ -100,7 +97,6 @@ class ModelState:
self.inputs['desire'][:] = self.desire_20Hz.reshape((25,4,-1)).max(axis=1).flatten()
self.inputs['traffic_convention'][:] = inputs['traffic_convention']
self.inputs['lateral_control_params'][:] = inputs['lateral_control_params']
self.model.setInputBuffer("input_imgs", self.frame.prepare(buf, transform.flatten(), self.model.getCLBuffer("input_imgs")))
self.model.setInputBuffer("big_input_imgs", self.wide_frame.prepare(wbuf, transform_wide.flatten(), self.model.getCLBuffer("big_input_imgs")))
@@ -114,13 +110,8 @@ class ModelState:
self.full_features_20Hz[:-1] = self.full_features_20Hz[1:]
self.full_features_20Hz[-1] = outputs['hidden_state'][0, :]
self.prev_desired_curv_20hz[:-1] = self.prev_desired_curv_20hz[1:]
self.prev_desired_curv_20hz[-1] = outputs['desired_curvature'][0, :]
idxs = np.arange(-4,-100,-4)[::-1]
self.inputs['features_buffer'][:] = self.full_features_20Hz[idxs].flatten()
# TODO model only uses last value now, once that changes we need to input strided action history buffer
self.inputs['prev_desired_curv'][-ModelConstants.PREV_DESIRED_CURV_LEN:] = 0. * self.prev_desired_curv_20hz[-4, :]
return outputs
@@ -231,7 +222,6 @@ def main(demo=False):
is_rhd = sm["driverMonitoringState"].isRHD
frame_id = sm["roadCameraState"].frameId
v_ego = max(sm["carState"].vEgo, 0.)
lateral_control_params = np.array([v_ego, steer_delay], dtype=np.float32)
if sm.updated["liveCalibration"] and sm.seen['roadCameraState'] and sm.seen['deviceState']:
device_from_calib_euler = np.array(sm["liveCalibration"].rpyCalib, dtype=np.float32)
dc = DEVICE_CAMERAS[(str(sm['deviceState'].deviceType), str(sm['roadCameraState'].sensor))]
@@ -262,7 +252,6 @@ def main(demo=False):
inputs:dict[str, np.ndarray] = {
'desire': vec_desire,
'traffic_convention': traffic_convention,
'lateral_control_params': lateral_control_params,
}
mt1 = time.perf_counter()
@@ -274,7 +263,8 @@ def main(demo=False):
modelv2_send = messaging.new_message('modelV2')
drivingdata_send = messaging.new_message('drivingModelData')
posenet_send = messaging.new_message('cameraOdometry')
-fill_model_msg(drivingdata_send, modelv2_send, model_output, publish_state, meta_main.frame_id, meta_extra.frame_id, frame_id,
+fill_model_msg(drivingdata_send, modelv2_send, model_output, v_ego, steer_delay,
+publish_state, meta_main.frame_id, meta_extra.frame_id, frame_id,
frame_drop_ratio, meta_main.timestamp_eof, model_execution_time, live_calib_seen)
desire_state = modelv2_send.modelV2.meta.desireState

View File

@@ -1,36 +1,30 @@
#include "selfdrive/modeld/models/commonmodel.h"
#include <cassert>
#include <cmath>
#include <cstring>
#include "common/clutil.h"
-ModelFrame::ModelFrame(cl_device_id device_id, cl_context context) {
+DrivingModelFrame::DrivingModelFrame(cl_device_id device_id, cl_context context) : ModelFrame(device_id, context) {
input_frames = std::make_unique<uint8_t[]>(buf_size);
q = CL_CHECK_ERR(clCreateCommandQueue(context, device_id, 0, &err));
y_cl = CL_CHECK_ERR(clCreateBuffer(context, CL_MEM_READ_WRITE, MODEL_WIDTH * MODEL_HEIGHT, NULL, &err));
u_cl = CL_CHECK_ERR(clCreateBuffer(context, CL_MEM_READ_WRITE, (MODEL_WIDTH / 2) * (MODEL_HEIGHT / 2), NULL, &err));
v_cl = CL_CHECK_ERR(clCreateBuffer(context, CL_MEM_READ_WRITE, (MODEL_WIDTH / 2) * (MODEL_HEIGHT / 2), NULL, &err));
//input_frames_cl = CL_CHECK_ERR(clCreateBuffer(context, CL_MEM_READ_WRITE, buf_size, NULL, &err));
img_buffer_20hz_cl = CL_CHECK_ERR(clCreateBuffer(context, CL_MEM_READ_WRITE, 5*frame_size_bytes, NULL, &err));
region.origin = 4 * frame_size_bytes;
region.size = frame_size_bytes;
last_img_cl = CL_CHECK_ERR(clCreateSubBuffer(img_buffer_20hz_cl, CL_MEM_READ_WRITE, CL_BUFFER_CREATE_TYPE_REGION, &region, &err));
transform_init(&transform, context, device_id);
loadyuv_init(&loadyuv, context, device_id, MODEL_WIDTH, MODEL_HEIGHT);
init_transform(device_id, context, MODEL_WIDTH, MODEL_HEIGHT);
}
-uint8_t* ModelFrame::prepare(cl_mem yuv_cl, int frame_width, int frame_height, int frame_stride, int frame_uv_offset, const mat3 &projection, cl_mem *output) {
-transform_queue(&this->transform, q,
-yuv_cl, frame_width, frame_height, frame_stride, frame_uv_offset,
-y_cl, u_cl, v_cl, MODEL_WIDTH, MODEL_HEIGHT, projection);
+uint8_t* DrivingModelFrame::prepare(cl_mem yuv_cl, int frame_width, int frame_height, int frame_stride, int frame_uv_offset, const mat3& projection, cl_mem* output) {
+run_transform(yuv_cl, MODEL_WIDTH, MODEL_HEIGHT, frame_width, frame_height, frame_stride, frame_uv_offset, projection);
for (int i = 0; i < 4; i++) {
CL_CHECK(clEnqueueCopyBuffer(q, img_buffer_20hz_cl, img_buffer_20hz_cl, (i+1)*frame_size_bytes, i*frame_size_bytes, frame_size_bytes, 0, nullptr, nullptr));
}
loadyuv_queue(&loadyuv, q, y_cl, u_cl, v_cl, last_img_cl);
if (output == NULL) {
CL_CHECK(clEnqueueReadBuffer(q, img_buffer_20hz_cl, CL_TRUE, 0, frame_size_bytes, &input_frames[0], 0, nullptr, nullptr));
CL_CHECK(clEnqueueReadBuffer(q, last_img_cl, CL_TRUE, 0, frame_size_bytes, &input_frames[MODEL_FRAME_SIZE], 0, nullptr, nullptr));
@@ -46,13 +40,30 @@ uint8_t* ModelFrame::prepare(cl_mem yuv_cl, int frame_width, int frame_height, i
}
}
-ModelFrame::~ModelFrame() {
-transform_destroy(&transform);
+DrivingModelFrame::~DrivingModelFrame() {
+deinit_transform();
loadyuv_destroy(&loadyuv);
CL_CHECK(clReleaseMemObject(img_buffer_20hz_cl));
CL_CHECK(clReleaseMemObject(last_img_cl));
CL_CHECK(clReleaseMemObject(v_cl));
CL_CHECK(clReleaseMemObject(u_cl));
CL_CHECK(clReleaseMemObject(y_cl));
CL_CHECK(clReleaseCommandQueue(q));
}
}
MonitoringModelFrame::MonitoringModelFrame(cl_device_id device_id, cl_context context) : ModelFrame(device_id, context) {
input_frames = std::make_unique<uint8_t[]>(buf_size);
//input_frame_cl = CL_CHECK_ERR(clCreateBuffer(context, CL_MEM_READ_WRITE, buf_size, NULL, &err));
init_transform(device_id, context, MODEL_WIDTH, MODEL_HEIGHT);
}
uint8_t* MonitoringModelFrame::prepare(cl_mem yuv_cl, int frame_width, int frame_height, int frame_stride, int frame_uv_offset, const mat3& projection, cl_mem* output) {
run_transform(yuv_cl, MODEL_WIDTH, MODEL_HEIGHT, frame_width, frame_height, frame_stride, frame_uv_offset, projection);
CL_CHECK(clEnqueueReadBuffer(q, y_cl, CL_TRUE, 0, MODEL_FRAME_SIZE * sizeof(uint8_t), input_frames.get(), 0, nullptr, nullptr));
clFinish(q);
//return &y_cl;
return input_frames.get();
}
MonitoringModelFrame::~MonitoringModelFrame() {
deinit_transform();
CL_CHECK(clReleaseCommandQueue(q));
}

View File

@@ -2,6 +2,7 @@
#include <cfloat>
#include <cstdlib>
#include <cassert>
#include <memory>
@@ -18,9 +19,56 @@
class ModelFrame {
public:
-ModelFrame(cl_device_id device_id, cl_context context);
-~ModelFrame();
-uint8_t* prepare(cl_mem yuv_cl, int width, int height, int frame_stride, int frame_uv_offset, const mat3& transform, cl_mem *output);
+ModelFrame(cl_device_id device_id, cl_context context) {
+q = CL_CHECK_ERR(clCreateCommandQueue(context, device_id, 0, &err));
+}
+virtual ~ModelFrame() {}
+virtual uint8_t* prepare(cl_mem yuv_cl, int frame_width, int frame_height, int frame_stride, int frame_uv_offset, const mat3& projection, cl_mem* output) { return NULL; }
/*
uint8_t* buffer_from_cl(cl_mem *in_frames, int buffer_size) {
CL_CHECK(clEnqueueReadBuffer(q, *in_frames, CL_TRUE, 0, buffer_size, input_frames.get(), 0, nullptr, nullptr));
clFinish(q);
return &input_frames[0];
}
*/
int MODEL_WIDTH;
int MODEL_HEIGHT;
int MODEL_FRAME_SIZE;
int buf_size;
protected:
cl_mem y_cl, u_cl, v_cl;
Transform transform;
cl_command_queue q;
std::unique_ptr<uint8_t[]> input_frames;
void init_transform(cl_device_id device_id, cl_context context, int model_width, int model_height) {
y_cl = CL_CHECK_ERR(clCreateBuffer(context, CL_MEM_READ_WRITE, model_width * model_height, NULL, &err));
u_cl = CL_CHECK_ERR(clCreateBuffer(context, CL_MEM_READ_WRITE, (model_width / 2) * (model_height / 2), NULL, &err));
v_cl = CL_CHECK_ERR(clCreateBuffer(context, CL_MEM_READ_WRITE, (model_width / 2) * (model_height / 2), NULL, &err));
transform_init(&transform, context, device_id);
}
void deinit_transform() {
transform_destroy(&transform);
CL_CHECK(clReleaseMemObject(v_cl));
CL_CHECK(clReleaseMemObject(u_cl));
CL_CHECK(clReleaseMemObject(y_cl));
}
void run_transform(cl_mem yuv_cl, int model_width, int model_height, int frame_width, int frame_height, int frame_stride, int frame_uv_offset, const mat3& projection) {
transform_queue(&transform, q,
yuv_cl, frame_width, frame_height, frame_stride, frame_uv_offset,
y_cl, u_cl, v_cl, model_width, model_height, projection);
}
};
class DrivingModelFrame : public ModelFrame {
public:
DrivingModelFrame(cl_device_id device_id, cl_context context);
~DrivingModelFrame();
uint8_t* prepare(cl_mem yuv_cl, int frame_width, int frame_height, int frame_stride, int frame_uv_offset, const mat3& projection, cl_mem* output);
const int MODEL_WIDTH = 512;
const int MODEL_HEIGHT = 256;
@@ -29,10 +77,22 @@ public:
const size_t frame_size_bytes = MODEL_FRAME_SIZE * sizeof(uint8_t);
private:
Transform transform;
LoadYUVState loadyuv;
cl_command_queue q;
cl_mem y_cl, u_cl, v_cl, img_buffer_20hz_cl, last_img_cl;
cl_mem img_buffer_20hz_cl, last_img_cl;//, input_frames_cl;
cl_buffer_region region;
std::unique_ptr<uint8_t[]> input_frames;
};
};
class MonitoringModelFrame : public ModelFrame {
public:
MonitoringModelFrame(cl_device_id device_id, cl_context context);
~MonitoringModelFrame();
uint8_t* prepare(cl_mem yuv_cl, int frame_width, int frame_height, int frame_stride, int frame_uv_offset, const mat3& projection, cl_mem* output);
const int MODEL_WIDTH = 1440;
const int MODEL_HEIGHT = 960;
const int MODEL_FRAME_SIZE = MODEL_WIDTH * MODEL_HEIGHT;
const int buf_size = MODEL_FRAME_SIZE;
private:
// cl_mem input_frame_cl;
};

View File

@@ -14,5 +14,13 @@ cdef extern from "common/clutil.h":
cdef extern from "selfdrive/modeld/models/commonmodel.h":
cppclass ModelFrame:
int buf_size
ModelFrame(cl_device_id, cl_context)
# unsigned char * buffer_from_cl(cl_mem*, int);
unsigned char * prepare(cl_mem, int, int, int, int, mat3, cl_mem*)
cppclass DrivingModelFrame:
int buf_size
DrivingModelFrame(cl_device_id, cl_context)
cppclass MonitoringModelFrame:
int buf_size
MonitoringModelFrame(cl_device_id, cl_context)

View File

@@ -4,11 +4,12 @@
import numpy as np
cimport numpy as cnp
from libc.string cimport memcpy
from libc.stdint cimport uintptr_t
from msgq.visionipc.visionipc cimport cl_mem
from msgq.visionipc.visionipc_pyx cimport VisionBuf, CLContext as BaseCLContext
from .commonmodel cimport CL_DEVICE_TYPE_DEFAULT, cl_get_device_id, cl_create_context
-from .commonmodel cimport mat3, ModelFrame as cppModelFrame
+from .commonmodel cimport mat3, ModelFrame as cppModelFrame, DrivingModelFrame as cppDrivingModelFrame, MonitoringModelFrame as cppMonitoringModelFrame
cdef class CLContext(BaseCLContext):
@@ -23,11 +24,17 @@ cdef class CLMem:
mem.mem = <cl_mem*> cmem
return mem
@property
def mem_address(self):
return <uintptr_t>(self.mem)
def cl_from_visionbuf(VisionBuf buf):
return CLMem.create(<void*>&buf.buf.buf_cl)
cdef class ModelFrame:
cdef cppModelFrame * frame
def __cinit__(self, CLContext context):
self.frame = new cppModelFrame(context.device_id, context.context)
cdef int buf_size
def __dealloc__(self):
del self.frame
@@ -42,4 +49,28 @@ cdef class ModelFrame:
data = self.frame.prepare(buf.buf.buf_cl, buf.width, buf.height, buf.stride, buf.uv_offset, cprojection, output.mem)
if not data:
return None
-return np.asarray(<cnp.uint8_t[:self.frame.buf_size]> data)
+return np.asarray(<cnp.uint8_t[:self.buf_size]> data)
# return CLMem.create(data)
# def buffer_from_cl(self, CLMem in_frames):
# cdef unsigned char * data2
# data2 = self.frame.buffer_from_cl(in_frames.mem, self.buf_size)
# return np.asarray(<cnp.uint8_t[:self.buf_size]> data2)
cdef class DrivingModelFrame(ModelFrame):
cdef cppDrivingModelFrame * _frame
def __cinit__(self, CLContext context):
self._frame = new cppDrivingModelFrame(context.device_id, context.context)
self.frame = <cppModelFrame*>(self._frame)
self.buf_size = self._frame.buf_size
cdef class MonitoringModelFrame(ModelFrame):
cdef cppMonitoringModelFrame * _frame
def __cinit__(self, CLContext context):
self._frame = new cppMonitoringModelFrame(context.device_id, context.context)
self.frame = <cppModelFrame*>(self._frame)
self.buf_size = self._frame.buf_size
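A minimal usage sketch of the split frame classes, assuming a valid `CLContext` and a `VisionBuf` from visionipc (the identity transform is illustrative; real callers derive the warp from camera intrinsics):

```python
import numpy as np
from openpilot.selfdrive.modeld.models.commonmodel_pyx import (
    CLContext, DrivingModelFrame, MonitoringModelFrame)

cl_ctx = CLContext()
driving_frame = DrivingModelFrame(cl_ctx)        # 512x256 input, keeps a temporal frame buffer
monitoring_frame = MonitoringModelFrame(cl_ctx)  # 1440x960 single Y-plane input

transform = np.eye(3, dtype=np.float32)
# with `buf` a VisionBuf received from a VisionIpcClient:
# img = driving_frame.prepare(buf, transform.flatten(), None)  # uint8 ndarray sized buf_size
```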

View File

@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:663f58026cdf0b5c8e079a8a1591c8e2b5fa7e5c0f29a882011a17c405af10f4
-size 50320584
+oid sha256:0c896681fd6851de3968433e12f37834429eba265e938cf383200be3e5835cec
+size 49096168

View File

@@ -96,8 +96,6 @@ class Parser:
out_shape=(ModelConstants.LEAD_TRAJ_LEN,ModelConstants.LEAD_WIDTH))
if 'lat_planner_solution' in outs:
self.parse_mdn('lat_planner_solution', outs, in_N=0, out_N=0, out_shape=(ModelConstants.IDX_N,ModelConstants.LAT_PLANNER_SOLUTION_WIDTH))
-if 'desired_curvature' in outs:
-self.parse_mdn('desired_curvature', outs, in_N=0, out_N=0, out_shape=(ModelConstants.DESIRED_CURV_WIDTH,))
for k in ['lead_prob', 'lane_lines_prob', 'meta']:
self.parse_binary_crossentropy(k, outs)
self.parse_categorical_crossentropy('desire_state', outs, out_shape=(ModelConstants.DESIRE_PRED_WIDTH,))

View File

@@ -1,39 +1,12 @@
import onnx
import itertools
import os
import onnx
import sys
import numpy as np
from typing import Any
from openpilot.selfdrive.modeld.runners.runmodel_pyx import RunModel
from openpilot.selfdrive.modeld.runners.ort_helpers import convert_fp16_to_fp32, ORT_TYPES_TO_NP_TYPES
ORT_TYPES_TO_NP_TYPES = {'tensor(float16)': np.float16, 'tensor(float)': np.float32, 'tensor(uint8)': np.uint8}
def attributeproto_fp16_to_fp32(attr):
float32_list = np.frombuffer(attr.raw_data, dtype=np.float16)
attr.data_type = 1
attr.raw_data = float32_list.astype(np.float32).tobytes()
def convert_fp16_to_fp32(onnx_path_or_bytes):
if isinstance(onnx_path_or_bytes, bytes):
model = onnx.load_from_string(onnx_path_or_bytes)
elif isinstance(onnx_path_or_bytes, str):
model = onnx.load(onnx_path_or_bytes)
for i in model.graph.initializer:
if i.data_type == 10:
attributeproto_fp16_to_fp32(i)
for i in itertools.chain(model.graph.input, model.graph.output):
if i.type.tensor_type.elem_type == 10:
i.type.tensor_type.elem_type = 1
for i in model.graph.node:
if i.op_type == 'Cast' and i.attribute[0].i == 10:
i.attribute[0].i = 1
for a in i.attribute:
if hasattr(a, 't'):
if a.t.data_type == 10:
attributeproto_fp16_to_fp32(a.t)
return model.SerializeToString()
def create_ort_session(path, fp16_to_fp32):
os.environ["OMP_NUM_THREADS"] = "4"
@@ -49,14 +22,14 @@ def create_ort_session(path, fp16_to_fp32):
provider = 'OpenVINOExecutionProvider'
elif 'CUDAExecutionProvider' in ort.get_available_providers() and 'ONNXCPU' not in os.environ:
options.intra_op_num_threads = 2
-provider = ('CUDAExecutionProvider', {'cudnn_conv_algo_search': 'DEFAULT'})
+provider = ('CUDAExecutionProvider', {'cudnn_conv_algo_search': 'EXHAUSTIVE'})
else:
options.intra_op_num_threads = 2
options.execution_mode = ort.ExecutionMode.ORT_SEQUENTIAL
options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
provider = 'CPUExecutionProvider'
-model_data = convert_fp16_to_fp32(path) if fp16_to_fp32 else path
+model_data = convert_fp16_to_fp32(onnx.load(path)) if fp16_to_fp32 else path
print("Onnx selected provider: ", [provider], file=sys.stderr)
ort_session = ort.InferenceSession(model_data, options, providers=[provider])
print("Onnx using ", ort_session.get_providers(), file=sys.stderr)

View File

@@ -0,0 +1,36 @@
import onnx
import onnxruntime as ort
import numpy as np
import itertools
ORT_TYPES_TO_NP_TYPES = {'tensor(float16)': np.float16, 'tensor(float)': np.float32, 'tensor(uint8)': np.uint8}
def attributeproto_fp16_to_fp32(attr):
float32_list = np.frombuffer(attr.raw_data, dtype=np.float16)
attr.data_type = 1
attr.raw_data = float32_list.astype(np.float32).tobytes()
def convert_fp16_to_fp32(model):
for i in model.graph.initializer:
if i.data_type == 10:
attributeproto_fp16_to_fp32(i)
for i in itertools.chain(model.graph.input, model.graph.output):
if i.type.tensor_type.elem_type == 10:
i.type.tensor_type.elem_type = 1
for i in model.graph.node:
if i.op_type == 'Cast' and i.attribute[0].i == 10:
i.attribute[0].i = 1
for a in i.attribute:
if hasattr(a, 't'):
if a.t.data_type == 10:
attributeproto_fp16_to_fp32(a.t)
return model.SerializeToString()
def make_onnx_cpu_runner(model_path):
options = ort.SessionOptions()
options.intra_op_num_threads = 4
options.execution_mode = ort.ExecutionMode.ORT_SEQUENTIAL
options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
model_data = convert_fp16_to_fp32(onnx.load(model_path))
return ort.InferenceSession(model_data, options, providers=['CPUExecutionProvider'])
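A short usage sketch of the relocated helpers (the model path is hypothetical). For reference, ONNX `TensorProto` data type 10 is FLOAT16 and 1 is FLOAT, which is what `convert_fp16_to_fp32()` rewrites:

```python
import numpy as np
from openpilot.selfdrive.modeld.runners.ort_helpers import make_onnx_cpu_runner, ORT_TYPES_TO_NP_TYPES

runner = make_onnx_cpu_runner("driving_policy.onnx")  # hypothetical fp16 model file
# build zero-filled feeds from the session's declared inputs
feeds = {i.name: np.zeros([d if isinstance(d, int) else 1 for d in i.shape],
                          dtype=ORT_TYPES_TO_NP_TYPES[i.type])
         for i in runner.get_inputs()}
outputs = runner.run(None, feeds)
```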

View File

@@ -432,6 +432,7 @@ void process_peripheral_state(Panda *panda, PubMaster *pm, bool no_fan_control)
if (ir_pwr != prev_ir_pwr || sm.frame % 100 == 0 || ir_pwr >= 50.0) {
panda->set_ir_pwr(ir_pwr);
Hardware::set_ir_power(ir_pwr);
prev_ir_pwr = ir_pwr;
}
}
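The added `prev_ir_pwr = ir_pwr;` records the value actually sent to the panda; without it, `ir_pwr != prev_ir_pwr` presumably never became false, so the IR LED power was re-commanded on every iteration instead of only on change (or on the periodic `sm.frame % 100` refresh).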

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6267aadf44c81fb003130aff6f8902014a1add7f18c5c563239943ab015974c9
size 356177

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:433633d2042cfe1191e5b71ec4b34edca21877377f2ace8aaf6ae43f7b805171
size 256336

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:64aa7c86bf420adfcf78888bfce7233d68ec30ed77f3f9d3f923f26c59f85c74
size 332371

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:aa2451a6697da9fb1cf305081337782eb944a623b7662fd0a58ebbed5d0395e7
size 268892

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:75766b120bc372bad7b2a80be2c195de9b85cf11142f2a9da348637f15e245db
size 437801

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c692e18506a36eeed22d23d1739af8f26fb3f39f34584fdbcb1ebf3bc6430957
size 308611

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8d62d5774bb4be70ef337179c4bab2347f153fc959155bef917527cc6868bcd3
size 393161

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d57c998320672e771712bd0b3f35c07bd159dff472368b1bf1abc9b6a3541dad
size 334258

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fe9370fb0f7cad039d12ddc2f23dd4ee1c94d5d75faf9b6143ec390c06b28c9d
size 470471

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:917142016b6ed8c800a2c00aecbfdba7eecda3b1a3fcb15a968fa726b5b9241c
size 260324

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d4b0e05a8a1b21e008ed5c1a41f267d596cc35a6d49ec8eff496856b34866360
size 217481

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4d8e2889e70c310a84eff142ee481cfc92888bbe257809be3694032c67277675
size 293076

View File

@@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:83241bc1d9704dcc916e1ef2f48e0851d09cdf8134fb317b06cbda496cbe9808
size 164545

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a4ddb9bfa5f1d835cbf42e2d1a066eca81e29c801046e0ce5ac2f5a3803bfbd1
size 163958

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d60f784a226d144e1ac2c9ceec53b30509cbcceffb5e28f9c1b3a88163958352
size 260706

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dc8216930cf4b36dd080f32b6f52172e08c2c4fe946450162950e799345d1829
size 100106

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cb0195a176d9a917ef87698ef6487547ef19f35e86b4ba12bbecc7b5e6a26295
size 262860

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3ddaa636dc318b33e356787852aad7559c656ce55469c49b7b97fae093dc7889
size 259081

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f282a05c43be27ca4b4d522e5dcdddf8597ae92418328e951f7fef74a5a82cbc
size 268145

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7ef1a55cad5cede01685360813781d30ee576a9f3d201f5caf1144327688fc0b
size 305127

View File

@@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:606cf3036fbcce5be1140953aac7a388a303e6b901b9fe166bc8d5020af76e3f
size 281374

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ec49b06b23bcd461cdcfbc4c94157c775d2dd91633e8f6648e4ce50732224335
size 280914

View File

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:055486d036c48811a0e0c4e461a7a1bdf66e3bef2fb6afdaf88aefb7b63a4f21
size 255708

Some files were not shown because too many files have changed in this diff.