Merge branch 'master' into better-exp

This commit is contained in:
royjr
2026-03-02 02:42:26 -05:00
committed by GitHub
482 changed files with 10447 additions and 23727 deletions

View File

@@ -1,58 +0,0 @@
name: 'automatically cache based on current runner'
inputs:
path:
description: 'path to cache'
required: true
key:
description: 'key'
required: true
restore-keys:
description: 'restore-keys'
required: true
save:
description: 'whether to save the cache'
default: 'true'
required: false
outputs:
cache-hit:
description: 'cache hit occurred'
value: ${{ (contains(runner.name, 'nsc') && steps.ns-cache.outputs.cache-hit) ||
(!contains(runner.name, 'nsc') && inputs.save != 'false' && steps.gha-cache.outputs.cache-hit) ||
(!contains(runner.name, 'nsc') && inputs.save == 'false' && steps.gha-cache-ro.outputs.cache-hit) }}
runs:
using: "composite"
steps:
- name: setup namespace cache
id: ns-cache
if: ${{ contains(runner.name, 'nsc') }}
uses: namespacelabs/nscloud-cache-action@v1
with:
path: ${{ inputs.path }}
- name: setup github cache
id: gha-cache
if: ${{ !contains(runner.name, 'nsc') && inputs.save != 'false' }}
uses: 'actions/cache@v4'
with:
path: ${{ inputs.path }}
key: ${{ inputs.key }}
restore-keys: ${{ inputs.restore-keys }}
- name: setup github cache
id: gha-cache-ro
if: ${{ !contains(runner.name, 'nsc') && inputs.save == 'false' }}
uses: 'actions/cache/restore@v4'
with:
path: ${{ inputs.path }}
key: ${{ inputs.key }}
restore-keys: ${{ inputs.restore-keys }}
# make the directory manually in case we didn't get a hit, so it doesn't fail on future steps
- id: scons-cache-setup
shell: bash
run: |
mkdir -p ${{ inputs.path }}
sudo chmod -R 777 ${{ inputs.path }}
sudo chown -R $USER ${{ inputs.path }}

View File

@@ -5,9 +5,7 @@ on:
workflow_dispatch:
env:
BASE_IMAGE: sunnypilot-base
DOCKER_REGISTRY: ghcr.io/sunnypilot
RUN: docker run --shm-size 2G -v $PWD:/tmp/openpilot -w /tmp/openpilot -e PYTHONPATH=/tmp/openpilot -e NUM_JOBS -e JOB_ID -e GITHUB_ACTION -e GITHUB_REF -e GITHUB_HEAD_REF -e GITHUB_SHA -e GITHUB_REPOSITORY -e GITHUB_RUN_ID -v $GITHUB_WORKSPACE/.ci_cache/scons_cache:/tmp/scons_cache -v $GITHUB_WORKSPACE/.ci_cache/comma_download_cache:/tmp/comma_download_cache -v $GITHUB_WORKSPACE/.ci_cache/openpilot_cache:/tmp/openpilot_cache $DOCKER_REGISTRY/$BASE_IMAGE:latest /bin/bash -c
PYTHONPATH: ${{ github.workspace }}
jobs:
badges:
@@ -20,10 +18,10 @@ jobs:
- uses: actions/checkout@v6
with:
submodules: true
- uses: ./.github/workflows/setup-with-retry
- run: ./tools/op.sh setup
- name: Push badges
run: |
${{ env.RUN }} "python3 selfdrive/ui/translations/create_badges.py"
python3 selfdrive/ui/translations/create_badges.py
rm .gitattributes

View File

@@ -140,7 +140,7 @@ jobs:
run: |
echo '${{ needs.setup.outputs.model_matrix }}' > matrix.json
built=(); while IFS= read -r line; do built+=("$line"); done < <(
ls output | sed -E 's/^model-//' | sed -E 's/-[0-9]+$//' | sed -E 's/ \([^)]*\)//' | awk '{gsub(/^ +| +$/, ""); print}'
find output -maxdepth 1 -name 'model-*' -printf "%f\n" | sed -E 's/^model-//' | sed -E 's/-[0-9]+$//' | sed -E 's/ \([^)]*\)//' | awk '{gsub(/^ +| +$/, ""); print}'
)
jq -c --argjson built "$(printf '%s\n' "${built[@]}" | jq -R . | jq -s .)" \
'map(select(.display_name as $n | ($built | index($n | gsub("^ +| +$"; "")) | not)))' matrix.json > retry_matrix.json
@@ -168,6 +168,7 @@ jobs:
if: ${{ !cancelled() && (needs.get_and_build.result != 'failure' || needs.retry_get_and_build.result == 'success' || (needs.retry_failed_models.outputs.retry_matrix != '[]' && needs.retry_failed_models.outputs.retry_matrix != '')) }}
runs-on: ubuntu-latest
strategy:
fail-fast: false
max-parallel: 1
matrix:
model: ${{ fromJson(needs.setup.outputs.model_matrix) }}

View File

@@ -20,27 +20,23 @@ concurrency:
cancel-in-progress: true
env:
PYTHONWARNINGS: error
BASE_IMAGE: openpilot-base
BUILD: selfdrive/test/docker_build.sh base
RUN: docker run --shm-size 2G -v $PWD:/tmp/openpilot -w /tmp/openpilot -e CI=1 -e PYTHONWARNINGS=error -e FILEREADER_CACHE=1 -e PYTHONPATH=/tmp/openpilot -e NUM_JOBS -e JOB_ID -e GITHUB_ACTION -e GITHUB_REF -e GITHUB_HEAD_REF -e GITHUB_SHA -e GITHUB_REPOSITORY -e GITHUB_RUN_ID -v $GITHUB_WORKSPACE/.ci_cache/scons_cache:/tmp/scons_cache -v $GITHUB_WORKSPACE/.ci_cache/comma_download_cache:/tmp/comma_download_cache -v $GITHUB_WORKSPACE/.ci_cache/openpilot_cache:/tmp/openpilot_cache $BASE_IMAGE /bin/bash -c
CI: 1
jobs:
generate_cereal_artifact:
name: Generate cereal validation artifacts
runs-on: ubuntu-24.04
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v6
with:
submodules: true
- uses: ./.github/workflows/setup-with-retry
- run: ./tools/op.sh setup
- name: Build openpilot
run: ${{ env.RUN }} "scons -j$(nproc) cereal"
run: scons -j$(nproc) cereal
- name: Generate the log file
run: |
${{ env.RUN }} "cereal/messaging/tests/validate_sp_cereal_upstream.py -g -f schema_instances.bin" && \
ls -la
ls -la cereal/messaging/tests
export PYTHONPATH=${{ github.workspace }}
python3 cereal/messaging/tests/validate_sp_cereal_upstream.py -g -f schema_instances.bin
- name: 'Prepare artifact'
run: |
mkdir -p "cereal/messaging/tests/cereal_validations"
@@ -57,20 +53,26 @@ jobs:
runs-on: ubuntu-24.04
needs: generate_cereal_artifact
steps:
- uses: actions/checkout@v4
- name: Checkout sunnypilot
uses: actions/checkout@v6
- name: Checkout upstream openpilot
uses: actions/checkout@v6
with:
repository: 'commaai/openpilot'
path: openpilot
submodules: true
ref: "refs/heads/master"
- uses: ./.github/workflows/setup-with-retry
- run: ./tools/op.sh setup
- name: Build openpilot
run: ${{ env.RUN }} "scons -j$(nproc) cereal"
working-directory: openpilot
run: scons -j$(nproc) cereal
- name: Download build artifacts
uses: actions/download-artifact@v4
with:
name: cereal_validations
path: cereal/messaging/tests/cereal_validations
path: openpilot/cereal/messaging/tests/cereal_validations
- name: 'Run the validation'
run: |
chmod +x cereal/messaging/tests/cereal_validations/validate_sp_cereal_upstream.py
${{ env.RUN }} "cereal/messaging/tests/cereal_validations/validate_sp_cereal_upstream.py -r -f cereal/messaging/tests/cereal_validations/schema_instances.bin"
export PYTHONPATH=${{ github.workspace }}/openpilot
chmod +x openpilot/cereal/messaging/tests/cereal_validations/validate_sp_cereal_upstream.py
python3 openpilot/cereal/messaging/tests/cereal_validations/validate_sp_cereal_upstream.py -r -f openpilot/cereal/messaging/tests/cereal_validations/schema_instances.bin

View File

@@ -1,101 +0,0 @@
name: weekly CI test report
on:
schedule:
- cron: '37 9 * * 1' # 9:37AM UTC -> 2:37AM PST every monday
workflow_dispatch:
inputs:
ci_runs:
description: 'The amount of runs to trigger in CI test report'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
env:
CI_RUNS: ${{ github.event.inputs.ci_runs || '50' }}
jobs:
setup:
if: github.repository == 'sunnypilot/sunnypilot'
runs-on: ubuntu-latest
outputs:
ci_runs: ${{ steps.ci_runs_setup.outputs.matrix }}
steps:
- id: ci_runs_setup
name: CI_RUNS=${{ env.CI_RUNS }}
run: |
matrix=$(python3 -c "import json; print(json.dumps({ 'run_number' : list(range(${{ env.CI_RUNS }})) }))")
echo "matrix=$matrix" >> $GITHUB_OUTPUT
ci_matrix_run:
needs: [ setup ]
strategy:
fail-fast: false
matrix: ${{fromJSON(needs.setup.outputs.ci_runs)}}
uses: sunnypilot/sunnypilot/.github/workflows/ci_weekly_run.yaml@master
with:
run_number: ${{ matrix.run_number }}
report:
needs: [ci_matrix_run]
runs-on: ubuntu-latest
if: always() && github.repository == 'commaai/openpilot'
steps:
- name: Get job results
uses: actions/github-script@v8
id: get-job-results
with:
script: |
const jobs = await github
.paginate("GET /repos/{owner}/{repo}/actions/runs/{run_id}/attempts/{attempt}/jobs", {
owner: "commaai",
repo: "${{ github.event.repository.name }}",
run_id: "${{ github.run_id }}",
attempt: "${{ github.run_attempt }}",
})
var report = {}
jobs.slice(1, jobs.length-1).forEach(job => {
if (job.conclusion === "skipped") return;
const jobName = job.name.split(" / ")[2];
const runRegex = /\((.*?)\)/;
const run = job.name.match(runRegex)[1];
report[jobName] = report[jobName] || { successes: [], failures: [], canceled: [] };
switch (job.conclusion) {
case "success":
report[jobName].successes.push({ "run_number": run, "link": job.html_url}); break;
case "failure":
report[jobName].failures.push({ "run_number": run, "link": job.html_url }); break;
case "canceled":
report[jobName].canceled.push({ "run_number": run, "link": job.html_url }); break;
}
});
return JSON.stringify({"jobs": report});
- name: Add job results to summary
env:
JOB_RESULTS: ${{ fromJSON(steps.get-job-results.outputs.result) }}
run: |
cat <<EOF >> template.html
<table>
<thead>
<tr>
<th></th>
<th>Job</th>
<th>✅ Passing</th>
<th>❌ Failure Details</th>
</tr>
</thead>
<tbody>
{% for key in jobs.keys() %}<tr>
<td>{% for i in range(5) %}{% if i+1 <= (5 * jobs[key]["successes"]|length // ${{ env.CI_RUNS }}) %}🟩{% else %}🟥{% endif %}{% endfor%}</td>
<td>{{ key }}</td>
<td>{{ 100 * jobs[key]["successes"]|length // ${{ env.CI_RUNS }} }}%</td>
<td>{% if jobs[key]["failures"]|length > 0 %}<details>{% for failure in jobs[key]["failures"] %}<a href="{{ failure['link'] }}">Log for run #{{ failure['run_number'] }}</a><br>{% endfor %}</details>{% else %}{% endif %}</td>
</td>
</tr>{% endfor %}
</table>
EOF
pip install jinja2-cli
echo $JOB_RESULTS | jinja2 template.html > report.html
echo "# CI Test Report - ${{ env.CI_RUNS }} Runs" >> $GITHUB_STEP_SUMMARY
cat report.html >> $GITHUB_STEP_SUMMARY

View File

@@ -1,17 +0,0 @@
name: weekly CI test run
on:
workflow_call:
inputs:
run_number:
required: true
type: string
concurrency:
group: ci-run-${{ inputs.run_number }}-${{ github.ref }}
cancel-in-progress: true
jobs:
tests:
uses: sunnypilot/sunnypilot/.github/workflows/tests.yaml@master
with:
run_number: ${{ inputs.run_number }}

View File

@@ -1,21 +0,0 @@
name: 'compile openpilot'
runs:
using: "composite"
steps:
- shell: bash
name: Build openpilot with all flags
run: |
${{ env.RUN }} "scons -j$(nproc)"
${{ env.RUN }} "release/check-dirty.sh"
- shell: bash
name: Cleanup scons cache and rebuild
run: |
${{ env.RUN }} "rm -rf /tmp/scons_cache/* && \
scons -j$(nproc) --cache-populate"
- name: Save scons cache
uses: actions/cache/save@v4
if: github.ref == 'refs/heads/master'
with:
path: .ci_cache/scons_cache
key: scons-${{ runner.arch }}-${{ env.CACHE_COMMIT_DATE }}-${{ github.sha }}

View File

@@ -1,151 +0,0 @@
name: "mici raylib ui preview"
on:
push:
branches:
- master
pull_request_target:
types: [assigned, opened, synchronize, reopened, edited]
branches:
- 'master'
paths:
- 'selfdrive/assets/**'
- 'selfdrive/ui/**'
- 'system/ui/**'
workflow_dispatch:
env:
UI_JOB_NAME: "Create mici raylib UI Report"
REPORT_NAME: ${{ github.event_name == 'push' && github.ref == 'refs/heads/master' && 'master' || github.event.number }}
SHA: ${{ github.event_name == 'push' && github.ref == 'refs/heads/master' && github.sha || github.event.pull_request.head.sha }}
BRANCH_NAME: "openpilot/pr-${{ github.event.number }}-mici-raylib-ui"
MASTER_BRANCH_NAME: "openpilot_master_ui_mici_raylib"
# All report files are pushed here
REPORT_FILES_BRANCH_NAME: "mici-raylib-ui-reports"
jobs:
preview:
if: github.repository == 'sunnypilot/sunnypilot'
name: preview
runs-on: ubuntu-latest
timeout-minutes: 20
permissions:
contents: read
pull-requests: write
actions: read
steps:
- uses: actions/checkout@v6
with:
submodules: true
- name: Waiting for ui generation to end
uses: lewagon/wait-on-check-action@v1.3.4
with:
ref: ${{ env.SHA }}
check-name: ${{ env.UI_JOB_NAME }}
repo-token: ${{ secrets.GITHUB_TOKEN }}
allowed-conclusions: success
wait-interval: 20
- name: Getting workflow run ID
id: get_run_id
run: |
echo "run_id=$(curl https://api.github.com/repos/${{ github.repository }}/commits/${{ env.SHA }}/check-runs | jq -r '.check_runs[] | select(.name == "${{ env.UI_JOB_NAME }}") | .html_url | capture("(?<number>[0-9]+)") | .number')" >> $GITHUB_OUTPUT
- name: Getting proposed ui # filename: pr_ui/mici_ui_replay.mp4
id: download-artifact
uses: dawidd6/action-download-artifact@v6
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
run_id: ${{ steps.get_run_id.outputs.run_id }}
search_artifacts: true
name: mici-raylib-report-1-${{ env.REPORT_NAME }}
path: ${{ github.workspace }}/pr_ui
- name: Getting master ui # filename: master_ui_raylib/mici_ui_replay.mp4
uses: actions/checkout@v6
with:
repository: sunnypilot/ci-artifacts
ssh-key: ${{ secrets.CI_ARTIFACTS_DEPLOY_KEY }}
path: ${{ github.workspace }}/master_ui_raylib
ref: ${{ env.MASTER_BRANCH_NAME }}
- name: Saving new master ui
if: github.ref == 'refs/heads/master' && github.event_name == 'push'
working-directory: ${{ github.workspace }}/master_ui_raylib
run: |
git checkout --orphan=new_master_ui_mici_raylib
git rm -rf *
git branch -D ${{ env.MASTER_BRANCH_NAME }}
git branch -m ${{ env.MASTER_BRANCH_NAME }}
git config user.name "GitHub Actions Bot"
git config user.email "<>"
mv ${{ github.workspace }}/pr_ui/* .
git add .
git commit -m "mici raylib video for commit ${{ env.SHA }}"
git push origin ${{ env.MASTER_BRANCH_NAME }} --force
- name: Setup FFmpeg
uses: AnimMouse/setup-ffmpeg@ae28d57dabbb148eff63170b6bf7f2b60062cbae
- name: Finding diff
if: github.event_name == 'pull_request_target'
id: find_diff
run: |
# Find the video file from PR
pr_video="${{ github.workspace }}/pr_ui/mici_ui_replay_proposed.mp4"
mv "${{ github.workspace }}/pr_ui/mici_ui_replay.mp4" "$pr_video"
master_video="${{ github.workspace }}/pr_ui/mici_ui_replay_master.mp4"
mv "${{ github.workspace }}/master_ui_raylib/mici_ui_replay.mp4" "$master_video"
# Run report
export PYTHONPATH=${{ github.workspace }}
baseurl="https://github.com/sunnypilot/ci-artifacts/raw/refs/heads/${{ env.BRANCH_NAME }}"
diff_exit_code=0
python3 ${{ github.workspace }}/selfdrive/ui/tests/diff/diff.py "${{ github.workspace }}/pr_ui/mici_ui_replay_master.mp4" "${{ github.workspace }}/pr_ui/mici_ui_replay_proposed.mp4" "diff.html" --basedir "$baseurl" --no-open || diff_exit_code=$?
# Copy diff report files
cp ${{ github.workspace }}/selfdrive/ui/tests/diff/report/diff.html ${{ github.workspace }}/pr_ui/
cp ${{ github.workspace }}/selfdrive/ui/tests/diff/report/diff.mp4 ${{ github.workspace }}/pr_ui/
REPORT_URL="https://sunnypilot.github.io/ci-artifacts/diff_pr_${{ github.event.number }}.html"
if [ $diff_exit_code -eq 0 ]; then
DIFF="✅ Videos are identical! [View Diff Report]($REPORT_URL)"
else
DIFF="❌ <strong>Videos differ!</strong> [View Diff Report]($REPORT_URL)"
fi
echo "DIFF=$DIFF" >> "$GITHUB_OUTPUT"
- name: Saving proposed ui
if: github.event_name == 'pull_request_target'
working-directory: ${{ github.workspace }}/master_ui_raylib
run: |
# Overwrite PR branch w/ proposed ui, and master ui at this point in time for future reference
git config user.name "GitHub Actions Bot"
git config user.email "<>"
git checkout --orphan=${{ env.BRANCH_NAME }}
git rm -rf *
mv ${{ github.workspace }}/pr_ui/* .
git add .
git commit -m "mici raylib video for PR #${{ github.event.number }}"
git push origin ${{ env.BRANCH_NAME }} --force
# Append diff report to report files branch
git fetch origin ${{ env.REPORT_FILES_BRANCH_NAME }}
git checkout ${{ env.REPORT_FILES_BRANCH_NAME }}
cp ${{ github.workspace }}/selfdrive/ui/tests/diff/report/diff.html diff_pr_${{ github.event.number }}.html
git add diff_pr_${{ github.event.number }}.html
git commit -m "mici raylib ui diff report for PR #${{ github.event.number }}" || echo "No changes to commit"
git push origin ${{ env.REPORT_FILES_BRANCH_NAME }}
- name: Comment Video on PR
if: github.event_name == 'pull_request_target'
uses: thollander/actions-comment-pull-request@v2
with:
message: |
<!-- _(run_id_video_mici_raylib **${{ github.run_id }}**)_ -->
## mici raylib UI Preview
${{ steps.find_diff.outputs.DIFF }}
comment_tag: run_id_video_mici_raylib
pr_number: ${{ github.event.number }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -17,6 +17,8 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v6
with:
submodules: true
- name: Checkout master
uses: actions/checkout@v6
with:
@@ -25,14 +27,12 @@ jobs:
- run: git lfs pull
- run: cd base && git lfs pull
- run: pip install onnx
- name: scripts/reporter.py
id: report
run: |
echo "content<<EOF" >> $GITHUB_OUTPUT
echo "## Model Review" >> $GITHUB_OUTPUT
MASTER_PATH=${{ github.workspace }}/base python scripts/reporter.py >> $GITHUB_OUTPUT
PYTHONPATH=${{ github.workspace }} MASTER_PATH=${{ github.workspace }}/base python scripts/reporter.py >> $GITHUB_OUTPUT
echo "EOF" >> $GITHUB_OUTPUT
- name: Post model report comment

View File

@@ -6,7 +6,7 @@ on:
env:
DOCKER_LOGIN: docker login ghcr.io -u ${{ github.actor }} -p ${{ secrets.GITHUB_TOKEN }}
BUILD: release/ci/docker_build_sp.sh prebuilt
BUILD: release/ci/docker_build_sp.sh
jobs:
build_prebuilt:
@@ -28,7 +28,7 @@ jobs:
wait-interval: 30
running-workflow-name: 'build prebuilt'
repo-token: ${{ secrets.GITHUB_TOKEN }}
check-regexp: ^((?!.*(build master-ci).*).)*$
check-regexp: ^((?!.*(build master-ci|create badges).*).)*$
- uses: actions/checkout@v6
with:
submodules: true

View File

@@ -1,175 +0,0 @@
name: "raylib ui preview"
on:
push:
branches:
- master
pull_request_target:
types: [assigned, opened, synchronize, reopened, edited]
branches:
- 'master'
paths:
- 'selfdrive/assets/**'
- 'selfdrive/ui/**'
- 'system/ui/**'
workflow_dispatch:
env:
UI_JOB_NAME: "Create raylib UI Report"
REPORT_NAME: ${{ github.event_name == 'push' && github.ref == 'refs/heads/master' && 'master' || github.event.number }}
SHA: ${{ github.event_name == 'push' && github.ref == 'refs/heads/master' && github.sha || github.event.pull_request.head.sha }}
BRANCH_NAME: "openpilot/pr-${{ github.event.number }}-raylib-ui"
jobs:
preview:
if: github.repository == 'sunnypilot/sunnypilot'
name: preview
runs-on: ubuntu-latest
timeout-minutes: 20
permissions:
contents: read
pull-requests: write
actions: read
steps:
- name: Waiting for ui generation to start
run: sleep 30
- name: Waiting for ui generation to end
uses: lewagon/wait-on-check-action@v1.3.4
with:
ref: ${{ env.SHA }}
check-name: ${{ env.UI_JOB_NAME }}
repo-token: ${{ secrets.GITHUB_TOKEN }}
allowed-conclusions: success
wait-interval: 20
- name: Getting workflow run ID
id: get_run_id
run: |
echo "run_id=$(curl https://api.github.com/repos/${{ github.repository }}/commits/${{ env.SHA }}/check-runs | jq -r '.check_runs[] | select(.name == "${{ env.UI_JOB_NAME }}") | .html_url | capture("(?<number>[0-9]+)") | .number')" >> $GITHUB_OUTPUT
- name: Getting proposed ui
id: download-artifact
uses: dawidd6/action-download-artifact@v6
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
run_id: ${{ steps.get_run_id.outputs.run_id }}
search_artifacts: true
name: raylib-report-1-${{ env.REPORT_NAME }}
path: ${{ github.workspace }}/pr_ui
- name: Getting master ui
uses: actions/checkout@v6
with:
repository: sunnypilot/ci-artifacts
ssh-key: ${{ secrets.CI_ARTIFACTS_DEPLOY_KEY }}
path: ${{ github.workspace }}/master_ui_raylib
ref: openpilot_master_ui_raylib
- name: Saving new master ui
if: github.ref == 'refs/heads/master' && github.event_name == 'push'
working-directory: ${{ github.workspace }}/master_ui_raylib
run: |
git checkout --orphan=new_master_ui_raylib
git rm -rf *
git branch -D openpilot_master_ui_raylib
git branch -m openpilot_master_ui_raylib
git config user.name "GitHub Actions Bot"
git config user.email "<>"
mv ${{ github.workspace }}/pr_ui/*.png .
git add .
git commit -m "raylib screenshots for commit ${{ env.SHA }}"
git push origin openpilot_master_ui_raylib --force
- name: Finding diff
if: github.event_name == 'pull_request_target'
id: find_diff
run: >-
sudo apt-get update && sudo apt-get install -y imagemagick
scenes=$(find ${{ github.workspace }}/pr_ui/*.png -type f -printf "%f\n" | cut -d '.' -f 1 | grep -v 'pair_device')
A=($scenes)
DIFF=""
TABLE="<details><summary>All Screenshots</summary>"
TABLE="${TABLE}<table>"
for ((i=0; i<${#A[*]}; i=i+1));
do
# Check if the master file exists
if [ ! -f "${{ github.workspace }}/master_ui_raylib/${A[$i]}.png" ]; then
# This is a new file in PR UI that doesn't exist in master
DIFF="${DIFF}<details open>"
DIFF="${DIFF}<summary>${A[$i]} : \$\${\\color{cyan}\\text{NEW}}\$\$</summary>"
DIFF="${DIFF}<table>"
DIFF="${DIFF}<tr>"
DIFF="${DIFF} <td> <img src=\"https://raw.githubusercontent.com/sunnypilot/ci-artifacts/${{ env.BRANCH_NAME }}/${A[$i]}.png\"> </td>"
DIFF="${DIFF}</tr>"
DIFF="${DIFF}</table>"
DIFF="${DIFF}</details>"
elif ! compare -fuzz 2% -highlight-color DeepSkyBlue1 -lowlight-color Black -compose Src ${{ github.workspace }}/master_ui_raylib/${A[$i]}.png ${{ github.workspace }}/pr_ui/${A[$i]}.png ${{ github.workspace }}/pr_ui/${A[$i]}_diff.png; then
convert ${{ github.workspace }}/pr_ui/${A[$i]}_diff.png -transparent black mask.png
composite mask.png ${{ github.workspace }}/master_ui_raylib/${A[$i]}.png composite_diff.png
convert -delay 100 ${{ github.workspace }}/master_ui_raylib/${A[$i]}.png composite_diff.png -loop 0 ${{ github.workspace }}/pr_ui/${A[$i]}_diff.gif
mv ${{ github.workspace }}/master_ui_raylib/${A[$i]}.png ${{ github.workspace }}/pr_ui/${A[$i]}_master_ref.png
DIFF="${DIFF}<details open>"
DIFF="${DIFF}<summary>${A[$i]} : \$\${\\color{red}\\text{DIFFERENT}}\$\$</summary>"
DIFF="${DIFF}<table>"
DIFF="${DIFF}<tr>"
DIFF="${DIFF} <td> master <img src=\"https://raw.githubusercontent.com/sunnypilot/ci-artifacts/${{ env.BRANCH_NAME }}/${A[$i]}_master_ref.png\"> </td>"
DIFF="${DIFF} <td> proposed <img src=\"https://raw.githubusercontent.com/sunnypilot/ci-artifacts/${{ env.BRANCH_NAME }}/${A[$i]}.png\"> </td>"
DIFF="${DIFF}</tr>"
DIFF="${DIFF}<tr>"
DIFF="${DIFF} <td> diff <img src=\"https://raw.githubusercontent.com/sunnypilot/ci-artifacts/${{ env.BRANCH_NAME }}/${A[$i]}_diff.png\"> </td>"
DIFF="${DIFF} <td> composite diff <img src=\"https://raw.githubusercontent.com/sunnypilot/ci-artifacts/${{ env.BRANCH_NAME }}/${A[$i]}_diff.gif\"> </td>"
DIFF="${DIFF}</tr>"
DIFF="${DIFF}</table>"
DIFF="${DIFF}</details>"
else
rm -f ${{ github.workspace }}/pr_ui/${A[$i]}_diff.png
fi
INDEX=$(($i % 2))
if [[ $INDEX -eq 0 ]]; then
TABLE="${TABLE}<tr>"
fi
TABLE="${TABLE} <td> <img src=\"https://raw.githubusercontent.com/sunnypilot/ci-artifacts/${{ env.BRANCH_NAME }}/${A[$i]}.png\"> </td>"
if [[ $INDEX -eq 1 || $(($i + 1)) -eq ${#A[*]} ]]; then
TABLE="${TABLE}</tr>"
fi
done
TABLE="${TABLE}</table></details>"
echo "DIFF=$DIFF$TABLE" >> "$GITHUB_OUTPUT"
- name: Saving proposed ui
if: github.event_name == 'pull_request_target'
working-directory: ${{ github.workspace }}/master_ui_raylib
run: |
git config user.name "GitHub Actions Bot"
git config user.email "<>"
git checkout --orphan=${{ env.BRANCH_NAME }}
git rm -rf *
mv ${{ github.workspace }}/pr_ui/* .
git add .
git commit -m "raylib screenshots for PR #${{ github.event.number }}"
git push origin ${{ env.BRANCH_NAME }} --force
- name: Comment Screenshots on PR
if: github.event_name == 'pull_request_target'
uses: thollander/actions-comment-pull-request@v2
with:
message: |
<!-- _(run_id_screenshots_raylib **${{ github.run_id }}**)_ -->
## raylib UI Preview
${{ steps.find_diff.outputs.DIFF }}
comment_tag: run_id_screenshots_raylib
pr_number: ${{ github.event.number }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -7,20 +7,12 @@ on:
jobs:
build___nightly:
name: build __nightly
env:
ImageOS: ubuntu24
container:
image: ghcr.io/sunnypilot/sunnypilot-base:latest
runs-on: ubuntu-latest
if: github.repository == 'sunnypilot/sunnypilot'
permissions:
checks: read
contents: write
steps:
- name: Install wait-on-check-action dependencies
run: |
sudo apt-get update
sudo apt-get install -y libyaml-dev
- name: Wait for green check mark
if: ${{ github.event_name == 'schedule' }}
uses: lewagon/wait-on-check-action@ccfb013c15c8afb7bf2b7c028fb74dc5a068cccc
@@ -29,14 +21,11 @@ jobs:
wait-interval: 30
running-workflow-name: 'build __nightly'
repo-token: ${{ secrets.GITHUB_TOKEN }}
check-regexp: ^((?!.*(build prebuilt).*).)*$
check-regexp: ^((?!.*(build prebuilt|create badges).*).)*$
- uses: actions/checkout@v6
with:
submodules: true
fetch-depth: 0
- name: Pull LFS
run: |
git config --global --add safe.directory '*'
git lfs pull
- run: ./tools/op.sh setup
- name: Push __nightly
run: BRANCH=__nightly release/build_stripped.sh

View File

@@ -6,9 +6,7 @@ on:
workflow_dispatch:
env:
BASE_IMAGE: sunnypilot-base
BUILD: release/ci/docker_build_sp.sh base
RUN: docker run --shm-size 2G -v $PWD:/tmp/openpilot -w /tmp/openpilot -e CI=1 -e PYTHONWARNINGS=error -e PYTHONPATH=/tmp/openpilot -e NUM_JOBS -e JOB_ID -e GITHUB_ACTION -e GITHUB_REF -e GITHUB_HEAD_REF -e GITHUB_SHA -e GITHUB_REPOSITORY -e GITHUB_RUN_ID -v $GITHUB_WORKSPACE/.ci_cache/scons_cache:/tmp/scons_cache -v $GITHUB_WORKSPACE/.ci_cache/comma_download_cache:/tmp/comma_download_cache -v $GITHUB_WORKSPACE/.ci_cache/openpilot_cache:/tmp/openpilot_cache $BASE_IMAGE /bin/bash -c
PYTHONPATH: ${{ github.workspace }}
jobs:
update_translations:
@@ -16,10 +14,11 @@ jobs:
if: github.repository == 'sunnypilot/sunnypilot'
steps:
- uses: actions/checkout@v6
- uses: ./.github/workflows/setup-with-retry
with:
submodules: true
- run: ./tools/op.sh setup
- name: Update translations
run: |
${{ env.RUN }} "python3 selfdrive/ui/update_translations.py --vanish"
run: python3 selfdrive/ui/update_translations.py --vanish
- name: Create Pull Request
uses: peter-evans/create-pull-request@c0f553fe549906ede9cf27b5156039d195d2ece0
with:
@@ -35,34 +34,44 @@ jobs:
package_updates:
name: package_updates
runs-on: ubuntu-latest
container:
image: ghcr.io/sunnypilot/sunnypilot-base:latest
if: github.repository == 'sunnypilot/sunnypilot'
steps:
- uses: actions/checkout@v6
with:
submodules: true
- run: ./tools/op.sh setup
- name: uv lock
if: github.repository == 'commaai/openpilot'
run: |
python3 -m ensurepip --upgrade
pip3 install uv
uv lock --upgrade
run: uv lock --upgrade
- name: uv pip tree
id: pip_tree
run: |
echo 'PIP_TREE<<EOF' >> $GITHUB_OUTPUT
uv pip tree >> $GITHUB_OUTPUT
echo 'EOF' >> $GITHUB_OUTPUT
- name: venv size
id: venv_size
run: |
echo 'VENV_SIZE<<EOF' >> $GITHUB_OUTPUT
echo "Total: $(du -sh .venv | cut -f1)" >> $GITHUB_OUTPUT
echo "" >> $GITHUB_OUTPUT
echo "Top 10 by size:" >> $GITHUB_OUTPUT
du -sh .venv/lib/python*/site-packages/* 2>/dev/null \
| grep -v '\.dist-info' \
| grep -v '__pycache__' \
| sort -rh \
| head -10 \
| while IFS=$'\t' read size path; do echo "$size ${path##*/}"; done >> $GITHUB_OUTPUT
echo 'EOF' >> $GITHUB_OUTPUT
- name: bump submodules
run: |
git config --global --add safe.directory '*'
git config submodule.msgq.update none
git config submodule.rednose_repo.update none
git config submodule.teleoprtc_repo.update none
git config submodule.tinygrad.update none
git submodule update --remote
git add .
- name: update car docs
run: |
export PYTHONPATH="$PWD"
scons -j$(nproc) --minimal opendbc_repo
python selfdrive/car/docs.py
git add docs/CARS.md
@@ -80,6 +89,12 @@ jobs:
Automatic PR from repo-maintenance -> package_updates
```
$ du -sh .venv && du -sh .venv/lib/python*/site-packages/* | sort -rh | head -10
${{ steps.venv_size.outputs.VENV_SIZE }}
```
```
$ uv pip tree
${{ steps.pip_tree.outputs.PIP_TREE }}
```
labels: bot

View File

@@ -1,52 +0,0 @@
name: 'openpilot env setup, with retry on failure'
inputs:
docker_hub_pat:
description: 'Auth token for Docker Hub, required for BuildJet jobs'
required: false
default: ''
sleep_time:
description: 'Time to sleep between retries'
required: false
default: 30
outputs:
duration:
description: 'Duration of the setup process in seconds'
value: ${{ steps.get_duration.outputs.duration }}
runs:
using: "composite"
steps:
- id: start_time
shell: bash
run: echo "START_TIME=$(date +%s)" >> $GITHUB_ENV
- id: setup1
uses: ./.github/workflows/setup
continue-on-error: true
with:
is_retried: true
- if: steps.setup1.outcome == 'failure'
shell: bash
run: sleep ${{ inputs.sleep_time }}
- id: setup2
if: steps.setup1.outcome == 'failure'
uses: ./.github/workflows/setup
continue-on-error: true
with:
is_retried: true
- if: steps.setup2.outcome == 'failure'
shell: bash
run: sleep ${{ inputs.sleep_time }}
- id: setup3
if: steps.setup2.outcome == 'failure'
uses: ./.github/workflows/setup
with:
is_retried: true
- id: get_duration
shell: bash
run: |
END_TIME=$(date +%s)
DURATION=$((END_TIME - START_TIME))
echo "Total duration: $DURATION seconds"
echo "duration=$DURATION" >> $GITHUB_OUTPUT

View File

@@ -1,56 +0,0 @@
name: 'openpilot env setup'
inputs:
is_retried:
description: 'A mock param that asserts that we use the setup-with-retry instead of this action directly'
required: false
default: 'false'
runs:
using: "composite"
steps:
# assert that this action is retried using the setup-with-retry
- shell: bash
if: ${{ inputs.is_retried == 'false' }}
run: |
echo "You should not run this action directly. Use setup-with-retry instead"
exit 1
- shell: bash
name: No retries!
run: |
if [ "${{ github.run_attempt }}" -gt ${{ github.event.pull_request.head.repo.fork && github.event.pull_request.author_association == 'NONE' && 2 || 1}} ]; then
echo -e "\033[0;31m##################################################"
echo -e "\033[0;31m Retries not allowed! Fix the flaky test! "
echo -e "\033[0;31m##################################################\033[0m"
exit 1
fi
# do this after checkout to ensure our custom LFS config is used to pull from GitLab
- shell: bash
run: git lfs pull
# build cache
- id: date
shell: bash
run: echo "CACHE_COMMIT_DATE=$(git log -1 --pretty='format:%cd' --date=format:'%Y-%m-%d-%H:%M')" >> $GITHUB_ENV
- shell: bash
run: echo "$CACHE_COMMIT_DATE"
- id: scons-cache
uses: ./.github/workflows/auto-cache
with:
path: .ci_cache/scons_cache
key: scons-${{ runner.arch }}-${{ env.CACHE_COMMIT_DATE }}-${{ github.sha }}
restore-keys: |
scons-${{ runner.arch }}-${{ env.CACHE_COMMIT_DATE }}
scons-${{ runner.arch }}
# as suggested here: https://github.com/moby/moby/issues/32816#issuecomment-910030001
- id: normalize-file-permissions
shell: bash
name: Normalize file permissions to ensure a consistent docker build cache
run: |
find . -type f -executable -not -perm 755 -exec chmod 755 {} \;
find . -type f -not -executable -not -perm 644 -exec chmod 644 {} \;
# build our docker image
- shell: bash
run: eval ${{ env.BUILD }}

View File

@@ -173,9 +173,18 @@ jobs:
echo "Compiling: $onnx_file -> $output_file"
QCOM=1 python3 "${{ env.TINYGRAD_PATH }}/examples/openpilot/compile3.py" "$onnx_file" "$output_file"
QCOM=1 python3 "${{ env.MODELS_DIR }}/../get_model_metadata.py" "$onnx_file" || true
DEV=QCOM FLOAT16=1 NOLOCALS=1 JIT_BATCH_SIZE=0 python3 "${{ env.MODELS_DIR }}/../get_model_metadata.py" "$onnx_file" || true
done
- name: Validate Model Outputs
run: |
source /etc/profile
export UV_PROJECT_ENVIRONMENT=${HOME}/venv
export VIRTUAL_ENV=$UV_PROJECT_ENVIRONMENT
python3 "${{ github.workspace }}/release/ci/model_generator.py" \
--validate-only \
--model-dir "${{ env.MODELS_DIR }}"
- name: Prepare Output
run: |
sudo rm -rf ${{ env.OUTPUT_DIR }}
@@ -184,7 +193,6 @@ jobs:
# Copy the model files
rsync -avm \
--include='*.dlc' \
--include='*.thneed' \
--include='*.pkl' \
--include='*.onnx' \
--exclude='*' \

View File

@@ -180,8 +180,6 @@ jobs:
./release/release_files.py | sort | uniq | rsync -rRl${RUNNER_DEBUG:+v} --files-from=- . $BUILD_DIR/
cd $BUILD_DIR
sed -i '/from .board.jungle import PandaJungle, PandaJungleDFU/s/^/#/' panda/__init__.py
echo "Building sunnypilot's modeld..."
scons -j$(nproc) cache_dir=${{env.SCONS_CACHE_DIR}} --minimal sunnypilot/modeld
echo "Building sunnypilot's modeld_v2..."
scons -j$(nproc) cache_dir=${{env.SCONS_CACHE_DIR}} --minimal sunnypilot/modeld_v2
echo "Building sunnypilot's locationd..."
@@ -219,7 +217,6 @@ jobs:
--exclude='**/.venv/' \
--exclude='selfdrive/modeld/models/driving_vision.onnx' \
--exclude='selfdrive/modeld/models/driving_policy.onnx' \
--exclude='sunnypilot/modeld*/models/supercombo.onnx' \
--exclude='third_party/*x86*' \
--exclude='third_party/*Darwin*' \
--delete-excluded \

View File

@@ -241,10 +241,3 @@ jobs:
gh run watch "$RUN_ID"
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Trigger prebuilt workflow
if: success() && steps.push-changes.outputs.has_changes == 'true'
run: |
gh workflow run sunnypilot-build-prebuilt.yaml --ref "${{ inputs.target_branch || env.DEFAULT_TARGET_BRANCH }}"
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -18,13 +18,8 @@ concurrency:
cancel-in-progress: true
env:
PYTHONWARNINGS: error
BASE_IMAGE: sunnypilot-base
DOCKER_LOGIN: docker login ghcr.io -u ${{ github.actor }} -p ${{ secrets.GITHUB_TOKEN }}
BUILD: release/ci/docker_build_sp.sh base
RUN: docker run --shm-size 2G -v $PWD:/tmp/openpilot -w /tmp/openpilot -e CI=1 -e PYTHONWARNINGS=error -e PYTHONPATH=/tmp/openpilot -e NUM_JOBS -e JOB_ID -e GITHUB_ACTION -e GITHUB_REF -e GITHUB_HEAD_REF -e GITHUB_SHA -e GITHUB_REPOSITORY -e GITHUB_RUN_ID -v $GITHUB_WORKSPACE/.ci_cache/scons_cache:/tmp/scons_cache -v $GITHUB_WORKSPACE/.ci_cache/comma_download_cache:/tmp/comma_download_cache -v $GITHUB_WORKSPACE/.ci_cache/openpilot_cache:/tmp/openpilot_cache $BASE_IMAGE /bin/bash -c
CI: 1
PYTHONPATH: ${{ github.workspace }}
PYTEST: pytest --continue-on-collection-errors --durations=0 -n logical
jobs:
@@ -34,10 +29,11 @@ jobs:
(github.repository == 'commaai/openpilot') &&
((github.event_name != 'pull_request') ||
(github.event.pull_request.head.repo.full_name == 'commaai/openpilot'))
&& fromJSON('["namespace-profile-amd64-8x16", "namespace-experiments:docker.builds.local-cache=separate"]')
&& fromJSON('["namespace-profile-amd64-8x16"]')
|| fromJSON('["ubuntu-24.04"]') }}
env:
STRIPPED_DIR: /tmp/releasepilot
PYTHONPATH: /tmp/releasepilot
steps:
- uses: actions/checkout@v6
with:
@@ -51,17 +47,15 @@ jobs:
- name: Build devel
timeout-minutes: 1
run: TARGET_DIR=$STRIPPED_DIR release/build_stripped.sh
- uses: ./.github/workflows/setup-with-retry
- run: ./tools/op.sh setup
- name: Build openpilot and run checks
timeout-minutes: ${{ ((steps.restore-scons-cache.outputs.cache-hit == 'true') && 10 || 30) }} # allow more time when we missed the scons cache
run: |
cd $STRIPPED_DIR
${{ env.RUN }} "python3 system/manager/build.py"
timeout-minutes: 30
working-directory: ${{ env.STRIPPED_DIR }}
run: python3 system/manager/build.py
- name: Run tests
timeout-minutes: 1
run: |
cd $STRIPPED_DIR
${{ env.RUN }} "release/check-dirty.sh"
working-directory: ${{ env.STRIPPED_DIR }}
run: release/check-dirty.sh
- name: Check submodules
if: github.repository == 'sunnypilot/sunnypilot'
timeout-minutes: 3
@@ -83,73 +77,20 @@ jobs:
fi
release/check-submodules.sh
build:
runs-on: ${{
(github.repository == 'commaai/openpilot') &&
((github.event_name != 'pull_request') ||
(github.event.pull_request.head.repo.full_name == 'commaai/openpilot'))
&& fromJSON('["namespace-profile-amd64-8x16", "namespace-experiments:docker.builds.local-cache=separate"]')
|| fromJSON('["ubuntu-24.04"]') }}
steps:
- uses: actions/checkout@v6
with:
submodules: true
- name: Setup docker push
if: github.ref == 'refs/heads/master' && github.event_name != 'pull_request' && github.repository == 'sunnypilot/sunnypilot'
run: |
echo "PUSH_IMAGE=true" >> "$GITHUB_ENV"
$DOCKER_LOGIN
- uses: ./.github/workflows/setup-with-retry
- uses: ./.github/workflows/compile-openpilot
timeout-minutes: 30
build_mac:
name: build macOS
if: false # tmp disable due to brew install not working
runs-on: ${{ ((github.repository == 'commaai/openpilot') && ((github.event_name != 'pull_request') || (github.event.pull_request.head.repo.full_name == 'commaai/openpilot'))) && 'namespace-profile-macos-8x14' || 'macos-latest' }}
steps:
- uses: actions/checkout@v6
with:
submodules: true
- run: echo "CACHE_COMMIT_DATE=$(git log -1 --pretty='format:%cd' --date=format:'%Y-%m-%d-%H:%M')" >> $GITHUB_ENV
- name: Homebrew cache
uses: ./.github/workflows/auto-cache
with:
save: false # No need save here if we manually save it later conditionally
path: ~/Library/Caches/Homebrew
key: brew-macos-${{ hashFiles('tools/Brewfile') }}-${{ github.sha }}
restore-keys: |
brew-macos-${{ hashFiles('tools/Brewfile') }}
brew-macos-
- name: Install dependencies
run: ./tools/mac_setup.sh
env:
PYTHONWARNINGS: default # package install has DeprecationWarnings
HOMEBREW_DISPLAY_INSTALL_TIMES: 1
- name: Save Homebrew cache
uses: actions/cache/save@v4
if: github.ref == 'refs/heads/master'
with:
path: ~/Library/Caches/Homebrew
key: brew-macos-${{ hashFiles('tools/Brewfile') }}-${{ github.sha }}
- run: git lfs pull
- name: Getting scons cache
uses: ./.github/workflows/auto-cache
with:
save: false # No need save here if we manually save it later conditionally
path: /tmp/scons_cache
key: scons-${{ runner.arch }}-macos-${{ env.CACHE_COMMIT_DATE }}-${{ github.sha }}
restore-keys: |
scons-${{ runner.arch }}-macos-${{ env.CACHE_COMMIT_DATE }}
scons-${{ runner.arch }}-macos
- name: Remove Homebrew from environment
run: |
FILTERED=$(echo "$PATH" | tr ':' '\n' | grep -v '/opt/homebrew' | tr '\n' ':')
echo "PATH=${FILTERED}/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin" >> $GITHUB_ENV
- run: ./tools/op.sh setup
- name: Building openpilot
run: . .venv/bin/activate && scons -j$(nproc)
- name: Save scons cache
uses: actions/cache/save@v4
if: github.ref == 'refs/heads/master'
with:
path: /tmp/scons_cache
key: scons-${{ runner.arch }}-macos-${{ env.CACHE_COMMIT_DATE }}-${{ github.sha }}
run: scons
static_analysis:
name: static analysis
@@ -157,18 +98,16 @@ jobs:
(github.repository == 'commaai/openpilot') &&
((github.event_name != 'pull_request') ||
(github.event.pull_request.head.repo.full_name == 'commaai/openpilot'))
&& fromJSON('["namespace-profile-amd64-8x16", "namespace-experiments:docker.builds.local-cache=separate"]')
&& fromJSON('["namespace-profile-amd64-8x16"]')
|| fromJSON('["ubuntu-24.04"]') }}
env:
PYTHONWARNINGS: default
steps:
- uses: actions/checkout@v6
with:
submodules: true
- uses: ./.github/workflows/setup-with-retry
- run: ./tools/op.sh setup
- name: Static analysis
timeout-minutes: 1
run: ${{ env.RUN }} "scripts/lint/lint.sh"
run: scripts/lint/lint.sh
unit_tests:
name: unit tests
@@ -176,24 +115,22 @@ jobs:
(github.repository == 'commaai/openpilot') &&
((github.event_name != 'pull_request') ||
(github.event.pull_request.head.repo.full_name == 'commaai/openpilot'))
&& fromJSON('["namespace-profile-amd64-8x16", "namespace-experiments:docker.builds.local-cache=separate"]')
&& fromJSON('["namespace-profile-amd64-8x16"]')
|| fromJSON('["ubuntu-24.04"]') }}
steps:
- uses: actions/checkout@v6
with:
submodules: true
- uses: ./.github/workflows/setup-with-retry
id: setup-step
- run: ./tools/op.sh setup
- name: Build openpilot
run: ${{ env.RUN }} "scons -j$(nproc)"
run: scons -j$(nproc)
- name: Run unit tests
timeout-minutes: ${{ contains(runner.name, 'nsc') && ((steps.setup-step.outputs.duration < 18) && 1 || 2) || 999 }}
timeout-minutes: ${{ contains(runner.name, 'nsc') && 2 || 999 }}
run: |
${{ env.RUN }} "source selfdrive/test/setup_xvfb.sh && \
# Pre-compile Python bytecode so each pytest worker doesn't need to
$PYTEST --collect-only -m 'not slow' -qq && \
MAX_EXAMPLES=1 $PYTEST -m 'not slow' && \
chmod -R 777 /tmp/comma_download_cache"
source selfdrive/test/setup_xvfb.sh
# Pre-compile Python bytecode so each pytest worker doesn't need to
$PYTEST --collect-only -m 'not slow' -qq
MAX_EXAMPLES=1 $PYTEST -m 'not slow'
process_replay:
name: process replay
@@ -202,29 +139,19 @@ jobs:
(github.repository == 'commaai/openpilot') &&
((github.event_name != 'pull_request') ||
(github.event.pull_request.head.repo.full_name == 'commaai/openpilot'))
&& fromJSON('["namespace-profile-amd64-8x16", "namespace-experiments:docker.builds.local-cache=separate"]')
&& fromJSON('["namespace-profile-amd64-8x16"]')
|| fromJSON('["ubuntu-24.04"]') }}
steps:
- uses: actions/checkout@v6
with:
submodules: true
- uses: ./.github/workflows/setup-with-retry
id: setup-step
- name: Cache test routes
id: dependency-cache
uses: actions/cache@v5
with:
path: .ci_cache/comma_download_cache
key: proc-replay-${{ hashFiles('selfdrive/test/process_replay/test_processes.py') }}
- run: ./tools/op.sh setup
- name: Build openpilot
run: |
${{ env.RUN }} "scons -j$(nproc)"
run: scons -j$(nproc)
- name: Run replay
timeout-minutes: ${{ contains(runner.name, 'nsc') && (steps.dependency-cache.outputs.cache-hit == 'true') && ((steps.setup-step.outputs.duration < 18) && 1 || 2) || 20 }}
timeout-minutes: ${{ contains(runner.name, 'nsc') && 2 || 20 }}
continue-on-error: ${{ github.ref == 'refs/heads/master' }}
run: |
${{ env.RUN }} "selfdrive/test/process_replay/test_processes.py -j$(nproc) && \
chmod -R 777 /tmp/comma_download_cache"
run: selfdrive/test/process_replay/test_processes.py -j$(nproc)
- name: Print diff
id: print-diff
if: always()
@@ -246,21 +173,21 @@ jobs:
if: github.repository == 'commaai/openpilot' && github.ref == 'refs/heads/master'
working-directory: ${{ github.workspace }}/ci-artifacts
run: |
git checkout --orphan process-replay
git rm -rf .
git config user.name "GitHub Actions Bot"
git config user.email "<>"
git fetch origin process-replay || true
git checkout process-replay 2>/dev/null || git checkout --orphan process-replay
cp ${{ github.workspace }}/selfdrive/test/process_replay/fakedata/*.zst .
echo "${{ github.sha }}" > ref_commit
git add .
git commit -m "process-replay refs for ${{ github.repository }}@${{ github.sha }}"
git push origin process-replay --force
git commit -m "process-replay refs for ${{ github.repository }}@${{ github.sha }}" || echo "No changes to commit"
git push origin process-replay
- name: Run regen
if: false
timeout-minutes: 4
run: |
${{ env.RUN }} "ONNXCPU=1 $PYTEST selfdrive/test/process_replay/test_regen.py && \
chmod -R 777 /tmp/comma_download_cache"
env:
ONNXCPU: 1
run: $PYTEST selfdrive/test/process_replay/test_regen.py
simulator_driving:
name: simulator driving
@@ -268,73 +195,44 @@ jobs:
(github.repository == 'commaai/openpilot') &&
((github.event_name != 'pull_request') ||
(github.event.pull_request.head.repo.full_name == 'commaai/openpilot'))
&& fromJSON('["namespace-profile-amd64-8x16", "namespace-experiments:docker.builds.local-cache=separate"]')
&& fromJSON('["namespace-profile-amd64-8x16"]')
|| fromJSON('["ubuntu-24.04"]') }}
if: false # FIXME: Started to timeout recently
steps:
- uses: actions/checkout@v6
with:
submodules: true
- uses: ./.github/workflows/setup-with-retry
id: setup-step
- run: ./tools/op.sh setup
- name: Build openpilot
run: |
${{ env.RUN }} "scons -j$(nproc)"
run: scons -j$(nproc)
- name: Driving test
timeout-minutes: ${{ (steps.setup-step.outputs.duration < 18) && 1 || 2 }}
timeout-minutes: 2
run: |
${{ env.RUN }} "source selfdrive/test/setup_xvfb.sh && \
source selfdrive/test/setup_vsound.sh && \
CI=1 pytest -s tools/sim/tests/test_metadrive_bridge.py"
source selfdrive/test/setup_xvfb.sh
pytest -s tools/sim/tests/test_metadrive_bridge.py
create_raylib_ui_report:
name: Create raylib UI Report
create_ui_report:
name: Create UI Report
runs-on: ${{
(github.repository == 'commaai/openpilot') &&
((github.event_name != 'pull_request') ||
(github.event.pull_request.head.repo.full_name == 'commaai/openpilot'))
&& fromJSON('["namespace-profile-amd64-8x16", "namespace-experiments:docker.builds.local-cache=separate"]')
&& fromJSON('["namespace-profile-amd64-8x16"]')
|| fromJSON('["ubuntu-24.04"]') }}
steps:
- uses: actions/checkout@v6
with:
submodules: true
- uses: ./.github/workflows/setup-with-retry
- run: ./tools/op.sh setup
- name: Build openpilot
run: ${{ env.RUN }} "scons -j$(nproc)"
- name: Create raylib UI Report
run: >
${{ env.RUN }} "PYTHONWARNINGS=ignore &&
source selfdrive/test/setup_xvfb.sh &&
python3 selfdrive/ui/tests/test_ui/raylib_screenshots.py"
- name: Upload Raylib UI Report
run: scons -j$(nproc)
- name: Create UI Report
run: |
source selfdrive/test/setup_xvfb.sh
python3 selfdrive/ui/tests/diff/replay.py
python3 selfdrive/ui/tests/diff/replay.py --big
- name: Upload UI Report
uses: actions/upload-artifact@v6
with:
name: raylib-report-${{ inputs.run_number || '1' }}-${{ github.event_name == 'push' && github.ref == 'refs/heads/master' && 'master' || github.event.number }}
path: selfdrive/ui/tests/test_ui/raylib_report/screenshots
create_mici_raylib_ui_report:
name: Create mici raylib UI Report
runs-on: ${{
(github.repository == 'commaai/openpilot') &&
((github.event_name != 'pull_request') ||
(github.event.pull_request.head.repo.full_name == 'commaai/openpilot'))
&& fromJSON('["namespace-profile-amd64-8x16", "namespace-experiments:docker.builds.local-cache=separate"]')
|| fromJSON('["ubuntu-24.04"]') }}
steps:
- uses: actions/checkout@v6
with:
submodules: true
- uses: ./.github/workflows/setup-with-retry
- name: Build openpilot
run: ${{ env.RUN }} "scons -j$(nproc)"
- name: Create mici raylib UI Report
run: >
${{ env.RUN }} "PYTHONWARNINGS=ignore &&
source selfdrive/test/setup_xvfb.sh &&
WINDOWED=1 python3 selfdrive/ui/tests/diff/replay.py"
- name: Upload Raylib UI Report
uses: actions/upload-artifact@v6
with:
name: mici-raylib-report-${{ inputs.run_number || '1' }}-${{ github.event_name == 'push' && github.ref == 'refs/heads/master' && 'master' || github.event.number }}
name: ui-report-${{ inputs.run_number || '1' }}-${{ github.event_name == 'push' && github.ref == 'refs/heads/master' && 'master' || github.event.number }}
path: selfdrive/ui/tests/diff/report

175
.github/workflows/ui_preview.yaml vendored Normal file
View File

@@ -0,0 +1,175 @@
name: "ui preview"
on:
push:
branches:
- master
pull_request_target:
types: [assigned, opened, synchronize, reopened, edited]
branches:
- 'master'
paths:
- 'selfdrive/assets/**'
- 'selfdrive/ui/**'
- 'system/ui/**'
workflow_dispatch:
env:
UI_JOB_NAME: "Create UI Report"
REPORT_NAME: ${{ github.event_name == 'push' && github.ref == 'refs/heads/master' && 'master' || github.event.number }}
SHA: ${{ github.event_name == 'push' && github.ref == 'refs/heads/master' && github.sha || github.event.pull_request.head.sha }}
BRANCH_NAME: "openpilot/pr-${{ github.event.number }}-ui-preview"
REPORT_FILES_BRANCH_NAME: "mici-raylib-ui-reports"
# variant:video_prefix:master_branch
VARIANTS: "mici:mici_ui_replay:openpilot_master_ui_mici_raylib big:tizi_ui_replay:openpilot_master_ui_big_raylib"
jobs:
preview:
if: github.repository == 'sunnypilot/sunnypilot'
name: preview
runs-on: ubuntu-latest
timeout-minutes: 20
permissions:
contents: read
pull-requests: write
actions: read
steps:
- uses: actions/checkout@v6
with:
submodules: true
- name: Waiting for ui generation to end
uses: lewagon/wait-on-check-action@v1.3.4
with:
ref: ${{ env.SHA }}
check-name: ${{ env.UI_JOB_NAME }}
repo-token: ${{ secrets.GITHUB_TOKEN }}
allowed-conclusions: success
wait-interval: 20
- name: Getting workflow run ID
id: get_run_id
run: |
echo "run_id=$(curl https://api.github.com/repos/${{ github.repository }}/commits/${{ env.SHA }}/check-runs | jq -r '.check_runs[] | select(.name == "${{ env.UI_JOB_NAME }}") | .html_url | capture("(?<number>[0-9]+)") | .number')" >> $GITHUB_OUTPUT
- name: Getting proposed ui
uses: dawidd6/action-download-artifact@v6
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
run_id: ${{ steps.get_run_id.outputs.run_id }}
search_artifacts: true
name: ui-report-1-${{ env.REPORT_NAME }}
path: ${{ github.workspace }}/pr_ui
- name: Getting mici master ui
uses: actions/checkout@v6
with:
repository: sunnypilot/ci-artifacts
ssh-key: ${{ secrets.CI_ARTIFACTS_DEPLOY_KEY }}
path: ${{ github.workspace }}/master_mici
ref: openpilot_master_ui_mici_raylib
- name: Getting big master ui
uses: actions/checkout@v6
with:
repository: sunnypilot/ci-artifacts
ssh-key: ${{ secrets.CI_ARTIFACTS_DEPLOY_KEY }}
path: ${{ github.workspace }}/master_big
ref: openpilot_master_ui_big_raylib
- name: Saving new master ui
if: github.ref == 'refs/heads/master' && github.event_name == 'push'
run: |
for variant in $VARIANTS; do
IFS=':' read -r name video branch <<< "$variant"
master_dir="${{ github.workspace }}/master_${name}"
cd "$master_dir"
git checkout --orphan=new_branch
git rm -rf *
git branch -D "$branch"
git branch -m "$branch"
git config user.name "GitHub Actions Bot"
git config user.email "<>"
cp "${{ github.workspace }}/pr_ui/${video}.mp4" .
git add .
git commit -m "${name} video for commit ${{ env.SHA }}"
git push origin "$branch" --force
done
- name: Setup FFmpeg
uses: AnimMouse/setup-ffmpeg@ae28d57dabbb148eff63170b6bf7f2b60062cbae
- name: Finding diffs
if: github.event_name == 'pull_request_target'
id: find_diff
run: |
export PYTHONPATH=${{ github.workspace }}
baseurl="https://github.com/sunnypilot/ci-artifacts/raw/refs/heads/${{ env.BRANCH_NAME }}"
COMMENT=""
for variant in $VARIANTS; do
IFS=':' read -r name video _ <<< "$variant"
diff_name="${name}_diff"
mv "${{ github.workspace }}/pr_ui/${video}.mp4" "${{ github.workspace }}/pr_ui/${video}_proposed.mp4"
cp "${{ github.workspace }}/master_${name}/${video}.mp4" "${{ github.workspace }}/pr_ui/${video}_master.mp4"
diff_exit_code=0
python3 ${{ github.workspace }}/selfdrive/ui/tests/diff/diff.py \
"${{ github.workspace }}/pr_ui/${video}_master.mp4" \
"${{ github.workspace }}/pr_ui/${video}_proposed.mp4" \
"${diff_name}.html" --basedir "$baseurl" --no-open || diff_exit_code=$?
cp "${{ github.workspace }}/selfdrive/ui/tests/diff/report/${diff_name}.html" "${{ github.workspace }}/pr_ui/"
cp "${{ github.workspace }}/selfdrive/ui/tests/diff/report/${diff_name}.mp4" "${{ github.workspace }}/pr_ui/"
REPORT_URL="https://sunnypilot.github.io/ci-artifacts/${diff_name}_pr_${{ github.event.number }}.html"
if [ $diff_exit_code -eq 0 ]; then
COMMENT+="**${name}**: Videos are identical! [View Diff Report]($REPORT_URL)"$'\n'
else
COMMENT+="**${name}**: ⚠️ <strong>Videos differ!</strong> [View Diff Report]($REPORT_URL)"$'\n'
fi
done
{
echo "COMMENT<<EOF"
echo "$COMMENT"
echo "EOF"
} >> "$GITHUB_OUTPUT"
- name: Saving proposed ui
if: github.event_name == 'pull_request_target'
working-directory: ${{ github.workspace }}/master_mici
run: |
git config user.name "GitHub Actions Bot"
git config user.email "<>"
git checkout --orphan=${{ env.BRANCH_NAME }}
git rm -rf *
mv ${{ github.workspace }}/pr_ui/* .
git add .
git commit -m "ui videos for PR #${{ github.event.number }}"
git push origin ${{ env.BRANCH_NAME }} --force
# Append diff reports to report files branch
git fetch origin ${{ env.REPORT_FILES_BRANCH_NAME }}
git checkout ${{ env.REPORT_FILES_BRANCH_NAME }}
for variant in $VARIANTS; do
IFS=':' read -r name _ _ <<< "$variant"
diff_name="${name}_diff"
cp "${{ github.workspace }}/selfdrive/ui/tests/diff/report/${diff_name}.html" "${diff_name}_pr_${{ github.event.number }}.html"
git add "${diff_name}_pr_${{ github.event.number }}.html"
done
git commit -m "ui diff reports for PR #${{ github.event.number }}" || echo "No changes to commit"
git push origin ${{ env.REPORT_FILES_BRANCH_NAME }}
- name: Comment on PR
if: github.event_name == 'pull_request_target'
uses: thollander/actions-comment-pull-request@v2
with:
message: |
<!-- _(run_id_ui_preview **${{ github.run_id }}**)_ -->
## UI Preview
${{ steps.find_diff.outputs.COMMENT }}
comment_tag: run_id_ui_preview
pr_number: ${{ github.event.number }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -1,51 +0,0 @@
name: vendor third_party
on:
workflow_dispatch:
jobs:
build:
if: github.ref != 'refs/heads/master'
strategy:
matrix:
os: [ubuntu-24.04, macos-latest]
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v6
with:
submodules: true
- name: Build
run: third_party/build.sh
- name: Package artifacts
run: |
git add -A third_party/
git diff --cached --name-only -- third_party/ | tar -cf /tmp/third_party_build.tar -T -
- uses: actions/upload-artifact@v4
with:
name: third-party-${{ runner.os }}
path: /tmp/third_party_build.tar
commit:
needs: build
runs-on: ubuntu-24.04
permissions:
contents: write
steps:
- uses: actions/checkout@v6
- uses: actions/download-artifact@v4
with:
path: /tmp/artifacts
- name: Commit vendored libraries
run: |
for f in /tmp/artifacts/*/third_party_build.tar; do
tar xf "$f"
done
git add third_party/
if git diff --cached --quiet; then
echo "No changes to commit"
exit 0
fi
git config user.name "github-actions[bot]"
git config user.email "github-actions[bot]@users.noreply.github.com"
git commit -m "third_party: rebuild vendor libraries"
git push

4
.gitignore vendored
View File

@@ -64,9 +64,7 @@ flycheck_*
cppcheck_report.txt
comma*.sh
selfdrive/modeld/models/*.pkl
sunnypilot/modeld*/thneed/compile
sunnypilot/modeld*/models/*.thneed
selfdrive/modeld/models/*.pkl*
sunnypilot/modeld*/models/*.pkl
# openpilot log files

2
.gitmodules vendored
View File

@@ -15,7 +15,7 @@
url = https://github.com/commaai/teleoprtc
[submodule "tinygrad"]
path = tinygrad_repo
url = https://github.com/commaai/tinygrad.git
url = https://github.com/sunnypilot/tinygrad.git
[submodule "sunnypilot/neural_network_data"]
path = sunnypilot/neural_network_data
url = https://github.com/sunnypilot/neural-network-data.git

View File

@@ -1,5 +1,104 @@
sunnypilot Version 2025.003.000 (20xx-xx-xx)
sunnypilot Version 2026.001.000 (2026-03-xx)
========================
* What's Changed (sunnypilot/sunnypilot)
* Complete rewrite of the user interface from Qt C++ to Raylib Python
* comma four support
* ui: sunnypilot toggle style by @nayan8teen
* ui: fix scroll panel mouse wheel behavior by @nayan8teen
* ui: sunnypilot panels by @nayan8teen
* sunnylink: centralize key pair handling in sunnylink registration by @devtekve
* ui: reimplement sunnypilot branding with Raylib by @sunnyhaibin
* ui: Platform Selector by @Discountchubbs
* ui: vehicle brand settings by @Discountchubbs
* ui: sunnylink client-side implementation by @nayan8teen
* ui: `NetworkUISP` by @Discountchubbs
* ui: add sunnypilot font by @nayan8teen
* ui: sunnypilot sponsor tier color mapping by @sunnyhaibin
* ui: sunnylink panel by @nayan8teen
* ui: Models panel by @Discountchubbs
* ui: software panel by @Discountchubbs
* modeld_v2: support planplus outputs by @Discountchubbs
* ui: OSM panel by @Discountchubbs
* ui: Developer panel extension by @Discountchubbs
* sunnylink: Vehicle Selector support by @sunnyhaibin
* [TIZI/TICI] ui: Developer Metrics by @rav4kumar
* [comma 4] ui: sunnylink panel by @nayan8teen
* ui: lateral-only and longitudinal-only UI statuses support by @royjr
* sunnylink: elliptic curve keys support and improve key path handling by @nayan8teen
* sunnylink: block remote modification of SSH key parameters by @zikeji
* [TIZI/TICI] ui: rainbow path by @rav4kumar
* [TIZI/TICI] ui: chevron metrics by @rav4kumar
* ui: include MADS enabled state to `engaged` check by @sunnyhaibin
* Toyota: Enforce Factory Longitudinal Control by @sunnyhaibin
* ui: fix malformed dongle ID display on the PC if dongleID is not set by @dzid26
* SL: Re enable and validate ingestion of swaglogs by @devtekve
* modeld_v2: planplus model tuning by @Discountchubbs
* ui: fix Always Offroad button visibility by @nayan8teen
* Reimplement sunnypilot Terms of Service & sunnylink Consent Screens by @sunnyhaibin
* [TIZI/TICI] ui: update dmoji position and Developer UI adjustments by @rav4kumar
* modeld: configurable camera offset by @Discountchubbs
* [TIZI/TICI] ui: sunnylink status on sidebar by @Copilot
* ui: Global Brightness Override by @nayan8teen
* ui: Customizable Interactive Timeout by @sunnyhaibin
* sunnylink: add units to param metadata by @nayan8teen
* ui: Customizable Onroad Brightness by @sunnyhaibin
* [TIZI/TICI] ui: Steering panel by @nayan8teen
* [TIZI/TICI] ui: Rocket Fuel by @rav4kumar
* [TIZI/TICI] ui: MICI style turn signals by @rav4kumar
* [TIZI/TICI] ui: MICI style blindspot indicators by @sunnyhaibin
* [MICI] ui: display blindspot indicators when available by @rav4kumar
* [TIZI/TICI] ui: Road Name by @rav4kumar
* [TIZI/TICI] ui: Blue "Exit Always Offroad" button by @dzid26
* [TIZI/TICI] ui: Speed Limit by @rav4kumar
* Reapply "latcontrol_torque: lower kp and lower friction threshold (commaai/openpilot#36619)" by @sunnyhaibin
* [TIZI/TICI] ui: steering arc by @royjr
* [TIZI/TICI] ui: Smart Cruise Control elements by @sunnyhaibin
* [TIZI/TICI] ui: Green Light and Lead Departure elements by @sunnyhaibin
* [TIZI/TICI] ui: standstill timer by @sunnyhaibin
* [MICI] ui: driving models selector by @Discountchubbs
* [TIZI/TICI] ui: Hide vEgo and True vEgo by @sunnyhaibin
* [TIZI/TICI] ui: Visuals panel by @nayan8teen
* Device: Retain QuickBoot state after op switch by @nayan8teen
* [TIZI/TICI] ui: Trips panel by @sunnyhaibin
* [TIZI/TICI] ui: dynamic ICBM status by @sunnyhaibin
* [TIZI/TICI] ui: Cruise panel by @sunnyhaibin
* ui: better wake mode support by @nayan8teen
* Pause Lateral Control with Blinker: Post-Blinker Delay by @CHaucke89
* SCC-V: Use p97 for predicted lateral accel by @yasu-oh
* Controls: Support for Torque Lateral Control v0 Tune by @sunnyhaibin
* What's Changed (sunnypilot/opendbc)
* Honda: DBC for Accord 9th Generation by @mvl-boston
* FCA: update tire stiffness values for `RAM_HD` by @dparring
* Honda: Nidec hybrid baseline brake support by @mvl-boston
* Subaru Global Gen2: bump steering limits and update tuning by @sunnyhaibin
* Toyota: Enforce Stock Longitudinal Control by @rav4kumar
* Nissan: use MADS enabled status for LKAS HUD logic by @downquark7
* Reapply "Lateral: lower friction threshold (#2915)" (#378) by @sunnyhaibin
* HKG: add KIA_FORTE_2019_NON_SCC fingerprint by @royjr
* Nissan: Parse cruise control buttons by @downquark7
* Rivian: Add stalk down ACC behavior to match stock Rivian by @lukasloetkolben
* Tesla: remove `TESLA_MODEL_X` from `dashcamOnly` by @ssysm
* Hyundai Longitudinal: refactor tuning by @Discountchubbs
* Tesla: add fingerprint for Model 3 Performance HW4 by @sunnyhaibin
* Toyota: do not disable radar when smartDSU or CAN Filter detected by @sunnyhaibin
* Honda: add missing `GasInterceptor` messages to Taiwan Odyssey DBC by @mvl-boston
* GM: remove `CHEVROLET_EQUINOX_NON_ACC_3RD_GEN` from `dashcamOnly` by @sunnyhaibin
* GM: remove `CHEVROLET_BOLT_NON_ACC_2ND_GEN` from `dashcamOnly` by @sunnyhaibin
* New Contributors (sunnypilot/sunnypilot)
* @TheSecurityDev made their first contribution in "ui: fix sidebar scroll in UI screenshots"
* @zikeji made their first contribution in "sunnylink: block remote modification of SSH key parameters"
* @Candy0707 made their first contribution in "[TIZI/TICI] ui: Fix misaligned turn signals and blindspot indicators with sidebar"
* @CHaucke89 made their first contribution in "Pause Lateral Control with Blinker: Post-Blinker Delay"
* @yasu-oh made their first contribution in "SCC-V: Use p97 for predicted lateral accel"
* New Contributors (sunnypilot/opendbc)
* @AmyJeanes made their first contribution in "Tesla: Fix stock LKAS being blocked when MADS is enabled"
* @mvl-boston made their first contribution in "Honda: Update Clarity brake to renamed DBC message name"
* @dzid26 made their first contribution in "Tesla: Parse speed limit from CAN"
* @firestar5683 made their first contribution in "GM: Non-ACC platforms with steering only support"
* @downquark7 made their first contribution in "Nissan: use MADS enabled status for LKAS HUD logic"
* @royjr made their first contribution in "HKG: add KIA_FORTE_2019_NON_SCC fingerprint"
* @ssysm made their first contribution in "Tesla: remove `TESLA_MODEL_X` from `dashcamOnly`"
* Full Changelog: https://github.com/sunnypilot/sunnypilot/compare/v2025.002.000...v2026.001.000
sunnypilot Version 2025.002.000 (2025-11-06)
========================

View File

@@ -1,14 +1,38 @@
FROM ghcr.io/commaai/openpilot-base:latest
FROM ubuntu:24.04
ENV PYTHONUNBUFFERED=1
ENV OPENPILOT_PATH=/home/batman/openpilot
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && \
apt-get install -y --no-install-recommends sudo tzdata locales && \
rm -rf /var/lib/apt/lists/*
RUN sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && locale-gen
ENV LANG=en_US.UTF-8
ENV LANGUAGE=en_US:en
ENV LC_ALL=en_US.UTF-8
ENV NVIDIA_VISIBLE_DEVICES=all
ENV NVIDIA_DRIVER_CAPABILITIES=graphics,utility,compute
ARG USER=batman
ARG USER_UID=1001
RUN useradd -m -s /bin/bash -u $USER_UID $USER
RUN usermod -aG sudo $USER
RUN echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
USER $USER
ENV OPENPILOT_PATH=/home/$USER/openpilot
RUN mkdir -p ${OPENPILOT_PATH}
WORKDIR ${OPENPILOT_PATH}
COPY . ${OPENPILOT_PATH}/
COPY --chown=$USER . ${OPENPILOT_PATH}/
ENV UV_BIN="/home/batman/.local/bin/"
ENV PATH="$UV_BIN:$PATH"
RUN UV_PROJECT_ENVIRONMENT=$VIRTUAL_ENV uv run scons --cache-readonly -j$(nproc)
ENV UV_BIN="/home/$USER/.local/bin/"
ENV VIRTUAL_ENV=${OPENPILOT_PATH}/.venv
ENV PATH="$UV_BIN:$VIRTUAL_ENV/bin:$PATH"
RUN tools/setup_dependencies.sh && \
sudo rm -rf /var/lib/apt/lists/*
USER root
RUN git config --global --add safe.directory '*'

View File

@@ -1,82 +0,0 @@
FROM ubuntu:24.04
ENV PYTHONUNBUFFERED=1
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && \
apt-get install -y --no-install-recommends sudo tzdata locales ssh pulseaudio xvfb x11-xserver-utils gnome-screenshot python3-tk python3-dev && \
rm -rf /var/lib/apt/lists/*
RUN sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && locale-gen
ENV LANG=en_US.UTF-8
ENV LANGUAGE=en_US:en
ENV LC_ALL=en_US.UTF-8
COPY tools/install_ubuntu_dependencies.sh /tmp/tools/
RUN /tmp/tools/install_ubuntu_dependencies.sh && \
rm -rf /var/lib/apt/lists/* /tmp/* && \
cd /usr/lib/gcc/arm-none-eabi/* && \
rm -rf arm/ thumb/nofp thumb/v6* thumb/v8* thumb/v7+fp thumb/v7-r+fp.sp
# Add OpenCL
RUN apt-get update && apt-get install -y --no-install-recommends \
apt-utils \
alien \
unzip \
tar \
curl \
xz-utils \
dbus \
gcc-arm-none-eabi \
tmux \
vim \
libx11-6 \
wget \
&& rm -rf /var/lib/apt/lists/*
RUN mkdir -p /tmp/opencl-driver-intel && \
cd /tmp/opencl-driver-intel && \
wget https://github.com/intel/llvm/releases/download/2024-WW14/oclcpuexp-2024.17.3.0.09_rel.tar.gz && \
wget https://github.com/oneapi-src/oneTBB/releases/download/v2021.12.0/oneapi-tbb-2021.12.0-lin.tgz && \
mkdir -p /opt/intel/oclcpuexp_2024.17.3.0.09_rel && \
cd /opt/intel/oclcpuexp_2024.17.3.0.09_rel && \
tar -zxvf /tmp/opencl-driver-intel/oclcpuexp-2024.17.3.0.09_rel.tar.gz && \
mkdir -p /etc/OpenCL/vendors && \
echo /opt/intel/oclcpuexp_2024.17.3.0.09_rel/x64/libintelocl.so > /etc/OpenCL/vendors/intel_expcpu.icd && \
cd /opt/intel && \
tar -zxvf /tmp/opencl-driver-intel/oneapi-tbb-2021.12.0-lin.tgz && \
ln -s /opt/intel/oneapi-tbb-2021.12.0/lib/intel64/gcc4.8/libtbb.so /opt/intel/oclcpuexp_2024.17.3.0.09_rel/x64 && \
ln -s /opt/intel/oneapi-tbb-2021.12.0/lib/intel64/gcc4.8/libtbbmalloc.so /opt/intel/oclcpuexp_2024.17.3.0.09_rel/x64 && \
ln -s /opt/intel/oneapi-tbb-2021.12.0/lib/intel64/gcc4.8/libtbb.so.12 /opt/intel/oclcpuexp_2024.17.3.0.09_rel/x64 && \
ln -s /opt/intel/oneapi-tbb-2021.12.0/lib/intel64/gcc4.8/libtbbmalloc.so.2 /opt/intel/oclcpuexp_2024.17.3.0.09_rel/x64 && \
mkdir -p /etc/ld.so.conf.d && \
echo /opt/intel/oclcpuexp_2024.17.3.0.09_rel/x64 > /etc/ld.so.conf.d/libintelopenclexp.conf && \
ldconfig -f /etc/ld.so.conf.d/libintelopenclexp.conf && \
cd / && \
rm -rf /tmp/opencl-driver-intel
ENV NVIDIA_VISIBLE_DEVICES=all
ENV NVIDIA_DRIVER_CAPABILITIES=graphics,utility,compute
ENV QTWEBENGINE_DISABLE_SANDBOX=1
RUN dbus-uuidgen > /etc/machine-id
RUN apt-get update && apt-get install -y fonts-noto-cjk fonts-noto-color-emoji
ARG USER=batman
ARG USER_UID=1001
RUN useradd -m -s /bin/bash -u $USER_UID $USER
RUN usermod -aG sudo $USER
RUN echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
USER $USER
COPY --chown=$USER pyproject.toml uv.lock /home/$USER
COPY --chown=$USER tools/install_python_dependencies.sh /home/$USER/tools/
ENV VIRTUAL_ENV=/home/$USER/.venv
ENV PATH="$VIRTUAL_ENV/bin:$PATH"
RUN cd /home/$USER && \
tools/install_python_dependencies.sh && \
rm -rf tools/ pyproject.toml uv.lock .cache
USER root
RUN sudo git config --global --add safe.directory /tmp/openpilot

View File

@@ -1,12 +0,0 @@
FROM ghcr.io/sunnypilot/sunnypilot-base:latest
ENV PYTHONUNBUFFERED=1
ENV OPENPILOT_PATH=/home/batman/openpilot
RUN mkdir -p ${OPENPILOT_PATH}
WORKDIR ${OPENPILOT_PATH}
COPY . ${OPENPILOT_PATH}/
RUN scons --cache-readonly -j$(nproc)

View File

@@ -1,83 +0,0 @@
# Base CI/dev image for openpilot: Ubuntu 24.04 + build deps, an Intel CPU
# OpenCL runtime, and Python dependencies installed into a venv for an
# unprivileged user.
FROM ubuntu:24.04

ENV PYTHONUNBUFFERED=1
ENV DEBIAN_FRONTEND=noninteractive

# Core system packages (sudo, ssh, X/virtual framebuffer for UI tests, tk for tools).
RUN apt-get update && \
apt-get install -y --no-install-recommends sudo tzdata locales ssh pulseaudio xvfb x11-xserver-utils gnome-screenshot python3-tk python3-dev && \
rm -rf /var/lib/apt/lists/*

# Generate and default to the en_US.UTF-8 locale.
RUN sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && locale-gen
ENV LANG=en_US.UTF-8
ENV LANGUAGE=en_US:en
ENV LC_ALL=en_US.UTF-8

# openpilot build dependencies; prune unused arm-none-eabi multilibs to shrink the image.
COPY tools/install_ubuntu_dependencies.sh /tmp/tools/
RUN /tmp/tools/install_ubuntu_dependencies.sh && \
rm -rf /var/lib/apt/lists/* /tmp/* && \
cd /usr/lib/gcc/arm-none-eabi/* && \
rm -rf arm/ thumb/nofp thumb/v6* thumb/v8* thumb/v7+fp thumb/v7-r+fp.sp

# Add OpenCL
RUN apt-get update && apt-get install -y --no-install-recommends \
apt-utils \
alien \
unzip \
tar \
curl \
xz-utils \
dbus \
gcc-arm-none-eabi \
tmux \
vim \
libx11-6 \
wget \
rsync \
&& rm -rf /var/lib/apt/lists/*

# Intel CPU OpenCL runtime (oclcpuexp) + oneTBB, registered with the ICD
# loader via /etc/OpenCL/vendors and made resolvable through ldconfig.
RUN mkdir -p /tmp/opencl-driver-intel && \
cd /tmp/opencl-driver-intel && \
wget https://github.com/intel/llvm/releases/download/2024-WW14/oclcpuexp-2024.17.3.0.09_rel.tar.gz && \
wget https://github.com/oneapi-src/oneTBB/releases/download/v2021.12.0/oneapi-tbb-2021.12.0-lin.tgz && \
mkdir -p /opt/intel/oclcpuexp_2024.17.3.0.09_rel && \
cd /opt/intel/oclcpuexp_2024.17.3.0.09_rel && \
tar -zxvf /tmp/opencl-driver-intel/oclcpuexp-2024.17.3.0.09_rel.tar.gz && \
mkdir -p /etc/OpenCL/vendors && \
echo /opt/intel/oclcpuexp_2024.17.3.0.09_rel/x64/libintelocl.so > /etc/OpenCL/vendors/intel_expcpu.icd && \
cd /opt/intel && \
tar -zxvf /tmp/opencl-driver-intel/oneapi-tbb-2021.12.0-lin.tgz && \
ln -s /opt/intel/oneapi-tbb-2021.12.0/lib/intel64/gcc4.8/libtbb.so /opt/intel/oclcpuexp_2024.17.3.0.09_rel/x64 && \
ln -s /opt/intel/oneapi-tbb-2021.12.0/lib/intel64/gcc4.8/libtbbmalloc.so /opt/intel/oclcpuexp_2024.17.3.0.09_rel/x64 && \
ln -s /opt/intel/oneapi-tbb-2021.12.0/lib/intel64/gcc4.8/libtbb.so.12 /opt/intel/oclcpuexp_2024.17.3.0.09_rel/x64 && \
ln -s /opt/intel/oneapi-tbb-2021.12.0/lib/intel64/gcc4.8/libtbbmalloc.so.2 /opt/intel/oclcpuexp_2024.17.3.0.09_rel/x64 && \
mkdir -p /etc/ld.so.conf.d && \
echo /opt/intel/oclcpuexp_2024.17.3.0.09_rel/x64 > /etc/ld.so.conf.d/libintelopenclexp.conf && \
ldconfig -f /etc/ld.so.conf.d/libintelopenclexp.conf && \
cd / && \
rm -rf /tmp/opencl-driver-intel

# GPU passthrough hints for the NVIDIA container runtime; disable the
# QtWebEngine sandbox (needed when running as root / in containers).
ENV NVIDIA_VISIBLE_DEVICES=all
ENV NVIDIA_DRIVER_CAPABILITIES=graphics,utility,compute
ENV QTWEBENGINE_DISABLE_SANDBOX=1

# Some services (e.g. dbus) require a machine id to exist.
RUN dbus-uuidgen > /etc/machine-id
RUN apt-get update && apt-get install -y fonts-noto-cjk fonts-noto-color-emoji

# Unprivileged user with passwordless sudo.
ARG USER=batman
ARG USER_UID=1001
RUN useradd -m -s /bin/bash -u $USER_UID $USER
RUN usermod -aG sudo $USER
RUN echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers

USER $USER
COPY --chown=$USER pyproject.toml uv.lock /home/$USER
COPY --chown=$USER tools/install_python_dependencies.sh /home/$USER/tools/
ENV VIRTUAL_ENV=/home/$USER/.venv
ENV PATH="$VIRTUAL_ENV/bin:$PATH"
# Install Python deps into the venv, then drop the inputs to keep the image small.
RUN cd /home/$USER && \
tools/install_python_dependencies.sh && \
rm -rf tools/ pyproject.toml uv.lock .cache

USER root
# openpilot is mounted at /tmp/openpilot in CI; mark it safe for git.
RUN sudo git config --global --add safe.directory /tmp/openpilot

19
Jenkinsfile vendored
View File

@@ -210,30 +210,23 @@ node {
'HW + Unit Tests': {
deviceStage("tizi-hardware", "tizi-common", ["UNSAFE=1"], [
step("build", "cd system/manager && ./build.py"),
step("test pandad", "pytest selfdrive/pandad/tests/test_pandad.py", [diffPaths: ["panda", "selfdrive/pandad/"]]),
step("test power draw", "pytest -s system/hardware/tici/tests/test_power_draw.py"),
step("test encoder", "LD_LIBRARY_PATH=/usr/local/lib pytest system/loggerd/tests/test_encoder.py", [diffPaths: ["system/loggerd/"]]),
step("test manager", "pytest system/manager/test/test_manager.py"),
])
},
'loopback': {
deviceStage("loopback", "tizi-loopback", ["UNSAFE=1"], [
step("build openpilot", "cd system/manager && ./build.py"),
step("test pandad loopback", "pytest selfdrive/pandad/tests/test_pandad_loopback.py"),
])
},
'camerad OX03C10': {
deviceStage("OX03C10", "tizi-ox03c10", ["UNSAFE=1"], [
step("build", "cd system/manager && ./build.py"),
step("test camerad", "pytest system/camerad/test/test_camerad.py", [timeout: 60]),
step("test exposure", "pytest system/camerad/test/test_exposure.py"),
step("test pandad", "pytest selfdrive/pandad/tests/test_pandad.py", [diffPaths: ["panda", "selfdrive/pandad/"]]),
step("test camerad", "pytest system/camerad/test/test_camerad.py", [timeout: 90]),
])
},
'camerad OS04C10': {
deviceStage("OS04C10", "tici-os04c10", ["UNSAFE=1"], [
step("build", "cd system/manager && ./build.py"),
step("test camerad", "pytest system/camerad/test/test_camerad.py", [timeout: 60]),
step("test exposure", "pytest system/camerad/test/test_exposure.py"),
step("test pandad", "pytest selfdrive/pandad/tests/test_pandad.py", [diffPaths: ["panda", "selfdrive/pandad/"]]),
step("test camerad", "pytest system/camerad/test/test_camerad.py", [timeout: 90]),
])
},
'sensord': {
@@ -251,11 +244,9 @@ node {
'tizi': {
deviceStage("tizi", "tizi", ["UNSAFE=1"], [
step("build openpilot", "cd system/manager && ./build.py"),
step("test pandad loopback", "SINGLE_PANDA=1 pytest selfdrive/pandad/tests/test_pandad_loopback.py"),
step("test pandad loopback", "pytest selfdrive/pandad/tests/test_pandad_loopback.py"),
step("test pandad spi", "pytest selfdrive/pandad/tests/test_pandad_spi.py"),
step("test amp", "pytest system/hardware/tici/tests/test_amplifier.py"),
// TODO: enable once new AGNOS is available
// step("test esim", "pytest system/hardware/tici/tests/test_esim.py"),
step("test qcomgpsd", "pytest system/qcomgpsd/tests/test_qcomgpsd.py", [diffPaths: ["system/qcomgpsd/"]]),
])
},

View File

@@ -1,6 +1,8 @@
Version 0.10.4 (2026-02-17)
========================
* Kia K7 2017 support thanks to royjr!
* Lexus LS 2018 support thanks to Hacheoy!
* Reduce comma four standby power usage by 77% to 52 mW
Version 0.10.3 (2025-12-17)
========================

View File

@@ -18,6 +18,7 @@ AddOption('--asan', action='store_true', help='turn on ASAN')
AddOption('--ubsan', action='store_true', help='turn on UBSan')
AddOption('--mutation', action='store_true', help='generate mutation-ready code')
AddOption('--ccflags', action='store', type='string', default='', help='pass arbitrary flags over the command line')
AddOption('--verbose', action='store_true', default=False, help='show full build commands')
AddOption('--minimal',
action='store_false',
dest='extras',
@@ -28,7 +29,6 @@ AddOption('--minimal',
arch = subprocess.check_output(["uname", "-m"], encoding='utf8').rstrip()
if platform.system() == "Darwin":
arch = "Darwin"
brew_prefix = subprocess.check_output(['brew', '--prefix'], encoding='utf8').strip()
elif arch == "aarch64" and os.path.isfile('/TICI'):
arch = "larch64"
assert arch in [
@@ -38,6 +38,25 @@ assert arch in [
"Darwin", # macOS arm64 (x86 not supported)
]
if arch != "larch64":
import bzip2
import capnproto
import eigen
import ffmpeg as ffmpeg_pkg
import libjpeg
import libyuv
import ncurses
import openssl3
import python3_dev
import zeromq
import zstd
pkgs = [bzip2, capnproto, eigen, ffmpeg_pkg, libjpeg, libyuv, ncurses, openssl3, zeromq, zstd]
py_include = python3_dev.INCLUDE_DIR
else:
# TODO: remove when AGNOS has our new vendor pkgs
pkgs = []
py_include = sysconfig.get_paths()['include']
env = Environment(
ENV={
"PATH": os.environ['PATH'],
@@ -46,15 +65,13 @@ env = Environment(
"ACADOS_PYTHON_INTERFACE_PATH": Dir("#third_party/acados/acados_template").abspath,
"TERA_PATH": Dir("#").abspath + f"/third_party/acados/{arch}/t_renderer"
},
CC='clang',
CXX='clang++',
CCFLAGS=[
"-g",
"-fPIC",
"-O2",
"-Wunused",
"-Werror",
"-Wshadow",
"-Wshadow" if arch in ("Darwin", "larch64") else "-Wshadow=local",
"-Wno-unknown-warning-option",
"-Wno-inconsistent-missing-override",
"-Wno-c99-designator",
@@ -73,7 +90,7 @@ env = Environment(
"#third_party/acados/include/blasfeo/include",
"#third_party/acados/include/hpipm/include",
"#third_party/catch2/include",
"#third_party/libyuv/include",
[x.INCLUDE_DIR for x in pkgs],
],
LIBPATH=[
"#common",
@@ -81,8 +98,8 @@ env = Environment(
"#third_party",
"#selfdrive/pandad",
"#rednose/helpers",
f"#third_party/libyuv/{arch}/lib",
f"#third_party/acados/{arch}/lib",
[x.LIB_DIR for x in pkgs],
],
RPATH=[],
CYTHONCFILESUFFIX=".cpp",
@@ -94,7 +111,8 @@ env = Environment(
# Arch-specific flags and paths
if arch == "larch64":
env.Append(CPPPATH=["#third_party/opencl/include"])
env["CC"] = "clang"
env["CXX"] = "clang++"
env.Append(LIBPATH=[
"/usr/local/lib",
"/system/vendor/lib64",
@@ -105,17 +123,10 @@ if arch == "larch64":
env.Append(CXXFLAGS=arch_flags)
elif arch == "Darwin":
env.Append(LIBPATH=[
f"{brew_prefix}/lib",
f"{brew_prefix}/opt/openssl@3.0/lib",
f"{brew_prefix}/opt/llvm/lib/c++",
"/System/Library/Frameworks/OpenGL.framework/Libraries",
])
env.Append(CCFLAGS=["-DGL_SILENCE_DEPRECATION"])
env.Append(CXXFLAGS=["-DGL_SILENCE_DEPRECATION"])
env.Append(CPPPATH=[
f"{brew_prefix}/include",
f"{brew_prefix}/opt/openssl@3.0/include",
])
else:
env.Append(LIBPATH=[
"/usr/lib",
@@ -138,6 +149,22 @@ if _extra_cc:
if arch != "Darwin":
env.Append(LINKFLAGS=["-Wl,--as-needed", "-Wl,--no-undefined"])
# Shorter build output: show brief descriptions instead of full commands.
# Full command lines are still printed on failure by scons.
if not GetOption('verbose'):
for action, short in (
("CC", "CC"),
("CXX", "CXX"),
("LINK", "LINK"),
("SHCC", "CC"),
("SHCXX", "CXX"),
("SHLINK", "LINK"),
("AR", "AR"),
("RANLIB", "RANLIB"),
("AS", "AS"),
):
env[f"{action}COMSTR"] = f" [{short}] $TARGET"
# progress output
node_interval = 5
node_count = 0
@@ -149,10 +176,9 @@ if os.environ.get('SCONS_PROGRESS'):
Progress(progress_function, interval=node_interval)
# ********** Cython build environment **********
py_include = sysconfig.get_paths()['include']
envCython = env.Clone()
envCython["CPPPATH"] += [py_include, np.get_include()]
envCython["CCFLAGS"] += ["-Wno-#warnings", "-Wno-shadow", "-Wno-deprecated-declarations"]
envCython["CCFLAGS"] += ["-Wno-#warnings", "-Wno-cpp", "-Wno-shadow", "-Wno-deprecated-declarations"]
envCython["CCFLAGS"].remove("-Werror")
envCython["LIBS"] = []
@@ -215,10 +241,8 @@ SConscript(['selfdrive/SConscript'])
SConscript(['sunnypilot/SConscript'])
if Dir('#tools/cabana/').exists() and GetOption('extras'):
SConscript(['tools/replay/SConscript'])
if arch != "larch64":
SConscript(['tools/cabana/SConscript'])
if Dir('#tools/cabana/').exists() and arch != "larch64":
SConscript(['tools/cabana/SConscript'])
env.CompilationDatabase('compile_commands.json')

View File

@@ -499,7 +499,8 @@ struct DeviceState @0xa4d8b5af2aa492eb {
pmicTempC @39 :List(Float32);
intakeTempC @46 :Float32;
exhaustTempC @47 :Float32;
caseTempC @48 :Float32;
gnssTempC @48 :Float32;
bottomSocTempC @50 :Float32;
maxTempC @44 :Float32; # max of other temps, used to control fan
thermalZones @38 :List(ThermalZone);
thermalStatus @14 :ThermalStatus;
@@ -592,6 +593,7 @@ struct PandaState @0xa7649e2575e4591e {
harnessStatus @21 :HarnessStatus;
sbu1Voltage @35 :Float32;
sbu2Voltage @36 :Float32;
soundOutputLevel @37 :UInt16;
# can health
canState0 @29 :PandaCanState;
@@ -2232,9 +2234,9 @@ struct DriverMonitoringState @0xb83cda094a1da284 {
isActiveMode @16 :Bool;
isRHD @4 :Bool;
uncertainCount @19 :UInt32;
phoneProbOffset @20 :Float32;
phoneProbValidCount @21 :UInt32;
phoneProbOffsetDEPRECATED @20 :Float32;
phoneProbValidCountDEPRECATED @21 :UInt32;
isPreviewDEPRECATED @15 :Bool;
rhdCheckedDEPRECATED @5 :Bool;
eventsDEPRECATED @0 :List(Car.OnroadEventDEPRECATED);

View File

@@ -5,7 +5,7 @@ import numbers
import random
import threading
import time
from parameterized import parameterized
from openpilot.common.parameterized import parameterized
import pytest
from cereal import log, car

View File

@@ -1,7 +1,7 @@
import os
import tempfile
from typing import Dict
from parameterized import parameterized
from openpilot.common.parameterized import parameterized
import cereal.services as services
from cereal.services import SERVICE_LIST

View File

@@ -5,7 +5,6 @@ common_libs = [
'swaglog.cc',
'util.cc',
'ratekeeper.cc',
'clutil.cc',
]
_common = env.Library('common', common_libs, LIBS="json11")

View File

@@ -1,98 +0,0 @@
#include "common/clutil.h"
#include <cassert>
#include <iostream>
#include <memory>
#include "common/util.h"
#include "common/swaglog.h"
namespace {  // helper functions

// Generic wrapper for OpenCL's two-call info-query pattern: the first call
// reports the buffer size, the second fills it.  The returned std::string
// keeps whatever bytes OpenCL wrote (presumably including a trailing NUL —
// per the clGet*Info contract; TODO confirm if that matters to callers).
template <typename Func, typename Id, typename Name>
std::string get_info(Func get_info_func, Id id, Name param_name) {
  size_t size = 0;
  CL_CHECK(get_info_func(id, param_name, 0, NULL, &size));
  std::string info(size, '\0');
  CL_CHECK(get_info_func(id, param_name, size, info.data(), NULL));
  return info;
}

inline std::string get_platform_info(cl_platform_id id, cl_platform_info name) { return get_info(&clGetPlatformInfo, id, name); }
inline std::string get_device_info(cl_device_id id, cl_device_info name) { return get_info(&clGetDeviceInfo, id, name); }

// Log platform and device details (vendor, version, extensions, device type,
// max work-group size) at debug level.  Query errors here are ignored rather
// than asserted, since this is informational only.
void cl_print_info(cl_platform_id platform, cl_device_id device) {
  size_t work_group_size = 0;
  cl_device_type device_type = 0;
  clGetDeviceInfo(device, CL_DEVICE_MAX_WORK_GROUP_SIZE, sizeof(work_group_size), &work_group_size, NULL);
  clGetDeviceInfo(device, CL_DEVICE_TYPE, sizeof(device_type), &device_type, NULL);

  const char *type_str = "Other...";
  switch (device_type) {
    case CL_DEVICE_TYPE_CPU: type_str = "CL_DEVICE_TYPE_CPU"; break;
    case CL_DEVICE_TYPE_GPU: type_str = "CL_DEVICE_TYPE_GPU"; break;
    case CL_DEVICE_TYPE_ACCELERATOR: type_str = "CL_DEVICE_TYPE_ACCELERATOR"; break;
  }

  LOGD("vendor: %s", get_platform_info(platform, CL_PLATFORM_VENDOR).c_str());
  LOGD("platform version: %s", get_platform_info(platform, CL_PLATFORM_VERSION).c_str());
  LOGD("profile: %s", get_platform_info(platform, CL_PLATFORM_PROFILE).c_str());
  LOGD("extensions: %s", get_platform_info(platform, CL_PLATFORM_EXTENSIONS).c_str());
  LOGD("name: %s", get_device_info(device, CL_DEVICE_NAME).c_str());
  LOGD("device version: %s", get_device_info(device, CL_DEVICE_VERSION).c_str());
  LOGD("max work group size: %zu", work_group_size);
  LOGD("type = %d, %s", (int)device_type, type_str);
}

// Fetch and log the program build status and build log after a failed
// clBuildProgram call.
void cl_print_build_errors(cl_program program, cl_device_id device) {
  cl_build_status status;
  clGetProgramBuildInfo(program, device, CL_PROGRAM_BUILD_STATUS, sizeof(status), &status, NULL);

  size_t log_size;
  clGetProgramBuildInfo(program, device, CL_PROGRAM_BUILD_LOG, 0, NULL, &log_size);
  std::string log(log_size, '\0');
  clGetProgramBuildInfo(program, device, CL_PROGRAM_BUILD_LOG, log_size, &log[0], NULL);

  LOGE("build failed; status=%d, log: %s", status, log.c_str());
}

}  // namespace
// Return the first device of the requested type from the first platform that
// has one, logging every platform name along the way.  Aborts (assert(0))
// when no platform provides such a device — callers rely on a valid id.
cl_device_id cl_get_device_id(cl_device_type device_type) {
  cl_uint num_platforms = 0;
  CL_CHECK(clGetPlatformIDs(0, NULL, &num_platforms));
  std::unique_ptr<cl_platform_id[]> platform_ids = std::make_unique<cl_platform_id[]>(num_platforms);
  CL_CHECK(clGetPlatformIDs(num_platforms, &platform_ids[0], NULL));

  for (size_t i = 0; i < num_platforms; ++i) {
    LOGD("platform[%zu] CL_PLATFORM_NAME: %s", i, get_platform_info(platform_ids[i], CL_PLATFORM_NAME).c_str());

    // Get first device
    if (cl_device_id device_id = NULL; clGetDeviceIDs(platform_ids[i], device_type, 1, &device_id, NULL) == 0 && device_id) {
      cl_print_info(platform_ids[i], device_id);
      return device_id;
    }
  }

  LOGE("No valid openCL platform found");
  assert(0);
  return nullptr;
}
// Create an OpenCL context for a single device; asserts on failure (CL_CHECK_ERR).
cl_context cl_create_context(cl_device_id device_id) {
  return CL_CHECK_ERR(clCreateContext(NULL, 1, &device_id, NULL, NULL, &err));
}

// Decrement the context's reference count (releases it once the count hits zero).
void cl_release_context(cl_context context) {
  clReleaseContext(context);
}
// Build an OpenCL program from a source file on disk; thin wrapper over
// cl_program_from_source.  `args` are compiler options passed to clBuildProgram.
cl_program cl_program_from_file(cl_context ctx, cl_device_id device_id, const char* path, const char* args) {
  return cl_program_from_source(ctx, device_id, util::read_file(path), args);
}

// Compile OpenCL C source for a single device.  On build failure the status
// and compiler log are printed, then the process aborts — a broken kernel is
// unrecoverable here.
cl_program cl_program_from_source(cl_context ctx, cl_device_id device_id, const std::string& src, const char* args) {
  const char *csrc = src.c_str();
  cl_program prg = CL_CHECK_ERR(clCreateProgramWithSource(ctx, 1, &csrc, NULL, &err));
  if (int err = clBuildProgram(prg, 1, &device_id, args, NULL, NULL); err != 0) {
    cl_print_build_errors(prg, device_id);
    assert(0);
  }
  return prg;
}

View File

@@ -1,28 +0,0 @@
#pragma once

// Small helpers around OpenCL boilerplate: device/context/program setup plus
// assert-on-error macros.  Used by camerad/modeld-style consumers.

#ifdef __APPLE__
#include <OpenCL/cl.h>
#else
#include <CL/cl.h>
#endif

#include <string>

// Assert that an OpenCL call returned CL_SUCCESS; the expression is evaluated once.
#define CL_CHECK(_expr)            \
  do {                             \
    assert(CL_SUCCESS == (_expr)); \
  } while (0)

// Evaluate an OpenCL create-style call that reports errors via a trailing
// out-parameter; callers pass `&err`, which refers to the local declared here.
// Asserts on a null result or non-success, then yields the result.
// NOTE: uses a GNU statement-expression — GCC/Clang only.
#define CL_CHECK_ERR(_expr)           \
  ({                                  \
    cl_int err = CL_INVALID_VALUE;    \
    __typeof__(_expr) _ret = _expr;   \
    assert(_ret && err == CL_SUCCESS); \
    _ret;                             \
  })

// Pick the first available device of the given type; aborts if none exists.
cl_device_id cl_get_device_id(cl_device_type device_type);
cl_context cl_create_context(cl_device_id device_id);
void cl_release_context(cl_context context);
// Compile OpenCL C from a string or a file; abort with a printed build log on failure.
cl_program cl_program_from_source(cl_context ctx, cl_device_id device_id, const std::string& src, const char* args = nullptr);
cl_program cl_program_from_file(cl_context ctx, cl_device_id device_id, const char* path, const char* args);

37
common/file_chunker.py Normal file
View File

@@ -0,0 +1,37 @@
import math
import os
from pathlib import Path
CHUNK_SIZE = 45 * 1024 * 1024  # 45MB, under GitHub's 50MB limit


def get_chunk_name(name, idx, num_chunks):
  """Name of chunk `idx` (0-based) out of `num_chunks`, e.g. "f.chunk01of03"."""
  return "%s.chunk%02dof%02d" % (name, idx + 1, num_chunks)


def get_manifest_path(name):
  """Path of the manifest that records how many chunks `name` was split into."""
  return name + ".chunkmanifest"


def get_chunk_paths(path, file_size):
  """All paths (manifest first, then chunk files) for a file of `file_size` bytes."""
  count = math.ceil(file_size / CHUNK_SIZE)
  paths = [get_manifest_path(path)]
  for i in range(count):
    paths.append(get_chunk_name(path, i, count))
  return paths


def chunk_file(path, targets):
  """Split `path` into `targets` (manifest + chunks), then delete the original.

  Every chunk path in `targets` is written, even if it ends up empty; the
  manifest stores the total chunk count so reassembly knows what to read.
  """
  manifest_path = targets[0]
  chunk_paths = list(targets[1:])
  data = Path(path).read_bytes()
  # An empty file still needs one (empty) chunk.
  needed = max(1, math.ceil(len(data) / CHUNK_SIZE))
  assert len(chunk_paths) >= needed, f"Allowed {len(chunk_paths)} chunks but needs at least {needed}, for path {path}"
  for i, chunk_path in enumerate(chunk_paths):
    start = i * CHUNK_SIZE
    Path(chunk_path).write_bytes(data[start:start + CHUNK_SIZE])
  Path(manifest_path).write_text(str(len(chunk_paths)))
  os.remove(path)


def read_file_chunked(path):
  """Read `path`, reassembling from chunks when a manifest exists.

  Raises FileNotFoundError if neither the manifest nor the plain file exists.
  """
  manifest = get_manifest_path(path)
  if os.path.isfile(manifest):
    count = int(Path(manifest).read_text().strip())
    parts = [Path(get_chunk_name(path, i, count)).read_bytes() for i in range(count)]
    return b''.join(parts)
  if os.path.isfile(path):
    return Path(path).read_bytes()
  raise FileNotFoundError(path)

View File

@@ -1,85 +0,0 @@
#pragma once
// Small fixed-size vector/matrix types (row-major storage) with the handful
// of multiply helpers used for camera/display transforms.
typedef struct vec3 {
  float v[3];
} vec3;

typedef struct vec4 {
  float v[4];
} vec4;

typedef struct mat3 {
  float v[3*3];
} mat3;

typedef struct mat4 {
  float v[4*4];
} mat4;

// 3x3 matrix product: returns a * b.
static inline mat3 matmul3(const mat3 &a, const mat3 &b) {
  mat3 out = {{0.0}};
  for (int i = 0; i < 3; i++) {
    for (int j = 0; j < 3; j++) {
      float acc = 0.0f;
      for (int k = 0; k < 3; k++) {
        acc += a.v[3*i + k] * b.v[3*k + j];
      }
      out.v[3*i + j] = acc;
    }
  }
  return out;
}

// 3x3 matrix times 3-vector: returns a * b.
static inline vec3 matvecmul3(const mat3 &a, const vec3 &b) {
  vec3 out = {{0.0}};
  for (int i = 0; i < 3; i++) {
    for (int j = 0; j < 3; j++) {
      out.v[i] += a.v[3*i + j] * b.v[j];
    }
  }
  return out;
}

// 4x4 matrix product: returns a * b.
static inline mat4 matmul(const mat4 &a, const mat4 &b) {
  mat4 out = {{0.0}};
  for (int i = 0; i < 4; i++) {
    for (int j = 0; j < 4; j++) {
      float acc = 0.0f;
      for (int k = 0; k < 4; k++) {
        acc += a.v[4*i + k] * b.v[4*k + j];
      }
      out.v[4*i + j] = acc;
    }
  }
  return out;
}

// 4x4 matrix times 4-vector: returns a * b.
static inline vec4 matvecmul(const mat4 &a, const vec4 &b) {
  vec4 out = {{0.0}};
  for (int i = 0; i < 4; i++) {
    for (int j = 0; j < 4; j++) {
      out.v[i] += a.v[4*i + j] * b.v[j];
    }
  }
  return out;
}

// scales the input and output space of a transformation matrix
// that assumes pixel-center origin.
static inline mat3 transform_scale_buffer(const mat3 &in, float s) {
  // in_pt = ( transform(out_pt/s + 0.5) - 0.5) * s
  const mat3 scale_up = {{
    1.0f/s, 0.0f, 0.5f,
    0.0f, 1.0f/s, 0.5f,
    0.0f, 0.0f, 1.0f,
  }};
  const mat3 scale_down = {{
    s, 0.0f, -0.5f*s,
    0.0f, s, -0.5f*s,
    0.0f, 0.0f, 1.0f,
  }};
  return matmul3(scale_down, matmul3(in, scale_up));
}

47
common/parameterized.py Normal file
View File

@@ -0,0 +1,47 @@
import sys
import pytest
import inspect
class parameterized:
  """Minimal stand-in for the third-party `parameterized` package, backed by pytest.

  Only the subset used in this repo is implemented: `parameterized.expand`,
  which maps a list of cases onto `pytest.mark.parametrize`.
  """

  @staticmethod
  def expand(cases):
    # Materialize the iterable once; pytest.mark.parametrize errors on an
    # empty argvalues list, so an empty case set skips the test instead.
    cases = list(cases)
    if not cases:
      return lambda func: pytest.mark.skip("no parameterized cases")(func)
    def decorator(func):
      # Function parameters (minus `self`) name the parametrize arguments.
      params = [p for p in inspect.signature(func).parameters if p != 'self']
      # Each case becomes a tuple, even single-value cases.
      normalized = [c if isinstance(c, tuple) else (c,) for c in cases]
      # Infer arg count from first case so extra params (e.g. from @given) are left untouched
      expand_params = params[: len(normalized[0])]
      if len(expand_params) == 1:
        # Single-argument form: pass scalar values, not 1-tuples.
        return pytest.mark.parametrize(expand_params[0], [c[0] for c in normalized])(func)
      return pytest.mark.parametrize(', '.join(expand_params), normalized)(func)
    return decorator
def parameterized_class(attrs, input_list=None):
  """Minimal stand-in for `parameterized.parameterized_class`, pytest-compatible.

  Two call forms are supported:
    - parameterized_class([{"attr": value, ...}, ...])
    - parameterized_class("attr" | ("a", "b"), [(v1, v2), ...])
  Each parameter set produces a subclass named <Cls>_<i> with those attributes
  set, injected into the caller's module globals so pytest collects it.
  """
  # Normalize both call forms into a list of attribute dicts.
  if isinstance(attrs, list) and (not attrs or isinstance(attrs[0], dict)):
    params_list = attrs
  else:
    assert input_list is not None
    attr_names = (attrs,) if isinstance(attrs, str) else tuple(attrs)
    params_list = [dict(zip(attr_names, v if isinstance(v, (tuple, list)) else (v,), strict=False)) for v in input_list]

  def decorator(cls):
    # Inject the generated subclasses into the *caller's* module globals
    # (frame depth 1 = wherever the decorator was applied).
    globs = sys._getframe(1).f_globals
    for i, params in enumerate(params_list):
      name = f"{cls.__name__}_{i}"
      new_cls = type(name, (cls,), dict(params))
      new_cls.__module__ = cls.__module__
      new_cls.__test__ = True  # override inherited False so pytest collects this subclass
      globs[name] = new_cls
    # Don't collect the un-parametrised base, but return it so outer decorators
    # (e.g. @pytest.mark.skip) land on it and propagate to subclasses via MRO.
    cls.__test__ = False
    return cls
  return decorator

View File

@@ -170,6 +170,7 @@ inline static std::unordered_map<std::string, ParamKeyAttributes> keys = {
{"OffroadMode", {CLEAR_ON_MANAGER_START, BOOL}},
{"Offroad_TiciSupport", {CLEAR_ON_MANAGER_START, JSON}},
{"OnroadScreenOffBrightness", {PERSISTENT | BACKUP, INT, "0"}},
{"OnroadScreenOffBrightnessMigrated", {PERSISTENT | BACKUP, STRING, "0.0"}},
{"OnroadScreenOffTimer", {PERSISTENT | BACKUP, INT, "15"}},
{"OnroadUploads", {PERSISTENT | BACKUP, BOOL, "1"}},
{"QuickBootToggle", {PERSISTENT | BACKUP, BOOL, "0"}},
@@ -190,7 +191,7 @@ inline static std::unordered_map<std::string, ParamKeyAttributes> keys = {
// Model Manager params
{"ModelManager_ActiveBundle", {PERSISTENT, JSON}},
{"ModelManager_ClearCache", {CLEAR_ON_MANAGER_START, BOOL}},
{"ModelManager_DownloadIndex", {CLEAR_ON_MANAGER_START | CLEAR_ON_ONROAD_TRANSITION, INT, "0"}},
{"ModelManager_DownloadIndex", {CLEAR_ON_MANAGER_START | CLEAR_ON_ONROAD_TRANSITION, INT}},
{"ModelManager_Favs", {PERSISTENT | BACKUP, STRING}},
{"ModelManager_LastSyncTime", {CLEAR_ON_MANAGER_START | CLEAR_ON_OFFROAD_TRANSITION, INT, "0"}},
{"ModelManager_ModelsCache", {PERSISTENT | BACKUP, JSON}},
@@ -218,6 +219,7 @@ inline static std::unordered_map<std::string, ParamKeyAttributes> keys = {
{"SubaruStopAndGoManualParkingBrake", {PERSISTENT | BACKUP, BOOL, "0"}},
{"TeslaCoopSteering", {PERSISTENT | BACKUP, BOOL, "0"}},
{"ToyotaEnforceStockLongitudinal", {PERSISTENT | BACKUP, BOOL, "0"}},
{"ToyotaStopAndGoHack", {PERSISTENT | BACKUP, BOOL, "0"}},
{"DynamicExperimentalControl", {PERSISTENT | BACKUP, BOOL, "0"}},
{"BlindSpot", {PERSISTENT | BACKUP, BOOL, "0"}},
@@ -268,6 +270,7 @@ inline static std::unordered_map<std::string, ParamKeyAttributes> keys = {
{"EnforceTorqueControl", {PERSISTENT | BACKUP, BOOL}},
{"LiveTorqueParamsToggle", {PERSISTENT | BACKUP , BOOL}},
{"LiveTorqueParamsRelaxedToggle", {PERSISTENT | BACKUP , BOOL}},
{"TorqueControlTune", {PERSISTENT | BACKUP, FLOAT}},
{"TorqueParamsOverrideEnabled", {PERSISTENT | BACKUP, BOOL, "0"}},
{"TorqueParamsOverrideFriction", {PERSISTENT | BACKUP, FLOAT, "0.1"}},
{"TorqueParamsOverrideLatAccelFactor", {PERSISTENT | BACKUP, FLOAT, "2.5"}},

View File

@@ -27,14 +27,14 @@ public:
auto param_path = Params().getParamPath();
if (util::file_exists(param_path)) {
std::string real_path = util::readlink(param_path);
system(util::string_format("rm %s -rf", real_path.c_str()).c_str());
util::check_system(util::string_format("rm %s -rf", real_path.c_str()));
unlink(param_path.c_str());
}
if (getenv("COMMA_CACHE") == nullptr) {
system(util::string_format("rm %s -rf", Path::download_cache_root().c_str()).c_str());
util::check_system(util::string_format("rm %s -rf", Path::download_cache_root().c_str()));
}
system(util::string_format("rm %s -rf", Path::comma_home().c_str()).c_str());
system(util::string_format("rm %s -rf", msgq_path.c_str()).c_str());
util::check_system(util::string_format("rm %s -rf", Path::comma_home().c_str()));
util::check_system(util::string_format("rm %s -rf", msgq_path.c_str()));
unsetenv("OPENPILOT_PREFIX");
}

View File

@@ -6,9 +6,9 @@
#include "common/timing.h"
#include "common/util.h"
RateKeeper::RateKeeper(const std::string &name, float rate, float print_delay_threshold)
: name(name),
print_delay_threshold(std::max(0.f, print_delay_threshold)) {
RateKeeper::RateKeeper(const std::string &name_, float rate, float print_delay_threshold_)
: name(name_),
print_delay_threshold(std::max(0.f, print_delay_threshold_)) {
interval = 1 / rate;
last_monitor_time = seconds_since_boot();
next_frame_time = last_monitor_time + interval;

View File

@@ -36,7 +36,7 @@ TEST_CASE("util::read_file") {
REQUIRE(util::read_file(filename).empty());
std::string content = random_bytes(64 * 1024);
write(fd, content.c_str(), content.size());
REQUIRE(write(fd, content.c_str(), content.size()) == (ssize_t)content.size());
std::string ret = util::read_file(filename);
bool equal = (ret == content);
REQUIRE(equal);
@@ -114,12 +114,12 @@ TEST_CASE("util::safe_fwrite") {
}
TEST_CASE("util::create_directories") {
system("rm /tmp/test_create_directories -rf");
REQUIRE(system("rm /tmp/test_create_directories -rf") == 0);
std::string dir = "/tmp/test_create_directories/a/b/c/d/e/f";
auto check_dir_permissions = [](const std::string &dir, mode_t mode) -> bool {
auto check_dir_permissions = [](const std::string &path, mode_t mode) -> bool {
struct stat st = {};
return stat(dir.c_str(), &st) == 0 && (st.st_mode & S_IFMT) == S_IFDIR && (st.st_mode & (S_IRWXU | S_IRWXG | S_IRWXO)) == mode;
return stat(path.c_str(), &st) == 0 && (st.st_mode & S_IFMT) == S_IFDIR && (st.st_mode & (S_IRWXU | S_IRWXG | S_IRWXO)) == mode;
};
SECTION("create_directories") {
@@ -132,7 +132,7 @@ TEST_CASE("util::create_directories") {
}
SECTION("a file exists with the same name") {
REQUIRE(util::create_directories(dir, 0755));
int f = open((dir + "/file").c_str(), O_RDWR | O_CREAT);
int f = open((dir + "/file").c_str(), O_RDWR | O_CREAT, 0644);
REQUIRE(f != -1);
close(f);
REQUIRE(util::create_directories(dir + "/file", 0755) == false);

View File

@@ -181,9 +181,9 @@ bool file_exists(const std::string& fn) {
}
static bool createDirectory(std::string dir, mode_t mode) {
auto verify_dir = [](const std::string& dir) -> bool {
auto verify_dir = [](const std::string& path) -> bool {
struct stat st = {};
return (stat(dir.c_str(), &st) == 0 && (st.st_mode & S_IFMT) == S_IFDIR);
return (stat(path.c_str(), &st) == 0 && (st.st_mode & S_IFMT) == S_IFDIR);
};
// remove trailing /'s
while (dir.size() > 1 && dir.back() == '/') {
@@ -288,7 +288,7 @@ std::string strip(const std::string &str) {
std::string check_output(const std::string& command) {
char buffer[128];
std::string result;
std::unique_ptr<FILE, decltype(&pclose)> pipe(popen(command.c_str(), "r"), pclose);
std::unique_ptr<FILE, int(*)(FILE*)> pipe(popen(command.c_str(), "r"), pclose);
if (!pipe) {
return "";
@@ -303,7 +303,7 @@ std::string check_output(const std::string& command) {
bool system_time_valid() {
// Default to August 26, 2024
tm min_tm = {.tm_year = 2024 - 1900, .tm_mon = 7, .tm_mday = 26};
tm min_tm = {.tm_mday = 26, .tm_mon = 7, .tm_year = 2024 - 1900};
time_t min_date = mktime(&min_tm);
struct stat st;

View File

@@ -97,6 +97,13 @@ bool create_directories(const std::string &dir, mode_t mode);
std::string check_output(const std::string& command);
inline void check_system(const std::string& cmd) {
int ret = std::system(cmd.c_str());
if (ret != 0) {
fprintf(stderr, "system command failed (%d): %s\n", ret, cmd.c_str());
}
}
bool system_time_valid();
inline void sleep_for(const int milliseconds) {

View File

@@ -4,7 +4,7 @@
A supported vehicle is one that just works when you install a comma device. All supported cars provide a better experience than any stock system. Supported vehicles reference the US market unless otherwise specified.
# 335 Supported Cars
# 336 Supported Cars
|Make|Model|Supported Package|ACC|No ACC accel below|No ALC below|Steering Torque|Resume from stop|<a href="##"><img width=2000></a>Hardware Needed<br>&nbsp;|Video|Setup Video|
|---|---|---|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
@@ -166,6 +166,7 @@ A supported vehicle is one that just works when you install a comma device. All
|Kia|Forte 2022-23|Smart Cruise Control (SCC)|openpilot available[<sup>1</sup>](#footnotes)|0 mph|0 mph|[![star](assets/icon-star-full.svg)](##)|[![star](assets/icon-star-full.svg)](##)|<details><summary>Parts</summary><sub>- 1 Hyundai E connector<br>- 1 OBD-C cable (2 ft)<br>- 1 comma four<br>- 1 comma power v3<br>- 1 harness box<br>- 1 mount<br><a href="https://comma.ai/shop/comma-3x?harness=Kia Forte 2022-23">Buy Here</a></sub></details>|||
|Kia|K5 2021-24|Smart Cruise Control (SCC)|openpilot available[<sup>1</sup>](#footnotes)|0 mph|0 mph|[![star](assets/icon-star-full.svg)](##)|[![star](assets/icon-star-full.svg)](##)|<details><summary>Parts</summary><sub>- 1 Hyundai A connector<br>- 1 OBD-C cable (2 ft)<br>- 1 comma four<br>- 1 comma power v3<br>- 1 harness box<br>- 1 mount<br><a href="https://comma.ai/shop/comma-3x?harness=Kia K5 2021-24">Buy Here</a></sub></details>|||
|Kia|K5 Hybrid 2020-22|Smart Cruise Control (SCC)|openpilot available[<sup>1</sup>](#footnotes)|0 mph|0 mph|[![star](assets/icon-star-full.svg)](##)|[![star](assets/icon-star-full.svg)](##)|<details><summary>Parts</summary><sub>- 1 Hyundai A connector<br>- 1 OBD-C cable (2 ft)<br>- 1 comma four<br>- 1 comma power v3<br>- 1 harness box<br>- 1 mount<br><a href="https://comma.ai/shop/comma-3x?harness=Kia K5 Hybrid 2020-22">Buy Here</a></sub></details>|||
|Kia|K7 2017|Smart Cruise Control (SCC)|openpilot available[<sup>1</sup>](#footnotes)|0 mph|0 mph|[![star](assets/icon-star-full.svg)](##)|[![star](assets/icon-star-full.svg)](##)|<details><summary>Parts</summary><sub>- 1 Hyundai C connector<br>- 1 OBD-C cable (2 ft)<br>- 1 comma four<br>- 1 comma power v3<br>- 1 harness box<br>- 1 mount<br><a href="https://comma.ai/shop/comma-3x?harness=Kia K7 2017">Buy Here</a></sub></details>|||
|Kia|K8 Hybrid (with HDA II) 2023|Highway Driving Assist II|openpilot available[<sup>1</sup>](#footnotes)|0 mph|0 mph|[![star](assets/icon-star-full.svg)](##)|[![star](assets/icon-star-full.svg)](##)|<details><summary>Parts</summary><sub>- 1 Hyundai Q connector<br>- 1 OBD-C cable (2 ft)<br>- 1 comma four<br>- 1 comma power v3<br>- 1 harness box<br>- 1 mount<br><a href="https://comma.ai/shop/comma-3x?harness=Kia K8 Hybrid (with HDA II) 2023">Buy Here</a></sub></details>|||
|Kia|Niro EV 2019|All|openpilot available[<sup>1</sup>](#footnotes)|0 mph|0 mph|[![star](assets/icon-star-full.svg)](##)|[![star](assets/icon-star-full.svg)](##)|<details><summary>Parts</summary><sub>- 1 Hyundai H connector<br>- 1 OBD-C cable (2 ft)<br>- 1 comma four<br>- 1 comma power v3<br>- 1 harness box<br>- 1 mount<br><a href="https://comma.ai/shop/comma-3x?harness=Kia Niro EV 2019">Buy Here</a></sub></details>|<a href="https://www.youtube.com/watch?v=lT7zcG6ZpGo" target="_blank"><img height="18px" src="assets/icon-youtube.svg"></img></a>||
|Kia|Niro EV 2020|All|openpilot available[<sup>1</sup>](#footnotes)|0 mph|0 mph|[![star](assets/icon-star-full.svg)](##)|[![star](assets/icon-star-full.svg)](##)|<details><summary>Parts</summary><sub>- 1 Hyundai F connector<br>- 1 OBD-C cable (2 ft)<br>- 1 comma four<br>- 1 comma power v3<br>- 1 harness box<br>- 1 mount<br><a href="https://comma.ai/shop/comma-3x?harness=Kia Niro EV 2020">Buy Here</a></sub></details>|<a href="https://www.youtube.com/watch?v=lT7zcG6ZpGo" target="_blank"><img height="18px" src="assets/icon-youtube.svg"></img></a>||
@@ -345,7 +346,7 @@ A supported vehicle is one that just works when you install a comma device. All
|Volkswagen|Touran 2016-23|Adaptive Cruise Control (ACC) & Lane Assist|openpilot available[<sup>1,14</sup>](#footnotes)|0 mph|0 mph|[![star](assets/icon-star-full.svg)](##)|[![star](assets/icon-star-full.svg)](##)|<details><summary>Parts</summary><sub>- 1 OBD-C cable (2 ft)<br>- 1 VW J533 connector<br>- 1 comma four<br>- 1 harness box<br>- 1 long OBD-C cable (9.5 ft)<br>- 1 mount<br><a href="https://comma.ai/shop/comma-3x?harness=Volkswagen Touran 2016-23">Buy Here</a></sub></details>|||
### Footnotes
<sup>1</sup>openpilot Longitudinal Control (Alpha) is available behind a toggle; the toggle is only available in non-release branches such as `devel` or `nightly-dev`. <br />
<sup>1</sup>openpilot Longitudinal Control (Alpha) is available behind a toggle; the toggle is only available in non-release branches such as `nightly-dev`. <br />
<sup>2</sup>Refers only to the Focus Mk4 (C519) available in Europe/China/Taiwan/Australasia, not the Focus Mk3 (C346) in North and South America/Southeast Asia. <br />
<sup>3</sup>See more setup details for <a href="https://github.com/commaai/openpilot/wiki/gm" target="_blank">GM</a>. <br />
<sup>4</sup>2019 Honda Civic 1.6L Diesel Sedan does not have ALC below 12mph. <br />

View File

@@ -13,13 +13,13 @@ Development is coordinated through [Discord](https://discord.comma.ai) and GitHu
## What contributions are we looking for?
**openpilot's priorities are [safety](SAFETY.md), stability, quality, and features, in that order.**
openpilot is part of comma's mission to *solve self-driving cars while delivering shippable intermediaries*, and all development is towards that goal.
openpilot is part of comma's mission to *solve self-driving cars while delivering shippable intermediaries*, and all development is towards that goal.
### What gets merged?
The probability of a pull request being merged is a function of its value to the project and the effort it will take us to get it merged.
If a PR offers *some* value but will take lots of time to get merged, it will be closed.
Simple, well-tested bug fixes are the easiest to merge, and new features are the hardest to get merged.
Simple, well-tested bug fixes are the easiest to merge, and new features are the hardest to get merged.
All of these are examples of good PRs:
* typo fix: https://github.com/commaai/openpilot/pull/30678
@@ -29,17 +29,17 @@ All of these are examples of good PRs:
### What doesn't get merged?
* **style changes**: code is art, and it's up to the author to make it beautiful
* **style changes**: code is art, and it's up to the author to make it beautiful
* **500+ line PRs**: clean it up, break it up into smaller PRs, or both
* **PRs without a clear goal**: every PR must have a singular and clear goal
* **UI design**: we do not have a good review process for this yet
* **New features**: We believe openpilot is mostly feature-complete, and the rest is a matter of refinement and fixing bugs. As a result of this, most feature PRs will be immediately closed, however the beauty of open source is that forks can and do offer features that upstream openpilot doesn't.
* **Negative expected value**: This a class of PRs that makes an improvement, but the risk or validation costs more than the improvement. The risk can be mitigated by first getting a failing test merged.
* **Negative expected value**: This is a class of PRs that makes an improvement, but the risk or validation costs more than the improvement. The risk can be mitigated by first getting a failing test merged.
### First contribution
[Projects / openpilot bounties](https://github.com/orgs/commaai/projects/26/views/1?pane=info) is the best place to get started and goes in-depth on what's expected when working on a bounty.
There's lot of bounties that don't require a comma 3X or a car.
There are a lot of bounties that don't require a comma 3X or a car.
## Pull Requests

View File

@@ -8,7 +8,7 @@ NOTE: Those commands must be run in the root directory of openpilot, **not /docs
**1. Install the docs dependencies**
``` bash
pip install .[docs]
uv pip install .[docs]
```
**2. Build the new site**

View File

@@ -21,10 +21,10 @@ Each car brand is supported by a standard interface structure in `opendbc/car/[b
* `values.py`: Limits for actuation, general constants for cars, and supported car documentation
* `radar_interface.py`: Interface for parsing radar points from the car, if applicable
## panda
## safety
* `board/safety/safety_[brand].h`: Brand-specific safety logic
* `tests/safety/test_[brand].py`: Brand-specific safety CI tests
* `opendbc_repo/opendbc/safety/modes/[brand].h`: Brand-specific safety logic
* `opendbc_repo/opendbc/safety/tests/test_[brand].py`: Brand-specific safety CI tests
## openpilot

2
panda

Submodule panda updated: a95e060e85...f5f296c65c

View File

@@ -20,26 +20,34 @@ dependencies = [
# core
"cffi",
"scons",
"pycapnp==2.1.0",
"pycapnp",
"Cython",
"setuptools",
"numpy >=2.0",
# vendored native dependencies
"bzip2 @ git+https://github.com/commaai/dependencies.git@releases#subdirectory=bzip2",
"capnproto @ git+https://github.com/commaai/dependencies.git@releases#subdirectory=capnproto",
"eigen @ git+https://github.com/commaai/dependencies.git@releases#subdirectory=eigen",
"ffmpeg @ git+https://github.com/commaai/dependencies.git@releases#subdirectory=ffmpeg",
"libjpeg @ git+https://github.com/commaai/dependencies.git@releases#subdirectory=libjpeg",
"libyuv @ git+https://github.com/commaai/dependencies.git@releases#subdirectory=libyuv",
"openssl3 @ git+https://github.com/commaai/dependencies.git@releases#subdirectory=openssl3",
"python3-dev @ git+https://github.com/commaai/dependencies.git@releases#subdirectory=python3-dev",
"zstd @ git+https://github.com/commaai/dependencies.git@releases#subdirectory=zstd",
"ncurses @ git+https://github.com/commaai/dependencies.git@releases#subdirectory=ncurses",
"zeromq @ git+https://github.com/commaai/dependencies.git@releases#subdirectory=zeromq",
"git-lfs @ git+https://github.com/commaai/dependencies.git@releases#subdirectory=git-lfs",
# body / webrtcd
"av",
"aiohttp",
"aiortc",
# aiortc does not put an upper bound on pyopenssl and is now incompatible
# with the latest release
"pyopenssl < 24.3.0",
"pyaudio",
# panda
"libusb1",
"spidev; platform_system == 'Linux'",
# modeld
"onnx >= 1.14.0",
# logging
"pyzmq",
"sentry-sdk",
@@ -67,7 +75,6 @@ dependencies = [
# ui
"raylib > 5.5.0.3",
"qrcode",
"mapbox-earcut",
"jeepney",
]
@@ -86,7 +93,6 @@ testing = [
"pytest-subtests",
# https://github.com/pytest-dev/pytest-xdist/pull/1229
"pytest-xdist @ git+https://github.com/sshane/pytest-xdist@2b4372bd62699fb412c4fe2f95bf9f01bd2018da",
"pytest-timeout",
"pytest-asyncio",
"pytest-mock",
"ruff",
@@ -95,17 +101,13 @@ testing = [
]
dev = [
"av",
"dictdiffer",
"matplotlib",
"opencv-python-headless",
"parameterized >=0.8, <0.9",
"pyautogui",
"pywinctl",
"gcc-arm-none-eabi @ git+https://github.com/commaai/dependencies.git@releases#subdirectory=gcc-arm-none-eabi",
]
tools = [
"metadrive-simulator @ https://github.com/commaai/metadrive/releases/download/MetaDrive-minimal-0.4.2.4/metadrive_simulator-0.4.2.4-py3-none-any.whl ; (platform_machine != 'aarch64')",
"metadrive-simulator @ git+https://github.com/commaai/metadrive.git@minimal ; (platform_machine != 'aarch64')",
"dearpygui>=2.1.0; (sys_platform != 'linux' or platform_machine != 'aarch64')", # not vended for linux aarch64
]
@@ -129,7 +131,6 @@ cpp_files = "test_*"
cpp_harness = "selfdrive/test/cpp_harness.py"
python_files = "test_*.py"
asyncio_default_fixture_loop_scope = "function"
#timeout = "30" # you get this long by default
markers = [
"slow: tests that take awhile to run and can be skipped with -m 'not slow'",
"tici: tests that are only meant to run on the C3/C3X",
@@ -147,7 +148,7 @@ testpaths = [
[tool.codespell]
quiet-level = 3
# if you've got a short variable name that's getting flagged, add it here
ignore-words-list = "bu,ro,te,ue,alo,hda,ois,nam,nams,ned,som,parm,setts,inout,warmup,bumb,nd,sie,preints,whit,indexIn,ws,uint,grey,deque,stdio,amin,BA,LITE,atEnd,UIs,errorString,arange,FocusIn,od,tim,relA,hist,copyable,jupyter,thead,TGE,abl,lite"
ignore-words-list = "bu,ro,te,ue,alo,hda,ois,nam,nams,ned,som,parm,setts,inout,warmup,bumb,nd,sie,preints,whit,indexIn,ws,uint,grey,deque,stdio,amin,BA,LITE,atEnd,UIs,errorString,arange,FocusIn,od,tim,relA,hist,copyable,jupyter,thead,TGE,abl,lite,ser"
builtin = "clear,rare,informal,code,names,en-GB_to_en-US"
skip = "./third_party/*, ./tinygrad/*, ./tinygrad_repo/*, ./msgq/*, ./panda/*, ./opendbc/*, ./opendbc_repo/*, ./rednose/*, ./rednose_repo/*, ./teleoprtc/*, ./teleoprtc_repo/*, *.po, uv.lock, *.onnx, ./cereal/gen/*, */c_generated_code/*, docs/assets/*, tools/plotjuggler/layouts/*, selfdrive/assets/offroad/mici_fcc.html"

View File

@@ -72,9 +72,8 @@ find . -name '*.pyc' -delete
find . -name 'moc_*' -delete
find . -name '__pycache__' -delete
rm -rf .sconsign.dblite Jenkinsfile release/
rm selfdrive/modeld/models/driving_vision.onnx
rm selfdrive/modeld/models/driving_policy.onnx
rm sunnypilot/modeld*/models/supercombo.onnx
rm -f selfdrive/modeld/models/*.onnx
rm -f sunnypilot/modeld*/models/*.onnx
find third_party/ -name '*x86*' -exec rm -r {} +
find third_party/ -name '*Darwin*' -exec rm -r {} +

View File

@@ -1,12 +1,14 @@
#!/usr/bin/env bash
set -e
# To build sim and docs, you can run the following to mount the scons cache to the same place as in CI:
# mkdir -p .ci_cache/scons_cache
# sudo mount --bind /tmp/scons_cache/ .ci_cache/scons_cache
SCRIPT_DIR=$(dirname "$0")
OPENPILOT_DIR=$SCRIPT_DIR/../../
DOCKER_IMAGE=sunnypilot
DOCKER_FILE=Dockerfile.openpilot
DOCKER_REGISTRY=ghcr.io/sunnypilot
COMMIT_SHA=$(git rev-parse HEAD)
if [ -n "$TARGET_ARCHITECTURE" ]; then
PLATFORM="linux/$TARGET_ARCHITECTURE"
TAG_SUFFIX="-$TARGET_ARCHITECTURE"
@@ -15,9 +17,11 @@ else
TAG_SUFFIX=""
fi
source $SCRIPT_DIR/docker_common_sp.sh $1 "$TAG_SUFFIX"
LOCAL_TAG=$DOCKER_IMAGE$TAG_SUFFIX
REMOTE_TAG=$DOCKER_REGISTRY/$LOCAL_TAG
REMOTE_SHA_TAG=$DOCKER_REGISTRY/$LOCAL_TAG:$COMMIT_SHA
DOCKER_BUILDKIT=1 docker buildx build --provenance false --pull --platform $PLATFORM --load --cache-to type=inline --cache-from type=registry,ref=$REMOTE_TAG -t $DOCKER_IMAGE:latest -t $REMOTE_TAG -t $LOCAL_TAG -f $OPENPILOT_DIR/$DOCKER_FILE $OPENPILOT_DIR
DOCKER_BUILDKIT=1 docker buildx build --provenance false --pull --platform $PLATFORM --load -t $DOCKER_IMAGE:latest -t $REMOTE_TAG -t $LOCAL_TAG -f $OPENPILOT_DIR/$DOCKER_FILE $OPENPILOT_DIR
if [ -n "$PUSH_IMAGE" ]; then
docker push $REMOTE_TAG

View File

@@ -1,18 +0,0 @@
if [ "$1" = "base" ]; then
export DOCKER_IMAGE=sunnypilot-base
export DOCKER_FILE=Dockerfile.sunnypilot_base
elif [ "$1" = "prebuilt" ]; then
export DOCKER_IMAGE=sunnypilot-prebuilt
export DOCKER_FILE=Dockerfile.sunnypilot
else
echo "Invalid docker build image: '$1'"
exit 1
fi
export DOCKER_REGISTRY=ghcr.io/sunnypilot
export COMMIT_SHA=$(git rev-parse HEAD)
TAG_SUFFIX=$2
LOCAL_TAG=$DOCKER_IMAGE$TAG_SUFFIX
REMOTE_TAG=$DOCKER_REGISTRY/$LOCAL_TAG
REMOTE_SHA_TAG=$DOCKER_REGISTRY/$LOCAL_TAG:$COMMIT_SHA

View File

@@ -1,4 +1,5 @@
import os
import pickle
import sys
import hashlib
import json
@@ -6,6 +7,41 @@ import re
from pathlib import Path
from datetime import datetime, UTC
# Output keys that must appear (across the combined "output_slices" of all
# model metadata files) for the model set to be considered valid.
REQUIRED_OUTPUT_KEYS = frozenset({
  "plan",
  "lane_lines",
  "road_edges",
  "lead",
  "desire_state",
  "desire_pred",
  "meta",
  "lead_prob",
  "lane_lines_prob",
  "pose",
  "wide_from_device_euler",
  "road_transform",
  "hidden_state",
})

# Output keys some models additionally provide; their presence is reported
# by the validator but never required.
OPTIONAL_OUTPUT_KEYS = frozenset({
  "planplus",
  "sim_pose",
  "desired_curvature",
})
def validate_model_outputs(metadata_paths: list[Path]) -> None:
  """Validate that the metadata pickles together expose every required model output.

  Args:
    metadata_paths: paths of pickled metadata files, each holding a dict
      with an "output_slices" mapping.

  Raises:
    ValueError: if any REQUIRED_OUTPUT_KEYS entry is absent from the union
      of output keys across all files.
  """
  seen_keys: set[str] = set()
  for metadata_path in metadata_paths:
    with open(metadata_path, "rb") as meta_file:
      meta = pickle.load(meta_file)
    # dict iteration yields keys, so no explicit .keys() call is needed
    seen_keys |= set(meta.get("output_slices", {}))

  missing = REQUIRED_OUTPUT_KEYS - seen_keys
  if missing:
    raise ValueError(f"Combined model metadata is missing required output keys: {sorted(missing)}")

  detected_optional = sorted(OPTIONAL_OUTPUT_KEYS & seen_keys)
  if detected_optional:
    print(f"Optional output keys detected: {detected_optional}")
def create_short_name(full_name):
# Remove parentheses and extract alphanumeric words
@@ -124,9 +160,19 @@ if __name__ == "__main__":
parser.add_argument("--output-dir", default="./output", help="Output directory for metadata")
parser.add_argument("--custom-name", help="Custom display name for the model")
parser.add_argument("--is-20hz", action="store_true", help="Whether this is a 20Hz model")
parser.add_argument("--validate-only", action="store_true")
parser.add_argument("--upstream-branch", default="unknown", help="Upstream branch name")
args = parser.parse_args()
if args.validate_only:
metadata_paths = glob.glob(os.path.join(args.model_dir, "*_metadata.pkl"))
if not metadata_paths:
print(f"No metadata files found in {args.model_dir}", file=sys.stderr)
sys.exit(1)
validate_model_outputs([Path(p) for p in metadata_paths])
print(f"Validated {len(metadata_paths)} metadata files successfully.")
sys.exit(0)
# Find all ONNX files in the given directory
model_paths = glob.glob(os.path.join(args.model_dir, "*.onnx"))
if not model_paths:

View File

@@ -1,17 +1,33 @@
#!/usr/bin/env python3
import os
import glob
import onnx
from tinygrad.nn.onnx import OnnxPBParser
BASEDIR = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), "../"))
MASTER_PATH = os.getenv("MASTER_PATH", BASEDIR)
MODEL_PATH = "/selfdrive/modeld/models/"
class MetadataOnnxPBParser(OnnxPBParser):
  """Minimal ONNX protobuf parser that decodes only the model's metadata
  properties, skipping every other ModelProto field.

  Lets the metadata be read without importing the full `onnx` package.
  """

  def _parse_ModelProto(self) -> dict:
    # Only field 14 is decoded, collected as metadata_props entries;
    # all other fields are skipped wire-type-aware.
    obj = {"metadata_props": []}
    for fid, wire_type in self._parse_message(self.reader.len):
      match fid:
        case 14:
          obj["metadata_props"].append(self._parse_StringStringEntryProto())
        case _:
          self.reader.skip_field(wire_type)
    return obj
def get_checkpoint(f):
  """Return the checkpoint identifier stored in an ONNX file's metadata.

  Args:
    f: path of the ONNX model file.

  Returns:
    The part of the 'model_checkpoint' metadata value before the first '/'.
  """
  # Use the lightweight protobuf parser; the previous onnx.load()-based
  # lines were dead code (immediately overwritten) and referenced the
  # removed `onnx` dependency, so they are dropped.
  model = MetadataOnnxPBParser(f).parse()
  metadata = {prop["key"]: prop["value"] for prop in model["metadata_props"]}
  return metadata['model_checkpoint'].split('/')[0]
if __name__ == "__main__":
print("| | master | PR branch |")
print("|-| ----- | --------- |")
@@ -24,8 +40,4 @@ if __name__ == "__main__":
fn = os.path.basename(f)
master = get_checkpoint(MASTER_PATH + MODEL_PATH + fn)
pr = get_checkpoint(BASEDIR + MODEL_PATH + fn)
print(
"|", fn, "|",
f"[{master}](https://reporter.comma.life/experiment/{master})", "|",
f"[{pr}](https://reporter.comma.life/experiment/{pr})", "|"
)
print("|", fn, "|", f"[{master}](https://reporter.comma.life/experiment/{master})", "|", f"[{pr}](https://reporter.comma.life/experiment/{pr})", "|")

Binary file not shown.

Binary file not shown.

View File

@@ -48,14 +48,14 @@ class VCruiseHelper(VCruiseHelperSP):
self.get_minimum_set_speed(is_metric)
_enabled = self.update_enabled_state(CS, enabled)
if CS.cruiseState.available:
_enabled = self.update_enabled_state(CS, enabled)
if not self.CP.pcmCruise or (not self.CP_SP.pcmCruiseSpeed and _enabled):
# if stock cruise is completely disabled, then we can use our own set speed logic
self._update_v_cruise_non_pcm(CS, _enabled, is_metric)
self.update_speed_limit_assist_v_cruise_non_pcm()
self.v_cruise_cluster_kph = self.v_cruise_kph
self.update_button_timers(CS, enabled)
else:
self.v_cruise_kph = CS.cruiseState.speed * CV.MS_TO_KPH
self.v_cruise_cluster_kph = CS.cruiseState.speedCluster * CV.MS_TO_KPH
@@ -69,6 +69,9 @@ class VCruiseHelper(VCruiseHelperSP):
self.v_cruise_kph = V_CRUISE_UNSET
self.v_cruise_cluster_kph = V_CRUISE_UNSET
if not self.CP.pcmCruise or not self.CP_SP.pcmCruiseSpeed:
self.update_button_timers(CS, enabled)
def _update_v_cruise_non_pcm(self, CS, enabled, is_metric):
# handle button presses. TODO: this should be in state_control, but a decelCruise press
# would have the effect of both enabling and changing speed is checked after the state transition

View File

@@ -1,7 +1,7 @@
import os
import hypothesis.strategies as st
from hypothesis import Phase, given, settings
from parameterized import parameterized
from openpilot.common.parameterized import parameterized
from cereal import car, custom
from opendbc.car import DT_CTRL

View File

@@ -2,7 +2,7 @@ import pytest
import itertools
import numpy as np
from parameterized import parameterized_class
from openpilot.common.parameterized import parameterized_class
from cereal import log
from openpilot.selfdrive.car.cruise import VCruiseHelper, V_CRUISE_MIN, V_CRUISE_MAX, V_CRUISE_INITIAL, IMPERIAL_INCREMENT
from cereal import car, custom

View File

@@ -7,7 +7,7 @@ import unittest # noqa: TID251
from collections import defaultdict, Counter
import hypothesis.strategies as st
from hypothesis import Phase, given, settings
from parameterized import parameterized_class
from openpilot.common.parameterized import parameterized_class
from opendbc.car import DT_CTRL, gen_empty_fingerprint, structs
from opendbc.car.can_definitions import CanData

View File

@@ -64,6 +64,8 @@ class Controls(ControlsExt):
elif self.CP.lateralTuning.which() == 'torque':
self.LaC = LatControlTorque(self.CP, self.CP_SP, self.CI, DT_CTRL)
self.LaC = ControlsExt.initialize_lateral_control(self, self.LaC, self.CI, DT_CTRL)
def update(self):
self.sm.update(15)
if self.sm.updated["liveCalibration"]:
@@ -183,7 +185,7 @@ class Controls(ControlsExt):
hudControl.leftLaneDepart = self.sm['driverAssistance'].leftLaneDeparture
hudControl.rightLaneDepart = self.sm['driverAssistance'].rightLaneDeparture
if self.sm['selfdriveState'].active:
if self.get_lat_active(self.sm):
CO = self.sm['carOutput']
if self.CP.steerControlType == car.CarParams.SteerControlType.angle:
self.steer_limited_by_safety = abs(CC.actuators.steeringAngleDeg - CO.actuatorsOutput.steeringAngleDeg) > \

View File

@@ -1,6 +1,6 @@
import pytest
import itertools
from parameterized import parameterized_class
from openpilot.common.parameterized import parameterized_class
from cereal import log
@@ -42,4 +42,5 @@ class TestFollowingDistance:
simulation_steady_state = run_following_distance_simulation(v_lead, e2e=self.e2e, personality=self.personality)
correct_steady_state = desired_follow_distance(v_lead, v_lead, get_T_FOLLOW(self.personality))
err_ratio = 0.2 if self.e2e else 0.1
assert simulation_steady_state == pytest.approx(correct_steady_state, abs=err_ratio * correct_steady_state + .5)
abs_err_margin = 0.5 if v_lead > 0.0 else 1.15
assert simulation_steady_state == pytest.approx(correct_steady_state, abs=err_ratio * correct_steady_state + abs_err_margin)

View File

@@ -1,4 +1,4 @@
from parameterized import parameterized
from openpilot.common.parameterized import parameterized
from cereal import car, log
from opendbc.car.car_helpers import interfaces

View File

@@ -1,4 +1,4 @@
from parameterized import parameterized
from openpilot.common.parameterized import parameterized
from cereal import car, log
from opendbc.car.car_helpers import interfaces

View File

@@ -4,11 +4,11 @@ import os
from collections import defaultdict
import numpy as np
from tabulate import tabulate
from openpilot.common.utils import tabulate
from openpilot.tools.lib.logreader import LogReader
DEMO_ROUTE = "a2a0ccea32023010|2023-07-27--13-01-19"
DEMO_ROUTE = "5beb9b58bd12b691/0000010a--a51155e496"
MB = 1024 * 1024
TABULATE_OPTS = dict(tablefmt="simple_grid", stralign="center", numalign="center")

View File

@@ -3,7 +3,7 @@ import time
from cereal import car, log, messaging
from openpilot.common.params import Params
from openpilot.system.manager.process_config import managed_processes, is_snpe_model, is_tinygrad_model, is_stock_model
from openpilot.system.manager.process_config import managed_processes, is_tinygrad_model, is_stock_model
from openpilot.system.hardware import HARDWARE
if __name__ == "__main__":
@@ -11,8 +11,6 @@ if __name__ == "__main__":
params = Params()
params.put("CarParams", CP.to_bytes())
if use_snpe_modeld := is_snpe_model(False, params, CP):
print("Using SNPE modeld")
if use_tinygrad_modeld := is_tinygrad_model(False, params, CP):
print("Using TinyGrad modeld")
if use_stock_modeld := is_stock_model(False, params, CP):
@@ -21,7 +19,7 @@ if __name__ == "__main__":
HARDWARE.set_power_save(False)
procs = ['camerad', 'ui', 'calibrationd', 'plannerd', 'dmonitoringmodeld', 'dmonitoringd']
procs += ["modeld_snpe" if use_snpe_modeld else "modeld_tinygrad" if use_tinygrad_modeld else "modeld"]
procs += ["modeld_tinygrad" if use_tinygrad_modeld else "modeld"]
for p in procs:
managed_processes[p].start()

View File

@@ -25,6 +25,7 @@ MIN_ABS_YAW_RATE = 0.0
MAX_YAW_RATE_SANITY_CHECK = 1.0
MIN_NCC = 0.95
MAX_LAG = 1.0
MIN_LAG = 0.15
MAX_LAG_STD = 0.1
MAX_LAT_ACCEL = 2.0
MAX_LAT_ACCEL_DIFF = 0.6
@@ -216,7 +217,7 @@ class LateralLagEstimator:
liveDelay.status = log.LiveDelayData.Status.unestimated
if liveDelay.status == log.LiveDelayData.Status.estimated:
liveDelay.lateralDelay = valid_mean_lag
liveDelay.lateralDelay = min(MAX_LAG, max(MIN_LAG, valid_mean_lag))
else:
liveDelay.lateralDelay = self.initial_lag
@@ -299,7 +300,7 @@ class LateralLagEstimator:
new_values_start_idx = next(-i for i, t in enumerate(reversed(times)) if t <= self.last_estimate_t)
is_valid = is_valid and not (new_values_start_idx == 0 or not np.any(okay[new_values_start_idx:]))
delay, corr, confidence = self.actuator_delay(desired, actual, okay, self.dt, MAX_LAG)
delay, corr, confidence = self.actuator_delay(desired, actual, okay, self.dt, MIN_LAG, MAX_LAG)
if corr < self.min_ncc or confidence < self.min_confidence or not is_valid:
return
@@ -307,22 +308,23 @@ class LateralLagEstimator:
self.last_estimate_t = self.t
@staticmethod
def actuator_delay(expected_sig: np.ndarray, actual_sig: np.ndarray, mask: np.ndarray, dt: float, max_lag: float) -> tuple[float, float, float]:
def actuator_delay(expected_sig: np.ndarray, actual_sig: np.ndarray, mask: np.ndarray,
dt: float, min_lag: float, max_lag: float) -> tuple[float, float, float]:
assert len(expected_sig) == len(actual_sig)
max_lag_samples = int(max_lag / dt)
min_lag_samples, max_lag_samples = int(round(min_lag / dt)), int(round(max_lag / dt))
padded_size = fft_next_good_size(len(expected_sig) + max_lag_samples)
ncc = masked_normalized_cross_correlation(expected_sig, actual_sig, mask, padded_size)
# only consider lags from 0 to max_lag
roi = np.s_[len(expected_sig) - 1: len(expected_sig) - 1 + max_lag_samples]
# only consider lags from min_lag to max_lag
roi = np.s_[len(expected_sig) - 1 + min_lag_samples: len(expected_sig) - 1 + max_lag_samples]
extended_roi = np.s_[roi.start - CORR_BORDER_OFFSET: roi.stop + CORR_BORDER_OFFSET]
roi_ncc = ncc[roi]
extended_roi_ncc = ncc[extended_roi]
max_corr_index = np.argmax(roi_ncc)
corr = roi_ncc[max_corr_index]
lag = parabolic_peak_interp(roi_ncc, max_corr_index) * dt
lag = parabolic_peak_interp(roi_ncc, max_corr_index) * dt + min_lag
# to estimate lag confidence, gather all high-correlation candidates and see how spread they are
# if e.g. 0.8 and 0.4 are both viable, this is an ambiguous case

View File

@@ -97,7 +97,7 @@ class TestLagd:
assert msg.liveDelay.calPerc == 0
def test_estimator_basics(self, subtests):
for lag_frames in range(5):
for lag_frames in range(3, 10):
with subtests.test(msg=f"lag_frames={lag_frames}"):
mocked_CP = car.CarParams(steerActuatorDelay=0.8)
estimator = LateralLagEstimator(mocked_CP, DT, min_recovery_buffer_sec=0.0, min_yr=0.0)
@@ -111,7 +111,7 @@ class TestLagd:
assert msg.liveDelay.calPerc == 100
def test_estimator_masking(self):
mocked_CP, lag_frames = car.CarParams(steerActuatorDelay=0.8), random.randint(1, 19)
mocked_CP, lag_frames = car.CarParams(steerActuatorDelay=0.8), random.randint(3, 19)
estimator = LateralLagEstimator(mocked_CP, DT, min_recovery_buffer_sec=0.0, min_yr=0.0, min_valid_block_count=1)
process_messages(estimator, lag_frames, (int(MIN_OKAY_WINDOW_SEC / DT) + BLOCK_SIZE) * 2, rejection_threshold=0.4)
msg = estimator.get_msg(True)
@@ -120,7 +120,6 @@ class TestLagd:
assert msg.liveDelay.calPerc == 100
@pytest.mark.skipif(PC, reason="only on device")
@pytest.mark.timeout(60)
def test_estimator_performance(self):
mocked_CP = car.CarParams(steerActuatorDelay=0.8)
estimator = LateralLagEstimator(mocked_CP, DT)

View File

@@ -1,59 +1,62 @@
import os
import glob
from openpilot.common.file_chunker import chunk_file, get_chunk_paths
Import('env', 'envCython', 'arch', 'cereal', 'messaging', 'common', 'visionipc')
Import('env', 'arch')
chunker_file = File("#common/file_chunker.py")
lenv = env.Clone()
lenvCython = envCython.Clone()
libs = [cereal, messaging, visionipc, common, 'capnp', 'kj', 'pthread']
frameworks = []
tinygrad_root = env.Dir("#").abspath
tinygrad_files = ["#"+x for x in glob.glob(env.Dir("#tinygrad_repo").relpath + "/**", recursive=True, root_dir=tinygrad_root)
if 'pycache' not in x and os.path.isfile(os.path.join(tinygrad_root, x))]
common_src = [
"models/commonmodel.cc",
"transforms/loadyuv.cc",
"transforms/transform.cc",
]
def estimate_pickle_max_size(onnx_size):
  """Generous upper bound on the compiled pickle size for a given ONNX size.

  The source size plus 20%, padded with a fixed 10 MiB margin.
  """
  fixed_margin = 10 * 1024 * 1024
  return onnx_size * 1.2 + fixed_margin
# OpenCL is a framework on Mac
if arch == "Darwin":
frameworks += ['OpenCL']
else:
libs += ['OpenCL']
# Set path definitions
for pathdef, fn in {'TRANSFORM': 'transforms/transform.cl', 'LOADYUV': 'transforms/loadyuv.cl'}.items():
for xenv in (lenv, lenvCython):
xenv['CXXFLAGS'].append(f'-D{pathdef}_PATH=\\"{File(fn).abspath}\\"')
# Compile cython
cython_libs = envCython["LIBS"] + libs
commonmodel_lib = lenv.Library('commonmodel', common_src)
lenvCython.Program('models/commonmodel_pyx.so', 'models/commonmodel_pyx.pyx', LIBS=[commonmodel_lib, *cython_libs], FRAMEWORKS=frameworks)
tinygrad_files = sorted(["#"+x for x in glob.glob(env.Dir("#tinygrad_repo").relpath + "/**", recursive=True, root_dir=env.Dir("#").abspath) if 'pycache' not in x])
# compile warp
# THREADS=0 is need to prevent bug: https://github.com/tinygrad/tinygrad/issues/14689
tg_flags = {
'larch64': 'DEV=QCOM FLOAT16=1 NOLOCALS=1 JIT_BATCH_SIZE=0',
'Darwin': f'DEV=CPU THREADS=0 HOME={os.path.expanduser("~")} IMAGE=0', # tinygrad calls brew which needs a $HOME in the env
}.get(arch, 'DEV=CPU CPU_LLVM=1 THREADS=0 IMAGE=0')
# Get model metadata
for model_name in ['driving_vision', 'driving_policy', 'dmonitoring_model']:
fn = File(f"models/{model_name}").abspath
script_files = [File(Dir("#selfdrive/modeld").File("get_model_metadata.py").abspath)]
cmd = f'python3 {Dir("#selfdrive/modeld").abspath}/get_model_metadata.py {fn}.onnx'
cmd = f'{tg_flags} python3 {Dir("#selfdrive/modeld").abspath}/get_model_metadata.py {fn}.onnx'
lenv.Command(fn + "_metadata.pkl", [fn + ".onnx"] + tinygrad_files + script_files, cmd)
image_flag = {
'larch64': 'IMAGE=2',
}.get(arch, 'IMAGE=0')
script_files = [File(Dir("#selfdrive/modeld").File("compile_warp.py").abspath)]
compile_warp_cmd = f'{tg_flags} python3 {Dir("#selfdrive/modeld").abspath}/compile_warp.py '
from openpilot.common.transformations.camera import _ar_ox_fisheye, _os_fisheye
warp_targets = []
for cam in [_ar_ox_fisheye, _os_fisheye]:
w, h = cam.width, cam.height
warp_targets += [File(f"models/warp_{w}x{h}_tinygrad.pkl").abspath, File(f"models/dm_warp_{w}x{h}_tinygrad.pkl").abspath]
lenv.Command(warp_targets, tinygrad_files + script_files, compile_warp_cmd)
def tg_compile(flags, model_name):
pythonpath_string = 'PYTHONPATH="${PYTHONPATH}:' + env.Dir("#tinygrad_repo").abspath + '"'
fn = File(f"models/{model_name}").abspath
pkl = fn + "_tinygrad.pkl"
onnx_path = fn + ".onnx"
chunk_targets = get_chunk_paths(pkl, estimate_pickle_max_size(os.path.getsize(onnx_path)))
def do_chunk(target, source, env):
chunk_file(pkl, chunk_targets)
return lenv.Command(
fn + "_tinygrad.pkl",
[fn + ".onnx"] + tinygrad_files,
f'{pythonpath_string} {flags} python3 {Dir("#tinygrad_repo").abspath}/examples/openpilot/compile3.py {fn}.onnx {fn}_tinygrad.pkl'
chunk_targets,
[onnx_path] + tinygrad_files + [chunker_file],
[f'{pythonpath_string} {flags} {image_flag} python3 {Dir("#tinygrad_repo").abspath}/examples/openpilot/compile3.py {fn}.onnx {pkl}',
do_chunk]
)
# Compile small models
for model_name in ['driving_vision', 'driving_policy', 'dmonitoring_model']:
flags = {
'larch64': 'DEV=QCOM',
'Darwin': f'DEV=CPU HOME={os.path.expanduser("~")} IMAGE=0', # tinygrad calls brew which needs a $HOME in the env
}.get(arch, 'DEV=CPU CPU_LLVM=1 IMAGE=0')
tg_compile(flags, model_name)
tg_compile(tg_flags, model_name)
# Compile BIG model if USB GPU is available
if "USBGPU" in os.environ:

209
selfdrive/modeld/compile_warp.py Executable file
View File

@@ -0,0 +1,209 @@
#!/usr/bin/env python3
import time
import pickle
import numpy as np
from pathlib import Path
from tinygrad.tensor import Tensor
from tinygrad.helpers import Context
from tinygrad.device import Device
from tinygrad.engine.jit import TinyJit
from openpilot.system.camerad.cameras.nv12_info import get_nv12_info
from openpilot.common.transformations.model import MEDMODEL_INPUT_SIZE, DM_INPUT_SIZE
from openpilot.common.transformations.camera import _ar_ox_fisheye, _os_fisheye
# Directory holding the compiled warp pickles, next to this script.
MODELS_DIR = Path(__file__).parent / 'models'

# Camera resolutions the warps are compiled for.
CAMERA_CONFIGS = [
  (_ar_ox_fisheye.width, _ar_ox_fisheye.height),  # tici: 1928x1208
  (_os_fisheye.width, _os_fisheye.height),  # mici: 1344x760
]

# Scales full-resolution pixel coords to the half-resolution UV plane
# (inverse maps back). NOTE(review): not referenced in the code visible
# here — confirm usage further down the file.
UV_SCALE_MATRIX = np.array([[0.5, 0, 0], [0, 0.5, 0], [0, 0, 1]], dtype=np.float32)
UV_SCALE_MATRIX_INV = np.linalg.inv(UV_SCALE_MATRIX)

# Rolling buffer: 30 channels of half-resolution model frames (each warped
# frame contributes 6 channels; the model input pair uses 12 — see
# update_img_input_tinygrad).
IMG_BUFFER_SHAPE = (30, MEDMODEL_INPUT_SIZE[1] // 2, MEDMODEL_INPUT_SIZE[0] // 2)
def warp_pkl_path(w, h):
  """Path of the compiled main-model warp pickle for a w-by-h camera."""
  filename = f'warp_{w}x{h}_tinygrad.pkl'
  return MODELS_DIR / filename
def dm_warp_pkl_path(w, h):
  """Path of the compiled driver-monitoring warp pickle for a w-by-h camera."""
  filename = f'dm_warp_{w}x{h}_tinygrad.pkl'
  return MODELS_DIR / filename
def warp_perspective_tinygrad(src_flat, M_inv, dst_shape, src_shape, stride_pad):
  """Nearest-neighbor perspective warp built from tinygrad ops.

  Args:
    src_flat: flattened source plane, row-major, with `stride_pad` extra
      elements of padding after each visible row.
    M_inv: 3x3 inverse homography mapping destination pixel coords to
      source coords.
    dst_shape: (width, height) of the warped output.
    src_shape: (height, width) of the visible source plane.
    stride_pad: per-row padding (stride minus width) of the source buffer.

  Returns:
    A flattened tensor of h_dst*w_dst samples gathered from src_flat.
  """
  w_dst, h_dst = dst_shape
  h_src, w_src = src_shape
  # Flattened destination pixel grid (x varies fastest).
  x = Tensor.arange(w_dst).reshape(1, w_dst).expand(h_dst, w_dst).reshape(-1)
  y = Tensor.arange(h_dst).reshape(h_dst, 1).expand(h_dst, w_dst).reshape(-1)
  # inline 3x3 matmul as elementwise to avoid reduce op (enables fusion with gather)
  src_x = M_inv[0, 0] * x + M_inv[0, 1] * y + M_inv[0, 2]
  src_y = M_inv[1, 0] * x + M_inv[1, 1] * y + M_inv[1, 2]
  src_w = M_inv[2, 0] * x + M_inv[2, 1] * y + M_inv[2, 2]
  # Perspective divide into source coordinates.
  src_x = src_x / src_w
  src_y = src_y / src_w
  # Nearest-neighbor sample, clamped to the source bounds.
  x_nn_clipped = Tensor.round(src_x).clip(0, w_src - 1).cast('int')
  y_nn_clipped = Tensor.round(src_y).clip(0, h_src - 1).cast('int')
  # Row stride of the padded source buffer is (w_src + stride_pad).
  idx = y_nn_clipped * (w_src + stride_pad) + x_nn_clipped
  return src_flat[idx]
def frames_to_tensor(frames, model_w, model_h):
  """Repack a planar YUV420 frame into the model's 6-channel layout.

  Channels are four phase-shifted half-resolution subsamples of the Y
  plane followed by the half-resolution U and V planes.

  Note: model_w/model_h are not read here; the geometry is derived from
  `frames` itself (shape (H*3//2, W): Y plane, then U and V quarter planes).
  """
  H = (frames.shape[0] * 2) // 3
  W = frames.shape[1]
  # Y subsamples: (even row, even col), (odd row, even col),
  # (even row, odd col), (odd row, odd col), then U and V planes.
  in_img1 = Tensor.cat(frames[0:H:2, 0::2],
                       frames[1:H:2, 0::2],
                       frames[0:H:2, 1::2],
                       frames[1:H:2, 1::2],
                       frames[H:H+H//4].reshape((H//2, W//2)),
                       frames[H+H//4:H+H//2].reshape((H//2, W//2)), dim=0).reshape((6, H//2, W//2))
  return in_img1
def make_frame_prepare(cam_w, cam_h, model_w, model_h):
  """Build a closure that warps one NV12 camera frame into the model's
  6-channel YUV input tensor.

  Args:
    cam_w, cam_h: camera resolution.
    model_w, model_h: model input resolution.
  """
  # NV12 geometry: padded row stride, Y plane height, UV plane height.
  stride, y_height, uv_height, _ = get_nv12_info(cam_w, cam_h)
  uv_offset = stride * y_height  # interleaved UV plane follows the Y plane
  stride_pad = stride - cam_w

  def frame_prepare_tinygrad(input_frame, M_inv):
    # UV_SCALE @ M_inv @ UV_SCALE_INV simplifies to elementwise scaling
    M_inv_uv = M_inv * Tensor([[1.0, 1.0, 0.5], [1.0, 1.0, 0.5], [2.0, 2.0, 1.0]])
    # deinterleave NV12 UV plane (UVUV... -> separate U, V)
    uv = input_frame[uv_offset:uv_offset + uv_height * stride].reshape(uv_height, stride)
    # NOTE(review): SPLIT_REDUCEOP=0 presumably controls tinygrad kernel
    # splitting during these realizes — confirm against tinygrad docs.
    with Context(SPLIT_REDUCEOP=0):
      # Warp Y at model resolution, U and V at half resolution
      # (the UV plane samples skip the interleaving, so pad is 0).
      y = warp_perspective_tinygrad(input_frame[:cam_h*stride],
                                    M_inv, (model_w, model_h),
                                    (cam_h, cam_w), stride_pad).realize()
      u = warp_perspective_tinygrad(uv[:cam_h//2, :cam_w:2].flatten(),
                                    M_inv_uv, (model_w//2, model_h//2),
                                    (cam_h//2, cam_w//2), 0).realize()
      v = warp_perspective_tinygrad(uv[:cam_h//2, 1:cam_w:2].flatten(),
                                    M_inv_uv, (model_w//2, model_h//2),
                                    (cam_h//2, cam_w//2), 0).realize()
    # Reassemble planar YUV420 and repack into the 6-channel model layout.
    yuv = y.cat(u).cat(v).reshape((model_h * 3 // 2, model_w))
    tensor = frames_to_tensor(yuv, model_w, model_h)
    return tensor

  return frame_prepare_tinygrad
def make_update_img_input(frame_prepare, model_w, model_h):
  """Build a closure that pushes a newly warped frame into the rolling
  channel buffer and returns the model's two-frame input pair."""

  def update_img_input_tinygrad(tensor, frame, M_inv):
    M_inv = M_inv.to(Device.DEFAULT)
    new_img = frame_prepare(frame, M_inv)
    # Shift the buffer: drop the oldest 6 channels, append the new frame's 6.
    full_buffer = tensor[6:].cat(new_img, dim=0).contiguous()
    # Model input pair = oldest + newest frame, shaped (1, 12, H/2, W/2).
    return full_buffer, Tensor.cat(full_buffer[:6], full_buffer[-6:], dim=0).contiguous().reshape(1, 12, model_h//2, model_w//2)

  return update_img_input_tinygrad
def make_update_both_imgs(frame_prepare, model_w, model_h):
  """Build a function that updates both camera ring buffers in one call.

  Applies the single-image update (see make_update_img_input) to the normal
  and the big (wide) image buffers and returns all four results.
  """
  update_img = make_update_img_input(frame_prepare, model_w, model_h)
  def update_both_imgs_tinygrad(calib_img_buffer, new_img, M_inv,
                                calib_big_img_buffer, new_big_img, M_inv_big):
    buf, pair = update_img(calib_img_buffer, new_img, M_inv)
    big_buf, big_pair = update_img(calib_big_img_buffer, new_big_img, M_inv_big)
    return buf, pair, big_buf, big_pair
  return update_both_imgs_tinygrad
def make_warp_dm(cam_w, cam_h, dm_w, dm_h):
  """Build a function that warps an NV12 Y plane into the DM model input.

  Args:
    cam_w, cam_h: camera frame dimensions in pixels.
    dm_w, dm_h: driver-monitoring model input dimensions in pixels.

  Returns:
    warp_dm(input_frame, M_inv) -> Tensor of shape (-1, dm_h * dm_w),
    where input_frame is a flat uint8 NV12 buffer (only the Y plane is used)
    and M_inv is a 3x3 inverse-warp homography.
  """
  stride, _, _, _ = get_nv12_info(cam_w, cam_h)
  stride_pad = stride - cam_w
  def warp_dm(input_frame, M_inv):
    homography = M_inv.to(Device.DEFAULT)
    warped = warp_perspective_tinygrad(input_frame[:cam_h * stride], homography,
                                       (dm_w, dm_h), (cam_h, cam_w), stride_pad)
    return warped.reshape(-1, dm_h * dm_w)
  return warp_dm
def compile_modeld_warp(cam_w, cam_h):
  """Compile and pickle the jitted warp + ring-buffer update for one camera size.

  Runs the jitted double-image update 10 times on random frames so TinyJit can
  capture and prune the kernels, prints per-iteration timings, pickles the jit
  to warp_pkl_path(cam_w, cam_h), then reloads it once as a smoke test.

  Fixes vs. previous version: removed dead locals (inputs_np and the two
  *_np zero buffers were written but never read) and closed the pickle file
  handle on reload (was `pickle.load(open(...))`, which leaks the handle).
  """
  model_w, model_h = MEDMODEL_INPUT_SIZE
  _, _, _, yuv_size = get_nv12_info(cam_w, cam_h)
  print(f"Compiling modeld warp for {cam_w}x{cam_h}...")
  frame_prepare = make_frame_prepare(cam_w, cam_h, model_w, model_h)
  update_both_imgs = make_update_both_imgs(frame_prepare, model_w, model_h)
  update_img_jit = TinyJit(update_both_imgs, prune=True)
  full_buffer = Tensor.zeros(IMG_BUFFER_SHAPE, dtype='uint8').contiguous().realize()
  big_full_buffer = Tensor.zeros(IMG_BUFFER_SHAPE, dtype='uint8').contiguous().realize()
  for i in range(10):
    # Keep the random frames bound to locals: Tensor.from_blob only borrows
    # the raw pointer, so the numpy memory must stay alive for the iteration.
    new_frame_np = (32 * np.random.randn(yuv_size).astype(np.float32) + 128).clip(0, 255).astype(np.uint8)
    new_big_frame_np = (32 * np.random.randn(yuv_size).astype(np.float32) + 128).clip(0, 255).astype(np.uint8)
    inputs = [full_buffer,
              Tensor.from_blob(new_frame_np.ctypes.data, (yuv_size,), dtype='uint8').realize(),
              Tensor(Tensor.randn(3, 3).mul(8).realize().numpy(), device='NPY'),
              big_full_buffer,
              Tensor.from_blob(new_big_frame_np.ctypes.data, (yuv_size,), dtype='uint8').realize(),
              Tensor(Tensor.randn(3, 3).mul(8).realize().numpy(), device='NPY')]
    Device.default.synchronize()
    st = time.perf_counter()
    out = update_img_jit(*inputs)
    # Feed the updated ring buffers back in for the next iteration.
    full_buffer = out[0].contiguous().realize().clone()
    big_full_buffer = out[2].contiguous().realize().clone()
    mt = time.perf_counter()
    Device.default.synchronize()
    et = time.perf_counter()
    print(f"  [{i+1}/10] enqueue {(mt-st)*1e3:6.2f} ms -- total {(et-st)*1e3:6.2f} ms")
  pkl_path = warp_pkl_path(cam_w, cam_h)
  with open(pkl_path, "wb") as f:
    pickle.dump(update_img_jit, f)
  print(f"  Saved to {pkl_path}")
  # Smoke test: the pickled jit must reload and run.
  with open(pkl_path, "rb") as f:
    jit = pickle.load(f)
  jit(*inputs)
def compile_dm_warp(cam_w, cam_h):
  """Compile and pickle the jitted driver-monitoring warp for one camera size.

  Runs the jitted warp 10 times on random frames so TinyJit can capture and
  prune the kernels, prints per-iteration timings, then pickles the jit to
  dm_warp_pkl_path(cam_w, cam_h).

  Bug fix: the random frame's numpy array is now bound to a local before
  Tensor.from_blob takes its pointer. Previously the array was a temporary,
  so it could be garbage-collected and the blob would point at freed memory
  (compile_modeld_warp already held the reference correctly).
  """
  dm_w, dm_h = DM_INPUT_SIZE
  _, _, _, yuv_size = get_nv12_info(cam_w, cam_h)
  print(f"Compiling DM warp for {cam_w}x{cam_h}...")
  warp_dm = make_warp_dm(cam_w, cam_h, dm_w, dm_h)
  warp_dm_jit = TinyJit(warp_dm, prune=True)
  for i in range(10):
    # frame_np must outlive the jit call — from_blob only borrows the pointer.
    frame_np = (32 * Tensor.randn(yuv_size,) + 128).cast(dtype='uint8').realize().numpy()
    inputs = [Tensor.from_blob(frame_np.ctypes.data, (yuv_size,), dtype='uint8'),
              Tensor(Tensor.randn(3, 3).mul(8).realize().numpy(), device='NPY')]
    Device.default.synchronize()
    st = time.perf_counter()
    warp_dm_jit(*inputs)
    mt = time.perf_counter()
    Device.default.synchronize()
    et = time.perf_counter()
    print(f"  [{i+1}/10] enqueue {(mt-st)*1e3:6.2f} ms -- total {(et-st)*1e3:6.2f} ms")
  pkl_path = dm_warp_pkl_path(cam_w, cam_h)
  with open(pkl_path, "wb") as f:
    pickle.dump(warp_dm_jit, f)
  print(f"  Saved to {pkl_path}")
def run_and_save_pickle():
  """Compile and pickle the modeld and DM warp jits for every camera config."""
  for dims in CAMERA_CONFIGS:
    compile_modeld_warp(*dims)
    compile_dm_warp(*dims)
# Entry point: precompile and pickle the warp jits for all camera configs.
if __name__ == "__main__":
  run_and_save_pickle()

View File

@@ -3,7 +3,6 @@ import os
from openpilot.system.hardware import TICI
os.environ['DEV'] = 'QCOM' if TICI else 'CPU'
from tinygrad.tensor import Tensor
from tinygrad.dtype import dtypes
import time
import pickle
import numpy as np
@@ -16,50 +15,57 @@ from openpilot.common.swaglog import cloudlog
from openpilot.common.realtime import config_realtime_process
from openpilot.common.transformations.model import dmonitoringmodel_intrinsics
from openpilot.common.transformations.camera import _ar_ox_fisheye, _os_fisheye
from openpilot.selfdrive.modeld.models.commonmodel_pyx import CLContext, MonitoringModelFrame
from openpilot.system.camerad.cameras.nv12_info import get_nv12_info
from openpilot.common.file_chunker import read_file_chunked
from openpilot.selfdrive.modeld.parse_model_outputs import sigmoid, safe_exp
from openpilot.selfdrive.modeld.runners.tinygrad_helpers import qcom_tensor_from_opencl_address
PROCESS_NAME = "selfdrive.modeld.dmonitoringmodeld"
SEND_RAW_PRED = os.getenv('SEND_RAW_PRED')
MODEL_PKL_PATH = Path(__file__).parent / 'models/dmonitoring_model_tinygrad.pkl'
METADATA_PATH = Path(__file__).parent / 'models/dmonitoring_model_metadata.pkl'
MODELS_DIR = Path(__file__).parent / 'models'
class ModelState:
inputs: dict[str, np.ndarray]
output: np.ndarray
def __init__(self, cl_ctx):
def __init__(self):
with open(METADATA_PATH, 'rb') as f:
model_metadata = pickle.load(f)
self.input_shapes = model_metadata['input_shapes']
self.output_slices = model_metadata['output_slices']
self.frame = MonitoringModelFrame(cl_ctx)
self.numpy_inputs = {
'calib': np.zeros(self.input_shapes['calib'], dtype=np.float32),
}
self.warp_inputs_np = {'transform': np.zeros((3,3), dtype=np.float32)}
self.warp_inputs = {k: Tensor(v, device='NPY') for k,v in self.warp_inputs_np.items()}
self.frame_buf_params = None
self.tensor_inputs = {k: Tensor(v, device='NPY').realize() for k,v in self.numpy_inputs.items()}
with open(MODEL_PKL_PATH, "rb") as f:
self.model_run = pickle.load(f)
self._blob_cache : dict[int, Tensor] = {}
self.image_warp = None
self.model_run = pickle.loads(read_file_chunked(str(MODEL_PKL_PATH)))
def run(self, buf: VisionBuf, calib: np.ndarray, transform: np.ndarray) -> tuple[np.ndarray, float]:
self.numpy_inputs['calib'][0,:] = calib
t1 = time.perf_counter()
input_img_cl = self.frame.prepare(buf, transform.flatten())
if TICI:
# The imgs tensors are backed by opencl memory, only need init once
if 'input_img' not in self.tensor_inputs:
self.tensor_inputs['input_img'] = qcom_tensor_from_opencl_address(input_img_cl.mem_address, self.input_shapes['input_img'], dtype=dtypes.uint8)
else:
self.tensor_inputs['input_img'] = Tensor(self.frame.buffer_from_cl(input_img_cl).reshape(self.input_shapes['input_img']), dtype=dtypes.uint8).realize()
if self.image_warp is None:
self.frame_buf_params = get_nv12_info(buf.width, buf.height)
warp_path = MODELS_DIR / f'dm_warp_{buf.width}x{buf.height}_tinygrad.pkl'
with open(warp_path, "rb") as f:
self.image_warp = pickle.load(f)
ptr = buf.data.ctypes.data
# There is a ringbuffer of imgs, just cache tensors pointing to all of them
if ptr not in self._blob_cache:
self._blob_cache[ptr] = Tensor.from_blob(ptr, (self.frame_buf_params[3],), dtype='uint8')
self.warp_inputs_np['transform'][:] = transform[:]
self.tensor_inputs['input_img'] = self.image_warp(self._blob_cache[ptr], self.warp_inputs['transform']).realize()
output = self.model_run(**self.tensor_inputs).numpy().flatten()
output = self.model_run(**self.tensor_inputs).contiguous().realize().uop.base.buffer.numpy().flatten()
t2 = time.perf_counter()
return output, t2 - t1
@@ -107,12 +113,11 @@ def get_driverstate_packet(model_output, frame_id: int, location_ts: int, exec_t
def main():
config_realtime_process(7, 5)
cl_context = CLContext()
model = ModelState(cl_context)
model = ModelState()
cloudlog.warning("models loaded, dmonitoringmodeld starting")
cloudlog.warning("connecting to driver stream")
vipc_client = VisionIpcClient("camerad", VisionStreamType.VISION_STREAM_DRIVER, True, cl_context)
vipc_client = VisionIpcClient("camerad", VisionStreamType.VISION_STREAM_DRIVER, True)
while not vipc_client.connect(False):
time.sleep(0.1)
assert vipc_client.is_connected()

Some files were not shown because too many files have changed in this diff Show More