diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml
index 4ade42b665..fe7c8ef336 100644
--- a/.github/workflows/tests.yaml
+++ b/.github/workflows/tests.yaml
@@ -20,8 +20,6 @@ concurrency:
 env:
   PYTHONWARNINGS: error
   BASE_IMAGE: openpilot-base
-  AZURE_TOKEN: ${{ secrets.AZURE_COMMADATACI_OPENPILOTCI_TOKEN }}
-  DOCKER_LOGIN: docker login ghcr.io -u ${{ github.actor }} -p ${{ secrets.GITHUB_TOKEN }}
   BUILD: selfdrive/test/docker_build.sh base
@@ -185,12 +183,13 @@ jobs:
       uses: actions/cache@v5
       with:
         path: .ci_cache/comma_download_cache
-        key: proc-replay-${{ hashFiles('selfdrive/test/process_replay/ref_commit', 'selfdrive/test/process_replay/test_processes.py') }}
+        key: proc-replay-${{ hashFiles('selfdrive/test/process_replay/test_processes.py') }}
     - name: Build openpilot
       run: |
         ${{ env.RUN }} "scons -j$(nproc)"
     - name: Run replay
       timeout-minutes: ${{ contains(runner.name, 'nsc') && (steps.dependency-cache.outputs.cache-hit == 'true') && ((steps.setup-step.outputs.duration < 18) && 1 || 2) || 20 }}
+      continue-on-error: ${{ github.ref == 'refs/heads/master' }}
       run: |
         ${{ env.RUN }} "selfdrive/test/process_replay/test_processes.py -j$(nproc) && \
                         chmod -R 777 /tmp/comma_download_cache"
@@ -204,10 +203,26 @@ jobs:
       with:
         name: process_replay_diff.txt
         path: selfdrive/test/process_replay/diff.txt
-    - name: Upload reference logs
-      if: false # TODO: move this to github instead of azure
+    - name: Checkout ci-artifacts
+      if: github.repository == 'commaai/openpilot' && github.ref == 'refs/heads/master'
+      uses: actions/checkout@v4
+      with:
+        repository: commaai/ci-artifacts
+        ssh-key: ${{ secrets.CI_ARTIFACTS_DEPLOY_KEY }}
+        path: ${{ github.workspace }}/ci-artifacts
+    - name: Push refs
+      if: github.repository == 'commaai/openpilot' && github.ref == 'refs/heads/master'
+      working-directory: ${{ github.workspace }}/ci-artifacts
       run: |
-        ${{ env.RUN }} "unset PYTHONWARNINGS && AZURE_TOKEN='$AZURE_TOKEN' python3 selfdrive/test/process_replay/test_processes.py -j$(nproc) --upload-only"
+        git checkout --orphan process-replay
+        git rm -rf .
+        git config user.name "GitHub Actions Bot"
+        git config user.email "<>"
+        cp ${{ github.workspace }}/selfdrive/test/process_replay/fakedata/*.zst .
+        echo "${{ github.sha }}" > ref_commit
+        git add .
+        git commit -m "process-replay refs for ${{ github.repository }}@${{ github.sha }}"
+        git push origin process-replay --force
     - name: Run regen
       if: false
       timeout-minutes: 4
diff --git a/selfdrive/test/process_replay/README.md b/selfdrive/test/process_replay/README.md
index 8e279c71cd..28f3b7cd2a 100644
--- a/selfdrive/test/process_replay/README.md
+++ b/selfdrive/test/process_replay/README.md
@@ -22,7 +22,7 @@ Currently the following processes are tested:
 ### Usage
 ```
 Usage: test_processes.py [-h] [--whitelist-procs PROCS] [--whitelist-cars CARS] [--blacklist-procs PROCS]
-                         [--blacklist-cars CARS] [--ignore-fields FIELDS] [--ignore-msgs MSGS] [--update-refs] [--upload-only]
+                         [--blacklist-cars CARS] [--ignore-fields FIELDS] [--ignore-msgs MSGS] [--update-refs]
 Regression test to identify changes in a process's output
 optional arguments:
   -h, --help            show this help message and exit
@@ -33,7 +33,6 @@ optional arguments:
   --ignore-fields IGNORE_FIELDS   Extra fields or msgs to ignore (e.g. driverMonitoringState.events)
   --ignore-msgs IGNORE_MSGS       Msgs to ignore (e.g. onroadEvents)
   --update-refs         Updates reference logs using current commit
-  --upload-only         Skips testing processes and uploads logs from previous test run
 ```
 
 ## Forks
diff --git a/selfdrive/test/process_replay/ref_commit b/selfdrive/test/process_replay/ref_commit
deleted file mode 100644
index 85b79391c3..0000000000
--- a/selfdrive/test/process_replay/ref_commit
+++ /dev/null
@@ -1 +0,0 @@
-67f3daf309dc6cbb6844fcbaeb83e6596637e551
\ No newline at end of file
diff --git a/selfdrive/test/process_replay/test_processes.py b/selfdrive/test/process_replay/test_processes.py
index 59e1ae054e..9f79c8ddcb 100755
--- a/selfdrive/test/process_replay/test_processes.py
+++ b/selfdrive/test/process_replay/test_processes.py
@@ -9,12 +9,13 @@ from typing import Any
 
 from opendbc.car.car_helpers import interface_names
 from openpilot.common.git import get_commit
-from openpilot.tools.lib.openpilotci import get_url, upload_file
+from openpilot.tools.lib.openpilotci import get_url
 from openpilot.selfdrive.test.process_replay.compare_logs import compare_logs, format_diff
 from openpilot.selfdrive.test.process_replay.process_replay import CONFIGS, PROC_REPLAY_DIR, FAKEDATA, replay_process, \
   check_most_messages_valid
 from openpilot.tools.lib.filereader import FileReader
 from openpilot.tools.lib.logreader import LogReader, save_log
+from openpilot.tools.lib.url_file import URLFile
 
 source_segments = [
   ("HYUNDAI", "02c45f73a2e5c6e9|2021-01-01--19-08-22--1"),      # HYUNDAI.HYUNDAI_SONATA
@@ -64,25 +65,17 @@ segments = [
 # dashcamOnly makes don't need to be tested until a full port is done
 excluded_interfaces = ["mock", "body", "psa"]
 
-BASE_URL = "https://commadataci.blob.core.windows.net/openpilotci/"
+BASE_URL = "https://raw.githubusercontent.com/commaai/ci-artifacts/refs/heads/process-replay/"
 REF_COMMIT_FN = os.path.join(PROC_REPLAY_DIR, "ref_commit")
 EXCLUDED_PROCS = {"modeld", "dmonitoringmodeld"}
 
 
 def run_test_process(data):
   segment, cfg, args, cur_log_fn, ref_log_path, lr_dat = data
-  res = None
-  if not args.upload_only:
-    lr = LogReader.from_bytes(lr_dat)
-    res, log_msgs = test_process(cfg, lr, segment, ref_log_path, cur_log_fn, args.ignore_fields, args.ignore_msgs)
-    # save logs so we can upload when updating refs
-    save_log(cur_log_fn, log_msgs)
-
-  if args.update_refs or args.upload_only:
-    print(f'Uploading: {os.path.basename(cur_log_fn)}')
-    assert os.path.exists(cur_log_fn), f"Cannot find log to upload: {cur_log_fn}"
-    upload_file(cur_log_fn, os.path.basename(cur_log_fn))
-    os.remove(cur_log_fn)
+  lr = LogReader.from_bytes(lr_dat)
+  res, log_msgs = test_process(cfg, lr, segment, ref_log_path, cur_log_fn, args.ignore_fields, args.ignore_msgs)
+  # save logs so we can update refs
+  save_log(cur_log_fn, log_msgs)
 
   return (segment, cfg.proc_name, res)
@@ -142,8 +135,6 @@ if __name__ == "__main__":
                       help="Msgs to ignore (e.g. carEvents)")
carEvents)") parser.add_argument("--update-refs", action="store_true", help="Updates reference logs using current commit") - parser.add_argument("--upload-only", action="store_true", - help="Skips testing processes and uploads logs from previous test run") parser.add_argument("-j", "--jobs", type=int, default=max(cpu_count - 2, 1), help="Max amount of parallel jobs") args = parser.parse_args() @@ -153,18 +144,16 @@ if __name__ == "__main__": tested_cars = {c.upper() for c in tested_cars} full_test = (tested_procs == all_procs) and (tested_cars == all_cars) and all(len(x) == 0 for x in (args.ignore_fields, args.ignore_msgs)) - upload = args.update_refs or args.upload_only os.makedirs(os.path.dirname(FAKEDATA), exist_ok=True) - if upload: + if args.update_refs: assert full_test, "Need to run full test when updating refs" try: with open(REF_COMMIT_FN) as f: ref_commit = f.read().strip() except FileNotFoundError: - print("Couldn't find reference commit") - sys.exit(1) + ref_commit = URLFile(BASE_URL + "ref_commit", cache=False).read().decode().strip() cur_commit = get_commit() if not cur_commit: @@ -179,12 +168,11 @@ if __name__ == "__main__": log_paths: defaultdict[str, dict[str, dict[str, str]]] = defaultdict(lambda: defaultdict(dict)) with concurrent.futures.ProcessPoolExecutor(max_workers=args.jobs) as pool: - if not args.upload_only: - download_segments = [seg for car, seg in segments if car in tested_cars] - log_data: dict[str, LogReader] = {} - p1 = pool.map(get_log_data, download_segments) - for segment, lr in tqdm(p1, desc="Getting Logs", total=len(download_segments)): - log_data[segment] = lr + download_segments = [seg for car, seg in segments if car in tested_cars] + log_data: dict[str, LogReader] = {} + p1 = pool.map(get_log_data, download_segments) + for segment, lr in tqdm(p1, desc="Getting Logs", total=len(download_segments)): + log_data[segment] = lr pool_args: Any = [] for car_brand, segment in segments: @@ -199,15 +187,14 @@ if __name__ == "__main__": if cfg.proc_name not in ('card', 'controlsd', 'lagd') and car_brand not in ('HYUNDAI', 'TOYOTA'): continue - cur_log_fn = os.path.join(FAKEDATA, f"{segment}_{cfg.proc_name}_{cur_commit}.zst") + cur_log_fn = os.path.join(FAKEDATA, f"{segment}_{cfg.proc_name}_{cur_commit}.zst".replace("|", "_")) if args.update_refs: # reference logs will not exist if routes were just regenerated ref_log_path = get_url(*segment.rsplit("--", 1,), "rlog.zst") else: - ref_log_fn = os.path.join(FAKEDATA, f"{segment}_{cfg.proc_name}_{ref_commit}.zst") + ref_log_fn = os.path.join(FAKEDATA, f"{segment}_{cfg.proc_name}_{ref_commit}.zst".replace("|", "_")) ref_log_path = ref_log_fn if os.path.exists(ref_log_fn) else BASE_URL + os.path.basename(ref_log_fn) - dat = None if args.upload_only else log_data[segment] - pool_args.append((segment, cfg, args, cur_log_fn, ref_log_path, dat)) + pool_args.append((segment, cfg, args, cur_log_fn, ref_log_path, log_data[segment])) log_paths[segment][cfg.proc_name]['ref'] = ref_log_path log_paths[segment][cfg.proc_name]['new'] = cur_log_fn @@ -215,19 +202,16 @@ if __name__ == "__main__": results: Any = defaultdict(dict) p2 = pool.map(run_test_process, pool_args) for (segment, proc, result) in tqdm(p2, desc="Running Tests", total=len(pool_args)): - if not args.upload_only: - results[segment][proc] = result + results[segment][proc] = result diff_short, diff_long, failed = format_diff(results, log_paths, ref_commit) - if not upload: + if not args.update_refs: with open(os.path.join(PROC_REPLAY_DIR, "diff.txt"), "w") as f: 
       f.write(diff_long)
 
     print(diff_short)
 
     if failed:
       print("TEST FAILED")
-      print("\n\nTo push the new reference logs for this commit run:")
-      print("./test_processes.py --upload-only")
     else:
       print("TEST SUCCEEDED")
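
For context, here is a minimal sketch (not part of the patch) of the new reference-commit lookup that replaces the checked-in ref_commit file. The BASE_URL value and the URLFile fallback are copied verbatim from the test_processes.py hunk above; the resolve_ref_commit helper name and the relative REF_COMMIT_FN path are illustrative assumptions only, since the real script builds the path from PROC_REPLAY_DIR.

# Illustrative sketch only -- mirrors the fallback added in test_processes.py above.
from openpilot.tools.lib.url_file import URLFile

# Taken from the patch: refs now live on the ci-artifacts process-replay branch.
BASE_URL = "https://raw.githubusercontent.com/commaai/ci-artifacts/refs/heads/process-replay/"
# Assumed relative path for this sketch; the real script uses os.path.join(PROC_REPLAY_DIR, "ref_commit").
REF_COMMIT_FN = "selfdrive/test/process_replay/ref_commit"


def resolve_ref_commit() -> str:
  # Prefer a locally written ref_commit (e.g. left behind by --update-refs) ...
  try:
    with open(REF_COMMIT_FN) as f:
      return f.read().strip()
  except FileNotFoundError:
    # ... otherwise fetch the ref_commit that the master CI job pushed to ci-artifacts.
    return URLFile(BASE_URL + "ref_commit", cache=False).read().decode().strip()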