tools cleanup (#37520)
This commit is contained in:
@@ -1,67 +0,0 @@
|
||||
# JotPluggler
|
||||
|
||||
JotPluggler is a tool to quickly visualize openpilot logs.
|
||||
|
||||
## Usage
|
||||
|
||||
```
|
||||
$ ./jotpluggler/pluggle.py -h
|
||||
usage: pluggle.py [-h] [--demo] [--layout LAYOUT] [route]
|
||||
|
||||
A tool for visualizing openpilot logs.
|
||||
|
||||
positional arguments:
|
||||
route Optional route name to load on startup.
|
||||
|
||||
options:
|
||||
-h, --help show this help message and exit
|
||||
--demo Use the demo route instead of providing one
|
||||
--layout LAYOUT Path to YAML layout file to load on startup
|
||||
```
|
||||
|
||||
Example using route name:
|
||||
|
||||
`./pluggle.py "5beb9b58bd12b691/0000010a--a51155e496"`
|
||||
|
||||
Examples using segment:
|
||||
|
||||
`./pluggle.py "5beb9b58bd12b691/0000010a--a51155e496/1"`
|
||||
|
||||
`./pluggle.py "5beb9b58bd12b691/0000010a--a51155e496/1/q" # use qlogs`
|
||||
|
||||
Example using segment range:
|
||||
|
||||
`./pluggle.py "5beb9b58bd12b691/0000010a--a51155e496/0:1"`
|
||||
|
||||
## Demo
|
||||
|
||||
For a quick demo, run this command:
|
||||
|
||||
`./pluggle.py --demo --layout=layouts/torque-controller.yaml`
|
||||
|
||||
|
||||
## Basic Usage/Features:
|
||||
- The text box to load a route is at the top left of the page, accepts standard openpilot format routes (e.g. `5beb9b58bd12b691/0000010a--a51155e496/0:1`, `https://connect.comma.ai/5beb9b58bd12b691/0000010a--a51155e496/`)
|
||||
- The Play/Pause button is at the bottom of the screen, you can drag the bottom slider to seek. The timeline in timeseries plots is synced with the slider.
|
||||
- The Timeseries List sidebar has several dropdowns, the fields each show the field name and value, synced with the timeline (will show N/A until the time of the first message in that field is reached).
|
||||
- There is a search bar for the timeseries list, you can search for structs or fields, or both by separating with a "/"
|
||||
- You can drag and drop any numeric/boolean field from the timeseries list into a timeseries panel.
|
||||
- You can create more panels with the split buttons (buttons with two rectangles, either horizontal or vertical). You can resize the panels by dragging the grip in between any panel.
|
||||
- You can load and save layouts with the corresponding buttons. Layouts will save all tabs, panels, titles, timeseries, etc.
|
||||
|
||||
## Layouts
|
||||
|
||||
If you create a layout that's useful for others, consider upstreaming it.
|
||||
|
||||
## Plot Interaction Controls
|
||||
|
||||
- **Left click and drag within the plot area** to pan X
|
||||
- Left click and drag on an axis to pan an individual axis (disabled for Y-axis)
|
||||
- **Scroll in the plot area** to zoom in X axes, Y-axis is autofit
|
||||
- Scroll on an axis to zoom an individual axis
|
||||
- **Right click and drag** to select data and zoom into the selected data
|
||||
- Left click while box selecting to cancel the selection
|
||||
- **Double left click** to fit all visible data
|
||||
- Double left click on an axis to fit the individual axis (disabled for Y-axis, always autofit)
|
||||
- **Double right click** to open the plot context menu
|
||||
- **Click legend label icons** to show/hide plot items
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
BIN
tools/jotpluggler/assets/x.png
LFS
BIN
tools/jotpluggler/assets/x.png
LFS
Binary file not shown.
@@ -1,360 +0,0 @@
|
||||
import numpy as np
|
||||
import threading
|
||||
import multiprocessing
|
||||
import bisect
|
||||
from collections import defaultdict
|
||||
from tqdm import tqdm
|
||||
from openpilot.common.swaglog import cloudlog
|
||||
from openpilot.selfdrive.test.process_replay.migration import migrate_all
|
||||
from openpilot.tools.lib.logreader import _LogFileReader, LogReader
|
||||
|
||||
|
||||
def flatten_dict(d: dict, sep: str = "/", prefix: str | None = None) -> dict:
|
||||
result = {}
|
||||
stack: list[tuple] = [(d, prefix)]
|
||||
|
||||
while stack:
|
||||
obj, current_prefix = stack.pop()
|
||||
|
||||
if isinstance(obj, dict):
|
||||
for key, val in obj.items():
|
||||
new_prefix = key if current_prefix is None else f"{current_prefix}{sep}{key}"
|
||||
if isinstance(val, (dict, list)):
|
||||
stack.append((val, new_prefix))
|
||||
else:
|
||||
result[new_prefix] = val
|
||||
elif isinstance(obj, list):
|
||||
for i, item in enumerate(obj):
|
||||
new_prefix = f"{current_prefix}{sep}{i}"
|
||||
if isinstance(item, (dict, list)):
|
||||
stack.append((item, new_prefix))
|
||||
else:
|
||||
result[new_prefix] = item
|
||||
else:
|
||||
if current_prefix is not None:
|
||||
result[current_prefix] = obj
|
||||
return result
|
||||
|
||||
|
||||
def extract_field_types(schema, prefix, field_types_dict):
  """Walk a capnp schema iteratively, recording "<prefix>/<field>" -> capnp type name.

  List fields additionally record "<path>/*" with the element type; struct and
  group fields are descended into (depth-first via an explicit stack).
  """
  pending = [(schema, prefix)]

  while pending:
    node_schema, node_prefix = pending.pop()

    for field in node_schema.fields_list:
      proto = field.proto
      path = f"{node_prefix}/{proto.name}"
      kind = proto.which()

      if kind != 'slot':
        # groups (and any other non-slot kind) are recorded under their own kind name
        field_types_dict[path] = kind
        if kind == 'group':
          pending.append((field.schema, path))
        continue

      slot_type = proto.slot.type
      slot_kind = slot_type.which()
      field_types_dict[path] = slot_kind

      if slot_kind == 'list':
        element_kind = slot_type.list.elementType.which()
        star_path = f"{path}/*"  # '*' stands in for any list index
        field_types_dict[star_path] = element_kind
        if element_kind == 'struct':
          pending.append((field.schema.elementType, star_path))
      elif slot_kind == 'struct':
        pending.append((field.schema, path))
||||
|
||||
|
||||
def _convert_to_optimal_dtype(values_list, capnp_type):
|
||||
dtype_mapping = {
|
||||
'bool': np.bool_, 'int8': np.int8, 'int16': np.int16, 'int32': np.int32, 'int64': np.int64,
|
||||
'uint8': np.uint8, 'uint16': np.uint16, 'uint32': np.uint32, 'uint64': np.uint64,
|
||||
'float32': np.float32, 'float64': np.float64, 'text': object, 'data': object,
|
||||
'enum': object, 'anyPointer': object,
|
||||
}
|
||||
|
||||
target_dtype = dtype_mapping.get(capnp_type, object)
|
||||
return np.array(values_list, dtype=target_dtype)
|
||||
|
||||
|
||||
def _match_field_type(field_path, field_types):
|
||||
if field_path in field_types:
|
||||
return field_types[field_path]
|
||||
|
||||
path_parts = field_path.split('/')
|
||||
template_parts = [p if not p.isdigit() else '*' for p in path_parts]
|
||||
template_path = '/'.join(template_parts)
|
||||
return field_types.get(template_path)
|
||||
|
||||
|
||||
def _get_field_times_values(segment, field_name):
|
||||
if field_name not in segment:
|
||||
return None, None
|
||||
|
||||
field_data = segment[field_name]
|
||||
segment_times = segment['t']
|
||||
|
||||
if field_data['sparse']:
|
||||
if len(field_data['t_index']) == 0:
|
||||
return None, None
|
||||
return segment_times[field_data['t_index']], field_data['values']
|
||||
else:
|
||||
return segment_times, field_data['values']
|
||||
|
||||
|
||||
def msgs_to_time_series(msgs):
  """Extract scalar fields and return (time_series_data, start_time, end_time)."""
  # Per message type: raw timestamps, per-field value columns, and the set of
  # fields that are missing for at least one message (sparse).
  collected_data = defaultdict(lambda: {'timestamps': [], 'columns': defaultdict(list), 'sparse_fields': set()})
  field_types = {}  # "type/field" -> capnp type name, filled from each type's schema once
  extracted_schemas = set()  # message types whose schema has already been walked
  min_time = max_time = None

  # ---- pass 1: accumulate raw columns per message type ----
  for msg in msgs:
    typ = msg.which()
    timestamp = msg.logMonoTime * 1e-9  # ns -> s
    if typ != 'initData':  # initData timestamps don't count toward the route time range
      if min_time is None:
        min_time = timestamp
      max_time = timestamp

    sub_msg = getattr(msg, typ)
    if not hasattr(sub_msg, 'to_dict'):  # non-struct payloads carry no fields
      continue

    # walk each message type's schema only once to learn field capnp types
    if hasattr(sub_msg, 'schema') and typ not in extracted_schemas:
      extract_field_types(sub_msg.schema, typ, field_types)
      extracted_schemas.add(typ)

    try:
      msg_dict = sub_msg.to_dict(verbose=True)
    except Exception as e:
      cloudlog.warning(f"Failed to convert sub_msg.to_dict() for message of type: {typ}: {e}")
      continue

    flat_dict = flatten_dict(msg_dict)
    flat_dict['_valid'] = msg.valid  # synthetic per-message validity field
    field_types[f"{typ}/_valid"] = 'bool'

    type_data = collected_data[typ]
    columns, sparse_fields = type_data['columns'], type_data['sparse_fields']
    known_fields = set(columns.keys())
    missing_fields = known_fields - flat_dict.keys()  # fields seen before but absent in this message

    for field, value in flat_dict.items():
      # a field first appearing after other messages of this type is sparse
      if field not in known_fields and type_data['timestamps']:
        sparse_fields.add(field)
      columns[field].append(value)
      if value is None:
        sparse_fields.add(field)

    # pad fields absent from this message so all columns stay aligned
    for field in missing_fields:
      columns[field].append(None)
      sparse_fields.add(field)

    type_data['timestamps'].append(timestamp)

  # ---- pass 2: convert columns to numpy, splitting sparse vs dense ----
  final_result = {}
  for typ, data in collected_data.items():
    if not data['timestamps']:
      continue

    typ_result = {'t': np.array(data['timestamps'], dtype=np.float64)}
    sparse_fields = data['sparse_fields']

    for field_name, values in data['columns'].items():
      # left-pad columns for fields that first appeared mid-segment
      if len(values) < len(data['timestamps']):
        values = [None] * (len(data['timestamps']) - len(values)) + values
        sparse_fields.add(field_name)

      capnp_type = _match_field_type(f"{typ}/{field_name}", field_types)

      if field_name in sparse_fields:  # extract non-None values and their indices
        non_none_indices = []
        non_none_values = []
        for i, value in enumerate(values):
          if value is not None:
            non_none_indices.append(i)
            non_none_values.append(value)

        if non_none_values:  # check if indices > uint16 max, currently would require a 1000+ Hz signal since indices are within segments
          assert max(non_none_indices) <= 65535, f"Sparse field {typ}/{field_name} has timestamp indices exceeding uint16 max. Max: {max(non_none_indices)}"

        typ_result[field_name] = {
          'values': _convert_to_optimal_dtype(non_none_values, capnp_type),
          'sparse': True,
          't_index': np.array(non_none_indices, dtype=np.uint16),
        }
      else:  # dense representation
        typ_result[field_name] = {'values': _convert_to_optimal_dtype(values, capnp_type), 'sparse': False}

    final_result[typ] = typ_result

  return final_result, min_time or 0.0, max_time or 0.0
||||
|
||||
|
||||
def _process_segment(segment_identifier: str):
  """Worker entry point (runs in a multiprocessing pool): read one log segment,
  migrate its messages to the current schema, and convert them to time series.

  Returns (time_series_dict, start_time, end_time). On any failure, logs and
  returns empty results instead of raising so one bad segment doesn't abort the
  whole route load.
  """
  try:
    lr = _LogFileReader(segment_identifier, sort_by_time=True)
    migrated_msgs = migrate_all(lr)
    return msgs_to_time_series(migrated_msgs)
  except Exception as e:
    cloudlog.warning(f"Warning: Failed to process segment {segment_identifier}: {e}")
    return {}, 0.0, 0.0
||||
|
||||
|
||||
class DataManager:
  """Loads a route's log segments in the background and serves time-series data.

  Segments are appended as workers finish processing them; observers are
  notified of resets, metadata, per-segment progress, and completion. All
  mutable state is guarded by an RLock. Observer callbacks are invoked with the
  lock released so a callback that re-enters the manager cannot deadlock.
  """

  def __init__(self):
    self._segments = []  # per-segment dicts: msg_type -> {'t': ..., field: {...}}
    self._segment_starts = []  # absolute start time of each segment, append order
    self._start_time = 0.0  # absolute timestamp of the first loaded segment
    self._duration = 0.0
    self._paths = set()  # every known "msgType/field" path
    self._observers = []
    self._loading = False
    self._lock = threading.RLock()

  def load_route(self, route: str) -> None:
    """Start loading `route` on a daemon thread; no-op if a load is in progress."""
    # Check-and-set under the lock: the old unlocked check raced with the
    # background loader, letting two concurrent calls both start a load.
    with self._lock:
      if self._loading:
        return
      self._loading = True
    self._reset()
    threading.Thread(target=self._load_async, args=(route,), daemon=True).start()

  def get_timeseries(self, path: str):
    """Return (times, values) for "msgType/field" concatenated across segments.

    Times are relative to the route start. Returns a pair of empty arrays when
    the path has no data.
    """
    with self._lock:
      msg_type, field = path.split('/', 1)
      times, values = [], []

      for segment in self._segments:
        if msg_type in segment:
          field_times, field_values = _get_field_times_values(segment[msg_type], field)
          if field_times is not None:
            times.append(field_times)
            values.append(field_values)

      if not times:
        return np.array([]), np.array([])

      combined_times = np.concatenate(times) - self._start_time

      if len(values) > 1:
        first_dtype = values[0].dtype
        if all(arr.dtype == first_dtype for arr in values):  # check if all arrays have compatible dtypes
          combined_values = np.concatenate(values)
        else:
          # mixed dtypes across segments: fall back to object arrays
          combined_values = np.concatenate([arr.astype(object) for arr in values])
      else:
        combined_values = values[0] if values else np.array([])

      return combined_times, combined_values

  def get_value_at(self, path: str, time: float):
    """Return the most recent value of `path` at route-relative `time`.

    Looks back at most MAX_LOOKBACK seconds, also across the previous segment
    boundary; returns None when nothing recent enough exists.
    """
    with self._lock:
      MAX_LOOKBACK = 5.0  # seconds
      absolute_time = self._start_time + time
      message_type, field = path.split('/', 1)
      # segment whose start time is at or before the requested time
      current_index = bisect.bisect_right(self._segment_starts, absolute_time) - 1
      for index in (current_index, current_index - 1):
        if not 0 <= index < len(self._segments):
          continue
        segment = self._segments[index].get(message_type)
        if not segment:
          continue
        times, values = _get_field_times_values(segment, field)
        if times is None or len(times) == 0 or (index != current_index and absolute_time - times[-1] > MAX_LOOKBACK):
          continue
        position = np.searchsorted(times, absolute_time, 'right') - 1
        if position >= 0 and absolute_time - times[position] <= MAX_LOOKBACK:
          return values[position]
      return None

  def get_all_paths(self):
    """Return every known "msgType/field" path, sorted."""
    with self._lock:
      return sorted(self._paths)

  def get_duration(self):
    """Return the loaded route duration in seconds (0.0 before any segment)."""
    with self._lock:
      return self._duration

  def is_plottable(self, path: str):
    """True when the path has data whose dtype is numeric or boolean."""
    _, values = self.get_timeseries(path)
    if len(values) == 0:
      return False
    return np.issubdtype(values.dtype, np.number) or np.issubdtype(values.dtype, np.bool_)

  def add_observer(self, callback):
    with self._lock:
      self._observers.append(callback)

  def remove_observer(self, callback):
    with self._lock:
      if callback in self._observers:
        self._observers.remove(callback)

  def _reset(self):
    # Clear all loaded data; notify observers outside the lock.
    with self._lock:
      self._loading = True
      self._segments.clear()
      self._segment_starts.clear()
      self._paths.clear()
      self._start_time = self._duration = 0.0
      observers = self._observers.copy()

    for callback in observers:
      callback({'reset': True})

  def _load_async(self, route: str):
    # Background thread: fan segment processing out to a worker pool and feed
    # results back incrementally via _add_segment.
    try:
      lr = LogReader(route, sort_by_time=True)
      if not lr.logreader_identifiers:
        cloudlog.warning(f"Warning: No log segments found for route: {route}")
        return

      total_segments = len(lr.logreader_identifiers)
      with self._lock:
        observers = self._observers.copy()
      for callback in observers:
        callback({'metadata_loaded': True, 'total_segments': total_segments})

      num_processes = max(1, multiprocessing.cpu_count() // 2)
      with multiprocessing.Pool(processes=num_processes) as pool, tqdm(total=len(lr.logreader_identifiers), desc="Processing Segments") as pbar:
        for segment_result, start_time, end_time in pool.imap(_process_segment, lr.logreader_identifiers):
          pbar.update(1)
          if segment_result:  # failed segments come back empty — skip them
            self._add_segment(segment_result, start_time, end_time)
    except Exception:
      cloudlog.exception(f"Error loading route {route}:")
    finally:
      self._finalize_loading()

  def _add_segment(self, segment_data: dict, start_time: float, end_time: float):
    # Register one processed segment; notify observers outside the lock.
    with self._lock:
      self._segments.append(segment_data)
      self._segment_starts.append(start_time)

      if len(self._segments) == 1:
        self._start_time = start_time
      self._duration = end_time - self._start_time

      for msg_type, data in segment_data.items():
        for field_name in data.keys():
          if field_name != 't':  # 't' is the shared timestamp array, not a field
            self._paths.add(f"{msg_type}/{field_name}")

      observers = self._observers.copy()
      duration = self._duration
      segment_count = len(self._segments)

    for callback in observers:
      callback({'segment_added': True, 'duration': duration, 'segment_count': segment_count})

  def _finalize_loading(self):
    # Mark loading done; notify observers outside the lock.
    with self._lock:
      self._loading = False
      observers = self._observers.copy()
      duration = self._duration

    for callback in observers:
      callback({'loading_complete': True, 'duration': duration})
||||
@@ -1,315 +0,0 @@
|
||||
import os
|
||||
import re
|
||||
import threading
|
||||
import numpy as np
|
||||
import dearpygui.dearpygui as dpg
|
||||
|
||||
|
||||
class DataTreeNode:
  """One node of the timeseries browser tree; leaves correspond to data paths."""

  def __init__(self, name: str, full_path: str = "", parent=None):
    self.name, self.full_path, self.parent = name, full_path, parent
    # child maps: all known children / the subset passing the current search /
    # the subset whose UI widgets have been built
    self.children: dict[str, DataTreeNode] = {}
    self.filtered_children: dict[str, DataTreeNode] = {}
    self.created_children: dict[str, DataTreeNode] = {}
    # leaf metadata (is_plottable is resolved lazily, hence the None sentinel)
    self.is_leaf = False
    self.is_plottable: bool | None = None
    # dearpygui widget state
    self.ui_created = False
    self.children_ui_created = False
    self.ui_tag: str | None = None
||||
|
||||
|
||||
class DataTree:
  """Searchable sidebar tree of every timeseries path in the loaded route.

  UI nodes are built incrementally (at most MAX_NODES_PER_FRAME per frame) from
  a build queue so large routes don't stall the render loop. Data-load
  callbacks arrive from a background thread, so shared state is guarded by an
  RLock; all dearpygui mutation happens in update_frame on the main thread.
  """
  MAX_NODES_PER_FRAME = 50  # throttle widget creation to keep frame times stable

  def __init__(self, data_manager, playback_manager):
    self.data_manager = data_manager
    self.playback_manager = playback_manager
    self.current_search = ""
    self.data_tree = DataTreeNode(name="root")
    self._build_queue: dict[str, tuple[DataTreeNode, DataTreeNode, str | int]] = {}  # full_path -> (node, parent, before_tag)
    self._current_created_paths: set[str] = set()  # paths with UI widgets built
    self._current_filtered_paths: set[str] = set()  # paths passing the active search
    self._path_to_node: dict[str, DataTreeNode] = {}  # full_path -> node
    self._expanded_tags: set[str] = set()  # tree-node tags currently auto-expanded
    self._item_handlers: dict[str, str] = {}  # ui_tag -> handler_tag
    self._char_width = None  # pixel width of one character, measured lazily from the font
    self._queued_search = None  # search text waiting to be applied next frame
    self._new_data = False  # set by data-manager callbacks; triggers a rebuild
    self._ui_lock = threading.RLock()
    self._handlers_to_delete = []  # handler registries to delete on a later frame
    self.data_manager.add_observer(self._on_data_loaded)

  def create_ui(self, parent_tag: str):
    """Build the static sidebar chrome; tree entries are added by update_frame."""
    with dpg.child_window(parent=parent_tag, border=False, width=-1, height=-1):
      dpg.add_text("Timeseries List")
      dpg.add_separator()
      dpg.add_input_text(tag="search_input", width=-1, hint="Search fields...", callback=self.search_data)
      dpg.add_separator()
      with dpg.child_window(border=False, width=-1, height=-1):
        with dpg.group(tag="data_tree_container"):
          pass

  def _on_data_loaded(self, data: dict):
    # DataManager observer callback (background thread): only set a flag;
    # the actual UI work happens in update_frame on the main thread.
    with self._ui_lock:
      if data.get('segment_added') or data.get('reset'):
        self._new_data = True

  def update_frame(self, font):
    """Per-frame tick: flush deferred handler deletions, apply pending data or
    search changes, then drain part of the build queue."""
    if self._handlers_to_delete:  # we need to do everything in main thread, frame callbacks are flaky
      dpg.render_dearpygui_frame()  # wait a frame to ensure queued callbacks are done
      with self._ui_lock:
        for handler in self._handlers_to_delete:
          dpg.delete_item(handler)
        self._handlers_to_delete.clear()

    with self._ui_lock:
      if self._char_width is None:
        if size := dpg.get_text_size(" ", font=font):
          self._char_width = size[0] / 2  # we scale font 2x and downscale to fix hidpi bug

      # new data and queued searches each take a full frame; queue draining resumes after
      if self._new_data:
        self._process_path_change()
        self._new_data = False
        return

      if self._queued_search is not None:
        self.current_search = self._queued_search
        self._process_path_change()
        self._queued_search = None
        return

      nodes_processed = 0
      while self._build_queue and nodes_processed < self.MAX_NODES_PER_FRAME:
        child_node, parent, before_tag = self._build_queue.pop(next(iter(self._build_queue)))
        parent_tag = "data_tree_container" if parent.name == "root" else parent.ui_tag
        if not child_node.ui_created:
          if child_node.is_leaf:
            self._create_leaf_ui(child_node, parent_tag, before_tag)
          else:
            self._create_tree_node_ui(child_node, parent_tag, before_tag)
          parent.created_children[child_node.name] = parent.children[child_node.name]
          self._current_created_paths.add(child_node.full_path)
        nodes_processed += 1

  def _process_path_change(self):
    # Recompute which paths pass the filter and diff against what's built.
    self._build_queue.clear()
    search_term = self.current_search.strip().lower()
    all_paths = set(self.data_manager.get_all_paths())
    new_filtered_leafs = {path for path in all_paths if self._should_show_path(path, search_term)}
    new_filtered_paths = set(new_filtered_leafs)
    # every ancestor prefix of a visible leaf must also be visible
    for path in new_filtered_leafs:
      parts = path.split('/')
      for i in range(1, len(parts)):
        prefix = '/'.join(parts[:i])
        new_filtered_paths.add(prefix)
    created_paths_to_remove = self._current_created_paths - new_filtered_paths
    filtered_paths_to_remove = self._current_filtered_paths - new_filtered_leafs

    if created_paths_to_remove or filtered_paths_to_remove:
      self._remove_paths_from_tree(created_paths_to_remove, filtered_paths_to_remove)
      self._apply_expansion_to_tree(self.data_tree, search_term)

    paths_to_add = new_filtered_leafs - self._current_created_paths
    if paths_to_add:
      self._add_paths_to_tree(paths_to_add)
      self._apply_expansion_to_tree(self.data_tree, search_term)
    self._current_filtered_paths = new_filtered_paths

  def _remove_paths_from_tree(self, created_paths_to_remove, filtered_paths_to_remove):
    # Delete UI for nodes that no longer pass the filter. Reverse-sorted so
    # deeper paths are handled before their parents.
    for path in sorted(created_paths_to_remove, reverse=True):
      current_node = self._path_to_node[path]

      if len(current_node.created_children) == 0:
        self._current_created_paths.remove(current_node.full_path)
        if item_handler_tag := self._item_handlers.get(current_node.ui_tag):
          dpg.configure_item(item_handler_tag, show=False)  # disable now, delete on a later frame
          self._handlers_to_delete.append(item_handler_tag)
          del self._item_handlers[current_node.ui_tag]
        dpg.delete_item(current_node.ui_tag)
        current_node.ui_created = False
        current_node.ui_tag = None
        current_node.children_ui_created = False
        del current_node.parent.created_children[current_node.name]
        del current_node.parent.filtered_children[current_node.name]

    # prune now-empty ancestors from the filtered view, walking upward
    for path in filtered_paths_to_remove:
      parts = path.split('/')
      current_node = self._path_to_node[path]

      part_array_index = -1
      while len(current_node.filtered_children) == 0 and part_array_index >= -len(parts):
        current_node = current_node.parent
        if parts[part_array_index] in current_node.filtered_children:
          del current_node.filtered_children[parts[part_array_index]]
        part_array_index -= 1

  def _add_paths_to_tree(self, paths):
    # Insert each leaf path, creating intermediate nodes as needed, then re-queue
    # UI builds for every parent that gained children.
    parent_nodes_to_recheck = set()
    for path in sorted(paths):
      parts = path.split('/')
      current_node = self.data_tree
      current_path_prefix = ""

      for i, part in enumerate(parts):
        current_path_prefix = f"{current_path_prefix}/{part}" if current_path_prefix else part
        if i < len(parts):
          parent_nodes_to_recheck.add(current_node)  # for incremental changes from new data
        if part not in current_node.children:
          current_node.children[part] = DataTreeNode(name=part, full_path=current_path_prefix, parent=current_node)
          self._path_to_node[current_path_prefix] = current_node.children[part]
        current_node.filtered_children[part] = current_node.children[part]
        current_node = current_node.children[part]

      if not current_node.is_leaf:
        current_node.is_leaf = True

    for p_node in parent_nodes_to_recheck:
      p_node.children_ui_created = False  # force re-queue so new entries get built
      self._request_children_build(p_node)

  def _get_node_label_and_expand(self, node: DataTreeNode, search_term: str):
    # Returns the display label and whether the node should auto-expand for the search.
    label = f"{node.name} ({len(node.filtered_children)} fields)"
    expand = len(search_term) > 0 and any(search_term in path for path in self._get_descendant_paths(node))
    if expand and node.parent and len(node.parent.filtered_children) > 100 and len(node.filtered_children) > 2:
      label += " (+)"  # symbol for large lists which aren't fully expanded for performance (only affects procLog rn)
      expand = False
    return label, expand

  def _apply_expansion_to_tree(self, node: DataTreeNode, search_term: str):
    # Recursively sync each built tree node's label and open/closed state with the search.
    if node.ui_created and not node.is_leaf and node.ui_tag and dpg.does_item_exist(node.ui_tag):
      label, expand = self._get_node_label_and_expand(node, search_term)
      if expand:
        self._expanded_tags.add(node.ui_tag)
        dpg.set_value(node.ui_tag, expand)
      elif node.ui_tag in self._expanded_tags:  # not expanded and was expanded
        self._expanded_tags.remove(node.ui_tag)
        dpg.set_value(node.ui_tag, expand)
        dpg.delete_item(node.ui_tag, children_only=True)  # delete children (not visible since collapsed)
        self._reset_ui_state_recursive(node)
        node.children_ui_created = False
      dpg.set_item_label(node.ui_tag, label)
    for child in node.created_children.values():
      self._apply_expansion_to_tree(child, search_term)

  def _reset_ui_state_recursive(self, node: DataTreeNode):
    # Mark this subtree's widgets as destroyed (the actual dpg items were
    # removed by the caller's delete_item) and defer handler deletion.
    for child in node.created_children.values():
      if child.ui_tag is not None:
        if item_handler_tag := self._item_handlers.get(child.ui_tag):
          self._handlers_to_delete.append(item_handler_tag)
          dpg.configure_item(item_handler_tag, show=False)
          del self._item_handlers[child.ui_tag]
      self._reset_ui_state_recursive(child)
      child.ui_created = False
      child.ui_tag = None
      child.children_ui_created = False
      self._current_created_paths.remove(child.full_path)
    node.created_children.clear()

  def search_data(self):
    # Search-box callback (dpg thread): queue the text for the next update_frame.
    with self._ui_lock:
      self._queued_search = dpg.get_value("search_input")

  def _create_tree_node_ui(self, node: DataTreeNode, parent_tag: str, before: str | int):
    # Build an expandable tree node; children are built lazily when it opens
    # or becomes visible (via the item handlers below).
    node.ui_tag = f"tree_{node.full_path}"
    search_term = self.current_search.strip().lower()
    label, expand = self._get_node_label_and_expand(node, search_term)
    if expand:
      self._expanded_tags.add(node.ui_tag)
    elif node.ui_tag in self._expanded_tags:
      self._expanded_tags.remove(node.ui_tag)

    with dpg.tree_node(
      label=label, parent=parent_tag, tag=node.ui_tag, default_open=expand, open_on_arrow=True, open_on_double_click=True, before=before, delay_search=True
    ):
      with dpg.item_handler_registry() as handler_tag:
        dpg.add_item_toggled_open_handler(callback=lambda s, a, u: self._request_children_build(node))
        dpg.add_item_visible_handler(callback=lambda s, a, u: self._request_children_build(node))
      dpg.bind_item_handler_registry(node.ui_tag, handler_tag)
      self._item_handlers[node.ui_tag] = handler_tag
    node.ui_created = True

  def _create_leaf_ui(self, node: DataTreeNode, parent_tag: str, before: str | int):
    # Build a leaf row: field name + live value cell, draggable into plots
    # when the underlying data is numeric/boolean.
    node.ui_tag = f"leaf_{node.full_path}"
    with dpg.group(parent=parent_tag, tag=node.ui_tag, before=before, delay_search=True):
      with dpg.table(header_row=False, policy=dpg.mvTable_SizingStretchProp, delay_search=True):
        dpg.add_table_column(init_width_or_weight=0.5)
        dpg.add_table_column(init_width_or_weight=0.5)
        with dpg.table_row():
          dpg.add_text(node.name)
          dpg.add_text("N/A", tag=f"value_{node.full_path}")

    if node.is_plottable is None:  # resolved lazily; requires scanning the data
      node.is_plottable = self.data_manager.is_plottable(node.full_path)
    if node.is_plottable:
      with dpg.drag_payload(parent=node.ui_tag, drag_data=node.full_path, payload_type="TIMESERIES_PAYLOAD"):
        dpg.add_text(f"Plot: {node.full_path}")

    with dpg.item_handler_registry() as handler_tag:
      dpg.add_item_visible_handler(callback=self._on_item_visible, user_data=node.full_path)
    dpg.bind_item_handler_registry(node.ui_tag, handler_tag)
    self._item_handlers[node.ui_tag] = handler_tag
    node.ui_created = True

  def _on_item_visible(self, sender, app_data, user_data):
    # Refresh a visible leaf's value cell for the current playback time.
    with self._ui_lock:
      path = user_data
      value_tag = f"value_{path}"
      if not dpg.does_item_exist(value_tag):
        return
      value_column_width = dpg.get_item_rect_size(f"leaf_{path}")[0] // 2
      value = self.data_manager.get_value_at(path, self.playback_manager.current_time_s)
      if value is not None:
        formatted_value = self.format_and_truncate(value, value_column_width, self._char_width)
        dpg.set_value(value_tag, formatted_value)
      else:
        dpg.set_value(value_tag, "N/A")

  def _request_children_build(self, node: DataTreeNode):
    # Queue UI builds for a node's filtered children, preserving sorted order
    # relative to widgets that already exist.
    with self._ui_lock:
      if not node.children_ui_created and (node.name == "root" or (node.ui_tag is not None and dpg.get_value(node.ui_tag))):  # check root or node expanded
        sorted_children = sorted(node.filtered_children.values(), key=self._natural_sort_key)
        next_existing: list[int | str] = [0] * len(sorted_children)
        current_before_tag: int | str = 0

        for i in range(len(sorted_children) - 1, -1, -1):  # calculate "before_tag" for correct ordering when incrementally building tree
          child = sorted_children[i]
          next_existing[i] = current_before_tag
          if child.ui_created:
            candidate_tag = f"leaf_{child.full_path}" if child.is_leaf else f"tree_{child.full_path}"
            if dpg.does_item_exist(candidate_tag):
              current_before_tag = candidate_tag

        for i, child_node in enumerate(sorted_children):
          if not child_node.ui_created:
            before_tag = next_existing[i]
            self._build_queue[child_node.full_path] = (child_node, node, before_tag)
        node.children_ui_created = True

  def _should_show_path(self, path: str, search_term: str) -> bool:
    # DEPRECATED fields are hidden unless the SHOW_DEPRECATED env var is set.
    if 'DEPRECATED' in path and not os.environ.get('SHOW_DEPRECATED'):
      return False
    return not search_term or search_term in path.lower()

  def _natural_sort_key(self, node: DataTreeNode):
    # Branches before leaves; digit runs compared numerically ("2" < "10").
    node_type_key = node.is_leaf
    parts = [int(p) if p.isdigit() else p.lower() for p in re.split(r'(\d+)', node.name) if p]
    return (node_type_key, parts)

  def _get_descendant_paths(self, node: DataTreeNode):
    # Yield lowercase relative paths of all filtered leaves under `node`.
    for child_name, child_node in node.filtered_children.items():
      child_name_lower = child_name.lower()
      if child_node.is_leaf:
        yield child_name_lower
      else:
        for path in self._get_descendant_paths(child_node):
          yield f"{child_name_lower}/{path}"

  @staticmethod
  def format_and_truncate(value, available_width: float, char_width: float) -> str:
    """Format a value (floats to 5 decimals) and ellipsize it to fit the column width."""
    s = f"{value:.5f}" if np.issubdtype(type(value), np.floating) else str(value)
    max_chars = int(available_width / char_width)
    if len(s) > max_chars:
      return s[: max(0, max_chars - 3)] + "..."
    return s
||||
@@ -1,477 +0,0 @@
|
||||
import dearpygui.dearpygui as dpg
|
||||
from openpilot.tools.jotpluggler.data import DataManager
|
||||
from openpilot.tools.jotpluggler.views import TimeSeriesPanel
|
||||
|
||||
GRIP_SIZE = 4
|
||||
MIN_PANE_SIZE = 60
|
||||
|
||||
class LayoutManager:
  """Manages the tab bar and the per-tab panel layouts of the plot area.

  Each tab owns a PanelLayoutManager; only the active tab's panels have live
  dearpygui UI at any time — switching tabs destroys the old tab's widgets and
  rebuilds the new tab's.
  """

  def __init__(self, data_manager, playback_manager, worker_manager, scale: float = 1.0):
    self.data_manager = data_manager
    self.playback_manager = playback_manager
    self.worker_manager = worker_manager
    self.scale = scale  # UI scale factor applied to all fixed pixel sizes
    self.container_tag = "plot_layout_container"
    self.tab_bar_tag = "tab_bar_container"
    self.tab_content_tag = "tab_content_area"

    self.active_tab = 0
    initial_panel_layout = PanelLayoutManager(data_manager, playback_manager, worker_manager, scale)
    self.tabs: dict = {0: {"name": "Tab 1", "panel_layout": initial_panel_layout}}
    # Monotonically increasing; ids of closed tabs are never reused.
    self._next_tab_id = self.active_tab + 1

  def to_dict(self) -> dict:
    """Serialize all tabs (names + panel layouts) into a plain dict for YAML export."""
    return {
      "tabs": {
        str(tab_id): {
          "name": tab_data["name"],
          "panel_layout": tab_data["panel_layout"].to_dict()
        }
        for tab_id, tab_data in self.tabs.items()
      }
    }

  def clear_and_load_from_dict(self, data: dict):
    """Replace all existing tabs with the ones described by *data* (inverse of to_dict)."""
    tab_ids_to_close = list(self.tabs.keys())
    for tab_id in tab_ids_to_close:
      self.close_tab(tab_id, force=True)  # force: allow closing even the last remaining tab

    for tab_id_str, tab_data in data["tabs"].items():
      tab_id = int(tab_id_str)  # YAML keys are strings; internal tab ids are ints
      panel_layout = PanelLayoutManager.load_from_dict(
        tab_data["panel_layout"], self.data_manager, self.playback_manager,
        self.worker_manager, self.scale
      )
      self.tabs[tab_id] = {
        "name": tab_data["name"],
        "panel_layout": panel_layout
      }

    self.active_tab = min(self.tabs.keys()) if self.tabs else 0
    self._next_tab_id = max(self.tabs.keys()) + 1 if self.tabs else 1

  def create_ui(self, parent_tag: str):
    """(Re)build the whole layout UI (tab bar + active tab content) under *parent_tag*."""
    if dpg.does_item_exist(self.container_tag):
      dpg.delete_item(self.container_tag)  # rebuild from scratch on repeated calls

    with dpg.child_window(tag=self.container_tag, parent=parent_tag, border=False, width=-1, height=-1, no_scrollbar=True, no_scroll_with_mouse=True):
      self._create_tab_bar()
      self._create_tab_content()
      dpg.bind_item_theme(self.tab_bar_tag, "tab_bar_theme")

  def _create_tab_bar(self):
    """Build the horizontal tab strip: one widget per tab plus a trailing "+" button."""
    text_size = int(13 * self.scale)
    with dpg.child_window(tag=self.tab_bar_tag, parent=self.container_tag, height=(text_size + 8), border=False, horizontal_scrollbar=True):
      with dpg.group(horizontal=True, tag="tab_bar_group"):
        for tab_id, tab_data in self.tabs.items():
          self._create_tab_ui(tab_id, tab_data["name"])
        dpg.add_image_button(texture_tag="plus_texture", callback=self.add_tab, width=text_size, height=text_size, tag="add_tab_button")
        dpg.bind_item_theme("add_tab_button", "inactive_tab_theme")

  def _create_tab_ui(self, tab_id: int, tab_name: str):
    """Create one clickable tab widget: an editable name field plus a close button."""
    text_size = int(13 * self.scale)
    tab_width = int(140 * self.scale)
    with dpg.child_window(width=tab_width, height=-1, border=False, no_scrollbar=True, tag=f"tab_window_{tab_id}", parent="tab_bar_group"):
      with dpg.group(horizontal=True, tag=f"tab_group_{tab_id}"):
        dpg.add_input_text(
          default_value=tab_name, width=tab_width - text_size - 16, callback=lambda s, v, u: self.rename_tab(u, v), user_data=tab_id, tag=f"tab_input_{tab_id}"
        )
        dpg.add_image_button(
          texture_tag="x_texture", callback=lambda s, a, u: self.close_tab(u), user_data=tab_id, width=text_size, height=text_size, tag=f"tab_close_{tab_id}"
        )
    # A click anywhere on the tab group activates the tab.
    with dpg.item_handler_registry(tag=f"tab_handler_{tab_id}"):
      dpg.add_item_clicked_handler(callback=lambda s, a, u: self.switch_tab(u), user_data=tab_id)
    dpg.bind_item_handler_registry(f"tab_group_{tab_id}", f"tab_handler_{tab_id}")

    theme_tag = "active_tab_theme" if tab_id == self.active_tab else "inactive_tab_theme"
    dpg.bind_item_theme(f"tab_window_{tab_id}", theme_tag)

  def _create_tab_content(self):
    """Create the content area and populate it with the active tab's panels."""
    with dpg.child_window(tag=self.tab_content_tag, parent=self.container_tag, border=False, width=-1, height=-1, no_scrollbar=True, no_scroll_with_mouse=True):
      if self.active_tab in self.tabs:
        active_panel_layout = self.tabs[self.active_tab]["panel_layout"]
        active_panel_layout.create_ui()

  def add_tab(self):
    """Create a new empty tab, append its widget to the tab bar, and activate it."""
    new_panel_layout = PanelLayoutManager(self.data_manager, self.playback_manager, self.worker_manager, self.scale)
    new_tab = {"name": f"Tab {self._next_tab_id + 1}", "panel_layout": new_panel_layout}
    self.tabs[self._next_tab_id] = new_tab
    self._create_tab_ui(self._next_tab_id, new_tab["name"])
    dpg.move_item("add_tab_button", parent="tab_bar_group")  # move plus button to end
    self.switch_tab(self._next_tab_id)
    self._next_tab_id += 1

  def close_tab(self, tab_id: int, force = False):
    """Destroy a tab's UI and state; *force* permits closing the last tab (used on layout reload)."""
    if len(self.tabs) <= 1 and not force:
      return  # don't allow closing the last tab

    tab_to_close = self.tabs[tab_id]
    tab_to_close["panel_layout"].destroy_ui()
    # Remove every dpg item created for this tab in _create_tab_ui.
    for suffix in ["window", "group", "input", "close", "handler"]:
      tag = f"tab_{suffix}_{tab_id}"
      if dpg.does_item_exist(tag):
        dpg.delete_item(tag)
    del self.tabs[tab_id]

    if self.active_tab == tab_id and self.tabs:  # switch to another tab if we closed the active one
      self.active_tab = next(iter(self.tabs.keys()))
      self._switch_tab_content()
      dpg.bind_item_theme(f"tab_window_{self.active_tab}", "active_tab_theme")

  def switch_tab(self, tab_id: int):
    """Activate *tab_id*: tear down the current tab's panels and build the new tab's."""
    if tab_id == self.active_tab or tab_id not in self.tabs:
      return

    current_panel_layout = self.tabs[self.active_tab]["panel_layout"]
    current_panel_layout.destroy_ui()
    dpg.bind_item_theme(f"tab_window_{self.active_tab}", "inactive_tab_theme")  # deactivate old tab
    self.active_tab = tab_id
    dpg.bind_item_theme(f"tab_window_{tab_id}", "active_tab_theme")  # activate new tab
    self._switch_tab_content()

  def _switch_tab_content(self):
    """Rebuild the content area for the (already updated) active tab."""
    dpg.delete_item(self.tab_content_tag, children_only=True)
    active_panel_layout = self.tabs[self.active_tab]["panel_layout"]
    active_panel_layout.create_ui()
    active_panel_layout.update_all_panels()

  def rename_tab(self, tab_id: int, new_name: str):
    """Store the user-edited tab name (the input widget already shows it)."""
    if tab_id in self.tabs:
      self.tabs[tab_id]["name"] = new_name

  def update_all_panels(self):
    """Per-frame tick: only the active tab's panels need updating (others have no UI)."""
    self.tabs[self.active_tab]["panel_layout"].update_all_panels()

  def on_viewport_resize(self):
    """Propagate a viewport resize to the active tab's panel layout."""
    self.tabs[self.active_tab]["panel_layout"].on_viewport_resize()
|
||||
|
||||
class PanelLayoutManager:
  """Manages a single tab's splittable panel tree.

  The layout is a recursive dict structure: a leaf is
  ``{"type": "panel", "panel": <panel object>}`` and an inner node is
  ``{"type": "split", "orientation": 0|1, "proportions": [...], "children": [...]}``
  (orientation 0 = horizontal split, 1 = vertical). dpg item tags are derived
  deterministically from the node's path in this tree (see _path_to_tag), which
  is what lets rebuilds and resizes address existing widgets.
  """

  def __init__(self, data_manager: DataManager, playback_manager, worker_manager, scale: float = 1.0):
    self.data_manager = data_manager
    self.playback_manager = playback_manager
    self.worker_manager = worker_manager
    self.scale = scale
    self.active_panels: list = []  # panels that currently have live UI
    self.parent_tag = "tab_content_area"
    # Set when create_ui ran before the container had a size; resolved in update_all_panels.
    self._queue_resize = False
    self._created_handler_tags: set[str] = set()  # grip drag-handler registries, for cleanup

    self.grip_size = int(GRIP_SIZE * self.scale)
    self.min_pane_size = int(MIN_PANE_SIZE * self.scale)

    initial_panel = TimeSeriesPanel(data_manager, playback_manager, worker_manager)
    self.layout: dict = {"type": "panel", "panel": initial_panel}

  def to_dict(self) -> dict:
    """Serialize the layout tree to plain dicts for YAML export."""
    return self._layout_to_dict(self.layout)

  def _layout_to_dict(self, layout: dict) -> dict:
    # Recursive helper for to_dict; panels serialize themselves.
    if layout["type"] == "panel":
      return {
        "type": "panel",
        "panel": layout["panel"].to_dict()
      }
    else:  # split
      return {
        "type": "split",
        "orientation": layout["orientation"],
        "proportions": layout["proportions"],
        "children": [self._layout_to_dict(child) for child in layout["children"]]
      }

  @classmethod
  def load_from_dict(cls, data: dict, data_manager, playback_manager, worker_manager, scale: float = 1.0):
    """Alternate constructor: build a manager whose layout tree comes from *data*."""
    manager = cls(data_manager, playback_manager, worker_manager, scale)
    manager.layout = manager._dict_to_layout(data)
    return manager

  def _dict_to_layout(self, data: dict) -> dict:
    # Recursive inverse of _layout_to_dict; instantiates concrete panel objects.
    if data["type"] == "panel":
      panel_data = data["panel"]
      if panel_data["type"] == "timeseries":
        panel = TimeSeriesPanel.load_from_dict(
          panel_data, self.data_manager, self.playback_manager, self.worker_manager
        )
        return {"type": "panel", "panel": panel}
      else:
        # Handle future panel types here or make a general mapping
        raise ValueError(f"Unknown panel type: {panel_data['type']}")
    else:  # split
      return {
        "type": "split",
        "orientation": data["orientation"],
        "proportions": data["proportions"],
        "children": [self._dict_to_layout(child) for child in data["children"]]
      }

  def create_ui(self):
    """Build the full widget tree for this layout under self.parent_tag."""
    self.active_panels.clear()
    if dpg.does_item_exist(self.parent_tag):
      dpg.delete_item(self.parent_tag, children_only=True)
      self._cleanup_all_handlers()

    container_width, container_height = dpg.get_item_rect_size(self.parent_tag)
    if container_width == 0 and container_height == 0:
      # Container not laid out yet (first frame); sizes get fixed up in update_all_panels.
      self._queue_resize = True
    self._create_ui_recursive(self.layout, self.parent_tag, [], container_width, container_height)

  def destroy_ui(self):
    """Tear down all panel widgets and grip handlers for this layout."""
    self._cleanup_ui_recursive(self.layout, [])
    self._cleanup_all_handlers()
    self.active_panels.clear()

  def _cleanup_all_handlers(self):
    # Delete every grip handler registry we created; registries are global dpg
    # items and would leak across rebuilds otherwise.
    for handler_tag in list(self._created_handler_tags):
      if dpg.does_item_exist(handler_tag):
        dpg.delete_item(handler_tag)
    self._created_handler_tags.clear()

  def _create_ui_recursive(self, layout: dict, parent_tag: str, path: list[int], width: int, height: int):
    # Dispatch on node type; *path* is the node's position in the layout tree.
    if layout["type"] == "panel":
      self._create_panel_ui(layout, parent_tag, path, width, height)
    else:
      self._create_split_ui(layout, parent_tag, path, width, height)

  def _create_panel_ui(self, layout: dict, parent_tag: str, path: list[int], width: int, height: int):
    """Create one leaf panel: a scrollable top bar (title, type, actions) plus the panel content."""
    panel_tag = self._path_to_tag(path, "panel")
    panel = layout["panel"]
    self.active_panels.append(panel)
    text_size = int(13 * self.scale)
    bar_height = (text_size + 24) if width < int(329 * self.scale + 64) else (text_size + 8)  # adjust height to allow for scrollbar

    with dpg.child_window(parent=parent_tag, border=False, width=-1, height=-1, no_scrollbar=True):
      with dpg.group(horizontal=True):
        with dpg.child_window(tag=panel_tag, width=-(text_size + 16), height=bar_height, horizontal_scrollbar=True, no_scroll_with_mouse=True, border=False):
          with dpg.group(horizontal=True):
            # if you change the widths make sure to change the sum of widths (currently 329 * scale)
            dpg.add_input_text(default_value=panel.title, width=int(150 * self.scale), callback=lambda s, v: setattr(panel, "title", v))
            dpg.add_combo(items=["Time Series"], default_value="Time Series", width=int(100 * self.scale))
            dpg.add_button(label="Clear", callback=lambda: self.clear_panel(panel), width=int(40 * self.scale))
            dpg.add_image_button(texture_tag="split_h_texture", callback=lambda: self.split_panel(path, 0), width=text_size, height=text_size)
            dpg.add_image_button(texture_tag="split_v_texture", callback=lambda: self.split_panel(path, 1), width=text_size, height=text_size)
            dpg.add_image_button(texture_tag="x_texture", callback=lambda: self.delete_panel(path), width=text_size, height=text_size)

      dpg.add_separator()

      content_tag = self._path_to_tag(path, "content")
      with dpg.child_window(tag=content_tag, border=False, height=-1, width=-1, no_scrollbar=True):
        panel.create_ui(content_tag)

  def _create_split_ui(self, layout: dict, parent_tag: str, path: list[int], width: int, height: int):
    """Create a split node: child containers laid out in a row/column with drag grips between them."""
    split_tag = self._path_to_tag(path, "split")
    orientation, _, pane_sizes = self._get_split_geometry(layout, (width, height))

    with dpg.group(tag=split_tag, parent=parent_tag, horizontal=orientation == 0):
      for i, child_layout in enumerate(layout["children"]):
        child_path = path + [i]
        container_tag = self._path_to_tag(child_path, "container")
        pane_width, pane_height = [(pane_sizes[i], -1), (-1, pane_sizes[i])][orientation]  # fill 2nd dim up to the border
        with dpg.child_window(tag=container_tag, width=pane_width, height=pane_height, border=False, no_scrollbar=True):
          child_width, child_height = [(pane_sizes[i], height), (width, pane_sizes[i])][orientation]
          self._create_ui_recursive(child_layout, container_tag, child_path, child_width, child_height)
        if i < len(layout["children"]) - 1:
          self._create_grip(split_tag, path, i, orientation)

  def clear_panel(self, panel):
    """Reset a panel's content (delegates to the panel itself)."""
    panel.clear()

  def delete_panel(self, panel_path: list[int]):
    """Delete the panel at *panel_path*, collapsing single-child splits afterwards."""
    if not panel_path:  # Root deletion
      # The root is always kept as a panel node: replace it with a fresh empty panel.
      old_panel = self.layout["panel"]
      old_panel.destroy_ui()
      self.active_panels.remove(old_panel)
      new_panel = TimeSeriesPanel(self.data_manager, self.playback_manager, self.worker_manager)
      self.layout = {"type": "panel", "panel": new_panel}
      self._rebuild_ui_at_path([])
      return

    parent, child_index = self._get_parent_and_index(panel_path)
    layout_to_delete = parent["children"][child_index]
    self._cleanup_ui_recursive(layout_to_delete, panel_path)

    parent["children"].pop(child_index)
    parent["proportions"].pop(child_index)

    if len(parent["children"]) == 1:  # remove parent and collapse
      remaining_child = parent["children"][0]
      if len(panel_path) == 1:  # parent is at root level - promote remaining child to root
        self.layout = remaining_child
        self._rebuild_ui_at_path([])
      else:  # replace parent with remaining child in grandparent
        grandparent_path = panel_path[:-2]
        parent_index = panel_path[-2]
        self._replace_layout_at_path(grandparent_path + [parent_index], remaining_child)
        self._rebuild_ui_at_path(grandparent_path + [parent_index])
    else:  # redistribute proportions
      equal_prop = 1.0 / len(parent["children"])
      parent["proportions"] = [equal_prop] * len(parent["children"])
      self._rebuild_ui_at_path(panel_path[:-1])

  def split_panel(self, panel_path: list[int], orientation: int):
    """Split the panel at *panel_path*, adding a new empty panel next to it.

    orientation 0 = side-by-side, 1 = stacked. Splitting in the parent's own
    orientation inserts a sibling; otherwise a new nested split node is created.
    """
    current_layout = self._get_layout_at_path(panel_path)
    existing_panel = current_layout["panel"]
    new_panel = TimeSeriesPanel(self.data_manager, self.playback_manager, self.worker_manager)
    parent, child_index = self._get_parent_and_index(panel_path)

    if parent is None:  # Root split
      self.layout = {
        "type": "split",
        "orientation": orientation,
        "children": [{"type": "panel", "panel": existing_panel}, {"type": "panel", "panel": new_panel}],
        "proportions": [0.5, 0.5],
      }
      self._rebuild_ui_at_path([])
    elif parent["type"] == "split" and parent["orientation"] == orientation:  # Same orientation - insert into existing split
      parent["children"].insert(child_index + 1, {"type": "panel", "panel": new_panel})
      parent["proportions"] = [1.0 / len(parent["children"])] * len(parent["children"])
      self._rebuild_ui_at_path(panel_path[:-1])
    else:  # Different orientation - create new split level
      new_split = {"type": "split", "orientation": orientation, "children": [current_layout, {"type": "panel", "panel": new_panel}], "proportions": [0.5, 0.5]}
      self._replace_layout_at_path(panel_path, new_split)
      self._rebuild_ui_at_path(panel_path)

  def _rebuild_ui_at_path(self, path: list[int]):
    """Destroy and recreate the widget subtree rooted at *path*."""
    layout = self._get_layout_at_path(path)
    if path:
      container_tag = self._path_to_tag(path, "container")
    else:  # Root update
      container_tag = self.parent_tag

    self._cleanup_ui_recursive(layout, path)
    dpg.delete_item(container_tag, children_only=True)
    width, height = dpg.get_item_rect_size(container_tag)
    self._create_ui_recursive(layout, container_tag, path, width, height)

  def _cleanup_ui_recursive(self, layout: dict, path: list[int]):
    """Destroy panel widgets and grip handlers for the subtree at *path* (bookkeeping only for splits)."""
    if layout["type"] == "panel":
      panel = layout["panel"]
      panel.destroy_ui()
      if panel in self.active_panels:
        self.active_panels.remove(panel)
    else:
      # One grip (and handler registry) sits between each pair of children.
      for i in range(len(layout["children"]) - 1):
        handler_tag = f"{self._path_to_tag(path, f'grip_{i}')}_handler"
        if dpg.does_item_exist(handler_tag):
          dpg.delete_item(handler_tag)
          self._created_handler_tags.discard(handler_tag)

      for i, child in enumerate(layout["children"]):
        self._cleanup_ui_recursive(child, path + [i])

  def update_all_panels(self):
    """Per-frame tick: finish a deferred first-frame resize, then update every live panel."""
    if self._queue_resize:
      if (size := dpg.get_item_rect_size(self.parent_tag)) != [0, 0]:
        self._queue_resize = False
        self._resize_splits_recursive(self.layout, [], *size)
    for panel in self.active_panels:
      panel.update()

  def on_viewport_resize(self):
    """Recompute all pane sizes after the viewport changed (sizes re-read from dpg)."""
    self._resize_splits_recursive(self.layout, [])

  def _resize_splits_recursive(self, layout: dict, path: list[int], width: int | None = None, height: int | None = None):
    """Resize the subtree at *path*; when width/height are None the parent's size is queried from dpg."""
    if layout["type"] == "split":
      split_tag = self._path_to_tag(path, "split")
      if dpg.does_item_exist(split_tag):
        available_sizes = (width, height) if width and height else dpg.get_item_rect_size(dpg.get_item_parent(split_tag))
        orientation, _, pane_sizes = self._get_split_geometry(layout, available_sizes)
        size_properties = ("width", "height")

        for i, child_layout in enumerate(layout["children"]):
          child_path = path + [i]
          container_tag = self._path_to_tag(child_path, "container")
          if dpg.does_item_exist(container_tag):
            # Only the split axis is set explicitly; the other axis fills the parent.
            dpg.configure_item(container_tag, **{size_properties[orientation]: pane_sizes[i]})
            child_width, child_height = [(pane_sizes[i], available_sizes[1]), (available_sizes[0], pane_sizes[i])][orientation]
            self._resize_splits_recursive(child_layout, child_path, child_width, child_height)
    else:  # leaf node/panel - adjust bar height to allow for scrollbar
      panel_tag = self._path_to_tag(path, "panel")
      if width is not None and width < int(329 * self.scale + 64):  # scaled widths of the elements in top bar + fixed 8 padding on left and right of each item
        dpg.configure_item(panel_tag, height=(int(13 * self.scale) + 24))
      else:
        dpg.configure_item(panel_tag, height=(int(13 * self.scale) + 8))

  def _get_split_geometry(self, layout: dict, available_size: tuple[int, int]) -> tuple[int, int, list[int]]:
    """Return (orientation, usable_size, per-pane pixel sizes) for a split node.

    usable_size is the space along the split axis left after subtracting the
    grips; each pane gets its proportion of it, floored at min_pane_size.
    """
    orientation = layout["orientation"]
    num_grips = len(layout["children"]) - 1
    usable_size = max(self.min_pane_size, available_size[orientation] - (num_grips * (self.grip_size + 8 * (2 - orientation))))  # approximate, scaling is weird
    pane_sizes = [max(self.min_pane_size, int(usable_size * prop)) for prop in layout["proportions"]]
    return orientation, usable_size, pane_sizes

  def _get_layout_at_path(self, path: list[int]) -> dict:
    """Walk the layout tree along *path* ([] returns the root)."""
    current = self.layout
    for index in path:
      current = current["children"][index]
    return current

  def _get_parent_and_index(self, path: list[int]) -> tuple:
    """Return (parent node, child index) of *path*, or (None, -1) for the root."""
    return (None, -1) if not path else (self._get_layout_at_path(path[:-1]), path[-1])

  def _replace_layout_at_path(self, path: list[int], new_layout: dict):
    """Swap the subtree at *path* for *new_layout* (tree mutation only, no UI work)."""
    if not path:
      self.layout = new_layout
    else:
      parent, index = self._get_parent_and_index(path)
      parent["children"][index] = new_layout

  def _path_to_tag(self, path: list[int], prefix: str = "") -> str:
    """Derive a stable dpg tag from a tree path, e.g. ([0, 1], 'panel') -> 'panel_0_1'."""
    path_str = "_".join(map(str, path)) if path else "root"
    return f"{prefix}_{path_str}" if prefix else path_str

  def _create_grip(self, parent_tag: str, path: list[int], grip_index: int, orientation: int):
    """Create the draggable divider after child *grip_index* of the split at *path*."""
    grip_tag = self._path_to_tag(path, f"grip_{grip_index}")
    handler_tag = f"{grip_tag}_handler"
    width, height = [(self.grip_size, -1), (-1, self.grip_size)][orientation]

    with dpg.child_window(tag=grip_tag, parent=parent_tag, width=width, height=height, no_scrollbar=True, border=False):
      # An invisible full-size button is the actual drag target.
      button_tag = dpg.add_button(label="", width=-1, height=-1)

    with dpg.item_handler_registry(tag=handler_tag):
      user_data = (path, grip_index, orientation)
      dpg.add_item_active_handler(callback=self._on_grip_drag, user_data=user_data)
      dpg.add_item_deactivated_handler(callback=self._on_grip_end, user_data=user_data)
    dpg.bind_item_handler_registry(button_tag, handler_tag)
    self._created_handler_tags.add(handler_tag)

  def _on_grip_drag(self, sender, app_data, user_data):
    """Fires every frame while a grip is held; rebalances the two adjacent panes."""
    path, grip_index, orientation = user_data
    layout = self._get_layout_at_path(path)

    # First frame of the drag: record the starting proportions and mouse position.
    if "_drag_data" not in layout:
      layout["_drag_data"] = {"initial_proportions": layout["proportions"][:], "start_mouse": dpg.get_mouse_pos(local=False)[orientation]}
      return

    drag_data = layout["_drag_data"]
    split_tag = self._path_to_tag(path, "split")
    if not dpg.does_item_exist(split_tag):
      return

    _, usable_size, _ = self._get_split_geometry(layout, dpg.get_item_rect_size(split_tag))
    current_coord = dpg.get_mouse_pos(local=False)[orientation]
    delta = current_coord - drag_data["start_mouse"]
    delta_prop = delta / usable_size

    left_idx = grip_index
    right_idx = left_idx + 1
    initial = drag_data["initial_proportions"]
    min_prop = self.min_pane_size / usable_size

    # Each side gets at least min_prop; the clamp below keeps their sum constant.
    new_left = max(min_prop, initial[left_idx] + delta_prop)
    new_right = max(min_prop, initial[right_idx] - delta_prop)

    total_available = initial[left_idx] + initial[right_idx]
    if new_left + new_right > total_available:
      if new_left > new_right:
        new_left = total_available - new_right
      else:
        new_right = total_available - new_left

    layout["proportions"] = initial[:]
    layout["proportions"][left_idx] = new_left
    layout["proportions"][right_idx] = new_right

    self._resize_splits_recursive(layout, path)

  def _on_grip_end(self, sender, app_data, user_data):
    """Drag released: discard the per-drag state stored on the split node."""
    path, _, _ = user_data
    self._get_layout_at_path(path).pop("_drag_data", None)
|
||||
@@ -1,128 +0,0 @@
|
||||
tabs:
|
||||
'0':
|
||||
name: Lateral Plan Conformance
|
||||
panel_layout:
|
||||
type: split
|
||||
orientation: 1
|
||||
proportions:
|
||||
- 0.3333333333333333
|
||||
- 0.3333333333333333
|
||||
- 0.3333333333333333
|
||||
children:
|
||||
- type: panel
|
||||
panel:
|
||||
type: timeseries
|
||||
title: desired vs actual
|
||||
series_paths:
|
||||
- controlsState/lateralControlState/torqueState/desiredLateralAccel
|
||||
- controlsState/lateralControlState/torqueState/actualLateralAccel
|
||||
- type: panel
|
||||
panel:
|
||||
type: timeseries
|
||||
title: ff vs output
|
||||
series_paths:
|
||||
- controlsState/lateralControlState/torqueState/f
|
||||
- carState/steeringPressed
|
||||
- carControl/actuators/torque
|
||||
- type: panel
|
||||
panel:
|
||||
type: timeseries
|
||||
title: vehicle speed
|
||||
series_paths:
|
||||
- carState/vEgo
|
||||
'1':
|
||||
name: Actuator Performance
|
||||
panel_layout:
|
||||
type: split
|
||||
orientation: 1
|
||||
proportions:
|
||||
- 0.3333333333333333
|
||||
- 0.3333333333333333
|
||||
- 0.3333333333333333
|
||||
children:
|
||||
- type: panel
|
||||
panel:
|
||||
type: timeseries
|
||||
title: calc vs learned latAccelFactor
|
||||
series_paths:
|
||||
- liveTorqueParameters/latAccelFactorFiltered
|
||||
- liveTorqueParameters/latAccelFactorRaw
|
||||
- carParams/lateralTuning/torque/latAccelFactor
|
||||
- type: panel
|
||||
panel:
|
||||
type: timeseries
|
||||
title: learned latAccelOffset
|
||||
series_paths:
|
||||
- liveTorqueParameters/latAccelOffsetRaw
|
||||
- liveTorqueParameters/latAccelOffsetFiltered
|
||||
- type: panel
|
||||
panel:
|
||||
type: timeseries
|
||||
title: calc vs learned friction
|
||||
series_paths:
|
||||
- liveTorqueParameters/frictionCoefficientFiltered
|
||||
- liveTorqueParameters/frictionCoefficientRaw
|
||||
- carParams/lateralTuning/torque/friction
|
||||
'2':
|
||||
name: Vehicle Dynamics
|
||||
panel_layout:
|
||||
type: split
|
||||
orientation: 1
|
||||
proportions:
|
||||
- 0.3333333333333333
|
||||
- 0.3333333333333333
|
||||
- 0.3333333333333333
|
||||
children:
|
||||
- type: panel
|
||||
panel:
|
||||
type: timeseries
|
||||
title: initial vs learned steerRatio
|
||||
series_paths:
|
||||
- carParams/steerRatio
|
||||
- liveParameters/steerRatio
|
||||
- type: panel
|
||||
panel:
|
||||
type: timeseries
|
||||
title: initial vs learned tireStiffnessFactor
|
||||
series_paths:
|
||||
- carParams/tireStiffnessFactor
|
||||
- liveParameters/stiffnessFactor
|
||||
- type: panel
|
||||
panel:
|
||||
type: timeseries
|
||||
title: live steering angle offsets
|
||||
series_paths:
|
||||
- liveParameters/angleOffsetDeg
|
||||
- liveParameters/angleOffsetAverageDeg
|
||||
'3':
|
||||
name: Controller PIF Terms
|
||||
panel_layout:
|
||||
type: split
|
||||
orientation: 1
|
||||
proportions:
|
||||
- 0.3333333333333333
|
||||
- 0.3333333333333333
|
||||
- 0.3333333333333333
|
||||
children:
|
||||
- type: panel
|
||||
panel:
|
||||
type: timeseries
|
||||
title: ff vs output
|
||||
series_paths:
|
||||
- carControl/actuators/torque
|
||||
- controlsState/lateralControlState/torqueState/f
|
||||
- carState/steeringPressed
|
||||
- type: panel
|
||||
panel:
|
||||
type: timeseries
|
||||
title: PIF terms
|
||||
series_paths:
|
||||
- controlsState/lateralControlState/torqueState/f
|
||||
- controlsState/lateralControlState/torqueState/p
|
||||
- controlsState/lateralControlState/torqueState/i
|
||||
- type: panel
|
||||
panel:
|
||||
type: timeseries
|
||||
title: road roll angle
|
||||
series_paths:
|
||||
- liveParameters/roll
|
||||
@@ -1,368 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
import argparse
|
||||
import os
|
||||
import dearpygui.dearpygui as dpg
|
||||
import multiprocessing
|
||||
import uuid
|
||||
import signal
|
||||
import yaml
|
||||
from openpilot.common.swaglog import cloudlog
|
||||
from openpilot.common.basedir import BASEDIR
|
||||
from openpilot.tools.jotpluggler.data import DataManager
|
||||
from openpilot.tools.jotpluggler.datatree import DataTree
|
||||
from openpilot.tools.jotpluggler.layout import LayoutManager
|
||||
|
||||
DEMO_ROUTE = "5beb9b58bd12b691/0000010a--a51155e496"
|
||||
|
||||
|
||||
class WorkerManager:
  """Thin wrapper around a multiprocessing.Pool for running background tasks.

  Tasks are tracked by id so a resubmission with the same id supersedes the
  previous one, and so outstanding work can be torn down in shutdown().
  """

  def __init__(self, max_workers=None):
    # Workers ignore SIGINT (see worker_initializer) so Ctrl-C only interrupts the UI process.
    self.pool = multiprocessing.Pool(max_workers or min(4, multiprocessing.cpu_count()), initializer=WorkerManager.worker_initializer)
    self.active_tasks = {}  # task_id -> AsyncResult

  def submit_task(self, func, args_list, callback=None, task_id=None):
    """Run *func* over *args_list* asynchronously via starmap.

    *callback*, if given, receives the list of results on success. Returns the
    task id (generated when not supplied).
    """
    task_id = task_id or str(uuid.uuid4())

    # An in-flight task with the same id cannot actually be cancelled:
    # multiprocessing's AsyncResult has no terminate()/cancel() API (the old
    # code called .terminate(), which always raised AttributeError and was
    # silently swallowed). We just drop our reference; the superseded task may
    # still run to completion in the pool.
    self.active_tasks.pop(task_id, None)

    def handle_success(result):
      # Runs on the pool's result-handler thread.
      self.active_tasks.pop(task_id, None)
      if callback:
        try:
          callback(result)
        except Exception as e:
          print(f"Callback for task {task_id} failed: {e}")

    def handle_error(error):
      self.active_tasks.pop(task_id, None)
      print(f"Task {task_id} failed: {error}")

    async_result = self.pool.starmap_async(func, args_list, callback=handle_success, error_callback=handle_error)
    self.active_tasks[task_id] = async_result
    return task_id

  @staticmethod
  def worker_initializer():
    # Let the main process own Ctrl-C handling; workers die with the pool.
    signal.signal(signal.SIGINT, signal.SIG_IGN)

  def shutdown(self):
    """Terminate the pool immediately; queued and running tasks are abandoned."""
    self.active_tasks.clear()
    self.pool.terminate()
    self.pool.join()
|
||||
|
||||
|
||||
class PlaybackManager:
  """Holds playback state (time cursor, play/pause, route duration) and
  broadcasts shared x-axis bounds to interested plot panels."""

  def __init__(self):
    self.is_playing = False
    self.current_time_s = 0.0
    self.duration_s = 0.0
    self.num_segments = 0

    self.x_axis_bounds = (0.0, 0.0)  # (min_time, max_time) shared across panels
    self.x_axis_observers = []  # callables invoked whenever the bounds change
    self._updating_x_axis = False  # re-entrancy guard while notifying observers

  def set_route_duration(self, duration: float):
    """Set the total route length and clamp the cursor into the new range."""
    self.duration_s = duration
    target = self.current_time_s if self.current_time_s < duration else duration
    self.seek(target)

  def toggle_play_pause(self):
    """Flip play/pause; restarts from 0 when resuming at the end of the route."""
    at_end = self.current_time_s >= self.duration_s
    if at_end and not self.is_playing:
      self.seek(0.0)
    self.is_playing = not self.is_playing
    icon = "pause_texture" if self.is_playing else "play_texture"
    dpg.configure_item("play_pause_button", texture_tag=icon)

  def seek(self, time_s: float):
    """Move the cursor to *time_s*, clamped to [0, duration]."""
    clamped = time_s if time_s < self.duration_s else self.duration_s
    self.current_time_s = clamped if clamped > 0.0 else 0.0

  def update_time(self, delta_t: float):
    """Advance the cursor by *delta_t* while playing; auto-pause at the end.

    Returns the (possibly updated) current time.
    """
    if self.is_playing:
      advanced = self.current_time_s + delta_t
      self.current_time_s = advanced if advanced < self.duration_s else self.duration_s
      if self.current_time_s >= self.duration_s:
        self.is_playing = False
        dpg.configure_item("play_pause_button", texture_tag="play_texture")
    return self.current_time_s

  def set_x_axis_bounds(self, min_time: float, max_time: float, source_panel=None):
    """Update the shared x-axis bounds and notify observers.

    No-op when the bounds are unchanged or when called re-entrantly from an
    observer callback. *source_panel* is forwarded so the originating panel can
    ignore its own notification.
    """
    if self._updating_x_axis:
      return

    bounds = (min_time, max_time)
    if bounds == self.x_axis_bounds:
      return

    self.x_axis_bounds = bounds
    self._updating_x_axis = True  # prevent recursive updates

    try:
      for observer in self.x_axis_observers:
        try:
          observer(min_time, max_time, source_panel)
        except Exception as e:
          print(f"Error in x-axis sync callback: {e}")
    finally:
      self._updating_x_axis = False

  def add_x_axis_observer(self, callback):
    """Register *callback* for bounds changes (idempotent)."""
    if callback in self.x_axis_observers:
      return
    self.x_axis_observers.append(callback)

  def remove_x_axis_observer(self, callback):
    """Unregister *callback* if it is currently registered."""
    if callback in self.x_axis_observers:
      self.x_axis_observers.remove(callback)
|
||||
|
||||
class MainController:
|
||||
  def __init__(self, scale: float = 1.0):
    """Wire up the top-level application components (data, playback, workers, tree, layout)."""
    self.scale = scale
    self.data_manager = DataManager()
    self.playback_manager = PlaybackManager()
    self.worker_manager = WorkerManager()
    self._create_global_themes()  # themes must exist before any UI binds them
    self.data_tree = DataTree(self.data_manager, self.playback_manager)
    self.layout_manager = LayoutManager(self.data_manager, self.playback_manager, self.worker_manager, scale=self.scale)
    # on_data_loaded receives progress/reset events while a route loads.
    self.data_manager.add_observer(self.on_data_loaded)
    self._total_segments = 0  # segment count of the route currently being loaded
|
||||
|
||||
  def _create_global_themes(self):
    """Register the dpg themes referenced by tag throughout the app.

    Created once at startup: plot line/timeline themes (thickness scaled with
    the UI scale) and the active/inactive tab and tab-bar color themes.
    """
    with dpg.theme(tag="line_theme"):
      with dpg.theme_component(dpg.mvLineSeries):
        scaled_thickness = max(1.0, self.scale)
        dpg.add_theme_style(dpg.mvPlotStyleVar_LineWeight, scaled_thickness, category=dpg.mvThemeCat_Plots)

    # The timeline cursor: a semi-transparent red infinite line.
    with dpg.theme(tag="timeline_theme"):
      with dpg.theme_component(dpg.mvInfLineSeries):
        scaled_thickness = max(1.0, self.scale)
        dpg.add_theme_style(dpg.mvPlotStyleVar_LineWeight, scaled_thickness, category=dpg.mvThemeCat_Plots)
        dpg.add_theme_color(dpg.mvPlotCol_Line, (255, 0, 0, 128), category=dpg.mvThemeCat_Plots)

    # Tab themes color all three widget kinds a tab is built from.
    for tag, color in (("active_tab_theme", (37, 37, 38, 255)), ("inactive_tab_theme", (70, 70, 75, 255))):
      with dpg.theme(tag=tag):
        for cmp, target in ((dpg.mvChildWindow, dpg.mvThemeCol_ChildBg), (dpg.mvInputText, dpg.mvThemeCol_FrameBg), (dpg.mvImageButton, dpg.mvThemeCol_Button)):
          with dpg.theme_component(cmp):
            dpg.add_theme_color(target, color)

    with dpg.theme(tag="tab_bar_theme"):
      with dpg.theme_component(dpg.mvChildWindow):
        dpg.add_theme_color(dpg.mvThemeCol_ChildBg, (51, 51, 55, 255))
|
||||
|
||||
def on_data_loaded(self, data: dict):
|
||||
duration = data.get('duration', 0.0)
|
||||
self.playback_manager.set_route_duration(duration)
|
||||
|
||||
if data.get('metadata_loaded'):
|
||||
self.playback_manager.num_segments = data.get('total_segments', 0)
|
||||
self._total_segments = data.get('total_segments', 0)
|
||||
dpg.set_value("load_status", f"Loading... 0/{self._total_segments} segments processed")
|
||||
elif data.get('reset'):
|
||||
self.playback_manager.current_time_s = 0.0
|
||||
self.playback_manager.duration_s = 0.0
|
||||
self.playback_manager.is_playing = False
|
||||
self._total_segments = 0
|
||||
dpg.set_value("load_status", "Loading...")
|
||||
dpg.set_value("timeline_slider", 0.0)
|
||||
dpg.configure_item("timeline_slider", max_value=0.0)
|
||||
dpg.configure_item("play_pause_button", texture_tag="play_texture")
|
||||
dpg.configure_item("load_button", enabled=True)
|
||||
elif data.get('loading_complete'):
|
||||
num_paths = len(self.data_manager.get_all_paths())
|
||||
dpg.set_value("load_status", f"Loaded {num_paths} data paths")
|
||||
dpg.configure_item("load_button", enabled=True)
|
||||
elif data.get('segment_added'):
|
||||
segment_count = data.get('segment_count', 0)
|
||||
dpg.set_value("load_status", f"Loading... {segment_count}/{self._total_segments} segments processed")
|
||||
|
||||
dpg.configure_item("timeline_slider", max_value=duration)
|
||||
|
||||
def save_layout_to_yaml(self, filepath: str):
|
||||
layout_dict = self.layout_manager.to_dict()
|
||||
with open(filepath, 'w') as f:
|
||||
yaml.dump(layout_dict, f, default_flow_style=False, sort_keys=False)
|
||||
|
||||
def load_layout_from_yaml(self, filepath: str):
|
||||
with open(filepath) as f:
|
||||
layout_dict = yaml.safe_load(f)
|
||||
self.layout_manager.clear_and_load_from_dict(layout_dict)
|
||||
self.layout_manager.create_ui("main_plot_area")
|
||||
|
||||
def save_layout_dialog(self):
|
||||
if dpg.does_item_exist("save_layout_dialog"):
|
||||
dpg.delete_item("save_layout_dialog")
|
||||
with dpg.file_dialog(
|
||||
callback=self._save_layout_callback, tag="save_layout_dialog", width=int(700 * self.scale), height=int(400 * self.scale),
|
||||
default_filename="layout", default_path=os.path.join(os.path.dirname(os.path.realpath(__file__)), "layouts")
|
||||
):
|
||||
dpg.add_file_extension(".yaml")
|
||||
|
||||
def load_layout_dialog(self):
|
||||
if dpg.does_item_exist("load_layout_dialog"):
|
||||
dpg.delete_item("load_layout_dialog")
|
||||
with dpg.file_dialog(
|
||||
callback=self._load_layout_callback, tag="load_layout_dialog", width=int(700 * self.scale), height=int(400 * self.scale),
|
||||
default_path=os.path.join(os.path.dirname(os.path.realpath(__file__)), "layouts")
|
||||
):
|
||||
dpg.add_file_extension(".yaml")
|
||||
|
||||
def _save_layout_callback(self, sender, app_data):
|
||||
filepath = app_data['file_path_name']
|
||||
try:
|
||||
self.save_layout_to_yaml(filepath)
|
||||
dpg.set_value("load_status", f"Layout saved to {os.path.basename(filepath)}")
|
||||
except Exception:
|
||||
dpg.set_value("load_status", "Error saving layout")
|
||||
cloudlog.exception(f"Error saving layout to {filepath}")
|
||||
dpg.delete_item("save_layout_dialog")
|
||||
|
||||
def _load_layout_callback(self, sender, app_data):
|
||||
filepath = app_data['file_path_name']
|
||||
try:
|
||||
self.load_layout_from_yaml(filepath)
|
||||
dpg.set_value("load_status", f"Layout loaded from {os.path.basename(filepath)}")
|
||||
except Exception:
|
||||
dpg.set_value("load_status", "Error loading layout")
|
||||
cloudlog.exception(f"Error loading layout from {filepath}:")
|
||||
dpg.delete_item("load_layout_dialog")
|
||||
|
||||
def setup_ui(self):
|
||||
with dpg.texture_registry():
|
||||
script_dir = os.path.dirname(os.path.realpath(__file__))
|
||||
for image in ["play", "pause", "x", "split_h", "split_v", "plus"]:
|
||||
texture = dpg.load_image(os.path.join(script_dir, "assets", f"{image}.png"))
|
||||
dpg.add_static_texture(width=texture[0], height=texture[1], default_value=texture[3], tag=f"{image}_texture")
|
||||
|
||||
with dpg.window(tag="Primary Window"):
|
||||
with dpg.group(horizontal=True):
|
||||
# Left panel - Data tree
|
||||
with dpg.child_window(label="Sidebar", width=int(300 * self.scale), tag="sidebar_window", border=True, resizable_x=True):
|
||||
with dpg.group(horizontal=True):
|
||||
dpg.add_input_text(tag="route_input", width=int(-75 * self.scale), hint="Enter route name...")
|
||||
dpg.add_button(label="Load", callback=self.load_route, tag="load_button", width=-1)
|
||||
dpg.add_text("Ready to load route", tag="load_status")
|
||||
dpg.add_separator()
|
||||
|
||||
with dpg.table(header_row=False, policy=dpg.mvTable_SizingStretchProp):
|
||||
dpg.add_table_column(init_width_or_weight=0.5)
|
||||
dpg.add_table_column(init_width_or_weight=0.5)
|
||||
with dpg.table_row():
|
||||
dpg.add_button(label="Save Layout", callback=self.save_layout_dialog, width=-1)
|
||||
dpg.add_button(label="Load Layout", callback=self.load_layout_dialog, width=-1)
|
||||
dpg.add_separator()
|
||||
|
||||
self.data_tree.create_ui("sidebar_window")
|
||||
|
||||
# Right panel - Plots and timeline
|
||||
with dpg.group(tag="right_panel"):
|
||||
with dpg.child_window(label="Plot Window", border=True, height=int(-(32 + 13 * self.scale)), tag="main_plot_area"):
|
||||
self.layout_manager.create_ui("main_plot_area")
|
||||
|
||||
with dpg.child_window(label="Timeline", border=True):
|
||||
with dpg.table(header_row=False):
|
||||
btn_size = int(13 * self.scale)
|
||||
dpg.add_table_column(width_fixed=True, init_width_or_weight=(btn_size + 8)) # Play button
|
||||
dpg.add_table_column(width_stretch=True) # Timeline slider
|
||||
dpg.add_table_column(width_fixed=True, init_width_or_weight=int(50 * self.scale)) # FPS counter
|
||||
with dpg.table_row():
|
||||
dpg.add_image_button(texture_tag="play_texture", tag="play_pause_button", callback=self.toggle_play_pause, width=btn_size, height=btn_size)
|
||||
dpg.add_slider_float(tag="timeline_slider", default_value=0.0, label="", width=-1, callback=self.timeline_drag)
|
||||
dpg.add_text("", tag="fps_counter")
|
||||
with dpg.item_handler_registry(tag="plot_resize_handler"):
|
||||
dpg.add_item_resize_handler(callback=self.on_plot_resize)
|
||||
dpg.bind_item_handler_registry("right_panel", "plot_resize_handler")
|
||||
|
||||
dpg.set_primary_window("Primary Window", True)
|
||||
|
||||
def on_plot_resize(self, sender, app_data, user_data):
|
||||
self.layout_manager.on_viewport_resize()
|
||||
|
||||
def load_route(self):
|
||||
route_name = dpg.get_value("route_input").strip()
|
||||
if route_name:
|
||||
dpg.set_value("load_status", "Loading route...")
|
||||
dpg.configure_item("load_button", enabled=False)
|
||||
self.data_manager.load_route(route_name)
|
||||
|
||||
def toggle_play_pause(self, sender):
|
||||
self.playback_manager.toggle_play_pause()
|
||||
|
||||
def timeline_drag(self, sender, app_data):
|
||||
self.playback_manager.seek(app_data)
|
||||
|
||||
def update_frame(self, font):
|
||||
self.data_tree.update_frame(font)
|
||||
|
||||
new_time = self.playback_manager.update_time(dpg.get_delta_time())
|
||||
if not dpg.is_item_active("timeline_slider"):
|
||||
dpg.set_value("timeline_slider", new_time)
|
||||
|
||||
self.layout_manager.update_all_panels()
|
||||
|
||||
dpg.set_value("fps_counter", f"{dpg.get_frame_rate():.1f} FPS")
|
||||
|
||||
def shutdown(self):
|
||||
self.worker_manager.shutdown()
|
||||
|
||||
|
||||
def main(route_to_load=None, layout_to_load=None):
  """Create the DearPyGui context, build the UI and run the render loop.

  route_to_load: optional route name to type into the route box and load on startup.
  layout_to_load: optional path to a YAML layout file applied on startup.
  """
  dpg.create_context()

  # TODO: find better way of calculating display scaling
  #try:
  #  w, h = next(tuple(map(int, l.split()[0].split('x'))) for l in subprocess.check_output(['xrandr']).decode().split('\n') if '*' in l) # actual resolution
  #  scale = pyautogui.size()[0] / w # scaled resolution
  #except Exception:
  #  scale = 1
  scale = 1

  with dpg.font_registry():
    default_font = dpg.add_font(os.path.join(BASEDIR, "selfdrive/assets/fonts/JetBrainsMono-Medium.ttf"), int(13 * scale * 2))  # 2x then scale for hidpi
    dpg.bind_font(default_font)
    dpg.set_global_font_scale(0.5)

  viewport_width, viewport_height = int(1200 * scale), int(800 * scale)
  dpg.create_viewport(
    title='JotPluggler', width=viewport_width, height=viewport_height,
  )
  dpg.setup_dearpygui()

  controller = MainController(scale=scale)
  controller.setup_ui()

  if layout_to_load:
    # Best-effort: a bad layout file must not prevent the app from starting.
    try:
      controller.load_layout_from_yaml(layout_to_load)
      print(f"Loaded layout from {layout_to_load}")
    except Exception as e:
      print(f"Failed to load layout from {layout_to_load}: {e}")
      cloudlog.exception(f"Error loading layout from {layout_to_load}")

  if route_to_load:
    dpg.set_value("route_input", route_to_load)
    controller.load_route()

  dpg.show_viewport()

  # Main loop
  try:
    while dpg.is_dearpygui_running():
      controller.update_frame(default_font)
      dpg.render_dearpygui_frame()
  finally:
    # Always release worker threads and the dpg context, even if a frame raised.
    controller.shutdown()
    dpg.destroy_context()
|
||||
|
||||
if __name__ == "__main__":
  # CLI entry point: optional positional route, plus demo/layout flags.
  arg_parser = argparse.ArgumentParser(description="A tool for visualizing openpilot logs.")
  arg_parser.add_argument("--demo", action="store_true", help="Use the demo route instead of providing one")
  arg_parser.add_argument("--layout", type=str, help="Path to YAML layout file to load on startup")
  arg_parser.add_argument("route", nargs='?', default=None, help="Optional route name to load on startup.")
  cli_args = arg_parser.parse_args()
  main(route_to_load=DEMO_ROUTE if cli_args.demo else cli_args.route, layout_to_load=cli_args.layout)
|
||||
@@ -1,294 +0,0 @@
|
||||
import uuid
|
||||
import threading
|
||||
import numpy as np
|
||||
from collections import deque
|
||||
import dearpygui.dearpygui as dpg
|
||||
from abc import ABC, abstractmethod
|
||||
|
||||
|
||||
class ViewPanel(ABC):
  """Abstract base class for all view panels that can be displayed in a plot container"""

  def __init__(self, panel_id: str | None = None):
    # A stable unique id; subclasses use it to namespace their dpg tags.
    self.panel_id = panel_id or str(uuid.uuid4())
    self.title = "Untitled Panel"

  @abstractmethod
  def clear(self):
    """Remove all content from the panel."""
    pass

  @abstractmethod
  def create_ui(self, parent_tag: str):
    """Build the panel's dpg widgets under the given parent tag."""
    pass

  @abstractmethod
  def destroy_ui(self):
    """Tear down the panel's dpg widgets and detach any observers."""
    pass

  @abstractmethod
  def get_panel_type(self) -> str:
    """Return a short type identifier string (used in serialized layouts)."""
    pass

  @abstractmethod
  def update(self):
    """Per-frame update hook, called from the main render loop."""
    pass

  @abstractmethod
  def to_dict(self) -> dict:
    """Serialize the panel's configuration to a plain dict (for YAML layouts)."""
    pass

  @classmethod
  @abstractmethod
  def load_from_dict(cls, data: dict, data_manager, playback_manager, worker_manager):
    """Reconstruct a panel from a dict previously produced by to_dict()."""
    pass
|
||||
|
||||
|
||||
class TimeSeriesPanel(ViewPanel):
  """A plot panel that draws one line series per data path, synced to playback time.

  Expensive min/max downsampling runs on worker threads; results are queued and
  applied on the main thread in update(). Cross-panel x-axis zoom/pan is kept in
  sync through the playback manager's x-axis observer mechanism.
  """

  def __init__(self, data_manager, playback_manager, worker_manager, panel_id: str | None = None):
    super().__init__(panel_id)
    self.data_manager = data_manager
    self.playback_manager = playback_manager
    self.worker_manager = worker_manager
    self.title = "Time Series Plot"
    # dpg tags are namespaced by panel_id so multiple panels can coexist.
    self.plot_tag = f"plot_{self.panel_id}"
    self.x_axis_tag = f"{self.plot_tag}_x_axis"
    self.y_axis_tag = f"{self.plot_tag}_y_axis"
    self.timeline_indicator_tag = f"{self.plot_tag}_timeline"
    self._ui_created = False
    # path -> (time_array, value_array): full-resolution data per plotted series.
    self._series_data: dict[str, tuple[np.ndarray, np.ndarray]] = {}
    self._last_plot_duration = 0  # x-span at the time of the last downsample pass
    self._update_lock = threading.RLock()
    # Downsampled batches from worker threads, drained on the main thread in update().
    self._results_deque: deque[list[tuple[str, np.ndarray, np.ndarray]]] = deque()
    self._new_data = False
    self._last_x_limits = (0.0, 0.0)
    self._queued_x_sync: tuple | None = None  # (min, max) x-limits to apply next frame
    self._queued_reallow_x_zoom = False
    self._total_segments = self.playback_manager.num_segments

  def to_dict(self) -> dict:
    """Serialize for YAML layouts: only the series paths are persisted, not data."""
    return {
      "type": "timeseries",
      "title": self.title,
      "series_paths": list(self._series_data.keys())
    }

  @classmethod
  def load_from_dict(cls, data: dict, data_manager, playback_manager, worker_manager):
    """Rebuild a panel from a serialized layout dict."""
    panel = cls(data_manager, playback_manager, worker_manager)
    panel.title = data.get("title", "Time Series Plot")
    # Placeholder empty arrays; real data is fetched by add_series() once loaded.
    panel._series_data = {path: (np.array([]), np.array([])) for path in data.get("series_paths", [])}
    return panel

  def create_ui(self, parent_tag: str):
    """Create the plot widget and register for data/x-axis notifications."""
    self.data_manager.add_observer(self.on_data_loaded)
    self.playback_manager.add_x_axis_observer(self._on_x_axis_sync)
    with dpg.plot(height=-1, width=-1, tag=self.plot_tag, parent=parent_tag, drop_callback=self._on_series_drop, payload_type="TIMESERIES_PAYLOAD"):
      dpg.add_plot_legend()
      dpg.add_plot_axis(dpg.mvXAxis, no_label=True, tag=self.x_axis_tag)
      dpg.add_plot_axis(dpg.mvYAxis, no_label=True, tag=self.y_axis_tag)
      # Vertical line marking the current playback position.
      timeline_series_tag = dpg.add_inf_line_series(x=[0], label="Timeline", parent=self.y_axis_tag, tag=self.timeline_indicator_tag)
      dpg.bind_item_theme(timeline_series_tag, "timeline_theme")

    self._new_data = True
    self._queued_x_sync = self.playback_manager.x_axis_bounds  # adopt the shared x-range
    self._ui_created = True

  def update(self):
    """Per-frame update; all dpg mutations happen here on the main thread."""
    with self._update_lock:
      if not self._ui_created:
        return

      # Apply a queued x-range (from another panel or new data) before anything else.
      if self._queued_x_sync:
        min_time, max_time = self._queued_x_sync
        self._queued_x_sync = None
        dpg.set_axis_limits(self.x_axis_tag, min_time, max_time)
        self._last_x_limits = (min_time, max_time)
        self._fit_y_axis(min_time, max_time)
        self._queued_reallow_x_zoom = True  # must wait a frame before allowing user changes so that axis limits take effect
        return

      if self._queued_reallow_x_zoom:
        self._queued_reallow_x_zoom = False
        if tuple(dpg.get_axis_limits(self.x_axis_tag)) == self._last_x_limits:
          dpg.set_axis_limits_auto(self.x_axis_tag)
        else:
          self._queued_x_sync = self._last_x_limits  # retry, likely too early
        return

      if self._new_data:  # handle new data in main thread
        self._new_data = False
        if self._total_segments > 0:
          # Constrain zoom-out to roughly the route length (60s per segment plus margin).
          dpg.set_axis_limits_constraints(self.x_axis_tag, -10, self._total_segments * 60 + 10)
        self._fit_y_axis(*dpg.get_axis_limits(self.x_axis_tag))
        for series_path in list(self._series_data.keys()):
          self.add_series(series_path, update=True)

      current_limits = dpg.get_axis_limits(self.x_axis_tag)
      # downsample if plot zoom changed significantly
      plot_duration = current_limits[1] - current_limits[0]
      if plot_duration > self._last_plot_duration * 2 or plot_duration < self._last_plot_duration * 0.5:
        self._downsample_all_series(plot_duration)
      # sync x-axis if changed by user
      if self._last_x_limits != current_limits:
        self.playback_manager.set_x_axis_bounds(current_limits[0], current_limits[1], source_panel=self)
        self._last_x_limits = current_limits
        self._fit_y_axis(current_limits[0], current_limits[1])

      while self._results_deque:  # handle downsampled results in main thread
        results = self._results_deque.popleft()
        for series_path, downsampled_time, downsampled_values in results:
          series_tag = f"series_{self.panel_id}_{series_path}"
          if dpg.does_item_exist(series_tag):
            dpg.set_value(series_tag, (downsampled_time, downsampled_values.astype(float)))

      # update timeline
      current_time_s = self.playback_manager.current_time_s
      dpg.set_value(self.timeline_indicator_tag, [[current_time_s], [0]])

      # update timeseries legend label
      for series_path, (time_array, value_array) in self._series_data.items():
        # Index of the last sample at or before the playhead; -1 if none yet.
        position = np.searchsorted(time_array, current_time_s, side='right') - 1
        # Only show a value if the sample is fresh (no more than 1s old).
        if position >= 0 and (current_time_s - time_array[position]) <= 1.0:
          value = value_array[position]
          formatted_value = f"{value:.5f}" if np.issubdtype(type(value), np.floating) else str(value)
          series_tag = f"series_{self.panel_id}_{series_path}"
          if dpg.does_item_exist(series_tag):
            dpg.configure_item(series_tag, label=f"{series_path}: {formatted_value}")

  def _on_x_axis_sync(self, min_time: float, max_time: float, source_panel):
    # Another panel changed the shared x-range: queue it for our next frame.
    with self._update_lock:
      if source_panel != self:
        self._queued_x_sync = (min_time, max_time)

  def _fit_y_axis(self, x_min: float, x_max: float):
    """Fit the y-axis to the data visible in [x_min, x_max], with 10% padding."""
    if not self._series_data:
      dpg.set_axis_limits(self.y_axis_tag, -1, 1)
      return

    global_min = float('inf')
    global_max = float('-inf')
    found_data = False

    for time_array, value_array in self._series_data.values():
      if len(time_array) == 0:
        continue
      start_idx, end_idx = np.searchsorted(time_array, [x_min, x_max])
      end_idx = min(end_idx, len(time_array) - 1)
      if start_idx <= end_idx:
        y_slice = value_array[start_idx:end_idx + 1]
        series_min, series_max = np.min(y_slice), np.max(y_slice)
        global_min = min(global_min, series_min)
        global_max = max(global_max, series_max)
        found_data = True

    if not found_data:
      # Nothing visible in this window: fall back to a neutral [-1, 1] range.
      dpg.set_axis_limits(self.y_axis_tag, -1, 1)
      return

    if global_min == global_max:
      # Flat data: pad by 10% of magnitude (at least 1.0) so the line isn't on the border.
      padding = max(abs(global_min) * 0.1, 1.0)
      y_min, y_max = global_min - padding, global_max + padding
    else:
      range_size = global_max - global_min
      padding = range_size * 0.1
      y_min, y_max = global_min - padding, global_max + padding

    dpg.set_axis_limits(self.y_axis_tag, y_min, y_max)

  def _downsample_all_series(self, plot_duration):
    """Re-downsample every series to roughly one point per pixel of the current view."""
    plot_width = dpg.get_item_rect_size(self.plot_tag)[0]
    if plot_width <= 0 or plot_duration <= 0:
      return

    self._last_plot_duration = plot_duration
    target_points_per_second = plot_width / plot_duration
    work_items = []
    for series_path, (time_array, value_array) in self._series_data.items():
      if len(time_array) == 0:
        continue
      series_duration = time_array[-1] - time_array[0] if len(time_array) > 1 else 1
      points_per_second = len(time_array) / series_duration
      if points_per_second > target_points_per_second * 2:
        # Dense series: hand off to a worker thread for min/max downsampling.
        target_points = max(int(target_points_per_second * series_duration), plot_width)
        work_items.append((series_path, time_array, value_array, target_points))
      elif dpg.does_item_exist(f"series_{self.panel_id}_{series_path}"):
        # Already sparse enough: plot the full-resolution data directly.
        dpg.set_value(f"series_{self.panel_id}_{series_path}", (time_array, value_array.astype(float)))

    if work_items:
      # task_id appears intended to dedupe pending downsample requests per panel
      # (NOTE(review): depends on WorkerManager semantics -- confirm).
      self.worker_manager.submit_task(
        TimeSeriesPanel._downsample_worker, work_items, callback=lambda results: self._results_deque.append(results), task_id=f"downsample_{self.panel_id}"
      )

  def add_series(self, series_path: str, update: bool = False):
    """Add a line series for `series_path` (or refresh its data when update=True)."""
    with self._update_lock:
      if update or series_path not in self._series_data:
        self._series_data[series_path] = self.data_manager.get_timeseries(series_path)

      time_array, value_array = self._series_data[series_path]
      series_tag = f"series_{self.panel_id}_{series_path}"
      if dpg.does_item_exist(series_tag):
        dpg.set_value(series_tag, (time_array, value_array.astype(float)))
      else:
        line_series_tag = dpg.add_line_series(x=time_array, y=value_array.astype(float), label=series_path, parent=self.y_axis_tag, tag=series_tag)
        dpg.bind_item_theme(line_series_tag, "line_theme")
      # Refit and re-downsample for the current view.
      self._fit_y_axis(*dpg.get_axis_limits(self.x_axis_tag))
      plot_duration = dpg.get_axis_limits(self.x_axis_tag)[1] - dpg.get_axis_limits(self.x_axis_tag)[0]
      self._downsample_all_series(plot_duration)

  def destroy_ui(self):
    with self._update_lock:
      # Detach observers first so no callbacks fire against deleted widgets.
      self.data_manager.remove_observer(self.on_data_loaded)
      self.playback_manager.remove_x_axis_observer(self._on_x_axis_sync)
      if dpg.does_item_exist(self.plot_tag):
        dpg.delete_item(self.plot_tag)
      self._ui_created = False

  def get_panel_type(self) -> str:
    return "timeseries"

  def clear(self):
    """Remove every series from the plot."""
    with self._update_lock:
      # Copy the keys: remove_series mutates _series_data while we iterate.
      for series_path in list(self._series_data.keys()):
        self.remove_series(series_path)

  def remove_series(self, series_path: str):
    """Delete one series' widget and forget its data; no-op for unknown paths."""
    with self._update_lock:
      if series_path in self._series_data:
        if dpg.does_item_exist(f"series_{self.panel_id}_{series_path}"):
          dpg.delete_item(f"series_{self.panel_id}_{series_path}")
        del self._series_data[series_path]

  def on_data_loaded(self, data: dict):
    """DataManager observer: mark data dirty; widen x-limits when metadata arrives."""
    with self._update_lock:
      self._new_data = True
      if data.get('metadata_loaded'):
        self._total_segments = data.get('total_segments', 0)
        limits = (-10, self._total_segments * 60 + 10)
        self._queued_x_sync = limits

  def _on_series_drop(self, sender, app_data, user_data):
    # Drag-and-drop from the data tree: app_data is the dropped series path.
    self.add_series(app_data)

  @staticmethod
  def _downsample_worker(series_path, time_array, value_array, target_points):
    """Peak-preserving downsample: keep each bucket's min and max sample.

    Runs on a worker thread and touches no dpg state. Returns
    (series_path, downsampled_times, downsampled_values).
    """
    if len(time_array) <= target_points:
      return series_path, time_array, value_array

    step = len(time_array) / target_points
    indices = []

    for i in range(target_points):
      start_idx = int(i * step)
      end_idx = int((i + 1) * step)
      if start_idx == end_idx:
        # Bucket narrower than one sample: keep that single sample.
        indices.append(start_idx)
      else:
        bucket_values = value_array[start_idx:end_idx]
        min_idx = start_idx + np.argmin(bucket_values)
        max_idx = start_idx + np.argmax(bucket_values)
        if min_idx != max_idx:
          # Keep both extremes, in time order, so spikes survive downsampling.
          indices.extend([min(min_idx, max_idx), max(min_idx, max_idx)])
        else:
          indices.append(min_idx)
    # Dedupe and restore chronological order across bucket boundaries.
    indices = sorted(set(indices))
    return series_path, time_array[indices], value_array[indices]
|
||||
Reference in New Issue
Block a user