2022-01-10 22:21:48 +08:00
|
|
|
#!/usr/bin/env python3
|
|
|
|
import os
|
|
|
|
import zmq
|
|
|
|
import time
|
|
|
|
from pathlib import Path
|
2022-03-28 23:00:09 +08:00
|
|
|
from collections import defaultdict
|
2024-05-21 08:43:54 +08:00
|
|
|
from datetime import datetime, UTC
|
2024-02-25 08:41:23 +08:00
|
|
|
from typing import NoReturn
|
2022-01-22 07:11:16 +08:00
|
|
|
|
2023-08-21 11:49:55 +08:00
|
|
|
from openpilot.common.params import Params
|
2022-01-10 22:21:48 +08:00
|
|
|
from cereal.messaging import SubMaster
|
2023-12-01 01:55:48 +08:00
|
|
|
from openpilot.system.hardware.hw import Paths
|
2023-12-07 09:27:51 +08:00
|
|
|
from openpilot.common.swaglog import cloudlog
|
2023-08-21 11:49:55 +08:00
|
|
|
from openpilot.system.hardware import HARDWARE
|
|
|
|
from openpilot.common.file_helpers import atomic_write_in_dir
|
2024-03-22 01:15:29 +08:00
|
|
|
from openpilot.system.version import get_build_metadata
|
2023-12-01 01:55:48 +08:00
|
|
|
from openpilot.system.loggerd.config import STATS_DIR_FILE_LIMIT, STATS_SOCKET, STATS_FLUSH_TIME_S
|
2022-01-10 22:21:48 +08:00
|
|
|
|
|
|
|
|
|
|
|
class METRIC_TYPE:
  # Single-character type codes appended to metrics on the wire
  # (format: "<name>:<value>|<type>"); parsed back out in main().
  GAUGE = 'g'    # point-in-time value; the last write before a flush wins
  SAMPLE = 'sa'  # buffered value; aggregated (count/min/max/mean/percentiles) at flush time
|
2022-01-10 22:21:48 +08:00
|
|
|
|
|
|
|
class StatLog:
  """Client side of the stats pipeline.

  Pushes metric strings over a zmq PUSH socket to the statsd daemon
  (main() below), which aggregates and flushes them to disk. Connection
  is lazy and fork-aware: the socket is (re)created whenever the calling
  pid differs from the pid that last connected, so a forked child never
  reuses its parent's socket.
  """

  def __init__(self) -> None:
    # pid that owns self.sock; None until connect() has run
    self.pid: int | None = None
    self.zctx: zmq.Context | None = None
    self.sock: zmq.Socket | None = None

  def connect(self) -> None:
    """Create the PUSH socket and record the owning pid."""
    self.zctx = zmq.Context()
    self.sock = self.zctx.socket(zmq.PUSH)
    # short linger so process exit isn't blocked on undelivered metrics
    self.sock.setsockopt(zmq.LINGER, 10)
    self.sock.connect(STATS_SOCKET)
    self.pid = os.getpid()

  def __del__(self):
    # best-effort cleanup; attributes stay None if connect() never ran
    if self.sock is not None:
      self.sock.close()
    if self.zctx is not None:
      self.zctx.term()

  def _send(self, metric: str) -> None:
    """Send one preformatted metric string, reconnecting after a fork."""
    if os.getpid() != self.pid:
      self.connect()

    try:
      # non-blocking: if the daemon is slow/absent, drop rather than stall the caller
      self.sock.send_string(metric, zmq.NOBLOCK)
    except zmq.error.Again:
      # drop :/
      pass

  def gauge(self, name: str, value: float) -> None:
    """Record a point-in-time value; last one before a flush wins."""
    self._send(f"{name}:{value}|{METRIC_TYPE.GAUGE}")

  # Samples will be recorded in a buffer and at aggregation time,
  # statistical properties will be logged (mean, count, percentiles, ...)
  def sample(self, name: str, value: float) -> None:
    self._send(f"{name}:{value}|{METRIC_TYPE.SAMPLE}")
|
|
|
|
|
2022-01-10 22:21:48 +08:00
|
|
|
|
2022-01-22 07:11:16 +08:00
|
|
|
def main() -> NoReturn:
  """statsd daemon: receive metrics pushed over the stats socket, aggregate
  them, and periodically flush them to disk as InfluxDB line-protocol files
  (picked up for upload by other services).

  Runs forever; the zmq socket and context are released on the way out.
  """
  dongle_id = Params().get("DongleId", encoding='utf-8')
  def get_influxdb_line(measurement: str, value: float | dict[str, float], timestamp: datetime, tags: dict) -> str:
    # Build one InfluxDB line-protocol record:
    #   measurement,tag1=v1,... field1=v1,...,dongle_id="..." <ns timestamp>
    res = f"{measurement}"
    for k, v in tags.items():
      res += f",{k}={str(v)}"
    res += " "

    # a bare float becomes a single field named "value"
    if isinstance(value, float):
      value = {'value': value}

    for k, v in value.items():
      res += f"{k}={v},"

    # dongle_id rides along as the final field; timestamp is in nanoseconds
    res += f"dongle_id=\"{dongle_id}\" {int(timestamp.timestamp() * 1e9)}\n"
    return res

  # open statistics socket (PULL side of StatLog's PUSH)
  ctx = zmq.Context.instance()
  sock = ctx.socket(zmq.PULL)
  sock.bind(STATS_SOCKET)

  STATS_DIR = Paths.stats_root()

  # initialize stats directory
  Path(STATS_DIR).mkdir(parents=True, exist_ok=True)

  build_metadata = get_build_metadata()

  # initialize tags (constant for the process, except 'started' which is
  # refreshed at every flush)
  tags = {
    'started': False,
    'version': build_metadata.openpilot.version,
    'branch': build_metadata.channel,
    'dirty': build_metadata.openpilot.is_dirty,
    'origin': build_metadata.openpilot.git_normalized_origin,
    'deviceType': HARDWARE.get_device_type(),
  }

  # subscribe to deviceState for started state
  sm = SubMaster(['deviceState'])

  idx = 0  # suffix to keep same-second output filenames unique
  last_flush_time = time.monotonic()
  gauges: dict[str, float] = {}
  samples: dict[str, list[float]] = defaultdict(list)
  try:
    while True:
      started_prev = sm['deviceState'].started
      sm.update()

      # Update metrics: drain everything queued on the socket without blocking.
      # Wire format is "<name>:<value>|<type>" (see METRIC_TYPE / StatLog).
      while True:
        try:
          metric = sock.recv_string(zmq.NOBLOCK)
          try:
            metric_type = metric.split('|')[1]
            metric_name = metric.split(':')[0]
            metric_value = float(metric.split('|')[0].split(':')[1])

            if metric_type == METRIC_TYPE.GAUGE:
              # gauges keep only the most recent value per name
              gauges[metric_name] = metric_value
            elif metric_type == METRIC_TYPE.SAMPLE:
              # samples are buffered and summarized at flush time
              samples[metric_name].append(metric_value)
            else:
              cloudlog.event("unknown metric type", metric_type=metric_type)
          except Exception:
            cloudlog.event("malformed metric", metric=metric)
        except zmq.error.Again:
          break

      # flush when started state changes or after FLUSH_TIME_S
      if (time.monotonic() > last_flush_time + STATS_FLUSH_TIME_S) or (sm['deviceState'].started != started_prev):
        result = ""
        current_time = datetime.now(UTC)
        tags['started'] = sm['deviceState'].started

        for key, value in gauges.items():
          result += get_influxdb_line(f"gauge.{key}", value, current_time, tags)

        for key, values in samples.items():
          # sorted so min/max/percentiles are positional lookups
          values.sort()
          sample_count = len(values)
          sample_sum = sum(values)

          stats = {
            'count': sample_count,
            'min': values[0],
            'max': values[-1],
            'mean': sample_sum / sample_count,
          }
          # nearest-rank percentiles: p5, p50, p95
          for percentile in [0.05, 0.5, 0.95]:
            value = values[int(round(percentile * (sample_count - 1)))]
            stats[f"p{int(percentile * 100)}"] = value

          result += get_influxdb_line(f"sample.{key}", stats, current_time, tags)

        # clear intermediate data
        gauges.clear()
        samples.clear()
        last_flush_time = time.monotonic()

        # check that we aren't filling up the drive
        if len(os.listdir(STATS_DIR)) < STATS_DIR_FILE_LIMIT:
          if len(result) > 0:
            stats_path = os.path.join(STATS_DIR, f"{current_time.timestamp():.0f}_{idx}")
            with atomic_write_in_dir(stats_path) as f:
              f.write(result)
            idx += 1
        else:
          # metrics already drained above are dropped in this case
          cloudlog.error("stats dir full")
  finally:
    sock.close()
    ctx.term()
|
2022-01-10 22:21:48 +08:00
|
|
|
|
|
|
|
|
|
|
|
if __name__ == "__main__":
  # run as the statsd daemon process
  main()
else:
  # imported as a library: expose a module-level StatLog singleton that
  # other processes use to push metrics to the daemon
  statlog = StatLog()
|