common folder

old-commit-hash: e8d888c45b
George Hotz 2020-01-17 10:28:44 -08:00
parent cc60fedeba
commit b1f13418e1
57 changed files with 3840 additions and 0 deletions

1
common/.gitignore vendored Normal file

@@ -0,0 +1 @@
*.cpp

6
common/SConscript Normal file

@@ -0,0 +1,6 @@
Import('env')
# parser
env.Command(['common_pyx.so'],
['common_pyx_setup.py', 'clock.pyx'],
"cd common && python3 common_pyx_setup.py build_ext --inplace")

0
common/__init__.py Normal file

89
common/android.py Normal file

@@ -0,0 +1,89 @@
import os
import binascii
import itertools
import re
import struct
import subprocess
ANDROID = os.path.isfile('/EON')
def getprop(key):
if not ANDROID:
return ""
return subprocess.check_output(["getprop", key], encoding='utf8').strip()
def get_imei(slot):
slot = str(slot)
if slot not in ("0", "1"):
raise ValueError("SIM slot must be 0 or 1")
ret = parse_service_call_string(service_call(["iphonesubinfo", "3", "i32", slot]))
if not ret:
ret = "000000000000000"
return ret
def get_serial():
ret = getprop("ro.serialno")
if ret == "":
ret = "cccccccc"
return ret
def get_subscriber_info():
ret = parse_service_call_string(service_call(["iphonesubinfo", "7"]))
if ret is None or len(ret) < 8:
return ""
return ret
def reboot(reason=None):
if reason is None:
reason_args = ["null"]
else:
reason_args = ["s16", reason]
subprocess.check_output([
"service", "call", "power", "16", # IPowerManager.reboot
"i32", "0", # no confirmation,
*reason_args,
"i32", "1" # wait
])
def service_call(call):
if not ANDROID:
return None
ret = subprocess.check_output(["service", "call", *call], encoding='utf8').strip()
if 'Parcel' not in ret:
return None
return parse_service_call_bytes(ret)
def parse_service_call_unpack(r, fmt):
try:
return struct.unpack(fmt, r)[0]
except Exception:
return None
def parse_service_call_string(r):
try:
r = r[8:] # Cut off length field
r = r.decode('utf_16_be')
# All pairs of two characters seem to be swapped. Not sure why
result = ""
for a, b in itertools.zip_longest(r[::2], r[1::2], fillvalue='\x00'):
result += b + a
result = result.replace('\x00', '')
return result
except Exception:
return None
def parse_service_call_bytes(ret):
try:
r = b""
for hex_part in re.findall(r'[ (]([0-9a-f]{8})', ret):
r += binascii.unhexlify(hex_part)
return r
except Exception:
return None
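
A rough usage sketch of the parcel helpers above (the Parcel dump is a made-up example; real `service call` output varies by Android version):

# hypothetical dump from `service call iphonesubinfo 7`
raw = "Result: Parcel(\n  0x00000000: 00000000 0000000f 00350033 00320031 '........5.3.2.1.')"
payload = parse_service_call_bytes(raw)    # hex words -> raw bytes
info = parse_service_call_string(payload)  # strip length field, decode UTF-16, unswap byte pairs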

42
common/api/__init__.py Normal file

@@ -0,0 +1,42 @@
import jwt
import requests
from datetime import datetime, timedelta
from selfdrive.version import version
class Api():
def __init__(self, dongle_id):
self.dongle_id = dongle_id
with open('/persist/comma/id_rsa') as f:
self.private_key = f.read()
def get(self, *args, **kwargs):
return self.request('GET', *args, **kwargs)
def post(self, *args, **kwargs):
return self.request('POST', *args, **kwargs)
def request(self, method, endpoint, timeout=None, access_token=None, **params):
return api_get(endpoint, method=method, timeout=timeout, access_token=access_token, **params)
def get_token(self):
now = datetime.utcnow()
payload = {
'identity': self.dongle_id,
'nbf': now,
'iat': now,
'exp': now + timedelta(hours=1)
}
return jwt.encode(payload, self.private_key, algorithm='RS256').decode('utf8')
def api_get(endpoint, method='GET', timeout=None, access_token=None, **params):
backend = "https://api.commadotai.com/"
headers = {}
if access_token is not None:
headers['Authorization'] = "JWT "+access_token
headers['User-Agent'] = "openpilot-" + version
return requests.request(method, backend+endpoint, timeout=timeout, headers=headers, params=params)
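
A hedged usage sketch (the endpoint path is illustrative, and this assumes the device key exists at /persist/comma/id_rsa):

from common.api import Api

api = Api("0123456789abcdef")             # dongle_id
token = api.get_token()                   # RS256 JWT, expires in 1 hour
resp = api.get("v1/me", access_token=token, timeout=10)
print(resp.status_code, resp.text)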

98
common/apk.py Normal file

@@ -0,0 +1,98 @@
import os
import subprocess
import glob
import hashlib
import shutil
from common.basedir import BASEDIR
from selfdrive.swaglog import cloudlog
android_packages = ("ai.comma.plus.offroad", "ai.comma.plus.frame")
def get_installed_apks():
dat = subprocess.check_output(["pm", "list", "packages", "-f"], encoding='utf8').strip().split("\n")
ret = {}
for x in dat:
if x.startswith("package:"):
v, k = x.split("package:")[1].split("=")
ret[k] = v
return ret
def install_apk(path):
# can only install from world readable path
install_path = "/sdcard/%s" % os.path.basename(path)
shutil.copyfile(path, install_path)
ret = subprocess.call(["pm", "install", "-r", install_path])
os.remove(install_path)
return ret == 0
def start_frame():
set_package_permissions()
system("am start -n ai.comma.plus.frame/.MainActivity")
def set_package_permissions():
pm_grant("ai.comma.plus.offroad", "android.permission.ACCESS_FINE_LOCATION")
appops_set("ai.comma.plus.offroad", "SU", "allow")
appops_set("ai.comma.plus.offroad", "WIFI_SCAN", "allow")
appops_set("ai.comma.plus.offroad", "READ_EXTERNAL_STORAGE", "allow")
appops_set("ai.comma.plus.offroad", "WRITE_EXTERNAL_STORAGE", "allow")
def appops_set(package, op, mode):
system(f"LD_LIBRARY_PATH= appops set {package} {op} {mode}")
def pm_grant(package, permission):
system(f"pm grant {package} {permission}")
def system(cmd):
try:
cloudlog.info("running %s" % cmd)
subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
cloudlog.event("running failed",
cmd=e.cmd,
output=e.output[-1024:],
returncode=e.returncode)
# *** external functions ***
def update_apks():
# install apks
installed = get_installed_apks()
install_apks = glob.glob(os.path.join(BASEDIR, "apk/*.apk"))
for apk in install_apks:
app = os.path.basename(apk)[:-4]
if app not in installed:
installed[app] = None
cloudlog.info("installed apks %s" % (str(installed), ))
for app in installed.keys():
apk_path = os.path.join(BASEDIR, "apk/"+app+".apk")
if not os.path.exists(apk_path):
continue
h1 = hashlib.sha1(open(apk_path, 'rb').read()).hexdigest()
h2 = None
if installed[app] is not None:
h2 = hashlib.sha1(open(installed[app], 'rb').read()).hexdigest()
cloudlog.info("comparing version of %s %s vs %s" % (app, h1, h2))
if h2 is None or h1 != h2:
cloudlog.info("installing %s" % app)
success = install_apk(apk_path)
if not success:
cloudlog.info("needing to uninstall %s" % app)
system("pm uninstall %s" % app)
success = install_apk(apk_path)
assert success
def pm_apply_packages(cmd):
for p in android_packages:
system("pm %s %s" % (cmd, p))
if __name__ == "__main__":
update_apks()

4
common/basedir.py Normal file

@@ -0,0 +1,4 @@
import os
BASEDIR = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), "../"))

16
common/clock.pyx Normal file

@@ -0,0 +1,16 @@
from posix.time cimport clock_gettime, timespec, CLOCK_BOOTTIME, CLOCK_MONOTONIC_RAW
cdef double readclock(int clock_id):
cdef timespec ts
cdef double current
clock_gettime(clock_id, &ts)
current = ts.tv_sec + (ts.tv_nsec / 1000000000.)
return current
def monotonic_time():
return readclock(CLOCK_MONOTONIC_RAW)
def sec_since_boot():
return readclock(CLOCK_BOOTTIME)
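
The SConscript above compiles this into common_pyx.so; a minimal sketch of how the two clocks are consumed (realtime.py below does the same import):

from common.common_pyx import sec_since_boot, monotonic_time

t = sec_since_boot()    # CLOCK_BOOTTIME: keeps counting across device suspend
m = monotonic_time()    # CLOCK_MONOTONIC_RAW: unaffected by NTP rate adjustment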

150
common/column_store.py Normal file

@@ -0,0 +1,150 @@
import os
import numpy as np
import collections
from contextlib import closing
from common.file_helpers import mkdirs_exists_ok
class ColumnStoreReader():
def __init__(self, path, mmap=False, allow_pickle=False, direct_io=False):
if not (path and os.path.isdir(path)):
raise ValueError("Not a column store: {}".format(path))
self._path = os.path.realpath(path)
self._keys = os.listdir(self._path)
self._mmap = mmap
self._allow_pickle = allow_pickle
self._direct_io = direct_io
@property
def path(self):
return self._path
def close(self):
pass
def get(self, key):
try:
return self[key]
except KeyError:
return None
def keys(self):
return list(self._keys)
def iteritems(self):
for k in self:
yield (k, self[k])
def itervalues(self):
for k in self:
yield self[k]
def get_npy_path(self, key):
"""Gets a filesystem path for an npy file containing the specified array,
or None if the column store does not contain key.
"""
if key in self:
return os.path.join(self._path, key)
else:
return None
def __getitem__(self, key):
try:
path = os.path.join(self._path, key)
# TODO(mgraczyk): This implementation will need to change for zip.
if os.path.isdir(path):
return ColumnStoreReader(path)
else:
if self._mmap:
# note that direct i/o does nothing for mmap since file read/write interface is not used
ret = np.load(path, mmap_mode='r', allow_pickle=self._allow_pickle, fix_imports=False)
else:
if self._direct_io:
opener = lambda path, flags: os.open(path, os.O_RDONLY | os.O_DIRECT)
with open(path, 'rb', buffering=0, opener=opener) as f:
ret = np.load(f, allow_pickle=self._allow_pickle, fix_imports=False)
else:
ret = np.load(path, allow_pickle=self._allow_pickle, fix_imports=False)
if type(ret) == np.lib.npyio.NpzFile:
# if it's saved as compressed, it has arr_0 only in the file. deref this
return ret['arr_0']
else:
return ret
except IOError:
raise KeyError(key)
def __contains__(self, item):
try:
self[item]
return True
except KeyError:
return False
def __len__(self):
return len(self._keys)
def __bool__(self):
return bool(self._keys)
def __iter__(self):
return iter(self._keys)
def __str__(self):
return "ColumnStoreReader({})".format(str({k: "..." for k in self._keys}))
def __enter__(self): return self
def __exit__(self, type, value, traceback): self.close()
class ColumnStoreWriter():
def __init__(self, path, allow_pickle=False):
self._path = path
self._allow_pickle = allow_pickle
mkdirs_exists_ok(self._path)
def map_column(self, path, dtype, shape):
npy_path = os.path.join(self._path, path)
mkdirs_exists_ok(os.path.dirname(npy_path))
return np.lib.format.open_memmap(npy_path, mode='w+', dtype=dtype, shape=shape)
def add_column(self, path, data, dtype=None, compression=False, overwrite=False):
npy_path = os.path.join(self._path, path)
mkdirs_exists_ok(os.path.dirname(npy_path))
if overwrite:
f = open(npy_path, "wb")
else:
f = os.fdopen(os.open(npy_path, os.O_WRONLY | os.O_CREAT | os.O_EXCL), "wb")
with closing(f) as f:
data2 = np.array(data, copy=False, dtype=dtype)
if compression:
np.savez_compressed(f, data2)
else:
np.save(f, data2, allow_pickle=self._allow_pickle, fix_imports=False)
def add_group(self, group_name):
# TODO(mgraczyk): This implementation will need to change if we add zip or compression.
return ColumnStoreWriter(os.path.join(self._path, group_name))
def close(self):
pass
def __enter__(self): return self
def __exit__(self, type, value, traceback): self.close()
def _save_dict_as_column_store(values, writer):
for k, v in values.items():
if isinstance(v, collections.abc.Mapping):
_save_dict_as_column_store(v, writer.add_group(k))
else:
writer.add_column(k, v)
def save_dict_as_column_store(values, output_path):
with ColumnStoreWriter(output_path) as writer:
_save_dict_as_column_store(values, writer)
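
A small round-trip sketch (the /tmp path is illustrative; nested dicts become nested directories, one array file per leaf key):

import numpy as np
from common.column_store import save_dict_as_column_store, ColumnStoreReader

vals = {"speed": np.arange(10.), "gps": {"lat": np.zeros(3), "lon": np.ones(3)}}
save_dict_as_column_store(vals, "/tmp/example_cs")
with ColumnStoreReader("/tmp/example_cs") as r:
    print(sorted(r.keys()))          # ['gps', 'speed']
    print(r["gps"]["lat"].shape)     # (3,) -- subdirectories come back as nested readers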

20
common/common_pyx_setup.py Normal file

@@ -0,0 +1,20 @@
from distutils.core import Extension, setup # pylint: disable=import-error,no-name-in-module
from Cython.Build import cythonize
from common.cython_hacks import BuildExtWithoutPlatformSuffix
sourcefiles = ['clock.pyx']
extra_compile_args = ["-std=c++11"]
setup(name='Common',
cmdclass={'build_ext': BuildExtWithoutPlatformSuffix},
ext_modules=cythonize(
Extension(
"common_pyx",
language="c++",
sources=sourcefiles,
extra_compile_args=extra_compile_args,
)
),
nthreads=4,
)

3
common/compat.py Normal file

@@ -0,0 +1,3 @@
# py2,3 compatibility helpers
basestring = (str, bytes)

23
common/cython_hacks.py Normal file

@@ -0,0 +1,23 @@
import os
import sysconfig
from Cython.Distutils import build_ext
def get_ext_filename_without_platform_suffix(filename):
name, ext = os.path.splitext(filename)
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
if ext_suffix == ext:
return filename
ext_suffix = ext_suffix.replace(ext, '')
idx = name.find(ext_suffix)
if idx == -1:
return filename
else:
return name[:idx] + ext
class BuildExtWithoutPlatformSuffix(build_ext):
def get_ext_filename(self, ext_name):
filename = super().get_ext_filename(ext_name)
return get_ext_filename_without_platform_suffix(filename)
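
Concretely, the suffix stripping does the following (EXT_SUFFIX shown for CPython 3.8 on x86-64 Linux; the value varies by interpreter and platform):

from common.cython_hacks import get_ext_filename_without_platform_suffix

get_ext_filename_without_platform_suffix("common_pyx.cpython-38-x86_64-linux-gnu.so")
# -> "common_pyx.so", matching the fixed target names used in the SConscripts above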

49
common/ffi_wrapper.py Normal file

@@ -0,0 +1,49 @@
import os
import sys
import fcntl
import hashlib
from cffi import FFI
def ffi_wrap(name, c_code, c_header, tmpdir="/tmp/ccache", cflags="", libraries=None):
if libraries is None:
libraries = []
cache = name + "_" + hashlib.sha1(c_code.encode('utf-8')).hexdigest()
try:
os.mkdir(tmpdir)
except OSError:
pass
fd = os.open(tmpdir, 0)
fcntl.flock(fd, fcntl.LOCK_EX)
try:
sys.path.append(tmpdir)
try:
mod = __import__(cache)
except Exception:
print("cache miss {0}".format(cache))
compile_code(cache, c_code, c_header, tmpdir, cflags, libraries)
mod = __import__(cache)
finally:
os.close(fd)
return mod.ffi, mod.lib
def compile_code(name, c_code, c_header, directory, cflags="", libraries=None):
if libraries is None:
libraries = []
ffibuilder = FFI()
ffibuilder.set_source(name, c_code, source_extension='.cpp', libraries=libraries)
ffibuilder.cdef(c_header)
os.environ['OPT'] = "-fwrapv -O2 -DNDEBUG -std=c++11"
os.environ['CFLAGS'] = cflags
ffibuilder.compile(verbose=True, debug=False, tmpdir=directory)
def wrap_compiled(name, directory):
sys.path.append(directory)
mod = __import__(name)
return mod.ffi, mod.lib
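
A minimal sketch of ffi_wrap with a throwaway C snippet (all names here are illustrative):

from common.ffi_wrapper import ffi_wrap

c_code = "int add(int a, int b) { return a + b; }"
c_header = "int add(int a, int b);"
ffi, lib = ffi_wrap("demo", c_code, c_header)  # compiled on first call, served from /tmp/ccache after
print(lib.add(2, 3))  # 5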

109
common/file_helpers.py Normal file

@@ -0,0 +1,109 @@
import os
import shutil
import tempfile
from atomicwrites import AtomicWriter
def mkdirs_exists_ok(path):
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
def rm_not_exists_ok(path):
try:
os.remove(path)
except OSError:
if os.path.exists(path):
raise
def rm_tree_or_link(path):
if os.path.islink(path):
os.unlink(path)
elif os.path.isdir(path):
shutil.rmtree(path)
def get_tmpdir_on_same_filesystem(path):
normpath = os.path.normpath(path)
parts = normpath.split("/")
if len(parts) > 1 and parts[1] == "scratch":
return "/scratch/tmp"
elif len(parts) > 2 and parts[2] == "runner":
return "/{}/runner/tmp".format(parts[1])
return "/tmp"
class AutoMoveTempdir():
def __init__(self, target_path, temp_dir=None):
self._target_path = target_path
self._path = tempfile.mkdtemp(dir=temp_dir)
@property
def name(self):
return self._path
def close(self):
os.rename(self._path, self._target_path)
def __enter__(self): return self
def __exit__(self, type, value, traceback):
if type is None:
self.close()
else:
shutil.rmtree(self._path)
class NamedTemporaryDir():
def __init__(self, temp_dir=None):
self._path = tempfile.mkdtemp(dir=temp_dir)
@property
def name(self):
return self._path
def close(self):
shutil.rmtree(self._path)
def __enter__(self): return self
def __exit__(self, type, value, traceback):
self.close()
def _get_fileobject_func(writer, temp_dir):
def _get_fileobject():
file_obj = writer.get_fileobject(dir=temp_dir)
os.chmod(file_obj.name, 0o644)
return file_obj
return _get_fileobject
def atomic_write_on_fs_tmp(path, **kwargs):
"""Creates an atomic writer using a temporary file in a temporary directory
on the same filesystem as path.
"""
# TODO(mgraczyk): This use of AtomicWriter relies on implementation details to set the temp
# directory.
writer = AtomicWriter(path, **kwargs)
return writer._open(_get_fileobject_func(writer, get_tmpdir_on_same_filesystem(path)))
def atomic_write_in_dir(path, **kwargs):
"""Creates an atomic writer using a temporary file in the same directory
as the destination file.
"""
writer = AtomicWriter(path, **kwargs)
return writer._open(_get_fileobject_func(writer, os.path.dirname(path)))
def atomic_write_in_dir_neos(path, contents, mode=None):
"""
Atomically writes contents to path using a temporary file in the same directory
as path. Useful on NEOS, where `os.link` (required by atomic_write_in_dir) is missing.
"""
f = tempfile.NamedTemporaryFile(delete=False, prefix=".tmp", dir=os.path.dirname(path))
f.write(contents)
f.flush()
if mode is not None:
os.fchmod(f.fileno(), mode)
os.fsync(f.fileno())
f.close()
os.rename(f.name, path)
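
Typical use of the atomic writers (the path is illustrative):

from common.file_helpers import atomic_write_in_dir

with atomic_write_in_dir("/tmp/state.json") as f:
    f.write('{"ok": true}')
# the temp file is only renamed over the target on a clean exit,
# so concurrent readers never observe a partially written file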

10
common/filter_simple.py Normal file

@@ -0,0 +1,10 @@
class FirstOrderFilter():
# first order filter
def __init__(self, x0, ts, dt):
self.k = (dt / ts) / (1. + dt / ts)
self.x = x0
def update(self, x):
self.x = (1. - self.k) * self.x + self.k * x
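
The gain k is a discretized low-pass with time constant ts sampled every dt seconds; a quick check of the step response:

from common.filter_simple import FirstOrderFilter

f = FirstOrderFilter(x0=0., ts=1., dt=0.01)  # ~1 s time constant at 100 Hz
for _ in range(100):                         # feed a unit step for 1 s
    f.update(1.0)
print(round(f.x, 2))                         # ~0.63, i.e. 1 - 1/e after one time constant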

1
common/kalman/.gitignore vendored Normal file

@@ -0,0 +1 @@
simple_kalman_impl.c

6
common/kalman/SConscript Normal file

@@ -0,0 +1,6 @@
Import('env')
env.Command(['simple_kalman_impl.so'],
['simple_kalman_impl.pyx', 'simple_kalman_impl.pxd', 'simple_kalman_setup.py'],
"cd common/kalman && python3 simple_kalman_setup.py build_ext --inplace")

0
common/kalman/__init__.py Normal file

3
common/kalman/simple_kalman.py Normal file

@@ -0,0 +1,3 @@
# pylint: skip-file
from common.kalman.simple_kalman_impl import KF1D as KF1D
assert KF1D

16
common/kalman/simple_kalman_impl.pxd Normal file

@@ -0,0 +1,16 @@
cdef class KF1D:
cdef public:
double x0_0
double x1_0
double K0_0
double K1_0
double A0_0
double A0_1
double A1_0
double A1_1
double C0_0
double C0_1
double A_K_0
double A_K_1
double A_K_2
double A_K_3

36
common/kalman/simple_kalman_impl.pyx Normal file

@@ -0,0 +1,36 @@
# cython: language_level=3
cdef class KF1D:
def __init__(self, x0, A, C, K):
self.x0_0 = x0[0][0]
self.x1_0 = x0[1][0]
self.A0_0 = A[0][0]
self.A0_1 = A[0][1]
self.A1_0 = A[1][0]
self.A1_1 = A[1][1]
self.C0_0 = C[0]
self.C0_1 = C[1]
self.K0_0 = K[0][0]
self.K1_0 = K[1][0]
self.A_K_0 = self.A0_0 - self.K0_0 * self.C0_0
self.A_K_1 = self.A0_1 - self.K0_0 * self.C0_1
self.A_K_2 = self.A1_0 - self.K1_0 * self.C0_0
self.A_K_3 = self.A1_1 - self.K1_0 * self.C0_1
def update(self, meas):
cdef double x0_0 = self.A_K_0 * self.x0_0 + self.A_K_1 * self.x1_0 + self.K0_0 * meas
cdef double x1_0 = self.A_K_2 * self.x0_0 + self.A_K_3 * self.x1_0 + self.K1_0 * meas
self.x0_0 = x0_0
self.x1_0 = x1_0
return [self.x0_0, self.x1_0]
@property
def x(self):
return [[self.x0_0], [self.x1_0]]
@x.setter
def x(self, x):
self.x0_0 = x[0][0]
self.x1_0 = x[1][0]

23
common/kalman/simple_kalman_old.py Normal file

@@ -0,0 +1,23 @@
import numpy as np
class KF1D:
# this EKF assumes constant covariance matrix, so calculations are much simpler
# the Kalman gain also needs to be precomputed using the control module
def __init__(self, x0, A, C, K):
self.x = x0
self.A = A
self.C = C
self.K = K
self.A_K = self.A - np.dot(self.K, self.C)
# K matrix needs to be pre-computed as follow:
# import control
# (x, l, K) = control.dare(np.transpose(self.A), np.transpose(self.C), Q, R)
# self.K = np.transpose(K)
def update(self, meas):
self.x = np.dot(self.A_K, self.x) + np.dot(self.K, meas)
return self.x
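
The gain precomputation described in the comment above, sketched with the python-control package (Q and R are illustrative tuning values):

import numpy as np
import control  # pip install control

dt = 0.01
A = np.array([[1., dt], [0., 1.]])
C = np.array([[1., 0.]])
Q = np.diag([0.1, 1.0])   # assumed process noise
R = np.array([[10.0]])    # assumed measurement noise
_, _, K = control.dare(A.T, C.T, Q, R)
K = K.T                   # steady-state Kalman gain to pass as KF1D's K argument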

9
common/kalman/simple_kalman_setup.py Normal file

@@ -0,0 +1,9 @@
from distutils.core import Extension, setup
from Cython.Build import cythonize
from common.cython_hacks import BuildExtWithoutPlatformSuffix
setup(name='Simple Kalman Implementation',
cmdclass={'build_ext': BuildExtWithoutPlatformSuffix},
ext_modules=cythonize(Extension("simple_kalman_impl", ["simple_kalman_impl.pyx"])))

0
common/kalman/tests/__init__.py Normal file

85
common/kalman/tests/test_simple_kalman.py Normal file

@@ -0,0 +1,85 @@
import unittest
import random
import timeit
import numpy as np
from common.kalman.simple_kalman import KF1D
from common.kalman.simple_kalman_old import KF1D as KF1D_old
class TestSimpleKalman(unittest.TestCase):
def setUp(self):
dt = 0.01
x0_0 = 0.0
x1_0 = 0.0
A0_0 = 1.0
A0_1 = dt
A1_0 = 0.0
A1_1 = 1.0
C0_0 = 1.0
C0_1 = 0.0
K0_0 = 0.12287673
K1_0 = 0.29666309
self.kf_old = KF1D_old(x0=np.matrix([[x0_0], [x1_0]]),
A=np.matrix([[A0_0, A0_1], [A1_0, A1_1]]),
C=np.matrix([C0_0, C0_1]),
K=np.matrix([[K0_0], [K1_0]]))
self.kf = KF1D(x0=[[x0_0], [x1_0]],
A=[[A0_0, A0_1], [A1_0, A1_1]],
C=[C0_0, C0_1],
K=[[K0_0], [K1_0]])
def test_getter_setter(self):
self.kf.x = [[1.0], [1.0]]
self.assertEqual(self.kf.x, [[1.0], [1.0]])
def update_returns_state(self):
x = self.kf.update(100)
self.assertEqual(x, self.kf.x)
def test_old_equal_new(self):
for _ in range(1000):
v_wheel = random.uniform(0, 200)
x_old = self.kf_old.update(v_wheel)
x = self.kf.update(v_wheel)
# Compare the output x, verify that the error is less than 1e-4
self.assertAlmostEqual(x_old[0], x[0])
self.assertAlmostEqual(x_old[1], x[1])
def test_new_is_faster(self):
setup = """
import numpy as np
from common.kalman.simple_kalman import KF1D
from common.kalman.simple_kalman_old import KF1D as KF1D_old
dt = 0.01
x0_0 = 0.0
x1_0 = 0.0
A0_0 = 1.0
A0_1 = dt
A1_0 = 0.0
A1_1 = 1.0
C0_0 = 1.0
C0_1 = 0.0
K0_0 = 0.12287673
K1_0 = 0.29666309
kf_old = KF1D_old(x0=np.matrix([[x0_0], [x1_0]]),
A=np.matrix([[A0_0, A0_1], [A1_0, A1_1]]),
C=np.matrix([C0_0, C0_1]),
K=np.matrix([[K0_0], [K1_0]]))
kf = KF1D(x0=[[x0_0], [x1_0]],
A=[[A0_0, A0_1], [A1_0, A1_1]],
C=[C0_0, C0_1],
K=[[K0_0], [K1_0]])
"""
kf_speed = timeit.timeit("kf.update(1234)", setup=setup, number=10000)
kf_old_speed = timeit.timeit("kf_old.update(1234)", setup=setup, number=10000)
self.assertTrue(kf_speed < kf_old_speed / 4)

12
common/lazy_property.py Normal file

@@ -0,0 +1,12 @@
class lazy_property():
"""Defines a property whose value will be computed only once and as needed.
This can only be used on instance methods.
"""
def __init__(self, func):
self._func = func
def __get__(self, obj_self, cls):
value = self._func(obj_self)
setattr(obj_self, self._func.__name__, value)
return value
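
A short illustration of the caching behavior (the Route class is made up):

import os
from common.lazy_property import lazy_property

class Route:
    def __init__(self, path):
        self.path = path

    @lazy_property
    def segments(self):
        print("scanning disk...")  # printed only on first access
        return sorted(os.listdir(self.path))

r = Route("/tmp")
r.segments  # computes, then shadows the descriptor with an instance attribute
r.segments  # plain attribute lookup from here on, no recompute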

284
common/loader.py Normal file

@@ -0,0 +1,284 @@
import os
import struct
import bisect
import numpy as np
import _io
import capnp
from cereal import log as capnp_log
class RawData():
def __init__(self, f):
self.f = _io.FileIO(f, 'rb')
self.lenn = struct.unpack("I", self.f.read(4))[0]
self.count = os.path.getsize(f) // (self.lenn+4)
def read(self, i):
self.f.seek((self.lenn+4)*i + 4)
return self.f.read(self.lenn)
def yuv420_to_rgb(raw, image_dim=None, swizzled=False):
def expand(x):
x = np.repeat(x, 2, axis=0)
return np.repeat(x, 2, axis=1)
if image_dim is None:
image_dim = (raw.shape[1]*2, raw.shape[2]*2)
swizzled = True
if not swizzled:
img_data = np.array(raw, copy=False, dtype=np.uint8)
uv_len = (image_dim[0]//2)*(image_dim[1]//2)
img_data_u = expand(img_data[image_dim[0]*image_dim[1]: \
image_dim[0]*image_dim[1]+uv_len]. \
reshape(image_dim[0]//2, image_dim[1]//2))
img_data_v = expand(img_data[image_dim[0]*image_dim[1]+uv_len: \
image_dim[0]*image_dim[1]+2*uv_len]. \
reshape(image_dim[0]//2, image_dim[1]//2))
img_data_y = img_data[0:image_dim[0]*image_dim[1]].reshape(image_dim)
else:
img_data_y = np.zeros(image_dim, dtype=np.uint8)
img_data_y[0::2, 0::2] = raw[0]
img_data_y[1::2, 0::2] = raw[1]
img_data_y[0::2, 1::2] = raw[2]
img_data_y[1::2, 1::2] = raw[3]
img_data_u = expand(raw[4])
img_data_v = expand(raw[5])
yuv = np.stack((img_data_y, img_data_u, img_data_v)).swapaxes(0,2).swapaxes(0,1)
yuv = yuv.astype(np.int16)
# http://maxsharabayko.blogspot.com/2016/01/fast-yuv-to-rgb-conversion-in-python-3.html
# according to ITU-R BT.709
yuv[:,:, 0] = yuv[:,:, 0].clip(16, 235).astype(yuv.dtype) - 16
yuv[:,:,1:] = yuv[:,:,1:].clip(16, 240).astype(yuv.dtype) - 128
A = np.array([[1.164, 0.000, 1.793],
[1.164, -0.213, -0.533],
[1.164, 2.112, 0.000]])
# our result
img = np.dot(yuv, A.T).clip(0, 255).astype('uint8')
return img
class YuvData():
def __init__(self, f, dim=(160,320)):
self.f = _io.FileIO(f, 'rb')
self.image_dim = dim
self.image_size = (self.image_dim[0]//2) * (self.image_dim[1]//2) * 6
self.count = os.path.getsize(f) // self.image_size
def read_frame(self, frame):
self.f.seek(self.image_size*frame)
raw = self.f.read(self.image_size)
return raw
def read_frames(self, range_start, range_len):
self.f.seek(self.image_size*range_start)
raw = self.f.read(self.image_size*range_len)
return raw
def read_frames_into(self, range_start, buf):
self.f.seek(self.image_size*range_start)
return self.f.readinto(buf)
def read(self, frame):
return yuv420_to_rgb(self.read_frame(frame), self.image_dim)
def close(self):
self.f.close()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
class OneReader():
def __init__(self, base_path, goofy=False, segment_range=None):
self.base_path = base_path
route_name = os.path.basename(base_path)
self.rcamera_size = (304, 560)
if segment_range is None:
parent_path = os.path.dirname(base_path)
self.segment_nums = []
for p in os.listdir(parent_path):
if not p.startswith(route_name+"--"):
continue
self.segment_nums.append(int(p.rsplit("--", 1)[-1]))
if not self.segment_nums:
raise Exception("no route segments found")
self.segment_nums.sort()
self.segment_range = (self.segment_nums[0], self.segment_nums[-1])
else:
self.segment_range = segment_range
self.segment_nums = list(range(segment_range[0], segment_range[1]+1))
for i in self.segment_nums:
if not os.path.exists(base_path+"--"+str(i)):
raise Exception("missing segment in provided range")
# goofy data is broken with discontinuous logs
if goofy and (self.segment_range[0] != 0
or self.segment_nums != list(range(self.segment_range[0], self.segment_range[1]+1))):
raise Exception("goofy data needs all the segments for a route")
self.cur_seg = None
self.cur_seg_f = None
# index the frames
print("indexing frames {}...".format(self.segment_nums))
self.rcamera_encode_map = {} # frame_id -> (segment num, segment id, frame_time)
last_frame_id = -1
if goofy:
# goofy is goofy
frame_size = self.rcamera_size[0]*self.rcamera_size[1]*3//2
# find the encode id ranges for each segment by using the rcamera file size
segment_encode_ids = []
cur_encode_id = 0
for n in self.segment_nums:
camera_path = os.path.join(self.seg_path(n), "rcamera")
if not os.path.exists(camera_path):
# for goofy, missing camera files means a bad route
raise Exception("Missing camera file {}".format(camera_path))
camera_size = os.path.getsize(camera_path)
assert (camera_size % frame_size) == 0
num_frames = camera_size // frame_size
segment_encode_ids.append(cur_encode_id)
cur_encode_id += num_frames
last_encode_id = -1
# use the segment encode id map and frame events to build the frame index
for n in self.segment_nums:
log_path = os.path.join(self.seg_path(n), "rlog")
if os.path.exists(log_path):
with open(log_path, "rb") as f:
for evt in capnp_log.Event.read_multiple(f):
if evt.which() == 'frame':
if evt.frame.frameId < last_frame_id:
# a non-increasing frame id is bad route (eg visiond was restarted)
raise Exception("non-increasing frame id")
last_frame_id = evt.frame.frameId
seg_i = bisect.bisect_right(segment_encode_ids, evt.frame.encodeId)-1
assert seg_i >= 0
seg_num = self.segment_nums[seg_i]
seg_id = evt.frame.encodeId-segment_encode_ids[seg_i]
frame_time = evt.logMonoTime / 1.0e9
self.rcamera_encode_map[evt.frame.frameId] = (seg_num, seg_id,
frame_time)
last_encode_id = evt.frame.encodeId
if last_encode_id-cur_encode_id > 10:
# too many missing frames is a bad route (eg route from before encoder rotating worked)
raise Exception("goofy route is missing frames: {}, {}".format(
last_encode_id, cur_encode_id))
else:
# for harry data, build the index from encodeIdx events
for n in self.segment_nums:
log_path = os.path.join(self.seg_path(n), "rlog")
if os.path.exists(log_path):
with open(log_path, "rb") as f:
for evt in capnp_log.Event.read_multiple(f):
if evt.which() == 'encodeIdx' and evt.encodeIdx.type == 'bigBoxLossless':
frame_time = evt.logMonoTime / 1.0e9
self.rcamera_encode_map[evt.encodeIdx.frameId] = (
evt.encodeIdx.segmentNum, evt.encodeIdx.segmentId,
frame_time)
print("done")
# read the first event to find the start time
self.reset_to_seg(self.segment_range[0])
for evt in self.events():
if evt.which() != 'initData':
self.start_mono = evt.logMonoTime
break
self.reset_to_seg(self.segment_range[0])
def seg_path(self, num):
return self.base_path+"--"+str(num)
def reset_to_seg(self, seg):
self.cur_seg = seg
if self.cur_seg_f:
self.cur_seg_f.close()
self.cur_seg_f = None
def seek_ts(self, ts):
seek_seg = int(ts/60)
if seek_seg < self.segment_range[0] or seek_seg > self.segment_range[1]:
raise ValueError
self.reset_to_seg(seek_seg)
target_mono = self.start_mono + int(ts*1e9)
for evt in self.events():
if evt.logMonoTime >= target_mono:
break
def read_event(self):
while True:
if self.cur_seg > self.segment_range[1]:
return None
if self.cur_seg_f is None:
log_path = os.path.join(self.seg_path(self.cur_seg), "rlog")
if not os.path.exists(log_path):
print("missing log file!", log_path)
self.cur_seg += 1
continue
self.cur_seg_f = open(log_path, "rb")
try:
return capnp_log.Event.read(self.cur_seg_f)
except capnp.lib.capnp.KjException as e:
if 'EOF' in str(e): # dumb, but pycapnp does this too
self.cur_seg_f.close()
self.cur_seg_f = None
self.cur_seg += 1
else:
raise
def events(self):
while True:
r = self.read_event()
if r is None:
break
yield r
def read_frame(self, frame_id):
encode_idx = self.rcamera_encode_map.get(frame_id)
if encode_idx is None:
return None
seg_num, seg_id, _ = encode_idx
camera_path = os.path.join(self.seg_path(seg_num), "rcamera")
if not os.path.exists(camera_path):
return None
with YuvData(camera_path, self.rcamera_size) as data:
return data.read_frame(seg_id)
def close(self):
if self.cur_seg_f is not None:
self.cur_seg_f.close()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()

92
common/log_compressor.py Normal file

@@ -0,0 +1,92 @@
import sys
import json
# pip3 install msgpack
import msgpack
import zlib
import os
import logging
from Crypto.Cipher import AES
ext = ".gz"
SWAG = b'\xde\xe2\x11\x15VVC\xf2\x8ep\xd7\xe4\x87\x8d,9'
def compress_json(in_file, out_file):
logging.debug("compressing %s -> %s", in_file, out_file)
errors = 0
good = []
last_can_time = 0
with open(in_file, 'r') as inf:
for ln in inf:
ln = ln.rstrip()
if not ln: continue
try:
ll = json.loads(ln)
except ValueError:
errors += 1
continue
if ll is None or ll[0] is None:
continue
if ll[0][1] == 1:
# store the CAN payload as raw bytes rather than hex
ll[1][2] = bytes.fromhex(ll[1][2])
# relativize the CAN timestamps
this_can_time = ll[1][1]
ll[1] = [ll[1][0], this_can_time - last_can_time, ll[1][2]]
last_can_time = this_can_time
good.append(ll)
logging.debug("compressing %s -> %s, read done", in_file, out_file)
data = msgpack.packb(good)
data_compressed = zlib.compress(data)
# zlib doesn't care about this
data_compressed += b"\x00" * (16 - len(data_compressed) % 16)
aes = AES.new(SWAG, AES.MODE_CBC, b"\x00"*16)
data_encrypted = aes.encrypt(data_compressed)
with open(out_file, "wb") as outf:
outf.write(data_encrypted)
logging.debug("compressing %s -> %s, write done", in_file, out_file)
return errors
def decompress_json_internal(data_encrypted):
aes = AES.new(SWAG, AES.MODE_CBC, b"\x00"*16)
data_compressed = aes.decrypt(data_encrypted)
data = zlib.decompress(data_compressed)
msgs = msgpack.unpackb(data)
good = []
last_can_time = 0
for ll in msgs:
if ll[0][1] == 1:
# back into hex for the JSON output
ll[1][2] = ll[1][2].hex()
# derelativize CAN timestamps
last_can_time += ll[1][1]
ll[1] = [ll[1][0], last_can_time, ll[1][2]]
good.append(ll)
return good
def decompress_json(in_file, out_file):
logging.debug("decompressing %s -> %s", in_file, out_file)
f = open(in_file, 'rb')
data_encrypted = f.read()
f.close()
good = decompress_json_internal(data_encrypted)
out = '\n'.join(map(lambda x: json.dumps(x), good)) + "\n"
logging.debug("decompressing %s -> %s, writing", in_file, out_file)
f = open(out_file, 'w')
f.write(out)
f.close()
logging.debug("decompressing %s -> %s, write finished", in_file, out_file)
if __name__ == "__main__":
for dat in sys.argv[1:]:
print(dat)
compress_json(dat, "/tmp/out"+ext)
decompress_json("/tmp/out"+ext, "/tmp/test")
os.system("diff "+dat+" /tmp/test")

69
common/logging_es.py Normal file

@@ -0,0 +1,69 @@
import logging
from common.compat import basestring
from common.logging_extra import SwagFormatter, json_robust_dumps
from logstash_async.handler import AsynchronousLogstashHandler
from logstash_async.transport import UdpTransport
# elasticsearch doesn't allow the same key to hold values of different types in the same index
class SwagLogstashFormatter(SwagFormatter):
def __init__(self, swaglogger):
super(SwagLogstashFormatter, self).__init__(swaglogger)
def fix_kv(self, k, v):
if isinstance(v, basestring):
k += "$s"
elif isinstance(v, float):
k += "$f"
elif isinstance(v, bool):
k += "$b"
elif isinstance(v, int):
k += "$i"
elif isinstance(v, dict):
nv = {}
for ik, iv in v.items():
ik, iv = self.fix_kv(ik, iv)
nv[ik] = iv
v = nv
elif isinstance(v, list):
k += "$a"
return k, v
def format(self, record):
v = self.format_dict(record)
mk, mv = self.fix_kv('msg', v['msg'])
del v['msg']
v[mk] = mv
return json_robust_dumps(v)
class SwagLogstashHandler(AsynchronousLogstashHandler):
def __init__(self, host, port, name, formatter):
super(SwagLogstashHandler, self).__init__(host, port, database_path=None, transport=UdpTransport)
self.name = name
if not isinstance(formatter, SwagLogstashFormatter):
raise ValueError("formatter must be swag")
self.setFormatter(formatter)
def emit(self, record):
record.name = self.name
super(SwagLogstashHandler, self).emit(record)
if __name__ == "__main__":
from common.logging_extra import SwagLogger
log = SwagLogger()
ls_formatter = SwagLogstashFormatter(log)
ls_handler = SwagLogstashHandler("logstash.comma.life", 5040, "pipeline", ls_formatter)
log.addHandler(ls_handler)
s_handler = logging.StreamHandler()
log.addHandler(s_handler)
log.info("asynclogtest %s", "1")
log.info({'asynclogtest': 2})
log.warning("asynclogtest warning")
log.error("asynclogtest error")
log.critical("asynclogtest critical")
log.event("asynclogtest", a="b")

152
common/logging_extra.py Normal file

@@ -0,0 +1,152 @@
import os
import sys
import copy
import json
import socket
import logging
from threading import local
from collections import OrderedDict
from contextlib import contextmanager
def json_handler(obj):
# if isinstance(obj, (datetime.date, datetime.time)):
# return obj.isoformat()
return repr(obj)
def json_robust_dumps(obj):
return json.dumps(obj, default=json_handler)
class NiceOrderedDict(OrderedDict):
def __str__(self):
return json_robust_dumps(self)
class SwagFormatter(logging.Formatter):
def __init__(self, swaglogger):
logging.Formatter.__init__(self, None, '%a %b %d %H:%M:%S %Z %Y')
self.swaglogger = swaglogger
self.host = socket.gethostname()
def format_dict(self, record):
record_dict = NiceOrderedDict()
if isinstance(record.msg, dict):
record_dict['msg'] = record.msg
else:
try:
record_dict['msg'] = record.getMessage()
except (ValueError, TypeError):
record_dict['msg'] = [record.msg]+record.args
record_dict['ctx'] = self.swaglogger.get_ctx()
if record.exc_info:
record_dict['exc_info'] = self.formatException(record.exc_info)
record_dict['level'] = record.levelname
record_dict['levelnum'] = record.levelno
record_dict['name'] = record.name
record_dict['filename'] = record.filename
record_dict['lineno'] = record.lineno
record_dict['pathname'] = record.pathname
record_dict['module'] = record.module
record_dict['funcName'] = record.funcName
record_dict['host'] = self.host
record_dict['process'] = record.process
record_dict['thread'] = record.thread
record_dict['threadName'] = record.threadName
record_dict['created'] = record.created
return record_dict
def format(self, record):
return json_robust_dumps(self.format_dict(record))
class SwagErrorFilter(logging.Filter):
def filter(self, record):
return record.levelno < logging.ERROR
_tmpfunc = lambda: 0
_srcfile = os.path.normcase(_tmpfunc.__code__.co_filename)
class SwagLogger(logging.Logger):
def __init__(self):
logging.Logger.__init__(self, "swaglog")
self.global_ctx = {}
self.log_local = local()
self.log_local.ctx = {}
def local_ctx(self):
try:
return self.log_local.ctx
except AttributeError:
self.log_local.ctx = {}
return self.log_local.ctx
def get_ctx(self):
return dict(self.local_ctx(), **self.global_ctx)
@contextmanager
def ctx(self, **kwargs):
old_ctx = self.local_ctx()
self.log_local.ctx = copy.copy(old_ctx) or {}
self.log_local.ctx.update(kwargs)
try:
yield
finally:
self.log_local.ctx = old_ctx
def bind(self, **kwargs):
self.local_ctx().update(kwargs)
def bind_global(self, **kwargs):
self.global_ctx.update(kwargs)
def event(self, event_name, *args, **kwargs):
evt = NiceOrderedDict()
evt['event'] = event_name
if args:
evt['args'] = args
evt.update(kwargs)
ctx = self.get_ctx()
if ctx:
evt['ctx'] = self.get_ctx()
if 'error' in kwargs:
self.error(evt)
else:
self.info(evt)
if __name__ == "__main__":
log = SwagLogger()
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setLevel(logging.INFO)
stdout_handler.addFilter(SwagErrorFilter())
log.addHandler(stdout_handler)
stderr_handler = logging.StreamHandler(sys.stderr)
stderr_handler.setLevel(logging.ERROR)
log.addHandler(stderr_handler)
log.info("asdasd %s", "a")
log.info({'wut': 1})
log.warning("warning")
log.error("error")
log.critical("critical")
log.event("test", x="y")
with log.ctx():
stdout_handler.setFormatter(SwagFormatter(log))
stderr_handler.setFormatter(SwagFormatter(log))
log.bind(user="some user")
log.info("in req")
print("")
log.warning("warning")
print("")
log.error("error")
print("")
log.critical("critical")
print("")
log.event("do_req", a=1, b="c")

21
common/numpy_fast.py Normal file

@@ -0,0 +1,21 @@
def int_rnd(x):
return int(round(x))
def clip(x, lo, hi):
return max(lo, min(hi, x))
def interp(x, xp, fp):
N = len(xp)
def get_interp(xv):
hi = 0
while hi < N and xv > xp[hi]:
hi += 1
low = hi - 1
return fp[-1] if hi == N and xv > xp[low] else (
fp[0] if hi == 0 else
(xv - xp[low]) * (fp[hi] - fp[low]) / (xp[hi] - xp[low]) + fp[low])
return [get_interp(v) for v in x] if hasattr(
x, '__iter__') else get_interp(x)
def mean(x):
return sum(x) / len(x)
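
interp mirrors numpy.interp for scalars and lists without the numpy import; xp must be sorted ascending:

from common.numpy_fast import interp, clip

print(interp(1.5, [0., 1., 2.], [0., 10., 20.]))        # 15.0
print(interp([-1., 3.], [0., 1., 2.], [0., 10., 20.]))  # [0.0, 20.0], clamped at the ends
print(clip(250, 0, 100))                                # 100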

66
common/numpy_helpers.py Normal file

@@ -0,0 +1,66 @@
import bisect
import numpy as np
from scipy.interpolate import interp1d
def deep_interp_0_fast(dx, x, y):
FIX = False
if len(y.shape) == 1:
y = y[:, None]
FIX = True
ret = np.zeros((dx.shape[0], y.shape[1]))
index = list(x)
for i in range(dx.shape[0]):
idx = bisect.bisect_left(index, dx[i])
if idx == x.shape[0]:
idx = x.shape[0] - 1
ret[i] = y[idx]
if FIX:
return ret[:, 0]
else:
return ret
def running_mean(x, N):
cumsum = np.cumsum(np.insert(x, [0]*(int(N/2)) + [-1]*(N-int(N/2)), [x[0]]*int(N/2) + [x[-1]]*(N-int(N/2))))
return (cumsum[N:] - cumsum[:-N]) / N
def deep_interp_np(x, xp, fp):
x = np.atleast_1d(x)
xp = np.array(xp)
if len(xp) < 2:
return np.repeat(fp, len(x), axis=0)
if min(np.diff(xp)) < 0:
raise RuntimeError('Bad x array for interpolation')
j = np.searchsorted(xp, x) - 1
j = np.clip(j, 0, len(xp)-2)
d = np.divide(x - xp[j], xp[j + 1] - xp[j], out=np.ones_like(x, dtype=np.float64), where=xp[j + 1] - xp[j] != 0)
vals_interp = (fp[j].T*(1 - d)).T + (fp[j + 1].T*d).T
if len(vals_interp) == 1:
return vals_interp[0]
else:
return vals_interp
def clipping_deep_interp(x, xp, fp):
if len(xp) < 2:
return deep_interp_np(x, xp, fp)
bad_idx = np.where(np.diff(xp) < 0)[0]
if len(bad_idx) > 0:
if bad_idx[0] == 1:
return np.zeros([] + list(fp.shape[1:]))
return deep_interp_np(x, xp[:bad_idx[0]], fp[:bad_idx[0]])
else:
return deep_interp_np(x, xp, fp)
def deep_interp(dx, x, y, kind="slinear"):
return interp1d(
x, y,
axis=0,
kind=kind,
bounds_error=False,
fill_value="extrapolate",
assume_sorted=True)(dx)

409
common/params.py Executable file

@@ -0,0 +1,409 @@
#!/usr/bin/env python3
"""ROS has a parameter server, we have files.
The parameter store is a persistent key value store, implemented as a directory with a writer lock.
On Android, we store params under params_dir = /data/params. The writer lock is a file
"<params_dir>/.lock" taken using flock(), and data is stored in a directory symlinked to by
"<params_dir>/d".
Each key, value pair is stored as a file with named <key> with contents <value>, located in
<params_dir>/d/<key>
Readers of a single key can just open("<params_dir>/d/<key>") and read the file contents.
Readers who want a consistent snapshot of multiple keys should take the lock.
Writers should take the lock before modifying anything. Writers should also leave the DB in a
consistent state after a crash. The implementation below does this by copying all params to a temp
directory <params_dir>/<tmp>, then atomically symlinking <params_dir>/<d> to <params_dir>/<tmp>
before deleting the old <params_dir>/<d> directory.
Writers that only modify a single key can simply take the lock, then swap the corresponding value
file in place without messing with <params_dir>/d.
"""
import time
import os
import errno
import sys
import shutil
import fcntl
import tempfile
import threading
from enum import Enum
def mkdirs_exists_ok(path):
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
class TxType(Enum):
PERSISTENT = 1
CLEAR_ON_MANAGER_START = 2
CLEAR_ON_PANDA_DISCONNECT = 3
class UnknownKeyName(Exception):
pass
keys = {
"AccessToken": [TxType.PERSISTENT],
"AthenadPid": [TxType.PERSISTENT],
"CalibrationParams": [TxType.PERSISTENT],
"CarParams": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"CarVin": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"CommunityFeaturesToggle": [TxType.PERSISTENT],
"CompletedTrainingVersion": [TxType.PERSISTENT],
"ControlsParams": [TxType.PERSISTENT],
"DoUninstall": [TxType.CLEAR_ON_MANAGER_START],
"DongleId": [TxType.PERSISTENT],
"GitBranch": [TxType.PERSISTENT],
"GitCommit": [TxType.PERSISTENT],
"GitRemote": [TxType.PERSISTENT],
"GithubSshKeys": [TxType.PERSISTENT],
"HasAcceptedTerms": [TxType.PERSISTENT],
"HasCompletedSetup": [TxType.PERSISTENT],
"IsLdwEnabled": [TxType.PERSISTENT],
"IsGeofenceEnabled": [TxType.PERSISTENT],
"IsMetric": [TxType.PERSISTENT],
"IsOffroad": [TxType.CLEAR_ON_MANAGER_START],
"IsRHD": [TxType.PERSISTENT],
"IsTakingSnapshot": [TxType.CLEAR_ON_MANAGER_START],
"IsUpdateAvailable": [TxType.CLEAR_ON_MANAGER_START],
"IsUploadRawEnabled": [TxType.PERSISTENT],
"LastUpdateTime": [TxType.PERSISTENT],
"LimitSetSpeed": [TxType.PERSISTENT],
"LimitSetSpeedNeural": [TxType.PERSISTENT],
"LiveParameters": [TxType.PERSISTENT],
"LongitudinalControl": [TxType.PERSISTENT],
"OpenpilotEnabledToggle": [TxType.PERSISTENT],
"PandaFirmware": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"PandaFirmwareHex": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"PandaDongleId": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"Passive": [TxType.PERSISTENT],
"RecordFront": [TxType.PERSISTENT],
"ReleaseNotes": [TxType.PERSISTENT],
"ShouldDoUpdate": [TxType.CLEAR_ON_MANAGER_START],
"SpeedLimitOffset": [TxType.PERSISTENT],
"SubscriberInfo": [TxType.PERSISTENT],
"TermsVersion": [TxType.PERSISTENT],
"TrainingVersion": [TxType.PERSISTENT],
"UpdateAvailable": [TxType.CLEAR_ON_MANAGER_START],
"Version": [TxType.PERSISTENT],
"Offroad_ChargeDisabled": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"Offroad_ConnectivityNeeded": [TxType.CLEAR_ON_MANAGER_START],
"Offroad_ConnectivityNeededPrompt": [TxType.CLEAR_ON_MANAGER_START],
"Offroad_TemperatureTooHigh": [TxType.CLEAR_ON_MANAGER_START],
"Offroad_PandaFirmwareMismatch": [TxType.CLEAR_ON_MANAGER_START, TxType.CLEAR_ON_PANDA_DISCONNECT],
"Offroad_InvalidTime": [TxType.CLEAR_ON_MANAGER_START],
"Offroad_IsTakingSnapshot": [TxType.CLEAR_ON_MANAGER_START],
}
def fsync_dir(path):
fd = os.open(path, os.O_RDONLY)
try:
os.fsync(fd)
finally:
os.close(fd)
class FileLock():
def __init__(self, path, create):
self._path = path
self._create = create
self._fd = None
def acquire(self):
self._fd = os.open(self._path, os.O_CREAT if self._create else 0)
fcntl.flock(self._fd, fcntl.LOCK_EX)
def release(self):
if self._fd is not None:
os.close(self._fd)
self._fd = None
class DBAccessor():
def __init__(self, path):
self._path = path
self._vals = None
def keys(self):
self._check_entered()
return self._vals.keys()
def get(self, key):
self._check_entered()
try:
return self._vals[key]
except KeyError:
return None
def _get_lock(self, create):
lock = FileLock(os.path.join(self._path, ".lock"), create)
lock.acquire()
return lock
def _read_values_locked(self):
"""Callers should hold a lock while calling this method."""
vals = {}
try:
data_path = self._data_path()
keys = os.listdir(data_path)
for key in keys:
with open(os.path.join(data_path, key), "rb") as f:
vals[key] = f.read()
except (OSError, IOError) as e:
# Either the DB hasn't been created yet, or somebody wrote a bug and left the DB in an
# inconsistent state. Either way, return empty.
if e.errno == errno.ENOENT:
return {}
return vals
def _data_path(self):
return os.path.join(self._path, "d")
def _check_entered(self):
if self._vals is None:
raise Exception("Must call __enter__ before using DB")
class DBReader(DBAccessor):
def __enter__(self):
try:
lock = self._get_lock(False)
except OSError as e:
# Do not create lock if it does not exist.
if e.errno == errno.ENOENT:
self._vals = {}
return self
try:
# Read everything.
self._vals = self._read_values_locked()
return self
finally:
lock.release()
def __exit__(self, type, value, traceback): pass
class DBWriter(DBAccessor):
def __init__(self, path):
super(DBWriter, self).__init__(path)
self._lock = None
self._prev_umask = None
def put(self, key, value):
self._vals[key] = value
def delete(self, key):
self._vals.pop(key, None)
def __enter__(self):
mkdirs_exists_ok(self._path)
# Make sure we can write and that permissions are correct.
self._prev_umask = os.umask(0)
try:
os.chmod(self._path, 0o777)
self._lock = self._get_lock(True)
self._vals = self._read_values_locked()
except:
os.umask(self._prev_umask)
self._prev_umask = None
raise
return self
def __exit__(self, type, value, traceback):
self._check_entered()
try:
# data_path refers to the externally used path to the params. It is a symlink.
# old_data_path is the path currently pointed to by data_path.
# tempdir_path is a path where the new params will go, which the new data path will point to.
# new_data_path is a temporary symlink that will atomically overwrite data_path.
#
# The current situation is:
# data_path -> old_data_path
# We're going to write params data to tempdir_path
# tempdir_path -> params data
# Then point new_data_path to tempdir_path
# new_data_path -> tempdir_path
# Then atomically overwrite data_path with new_data_path
# data_path -> tempdir_path
old_data_path = None
new_data_path = None
tempdir_path = tempfile.mkdtemp(prefix=".tmp", dir=self._path)
try:
# Write back all keys.
os.chmod(tempdir_path, 0o777)
for k, v in self._vals.items():
with open(os.path.join(tempdir_path, k), "wb") as f:
f.write(v)
f.flush()
os.fsync(f.fileno())
fsync_dir(tempdir_path)
data_path = self._data_path()
try:
old_data_path = os.path.join(self._path, os.readlink(data_path))
except (OSError, IOError):
# NOTE(mgraczyk): If other DB implementations have bugs, this could cause
# copies to be left behind, but we still want to overwrite.
pass
new_data_path = "{}.link".format(tempdir_path)
os.symlink(os.path.basename(tempdir_path), new_data_path)
os.rename(new_data_path, data_path)
fsync_dir(self._path)
finally:
# If the rename worked, we can delete the old data. Otherwise delete the new one.
success = new_data_path is not None and os.path.exists(data_path) and (
os.readlink(data_path) == os.path.basename(tempdir_path))
if success:
if old_data_path is not None:
shutil.rmtree(old_data_path)
else:
shutil.rmtree(tempdir_path)
# Regardless of what happened above, there should be no link at new_data_path.
if new_data_path is not None and os.path.islink(new_data_path):
os.remove(new_data_path)
finally:
os.umask(self._prev_umask)
self._prev_umask = None
# Always release the lock.
self._lock.release()
self._lock = None
def read_db(params_path, key):
path = "%s/d/%s" % (params_path, key)
try:
with open(path, "rb") as f:
return f.read()
except IOError:
return None
def write_db(params_path, key, value):
if isinstance(value, str):
value = value.encode('utf8')
prev_umask = os.umask(0)
lock = FileLock(params_path+"/.lock", True)
lock.acquire()
try:
tmp_path = tempfile.mktemp(prefix=".tmp", dir=params_path)
with open(tmp_path, "wb") as f:
f.write(value)
f.flush()
os.fsync(f.fileno())
path = "%s/d/%s" % (params_path, key)
os.rename(tmp_path, path)
fsync_dir(os.path.dirname(path))
finally:
os.umask(prev_umask)
lock.release()
class Params():
def __init__(self, db='/data/params'):
self.db = db
# create the database if it doesn't exist...
if not os.path.exists(self.db+"/d"):
with self.transaction(write=True):
pass
def transaction(self, write=False):
if write:
return DBWriter(self.db)
else:
return DBReader(self.db)
def _clear_keys_with_type(self, tx_type):
with self.transaction(write=True) as txn:
for key in keys:
if tx_type in keys[key]:
txn.delete(key)
def manager_start(self):
self._clear_keys_with_type(TxType.CLEAR_ON_MANAGER_START)
def panda_disconnect(self):
self._clear_keys_with_type(TxType.CLEAR_ON_PANDA_DISCONNECT)
def delete(self, key):
with self.transaction(write=True) as txn:
txn.delete(key)
def get(self, key, block=False, encoding=None):
if key not in keys:
raise UnknownKeyName(key)
while 1:
ret = read_db(self.db, key)
if not block or ret is not None:
break
# is polling really the best we can do?
time.sleep(0.05)
if ret is not None and encoding is not None:
ret = ret.decode(encoding)
return ret
def put(self, key, dat):
"""
Warning: This function blocks until the param is written to disk!
In very rare cases this can take over a second, and your code will hang.
Use the put_nonblocking helper function in time sensitive code, but
in general try to avoid writing params as much as possible.
"""
if key not in keys:
raise UnknownKeyName(key)
write_db(self.db, key, dat)
def put_nonblocking(key, val):
def f(key, val):
params = Params()
params.put(key, val)
t = threading.Thread(target=f, args=(key, val))
t.start()
return t
if __name__ == "__main__":
params = Params()
if len(sys.argv) > 2:
params.put(sys.argv[1], sys.argv[2])
else:
for k in keys:
pp = params.get(k)
if pp is None:
print("%s is None" % k)
elif all(32 <= c < 128 for c in pp):
print("%s = %s" % (k, pp.decode("utf8")))
else:
print("%s = %s" % (k, pp.hex()))
# Test multiprocess:
# seq 0 100000 | xargs -P20 -I{} python common/params.py DongleId {} && sleep 0.05
# while python common/params.py DongleId; do sleep 0.05; done
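
Typical in-process use (keys must appear in the table above, otherwise UnknownKeyName is raised):

from common.params import Params, put_nonblocking

params = Params()
params.put("DongleId", "0123456789abcdef")      # blocks until the value is fsync'd
print(params.get("DongleId", encoding='utf8'))  # "0123456789abcdef"
put_nonblocking("IsMetric", "1")                # background thread, for time-sensitive code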

163
common/peakdetect.py Normal file

@@ -0,0 +1,163 @@
#!/usr/bin/env python3
# Copyright (C) 2016 Sixten Bergman
# License WTFPL
#
# This program is free software. It comes without any warranty, to the extent
# permitted by applicable law.
# You can redistribute it and/or modify it under the terms of the Do What The
# Fuck You Want To Public License, Version 2, as published by Sam Hocevar. See
# http://www.wtfpl.net/ for more details.
#
# note that the function peakdetect is derived from code which was released to
# public domain see: http://billauer.co.il/peakdet.html
#
from math import log
import numpy as np
__all__ = ["peakdetect"]
def _datacheck_peakdetect(x_axis, y_axis):
if x_axis is None:
x_axis = range(len(y_axis))
if len(y_axis) != len(x_axis):
raise ValueError("Input vectors y_axis and x_axis must have same length")
#needs to be a numpy array
y_axis = np.array(y_axis)
x_axis = np.array(x_axis)
return x_axis, y_axis
def _pad(fft_data, pad_len):
"""
Pads fft data to interpolate in time domain
keyword arguments:
fft_data -- the fft
pad_len -- by how many times the time resolution should be increased
return: padded list
"""
length = len(fft_data)
n = _n(length * pad_len)
fft_data = list(fft_data)
return fft_data[:length // 2] + [0] * (2**n-length) + fft_data[length // 2:]
def _n(x):
"""
Find the smallest value for n, which fulfils 2**n >= x
keyword arguments:
x -- the value, which 2**n must surpass
return: the integer n
"""
return int(log(x)/log(2)) + 1
def peakdetect(y_axis, x_axis=None, lookahead=200, delta=0):
"""
Converted from/based on a MATLAB script at:
http://billauer.co.il/peakdet.html
function for detecting local maxima and minima in a signal.
Discovers peaks by searching for values which are surrounded by lower
or larger values for maxima and minima respectively
keyword arguments:
y_axis -- A list containing the signal over which to find peaks
x_axis -- A x-axis whose values correspond to the y_axis list and is used
in the return to specify the position of the peaks. If omitted an
index of the y_axis is used.
(default: None)
lookahead -- distance to look ahead from a peak candidate to determine if
it is the actual peak
(default: 200)
'(samples / period) / f' where '4 >= f >= 1.25' might be a good value
delta -- this specifies a minimum difference between a peak and
the following points, before a peak may be considered a peak. Useful
to hinder the function from picking up false peaks towards the end of
the signal. To work well delta should be set to delta >= RMSnoise * 5.
(default: 0)
Omitting delta causes a 20% decrease in speed; when
used correctly it can double the speed of the function.
return: two lists [max_peaks, min_peaks] containing the positive and
negative peaks respectively. Each cell of the lists contains a tuple
of: (position, peak_value)
to get the average peak value do: np.mean(max_peaks, 0)[1] on the
results to unpack one of the lists into x, y coordinates do:
x, y = zip(*max_peaks)
"""
max_peaks = []
min_peaks = []
dump = [] # Used to pop the first hit which almost always is false
# check input data
x_axis, y_axis = _datacheck_peakdetect(x_axis, y_axis)
# store data length for later use
length = len(y_axis)
#perform some checks
if lookahead < 1:
raise ValueError("Lookahead must be '1' or above in value")
if not (np.isscalar(delta) and delta >= 0):
raise ValueError("delta must be a positive number")
#maxima and minima candidates are temporarily stored in
#mx and mn respectively
mn, mx = np.Inf, -np.Inf
#Only detect peak if there is 'lookahead' amount of points after it
for index, (x, y) in enumerate(zip(x_axis[:-lookahead],
y_axis[:-lookahead])):
if y > mx:
mx = y
mxpos = x
if y < mn:
mn = y
mnpos = x
####look for max####
if y < mx-delta and mx != np.Inf:
#Maxima peak candidate found
#look ahead in signal to ensure that this is a peak and not jitter
if y_axis[index:index+lookahead].max() < mx:
max_peaks.append([mxpos, mx])
dump.append(True)
#set algorithm to only find minima now
mx = np.Inf
mn = np.Inf
if index+lookahead >= length:
#end is within lookahead no more peaks can be found
break
continue
#else: #slows shit down this does
# mx = ahead
# mxpos = x_axis[np.where(y_axis[index:index+lookahead]==mx)]
####look for min####
if y > mn+delta and mn != -np.Inf:
#Minima peak candidate found
#look ahead in signal to ensure that this is a peak and not jitter
if y_axis[index:index+lookahead].min() > mn:
min_peaks.append([mnpos, mn])
dump.append(False)
#set algorithm to only find maxima now
mn = -np.Inf
mx = -np.Inf
if index+lookahead >= length:
#end is within lookahead no more peaks can be found
break
#else: #slows shit down this does
# mn = ahead
# mnpos = x_axis[np.where(y_axis[index:index+lookahead]==mn)]
#Remove the false hit on the first value of the y_axis
try:
if dump[0]:
max_peaks.pop(0)
else:
min_peaks.pop(0)
del dump
except IndexError:
#no peaks were found, should the function return empty lists?
pass
return [max_peaks, min_peaks]
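
A quick sanity check on a clean sine wave (return format per the docstring):

import numpy as np
from common.peakdetect import peakdetect

t = np.linspace(0, 4*np.pi, 1000)
max_peaks, min_peaks = peakdetect(np.sin(t), t, lookahead=50)
# the first detected extremum is dropped as a presumed false hit, so this
# yields one maximum (near 5*pi/2) and two minima
x, y = zip(*max_peaks)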

46
common/profiler.py Normal file

@@ -0,0 +1,46 @@
import time
class Profiler():
def __init__(self, enabled=False):
self.enabled = enabled
self.cp = {}
self.cp_ignored = []
self.iter = 0
self.start_time = time.time()
self.last_time = self.start_time
self.tot = 0.
def reset(self, enabled=False):
self.enabled = enabled
self.cp = {}
self.cp_ignored = []
self.iter = 0
self.start_time = time.time()
self.last_time = self.start_time
def checkpoint(self, name, ignore=False):
# ignore flag needed when benchmarking threads with ratekeeper
if not self.enabled:
return
tt = time.time()
if name not in self.cp:
self.cp[name] = 0.
if ignore:
self.cp_ignored.append(name)
self.cp[name] += tt - self.last_time
if not ignore:
self.tot += tt - self.last_time
self.last_time = tt
def display(self):
if not self.enabled:
return
self.iter += 1
print("******* Profiling *******")
for n, ms in sorted(self.cp.items(), key=lambda x: -x[1]):
if n in self.cp_ignored:
print("%30s: %7.2f percent: %3.0f IGNORED" % (n, ms*1000.0, ms/self.tot*100))
else:
print("%30s: %7.2f percent: %3.0f" % (n, ms*1000.0, ms/self.tot*100))
print("Iter clock: %2.6f TOTAL: %2.2f" % (self.tot/self.iter, self.tot))

74
common/realtime.py Normal file

@@ -0,0 +1,74 @@
"""Utilities for reading real time clocks and keeping soft real time constraints."""
import os
import time
import platform
import subprocess
import multiprocessing
from cffi import FFI
from common.common_pyx import sec_since_boot # pylint: disable=no-name-in-module, import-error
# time step for each process
DT_CTRL = 0.01 # controlsd
DT_MDL = 0.05 # model
DT_DMON = 0.1 # driver monitoring
DT_TRML = 0.5 # thermald and manager
ffi = FFI()
ffi.cdef("long syscall(long number, ...);")
libc = ffi.dlopen(None)
def set_realtime_priority(level):
if os.getuid() != 0:
print("not setting priority, not root")
return
if platform.machine() == "x86_64":
NR_gettid = 186
elif platform.machine() == "aarch64":
NR_gettid = 178
else:
raise NotImplementedError
tid = libc.syscall(NR_gettid)
return subprocess.call(['chrt', '-f', '-p', str(level), str(tid)])
class Ratekeeper():
def __init__(self, rate, print_delay_threshold=0.):
"""Rate in Hz for ratekeeping. print_delay_threshold must be nonnegative."""
self._interval = 1. / rate
self._next_frame_time = sec_since_boot() + self._interval
self._print_delay_threshold = print_delay_threshold
self._frame = 0
self._remaining = 0
self._process_name = multiprocessing.current_process().name
@property
def frame(self):
return self._frame
@property
def remaining(self):
return self._remaining
# Maintain loop rate by calling this at the end of each loop
def keep_time(self):
lagged = self.monitor_time()
if self._remaining > 0:
time.sleep(self._remaining)
return lagged
# this only monitors the cumulative lag, it does not enforce a rate
def monitor_time(self):
lagged = False
remaining = self._next_frame_time - sec_since_boot()
self._next_frame_time += self._interval
if self._print_delay_threshold is not None and remaining < -self._print_delay_threshold:
print("%s lagging by %.2f ms" % (self._process_name, -remaining * 1000))
lagged = True
self._frame += 1
self._remaining = remaining
return lagged
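A sketch of the intended usage (assuming the compiled `common_pyx` extension is available): construct a `Ratekeeper` at the loop rate and call `keep_time()` once per iteration.

```
from common.realtime import Ratekeeper

rk = Ratekeeper(100, print_delay_threshold=0.005)  # 100 Hz soft real time loop
for _ in range(1000):
  # per-iteration work goes here
  lagged = rk.keep_time()  # sleeps off the rest of the 10 ms budget
  if lagged:
    pass  # we fell behind by more than print_delay_threshold
```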

78
common/sampling_buffer.py Normal file
View File

@ -0,0 +1,78 @@
import os
import numpy as np
import random
class SamplingBuffer():
def __init__(self, fn, size, write=False):
self._fn = fn
self._write = write
if self._write:
self._f = open(self._fn, "ab")
else:
self._f = open(self._fn, "rb")
self._size = size
self._refresh()
def _refresh(self):
self.cnt = os.path.getsize(self._fn) // self._size  # whole records only
@property
def count(self):
self._refresh()
return self.cnt
def _fetch_one(self, x):
assert not self._write
self._f.seek(x*self._size)
return self._f.read(self._size)
def sample(self, count, indices=None):
if indices is None:
cnt = self.count
assert cnt != 0
indices = [random.randint(0, cnt - 1) for _ in range(count)]
return [self._fetch_one(i) for i in indices]
def write(self, dat):
assert self._write
assert (len(dat) % self._size) == 0
self._f.write(dat)
self._f.flush()
class NumpySamplingBuffer():
def __init__(self, fn, size, dtype, write=False):
self._size = size
self._dtype = dtype
self._buf = SamplingBuffer(fn, len(np.zeros(size, dtype=dtype).tobytes()), write)
@property
def count(self):
return self._buf.count
def write(self, dat):
self._buf.write(dat.tobytes())
def sample(self, count, indices = None):
return np.frombuffer(b''.join(self._buf.sample(count, indices)), dtype=self._dtype).reshape([count] + list(self._size))
# TODO: n IOPS needed where n is the number of buffers in the Multi
class MultiNumpySamplingBuffer():
def __init__(self, fn, npa, write=False):
self._bufs = []
for i,n in enumerate(npa):
self._bufs.append(NumpySamplingBuffer(fn + ("_%d" % i), n[0], n[1], write))
def write(self, dat):
for b,x in zip(self._bufs, dat):
b.write(x)
@property
def count(self):
return min(map(lambda x: x.count, self._bufs))
def sample(self, count):
cnt = self.count
assert cnt != 0
indices = [random.randint(0, cnt - 1) for _ in range(count)]  # shared across buffers, so materialize a list
return [b.sample(count, indices) for b in self._bufs]
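A sketch of the write/sample round trip with the numpy wrapper; the file path is chosen for illustration:

```
import numpy as np
from common.sampling_buffer import NumpySamplingBuffer

# append three records of four float32s each
w = NumpySamplingBuffer("/tmp/demo_buf", (4,), np.float32, write=True)
w.write(np.arange(12, dtype=np.float32))

# sample two records (with replacement) from a read-only handle
r = NumpySamplingBuffer("/tmp/demo_buf", (4,), np.float32)
batch = r.sample(2)  # ndarray of shape (2, 4)
```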

63
common/spinner.py Normal file
View File

@ -0,0 +1,63 @@
import os
import subprocess
from common.basedir import BASEDIR
class Spinner():
def __init__(self):
try:
self.spinner_proc = subprocess.Popen(["./spinner"],
stdin=subprocess.PIPE,
cwd=os.path.join(BASEDIR, "selfdrive", "ui", "spinner"),
close_fds=True)
except OSError:
self.spinner_proc = None
def __enter__(self):
return self
def update(self, spinner_text):
if self.spinner_proc is not None:
self.spinner_proc.stdin.write(spinner_text.encode('utf8') + b"\n")
try:
self.spinner_proc.stdin.flush()
except BrokenPipeError:
pass
def close(self):
if self.spinner_proc is not None:
try:
self.spinner_proc.stdin.close()
except BrokenPipeError:
pass
self.spinner_proc.terminate()
self.spinner_proc = None
def __del__(self):
self.close()
def __exit__(self, type, value, traceback):
self.close()
class FakeSpinner():
def __init__(self):
pass
def __enter__(self):
return self
def update(self, _):
pass
def __exit__(self, type, value, traceback):
pass
if __name__ == "__main__":
import time
with Spinner() as s:
s.update("Spinner text")
time.sleep(5.0)
print("gone")
time.sleep(5.0)

73
common/stat_live.py Normal file
View File

@ -0,0 +1,73 @@
import numpy as np
class RunningStat():
# tracks realtime mean and standard deviation without storing any data
def __init__(self, priors=None, max_trackable=-1):
self.max_trackable = max_trackable
if priors is not None:
# initialize from history
self.M = priors[0]
self.S = priors[1]
self.n = priors[2]
self.M_last = self.M
self.S_last = self.S
else:
self.reset()
def reset(self):
self.M = 0.
self.S = 0.
self.M_last = 0.
self.S_last = 0.
self.n = 0
def push_data(self, new_data):
# short term memory hack
if self.max_trackable < 0 or self.n < self.max_trackable:
self.n += 1
if self.n == 0:
self.M_last = new_data
self.M = self.M_last
self.S_last = 0.
else:
self.M = self.M_last + (new_data - self.M_last) / self.n
self.S = self.S_last + (new_data - self.M_last) * (new_data - self.M)
self.M_last = self.M
self.S_last = self.S
def mean(self):
return self.M
def variance(self):
if self.n >= 2:
return self.S / (self.n - 1.)
else:
return 0
def std(self):
return np.sqrt(self.variance())
def params_to_save(self):
return [self.M, self.S, self.n]
class RunningStatFilter():
def __init__(self, raw_priors=None, filtered_priors=None, max_trackable=-1):
self.raw_stat = RunningStat(raw_priors, max_trackable)
self.filtered_stat = RunningStat(filtered_priors, max_trackable)
def reset(self):
self.raw_stat.reset()
self.filtered_stat.reset()
def push_and_update(self, new_data):
_std_last = self.raw_stat.std()
self.raw_stat.push_data(new_data)
_delta_std = self.raw_stat.std() - _std_last
if _delta_std <= 0:
self.filtered_stat.push_data(new_data)
else:
pass
# self.filtered_stat.push_data(self.filtered_stat.mean())
# class SequentialBayesian():
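The recurrence in `push_data` is Welford's online mean/variance algorithm; a quick sanity check against numpy (a sketch):

```
import numpy as np
from common.stat_live import RunningStat

rs = RunningStat()
data = [1.0, 4.0, 2.0, 8.0]
for x in data:
  rs.push_data(x)

assert abs(rs.mean() - np.mean(data)) < 1e-12
assert abs(rs.variance() - np.var(data, ddof=1)) < 1e-12  # S/(n-1) is the sample variance
```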

94
common/stat_tracker.py Normal file
View File

@ -0,0 +1,94 @@
import numpy as np
_DESC_FMT = """
{} (n={}):
MEAN={}
VAR={}
MIN={}
MAX={}
"""
class StatTracker():
def __init__(self, name):
self._name = name
self._mean = 0.
self._var = 0.
self._n = 0
self._min = float("inf")
self._max = float("-inf")
@property
def mean(self):
return self._mean
@property
def var(self):
return (self._n * self._var) / (self._n - 1.)
@property
def min(self):
return self._min
@property
def max(self):
return self._max
def update(self, samples):
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
data = samples.reshape(-1)
n_a = data.size
mean_a = np.mean(data)
var_a = np.var(data, ddof=0)
n_b = self._n
mean_b = self._mean
delta = mean_b - mean_a
m_a = var_a * (n_a - 1)
m_b = self._var * (n_b - 1)
m2 = m_a + m_b + delta**2 * n_a * n_b / (n_a + n_b)
self._var = m2 / (n_a + n_b)
self._mean = (n_a * mean_a + n_b * mean_b) / (n_a + n_b)
self._n = n_a + n_b
self._min = min(self._min, np.min(data))
self._max = max(self._max, np.max(data))
def __str__(self):
return _DESC_FMT.format(self._name, self._n, self._mean, self.var, self._min,
self._max)
# FIXME(mgraczyk): The variance computation does not work with 1 sample batches.
class VectorStatTracker(StatTracker):
def __init__(self, name, dim):
self._name = name
self._mean = np.zeros((dim, ))
self._var = np.zeros((dim, dim))
self._n = 0
self._min = np.full((dim, ), float("inf"))
self._max = np.full((dim, ), float("-inf"))
@property
def cov(self):
return self.var
def update(self, samples):
n_a = samples.shape[0]
mean_a = np.mean(samples, axis=0)
var_a = np.cov(samples, ddof=0, rowvar=False)
n_b = self._n
mean_b = self._mean
delta = mean_b - mean_a
m_a = var_a * (n_a - 1)
m_b = self._var * (n_b - 1)
m2 = m_a + m_b + delta**2 * n_a * n_b / (n_a + n_b)
self._var = m2 / (n_a + n_b)
self._mean = (n_a * mean_a + n_b * mean_b) / (n_a + n_b)
self._n = n_a + n_b
self._min = np.minimum(self._min, np.min(samples, axis=0))
self._max = np.maximum(self._max, np.max(samples, axis=0))
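`update` merges each batch into the running statistics with the parallel variance formula (Chan et al.) linked above; a usage sketch:

```
import numpy as np
from common.stat_tracker import StatTracker

st = StatTracker("latency_ms")
st.update(np.random.randn(100))  # first batch
st.update(np.random.randn(50))   # merged into the running mean/var/min/max
print(st)                        # formatted with _DESC_FMT above
```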

6
common/string_helpers.py Normal file
View File

@ -0,0 +1,6 @@
def replace_right(s, old, new, occurrence):
# replace_right('1232425', '2', ' ', 1) -> '12324 5'
# replace_right('1232425', '2', ' ', 2) -> '123 4 5'
split = s.rsplit(old, occurrence)
return new.join(split)

81
common/sympy_helpers.py Normal file
View File

@ -0,0 +1,81 @@
#!/usr/bin/env python3
import sympy as sp
import numpy as np
def cross(x):
ret = sp.Matrix(np.zeros((3,3)))
ret[0,1], ret[0,2] = -x[2], x[1]
ret[1,0], ret[1,2] = x[2], -x[0]
ret[2,0], ret[2,1] = -x[1], x[0]
return ret
def euler_rotate(roll, pitch, yaw):
# make symbolic rotation matrix from eulers
matrix_roll = sp.Matrix([[1, 0, 0],
[0, sp.cos(roll), -sp.sin(roll)],
[0, sp.sin(roll), sp.cos(roll)]])
matrix_pitch = sp.Matrix([[sp.cos(pitch), 0, sp.sin(pitch)],
[0, 1, 0],
[-sp.sin(pitch), 0, sp.cos(pitch)]])
matrix_yaw = sp.Matrix([[sp.cos(yaw), -sp.sin(yaw), 0],
[sp.sin(yaw), sp.cos(yaw), 0],
[0, 0, 1]])
return matrix_yaw*matrix_pitch*matrix_roll
def quat_rotate(q0, q1, q2, q3):
# make symbolic rotation matrix from quat
return sp.Matrix([[q0**2 + q1**2 - q2**2 - q3**2, 2*(q1*q2 + q0*q3), 2*(q1*q3 - q0*q2)],
[2*(q1*q2 - q0*q3), q0**2 - q1**2 + q2**2 - q3**2, 2*(q2*q3 + q0*q1)],
[2*(q1*q3 + q0*q2), 2*(q2*q3 - q0*q1), q0**2 - q1**2 - q2**2 + q3**2]]).T
def quat_matrix_l(p):
return sp.Matrix([[p[0], -p[1], -p[2], -p[3]],
[p[1], p[0], -p[3], p[2]],
[p[2], p[3], p[0], -p[1]],
[p[3], -p[2], p[1], p[0]]])
def quat_matrix_r(p):
return sp.Matrix([[p[0], -p[1], -p[2], -p[3]],
[p[1], p[0], p[3], -p[2]],
[p[2], -p[3], p[0], p[1]],
[p[3], p[2], -p[1], p[0]]])
def sympy_into_c(sympy_functions):
from sympy.utilities import codegen
routines = []
for name, expr, args in sympy_functions:
r = codegen.make_routine(name, expr, language="C99")
# sympy's argument ordering is broken for functions with output arguments
nargs = []
# reorder the input arguments
for aa in args:
if aa is None:
nargs.append(codegen.InputArgument(sp.Symbol('unused'), dimensions=[1,1]))
continue
found = False
for a in r.arguments:
if str(aa.name) == str(a.name):
nargs.append(a)
found = True
break
if not found:
# [1,1] is a hack for Matrices
nargs.append(codegen.InputArgument(aa, dimensions=[1,1]))
# add the output arguments
for a in r.arguments:
if type(a) == codegen.OutputArgument:
nargs.append(a)
#assert len(r.arguments) == len(args)+1
r.arguments = nargs
# add routine to list
routines.append(r)
[(c_name, c_code), (h_name, c_header)] = codegen.get_code_generator('C', 'ekf', 'C99').write(routines, "ekf")
c_code = '\n'.join(x for x in c_code.split("\n") if len(x) > 0 and x[0] != '#')
c_header = '\n'.join(x for x in c_header.split("\n") if len(x) > 0 and x[0] != '#')
return c_header, c_code
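A sketch of generating C from a scalar sympy expression with the helper above; the function name is illustrative:

```
import sympy as sp
from common.sympy_helpers import sympy_into_c

roll, pitch = sp.symbols("roll pitch")
expr = sp.sin(roll) * sp.cos(pitch)
# returns (header, code): C99 source strings with preprocessor lines stripped
header, code = sympy_into_c([("demo_fun", expr, [roll, pitch])])
```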

9
common/testing.py Normal file
View File

@ -0,0 +1,9 @@
import os
from nose.tools import nottest
def phone_only(x):
if os.path.isfile("/init.qcom.rc"):
return x
else:
return nottest(x)

0
common/tests/__init__.py Normal file
View File

28
common/tests/test_file_helpers.py Normal file
View File

@ -0,0 +1,28 @@
import os
import unittest
from uuid import uuid4
from common.file_helpers import atomic_write_on_fs_tmp
from common.file_helpers import atomic_write_in_dir
class TestFileHelpers(unittest.TestCase):
def run_atomic_write_func(self, atomic_write_func):
path = "/tmp/tmp{}".format(uuid4())
with atomic_write_func(path) as f:
f.write("test")
with open(path) as f:
self.assertEqual(f.read(), "test")
self.assertEqual(os.stat(path).st_mode & 0o777, 0o644)
os.remove(path)
def test_atomic_write_on_fs_tmp(self):
self.run_atomic_write_func(atomic_write_on_fs_tmp)
def test_atomic_write_in_dir(self):
self.run_atomic_write_func(atomic_write_in_dir)
if __name__ == "__main__":
unittest.main()

27
common/tests/test_numpy_fast.py Normal file
View File

@ -0,0 +1,27 @@
import numpy as np
import unittest
import timeit
from common.numpy_fast import interp
class InterpTest(unittest.TestCase):
def test_correctness_controls(self):
_A_CRUISE_MIN_BP = np.asarray([0., 5., 10., 20., 40.])
_A_CRUISE_MIN_V = np.asarray([-1.0, -.8, -.67, -.5, -.30])
v_ego_arr = [-1, -1e-12, 0, 4, 5, 6, 7, 10, 11, 15.2, 20, 21, 39,
39.999999, 40, 41]
expected = np.interp(v_ego_arr, _A_CRUISE_MIN_BP, _A_CRUISE_MIN_V)
actual = interp(v_ego_arr, _A_CRUISE_MIN_BP, _A_CRUISE_MIN_V)
np.testing.assert_equal(actual, expected)
for v_ego in v_ego_arr:
expected = np.interp(v_ego, _A_CRUISE_MIN_BP, _A_CRUISE_MIN_V)
actual = interp(v_ego, _A_CRUISE_MIN_BP, _A_CRUISE_MIN_V)
np.testing.assert_equal(actual, expected)
if __name__ == "__main__":
unittest.main()

63
common/tests/test_params.py Normal file
View File

@ -0,0 +1,63 @@
from common.params import Params, UnknownKeyName
import threading
import time
import tempfile
import shutil
import unittest
class TestParams(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
print("using", self.tmpdir)
self.params = Params(self.tmpdir)
def tearDown(self):
shutil.rmtree(self.tmpdir)
def test_params_put_and_get(self):
self.params.put("DongleId", "cb38263377b873ee")
assert self.params.get("DongleId") == b"cb38263377b873ee"
def test_params_non_ascii(self):
st = b"\xe1\x90\xff"
self.params.put("CarParams", st)
assert self.params.get("CarParams") == st
def test_params_get_cleared_panda_disconnect(self):
self.params.put("CarParams", "test")
self.params.put("DongleId", "cb38263377b873ee")
assert self.params.get("CarParams") == b"test"
self.params.panda_disconnect()
assert self.params.get("CarParams") is None
assert self.params.get("DongleId") is not None
def test_params_get_cleared_manager_start(self):
self.params.put("CarParams", "test")
self.params.put("DongleId", "cb38263377b873ee")
assert self.params.get("CarParams") == b"test"
self.params.manager_start()
assert self.params.get("CarParams") is None
assert self.params.get("DongleId") is not None
def test_params_two_things(self):
self.params.put("DongleId", "bob")
self.params.put("AccessToken", "knope")
assert self.params.get("DongleId") == b"bob"
assert self.params.get("AccessToken") == b"knope"
def test_params_get_block(self):
def _delayed_writer():
time.sleep(0.1)
self.params.put("CarParams", "test")
threading.Thread(target=_delayed_writer).start()
assert self.params.get("CarParams") is None
assert self.params.get("CarParams", True) == b"test"
def test_params_unknown_key_fails(self):
with self.assertRaises(UnknownKeyName):
self.params.get("swag")
if __name__ == "__main__":
unittest.main()

28
common/timeout.py Normal file
View File

@ -0,0 +1,28 @@
import signal
class TimeoutException(Exception):
pass
class Timeout:
"""
Timeout context manager.
For example this code will raise a TimeoutException:
with Timeout(seconds=5, error_msg="Sleep was too long"):
time.sleep(10)
"""
def __init__(self, seconds, error_msg=None):
if error_msg is None:
error_msg = 'Timed out after {} seconds'.format(seconds)
self.seconds = seconds
self.error_msg = error_msg
def handle_timeout(self, signum, frame):
raise TimeoutException(self.error_msg)
def __enter__(self):
signal.signal(signal.SIGALRM, self.handle_timeout)
signal.alarm(self.seconds)
def __exit__(self, exc_type, exc_val, exc_tb):
signal.alarm(0)

61
common/transformations/README.md Normal file
View File

@ -0,0 +1,61 @@
Reference Frames
------
Many reference frames are used throughout. This
folder contains all helper functions needed to
transform between them. Generally this is done
by generating a rotation matrix and multiplying.
| Name | [x, y, z] | Units | Notes |
| :-------------: |:-------------:| :-----:| :----: |
| Geodetic | [Latitude, Longitude, Altitude] | geodetic coordinates | Sometimes used as [lon, lat, alt], avoid this frame. |
| ECEF | [x, y, z] | meters | We use **ITRF14 (IGS14)**, NOT NAD83. <br> This is the global Mesh3D frame. |
| NED | [North, East, Down] | meters | Relative to earth's surface, useful for visualizing. |
| Device | [Forward, Right, Down] | meters | This is the Mesh3D local frame. <br> Relative to camera, **not imu.** <br> ![img](http://upload.wikimedia.org/wikipedia/commons/thumb/2/2f/RPY_angles_of_airplanes.png/440px-RPY_angles_of_airplanes.png)|
| Road | [Forward, Left, Up] | meters | On the road plane aligned to the vehicle. <br> ![img](https://upload.wikimedia.org/wikipedia/commons/f/f5/RPY_angles_of_cars.png) |
| View | [Right, Down, Forward] | meters | Like device frame, but according to camera conventions. |
| Camera | [u, v, focal] | pixels | Like view frame, but 2d on the camera image.|
| Normalized Camera | [u / focal, v / focal, 1] | / | |
| Model | [u, v, focal] | pixels | The sampled rectangle of the full camera frame the model uses. |
| Normalized Model | [u / focal, v / focal, 1] | / | |
Orientation Conventions
------
Quaternions, rotation matrices and euler angles are three
equivalent representations of orientation and all three are
used throughout the code base.
For euler angles the preferred convention is [roll, pitch, yaw]
which corresponds to rotations around the [x, y, z] axes. All
euler angles should always be in radians or radians/s, except
when used for plotting or display purposes. For quaternions the
Hamilton notation is preferred, which is [q<sub>w</sub>, q<sub>x</sub>, q<sub>y</sub>, q<sub>z</sub>]. All quaternions
should always be normalized with a strictly positive q<sub>w</sub>. **These
quaternions are a unique representation of orientation whereas euler angles
or rotation matrices are not.**
To rotate from one frame into another with euler angles the
convention is to rotate around roll, then pitch and then yaw,
while rotating around the rotated axes, not the original axes.
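For example, the rotation matrix built from [roll, pitch, yaw] composes as yaw · pitch · roll; a sketch using the helpers in this folder:

```
import numpy as np
from common.transformations.orientation import rot_from_euler, rot_matrix

rpy = [0.1, -0.05, 0.7]
# rot_matrix composes ry.dot(rp).dot(rr); rot_from_euler goes through quaternions
assert np.allclose(rot_from_euler(rpy), rot_matrix(*rpy))
```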
Calibration
------
EONs are not all mounted in exactly the same way. To compensate for this, the vision model takes in a "calibrated" image: the image is aligned so that the car's direction of travel, when driving straight on a flat road, always projects to the same location in the image. This calibration is defined by a pitch and yaw angle that describe the direction-of-travel vector in device frame.
Example
------
To transform global Mesh3D positions and orientations (positions_ecef, quats_ecef) into the local frame described by the
first position and orientation from Mesh3D one would do:
```
ecef_from_local = rot_from_quat(quats_ecef[0])
local_from_ecef = ecef_from_local.T
positions_local = np.einsum('ij,kj->ki', local_from_ecef, positions_ecef - positions_ecef[0])
rotations_global = rot_from_quat(quats_ecef)
rotations_local = np.einsum('ij,kjl->kil', local_from_ecef, rotations_global)
eulers_local = euler_from_rot(rotations_local)
```

0
common/transformations/__init__.py Normal file
View File

258
common/transformations/camera.py Normal file
View File

@ -0,0 +1,258 @@
import numpy as np
import common.transformations.orientation as orient
import math
FULL_FRAME_SIZE = (1164, 874)
W, H = FULL_FRAME_SIZE[0], FULL_FRAME_SIZE[1]
eon_focal_length = FOCAL = 910.0
# aka 'K' aka camera_frame_from_view_frame
eon_intrinsics = np.array([
[FOCAL, 0., W/2.],
[ 0., FOCAL, H/2.],
[ 0., 0., 1.]])
leon_dcam_intrinsics = np.array([
[650, 0, 816//2],
[ 0, 650, 612//2],
[ 0, 0, 1]])
eon_dcam_intrinsics = np.array([
[860, 0, 1152//2],
[ 0, 860, 864//2],
[ 0, 0, 1]])
# aka 'K_inv' aka view_frame_from_camera_frame
eon_intrinsics_inv = np.linalg.inv(eon_intrinsics)
# device/mesh : x->forward, y-> right, z->down
# view : x->right, y->down, z->forward
device_frame_from_view_frame = np.array([
[ 0., 0., 1.],
[ 1., 0., 0.],
[ 0., 1., 0.]
])
view_frame_from_device_frame = device_frame_from_view_frame.T
def get_calib_from_vp(vp):
vp_norm = normalize(vp)
yaw_calib = np.arctan(vp_norm[0])
pitch_calib = -np.arctan(vp_norm[1]*np.cos(yaw_calib))
roll_calib = 0
return roll_calib, pitch_calib, yaw_calib
# aka 'extrinsic_matrix'
# road : x->forward, y -> left, z->up
def get_view_frame_from_road_frame(roll, pitch, yaw, height):
device_from_road = orient.rot_from_euler([roll, pitch, yaw]).dot(np.diag([1, -1, -1]))
view_from_road = view_frame_from_device_frame.dot(device_from_road)
return np.hstack((view_from_road, [[0], [height], [0]]))
def vp_from_ke(m):
"""
Computes the vanishing point from the product of the intrinsic and extrinsic
matrices C = KE.
The vanishing point is defined as lim x->infinity C (x, 0, 0, 1).T
"""
return (m[0, 0]/m[2,0], m[1,0]/m[2,0])
def vp_from_rpy(rpy):
e = get_view_frame_from_road_frame(rpy[0], rpy[1], rpy[2], 1.22)
ke = np.dot(eon_intrinsics, e)
return vp_from_ke(ke)
def roll_from_ke(m):
# note: different from calibration.h/RollAnglefromKE: I think that one's just wrong
return np.arctan2(-(m[1, 0] - m[1, 1] * m[2, 0] / m[2, 1]),
-(m[0, 0] - m[0, 1] * m[2, 0] / m[2, 1]))
def normalize(img_pts, intrinsics=eon_intrinsics):
# normalizes image coordinates
# accepts single pt or array of pts
intrinsics_inv = np.linalg.inv(intrinsics)
img_pts = np.array(img_pts)
input_shape = img_pts.shape
img_pts = np.atleast_2d(img_pts)
img_pts = np.hstack((img_pts, np.ones((img_pts.shape[0],1))))
img_pts_normalized = img_pts.dot(intrinsics_inv.T)
img_pts_normalized[(img_pts < 0).any(axis=1)] = np.nan
return img_pts_normalized[:,:2].reshape(input_shape)
def denormalize(img_pts, intrinsics=eon_intrinsics):
# denormalizes image coordinates
# accepts single pt or array of pts
img_pts = np.array(img_pts)
input_shape = img_pts.shape
img_pts = np.atleast_2d(img_pts)
img_pts = np.hstack((img_pts, np.ones((img_pts.shape[0],1))))
img_pts_denormalized = img_pts.dot(intrinsics.T)
img_pts_denormalized[img_pts_denormalized[:,0] > W] = np.nan
img_pts_denormalized[img_pts_denormalized[:,0] < 0] = np.nan
img_pts_denormalized[img_pts_denormalized[:,1] > H] = np.nan
img_pts_denormalized[img_pts_denormalized[:,1] < 0] = np.nan
return img_pts_denormalized[:,:2].reshape(input_shape)
def device_from_ecef(pos_ecef, orientation_ecef, pt_ecef):
# device from ecef frame
# device frame is x -> forward, y-> right, z -> down
# accepts single pt or array of pts
input_shape = pt_ecef.shape
pt_ecef = np.atleast_2d(pt_ecef)
ecef_from_device_rot = orient.rotations_from_quats(orientation_ecef)
device_from_ecef_rot = ecef_from_device_rot.T
pt_ecef_rel = pt_ecef - pos_ecef
pt_device = np.einsum('jk,ik->ij', device_from_ecef_rot, pt_ecef_rel)
return pt_device.reshape(input_shape)
def img_from_device(pt_device):
# img coordinates from pts in device frame
# first transforms to view frame, then to img coords
# accepts single pt or array of pts
input_shape = pt_device.shape
pt_device = np.atleast_2d(pt_device)
pt_view = np.einsum('jk,ik->ij', view_frame_from_device_frame, pt_device)
# This function should never return negative depths
pt_view[pt_view[:,2] < 0] = np.nan
pt_img = pt_view/pt_view[:,2:3]
return pt_img.reshape(input_shape)[..., :2]  # ellipsis keeps this working for a single pt too
#TODO please use generic img transform below
def rotate_img(img, eulers, crop=None, intrinsics=eon_intrinsics):
import cv2 # pylint: disable=import-error
size = img.shape[:2]
rot = orient.rot_from_euler(eulers)
quadrangle = np.array([[0, 0],
[size[1]-1, 0],
[0, size[0]-1],
[size[1]-1, size[0]-1]], dtype=np.float32)
quadrangle_norm = np.hstack((normalize(quadrangle, intrinsics=intrinsics), np.ones((4,1))))
warped_quadrangle_full = np.einsum('ij, kj->ki', intrinsics.dot(rot), quadrangle_norm)
warped_quadrangle = np.column_stack((warped_quadrangle_full[:,0]/warped_quadrangle_full[:,2],
warped_quadrangle_full[:,1]/warped_quadrangle_full[:,2])).astype(np.float32)
if crop:
W_border = (size[1] - crop[0])//2
H_border = (size[0] - crop[1])//2
outside_crop = (((warped_quadrangle[:,0] < W_border) |
(warped_quadrangle[:,0] >= size[1] - W_border)) &
((warped_quadrangle[:,1] < H_border) |
(warped_quadrangle[:,1] >= size[0] - H_border)))
if not outside_crop.all():
raise ValueError("warped image not contained inside crop")
else:
H_border, W_border = 0, 0
M = cv2.getPerspectiveTransform(quadrangle, warped_quadrangle)
img_warped = cv2.warpPerspective(img, M, size[::-1])
return img_warped[H_border: size[0] - H_border,
W_border: size[1] - W_border]
def get_camera_frame_from_calib_frame(camera_frame_from_road_frame):
camera_frame_from_ground = camera_frame_from_road_frame[:, (0, 1, 3)]
calib_frame_from_ground = np.dot(eon_intrinsics,
get_view_frame_from_road_frame(0, 0, 0, 1.22))[:, (0, 1, 3)]
ground_from_calib_frame = np.linalg.inv(calib_frame_from_ground)
camera_frame_from_calib_frame = np.dot(camera_frame_from_ground, ground_from_calib_frame)
return camera_frame_from_calib_frame
def pretransform_from_calib(calib):
roll, pitch, yaw, height = calib
view_frame_from_road_frame = get_view_frame_from_road_frame(roll, pitch, yaw, height)
camera_frame_from_road_frame = np.dot(eon_intrinsics, view_frame_from_road_frame)
camera_frame_from_calib_frame = get_camera_frame_from_calib_frame(camera_frame_from_road_frame)
return np.linalg.inv(camera_frame_from_calib_frame)
def transform_img(base_img,
augment_trans=np.array([0,0,0]),
augment_eulers=np.array([0,0,0]),
from_intr=eon_intrinsics,
to_intr=eon_intrinsics,
output_size=None,
pretransform=None,
top_hacks=False,
yuv=False,
alpha=1.0,
beta=0,
blur=0):
import cv2 # pylint: disable=import-error
cv2.setNumThreads(1)
if yuv:
base_img = cv2.cvtColor(base_img, cv2.COLOR_YUV2RGB_I420)
size = base_img.shape[:2]
if not output_size:
output_size = size[::-1]
cy = from_intr[1,2]
def get_M(h=1.22):
quadrangle = np.array([[0, cy + 20],
[size[1]-1, cy + 20],
[0, size[0]-1],
[size[1]-1, size[0]-1]], dtype=np.float32)
quadrangle_norm = np.hstack((normalize(quadrangle, intrinsics=from_intr), np.ones((4,1))))
quadrangle_world = np.column_stack((h*quadrangle_norm[:,0]/quadrangle_norm[:,1],
h*np.ones(4),
h/quadrangle_norm[:,1]))
rot = orient.rot_from_euler(augment_eulers)
to_extrinsics = np.hstack((rot.T, -augment_trans[:,None]))
to_KE = to_intr.dot(to_extrinsics)
warped_quadrangle_full = np.einsum('jk,ik->ij', to_KE, np.hstack((quadrangle_world, np.ones((4,1)))))
warped_quadrangle = np.column_stack((warped_quadrangle_full[:,0]/warped_quadrangle_full[:,2],
warped_quadrangle_full[:,1]/warped_quadrangle_full[:,2])).astype(np.float32)
M = cv2.getPerspectiveTransform(quadrangle, warped_quadrangle.astype(np.float32))
return M
M = get_M()
if pretransform is not None:
M = M.dot(pretransform)
augmented_rgb = cv2.warpPerspective(base_img, M, output_size, borderMode=cv2.BORDER_REPLICATE)
if top_hacks:
cyy = int(math.ceil(to_intr[1,2]))
M = get_M(1000)
if pretransform is not None:
M = M.dot(pretransform)
augmented_rgb[:cyy] = cv2.warpPerspective(base_img, M, (output_size[0], cyy), borderMode=cv2.BORDER_REPLICATE)
# brightness and contrast augment
augmented_rgb = np.clip((float(alpha)*augmented_rgb + beta), 0, 255).astype(np.uint8)
# gaussian blur
if blur > 0:
augmented_rgb = cv2.GaussianBlur(augmented_rgb,(blur*2+1,blur*2+1),cv2.BORDER_DEFAULT)
if yuv:
augmented_img = cv2.cvtColor(augmented_rgb, cv2.COLOR_RGB2YUV_I420)
else:
augmented_img = augmented_rgb
return augmented_img
def yuv_crop(frame, output_size, center=None):
# output_size in camera coordinates so u,v
# center in array coordinates so row, column
import cv2 # pylint: disable=import-error
rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420)
if not center:
center = (rgb.shape[0] // 2, rgb.shape[1] // 2)
rgb_crop = rgb[center[0] - output_size[1] // 2: center[0] + output_size[1] // 2,
center[1] - output_size[0] // 2: center[1] + output_size[0] // 2]
return cv2.cvtColor(rgb_crop, cv2.COLOR_RGB2YUV_I420)
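A sketch of projecting a device-frame point into full-frame pixel coordinates with the helpers above:

```
import numpy as np
from common.transformations.camera import img_from_device, denormalize

# a point 20 m ahead and 1.2 m below the camera, in device frame [forward, right, down]
pt_device = np.array([20.0, 0.0, 1.2])
uv = denormalize(img_from_device(pt_device))  # pixel (u, v) in the 1164x874 frame
```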

108
common/transformations/coordinates.py Normal file
View File

@ -0,0 +1,108 @@
import numpy as np
"""
Coordinate transformation module. All methods accept arrays as input
with each row as a position.
"""
a = 6378137
b = 6356752.3142
esq = 6.69437999014 * 0.001
e1sq = 6.73949674228 * 0.001
def geodetic2ecef(geodetic, radians=False):
geodetic = np.array(geodetic)
input_shape = geodetic.shape
geodetic = np.atleast_2d(geodetic)
ratio = 1.0 if radians else (np.pi / 180.0)
lat = ratio*geodetic[:,0]
lon = ratio*geodetic[:,1]
alt = geodetic[:,2]
xi = np.sqrt(1 - esq * np.sin(lat)**2)
x = (a / xi + alt) * np.cos(lat) * np.cos(lon)
y = (a / xi + alt) * np.cos(lat) * np.sin(lon)
z = (a / xi * (1 - esq) + alt) * np.sin(lat)
ecef = np.array([x, y, z]).T
return ecef.reshape(input_shape)
def ecef2geodetic(ecef, radians=False):
"""
Convert ECEF coordinates to geodetic using Ferrari's method
"""
# Save input shape and work on 2d arrays
ecef = np.atleast_1d(ecef)
input_shape = ecef.shape
ecef = np.atleast_2d(ecef)
x, y, z = ecef[:, 0], ecef[:, 1], ecef[:, 2]
ratio = 1.0 if radians else (180.0 / np.pi)
# Convert from ECEF to geodetic using Ferrari's method
# https://en.wikipedia.org/wiki/Geographic_coordinate_conversion#Ferrari.27s_solution
r = np.sqrt(x * x + y * y)
Esq = a * a - b * b
F = 54 * b * b * z * z
G = r * r + (1 - esq) * z * z - esq * Esq
C = (esq * esq * F * r * r) / (pow(G, 3))
S = np.cbrt(1 + C + np.sqrt(C * C + 2 * C))
P = F / (3 * pow((S + 1 / S + 1), 2) * G * G)
Q = np.sqrt(1 + 2 * esq * esq * P)
r_0 = -(P * esq * r) / (1 + Q) + np.sqrt(0.5 * a * a*(1 + 1.0 / Q) - \
P * (1 - esq) * z * z / (Q * (1 + Q)) - 0.5 * P * r * r)
U = np.sqrt(pow((r - esq * r_0), 2) + z * z)
V = np.sqrt(pow((r - esq * r_0), 2) + (1 - esq) * z * z)
Z_0 = b * b * z / (a * V)
h = U * (1 - b * b / (a * V))
lat = ratio*np.arctan((z + e1sq * Z_0) / r)
lon = ratio*np.arctan2(y, x)
# stack the new columns and return to the original shape
geodetic = np.column_stack((lat, lon, h))
return geodetic.reshape(input_shape)
class LocalCoord():
"""
Allows conversions to local frames. In this case NED.
That is: North East Down from the start position in
meters.
"""
def __init__(self, init_geodetic, init_ecef):
self.init_ecef = init_ecef
lat, lon, _ = (np.pi/180)*np.array(init_geodetic)
self.ned2ecef_matrix = np.array([[-np.sin(lat)*np.cos(lon), -np.sin(lon), -np.cos(lat)*np.cos(lon)],
[-np.sin(lat)*np.sin(lon), np.cos(lon), -np.cos(lat)*np.sin(lon)],
[np.cos(lat), 0, -np.sin(lat)]])
self.ecef2ned_matrix = self.ned2ecef_matrix.T
@classmethod
def from_geodetic(cls, init_geodetic):
init_ecef = geodetic2ecef(init_geodetic)
return LocalCoord(init_geodetic, init_ecef)
@classmethod
def from_ecef(cls, init_ecef):
init_geodetic = ecef2geodetic(init_ecef)
return LocalCoord(init_geodetic, init_ecef)
def ecef2ned(self, ecef):
ecef = np.array(ecef)
return np.dot(self.ecef2ned_matrix, (ecef - self.init_ecef).T).T
def ned2ecef(self, ned):
ned = np.array(ned)
# Transpose so that init_ecef will broadcast correctly for 1d or 2d ned.
return (np.dot(self.ned2ecef_matrix, ned.T).T + self.init_ecef)
def geodetic2ned(self, geodetic):
ecef = geodetic2ecef(geodetic)
return self.ecef2ned(ecef)
def ned2geodetic(self, ned):
ecef = self.ned2ecef(ned)
return ecef2geodetic(ecef)
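A round-trip sketch: geodetic → ECEF → NED about a local origin:

```
import numpy as np
import common.transformations.coordinates as coord

geodetic = [37.7749, -122.4194, 10.0]  # lat, lon in degrees, altitude in meters
ecef = coord.geodetic2ecef(geodetic)
local = coord.LocalCoord.from_geodetic(geodetic)
ned = local.ecef2ned(ecef)  # ~[0, 0, 0]: the origin of the local frame
assert np.allclose(ned, np.zeros(3), atol=1e-6)
```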

150
common/transformations/model.py Normal file
View File

@ -0,0 +1,150 @@
import numpy as np
from common.transformations.camera import (FULL_FRAME_SIZE, eon_focal_length,
get_view_frame_from_road_frame,
vp_from_ke)
# segnet
SEGNET_SIZE = (512, 384)
segnet_frame_from_camera_frame = np.array([
[float(SEGNET_SIZE[0])/FULL_FRAME_SIZE[0], 0., ],
[ 0., float(SEGNET_SIZE[1])/FULL_FRAME_SIZE[1]]])
# model
MODEL_INPUT_SIZE = (320, 160)
MODEL_YUV_SIZE = (MODEL_INPUT_SIZE[0], MODEL_INPUT_SIZE[1] * 3 // 2)
MODEL_CX = MODEL_INPUT_SIZE[0]/2.
MODEL_CY = 21.
model_zoom = 1.25
model_height = 1.22
# canonical model transform
model_intrinsics = np.array(
[[ eon_focal_length / model_zoom, 0. , MODEL_CX],
[ 0. , eon_focal_length / model_zoom, MODEL_CY],
[ 0. , 0. , 1.]])
# MED model
MEDMODEL_INPUT_SIZE = (512, 256)
MEDMODEL_YUV_SIZE = (MEDMODEL_INPUT_SIZE[0], MEDMODEL_INPUT_SIZE[1] * 3 // 2)
MEDMODEL_CY = 47.6
medmodel_zoom = 1.
medmodel_intrinsics = np.array(
[[ eon_focal_length / medmodel_zoom, 0. , 0.5 * MEDMODEL_INPUT_SIZE[0]],
[ 0. , eon_focal_length / medmodel_zoom, MEDMODEL_CY],
[ 0. , 0. , 1.]])
# BIG model
BIGMODEL_INPUT_SIZE = (864, 288)
BIGMODEL_YUV_SIZE = (BIGMODEL_INPUT_SIZE[0], BIGMODEL_INPUT_SIZE[1] * 3 // 2)
bigmodel_zoom = 1.
bigmodel_intrinsics = np.array(
[[ eon_focal_length / bigmodel_zoom, 0. , 0.5 * BIGMODEL_INPUT_SIZE[0]],
[ 0. , eon_focal_length / bigmodel_zoom, 0.2 * BIGMODEL_INPUT_SIZE[1]],
[ 0. , 0. , 1.]])
bigmodel_border = np.array([
[0,0,1],
[BIGMODEL_INPUT_SIZE[0], 0, 1],
[BIGMODEL_INPUT_SIZE[0], BIGMODEL_INPUT_SIZE[1], 1],
[0, BIGMODEL_INPUT_SIZE[1], 1],
])
model_frame_from_road_frame = np.dot(model_intrinsics,
get_view_frame_from_road_frame(0, 0, 0, model_height))
bigmodel_frame_from_road_frame = np.dot(bigmodel_intrinsics,
get_view_frame_from_road_frame(0, 0, 0, model_height))
medmodel_frame_from_road_frame = np.dot(medmodel_intrinsics,
get_view_frame_from_road_frame(0, 0, 0, model_height))
model_frame_from_bigmodel_frame = np.dot(model_intrinsics, np.linalg.inv(bigmodel_intrinsics))
# 'camera from model camera'
def get_model_height_transform(camera_frame_from_road_frame, height):
camera_frame_from_road_ground = np.dot(camera_frame_from_road_frame, np.array([
[1, 0, 0],
[0, 1, 0],
[0, 0, 0],
[0, 0, 1],
]))
camera_frame_from_road_high = np.dot(camera_frame_from_road_frame, np.array([
[1, 0, 0],
[0, 1, 0],
[0, 0, height - model_height],
[0, 0, 1],
]))
road_high_from_camera_frame = np.linalg.inv(camera_frame_from_road_high)
high_camera_from_low_camera = np.dot(camera_frame_from_road_ground, road_high_from_camera_frame)
return high_camera_from_low_camera
# camera_frame_from_model_frame aka 'warp matrix'
# was: calibration.h/CalibrationTransform
def get_camera_frame_from_model_frame(camera_frame_from_road_frame, height=model_height):
vp = vp_from_ke(camera_frame_from_road_frame)
model_camera_from_model_frame = np.array([
[model_zoom, 0., vp[0] - MODEL_CX * model_zoom],
[ 0., model_zoom, vp[1] - MODEL_CY * model_zoom],
[ 0., 0., 1.],
])
# This function is super slow, so skip it if height is very close to canonical
# TODO: speed it up!
if abs(height - model_height) > 0.001:
camera_from_model_camera = get_model_height_transform(camera_frame_from_road_frame, height)
else:
camera_from_model_camera = np.eye(3)
return np.dot(camera_from_model_camera, model_camera_from_model_frame)
def get_camera_frame_from_medmodel_frame(camera_frame_from_road_frame):
camera_frame_from_ground = camera_frame_from_road_frame[:, (0, 1, 3)]
medmodel_frame_from_ground = medmodel_frame_from_road_frame[:, (0, 1, 3)]
ground_from_medmodel_frame = np.linalg.inv(medmodel_frame_from_ground)
camera_frame_from_medmodel_frame = np.dot(camera_frame_from_ground, ground_from_medmodel_frame)
return camera_frame_from_medmodel_frame
def get_camera_frame_from_bigmodel_frame(camera_frame_from_road_frame):
camera_frame_from_ground = camera_frame_from_road_frame[:, (0, 1, 3)]
bigmodel_frame_from_ground = bigmodel_frame_from_road_frame[:, (0, 1, 3)]
ground_from_bigmodel_frame = np.linalg.inv(bigmodel_frame_from_ground)
camera_frame_from_bigmodel_frame = np.dot(camera_frame_from_ground, ground_from_bigmodel_frame)
return camera_frame_from_bigmodel_frame
def get_model_frame(snu_full, camera_frame_from_model_frame, size):
idxs = camera_frame_from_model_frame.dot(np.column_stack([np.tile(np.arange(size[0]), size[1]),
np.tile(np.arange(size[1]), (size[0],1)).T.flatten(),
np.ones(size[0] * size[1])]).T).T.astype(int)
calib_flat = snu_full[idxs[:,1], idxs[:,0]]
if len(snu_full.shape) == 3:
calib = calib_flat.reshape((size[1], size[0], 3))
elif len(snu_full.shape) == 2:
calib = calib_flat.reshape((size[1], size[0]))
else:
raise ValueError("shape of input img is weird")
return calib
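A sketch of computing the model warp matrix for a perfectly calibrated camera at the canonical height (the `common.transformations.model` module path is assumed from this commit's layout):

```
import numpy as np
from common.transformations.camera import eon_intrinsics, get_view_frame_from_road_frame
from common.transformations.model import get_camera_frame_from_model_frame, model_height

# camera-from-road for zero roll/pitch/yaw at the canonical 1.22 m height
camera_frame_from_road_frame = np.dot(eon_intrinsics,
                                      get_view_frame_from_road_frame(0, 0, 0, model_height))
warp = get_camera_frame_from_model_frame(camera_frame_from_road_frame)  # 3x3 camera-from-model
```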

295
common/transformations/orientation.py Normal file
View File

@ -0,0 +1,295 @@
import numpy as np
from numpy import dot, inner, array, linalg
from common.transformations.coordinates import LocalCoord
'''
Vectorized functions that transform between
rotation matrices, euler angles and quaternions.
All support lists, arrays, or arrays of arrays as inputs.
Supports both x2y and y_from_x format (y_from_x preferred!).
'''
def euler2quat(eulers):
eulers = array(eulers)
if len(eulers.shape) > 1:
output_shape = (-1,4)
else:
output_shape = (4,)
eulers = np.atleast_2d(eulers)
gamma, theta, psi = eulers[:,0], eulers[:,1], eulers[:,2]
q0 = np.cos(gamma / 2) * np.cos(theta / 2) * np.cos(psi / 2) + \
np.sin(gamma / 2) * np.sin(theta / 2) * np.sin(psi / 2)
q1 = np.sin(gamma / 2) * np.cos(theta / 2) * np.cos(psi / 2) - \
np.cos(gamma / 2) * np.sin(theta / 2) * np.sin(psi / 2)
q2 = np.cos(gamma / 2) * np.sin(theta / 2) * np.cos(psi / 2) + \
np.sin(gamma / 2) * np.cos(theta / 2) * np.sin(psi / 2)
q3 = np.cos(gamma / 2) * np.cos(theta / 2) * np.sin(psi / 2) - \
np.sin(gamma / 2) * np.sin(theta / 2) * np.cos(psi / 2)
quats = array([q0, q1, q2, q3]).T
for i in range(len(quats)):
if quats[i,0] < 0:
quats[i] = -quats[i]
return quats.reshape(output_shape)
def quat2euler(quats):
quats = array(quats)
if len(quats.shape) > 1:
output_shape = (-1,3)
else:
output_shape = (3,)
quats = np.atleast_2d(quats)
q0, q1, q2, q3 = quats[:,0], quats[:,1], quats[:,2], quats[:,3]
gamma = np.arctan2(2 * (q0 * q1 + q2 * q3), 1 - 2 * (q1**2 + q2**2))
theta = np.arcsin(2 * (q0 * q2 - q3 * q1))
psi = np.arctan2(2 * (q0 * q3 + q1 * q2), 1 - 2 * (q2**2 + q3**2))
eulers = array([gamma, theta, psi]).T
return eulers.reshape(output_shape)
def quat2rot(quats):
quats = array(quats)
input_shape = quats.shape
quats = np.atleast_2d(quats)
Rs = np.zeros((quats.shape[0], 3, 3))
q0 = quats[:, 0]
q1 = quats[:, 1]
q2 = quats[:, 2]
q3 = quats[:, 3]
Rs[:, 0, 0] = q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3
Rs[:, 0, 1] = 2 * (q1 * q2 - q0 * q3)
Rs[:, 0, 2] = 2 * (q0 * q2 + q1 * q3)
Rs[:, 1, 0] = 2 * (q1 * q2 + q0 * q3)
Rs[:, 1, 1] = q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3
Rs[:, 1, 2] = 2 * (q2 * q3 - q0 * q1)
Rs[:, 2, 0] = 2 * (q1 * q3 - q0 * q2)
Rs[:, 2, 1] = 2 * (q0 * q1 + q2 * q3)
Rs[:, 2, 2] = q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3
if len(input_shape) < 2:
return Rs[0]
else:
return Rs
def rot2quat(rots):
input_shape = rots.shape
if len(input_shape) < 3:
rots = array([rots])
K3 = np.empty((len(rots), 4, 4))
K3[:, 0, 0] = (rots[:, 0, 0] - rots[:, 1, 1] - rots[:, 2, 2]) / 3.0
K3[:, 0, 1] = (rots[:, 1, 0] + rots[:, 0, 1]) / 3.0
K3[:, 0, 2] = (rots[:, 2, 0] + rots[:, 0, 2]) / 3.0
K3[:, 0, 3] = (rots[:, 1, 2] - rots[:, 2, 1]) / 3.0
K3[:, 1, 0] = K3[:, 0, 1]
K3[:, 1, 1] = (rots[:, 1, 1] - rots[:, 0, 0] - rots[:, 2, 2]) / 3.0
K3[:, 1, 2] = (rots[:, 2, 1] + rots[:, 1, 2]) / 3.0
K3[:, 1, 3] = (rots[:, 2, 0] - rots[:, 0, 2]) / 3.0
K3[:, 2, 0] = K3[:, 0, 2]
K3[:, 2, 1] = K3[:, 1, 2]
K3[:, 2, 2] = (rots[:, 2, 2] - rots[:, 0, 0] - rots[:, 1, 1]) / 3.0
K3[:, 2, 3] = (rots[:, 0, 1] - rots[:, 1, 0]) / 3.0
K3[:, 3, 0] = K3[:, 0, 3]
K3[:, 3, 1] = K3[:, 1, 3]
K3[:, 3, 2] = K3[:, 2, 3]
K3[:, 3, 3] = (rots[:, 0, 0] + rots[:, 1, 1] + rots[:, 2, 2]) / 3.0
q = np.empty((len(rots), 4))
for i in range(len(rots)):
_, eigvecs = linalg.eigh(K3[i].T)
eigvecs = eigvecs[:,3:]
q[i, 0] = eigvecs[-1]
q[i, 1:] = -eigvecs[:-1].flatten()
if q[i, 0] < 0:
q[i] = -q[i]
if len(input_shape) < 3:
return q[0]
else:
return q
def euler2rot(eulers):
return rotations_from_quats(euler2quat(eulers))
def rot2euler(rots):
return quat2euler(quats_from_rotations(rots))
quats_from_rotations = rot2quat
quat_from_rot = rot2quat
rotations_from_quats = quat2rot
rot_from_quat = quat2rot
euler_from_rot = rot2euler
euler_from_quat = quat2euler
rot_from_euler = euler2rot
quat_from_euler = euler2quat
'''
Random helpers below
'''
def quat_product(q, r):
t = np.zeros(4)
t[0] = r[0] * q[0] - r[1] * q[1] - r[2] * q[2] - r[3] * q[3]
t[1] = r[0] * q[1] + r[1] * q[0] - r[2] * q[3] + r[3] * q[2]
t[2] = r[0] * q[2] + r[1] * q[3] + r[2] * q[0] - r[3] * q[1]
t[3] = r[0] * q[3] - r[1] * q[2] + r[2] * q[1] + r[3] * q[0]
return t
def rot_matrix(roll, pitch, yaw):
cr, sr = np.cos(roll), np.sin(roll)
cp, sp = np.cos(pitch), np.sin(pitch)
cy, sy = np.cos(yaw), np.sin(yaw)
rr = array([[1,0,0],[0, cr,-sr],[0, sr, cr]])
rp = array([[cp,0,sp],[0, 1,0],[-sp, 0, cp]])
ry = array([[cy,-sy,0],[sy, cy,0],[0, 0, 1]])
return ry.dot(rp.dot(rr))
def rot(axis, angle):
# Rotates around an arbitrary axis
ret_1 = (1 - np.cos(angle)) * array([[axis[0]**2, axis[0] * axis[1], axis[0] * axis[2]], [
axis[1] * axis[0], axis[1]**2, axis[1] * axis[2]
], [axis[2] * axis[0], axis[2] * axis[1], axis[2]**2]])
ret_2 = np.cos(angle) * np.eye(3)
ret_3 = np.sin(angle) * array([[0, -axis[2], axis[1]], [axis[2], 0, -axis[0]],
[-axis[1], axis[0], 0]])
return ret_1 + ret_2 + ret_3
def ecef_euler_from_ned(ned_ecef_init, ned_pose):
'''
Got it from here:
Using Rotations to Build Aerospace Coordinate Systems
-Don Koks
'''
converter = LocalCoord.from_ecef(ned_ecef_init)
x0 = converter.ned2ecef([1, 0, 0]) - converter.ned2ecef([0, 0, 0])
y0 = converter.ned2ecef([0, 1, 0]) - converter.ned2ecef([0, 0, 0])
z0 = converter.ned2ecef([0, 0, 1]) - converter.ned2ecef([0, 0, 0])
x1 = rot(z0, ned_pose[2]).dot(x0)
y1 = rot(z0, ned_pose[2]).dot(y0)
z1 = rot(z0, ned_pose[2]).dot(z0)
x2 = rot(y1, ned_pose[1]).dot(x1)
y2 = rot(y1, ned_pose[1]).dot(y1)
z2 = rot(y1, ned_pose[1]).dot(z1)
x3 = rot(x2, ned_pose[0]).dot(x2)
y3 = rot(x2, ned_pose[0]).dot(y2)
#z3 = rot(x2, ned_pose[0]).dot(z2)
x0 = array([1, 0, 0])
y0 = array([0, 1, 0])
z0 = array([0, 0, 1])
psi = np.arctan2(inner(x3, y0), inner(x3, x0))
theta = np.arctan2(-inner(x3, z0), np.sqrt(inner(x3, x0)**2 + inner(x3, y0)**2))
y2 = rot(z0, psi).dot(y0)
z2 = rot(y2, theta).dot(z0)
phi = np.arctan2(inner(y3, z2), inner(y3, y2))
ret = array([phi, theta, psi])
return ret
def ned_euler_from_ecef(ned_ecef_init, ecef_poses):
'''
Got the math from here:
Using Rotations to Build Aerospace Coordinate Systems
-Don Koks
Also accepts array of ecef_poses and array of ned_ecef_inits.
Where each row is a pose and an ecef_init.
'''
ned_ecef_init = array(ned_ecef_init)
ecef_poses = array(ecef_poses)
output_shape = ecef_poses.shape
ned_ecef_init = np.atleast_2d(ned_ecef_init)
if ned_ecef_init.shape[0] == 1:
ned_ecef_init = np.tile(ned_ecef_init[0], (output_shape[0], 1))
ecef_poses = np.atleast_2d(ecef_poses)
ned_poses = np.zeros(ecef_poses.shape)
for i, ecef_pose in enumerate(ecef_poses):
converter = LocalCoord.from_ecef(ned_ecef_init[i])
x0 = array([1, 0, 0])
y0 = array([0, 1, 0])
z0 = array([0, 0, 1])
x1 = rot(z0, ecef_pose[2]).dot(x0)
y1 = rot(z0, ecef_pose[2]).dot(y0)
z1 = rot(z0, ecef_pose[2]).dot(z0)
x2 = rot(y1, ecef_pose[1]).dot(x1)
y2 = rot(y1, ecef_pose[1]).dot(y1)
z2 = rot(y1, ecef_pose[1]).dot(z1)
x3 = rot(x2, ecef_pose[0]).dot(x2)
y3 = rot(x2, ecef_pose[0]).dot(y2)
#z3 = rot(x2, ecef_pose[0]).dot(z2)
x0 = converter.ned2ecef([1, 0, 0]) - converter.ned2ecef([0, 0, 0])
y0 = converter.ned2ecef([0, 1, 0]) - converter.ned2ecef([0, 0, 0])
z0 = converter.ned2ecef([0, 0, 1]) - converter.ned2ecef([0, 0, 0])
psi = np.arctan2(inner(x3, y0), inner(x3, x0))
theta = np.arctan2(-inner(x3, z0), np.sqrt(inner(x3, x0)**2 + inner(x3, y0)**2))
y2 = rot(z0, psi).dot(y0)
z2 = rot(y2, theta).dot(z0)
phi = np.arctan2(inner(y3, z2), inner(y3, y2))
ned_poses[i] = array([phi, theta, psi])
return ned_poses.reshape(output_shape)
def ecef2car(car_ecef, psi, theta, points_ecef, ned_converter):
"""
TODO: add roll rotation
Converts an array of points in ecef coordinates into
x-forward, y-left, z-up coordinates
Parameters
----------
psi: yaw, radian
theta: pitch, radian
Returns
-------
[x, y, z] coordinates in car frame
"""
# input is an array of points in ecef coordinates
# output is an array of points in car's coordinate (x-front, y-left, z-up)
# convert points to NED
points_ned = []
for p in points_ecef:
points_ned.append(ned_converter.ecef2ned_matrix.dot(array(p) - car_ecef))
points_ned = np.vstack(points_ned).T
# n, e, d -> x, y, z
# Calculate relative positions and rotate with respect to the car's heading and pitch
invert_R = array([[1., 0., 0.], [0., -1., 0.], [0., 0., -1.]])
c, s = np.cos(psi), np.sin(psi)
yaw_R = array([[c, s, 0.], [-s, c, 0.], [0., 0., 1.]])
c, s = np.cos(theta), np.sin(theta)
pitch_R = array([[c, 0., -s], [0., 1., 0.], [s, 0., c]])
return dot(pitch_R, dot(yaw_R, dot(invert_R, points_ned)))
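A round-trip sketch over the three orientation representations:

```
import numpy as np
from common.transformations.orientation import euler2quat, quat2euler, quat2rot

rpy = np.array([0.1, -0.2, 0.3])
q = euler2quat(rpy)  # [q_w, q_x, q_y, q_z] with q_w > 0
assert np.allclose(quat2euler(q), rpy)  # unique round trip for small angles
R = quat2rot(q)
assert np.allclose(R.T.dot(R), np.eye(3))  # R is orthonormal
```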

122
common/transformations/tests/test_coordinates.py Normal file
View File

@ -0,0 +1,122 @@
#!/usr/bin/env python3
import numpy as np
import unittest
import common.transformations.coordinates as coord
geodetic_positions = np.array([[37.7610403, -122.4778699, 115],
[27.4840915, -68.5867592, 2380],
[32.4916858, -113.652821, -6],
[15.1392514, 103.6976037, 24],
[24.2302229, 44.2835412, 1650]])
geodetic_positions_radians = np.array([[0.65905448, -2.13764209, 115],
[0.47968789, -1.19706477, 2380],
[0.5670869, -1.98361593, -6],
[0.26422978, 1.80986461, 24],
[0.42289717, 0.7728936, 1650]])
ecef_positions = np.array([[-2711076.55270557, -4259167.14692758, 3884579.87669935],
[ 2068042.69652729, -5273435.40316622, 2927004.89190746],
[-2160412.60461669, -4932588.89873832, 3406542.29652851],
[-1458247.92550567, 5983060.87496612, 1654984.6099885 ],
[ 4167239.10867871, 4064301.90363223, 2602234.6065749 ]])
ecef_positions_offset = np.array([[-2711004.46961115, -4259099.33540613, 3884605.16002147],
[ 2068074.30639499, -5273413.78835412, 2927012.48741131],
[-2160344.53748176, -4932586.20092211, 3406636.2962545 ],
[-1458211.98517094, 5983151.11161276, 1655077.02698447],
[ 4167271.20055269, 4064398.22619263, 2602238.95265847]])
ned_offsets = np.array([[78.722153649976391, 24.396208657446344, 60.343017506838436],
[10.699003365155221, 37.319278617604269, 4.1084100025050407],
[95.282646251726959, 61.266689955574428, -25.376506058505054],
[68.535769283630003, -56.285970011848889, -100.54840137956515],
[-33.066609321880179, 46.549821994306861, -84.062540548335591]])
ecef_init_batch = np.array([2068042.69652729, -5273435.40316622, 2927004.89190746])
ecef_positions_offset_batch = np.array([[ 2068089.41454771, -5273434.46829148, 2927074.04783672],
[ 2068103.31628647, -5273393.92275431, 2927102.08725987],
[ 2068108.49939636, -5273359.27047121, 2927045.07091581],
[ 2068075.12395611, -5273381.69432566, 2927041.08207992],
[ 2068060.72033399, -5273430.6061505, 2927094.54928305]])
ned_offsets_batch = np.array([[ 53.88103168, 43.83445935, -46.27488057],
[ 93.83378995, 71.57943024, -30.23113187],
[ 57.26725796, 89.05602684, 23.02265814],
[ 49.71775195, 49.79767572, 17.15351015],
[ 78.56272609, 18.53100158, -43.25290759]])
class TestNED(unittest.TestCase):
def test_small_distances(self):
start_geodetic = np.array([33.8042184, -117.888593, 0.0])
local_coord = coord.LocalCoord.from_geodetic(start_geodetic)
start_ned = local_coord.geodetic2ned(start_geodetic)
np.testing.assert_array_equal(start_ned, np.zeros(3,))
west_geodetic = start_geodetic + [0, -0.0005, 0]
west_ned = local_coord.geodetic2ned(west_geodetic)
self.assertLess(np.abs(west_ned[0]), 1e-3)
self.assertLess(west_ned[1], 0)
southwest_geodetic = start_geodetic + [-0.0005, -0.002, 0]
southwest_ned = local_coord.geodetic2ned(southwest_geodetic)
self.assertLess(southwest_ned[0], 0)
self.assertLess(southwest_ned[1], 0)
def test_ecef_geodetic(self):
# testing single
np.testing.assert_allclose(ecef_positions[0], coord.geodetic2ecef(geodetic_positions[0]), rtol=1e-9)
np.testing.assert_allclose(geodetic_positions[0,:2], coord.ecef2geodetic(ecef_positions[0])[:2], rtol=1e-9)
np.testing.assert_allclose(geodetic_positions[0,2], coord.ecef2geodetic(ecef_positions[0])[2], rtol=1e-9, atol=1e-4)
np.testing.assert_allclose(geodetic_positions[:,:2], coord.ecef2geodetic(ecef_positions)[:,:2], rtol=1e-9)
np.testing.assert_allclose(geodetic_positions[:,2], coord.ecef2geodetic(ecef_positions)[:,2], rtol=1e-9, atol=1e-4)
np.testing.assert_allclose(ecef_positions, coord.geodetic2ecef(geodetic_positions), rtol=1e-9)
np.testing.assert_allclose(geodetic_positions_radians[0], coord.ecef2geodetic(ecef_positions[0], radians=True), rtol=1e-5)
np.testing.assert_allclose(geodetic_positions_radians[:,:2], coord.ecef2geodetic(ecef_positions, radians=True)[:,:2], rtol=1e-7)
np.testing.assert_allclose(geodetic_positions_radians[:,2], coord.ecef2geodetic(ecef_positions, radians=True)[:,2], rtol=1e-7, atol=1e-4)
def test_ned(self):
for ecef_pos in ecef_positions:
converter = coord.LocalCoord.from_ecef(ecef_pos)
ecef_pos_moved = ecef_pos + [25, -25, 25]
ecef_pos_moved_double_converted = converter.ned2ecef(converter.ecef2ned(ecef_pos_moved))
np.testing.assert_allclose(ecef_pos_moved, ecef_pos_moved_double_converted, rtol=1e-9)
for geo_pos in geodetic_positions:
converter = coord.LocalCoord.from_geodetic(geo_pos)
geo_pos_moved = geo_pos + np.array([0, 0, 10])
geo_pos_double_converted_moved = converter.ned2geodetic(converter.geodetic2ned(geo_pos) + np.array([0,0,-10]))
np.testing.assert_allclose(geo_pos_moved[:2], geo_pos_double_converted_moved[:2], rtol=1e-9, atol=1e-6)
np.testing.assert_allclose(geo_pos_moved[2], geo_pos_double_converted_moved[2], rtol=1e-9, atol=1e-4)
def test_ned_saved_results(self):
for i, ecef_pos in enumerate(ecef_positions):
converter = coord.LocalCoord.from_ecef(ecef_pos)
np.testing.assert_allclose(converter.ned2ecef(ned_offsets[i]),
ecef_positions_offset[i],
rtol=1e-9, atol=1e-4)
np.testing.assert_allclose(converter.ecef2ned(ecef_positions_offset[i]),
ned_offsets[i],
rtol=1e-9, atol=1e-4)
def test_ned_batch(self):
converter = coord.LocalCoord.from_ecef(ecef_init_batch)
np.testing.assert_allclose(converter.ecef2ned(ecef_positions_offset_batch),
ned_offsets_batch,
rtol=1e-9, atol=1e-7)
np.testing.assert_allclose(converter.ned2ecef(ned_offsets_batch),
ecef_positions_offset_batch,
rtol=1e-9, atol=1e-7)
if __name__ == "__main__":
unittest.main()

69
common/transformations/tests/test_orientation.py Normal file
View File

@ -0,0 +1,69 @@
#!/usr/bin/env python3
import numpy as np
import unittest
from common.transformations.orientation import euler2quat, quat2euler, euler2rot, rot2euler, \
rot2quat, quat2rot, \
ned_euler_from_ecef
eulers = np.array([[ 1.46520501, 2.78688383, 2.92780854],
[ 4.86909526, 3.60618161, 4.30648981],
[ 3.72175965, 2.68763705, 5.43895988],
[ 5.92306687, 5.69573614, 0.81100357],
[ 0.67838374, 5.02402037, 2.47106426]])
quats = np.array([[ 0.66855182, -0.71500939, 0.19539353, 0.06017818],
[ 0.43163717, 0.70013301, 0.28209145, 0.49389021],
[ 0.44121991, -0.08252646, 0.34257534, 0.82532207],
[ 0.88578382, -0.04515356, -0.32936046, 0.32383617],
[ 0.06578165, 0.61282835, 0.07126891, 0.78424163]])
ecef_positions = np.array([[-2711076.55270557, -4259167.14692758, 3884579.87669935],
[ 2068042.69652729, -5273435.40316622, 2927004.89190746],
[-2160412.60461669, -4932588.89873832, 3406542.29652851],
[-1458247.92550567, 5983060.87496612, 1654984.6099885 ],
[ 4167239.10867871, 4064301.90363223, 2602234.6065749 ]])
ned_eulers = np.array([[ 0.46806039, -0.4881889 , 1.65697808],
[-2.14525969, -0.36533066, 0.73813479],
[-1.39523364, -0.58540761, -1.77376356],
[-1.84220435, 0.61828016, -1.03310421],
[ 2.50450101, 0.36304151, 0.33136365]])
class TestOrientation(unittest.TestCase):
def test_quat_euler(self):
for i, eul in enumerate(eulers):
np.testing.assert_allclose(quats[i], euler2quat(eul), rtol=1e-7)
np.testing.assert_allclose(quats[i], euler2quat(quat2euler(quats[i])), rtol=1e-6)
for i, eul in enumerate(eulers):
np.testing.assert_allclose(quats[i], euler2quat(list(eul)), rtol=1e-7)
np.testing.assert_allclose(quats[i], euler2quat(quat2euler(list(quats[i]))), rtol=1e-6)
np.testing.assert_allclose(quats, euler2quat(eulers), rtol=1e-7)
np.testing.assert_allclose(quats, euler2quat(quat2euler(quats)), rtol=1e-6)
def test_rot_euler(self):
for eul in eulers:
np.testing.assert_allclose(euler2quat(eul), euler2quat(rot2euler(euler2rot(eul))), rtol=1e-7)
for eul in eulers:
np.testing.assert_allclose(euler2quat(eul), euler2quat(rot2euler(euler2rot(list(eul)))), rtol=1e-7)
np.testing.assert_allclose(euler2quat(eulers), euler2quat(rot2euler(euler2rot(eulers))), rtol=1e-7)
def test_rot_quat(self):
for quat in quats:
np.testing.assert_allclose(quat, rot2quat(quat2rot(quat)), rtol=1e-7)
for quat in quats:
np.testing.assert_allclose(quat, rot2quat(quat2rot(list(quat))), rtol=1e-7)
np.testing.assert_allclose(quats, rot2quat(quat2rot(quats)), rtol=1e-7)
def test_euler_ned(self):
for i in range(len(eulers)):
np.testing.assert_allclose(ned_eulers[i], ned_euler_from_ecef(ecef_positions[i], eulers[i]), rtol=1e-7)
#np.testing.assert_allclose(eulers[i], ecef_euler_from_ned(ecef_positions[i], ned_eulers[i]), rtol=1e-7)
np.testing.assert_allclose(ned_eulers, ned_euler_from_ecef(ecef_positions, eulers), rtol=1e-7)
if __name__ == "__main__":
unittest.main()

40
common/window.py Normal file
View File

@ -0,0 +1,40 @@
import sys
import pygame
from pygame.locals import *
class Window():
def __init__(self, w, h, caption="window", double=False):
self.w = w
self.h = h
pygame.init()
pygame.display.set_caption(caption)
self.double = double
if self.double:
self.screen = pygame.display.set_mode((w*2,h*2), pygame.DOUBLEBUF)
else:
self.screen = pygame.display.set_mode((w,h), pygame.DOUBLEBUF)
self.camera_surface = pygame.surface.Surface((w,h), 0, 24).convert()
def draw(self, out):
pygame.surfarray.blit_array(self.camera_surface, out.swapaxes(0,1))
if self.double:
camera_surface_2x = pygame.transform.scale2x(self.camera_surface)
self.screen.blit(camera_surface_2x, (0, 0))
else:
self.screen.blit(self.camera_surface, (0, 0))
pygame.display.flip()
def getkey(self):
while 1:
event = pygame.event.wait()
if event.type == QUIT:
pygame.quit()
sys.exit()
if event.type == KEYDOWN:
return event.key
def getclick(self):
for event in pygame.event.get():
if event.type == pygame.MOUSEBUTTONDOWN:
mx, my = pygame.mouse.get_pos()
return mx, my
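A sketch of driving the window with a synthetic frame (requires pygame and a display):

```
import numpy as np
from common.window import Window

win = Window(320, 160, caption="demo")
frame = np.zeros((160, 320, 3), dtype=np.uint8)  # h x w x rgb
frame[:, :, 1] = 128  # dim green
win.draw(frame)
key = win.getkey()  # blocks until a key press; QUIT exits the process
```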