initial commit

Willem Melching 2020-05-13 13:43:32 -07:00
commit f9d3744670
24 changed files with 2573 additions and 0 deletions

1
.dockerignore Normal file

@@ -0,0 +1 @@
.sconsign.dblite

11
.editorconfig Normal file

@@ -0,0 +1,11 @@
root = true

[*]
end_of_line = lf
insert_final_newline = true
trim_trailing_whitespace = true

[{*.py,*.pyx,*.pxd}]
charset = utf-8
indent_style = space
indent_size = 2

13
.github/workflows/tests.yml vendored Normal file

@@ -0,0 +1,13 @@
name: Tests
on: [push, pull_request]

jobs:
  test:
    runs-on: ubuntu-16.04
    steps:
    - uses: actions/checkout@v2

    - name: Build docker image
      run: docker build -t rednose .

141
.gitignore vendored Normal file

@@ -0,0 +1,141 @@
generated/
.sconsign.dblite
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/

585
.pylintrc Normal file

@@ -0,0 +1,585 @@
[MASTER]
# A comma-separated list of package or module names from where C extensions may
# be loaded. Extensions are loaded into the active Python interpreter and may
# run arbitrary code
extension-pkg-whitelist=scipy
# Add files or directories to the blacklist. They should be base names, not
# paths.
ignore=CVS
# Add files or directories matching the regex patterns to the blacklist. The
# regex matches against base names, not paths.
ignore-patterns=
# Python code to execute, usually for sys.path manipulation such as
# pygtk.require().
#init-hook=
# Use multiple processes to speed up Pylint.
jobs=4
# List of plugins (as comma separated values of python modules names) to load,
# usually to register additional checkers.
load-plugins=
# Pickle collected data for later comparisons.
persistent=yes
# Specify a configuration file.
#rcfile=
# When enabled, pylint would attempt to guess common misconfiguration and emit
# user-friendly hints instead of false-positive error messages
suggestion-mode=yes
# Allow loading of arbitrary C extensions. Extensions are imported into the
# active Python interpreter and may run arbitrary code.
unsafe-load-any-extension=no
[MESSAGES CONTROL]
# Only show warnings with the listed confidence levels. Leave empty to show
# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
confidence=
# Disable the message, report, category or checker with the given id(s). You
# can either give multiple identifiers separated by comma (,) or put this
# option multiple times (only on the command line, not in the configuration
# file where it should appear only once). You can also use "--disable=all" to
# disable everything first and then re-enable specific checks. For example, if
# you want to run only the similarities checker, you can use "--disable=all
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use "--disable=all --enable=classes
# --disable=W"
disable=print-statement,
parameter-unpacking,
unpacking-in-except,
old-raise-syntax,
backtick,
long-suffix,
old-ne-operator,
old-octal-literal,
import-star-module-level,
non-ascii-bytes-literal,
raw-checker-failed,
bad-inline-option,
locally-disabled,
locally-enabled,
file-ignored,
suppressed-message,
useless-suppression,
deprecated-pragma,
apply-builtin,
basestring-builtin,
buffer-builtin,
cmp-builtin,
coerce-builtin,
execfile-builtin,
file-builtin,
long-builtin,
raw_input-builtin,
reduce-builtin,
standarderror-builtin,
unicode-builtin,
xrange-builtin,
coerce-method,
delslice-method,
getslice-method,
setslice-method,
no-absolute-import,
old-division,
dict-iter-method,
dict-view-method,
next-method-called,
metaclass-assignment,
indexing-exception,
raising-string,
reload-builtin,
oct-method,
hex-method,
nonzero-method,
cmp-method,
input-builtin,
round-builtin,
intern-builtin,
unichr-builtin,
map-builtin-not-iterating,
zip-builtin-not-iterating,
range-builtin-not-iterating,
filter-builtin-not-iterating,
using-cmp-argument,
eq-without-hash,
div-method,
idiv-method,
rdiv-method,
exception-message-attribute,
invalid-str-codec,
sys-max-int,
bad-python3-import,
deprecated-string-function,
deprecated-str-translate-call,
deprecated-itertools-function,
deprecated-types-field,
next-method-defined,
dict-items-not-iterating,
dict-keys-not-iterating,
dict-values-not-iterating,
bad-indentation,
line-too-long,
missing-docstring,
multiple-statements,
bad-continuation,
invalid-name,
too-many-arguments,
too-many-locals,
superfluous-parens,
bad-whitespace,
too-many-instance-attributes,
wrong-import-position,
ungrouped-imports,
wrong-import-order,
protected-access,
trailing-whitespace,
too-many-branches,
too-few-public-methods,
too-many-statements,
trailing-newlines,
attribute-defined-outside-init,
too-many-return-statements,
too-many-public-methods,
unused-argument,
old-style-class,
no-init,
len-as-condition,
unneeded-not,
no-self-use,
multiple-imports,
no-else-return,
logging-not-lazy,
fixme,
redefined-outer-name,
unused-variable,
unsubscriptable-object,
expression-not-assigned,
too-many-boolean-expressions,
consider-using-ternary,
invalid-unary-operand-type,
relative-import,
deprecated-lambda
# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifiers separated by comma (,) or put this option
# multiple times (only on the command line, not in the configuration file where
# it should appear only once). See also the "--disable" option for examples.
enable=c-extension-no-member
[REPORTS]
# Python expression which should return a score less than 10 (10 is the highest
# score). You have access to the variables error, warning, refactor, convention
# and statement, which respectively contain the number of messages in each
# category and the total number of statements analyzed. This is used by the
# global evaluation report (RP0004).
evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
# Template used to display messages. This is a python new-style format string
# used to format the message information. See doc for all details
#msg-template=
# Set the output format. Available formats are text, parseable, colorized, json
# and msvs (visual studio). You can also give a reporter class, e.g.
# mypackage.mymodule.MyReporterClass.
output-format=text
# Tells whether to display a full report or only the messages
reports=no
# Activate the evaluation score.
score=yes
[REFACTORING]
# Maximum number of nested blocks for function / method body
max-nested-blocks=5
# Complete name of functions that never returns. When checking for
# inconsistent-return-statements if a never returning function is called then
# it will be considered as an explicit return statement and no message will be
# printed.
never-returning-functions=optparse.Values,sys.exit
[LOGGING]
# Logging modules to check that the string format arguments are in logging
# function parameter format
logging-modules=logging
[SPELLING]
# Limits count of emitted suggestions for spelling mistakes
max-spelling-suggestions=4
# Spelling dictionary name. Available dictionaries: none. To make it work,
# install the python-enchant package.
spelling-dict=
# List of comma separated words that should not be checked.
spelling-ignore-words=
# A path to a file that contains private dictionary; one word per line.
spelling-private-dict-file=
# Tells whether to store unknown words to indicated private dictionary in
# --spelling-private-dict-file option instead of raising a message.
spelling-store-unknown-words=no
[MISCELLANEOUS]
# List of note tags to take in consideration, separated by a comma.
notes=FIXME,
XXX,
TODO
[SIMILARITIES]
# Ignore comments when computing similarities.
ignore-comments=yes
# Ignore docstrings when computing similarities.
ignore-docstrings=yes
# Ignore imports when computing similarities.
ignore-imports=no
# Minimum lines number of a similarity.
min-similarity-lines=4
[TYPECHECK]
# List of decorators that produce context managers, such as
# contextlib.contextmanager. Add to this list to register other decorators that
# produce valid context managers.
contextmanager-decorators=contextlib.contextmanager
# List of members which are set dynamically and missed by pylint inference
# system, and so shouldn't trigger E1101 when accessed. Python regular
# expressions are accepted.
generated-members=capnp.* cereal.* pygame.* zmq.* setproctitle.* smbus2.* usb1.* serial.* cv2.*
# Tells whether missing members accessed in mixin class should be ignored. A
# mixin class is detected if its name ends with "mixin" (case insensitive).
ignore-mixin-members=yes
# This flag controls whether pylint should warn about no-member and similar
# checks whenever an opaque object is returned when inferring. The inference
# can return multiple potential results while evaluating a Python object, but
# some branches might not be evaluated, which results in partial inference. In
# that case, it might be useful to still emit no-member and other checks for
# the rest of the inferred objects.
ignore-on-opaque-inference=yes
# List of class names for which member attributes should not be checked (useful
# for classes with dynamically set attributes). This supports the use of
# qualified names.
ignored-classes=optparse.Values,thread._local,_thread._local
# List of module names for which member attributes should not be checked
# (useful for modules/projects where namespaces are manipulated during runtime
# and thus existing member attributes cannot be deduced by static analysis). It
# supports qualified module names, as well as Unix pattern matching.
ignored-modules=flask setproctitle usb1 flask.ext.socketio smbus2 usb1.*
# Show a hint with possible names when a member name was not found. The aspect
# of finding the hint is based on edit distance.
missing-member-hint=yes
# The minimum edit distance a name should have in order to be considered a
# similar match for a missing member name.
missing-member-hint-distance=1
# The total number of similar names that should be taken in consideration when
# showing a hint for a missing member.
missing-member-max-choices=1
[VARIABLES]
# List of additional names supposed to be defined in builtins. Remember that
# you should avoid to define new builtins when possible.
additional-builtins=
# Tells whether unused global variables should be treated as a violation.
allow-global-unused-variables=yes
# List of strings which can identify a callback function by name. A callback
# name must start or end with one of those strings.
callbacks=cb_,
_cb
# A regular expression matching the name of dummy variables (i.e. expectedly
# not used).
dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_
# Argument names that match this expression will be ignored. Defaults to names
# with a leading underscore.
ignored-argument-names=_.*|^ignored_|^unused_
# Tells whether we should check for unused import in __init__ files.
init-import=no
# List of qualified module names which can have objects that can redefine
# builtins.
redefining-builtins-modules=six.moves,past.builtins,future.builtins
[FORMAT]
# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
expected-line-ending-format=
# Regexp for a line that is allowed to be longer than the limit.
ignore-long-lines=^\s*(# )?<?https?://\S+>?$
# Number of spaces of indent required inside a hanging or continued line.
indent-after-paren=4
# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
# tab).
indent-string=' '
# Maximum number of characters on a single line.
max-line-length=100
# Maximum number of lines in a module
max-module-lines=1000
# List of optional constructs for which whitespace checking is disabled. `dict-
# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}.
# `trailing-comma` allows a space between comma and closing bracket: (a, ).
# `empty-line` allows space-only lines.
no-space-check=trailing-comma,
dict-separator
# Allow the body of a class to be on the same line as the declaration if body
# contains single statement.
single-line-class-stmt=no
# Allow the body of an if to be on the same line as the test if there is no
# else.
single-line-if-stmt=no
[BASIC]
# Naming style matching correct argument names
argument-naming-style=snake_case
# Regular expression matching correct argument names. Overrides argument-
# naming-style
#argument-rgx=
# Naming style matching correct attribute names
attr-naming-style=snake_case
# Regular expression matching correct attribute names. Overrides attr-naming-
# style
#attr-rgx=
# Bad variable names which should always be refused, separated by a comma
bad-names=foo,
bar,
baz,
toto,
tutu,
tata
# Naming style matching correct class attribute names
class-attribute-naming-style=any
# Regular expression matching correct class attribute names. Overrides class-
# attribute-naming-style
#class-attribute-rgx=
# Naming style matching correct class names
class-naming-style=PascalCase
# Regular expression matching correct class names. Overrides class-naming-style
#class-rgx=
# Naming style matching correct constant names
const-naming-style=UPPER_CASE
# Regular expression matching correct constant names. Overrides const-naming-
# style
#const-rgx=
# Minimum line length for functions/classes that require docstrings, shorter
# ones are exempt.
docstring-min-length=-1
# Naming style matching correct function names
function-naming-style=snake_case
# Regular expression matching correct function names. Overrides function-
# naming-style
#function-rgx=
# Good variable names which should always be accepted, separated by a comma
good-names=i,
j,
k,
ex,
Run,
_
# Include a hint for the correct naming format with invalid-name
include-naming-hint=no
# Naming style matching correct inline iteration names
inlinevar-naming-style=any
# Regular expression matching correct inline iteration names. Overrides
# inlinevar-naming-style
#inlinevar-rgx=
# Naming style matching correct method names
method-naming-style=snake_case
# Regular expression matching correct method names. Overrides method-naming-
# style
#method-rgx=
# Naming style matching correct module names
module-naming-style=snake_case
# Regular expression matching correct module names. Overrides module-naming-
# style
#module-rgx=
# Colon-delimited sets of names that determine each other's naming style when
# the name regexes allow several styles.
name-group=
# Regular expression which should only match function or class names that do
# not require a docstring.
no-docstring-rgx=^_
# List of decorators that produce properties, such as abc.abstractproperty. Add
# to this list to register other decorators that produce valid properties.
property-classes=abc.abstractproperty
# Naming style matching correct variable names
variable-naming-style=snake_case
# Regular expression matching correct variable names. Overrides variable-
# naming-style
#variable-rgx=
[DESIGN]
# Maximum number of arguments for function / method
max-args=5
# Maximum number of attributes for a class (see R0902).
max-attributes=7
# Maximum number of boolean expressions in a if statement
max-bool-expr=5
# Maximum number of branch for function / method body
max-branches=12
# Maximum number of locals for function / method body
max-locals=15
# Maximum number of parents for a class (see R0901).
max-parents=7
# Maximum number of public methods for a class (see R0904).
max-public-methods=20
# Maximum number of return / yield for function / method body
max-returns=6
# Maximum number of statements in function / method body
max-statements=50
# Minimum number of public methods for a class (see R0903).
min-public-methods=2
[CLASSES]
# List of method names used to declare (i.e. assign) instance attributes.
defining-attr-methods=__init__,
__new__,
setUp
# List of member names, which should be excluded from the protected access
# warning.
exclude-protected=_asdict,
_fields,
_replace,
_source,
_make
# List of valid names for the first argument in a class method.
valid-classmethod-first-arg=cls
# List of valid names for the first argument in a metaclass class method.
valid-metaclass-classmethod-first-arg=mcs
[IMPORTS]
# Allow wildcard imports from modules that define __all__.
allow-wildcard-with-all=no
# Analyse import fallback blocks. This can be used to support both Python 2 and
# 3 compatible code, which means that the block might have code that exists
# only in one or another interpreter, leading to false positives when analysed.
analyse-fallback-blocks=no
# Deprecated modules which should not be used, separated by a comma
deprecated-modules=regsub,
TERMIOS,
Bastion,
rexec
# Create a graph of external dependencies in the given file (report RP0402 must
# not be disabled)
ext-import-graph=
# Create a graph of every (i.e. internal and external) dependencies in the
# given file (report RP0402 must not be disabled)
import-graph=
# Create a graph of internal dependencies in the given file (report RP0402 must
# not be disabled)
int-import-graph=
# Force import order to recognize a module as part of the standard
# compatibility libraries.
known-standard-library=
# Force import order to recognize a module as part of a third party library.
known-third-party=enchant
[EXCEPTIONS]
# Exceptions that will emit a warning when being caught. Defaults to
# "Exception"
overgeneral-exceptions=Exception

18
Dockerfile Normal file

@@ -0,0 +1,18 @@
FROM ubuntu:16.04
RUN apt-get update && apt-get install -y libzmq3-dev capnproto libcapnp-dev clang wget git autoconf libtool curl make build-essential libssl-dev zlib1g-dev libbz2-dev libreadline-dev libsqlite3-dev llvm libncurses5-dev libncursesw5-dev xz-utils tk-dev libffi-dev liblzma-dev python-openssl libeigen3-dev
RUN curl -L https://github.com/pyenv/pyenv-installer/raw/master/bin/pyenv-installer | bash
ENV PATH="/root/.pyenv/bin:/root/.pyenv/shims:${PATH}"
RUN pyenv install 3.7.3
RUN pyenv global 3.7.3
RUN pyenv rehash
RUN pip3 install scons==3.1.1
WORKDIR /project/rednose
ENV PYTHONPATH=/project
COPY . .
RUN python3 setup.py install
RUN scons -c && scons -j$(nproc)

21
LICENSE Normal file

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2020 comma.ai
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

52
README.md Normal file

@@ -0,0 +1,52 @@
# Kalman filter library
## Introduction
The Kalman filter framework described here is an incredibly powerful tool for any optimization problem,
but particularly for visual odometry, sensor fusion localization or SLAM. It is designed to provide very
accurate results, work online or offline, be fairly computationally efficient, and be easy to design
filters with in Python.
## Feature walkthrough
### Extended Kalman Filter with symbolic Jacobian computation
Most dynamic systems can be described as a Hidden Markov Process. To estimate the state of such a system with noisy
measurements one can use a Recursive Bayesian estimator. For a linear Markov Process a regular linear Kalman filter is optimal.
Unfortunately, a lot of systems are non-linear. Extended Kalman Filters can model such systems by linearizing the non-linear
system at every step; this provides a close-to-optimal estimator when the linearization is good enough. If the linearization
introduces too much noise, one can use an Iterated Extended Kalman Filter, Unscented Kalman Filter or a Particle Filter. For
most applications those estimators are overkill; they add too much complexity and require a lot of additional compute.
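
To make the linearize-predict-update cycle concrete, here is a minimal numpy sketch of a single EKF step. This is not this library's API; `f`/`h` stand for user-supplied dynamics and measurement functions, and `F`/`H` for their Jacobians.

```python
import numpy as np

def ekf_step(x, P, z, f, F, h, H, Q, R):
  # predict: propagate state and covariance through the linearized dynamics
  x_pred = f(x)
  F_k = F(x)
  P_pred = F_k @ P @ F_k.T + Q

  # update: linearize the measurement model around the prediction
  H_k = H(x_pred)
  y = z - h(x_pred)                      # innovation
  S = H_k @ P_pred @ H_k.T + R           # innovation covariance
  K = P_pred @ H_k.T @ np.linalg.inv(S)  # Kalman gain
  x_new = x_pred + K @ y
  P_new = (np.eye(len(x)) - K @ H_k) @ P_pred
  return x_new, P_new
```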
Conventionally, Extended Kalman Filters are implemented by writing the system's dynamic equations and then calculating
the Jacobians for the linearization symbolically by hand. For complex systems this is time-consuming and very prone to
calculation errors. This library symbolically computes the Jacobians using sympy to simplify the system's definition
and remove the possibility of introducing calculation errors.
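
As an illustration of the idea (a toy model, not this library's exact code-generation pipeline), sympy can derive the linearization Jacobian of a constant-velocity system symbolically:

```python
import sympy as sp

dt = sp.Symbol('dt')
x, v = sp.symbols('x v')
state = sp.Matrix([x, v])

# toy constant-velocity dynamics: x' = x + v*dt, v' = v
f_sym = sp.Matrix([x + v * dt, v])

# the Jacobian falls out symbolically, with no hand calculation
F_sym = f_sym.jacobian(state)
print(F_sym)  # Matrix([[1, dt], [0, 1]])
```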
### Error State Kalman Filter
3D localization algorithms usually also require estimating the orientation of an object in 3D. Orientation is generally
represented with Euler angles or quaternions.

Euler angles have several problems: there are multiple ways to represent the same orientation,
gimbal lock can cause the loss of a degree of freedom, and lastly their behaviour is very non-linear when errors are large.
Quaternions with one strictly positive dimension don't suffer from these issues, but have another set of problems.
Quaternions need to be normalized, otherwise they will grow unbounded; this cannot be cleanly enforced in a Kalman filter.
Most importantly, a quaternion has 4 dimensions but only represents 3 degrees of freedom, so there is one redundant dimension.

Kalman filters are designed to minimize the error of the system's state. It is possible to have a Kalman filter where the state
and the error of the state are represented in different spaces, as long as there is an error function that can compute the error
from the true state and the estimated state. It is problematic to have redundant dimensions in the error of the Kalman filter,
but not in the state. A good compromise, then, is to use the quaternion to represent the system's attitude state and use Euler
angles to describe the error in attitude. This library supports defining an arbitrary error that is in a different space than
the state. [Joan Solà](https://arxiv.org/abs/1711.02508) has written a comprehensive description of using ESKFs for robust 3D
orientation estimation.
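
A minimal sketch of the multiplicative error injection this implies, assuming Hamilton quaternion conventions (the helper names here are illustrative, not this library's API):

```python
import numpy as np

def quat_mult(q, r):
  # Hamilton product of two quaternions [w, x, y, z]
  w0, x0, y0, z0 = q
  w1, x1, y1, z1 = r
  return np.array([w0*w1 - x0*x1 - y0*y1 - z0*z1,
                   w0*x1 + x0*w1 + y0*z1 - z0*y1,
                   w0*y1 - x0*z1 + y0*w1 + z0*x1,
                   w0*z1 + x0*y1 - y0*x1 + z0*w1])

def inject_error(q_nom, delta_theta):
  # first-order quaternion for a small rotation: true q ~= nominal q * [1, delta/2]
  dq = np.concatenate(([1.0], 0.5 * np.asarray(delta_theta)))
  q = quat_mult(q_nom, dq)
  return q / np.linalg.norm(q)  # renormalize to stay on the unit sphere
```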
### Multi-State Constraint Kalman Filter
How do you integrate feature-based visual odometry with a Kalman filter? The problem is that one cannot write an observation equation for 2D feature observations in image space for a localization Kalman filter. One needs to give the feature observation a depth so it has a 3D position; then one can write an observation equation in the Kalman filter. This is possible by tracking the feature across frames and then estimating its depth. However, the solution is not that simple: the depth estimated by tracking the feature across frames depends on the location of the camera at those frames, and thus on the state of the Kalman filter. This creates a positive feedback loop where the Kalman filter wrongly gains confidence in its position because the feature position updates reinforce it.
The solution is to use an [MSCKF](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.437.1085&rep=rep1&type=pdf), which this library fully supports.
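
The key trick, sketched below under the same assumptions as the `null()` helper in `rednose/helpers/ekf_sym.py`, is to project the feature residual onto the left null space of the feature-position Jacobian, so the unknown 3D feature position drops out of the update:

```python
import numpy as np

def project_out_feature(y, H, He, R):
  # columns of A span the left null space of He, so A.T @ He == 0 and the
  # projected residual no longer depends on the feature position error
  u, s, _ = np.linalg.svd(He)
  rank = int(np.sum(s > 1e-12))
  A = u[:, rank:]
  return A.T @ y, A.T @ H, A.T @ R @ A
```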
### Rauch-Tung-Striebel smoothing
When doing offline estimation with a Kalman filter there can be an initialization period where states are badly estimated.
Global estimators don't suffer from this. To make our Kalman filter competitive with global optimizers, we can run the filter
backwards using an RTS smoother. That, combined with potentially multiple forward and backward passes over the data, should
bring performance very close to global optimization.
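
For reference, a sketch of the classic RTS backward pass for a plain linear filter (the `rts_smooth` method in `ekf_sym.py` applies the same recursion on the error state):

```python
import numpy as np

def rts_backward(xs_pred, Ps_pred, xs_filt, Ps_filt, F):
  # xs_pred/Ps_pred: one-step predictions; xs_filt/Ps_filt: filtered
  # estimates; F: transition matrix (held constant for this sketch)
  x_s, P_s = xs_filt[-1], Ps_filt[-1]
  out = [(x_s, P_s)]
  for k in range(len(xs_filt) - 2, -1, -1):
    C = Ps_filt[k] @ F.T @ np.linalg.inv(Ps_pred[k + 1])  # smoother gain
    x_s = xs_filt[k] + C @ (x_s - xs_pred[k + 1])
    P_s = Ps_filt[k] + C @ (P_s - Ps_pred[k + 1]) @ C.T
    out.append((x_s, P_s))
  return out[::-1]
```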
### Mahalanobis distance outlier rejector
A lot of measurements do not come from a Gaussian distribution and as such have outliers that do not fit the statistical model
of the Kalman filter. This can cause a lot of performance issues if not dealt with. This library allows the use of a Mahalanobis
distance statistical test on the incoming measurements to deal with this. Note that good initialization is critical to prevent
good measurements from being rejected.
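
The gate itself is simple; here is a sketch equivalent to the `maha_test` method in `ekf_sym.py`, using scipy's chi-squared quantiles directly instead of the precomputed lookup table:

```python
import numpy as np
from scipy.stats import chi2

def passes_maha_gate(y, S, p=0.95):
  # y: innovation; S: innovation covariance H P H^T + R
  d2 = float(y.T @ np.linalg.solve(S, y))
  return d2 <= chi2.ppf(p, df=len(y))
```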

29
SConscript Normal file

@@ -0,0 +1,29 @@
Import('env', 'arch')

templates = Glob('#rednose/templates/*')

# TODO: get dependencies based on installation
sympy_helpers = "#rednose/helpers/sympy_helpers.py"
ekf_sym = "#rednose/helpers/ekf_sym.py"

to_build = {
  'live': ('examples/live_kf.py', 'examples/generated'),
}

found = {}
for target, (command, generated_folder) in to_build.items():
  if File(command).exists():
    found[target] = (command, generated_folder)

for target, (command, generated_folder) in found.items():
  target_files = File([f'{generated_folder}/{target}.cpp', f'{generated_folder}/{target}.h'])
  command_file = File(command)

  env.Command(target_files,
              [templates, command_file, sympy_helpers, ekf_sym],
              command_file.get_abspath() + " " + target)

  env.SharedLibrary(f'{generated_folder}/' + target, target_files[0])

26
SConstruct Normal file

@@ -0,0 +1,26 @@
import os
import subprocess

arch = subprocess.check_output(["uname", "-m"], encoding='utf8').rstrip()

env = Environment(
  ENV=os.environ,
  CC='clang',
  CXX='clang++',
  CCFLAGS=[
    "-g",
    "-fPIC",
    "-O2",
    "-Werror=implicit-function-declaration",
    "-Werror=incompatible-pointer-types",
    "-Werror=int-conversion",
    "-Werror=return-type",
    "-Werror=format-extra-args",
  ],
  CFLAGS="-std=gnu11",
  CXXFLAGS="-std=c++14",
)

Export('env', 'arch')

SConscript(['SConscript'])

0
examples/__init__.py Normal file

338
examples/live_kf.py Executable file

@@ -0,0 +1,338 @@
#!/usr/bin/env python3
import os
import numpy as np
import sympy as sp
from rednose.helpers import KalmanError
from rednose.helpers.ekf_sym import EKF_sym, gen_code
from rednose.helpers.sympy_helpers import (euler_rotate, quat_matrix_r, quat_rotate)
GENERATED_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), 'generated'))
EARTH_GM = 3.986005e14 # m^3/s^2 (gravitational constant * mass of earth)
class ObservationKind():
UNKNOWN = 0
NO_OBSERVATION = 1
GPS_NED = 2
ODOMETRIC_SPEED = 3
PHONE_GYRO = 4
GPS_VEL = 5
PSEUDORANGE_GPS = 6
PSEUDORANGE_RATE_GPS = 7
SPEED = 8
NO_ROT = 9
PHONE_ACCEL = 10
ORB_POINT = 11
ECEF_POS = 12
CAMERA_ODO_TRANSLATION = 13
CAMERA_ODO_ROTATION = 14
ORB_FEATURES = 15
MSCKF_TEST = 16
FEATURE_TRACK_TEST = 17
LANE_PT = 18
IMU_FRAME = 19
PSEUDORANGE_GLONASS = 20
PSEUDORANGE_RATE_GLONASS = 21
PSEUDORANGE = 22
PSEUDORANGE_RATE = 23
names = [
'Unknown',
'No observation',
'GPS NED',
'Odometric speed',
'Phone gyro',
'GPS velocity',
'GPS pseudorange',
'GPS pseudorange rate',
'Speed',
'No rotation',
'Phone acceleration',
'ORB point',
'ECEF pos',
'camera odometric translation',
'camera odometric rotation',
'ORB features',
'MSCKF test',
'Feature track test',
'Lane ecef point',
'imu frame eulers',
'GLONASS pseudorange',
'GLONASS pseudorange rate',
]
@classmethod
def to_string(cls, kind):
return cls.names[kind]
class States():
ECEF_POS = slice(0, 3) # x, y and z in ECEF in meters
ECEF_ORIENTATION = slice(3, 7) # quat for pose of phone in ecef
ECEF_VELOCITY = slice(7, 10) # ecef velocity in m/s
ANGULAR_VELOCITY = slice(10, 13) # roll, pitch and yaw rates in device frame in radians/s
GYRO_BIAS = slice(13, 16) # roll, pitch and yaw biases
ODO_SCALE = slice(16, 17) # odometer scale
ACCELERATION = slice(17, 20) # Acceleration in device frame in m/s**2
IMU_OFFSET = slice(20, 23) # imu offset angles in radians
# Error-state has different slices because it is an ESKF
ECEF_POS_ERR = slice(0, 3)
ECEF_ORIENTATION_ERR = slice(3, 6) # euler angles for orientation error
ECEF_VELOCITY_ERR = slice(6, 9)
ANGULAR_VELOCITY_ERR = slice(9, 12)
GYRO_BIAS_ERR = slice(12, 15)
ODO_SCALE_ERR = slice(15, 16)
ACCELERATION_ERR = slice(16, 19)
IMU_OFFSET_ERR = slice(19, 22)
class LiveKalman():
name = 'live'
initial_x = np.array([-2.7e6, 4.2e6, 3.8e6,
1, 0, 0, 0,
0, 0, 0,
0, 0, 0,
0, 0, 0,
1,
0, 0, 0,
0, 0, 0])
# state covariance
initial_P_diag = np.array([10000**2, 10000**2, 10000**2,
10**2, 10**2, 10**2,
10**2, 10**2, 10**2,
1**2, 1**2, 1**2,
0.05**2, 0.05**2, 0.05**2,
0.02**2,
1**2, 1**2, 1**2,
(0.01)**2, (0.01)**2, (0.01)**2])
# process noise
Q = np.diag([0.03**2, 0.03**2, 0.03**2,
0.0**2, 0.0**2, 0.0**2,
0.0**2, 0.0**2, 0.0**2,
0.1**2, 0.1**2, 0.1**2,
(0.005 / 100)**2, (0.005 / 100)**2, (0.005 / 100)**2,
(0.02 / 100)**2,
3**2, 3**2, 3**2,
(0.05 / 60)**2, (0.05 / 60)**2, (0.05 / 60)**2])
@staticmethod
def generate_code():
name = LiveKalman.name
dim_state = LiveKalman.initial_x.shape[0]
dim_state_err = LiveKalman.initial_P_diag.shape[0]
state_sym = sp.MatrixSymbol('state', dim_state, 1)
state = sp.Matrix(state_sym)
x, y, z = state[States.ECEF_POS, :]
q = state[States.ECEF_ORIENTATION, :]
v = state[States.ECEF_VELOCITY, :]
vx, vy, vz = v
omega = state[States.ANGULAR_VELOCITY, :]
vroll, vpitch, vyaw = omega
roll_bias, pitch_bias, yaw_bias = state[States.GYRO_BIAS, :]
odo_scale = state[States.ODO_SCALE, :][0,:]
acceleration = state[States.ACCELERATION, :]
imu_angles = state[States.IMU_OFFSET, :]
dt = sp.Symbol('dt')
# calibration and attitude rotation matrices
quat_rot = quat_rotate(*q)
# Got the quat predict equations from here
# A New Quaternion-Based Kalman Filter for
# Real-Time Attitude Estimation Using the Two-Step
# Geometrically-Intuitive Correction Algorithm
A = 0.5 * sp.Matrix([[0, -vroll, -vpitch, -vyaw],
[vroll, 0, vyaw, -vpitch],
[vpitch, -vyaw, 0, vroll],
[vyaw, vpitch, -vroll, 0]])
q_dot = A * q
# Time derivative of the state as a function of state
state_dot = sp.Matrix(np.zeros((dim_state, 1)))
state_dot[States.ECEF_POS, :] = v
state_dot[States.ECEF_ORIENTATION, :] = q_dot
state_dot[States.ECEF_VELOCITY, 0] = quat_rot * acceleration
# Basic discretization, 1st order integrator
# Can be pretty bad if dt is big
f_sym = state + dt * state_dot
state_err_sym = sp.MatrixSymbol('state_err', dim_state_err, 1)
state_err = sp.Matrix(state_err_sym)
quat_err = state_err[States.ECEF_ORIENTATION_ERR, :]
v_err = state_err[States.ECEF_VELOCITY_ERR, :]
omega_err = state_err[States.ANGULAR_VELOCITY_ERR, :]
acceleration_err = state_err[States.ACCELERATION_ERR, :]
# Time derivative of the state error as a function of state error and state
quat_err_matrix = euler_rotate(quat_err[0], quat_err[1], quat_err[2])
q_err_dot = quat_err_matrix * quat_rot * (omega + omega_err)
state_err_dot = sp.Matrix(np.zeros((dim_state_err, 1)))
state_err_dot[States.ECEF_POS_ERR, :] = v_err
state_err_dot[States.ECEF_ORIENTATION_ERR, :] = q_err_dot
state_err_dot[States.ECEF_VELOCITY_ERR, :] = quat_err_matrix * quat_rot * (acceleration + acceleration_err)
f_err_sym = state_err + dt * state_err_dot
# Observation matrix modifier
H_mod_sym = sp.Matrix(np.zeros((dim_state, dim_state_err)))
H_mod_sym[States.ECEF_POS, States.ECEF_POS_ERR] = np.eye(States.ECEF_POS.stop - States.ECEF_POS.start)
H_mod_sym[States.ECEF_ORIENTATION, States.ECEF_ORIENTATION_ERR] = 0.5 * quat_matrix_r(state[3:7])[:, 1:]
H_mod_sym[States.ECEF_ORIENTATION.stop:, States.ECEF_ORIENTATION_ERR.stop:] = np.eye(dim_state - States.ECEF_ORIENTATION.stop)
# these error functions are defined so that say there
# is a nominal x and true x:
# true x = err_function(nominal x, delta x)
# delta x = inv_err_function(nominal x, true x)
nom_x = sp.MatrixSymbol('nom_x', dim_state, 1)
true_x = sp.MatrixSymbol('true_x', dim_state, 1)
delta_x = sp.MatrixSymbol('delta_x', dim_state_err, 1)
err_function_sym = sp.Matrix(np.zeros((dim_state, 1)))
delta_quat = sp.Matrix(np.ones((4)))
delta_quat[1:, :] = sp.Matrix(0.5 * delta_x[States.ECEF_ORIENTATION_ERR, :])
err_function_sym[States.ECEF_POS, :] = sp.Matrix(nom_x[States.ECEF_POS, :] + delta_x[States.ECEF_POS_ERR, :])
err_function_sym[States.ECEF_ORIENTATION, 0] = quat_matrix_r(nom_x[States.ECEF_ORIENTATION, 0]) * delta_quat
err_function_sym[States.ECEF_ORIENTATION.stop:, :] = sp.Matrix(nom_x[States.ECEF_ORIENTATION.stop:, :] + delta_x[States.ECEF_ORIENTATION_ERR.stop:, :])
inv_err_function_sym = sp.Matrix(np.zeros((dim_state_err, 1)))
inv_err_function_sym[States.ECEF_POS_ERR, 0] = sp.Matrix(-nom_x[States.ECEF_POS, 0] + true_x[States.ECEF_POS, 0])
delta_quat = quat_matrix_r(nom_x[States.ECEF_ORIENTATION, 0]).T * true_x[States.ECEF_ORIENTATION, 0]
inv_err_function_sym[States.ECEF_ORIENTATION_ERR, 0] = sp.Matrix(2 * delta_quat[1:])
inv_err_function_sym[States.ECEF_ORIENTATION_ERR.stop:, 0] = sp.Matrix(-nom_x[States.ECEF_ORIENTATION.stop:, 0] + true_x[States.ECEF_ORIENTATION.stop:, 0])
eskf_params = [[err_function_sym, nom_x, delta_x],
[inv_err_function_sym, nom_x, true_x],
H_mod_sym, f_err_sym, state_err_sym]
#
# Observation functions
#
imu_rot = euler_rotate(*imu_angles)
h_gyro_sym = imu_rot * sp.Matrix([vroll + roll_bias,
vpitch + pitch_bias,
vyaw + yaw_bias])
pos = sp.Matrix([x, y, z])
gravity = quat_rot.T * ((EARTH_GM / ((x**2 + y**2 + z**2)**(3.0 / 2.0))) * pos)
h_acc_sym = imu_rot * (gravity + acceleration)
h_phone_rot_sym = sp.Matrix([vroll, vpitch, vyaw])
speed = sp.sqrt(vx**2 + vy**2 + vz**2)
h_speed_sym = sp.Matrix([speed * odo_scale])
h_pos_sym = sp.Matrix([x, y, z])
h_imu_frame_sym = sp.Matrix(imu_angles)
h_relative_motion = sp.Matrix(quat_rot.T * v)
obs_eqs = [[h_speed_sym, ObservationKind.ODOMETRIC_SPEED, None],
[h_gyro_sym, ObservationKind.PHONE_GYRO, None],
[h_phone_rot_sym, ObservationKind.NO_ROT, None],
[h_acc_sym, ObservationKind.PHONE_ACCEL, None],
[h_pos_sym, ObservationKind.ECEF_POS, None],
[h_relative_motion, ObservationKind.CAMERA_ODO_TRANSLATION, None],
[h_phone_rot_sym, ObservationKind.CAMERA_ODO_ROTATION, None],
[h_imu_frame_sym, ObservationKind.IMU_FRAME, None]]
gen_code(GENERATED_DIR, name, f_sym, dt, state_sym, obs_eqs, dim_state, dim_state_err, eskf_params)
def __init__(self):
self.dim_state = self.initial_x.shape[0]
self.dim_state_err = self.initial_P_diag.shape[0]
self.obs_noise = {ObservationKind.ODOMETRIC_SPEED: np.atleast_2d(0.2**2),
ObservationKind.PHONE_GYRO: np.diag([0.025**2, 0.025**2, 0.025**2]),
ObservationKind.PHONE_ACCEL: np.diag([.5**2, .5**2, .5**2]),
ObservationKind.CAMERA_ODO_ROTATION: np.diag([0.05**2, 0.05**2, 0.05**2]),
ObservationKind.IMU_FRAME: np.diag([0.05**2, 0.05**2, 0.05**2]),
ObservationKind.NO_ROT: np.diag([0.00025**2, 0.00025**2, 0.00025**2]),
ObservationKind.ECEF_POS: np.diag([5**2, 5**2, 5**2])}
# init filter
self.filter = EKF_sym(GENERATED_DIR, self.name, self.Q, self.initial_x, np.diag(self.initial_P_diag), self.dim_state, self.dim_state_err)
@property
def x(self):
return self.filter.state()
@property
def t(self):
return self.filter.filter_time
@property
def P(self):
return self.filter.covs()
def rts_smooth(self, estimates):
return self.filter.rts_smooth(estimates, norm_quats=True)
def init_state(self, state, covs_diag=None, covs=None, filter_time=None):
if covs_diag is not None:
P = np.diag(covs_diag)
elif covs is not None:
P = covs
else:
P = self.filter.covs()
self.filter.init_state(state, P, filter_time)
def predict_and_observe(self, t, kind, data):
if len(data) > 0:
data = np.atleast_2d(data)
if kind == ObservationKind.CAMERA_ODO_TRANSLATION:
r = self.predict_and_update_odo_trans(data, t, kind)
elif kind == ObservationKind.CAMERA_ODO_ROTATION:
r = self.predict_and_update_odo_rot(data, t, kind)
elif kind == ObservationKind.ODOMETRIC_SPEED:
r = self.predict_and_update_odo_speed(data, t, kind)
else:
r = self.filter.predict_and_update_batch(t, kind, data, self.get_R(kind, len(data)))
# Normalize quats
quat_norm = np.linalg.norm(self.filter.x[3:7, 0])
# Should not continue if the quats behave this weirdly
if not (0.1 < quat_norm < 10):
raise KalmanError("Kalman filter quaternions unstable")
self.filter.x[States.ECEF_ORIENTATION, 0] = self.filter.x[States.ECEF_ORIENTATION, 0] / quat_norm
return r
def get_R(self, kind, n):
obs_noise = self.obs_noise[kind]
dim = obs_noise.shape[0]
R = np.zeros((n, dim, dim))
for i in range(n):
R[i, :, :] = obs_noise
return R
def predict_and_update_odo_speed(self, speed, t, kind):
z = np.array(speed)
R = np.zeros((len(speed), 1, 1))
for i, _ in enumerate(z):
R[i, :, :] = np.diag([0.2**2])
return self.filter.predict_and_update_batch(t, kind, z, R)
def predict_and_update_odo_trans(self, trans, t, kind):
z = trans[:, :3]
R = np.zeros((len(trans), 3, 3))
for i, _ in enumerate(z):
R[i, :, :] = np.diag(trans[i, 3:]**2)
return self.filter.predict_and_update_batch(t, kind, z, R)
def predict_and_update_odo_rot(self, rot, t, kind):
z = rot[:, :3]
R = np.zeros((len(rot), 3, 3))
for i, _ in enumerate(z):
R[i, :, :] = np.diag(rot[i, 3:]**2)
return self.filter.predict_and_update_batch(t, kind, z, R)
if __name__ == "__main__":
LiveKalman.generate_code()

0
rednose/__init__.py Normal file

26
rednose/helpers/__init__.py Normal file

@@ -0,0 +1,26 @@
import os

from cffi import FFI

TEMPLATE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'templates'))


def write_code(folder, name, code, header):
  if not os.path.exists(folder):
    os.mkdir(folder)

  open(os.path.join(folder, f"{name}.cpp"), 'w').write(code)
  open(os.path.join(folder, f"{name}.h"), 'w').write(header)


def load_code(folder, name):
  shared_fn = os.path.join(folder, f"lib{name}.so")
  header_fn = os.path.join(folder, f"{name}.h")

  header = open(header_fn).read()

  ffi = FFI()
  ffi.cdef(header)
  return (ffi, ffi.dlopen(shared_fn))


class KalmanError(Exception):
  pass

22
rednose/helpers/chi2_lookup.py Normal file

@@ -0,0 +1,22 @@
import os

import numpy as np


def gen_chi2_ppf_lookup(max_dim=200):
  from scipy.stats import chi2

  table = np.zeros((max_dim, 98))
  for dim in range(1, max_dim):
    table[dim] = chi2.ppf(np.arange(.01, .99, .01), dim)

  np.save('chi2_lookup_table', table)


def chi2_ppf(p, dim):
  table = np.load(os.path.dirname(os.path.realpath(__file__)) + '/chi2_lookup_table.npy')
  result = np.interp(p, np.arange(.01, .99, .01), table[dim])
  return result


if __name__ == "__main__":
  gen_chi2_ppf_lookup()

rednose/helpers/chi2_lookup_table.npy Normal file
Binary file not shown.

600
rednose/helpers/ekf_sym.py Normal file

@@ -0,0 +1,600 @@
import os
from bisect import bisect_right
import numpy as np
import sympy as sp
from numpy import dot
from rednose.helpers.sympy_helpers import sympy_into_c
from rednose.helpers import (TEMPLATE_DIR, load_code, write_code)
from rednose.helpers.chi2_lookup import chi2_ppf
def solve(a, b):
if a.shape[0] == 1 and a.shape[1] == 1:
return b / a[0][0]
else:
return np.linalg.solve(a, b)
def null(H, eps=1e-12):
u, s, vh = np.linalg.svd(H)
padding = max(0, np.shape(H)[1] - np.shape(s)[0])
null_mask = np.concatenate(((s <= eps), np.ones((padding,), dtype=bool)), axis=0)
null_space = np.compress(null_mask, vh, axis=0)
return np.transpose(null_space)
def gen_code(folder, name, f_sym, dt_sym, x_sym, obs_eqs, dim_x, dim_err, eskf_params=None, msckf_params=None, maha_test_kinds=[], global_vars=None):
# optional state transition matrix, H modifier
# and err_function if an error-state kalman filter (ESKF)
# is desired. Best described in "Quaternion kinematics
# for the error-state Kalman filter" by Joan Sola
if eskf_params:
err_eqs = eskf_params[0]
inv_err_eqs = eskf_params[1]
H_mod_sym = eskf_params[2]
f_err_sym = eskf_params[3]
x_err_sym = eskf_params[4]
else:
nom_x = sp.MatrixSymbol('nom_x', dim_x, 1)
true_x = sp.MatrixSymbol('true_x', dim_x, 1)
delta_x = sp.MatrixSymbol('delta_x', dim_x, 1)
err_function_sym = sp.Matrix(nom_x + delta_x)
inv_err_function_sym = sp.Matrix(true_x - nom_x)
err_eqs = [err_function_sym, nom_x, delta_x]
inv_err_eqs = [inv_err_function_sym, nom_x, true_x]
H_mod_sym = sp.Matrix(np.eye(dim_x))
f_err_sym = f_sym
x_err_sym = x_sym
# This configures the multi-state augmentation
# needed for EKF-SLAM with MSCKF (Mourikis et al 2007)
if msckf_params:
msckf = True
dim_main = msckf_params[0] # size of the main state
dim_augment = msckf_params[1] # size of one augment state chunk
dim_main_err = msckf_params[2]
dim_augment_err = msckf_params[3]
N = msckf_params[4]
feature_track_kinds = msckf_params[5]
assert dim_main + dim_augment * N == dim_x
assert dim_main_err + dim_augment_err * N == dim_err
else:
msckf = False
dim_main = dim_x
dim_augment = 0
dim_main_err = dim_err
dim_augment_err = 0
N = 0
# linearize with jacobians
F_sym = f_err_sym.jacobian(x_err_sym)
if eskf_params:
for sym in x_err_sym:
F_sym = F_sym.subs(sym, 0)
assert dt_sym in F_sym.free_symbols
for i in range(len(obs_eqs)):
obs_eqs[i].append(obs_eqs[i][0].jacobian(x_sym))
if msckf and obs_eqs[i][1] in feature_track_kinds:
obs_eqs[i].append(obs_eqs[i][0].jacobian(obs_eqs[i][2]))
else:
obs_eqs[i].append(None)
# collect sympy functions
sympy_functions = []
# error functions
sympy_functions.append(('err_fun', err_eqs[0], [err_eqs[1], err_eqs[2]]))
sympy_functions.append(('inv_err_fun', inv_err_eqs[0], [inv_err_eqs[1], inv_err_eqs[2]]))
# H modifier for ESKF updates
sympy_functions.append(('H_mod_fun', H_mod_sym, [x_sym]))
# state propagation function
sympy_functions.append(('f_fun', f_sym, [x_sym, dt_sym]))
sympy_functions.append(('F_fun', F_sym, [x_sym, dt_sym]))
# observation functions
for h_sym, kind, ea_sym, H_sym, He_sym in obs_eqs:
sympy_functions.append(('h_%d' % kind, h_sym, [x_sym, ea_sym]))
sympy_functions.append(('H_%d' % kind, H_sym, [x_sym, ea_sym]))
if msckf and kind in feature_track_kinds:
sympy_functions.append(('He_%d' % kind, He_sym, [x_sym, ea_sym]))
# Generate and wrap all the C code
header, code = sympy_into_c(sympy_functions, global_vars)
extra_header = "#define DIM %d\n" % dim_x
extra_header += "#define EDIM %d\n" % dim_err
extra_header += "#define MEDIM %d\n" % dim_main_err
extra_header += "typedef void (*Hfun)(double *, double *, double *);\n"
extra_header += "\nvoid predict(double *x, double *P, double *Q, double dt);"
extra_post = ""
for h_sym, kind, ea_sym, H_sym, He_sym in obs_eqs:
if msckf and kind in feature_track_kinds:
He_str = 'He_%d' % kind
# ea_dim = ea_sym.shape[0]
else:
He_str = 'NULL'
# ea_dim = 1 # not really dim of ea but makes c function work
maha_thresh = chi2_ppf(0.95, int(h_sym.shape[0])) # mahalanobis distance for outlier detection
maha_test = kind in maha_test_kinds
extra_post += """
void update_%d(double *in_x, double *in_P, double *in_z, double *in_R, double *in_ea) {
update<%d,%d,%d>(in_x, in_P, h_%d, H_%d, %s, in_z, in_R, in_ea, MAHA_THRESH_%d);
}
""" % (kind, h_sym.shape[0], 3, maha_test, kind, kind, He_str, kind)
extra_header += "\nconst static double MAHA_THRESH_%d = %f;" % (kind, maha_thresh)
extra_header += "\nvoid update_%d(double *, double *, double *, double *, double *);" % kind
code += '\nextern "C"{\n' + extra_header + "\n}\n"
code += "\n" + open(os.path.join(TEMPLATE_DIR, "ekf_c.c")).read()
code += '\nextern "C"{\n' + extra_post + "\n}\n"
if global_vars is not None:
global_code = '\nextern "C"{\n'
for var in global_vars:
global_code += f"\ndouble {var.name};\n"
global_code += f"\nvoid set_{var.name}(double x){{ {var.name} = x;}}\n"
extra_header += f"\nvoid set_{var.name}(double x);\n"
global_code += '\n}\n'
code = global_code + code
header += "\n" + extra_header
write_code(folder, name, code, header)
class EKF_sym():
def __init__(self, folder, name, Q, x_initial, P_initial, dim_main, dim_main_err,
N=0, dim_augment=0, dim_augment_err=0, maha_test_kinds=[], global_vars=None):
"""Generates process function and all observation functions for the kalman filter."""
self.msckf = N > 0
self.N = N
self.dim_augment = dim_augment
self.dim_augment_err = dim_augment_err
self.dim_main = dim_main
self.dim_main_err = dim_main_err
# state
x_initial = x_initial.reshape((-1, 1))
self.dim_x = x_initial.shape[0]
self.dim_err = P_initial.shape[0]
assert dim_main + dim_augment * N == self.dim_x
assert dim_main_err + dim_augment_err * N == self.dim_err
assert Q.shape == P_initial.shape
# kinds that should get mahalanobis distance
# tested for outlier rejection
self.maha_test_kinds = maha_test_kinds
self.global_vars = global_vars
# process noise
self.Q = Q
# rewind stuff
self.rewind_t = []
self.rewind_states = []
self.rewind_obscache = []
self.init_state(x_initial, P_initial, None)
ffi, lib = load_code(folder, name)
kinds, self.feature_track_kinds = [], []
for func in dir(lib):
if func[:2] == 'h_':
kinds.append(int(func[2:]))
if func[:3] == 'He_':
self.feature_track_kinds.append(int(func[3:]))
# wrap all the sympy functions
def wrap_1lists(name):
func = eval("lib.%s" % name, {"lib": lib})
def ret(lst1, out):
func(ffi.cast("double *", lst1.ctypes.data),
ffi.cast("double *", out.ctypes.data))
return ret
def wrap_2lists(name):
func = eval("lib.%s" % name, {"lib": lib})
def ret(lst1, lst2, out):
func(ffi.cast("double *", lst1.ctypes.data),
ffi.cast("double *", lst2.ctypes.data),
ffi.cast("double *", out.ctypes.data))
return ret
def wrap_1list_1float(name):
func = eval("lib.%s" % name, {"lib": lib})
def ret(lst1, fl, out):
func(ffi.cast("double *", lst1.ctypes.data),
ffi.cast("double", fl),
ffi.cast("double *", out.ctypes.data))
return ret
self.f = wrap_1list_1float("f_fun")
self.F = wrap_1list_1float("F_fun")
self.err_function = wrap_2lists("err_fun")
self.inv_err_function = wrap_2lists("inv_err_fun")
self.H_mod = wrap_1lists("H_mod_fun")
self.hs, self.Hs, self.Hes = {}, {}, {}
for kind in kinds:
self.hs[kind] = wrap_2lists("h_%d" % kind)
self.Hs[kind] = wrap_2lists("H_%d" % kind)
if self.msckf and kind in self.feature_track_kinds:
self.Hes[kind] = wrap_2lists("He_%d" % kind)
if self.global_vars is not None:
for var in self.global_vars:
fun_name = f"set_{var.name}"
setattr(self, fun_name, getattr(lib, fun_name))
# wrap the C++ predict function
def _predict_blas(x, P, dt):
lib.predict(ffi.cast("double *", x.ctypes.data),
ffi.cast("double *", P.ctypes.data),
ffi.cast("double *", self.Q.ctypes.data),
ffi.cast("double", dt))
return x, P
# wrap the C++ update function
def fun_wrapper(f, kind):
f = eval("lib.%s" % f, {"lib": lib})
def _update_inner_blas(x, P, z, R, extra_args):
f(ffi.cast("double *", x.ctypes.data),
ffi.cast("double *", P.ctypes.data),
ffi.cast("double *", z.ctypes.data),
ffi.cast("double *", R.ctypes.data),
ffi.cast("double *", extra_args.ctypes.data))
if self.msckf and kind in self.feature_track_kinds:
y = z[:-len(extra_args)]
else:
y = z
return x, P, y
return _update_inner_blas
self._updates = {}
for kind in kinds:
self._updates[kind] = fun_wrapper("update_%d" % kind, kind)
def _update_blas(x, P, kind, z, R, extra_args=[]):
return self._updates[kind](x, P, z, R, extra_args)
# assign the functions
self._predict = _predict_blas
# self._predict = self._predict_python
self._update = _update_blas
# self._update = self._update_python
def init_state(self, state, covs, filter_time):
self.x = np.array(state.reshape((-1, 1))).astype(np.float64)
self.P = np.array(covs).astype(np.float64)
self.filter_time = filter_time
self.augment_times = [0] * self.N
self.rewind_obscache = []
self.rewind_t = []
self.rewind_states = []
def reset_rewind(self):
self.rewind_obscache = []
self.rewind_t = []
self.rewind_states = []
def augment(self):
# TODO this is not a generalized way of doing this and implies that the augmented states
# are simply the first (dim_augment_state) elements of the main state.
assert self.msckf
d1 = self.dim_main
d2 = self.dim_main_err
d3 = self.dim_augment
d4 = self.dim_augment_err
# push through augmented states
self.x[d1:-d3] = self.x[d1 + d3:]
self.x[-d3:] = self.x[:d3]
assert self.x.shape == (self.dim_x, 1)
# push through augmented covs
assert self.P.shape == (self.dim_err, self.dim_err)
P_reduced = self.P
P_reduced = np.delete(P_reduced, np.s_[d2:d2 + d4], axis=1)
P_reduced = np.delete(P_reduced, np.s_[d2:d2 + d4], axis=0)
assert P_reduced.shape == (self.dim_err - d4, self.dim_err - d4)
to_mult = np.zeros((self.dim_err, self.dim_err - d4))
to_mult[:-d4, :] = np.eye(self.dim_err - d4)
to_mult[-d4:, :d4] = np.eye(d4)
self.P = to_mult.dot(P_reduced.dot(to_mult.T))
self.augment_times = self.augment_times[1:]
self.augment_times.append(self.filter_time)
assert self.P.shape == (self.dim_err, self.dim_err)
def state(self):
return np.array(self.x).flatten()
def covs(self):
return self.P
def rewind(self, t):
# find where we are rewinding to
idx = bisect_right(self.rewind_t, t)
assert self.rewind_t[idx - 1] <= t
assert self.rewind_t[idx] > t # must be true, or rewind wouldn't be called
# set the state to the time right before that
self.filter_time = self.rewind_t[idx - 1]
self.x[:] = self.rewind_states[idx - 1][0]
self.P[:] = self.rewind_states[idx - 1][1]
# return the observations we rewound over for fast forwarding
ret = self.rewind_obscache[idx:]
# throw away the old future
# TODO: is this making a copy?
self.rewind_t = self.rewind_t[:idx]
self.rewind_states = self.rewind_states[:idx]
self.rewind_obscache = self.rewind_obscache[:idx]
return ret
def checkpoint(self, obs):
# push to rewinder
self.rewind_t.append(self.filter_time)
self.rewind_states.append((np.copy(self.x), np.copy(self.P)))
self.rewind_obscache.append(obs)
# only keep a certain number around
REWIND_TO_KEEP = 512
self.rewind_t = self.rewind_t[-REWIND_TO_KEEP:]
self.rewind_states = self.rewind_states[-REWIND_TO_KEEP:]
self.rewind_obscache = self.rewind_obscache[-REWIND_TO_KEEP:]
def predict(self, t):
# initialize time
if self.filter_time is None:
self.filter_time = t
# predict
dt = t - self.filter_time
assert dt >= 0
self.x, self.P = self._predict(self.x, self.P, dt)
self.filter_time = t
def predict_and_update_batch(self, t, kind, z, R, extra_args=[[]], augment=False):
# TODO: handle rewinding at this level
# rewind
if self.filter_time is not None and t < self.filter_time:
if len(self.rewind_t) == 0 or t < self.rewind_t[0] or t < self.rewind_t[-1] - 1.0:
print("observation too old at %.3f with filter at %.3f, ignoring" % (t, self.filter_time))
return None
rewound = self.rewind(t)
else:
rewound = []
ret = self._predict_and_update_batch(t, kind, z, R, extra_args, augment)
# optional fast forward
for r in rewound:
self._predict_and_update_batch(*r)
return ret
def _predict_and_update_batch(self, t, kind, z, R, extra_args, augment=False):
"""The main kalman filter function
Predicts the state and then updates a batch of observations
dim_x: dimensionality of the state space
dim_z: dimensionality of the observation and depends on kind
n: number of observations
Args:
t (float): Time of observation
kind (int): Type of observation
z (vec [n,dim_z]): Measurements
R (mat [n,dim_z, dim_z]): Measurement Noise
extra_args (list, [n]): Values used in H computations
"""
# initialize time
if self.filter_time is None:
self.filter_time = t
# predict
dt = t - self.filter_time
assert dt >= 0
self.x, self.P = self._predict(self.x, self.P, dt)
self.filter_time = t
xk_km1, Pk_km1 = np.copy(self.x).flatten(), np.copy(self.P)
# update batch
y = []
for i in range(len(z)):
# these are from the user, so we canonicalize them
z_i = np.array(z[i], dtype=np.float64, order='F')
R_i = np.array(R[i], dtype=np.float64, order='F')
extra_args_i = np.array(extra_args[i], dtype=np.float64, order='F')
# update
self.x, self.P, y_i = self._update(self.x, self.P, kind, z_i, R_i, extra_args=extra_args_i)
y.append(y_i)
xk_k, Pk_k = np.copy(self.x).flatten(), np.copy(self.P)
if augment:
self.augment()
# checkpoint
self.checkpoint((t, kind, z, R, extra_args))
return xk_km1, xk_k, Pk_km1, Pk_k, t, kind, y, z, extra_args
def _predict_python(self, x, P, dt):
x_new = np.zeros(x.shape, dtype=np.float64)
self.f(x, dt, x_new)
F = np.zeros(P.shape, dtype=np.float64)
self.F(x, dt, F)
if not self.msckf:
P = dot(dot(F, P), F.T)
else:
# Update the predicted state covariance:
#   Pk+1|k = [ F*Pii*F.T + Q*dt   F*Pij ]
#            [ Pij.T*F.T          Pjj   ]
# where F is the jacobian of the main state
# predict function, Pii is the main state's
# covariance and Q its process noise. Pij
# is the covariance between the augmented
# states and the main state.
d2 = self.dim_main_err # known at compile time
F_curr = F[:d2, :d2]
P[:d2, :d2] = (F_curr.dot(P[:d2, :d2])).dot(F_curr.T)
P[:d2, d2:] = F_curr.dot(P[:d2, d2:])
P[d2:, :d2] = P[d2:, :d2].dot(F_curr.T)
P += dt * self.Q
return x_new, P
def _update_python(self, x, P, kind, z, R, extra_args=[]):
# init vars
z = z.reshape((-1, 1))
h = np.zeros(z.shape, dtype=np.float64)
H = np.zeros((z.shape[0], self.dim_x), dtype=np.float64)
# C functions
self.hs[kind](x, extra_args, h)
self.Hs[kind](x, extra_args, H)
# y is the "loss"
y = z - h
# *** same above this line ***
if self.msckf and kind in self.Hes:
# Do some algebraic magic to decorrelate
He = np.zeros((z.shape[0], len(extra_args)), dtype=np.float64)
self.Hes[kind](x, extra_args, He)
# TODO: Don't call a function here, do projection locally
A = null(He.T)
y = A.T.dot(y)
H = A.T.dot(H)
R = A.T.dot(R.dot(A))
# TODO If nullspace isn't the dimension we want
if A.shape[1] + He.shape[1] != A.shape[0]:
print('Warning: null space projection failed, measurement ignored')
return x, P, np.zeros(A.shape[0] - He.shape[1])
# if using eskf
H_mod = np.zeros((x.shape[0], P.shape[0]), dtype=np.float64)
self.H_mod(x, H_mod)
H = H.dot(H_mod)
# Do Mahalanobis distance test
# currently just runs on msckf observations
# could run on anything if needed
if self.msckf and kind in self.maha_test_kinds:
a = np.linalg.inv(H.dot(P).dot(H.T) + R)
maha_dist = y.T.dot(a.dot(y))
if maha_dist > chi2_ppf(0.95, y.shape[0]):
R = 10e16 * R
# *** same below this line ***
# Outlier resilient weighting as described in:
# "A Kalman Filter for Robust Outlier Detection - Jo-Anne Ting, ..."
weight = 1 # (1.5)/(1 + np.sum(y**2)/np.sum(R))
S = dot(dot(H, P), H.T) + R / weight
K = solve(S, dot(H, P.T)).T
I_KH = np.eye(P.shape[0]) - dot(K, H)
# update actual state
delta_x = dot(K, y)
P = dot(dot(I_KH, P), I_KH.T) + dot(dot(K, R), K.T)
# inject observed error into state
x_new = np.zeros(x.shape, dtype=np.float64)
self.err_function(x, delta_x, x_new)
return x_new, P, y.flatten()
def maha_test(self, x, P, kind, z, R, extra_args=[], maha_thresh=0.95):
# init vars
z = z.reshape((-1, 1))
h = np.zeros(z.shape, dtype=np.float64)
H = np.zeros((z.shape[0], self.dim_x), dtype=np.float64)
# C functions
self.hs[kind](x, extra_args, h)
self.Hs[kind](x, extra_args, H)
# y is the "loss"
y = z - h
# if using eskf
H_mod = np.zeros((x.shape[0], P.shape[0]), dtype=np.float64)
self.H_mod(x, H_mod)
H = H.dot(H_mod)
a = np.linalg.inv(H.dot(P).dot(H.T) + R)
maha_dist = y.T.dot(a.dot(y))
if maha_dist > chi2_ppf(maha_thresh, y.shape[0]):
return False
else:
return True
def rts_smooth(self, estimates, norm_quats=False):
'''
Returns rts smoothed results of
kalman filter estimates
If the kalman state is augmented with
old states only the main state is smoothed
'''
xk_n = estimates[-1][0]
Pk_n = estimates[-1][2]
Fk_1 = np.zeros(Pk_n.shape, dtype=np.float64)
states_smoothed = [xk_n]
covs_smoothed = [Pk_n]
for k in range(len(estimates) - 2, -1, -1):
xk1_n = xk_n
if norm_quats:
xk1_n[3:7] /= np.linalg.norm(xk1_n[3:7])
Pk1_n = Pk_n
xk1_k, _, Pk1_k, _, t2, _, _, _, _ = estimates[k + 1]
_, xk_k, _, Pk_k, t1, _, _, _, _ = estimates[k]
dt = t2 - t1
self.F(xk_k, dt, Fk_1)
d1 = self.dim_main
d2 = self.dim_main_err
Ck = np.linalg.solve(Pk1_k[:d2, :d2], Fk_1[:d2, :d2].dot(Pk_k[:d2, :d2].T)).T
xk_n = xk_k
delta_x = np.zeros((Pk_n.shape[0], 1), dtype=np.float64)
self.inv_err_function(xk1_k, xk1_n, delta_x)
delta_x[:d2] = Ck.dot(delta_x[:d2])
x_new = np.zeros((xk_n.shape[0], 1), dtype=np.float64)
self.err_function(xk_k, delta_x, x_new)
xk_n[:d1] = x_new[:d1, 0]
Pk_n = Pk_k
Pk_n[:d2, :d2] = Pk_k[:d2, :d2] + Ck.dot(Pk1_n[:d2, :d2] - Pk1_k[:d2, :d2]).dot(Ck.T)
states_smoothed.append(xk_n)
covs_smoothed.append(Pk_n)
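# the pass ran backwards, so flip the results into forward time order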
return np.flipud(np.vstack(states_smoothed)), np.stack(covs_smoothed, 0)[::-1]

View File

@ -0,0 +1,166 @@
#!/usr/bin/env python3
import os
import numpy as np
from rednose.helpers import (TEMPLATE_DIR, load_code, write_code)
from rednose.helpers.sympy_helpers import quat_matrix_l
def rot_matrix(roll, pitch, yaw):
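# euler angles (radians) -> rotation matrix, composed as
# R = Rz(yaw) Ry(pitch) Rx(roll)
# e.g. rot_matrix(0, 0, np.pi / 2) maps the x axis onto the y axis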
cr, sr = np.cos(roll), np.sin(roll)
cp, sp = np.cos(pitch), np.sin(pitch)
cy, sy = np.cos(yaw), np.sin(yaw)
rr = np.array([[1,0,0],[0, cr,-sr],[0, sr, cr]])
rp = np.array([[cp,0,sp],[0, 1,0],[-sp, 0, cp]])
ry = np.array([[cy,-sy,0],[sy, cy,0],[0, 0, 1]])
return ry.dot(rp.dot(rr))
def sane(track):
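# sanity heuristic: reject a track if any step-to-step image
# motion larger than 0.05 more than doubles or more than halves
# relative to the previous step (i.e. erratic feature matches)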
img_pos = track[1:, 2:4]
diffs_x = abs(img_pos[1:, 0] - img_pos[:-1, 0])
diffs_y = abs(img_pos[1:, 1] - img_pos[:-1, 1])
for i in range(1, len(diffs_x)):
if ((diffs_x[i] > 0.05 or diffs_x[i - 1] > 0.05) and
(diffs_x[i] > 2 * diffs_x[i - 1] or
diffs_x[i] < .5 * diffs_x[i - 1])) or \
((diffs_y[i] > 0.05 or diffs_y[i - 1] > 0.05) and
(diffs_y[i] > 2 * diffs_y[i - 1] or
diffs_y[i] < .5 * diffs_y[i - 1])):
return False
return True
class FeatureHandler():
name = 'feature_handler'
@staticmethod
def generate_code(K=5):
# Wrap c code for slow matching
c_header = "\nvoid merge_features(double *tracks, double *features, long long *empty_idxs);"
c_code = "#include <math.h>\n"
c_code += "#include <string.h>\n"
c_code += "#define K %d\n" % K
c_code += "\n" + open(os.path.join(TEMPLATE_DIR, "feature_handler.c")).read()
filename = f"{FeatureHandler.name}_{K}"
write_code(filename, c_code, c_header)
def __init__(self, K=5):
self.MAX_TRACKS = 6000
self.K = K
# Array of tracks, each track has K 5D features preceded
# by 5 params that indicate [f_idx, last_idx, updated, complete, valid]
# f_idx: idx of the current last feature in the track
# last_idx: idx of the last feature in the frame
# updated: whether this track was updated with a feature this frame
# complete: whether this track is complete (holds K features)
# valid: whether this track is valid (passed the sanity check)
self.tracks = np.zeros((self.MAX_TRACKS, K + 1, 5))
self.tracks[:] = np.nan
name = f"{FeatureHandler.name}_{K}"
ffi, lib = load_code(name)
def merge_features_c(tracks, features, empty_idxs):
lib.merge_features(ffi.cast("double *", tracks.ctypes.data),
ffi.cast("double *", features.ctypes.data),
ffi.cast("long long *", empty_idxs.ctypes.data))
# self.merge_features = self.merge_features_python
self.merge_features = merge_features_c
def reset(self):
self.tracks[:] = np.nan
def merge_features_python(self, tracks, features, empty_idxs):
empty_idx = 0
for f in features:
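# feature layout, inferred from the usage here and in sane():
# f[1] is the feature's index in the current frame, f[2:4] its
# image position and f[4] the index of the track it matched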
match_idx = int(f[4])
if tracks[match_idx, 0, 1] == match_idx and tracks[match_idx, 0, 2] == 0:
tracks[match_idx, 0, 0] += 1
tracks[match_idx, 0, 1] = f[1]
tracks[match_idx, 0, 2] = 1
tracks[match_idx, int(tracks[match_idx, 0, 0])] = f
if tracks[match_idx, 0, 0] == self.K:
tracks[match_idx, 0, 3] = 1
if sane(tracks[match_idx]):
tracks[match_idx, 0, 4] = 1
else:
if empty_idx == len(empty_idxs):
print('need more empty space')
continue
tracks[empty_idxs[empty_idx], 0, 0] = 1
tracks[empty_idxs[empty_idx], 0, 1] = f[1]
tracks[empty_idxs[empty_idx], 0, 2] = 1
tracks[empty_idxs[empty_idx], 1] = f
empty_idx += 1
def update_tracks(self, features):
last_idxs = np.copy(self.tracks[:, 0, 1])
real = np.isfinite(last_idxs)
self.tracks[last_idxs[real].astype(int)] = self.tracks[real]
mask = np.ones(self.MAX_TRACKS, dtype=bool)
mask[last_idxs[real].astype(int)] = 0
empty_idxs = np.arange(self.MAX_TRACKS)[mask]
self.tracks[empty_idxs] = np.nan
self.tracks[:, 0, 2] = 0
self.merge_features(self.tracks, features, empty_idxs)
def handle_features(self, features):
self.update_tracks(features)
valid_idxs = self.tracks[:, 0, 4] == 1
complete_idxs = self.tracks[:, 0, 3] == 1
stale_idxs = self.tracks[:, 0, 2] == 0
valid_tracks = self.tracks[valid_idxs]
self.tracks[complete_idxs] = np.nan
self.tracks[stale_idxs] = np.nan
return valid_tracks[:, 1:, :4].reshape((len(valid_tracks), self.K * 4))
def generate_orient_error_jac(K):
import sympy as sp
from rednose.helpers.sympy_helpers import quat_rotate
x_sym = sp.MatrixSymbol('abr', 3, 1)
dtheta = sp.MatrixSymbol('dtheta', 3, 1)
delta_quat = sp.Matrix(np.ones(4))
delta_quat[1:, :] = sp.Matrix(0.5 * dtheta[0:3, :])
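# first-order (small-angle) error quaternion, delta_quat = [1, dtheta/2];
# composing q * delta_quat lets the jacobian below be taken with
# respect to the attitude perturbation dtheta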
poses_sym = sp.MatrixSymbol('poses', 7 * K, 1)
img_pos_sym = sp.MatrixSymbol('img_positions', 2 * K, 1)
alpha, beta, rho = x_sym
to_c = sp.Matrix(rot_matrix(-np.pi / 2, -np.pi / 2, 0))
pos_0 = sp.Matrix(np.array(poses_sym[K * 7 - 7:K * 7 - 4])[:, 0])
q = quat_matrix_l(poses_sym[K * 7 - 4:K * 7]) * delta_quat
quat_rot = quat_rotate(*q)
rot_g_to_0 = to_c * quat_rot.T
rows = []
for i in range(K):
pos_i = sp.Matrix(np.array(poses_sym[i * 7:i * 7 + 3])[:, 0])
q = quat_matrix_l(poses_sym[7 * i + 3:7 * i + 7]) * delta_quat
quat_rot = quat_rotate(*q)
rot_g_to_i = to_c * quat_rot.T
rot_0_to_i = rot_g_to_i * (rot_g_to_0.T)
trans_0_to_i = rot_g_to_i * (pos_0 - pos_i)
funct_vec = rot_0_to_i * sp.Matrix([alpha, beta, 1]) + rho * trans_0_to_i
h1, h2, h3 = funct_vec
rows.append(h1 / h3 - img_pos_sym[i * 2 + 0])
rows.append(h2 / h3 - img_pos_sym[i * 2 + 1])
img_pos_residual_sym = sp.Matrix(rows)
# sympy into c
sympy_functions = []
sympy_functions.append(('orient_error_jac', img_pos_residual_sym.jacobian(dtheta), [x_sym, poses_sym, img_pos_sym, dtheta]))
return sympy_functions
if __name__ == "__main__":
# TODO: get K from argparse
FeatureHandler.generate_code()

View File

@ -0,0 +1,173 @@
#!/usr/bin/env python3
import os
import sys
import numpy as np
import sympy as sp
import common.transformations.orientation as orient
from rednose.helpers import (TEMPLATE_DIR, load_code, write_code)
from rednose.helpers.sympy_helpers import (quat_rotate, sympy_into_c)
def generate_residual(K):
x_sym = sp.MatrixSymbol('abr', 3, 1)
poses_sym = sp.MatrixSymbol('poses', 7 * K, 1)
img_pos_sym = sp.MatrixSymbol('img_positions', 2 * K, 1)
alpha, beta, rho = x_sym
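# inverse-depth parameterization: alpha and beta locate the feature
# on the normalized image plane of the anchor (most recent) camera
# frame and rho is its inverse depth, so the feature sits at
# [alpha/rho, beta/rho, 1/rho] in that frame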
to_c = sp.Matrix(orient.rot_matrix(-np.pi / 2, -np.pi / 2, 0))
pos_0 = sp.Matrix(np.array(poses_sym[K * 7 - 7:K * 7 - 4])[:, 0])
q = poses_sym[K * 7 - 4:K * 7]
quat_rot = quat_rotate(*q)
rot_g_to_0 = to_c * quat_rot.T
rows = []
for i in range(K):
pos_i = sp.Matrix(np.array(poses_sym[i * 7:i * 7 + 3])[:, 0])
q = poses_sym[7 * i + 3:7 * i + 7]
quat_rot = quat_rotate(*q)
rot_g_to_i = to_c * quat_rot.T
rot_0_to_i = rot_g_to_i * rot_g_to_0.T
trans_0_to_i = rot_g_to_i * (pos_0 - pos_i)
funct_vec = rot_0_to_i * sp.Matrix([alpha, beta, 1]) + rho * trans_0_to_i
h1, h2, h3 = funct_vec
rows.append(h1 / h3 - img_pos_sym[i * 2 + 0])
rows.append(h2 / h3 - img_pos_sym[i * 2 + 1])
img_pos_residual_sym = sp.Matrix(rows)
# sympy into c
sympy_functions = []
sympy_functions.append(('res_fun', img_pos_residual_sym, [x_sym, poses_sym, img_pos_sym]))
sympy_functions.append(('jac_fun', img_pos_residual_sym.jacobian(x_sym), [x_sym, poses_sym, img_pos_sym]))
return sympy_functions
class LstSqComputer():
name = 'pos_computer'
@staticmethod
def generate_code(K=4):
sympy_functions = generate_residual(K)
header, code = sympy_into_c(sympy_functions)
code += "\n#define KDIM %d\n" % K
code += "\n" + open(os.path.join(TEMPLATE_DIR, "compute_pos.c")).read()
header += """
void compute_pos(double *to_c, double *in_poses, double *in_img_positions, double *param, double *pos);
"""
filename = f"{LstSqComputer.name}_{K}"
write_code(filename, code, header)
def __init__(self, K=4, MIN_DEPTH=2, MAX_DEPTH=500):
self.to_c = orient.rot_matrix(-np.pi / 2, -np.pi / 2, 0)
self.MAX_DEPTH = MAX_DEPTH
self.MIN_DEPTH = MIN_DEPTH
name = f"{LstSqComputer.name}_{K}"
ffi, lib = load_code(name)
# wrap c functions
def residual_jac(x, poses, img_positions):
out = np.zeros((K * 2, 3), dtype=np.float64)
lib.jac_fun(ffi.cast("double *", x.ctypes.data),
ffi.cast("double *", poses.ctypes.data),
ffi.cast("double *", img_positions.ctypes.data),
ffi.cast("double *", out.ctypes.data))
return out
self.residual_jac = residual_jac
def residual(x, poses, img_positions):
out = np.zeros((K * 2), dtype=np.float64)
lib.res_fun(ffi.cast("double *", x.ctypes.data),
ffi.cast("double *", poses.ctypes.data),
ffi.cast("double *", img_positions.ctypes.data),
ffi.cast("double *", out.ctypes.data))
return out
self.residual = residual
def compute_pos_c(poses, img_positions):
pos = np.zeros(3, dtype=np.float64)
param = np.zeros(3, dtype=np.float64)
# Can't be a view for the ctype
img_positions = np.copy(img_positions)
lib.compute_pos(ffi.cast("double *", self.to_c.ctypes.data),
ffi.cast("double *", poses.ctypes.data),
ffi.cast("double *", img_positions.ctypes.data),
ffi.cast("double *", param.ctypes.data),
ffi.cast("double *", pos.ctypes.data))
return pos, param
self.compute_pos_c = compute_pos_c
def compute_pos(self, poses, img_positions, debug=False):
pos, param = self.compute_pos_c(poses, img_positions)
# pos, param = self.compute_pos_python(poses, img_positions)
depth = 1 / param[2]
if debug:
# orient_err_jac = self.orient_error_jac(param, poses, img_positions, np.zeros(3)).reshape((-1,2,3))
jac = self.residual_jac(param, poses, img_positions).reshape((-1, 2, 3))
res = self.residual(param, poses, img_positions).reshape((-1, 2))
return pos, param, res, jac # , orient_err_jac
elif (self.MIN_DEPTH < depth < self.MAX_DEPTH):
return pos
else:
return None
def gauss_newton(self, fun, jac, x, args):
poses, img_positions = args
delta = 1
counter = 0
while np.linalg.norm(delta) > 1e-4 and counter < 30:
delta = np.linalg.pinv(jac(x, poses, img_positions)).dot(fun(x, poses, img_positions))
x = x - delta
counter += 1
return [x]
def compute_pos_python(self, poses, img_positions, check_quality=False):
import scipy.optimize as opt
# This procedure is also described
# in the MSCKF paper (Mourikis et al. 2007)
x = np.array([img_positions[-1][0],
img_positions[-1][1], 0.1])
res = opt.leastsq(self.residual, x, Dfun=self.residual_jac, args=(poses, img_positions)) # scipy opt
# res = self.gauss_newton(self.residual, self.residual_jac, x, (poses, img_positions)) # diy gauss_newton
alpha, beta, rho = res[0]
rot_0_to_g = (orient.rotations_from_quats(poses[-1, 3:])).dot(self.to_c.T)
return (rot_0_to_g.dot(np.array([alpha, beta, 1]))) / rho + poses[-1, :3]
# EXPERIMENTAL CODE
def unroll_shutter(img_positions, poses, v, rot_rates, ecef_pos):
# only speed correction for now
t_roll = 0.016 # 16ms rolling shutter?
vroll, vpitch, vyaw = rot_rates
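# quaternion kinematics: q_dot = 0.5 * Omega(rot_rates) * q,
# with Omega the zero-diagonal skew matrix built below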
A = 0.5 * np.array([[0, -vroll, -vpitch, -vyaw],
[vroll, 0, vyaw, -vpitch],
[vpitch, -vyaw, 0, vroll],
[vyaw, vpitch, -vroll, 0]])
q_dot = A.dot(poses[-1][3:7])
v = np.append(v, q_dot)
v = np.array([v[0], v[1], v[2], 0, 0, 0, 0])
current_pose = poses[-1] + v * 0.05
poses = np.vstack((current_pose, poses))
dt = -img_positions[:, 1] * t_roll / 0.48
errs = project(poses, ecef_pos) - project(poses + np.atleast_2d(dt).T.dot(np.atleast_2d(v)), ecef_pos)
return img_positions - errs
def project(poses, ecef_pos):
img_positions = np.zeros((len(poses), 2))
for i, p in enumerate(poses):
cam_frame = orient.rotations_from_quats(p[3:]).T.dot(ecef_pos - p[:3])
img_positions[i] = np.array([cam_frame[1] / cam_frame[0], cam_frame[2] / cam_frame[0]])
return img_positions
if __name__ == "__main__":
K = int(sys.argv[1].split("_")[-1])
LstSqComputer.generate_code(K=K)

View File

@ -0,0 +1,90 @@
#!/usr/bin/env python3
import sympy as sp
import numpy as np
def cross(x):
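# skew-symmetric cross-product matrix: cross(x) * y == x × y,
# e.g. cross(sp.Matrix([1, 0, 0])) * sp.Matrix([0, 1, 0]) == [0, 0, 1]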
ret = sp.Matrix(np.zeros((3, 3)))
ret[0, 1], ret[0, 2] = -x[2], x[1]
ret[1, 0], ret[1, 2] = x[2], -x[0]
ret[2, 0], ret[2, 1] = -x[1], x[0]
return ret
def euler_rotate(roll, pitch, yaw):
# make symbolic rotation matrix from eulers
matrix_roll = sp.Matrix([[1, 0, 0],
[0, sp.cos(roll), -sp.sin(roll)],
[0, sp.sin(roll), sp.cos(roll)]])
matrix_pitch = sp.Matrix([[sp.cos(pitch), 0, sp.sin(pitch)],
[0, 1, 0],
[-sp.sin(pitch), 0, sp.cos(pitch)]])
matrix_yaw = sp.Matrix([[sp.cos(yaw), -sp.sin(yaw), 0],
[sp.sin(yaw), sp.cos(yaw), 0],
[0, 0, 1]])
return matrix_yaw * matrix_pitch * matrix_roll
def quat_rotate(q0, q1, q2, q3):
# make symbolic rotation matrix from quat
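# w-first convention [q0, q1, q2, q3]; the trailing transpose makes
# this the body-to-reference rotation, which appears to match
# orient.rotations_from_quats (cf. compute_pos_python)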
return sp.Matrix([[q0**2 + q1**2 - q2**2 - q3**2, 2 * (q1 * q2 + q0 * q3), 2 * (q1 * q3 - q0 * q2)],
[2 * (q1 * q2 - q0 * q3), q0**2 - q1**2 + q2**2 - q3**2, 2 * (q2 * q3 + q0 * q1)],
[2 * (q1 * q3 + q0 * q2), 2 * (q2 * q3 - q0 * q1), q0**2 - q1**2 - q2**2 + q3**2]]).T
def quat_matrix_l(p):
return sp.Matrix([[p[0], -p[1], -p[2], -p[3]],
[p[1], p[0], -p[3], p[2]],
[p[2], p[3], p[0], -p[1]],
[p[3], -p[2], p[1], p[0]]])
def quat_matrix_r(p):
return sp.Matrix([[p[0], -p[1], -p[2], -p[3]],
[p[1], p[0], p[3], -p[2]],
[p[2], -p[3], p[0], p[1]],
[p[3], p[2], -p[1], p[0]]])
def sympy_into_c(sympy_functions, global_vars=None):
from sympy.utilities import codegen
routines = []
for name, expr, args in sympy_functions:
r = codegen.make_routine(name, expr, language="C99", global_vars=global_vars)
# sympy's codegen scrambles the argument ordering for functions with output arguments
nargs = []
# reorder the input arguments
for aa in args:
if aa is None:
nargs.append(codegen.InputArgument(sp.Symbol('unused'), dimensions=[1, 1]))
continue
found = False
for a in r.arguments:
if str(aa.name) == str(a.name):
nargs.append(a)
found = True
break
if not found:
# [1,1] is a hack for Matrices
nargs.append(codegen.InputArgument(aa, dimensions=[1, 1]))
# add the output arguments
for a in r.arguments:
if isinstance(a, codegen.OutputArgument):
nargs.append(a)
# assert len(r.arguments) == len(args)+1
r.arguments = nargs
# add routine to list
routines.append(r)
[(c_name, c_code), (h_name, c_header)] = codegen.get_code_generator('C', 'ekf', 'C99').write(routines, "ekf")
c_header = '\n'.join(x for x in c_header.split("\n") if len(x) > 0 and x[0] != '#')
c_code = '\n'.join(x for x in c_code.split("\n") if len(x) > 0 and x[0] != '#')
c_code = 'extern "C" {\n#include <math.h>\n' + c_code + "\n}\n"
return c_header, c_code

View File

@ -0,0 +1,54 @@
#include <eigen3/Eigen/QR>
#include <eigen3/Eigen/Dense>
#include <iostream>
typedef Eigen::Matrix<double, KDIM*2, 3, Eigen::RowMajor> R3M;
typedef Eigen::Matrix<double, KDIM*2, 1> R1M;
typedef Eigen::Matrix<double, 3, 1> O1M;
typedef Eigen::Matrix<double, 3, 3, Eigen::RowMajor> M3D;
extern "C" {
void gauss_newton(double *in_x, double *in_poses, double *in_img_positions) {
double res[KDIM*2] = {0};
double jac[KDIM*6] = {0};
O1M x(in_x);
O1M delta = O1M::Zero();
int counter = 0;
while ((delta.squaredNorm() > 0.0001 and counter < 30) or counter == 0){
res_fun(in_x, in_poses, in_img_positions, res);
jac_fun(in_x, in_poses, in_img_positions, jac);
R1M E(res); R3M J(jac);
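// Gauss-Newton step via the normal equations:
// delta = (J^T J)^-1 J^T r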
delta = (J.transpose()*J).inverse() * J.transpose() * E;
x = x - delta;
memcpy(in_x, x.data(), 3 * sizeof(double));
counter = counter + 1;
}
}
void compute_pos(double *to_c, double *poses, double *img_positions, double *param, double *pos) {
param[0] = img_positions[KDIM*2-2];
param[1] = img_positions[KDIM*2-1];
param[2] = 0.1;
gauss_newton(param, poses, img_positions);
Eigen::Quaterniond q;
q.w() = poses[KDIM*7-4];
q.x() = poses[KDIM*7-3];
q.y() = poses[KDIM*7-2];
q.z() = poses[KDIM*7-1];
M3D RC(to_c);
Eigen::Matrix3d R = q.normalized().toRotationMatrix();
Eigen::Matrix3d rot = R * RC.transpose();
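// inverse-depth params [alpha, beta, rho] -> 3D point
// [alpha/rho, beta/rho, 1/rho] in the anchor camera frame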
pos[0] = param[0]/param[2];
pos[1] = param[1]/param[2];
pos[2] = 1.0/param[2];
O1M ecef_offset(poses + KDIM*7-7);
O1M ecef_output(pos);
ecef_output = rot*ecef_output + ecef_offset;
memcpy(pos, ecef_output.data(), 3 * sizeof(double));
}
}

123
rednose/templates/ekf_c.c Normal file
View File

@ -0,0 +1,123 @@
#include <eigen3/Eigen/Dense>
#include <iostream>
typedef Eigen::Matrix<double, DIM, DIM, Eigen::RowMajor> DDM;
typedef Eigen::Matrix<double, EDIM, EDIM, Eigen::RowMajor> EEM;
typedef Eigen::Matrix<double, DIM, EDIM, Eigen::RowMajor> DEM;
void predict(double *in_x, double *in_P, double *in_Q, double dt) {
typedef Eigen::Matrix<double, MEDIM, MEDIM, Eigen::RowMajor> RRM;
double nx[DIM] = {0};
double in_F[EDIM*EDIM] = {0};
// functions from sympy
f_fun(in_x, dt, nx);
F_fun(in_x, dt, in_F);
EEM F(in_F);
EEM P(in_P);
EEM Q(in_Q);
RRM F_main = F.topLeftCorner(MEDIM, MEDIM);
P.topLeftCorner(MEDIM, MEDIM) = (F_main * P.topLeftCorner(MEDIM, MEDIM)) * F_main.transpose();
P.topRightCorner(MEDIM, EDIM - MEDIM) = F_main * P.topRightCorner(MEDIM, EDIM - MEDIM);
P.bottomLeftCorner(EDIM - MEDIM, MEDIM) = P.bottomLeftCorner(EDIM - MEDIM, MEDIM) * F_main.transpose();
P = P + dt*Q;
// copy out state
memcpy(in_x, nx, DIM * sizeof(double));
memcpy(in_P, P.data(), EDIM * EDIM * sizeof(double));
}
// note: the extra_args dimension (EADIM) is only meaningful when
// null-space projecting, otherwise it is 1
template <int ZDIM, int EADIM, bool MAHA_TEST>
void update(double *in_x, double *in_P, Hfun h_fun, Hfun H_fun, Hfun Hea_fun, double *in_z, double *in_R, double *in_ea, double MAHA_THRESHOLD) {
typedef Eigen::Matrix<double, ZDIM, ZDIM, Eigen::RowMajor> ZZM;
typedef Eigen::Matrix<double, ZDIM, DIM, Eigen::RowMajor> ZDM;
typedef Eigen::Matrix<double, Eigen::Dynamic, EDIM, Eigen::RowMajor> XEM;
//typedef Eigen::Matrix<double, EDIM, ZDIM, Eigen::RowMajor> EZM;
typedef Eigen::Matrix<double, Eigen::Dynamic, 1> X1M;
typedef Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> XXM;
double in_hx[ZDIM] = {0};
double in_H[ZDIM * DIM] = {0};
double in_H_mod[EDIM * DIM] = {0};
double delta_x[EDIM] = {0};
double x_new[DIM] = {0};
// state x, P
Eigen::Matrix<double, ZDIM, 1> z(in_z);
EEM P(in_P);
ZZM pre_R(in_R);
// functions from sympy
h_fun(in_x, in_ea, in_hx);
H_fun(in_x, in_ea, in_H);
ZDM pre_H(in_H);
// get y (y = z - hx)
Eigen::Matrix<double, ZDIM, 1> pre_y(in_hx); pre_y = z - pre_y;
X1M y; XXM H; XXM R;
if (Hea_fun){
typedef Eigen::Matrix<double, ZDIM, EADIM, Eigen::RowMajor> ZAM;
double in_Hea[ZDIM * EADIM] = {0};
Hea_fun(in_x, in_ea, in_Hea);
ZAM Hea(in_Hea);
XXM A = Hea.transpose().fullPivLu().kernel();
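// columns of A span the null space of Hea^T, so A^T y and A^T H drop
// the components that depend on the feature position error
// (same projection as the Python reference implementation)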
y = A.transpose() * pre_y;
H = A.transpose() * pre_H;
R = A.transpose() * pre_R * A;
} else {
y = pre_y;
H = pre_H;
R = pre_R;
}
// get modified H
H_mod_fun(in_x, in_H_mod);
DEM H_mod(in_H_mod);
XEM H_err = H * H_mod;
// Do Mahalanobis distance test
if (MAHA_TEST){
XXM a = (H_err * P * H_err.transpose() + R).inverse();
double maha_dist = y.transpose() * a * y;
if (maha_dist > MAHA_THRESHOLD){
R = 1.0e16 * R;
}
}
// Outlier resilient weighting
double weight = 1;//(1.5)/(1 + y.squaredNorm()/R.sum());
// kalman gains and I_KH
XXM S = ((H_err * P) * H_err.transpose()) + R/weight;
XEM KT = S.fullPivLu().solve(H_err * P.transpose());
//EZM K = KT.transpose(); TODO: WHY DOES THIS NOT COMPILE?
//EZM K = S.fullPivLu().solve(H_err * P.transpose()).transpose();
//std::cout << "Here is the matrix rot:\n" << K << std::endl;
EEM I_KH = Eigen::Matrix<double, EDIM, EDIM>::Identity() - (KT.transpose() * H_err);
// update state by injecting dx
Eigen::Matrix<double, EDIM, 1> dx(delta_x);
dx = (KT.transpose() * y);
memcpy(delta_x, dx.data(), EDIM * sizeof(double));
err_fun(in_x, delta_x, x_new);
Eigen::Matrix<double, DIM, 1> x(x_new);
// update cov
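// Joseph form: P = (I-KH) P (I-KH)^T + K R K^T, stable for any gain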
P = ((I_KH * P) * I_KH.transpose()) + ((KT.transpose() * R) * KT);
// copy out state
memcpy(in_x, x.data(), DIM * sizeof(double));
memcpy(in_P, P.data(), EDIM * EDIM * sizeof(double));
memcpy(in_z, y.data(), y.rows() * sizeof(double));
}

View File

@ -0,0 +1,58 @@
extern "C"{
bool sane(double track [K + 1][5]) {
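// same heuristic as the Python sane(): reject tracks whose
// step-to-step image motion is erratic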
double diffs_x [K-1];
double diffs_y [K-1];
int i;
for (i = 0; i < K-1; i++) {
diffs_x[i] = fabs(track[i+2][2] - track[i+1][2]);
diffs_y[i] = fabs(track[i+2][3] - track[i+1][3]);
}
for (i = 1; i < K-1; i++) {
if (((diffs_x[i] > 0.05 or diffs_x[i-1] > 0.05) and
(diffs_x[i] > 2*diffs_x[i-1] or
diffs_x[i] < .5*diffs_x[i-1])) or
((diffs_y[i] > 0.05 or diffs_y[i-1] > 0.05) and
(diffs_y[i] > 2*diffs_y[i-1] or
diffs_y[i] < .5*diffs_y[i-1]))){
return false;
}
}
return true;
}
void merge_features(double *tracks, double *features, long long *empty_idxs) {
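// C port of merge_features_python: tracks is 6000 x (K+1) x 5,
// features is 3000 x 5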
double feature_arr [3000][5];
memcpy(feature_arr, features, 3000 * 5 * sizeof(double));
double track_arr [6000][K + 1][5];
memcpy(track_arr, tracks, (K+1) * 6000 * 5 * sizeof(double));
int match;
int empty_idx = 0;
int idx;
for (int i = 0; i < 3000; i++) {
match = feature_arr[i][4];
if (track_arr[match][0][1] == match and track_arr[match][0][2] == 0){
track_arr[match][0][0] = track_arr[match][0][0] + 1;
track_arr[match][0][1] = feature_arr[i][1];
track_arr[match][0][2] = 1;
idx = track_arr[match][0][0];
memcpy(track_arr[match][idx], feature_arr[i], 5 * sizeof(double));
if (idx == K){
// label complete
track_arr[match][0][3] = 1;
if (sane(track_arr[match])){
// label valid
track_arr[match][0][4] = 1;
}
}
} else {
// gen new track with this feature
track_arr[empty_idxs[empty_idx]][0][0] = 1;
track_arr[empty_idxs[empty_idx]][0][1] = feature_arr[i][1];
track_arr[empty_idxs[empty_idx]][0][2] = 1;
memcpy(track_arr[empty_idxs[empty_idx]][1], feature_arr[i], 5 * sizeof(double));
empty_idx = empty_idx + 1;
}
}
memcpy(tracks, track_arr, (K+1) * 6000 * 5 * sizeof(double));
}
}

26
setup.py Normal file
View File

@ -0,0 +1,26 @@
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
setup(
name='rednose',
version='0.0.1',
url='https://github.com/commaai/rednose',
author='comma.ai',
author_email='harald@comma.ai',
packages=find_packages(),
platforms='any',
license='MIT',
package_data={'': ['helpers/chi2_lookup_table.npy', 'templates/*']},
install_requires=[
'sympy',
'numpy',
'scipy',
'tqdm',
'cffi',
],
ext_modules=[],
description="Kalman filter library",
long_description='See https://github.com/commaai/rednose',
)