//=============================================================================
//
//  Copyright (c) 2015-2020 Qualcomm Technologies, Inc.
//  All Rights Reserved.
//  Confidential and Proprietary - Qualcomm Technologies, Inc.
//
//=============================================================================

#ifndef _ITENSOR_HPP_
#define _ITENSOR_HPP_

#include "ITensorItr.hpp"
#include "ITensorItrImpl.hpp"
#include "TensorShape.hpp"
#include "ZdlExportDefine.hpp"
#include <memory>
#include <ostream>
#include <cmath>

namespace zdl {
namespace DlSystem
{
   class ITensor;
}}

namespace zdl { namespace DlSystem
{
/** @addtogroup c_plus_plus_apis C++
@{ */

/**
 * Represents a tensor which holds n-dimensional data. It is important to
 * understand how the tensor data is represented in memory
 * relative to the tensor dimensions. Tensors store data in
 * memory in row-major order (i.e. the last tensor dimension is
 * the fastest varying one). For example, if you have a two
 * dimensional tensor with 3 rows and 2 columns (i.e. the tensor
 * dimensions are 3,2 as returned in tensor dimension vectors)
 * with the following data in terms of rows and columns:
 *
 * | 1 2 | <br/>
 * | 3 4 | <br/>
 * | 5 6 | <br/>
 *
 * This data would be stored in memory as 1,2,3,4,5,6.
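 *
 * For illustration only (an added sketch, not part of the original
 * Qualcomm header): reading the data back in that row-major order
 * through the iterator interface declared below. The helper name
 * readRowMajor is made up for this sketch, and the tensor `t` is
 * assumed to already hold the 3x2 example data.
 *
 * @code
 * #include <algorithm>
 * #include <vector>
 *
 * std::vector<float> readRowMajor(const zdl::DlSystem::ITensor& t)
 * {
 *    // getSize() gives the element count, so the copy below
 *    // yields 1,2,3,4,5,6 for the example tensor above.
 *    std::vector<float> out(t.getSize());
 *    std::copy(t.cbegin(), t.cend(), out.begin());
 *    return out;
 * }
 * @endcode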
 */
class ZDL_EXPORT ITensor
{
public:

   typedef zdl::DlSystem::ITensorItr<false> iterator;
   typedef zdl::DlSystem::ITensorItr<true> const_iterator;

   virtual ~ITensor() {}

   /**
    * Returns a tensor iterator pointing to the beginning
    * of the data in the tensor.
    *
    * @return A tensor iterator that points to the first data
    *         element in the tensor.
    */
   virtual iterator begin() = 0;

   /**
    * Returns the const version of a tensor iterator
    * pointing to the beginning of the data in the tensor.
    *
    * @return A tensor const iterator that points to the first data
    *         element in the tensor.
    */
   virtual const_iterator cbegin() const = 0;

   /**
    * Returns a tensor iterator pointing to the end of the
    * data in the tensor. This iterator should not be
    * dereferenced.
    *
    * @return A tensor iterator that points to the end of the data
    *         (one past the last element) in the tensor.
    */
   virtual iterator end() = 0;

   /**
    * Returns the const version of a tensor iterator
    * pointing to the end of the data in the tensor. This
    * iterator should not be dereferenced.
    *
    * @return A tensor const iterator that points to the end of the
    *         data (one past the last element) in the tensor.
    */
   virtual const_iterator cend() const = 0;

   /**
    * @brief Gets the shape of this tensor.
    *
    * The last element of the vector represents the fastest varying
    * dimension and the zeroth element represents the slowest
    * varying dimension, etc.
    *
    * @return A shape class holding the tensor dimensions.
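    *
    * Illustrative sketch only (added, not part of the original header),
    * assuming zdl::DlSystem::TensorShape exposes rank() and operator[]
    * as declared in TensorShape.hpp; the helper name printShape is made
    * up. For the 3x2 example above this prints "3 x 2":
    *
    * @code
    * #include <cstdio>
    *
    * void printShape(const zdl::DlSystem::ITensor& t)
    * {
    *    zdl::DlSystem::TensorShape shape = t.getShape();
    *    // shape[0] is the slowest varying dimension (rows here),
    *    // shape[shape.rank() - 1] the fastest varying one (columns).
    *    std::printf("%zu x %zu\n", shape[0], shape[1]);
    * }
    * @endcode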
    */
   virtual TensorShape getShape() const = 0;

   /**
    * Returns the element size of the data in the tensor
    * (discounting strides). This is how big a buffer would
    * need to be to hold the tensor data contiguously in
    * memory.
    *
    * @return The size of the tensor (in elements).
    */
   virtual size_t getSize() const = 0;

   /**
    * @brief Serializes the tensor to an output stream.
    *
    * @param[in] output The output stream to which to write the tensor
    *
    * @throw std::runtime_error If the stream is ever in a bad
    *        state before the tensor is fully serialized.
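    *
    * Illustrative sketch only (added, not part of the original header):
    * writing the tensor to a file through a std::ofstream. The helper
    * name saveTensor and the file path are arbitrary.
    *
    * @code
    * #include <fstream>
    * #include <stdexcept>
    *
    * bool saveTensor(const zdl::DlSystem::ITensor& t, const char* path)
    * {
    *    std::ofstream out(path, std::ios::binary);
    *    try {
    *       t.serialize(out);   // may throw if the stream goes bad
    *    } catch (const std::runtime_error&) {
    *       return false;
    *    }
    *    return out.good();
    * }
    * @endcode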
    */
   virtual void serialize(std::ostream &output) const = 0;

   friend iterator;
   friend const_iterator;

   virtual bool isQuantized() {return false;}
   virtual float GetDelta() {return NAN;}
   virtual float GetOffset() {return NAN;}

protected:

   /**
    * Returns the tensor iterator implementation.
    *
    * @return A pointer to the tensor iterator implementation.
    */
   virtual std::unique_ptr<::DlSystem::ITensorItrImpl> getItrImpl() const = 0;
};

}}

/** @} */ /* end_addtogroup c_plus_plus_apis C++ */

#endif