|
| GMathLut1 () |
| Default constructor that creates an empty LUT.
|
|
template<class F > |
| GMathLut1 (const F &f, const T &t_min, const T &t_max, const Mode &mode, const unsigned int lut_size=512) |
| Constructor that creates a LUT from a given function.
|
|
template<class F > |
T | eval_interpolate (const T &t, const F &f) const |
| Evaluate the LUT using information about interpolation.
|
|
T | eval (const T &t) const |
| Evaluate the LUT.
|
|
T | eval (const T &t, const T &lerp_threshold) const |
 | Evaluate the LUT, using the given lerp_threshold for interpolation.
|
|
T | eval_abs (const T &t) const |
| Evaluate the LUT.
|
|
T | eval_abs (const T &t, const T &lerp_threshold) const |
 | Evaluate the LUT, using the given lerp_threshold for interpolation.
|
|
T | eval_diff (const T &t, T &diff) const |
 | Evaluate the LUT, writing an additional value to the diff output parameter.
|
|
T | eval_diff (const T &t, const T &lerp_threshold, T &diff) const |
 | Evaluate the LUT, using the given lerp_threshold for interpolation and writing an additional value to the diff output parameter.
|
|
T | eval_diff_abs (const T &t, T &diff) const |
 | Evaluate the LUT, writing an additional value to the diff output parameter.
|
|
T | eval_diff_abs (const T &t, const T &lerp_threshold, T &diff) const |
 | Evaluate the LUT, using the given lerp_threshold for interpolation and writing an additional value to the diff output parameter.
|
|
void | init (const unsigned int lut_size, const T &t_min=GMath< T >::zero(), const T &t_max=GMath< T >::one()) |
| Set the size of the LUT but leave it empty.
|
|
void | init (const CoreBasicArray< T > &values, const T &t_min, const T &t_max) |
| Set the values of the LUT.
|
|
template<class F > |
void | init (const F &f, const T &t_min, const T &t_max, const Mode &mode, const unsigned int lut_size=512) |
| Fill the LUT with the given function.
|
|
template<class F > |
void | init_raw (const F &f, const T &t_min, const T &t_max, const unsigned int lut_size=512) |
| Fill the LUT by evaluating the given function.
|
|
template<class F > |
void | init_cdf (const F &f, const T &t_min, const T &t_max, const unsigned int lut_size=512) |
| Fill the LUT by computing the CDF of the given function.
|
|
template<class F > |
void | init_inv_cdf (const F &f, const T &t_min, const T &t_max, const unsigned int lut_size=512, const unsigned int sample_count=8192) |
| Fill the LUT by computing the inverse CDF of the given function.
|
|
void | init_curve_cdf (const CoreBasicArray< GMathVec2< T > > &curve, const T &t_min, const T &t_max, const unsigned int lut_size=512) |
| Fill the LUT by computing the CDF of the given curve.
|
|
void | init_curve_inv_cdf (const CoreBasicArray< GMathVec2< T > > &curve, const T &t_min, const T &t_max, const unsigned int lut_size=512) |
| Fill the LUT by computing the inverse CDF of the given curve.
|
|
size_t | get_memory_size () const |
|
const unsigned int & | get_size () const |
|
const T * | get_data () const |
|
const T & | operator[] (const unsigned int &index) const |
|
T & | operator[] (const unsigned int &index) |
|
const CoreClassInfo & | get_class_info () const |
|
bool | is_kindof (const CoreClassInfo &cinfo) const |
|
void | set_range (const T &t_min, const T &t_max) |
| Set the range of the LUT.
|
|
T | eval (const CoreBasicArray< T > &lut, const T &t) const |
| Evaluate the LUT.
|
|
T | eval (const CoreBasicArray< T > &lut, const T &t, const T &lerp_threshold) const |
 | Evaluate the LUT, using the given lerp_threshold for interpolation.
|
|
template<class F > |
T | eval_interpolate (const CoreBasicArray< T > &lut, const T &t, F &f) const |
 | Evaluate the LUT using information about interpolation.
|
|
T | eval_abs (const CoreBasicArray< T > &lut, const T &t) const |
| Evaluate the LUT.
|
|
T | eval_abs (const CoreBasicArray< T > &lut, const T &t, const T &lerp_threshold) const |
 | Evaluate the LUT, using the given lerp_threshold for interpolation.
|
|
T | eval_diff (const CoreBasicArray< T > &lut, const T &t, T &diff) const |
 | Evaluate the LUT, writing an additional value to the diff output parameter.
|
|
T | eval_diff (const CoreBasicArray< T > &lut, const T &t, const T &lerp_threshold, T &diff) const |
 | Evaluate the LUT, using the given lerp_threshold for interpolation and writing an additional value to the diff output parameter.
|
|
T | eval_diff_abs (const CoreBasicArray< T > &lut, const T &t, T &diff) const |
 | Evaluate the LUT, writing an additional value to the diff output parameter.
|
|
T | eval_diff_abs (const CoreBasicArray< T > &lut, const T &t, const T &lerp_threshold, T &diff) const |
 | Evaluate the LUT, using the given lerp_threshold for interpolation and writing an additional value to the diff output parameter.
|
|
void | init (CoreBasicArray< T > &lut, const CoreBasicArray< T > &values, const T &t_min, const T &t_max) |
| Set the values of the LUT.
|
|
template<class F > |
void | init (CoreBasicArray< T > &lut, const F &f, const T &t_min, const T &t_max, const Mode &mode) |
| Fill the LUT with the given function.
|
|
template<class F > |
void | init_raw (CoreBasicArray< T > &lut, const F &f, const T &t_min, const T &t_max) |
| Fill the LUT by evaluating the given function.
|
|
template<class F > |
void | init_cdf (CoreBasicArray< T > &lut, const F &f, const T &t_min, const T &t_max) |
| Fill the LUT by computing the CDF of the given function.
|
|
template<class F > |
void | init_inv_cdf (CoreBasicArray< T > &lut, const F &f, const T &t_min, const T &t_max, const unsigned int sample_count) |
| Fill the LUT by computing the inverse CDF of the given function.
|
|
void | init_curve_cdf (CoreBasicArray< T > &lut, const CoreBasicArray< GMathVec2< T > > &curve, const T &t_min, const T &t_max) |
| Fill the LUT by computing the CDF of the given curve.
|
|
void | init_curve_inv_cdf (CoreBasicArray< T > &lut, const CoreBasicArray< GMathVec2< T > > &curve, const T &t_min, const T &t_max) |
| Fill the LUT by computing the inverse CDF of the given curve.
|
|
const T & | get_t_min () const |
|
const T & | get_t_max () const |
|
template<class T>
class GMathLut1< T >
Class for creating and manipulating a one-dimensional LUT.