Clarisse 5.0 SP8 SDK
5.0.5.8.0
|
Public Types | |
enum | Mode { RAW, CDF, INV_CDF } |
Public Member Functions | |
void | set_range (const T &t_min, const T &t_max) |
Set the range of the LUT. | |
T | eval (const CoreBasicArray< T > &lut, const T &t) const |
Evaluate the LUT at a normalized position t in [0, 1]. | |
T | eval (const CoreBasicArray< T > &lut, const T &t, const T &lerp_threshold) const |
Evaluate the LUT at a normalized position t in [0, 1], with a lerp threshold. | |
template<class F > | |
T | eval_interpolate (const CoreBasicArray< T > &lut, const T &t, F &f) const |
Evaluate the LUT using interpolation info. | |
T | eval_abs (const CoreBasicArray< T > &lut, const T &t) const |
Evaluate the LUT at an absolute t value in [t_min, t_max]. | |
T | eval_abs (const CoreBasicArray< T > &lut, const T &t, const T &lerp_threshold) const |
Evaluate the LUT at an absolute t value in [t_min, t_max], with a lerp threshold. | |
T | eval_diff (const CoreBasicArray< T > &lut, const T &t, T &diff) const |
Evaluate the LUT at a normalized position and compute the differential of the result. | |
T | eval_diff (const CoreBasicArray< T > &lut, const T &t, const T &lerp_threshold, T &diff) const |
Evaluate the LUT at a normalized position, with a lerp threshold, and compute the differential of the result. | |
T | eval_diff_abs (const CoreBasicArray< T > &lut, const T &t, T &diff) const |
Evaluate the LUT at an absolute t value and compute the differential of the result. | |
T | eval_diff_abs (const CoreBasicArray< T > &lut, const T &t, const T &lerp_threshold, T &diff) const |
Evaluate the LUT at an absolute t value, with a lerp threshold, and compute the differential of the result. | |
void | init (CoreBasicArray< T > &lut, const CoreBasicArray< T > &values, const T &t_min, const T &t_max) |
Set the values of the LUT. | |
template<class F > | |
void | init (CoreBasicArray< T > &lut, const F &f, const T &t_min, const T &t_max, const Mode &mode) |
Fill the LUT with the given function. | |
template<class F > | |
void | init_raw (CoreBasicArray< T > &lut, const F &f, const T &t_min, const T &t_max) |
Fill the LUT by evaluating the given function. | |
template<class F > | |
void | init_cdf (CoreBasicArray< T > &lut, const F &f, const T &t_min, const T &t_max) |
Fill the LUT by computing the CDF of the given function. | |
template<class F > | |
void | init_inv_cdf (CoreBasicArray< T > &lut, const F &f, const T &t_min, const T &t_max, const unsigned int sample_count) |
Fill the LUT by computing the inverse CDF of the given function. | |
void | init_curve_cdf (CoreBasicArray< T > &lut, const CoreBasicArray< GMathVec2< T > > &curve, const T &t_min, const T &t_max) |
Fill the LUT by computing the CDF of the given curve. | |
void | init_curve_inv_cdf (CoreBasicArray< T > &lut, const CoreBasicArray< GMathVec2< T > > &curve, const T &t_min, const T &t_max) |
Fill the LUT by computing the inverse CDF of the given curve. | |
const T & | get_t_min () const |
Get the minimum bound of the LUT. | |
const T & | get_t_max () const |
Get the maximum bound of the LUT. | |
|
inline |
Evaluate the LUT.
[in] | lut | values to use |
[in] | t | the position at which the LUT has to be evaluated (must be between 0 and 1) |
|
inline |
Evaluate the LUT.
[in] | lut | values to use |
[in] | t | the position at which the LUT has to be evaluated (must be between 0 and 1) |
[in] | lerp_threshold | threshold on the difference between 2 adjacent values above which the lerp is not used |
|
inline |
Evaluate the LUT.
[in] | lut | values to use |
[in] | t | the absolute t value at which the LUT has to be evaluated (must be between t_min and t_max) |
|
inline |
Evaluate the LUT.
[in] | lut | values to use |
[in] | t | the absolute t value at which the LUT has to be evaluated (must be between t_min and t_max) |
[in] | lerp_threshold | threshold on the difference between 2 adjacent values above which the lerp is not used |
|
inline |
Evaluate the LUT.
[in] | lut | values to use |
[in] | t | the position at which the LUT has to be evaluated (must be between 0 and 1) |
[out] | diff | the differential of the resulting value |
|
inline |
Evaluate the LUT.
[in] | lut | values to use |
[in] | t | the position at which the LUT has to be evaluated (must be between 0 and 1) |
[in] | lerp_threshold | threshold on the difference between 2 adjacent values above which the lerp is not used |
[out] | diff | the differential of the resulting value |
|
inline |
Evaluate the LUT.
[in] | lut | values to use |
[in] | t | the absolute t value at which the LUT has to be evaluated (must be between t_min and t_max) |
[out] | diff | the differential of the resulting value |
|
inline |
Evaluate the LUT.
[in] | lut | values to use |
[in] | t | the absolute t value at which the LUT has to be evaluated (must be between t_min and t_max) |
[in] | lerp_threshold | threshold on the difference between 2 adjacent values above which the lerp is not used |
[out] | diff | the differential of the resulting value |
|
inline |
Evaluate the LUT using interpolation info.
[in] | lut | values to use |
[in] | t | the position at which the LUT has to be evaluated (must be between 0 and 1) |
[in] | f | functor which interpolates the LUT |
|
inline |
Set the values of the LUT.
[out] | lut | output values |
[in] | values | values to store in the LUT |
[in] | t_min | the minimum bound of the LUT |
[in] | t_max | the maximum bound of the LUT |
void GMathBasicLut1< T >::init | ( | CoreBasicArray< T > & | lut, |
const F & | f, | ||
const T & | t_min, | ||
const T & | t_max, | ||
const Mode & | mode | ||
) |
Fill the LUT with the given function.
[out] | lut | output values |
[in] | f | the functor defining the function |
[in] | t_min | the minimum bound of the function |
[in] | t_max | the maximum bound of the function |
[in] | mode | the creation mode of the LUT |
void GMathBasicLut1< T >::init_cdf | ( | CoreBasicArray< T > & | lut, |
const F & | f, | ||
const T & | t_min, | ||
const T & | t_max | ||
) |
Fill the LUT by computing the CDF of the given function.
[out] | lut | output values |
[in] | f | the functor defining the function |
[in] | t_min | the minimum bound of the function |
[in] | t_max | the maximum bound of the function |
void GMathBasicLut1< T >::init_curve_cdf | ( | CoreBasicArray< T > & | lut, |
const CoreBasicArray< GMathVec2< T > > & | curve, | ||
const T & | t_min, | ||
const T & | t_max | ||
) |
Fill the LUT by computing the CDF of the given curve.
[out] | lut | output values |
[in] | curve | array of points defining the curve |
[in] | t_min | the minimum bound of the curve |
[in] | t_max | the maximum bound of the curve |
void GMathBasicLut1< T >::init_curve_inv_cdf | ( | CoreBasicArray< T > & | lut, |
const CoreBasicArray< GMathVec2< T > > & | curve, | ||
const T & | t_min, | ||
const T & | t_max | ||
) |
Fill the LUT by computing the inverse CDF of the given curve.
[out] | lut | output values |
[in] | curve | array of points defining the curve |
[in] | t_min | the minimum bound of the curve |
[in] | t_max | the maximum bound of the curve |
void GMathBasicLut1< T >::init_inv_cdf | ( | CoreBasicArray< T > & | lut, |
const F & | f, | ||
const T & | t_min, | ||
const T & | t_max, | ||
const unsigned int | sample_count | ||
) |
Fill the LUT by computing the inverse CDF of the given function.
[out] | lut | output values |
[in] | f | the functor defining the function |
[in] | t_min | the minimum bound of the function |
[in] | t_max | the maximum bound of the function |
[in] | sample_count | the number of samples to use to estimate the CDF |
void GMathBasicLut1< T >::init_raw | ( | CoreBasicArray< T > & | lut, |
const F & | f, | ||
const T & | t_min, | ||
const T & | t_max | ||
) |
Fill the LUT by evaluating the given function.
[out] | lut | output values |
[in] | f | the functor defining the function |
[in] | t_min | the minimum bound of the function |
[in] | t_max | the maximum bound of the function |
|
inline |
Set the range of the LUT.
[in] | t_min | the minimum bound of the LUT |
[in] | t_max | the maximum bound of the LUT |