adbudg

DiminishingAdbudg

Bases: OneToOneInversableDerivableTransformer

Transform series by applying the Adbudg diminishing-return equation (same as in Masster).

Parameters:

- gamma (float, required): Shape parameter. Strictly positive parameter of the Adbudg diminishing function, influencing the concavity of the response curve (interpreted as the largest absolute value of the slope of the curve).
- rho (float, required): Half-saturation parameter. Strictly positive parameter of the Adbudg diminishing function, representing the percentage of the max parameter at which half of the maximal impact of the variable is reached.
- max (float, default None): Symbolic maximum: the value above which the variable is in the saturation region. When None, it is set to the maximum value of the training series during fit.
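
To see why rho acts as a half-saturation percentage, substitute serie = rho × max / 100 into the equation from the source below; the inner ratio becomes 1 and the response equals one half:

```math
serie = \frac{\rho \times max}{100}
\quad \Rightarrow \quad
\frac{1}{1 + \left(\frac{\rho \times max}{serie \times 100}\right)^\gamma} = \frac{1}{1 + 1^\gamma} = \frac{1}{2}
```
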
Source code in eki_mmo_equations/one_to_one_transformations/diminishing_return/adbudg.py
class DiminishingAdbudg(OneToOneInversableDerivableTransformer):
    """Transform series by applying the Adbudg equation (same as in Masster) from diminishing return
        multiply by the max parameter.

    ```math
        \\frac{1}{1 + (\\frac {\\rho \\times max}{serie \\times 100})^\\gamma}
    ```

    Args:
        - gamma (float): Shape parameter. Strictly positive parameter of the Adbudg diminishing function,
                         influencing the concavity of the response curve (interpreted as the largest
                         absolute value of the slope of the curve).
        - rho (float): Half-saturation parameter. Strictly positive parameter of the Adbudg diminishing
                       function, representing the percentage of the max parameter at which half of the
                       maximal impact of the variable is reached.
        - max (float): Symbolic maximum: the value above which the variable is in the saturation region
                       (defaults to None, in which case it is set to the maximum value of the training series).
    """

    def __init__(self, gamma, rho, max=None) -> None:
        self.gamma = gamma
        self.rho = rho
        self.max = max

    @property
    def parameters(self) -> Dict[str, float]:
        return self.__dict__

    # ------- METHODS -------

    def fit(self, serie: np.ndarray, y=None):
        if self.max is None:
            # Default max to the largest observed value; if the series has no
            # strictly positive value, fall back to its minimum (or 1 when that is 0).
            if np.any(serie > 0):
                self.max = serie.max()
            else:
                self.max = serie.min() or 1

        return super().fit(serie, y)

    def transform(self, serie: np.ndarray, copy=False) -> np.ndarray:
        serie = super().transform(serie, copy)

        return self._transformer(serie, self.gamma, self.rho, self.max)

    def inverse_transform(self, serie: np.ndarray, copy=False) -> np.ndarray:
        serie = super().inverse_transform(serie, copy)

        return self._inverse_transformer(serie, self.gamma, self.rho, self.max)

    def derivative_transform(self, serie: np.ndarray, copy=False) -> np.ndarray:
        serie = super().derivative_transform(serie, copy)

        return self._derivative_transformer(serie, self.gamma, self.rho, self.max)

    # ------- TRANSFORMERS -------

    @staticmethod
    def _transformer(serie: np.ndarray, gamma, rho, max) -> np.ndarray:
        d = serie * 100 / max
        with np.errstate(divide="ignore"):
            return np.nan_to_num(1 / (1 + ((rho / d) ** gamma)))

    @staticmethod
    def _inverse_transformer(serie: np.ndarray, gamma, rho, max) -> np.ndarray:
        with np.errstate(divide="ignore"):
            return np.nan_to_num((max * rho) / (100 * (((1 / serie) - 1)) ** (1 / gamma)))

    @staticmethod
    def _derivative_transformer(serie: np.ndarray, gamma, rho, max) -> np.ndarray:
        d = serie * 100 / max
        with np.errstate(divide="ignore"):
            return np.nan_to_num(((gamma / serie) * ((rho / d) ** gamma)) / (1 + ((rho / d) ** gamma)) ** 2)

    # ------- CHECKERS -------

    def check_params(self, serie: np.ndarray):
        """Check if parameters respect their application scope."""
        if self.rho <= 0 or self.rho >= 100:
            raise ParameterScopeException(f"Parameter rho must be in ]0, 100[, not {self.rho}.")

        if self.gamma <= 0:
            raise ParameterScopeException(f"Parameter gamma must be strictly positive, not {self.gamma}.")

        if self.max <= 0:
            raise ParameterScopeException(f"Parameter max must be strictly positive, not {self.max}.")

        if self.rho <= 1:
            logger.warning(
                "Parameter rho is expected to lie in ]0, 100[, not in ]0, 1[."
            )
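
A minimal usage sketch (assuming the class is importable from the module path shown above; the spend values are purely illustrative):

```python
import numpy as np

from eki_mmo_equations.one_to_one_transformations.diminishing_return.adbudg import DiminishingAdbudg

serie = np.array([0.0, 10.0, 25.0, 50.0, 100.0])

# rho=50 means half of the maximal impact is reached at 50% of max.
transformer = DiminishingAdbudg(gamma=2.0, rho=50.0)
transformer.fit(serie)  # max defaults to serie.max() == 100.0

transformed = transformer.transform(serie)
# At serie == rho * max / 100 == 50.0, the response is exactly 0.5.

# inverse_transform recovers the original values.
recovered = transformer.inverse_transform(transformed)
assert np.allclose(recovered, serie)
```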

check_params(serie)

Check if parameters respect their application scope.

Source code in eki_mmo_equations/one_to_one_transformations/diminishing_return/adbudg.py
def check_params(self, serie: np.ndarray):
    """Check if parameters respect their application scope."""
    if self.rho <= 0 or self.rho >= 100:
        raise ParameterScopeException(f"Parameter rho must be in ]0, 100[, not {self.rho}.")

    if self.gamma <= 0:
        raise ParameterScopeException(f"Parameter gamma must be strictly positive, not {self.gamma}.")

    if self.max <= 0:
        raise ParameterScopeException(f"Parameter max must be strictly positive, not {self.max}.")

    if self.rho <= 1:
        logger.warning(
            "Parameter rho is expected to lie in ]0, 100[, not in ]0, 1[."
        )
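
Continuing the sketch above, out-of-scope parameters raise, while a rho that looks like a proportion only logs the warning (ParameterScopeException comes from the library, as in the source):

```python
# rho given as a proportion (0.5) instead of a percentage (50.0):
# check_params does not raise, it only logs the warning above.
DiminishingAdbudg(gamma=2.0, rho=0.5, max=100.0).check_params(serie=np.array([1.0, 2.0]))

# rho outside ]0, 100[ raises ParameterScopeException.
try:
    DiminishingAdbudg(gamma=2.0, rho=150.0, max=100.0).check_params(serie=np.array([1.0, 2.0]))
except Exception as exc:  # ParameterScopeException in the library
    print(exc)  # Parameter rho must be in ]0, 100[, not 150.0.
```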

DiminishingAdbudgUnscale

Bases: DiminishingAdbudg

Transform series by applying the Adbudg diminishing-return equation (same as in Masster), multiplied by the max parameter.

Parameters:

- gamma (float, required): Shape parameter. Strictly positive parameter of the Adbudg diminishing function, influencing the concavity of the response curve (interpreted as the largest absolute value of the slope of the curve).
- rho (float, required): Half-saturation parameter. Strictly positive parameter of the Adbudg diminishing function, representing the percentage of the max parameter at which half of the maximal impact of the variable is reached.
- max (float, default None): Symbolic maximum: the value above which the variable is in the saturation region. When None, it is set to the maximum value of the training series during fit.
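
The max factor in the numerator (see the source below) changes only the output scale: the curve saturates at max rather than at 1, and reaches max / 2 at the half-saturation point:

```math
\lim_{serie \to \infty} \frac{max}{1 + \left(\frac{\rho \times max}{serie \times 100}\right)^\gamma} = max
```
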
Source code in eki_mmo_equations/one_to_one_transformations/diminishing_return/adbudg.py
class DiminishingAdbudgUnscale(DiminishingAdbudg):
    """Transform series by applying the Adbudg equation (same as in Masster) from diminishing return
        multiply by the max parameter.

    ```math
        \\frac{max}{1 + (\\frac {\\rho \\times max}{serie \\times 100})^\\gamma}
    ```

    Args:
        - gamma (float): Shape parameter. Strictly positive parameter of the Adbudg diminishing function,
                         influencing the concavity of the response curve (interpreted as the largest
                         absolute value of the slope of the curve).
        - rho (float): Half-saturation parameter. Strictly positive parameter of the Adbudg diminishing
                       function, representing the percentage of the max parameter at which half of the
                       maximal impact of the variable is reached.
        - max (float): Symbolic maximum: the value above which the variable is in the saturation region
                       (defaults to None, in which case it is set to the maximum value of the training series).
    """

    @staticmethod
    def _transformer(serie: np.ndarray, gamma, rho, max) -> np.ndarray:
        # Explicit two-argument super(): a staticmethod has no implicit class context.
        return max * super(DiminishingAdbudgUnscale, DiminishingAdbudgUnscale)._transformer(
            serie=serie, gamma=gamma, rho=rho, max=max
        )

    @staticmethod
    def _inverse_transformer(serie: np.ndarray, gamma, rho, max) -> np.ndarray:
        with np.errstate(divide="ignore"):
            return np.nan_to_num((max * rho) / (100 * (((max / serie) - 1)) ** (1 / gamma)))

    @staticmethod
    def _derivative_transformer(serie: np.ndarray, gamma, rho, max) -> np.ndarray:
        return max * super(DiminishingAdbudgUnscale, DiminishingAdbudgUnscale)._derivative_transformer(
            serie=serie, gamma=gamma, rho=rho, max=max
        )

    # ------- CHECKERS -------

    def check_params(self, serie: np.ndarray):
        """Check if parameters respect their application scope."""
        if self.rho <= 0 or self.rho >= 100:
            raise ParameterScopeException(f"Parameter rho must be in ]0, 1[, not {self.rho}.")

        if self.gamma <= 0:
            raise ParameterScopeException(f"Parameter gamma must be strictly positive, not {self.gamma}.")

        if self.max <= 0:
            raise ParameterScopeException(f"Parameter max must be strictly positive, not {self.max}.")

        if self.rho <= 1:
            logger.warning(
                "Parameter rho is expected to lie in ]0, 100[, not in ]0, 1[."
            )
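
A short sketch of the unscaled variant (same hypothetical import path as above); the output now lives on the scale of max rather than in [0, 1]:

```python
import numpy as np

from eki_mmo_equations.one_to_one_transformations.diminishing_return.adbudg import (
    DiminishingAdbudgUnscale,
)

serie = np.array([0.0, 25.0, 50.0, 100.0, 400.0])

transformer = DiminishingAdbudgUnscale(gamma=2.0, rho=50.0, max=100.0)
transformer.fit(serie)

transformed = transformer.transform(serie)
# The curve saturates toward max == 100: the response at the half-saturation
# point serie == 50.0 is 50.0, and at serie == 400.0 it is ~98.5.
```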

check_params(serie)

Check if parameters respect their application scope.

Source code in eki_mmo_equations/one_to_one_transformations/diminishing_return/adbudg.py
def check_params(self, serie: np.ndarray):
    """Check if parameters respect their application scope."""
    if self.rho <= 0 or self.rho >= 100:
        raise ParameterScopeException(f"Parameter rho must be in ]0, 1[, not {self.rho}.")

    if self.gamma <= 0:
        raise ParameterScopeException(f"Parameter gamma must be strictly positive, not {self.gamma}.")

    if self.max <= 0:
        raise ParameterScopeException(f"Parameter max must be strictly positive, not {self.max}.")

    if self.rho <= 1:
        logger.warning(
            "Parameter rho is expected to lie in ]0, 100[, not in ]0, 1[."
        )
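
To gain confidence in derivative_transform, one can compare the analytic derivative against a central finite difference of transform (a sketch under the same import assumptions as above):

```python
import numpy as np

from eki_mmo_equations.one_to_one_transformations.diminishing_return.adbudg import DiminishingAdbudg

transformer = DiminishingAdbudg(gamma=2.0, rho=50.0, max=100.0)
x = np.linspace(1.0, 200.0, 50)
transformer.fit(x)

h = 1e-5
analytic = transformer.derivative_transform(x)
numeric = (transformer.transform(x + h) - transformer.transform(x - h)) / (2 * h)

# The analytic derivative should agree with the finite difference.
assert np.allclose(analytic, numeric, rtol=1e-4)
```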