diff --git a/kats/data/utils.py b/kats/data/utils.py index 4332eb1e..4514e64b 100644 --- a/kats/data/utils.py +++ b/kats/data/utils.py @@ -51,13 +51,11 @@ def load_data(file_name: str, reset_columns: bool = False) -> pd.DataFrame: @overload -def load_air_passengers(return_ts: Literal[True]) -> TimeSeriesData: - ... +def load_air_passengers(return_ts: Literal[True]) -> TimeSeriesData: ... @overload -def load_air_passengers(return_ts: Literal[False] = ...) -> pd.DataFrame: - ... +def load_air_passengers(return_ts: Literal[False] = ...) -> pd.DataFrame: ... def load_air_passengers(return_ts: bool = True) -> Union[pd.DataFrame, TimeSeriesData]: diff --git a/kats/detectors/cusum_detection.py b/kats/detectors/cusum_detection.py index eb510856..fbbb6216 100644 --- a/kats/detectors/cusum_detection.py +++ b/kats/detectors/cusum_detection.py @@ -1224,10 +1224,12 @@ def detector_(self, **kwargs: Any) -> List[List[CUSUMChangePoint]]: if not list(change_meta_["changepoint"]): continue change_meta = { - k: change_meta_[k][col_idx] - if isinstance(change_meta_[k], np.ndarray) - or isinstance(change_meta_[k], list) - else change_meta_[k] + k: ( + change_meta_[k][col_idx] + if isinstance(change_meta_[k], np.ndarray) + or isinstance(change_meta_[k], list) + else change_meta_[k] + ) for k in change_meta_ } change_meta["llr"] = llr = self._get_llr( diff --git a/kats/detectors/cusum_model.py b/kats/detectors/cusum_model.py index 08b9a1c6..7752f6cc 100644 --- a/kats/detectors/cusum_model.py +++ b/kats/detectors/cusum_model.py @@ -1276,9 +1276,11 @@ def _fit( ) cps = [ - sorted(x, key=lambda x: x.start_time)[0] - if x and alert_set_on_mask[i] - else None + ( + sorted(x, key=lambda x: x.start_time)[0] + if x and alert_set_on_mask[i] + else None + ) for i, x in enumerate(changepoints) ] diff --git a/kats/detectors/detector_consts.py b/kats/detectors/detector_consts.py index db5667f1..41014b66 100644 --- a/kats/detectors/detector_consts.py +++ b/kats/detectors/detector_consts.py @@ -260,10 
+260,12 @@ def stat_sig(self) -> Union[bool, ArrayLike]: if self.num_series > 1: return np.array( [ - False - if cast(np.ndarray, self.upper)[i] > 1.0 - and cast(np.ndarray, self.lower)[i] < 1 - else True + ( + False + if cast(np.ndarray, self.upper)[i] > 1.0 + and cast(np.ndarray, self.lower)[i] < 1 + else True + ) for i in range(self.current.num_series) ] ) @@ -649,11 +651,13 @@ def get_last_n(self, N: int) -> AnomalyResponse: return AnomalyResponse( scores=self.scores[-N:], - confidence_band=None - if cb is None - else ConfidenceBand( - upper=cb.upper[-N:], - lower=cb.lower[-N:], + confidence_band=( + None + if cb is None + else ConfidenceBand( + upper=cb.upper[-N:], + lower=cb.lower[-N:], + ) ), predicted_ts=None if pts is None else pts[-N:], anomaly_magnitude_ts=self.anomaly_magnitude_ts[-N:], diff --git a/kats/detectors/distribution_distance_model.py b/kats/detectors/distribution_distance_model.py index 3b8b8c0d..473be4e9 100644 --- a/kats/detectors/distribution_distance_model.py +++ b/kats/detectors/distribution_distance_model.py @@ -260,10 +260,12 @@ def fit_predict( window=str(self.window_size_sec) + "s", closed="both", ).agg( - lambda rows: rows[0] - if (rows.index[-1] - rows.index[0]).total_seconds() - > 0.9 * self.window_size_sec # tolerance - else np.nan + lambda rows: ( + rows[0] + if (rows.index[-1] - rows.index[0]).total_seconds() + > 0.9 * self.window_size_sec # tolerance + else np.nan + ) ) # exclude the beginning part of NANs diff --git a/kats/detectors/meta_learning/synth_metadata_reader.py b/kats/detectors/meta_learning/synth_metadata_reader.py index 80f0b17a..276ad7d1 100644 --- a/kats/detectors/meta_learning/synth_metadata_reader.py +++ b/kats/detectors/meta_learning/synth_metadata_reader.py @@ -70,9 +70,11 @@ def get_metadata(self, algorithm_name: str) -> Dict[str, pd.DataFrame]: .map(lambda kv: kv[a][0]) .map( lambda kv: { - k: v - if k not in self.PARAMS_TO_SCALE_DOWN - else v / SynthMetadataReader.NUM_SECS_IN_DAY + k: ( + v + if k not in 
self.PARAMS_TO_SCALE_DOWN + else v / SynthMetadataReader.NUM_SECS_IN_DAY + ) for k, v in kv.items() } ) diff --git a/kats/detectors/outlier.py b/kats/detectors/outlier.py index ce901602..87b03265 100644 --- a/kats/detectors/outlier.py +++ b/kats/detectors/outlier.py @@ -62,7 +62,7 @@ def __init__( def __clean_ts__( self, - original: Union[pd.Series, pd.DataFrame] + original: Union[pd.Series, pd.DataFrame], # pyre-fixme[11]: Annotation `Timestamp` is not defined as a type. ) -> Tuple[List[int], List[float], List[pd.Timestamp]]: """ diff --git a/kats/detectors/prophet_detector.py b/kats/detectors/prophet_detector.py index 130f9132..b87c2a35 100644 --- a/kats/detectors/prophet_detector.py +++ b/kats/detectors/prophet_detector.py @@ -41,6 +41,8 @@ import sys NOT_SUPPRESS_PROPHET_FIT_LOGS_VAR_NAME = "NOT_SUPPRESS_PROPHET_FIT_LOGS" + + # this is a bug in prophet which was discussed in open source thread # issues was also suggested # details https://github.com/facebook/prophet/issues/223#issuecomment-326455744 diff --git a/kats/detectors/stat_sig_detector.py b/kats/detectors/stat_sig_detector.py index 3116f4f6..98483024 100644 --- a/kats/detectors/stat_sig_detector.py +++ b/kats/detectors/stat_sig_detector.py @@ -922,7 +922,6 @@ def predict( class MultiStatSigDetectorModel(StatSigDetectorModel): - """ MultiStatSigDetectorModel is a multivariate version of the StatSigDetector. It applies a univariate t-test to each of the components of the multivariate time series to see if the means between the control diff --git a/kats/metrics/metrics.py b/kats/metrics/metrics.py index c75c184d..588436a6 100644 --- a/kats/metrics/metrics.py +++ b/kats/metrics/metrics.py @@ -24,6 +24,7 @@ # from numpy.typing import ArrayLike ArrayLike = Union[np.ndarray, Sequence[float]] + # Type aliases # # Most metrics have the shape: @@ -47,13 +48,13 @@ def __call__( y_true: ArrayLike, y_pred: ArrayLike, sample_weight: Optional[ArrayLike] = ..., - ) -> np.ndarray: - ... 
# pragma: no cover + ) -> np.ndarray: ... # pragma: no cover class Metric(Protocol): - def __call__(self, y_true: ArrayLike, y_pred: ArrayLike) -> float: - ... # pragma: no cover + def __call__( + self, y_true: ArrayLike, y_pred: ArrayLike + ) -> float: ... # pragma: no cover class WeightedMetric(Protocol): @@ -62,8 +63,7 @@ def __call__( y_true: ArrayLike, y_pred: ArrayLike, sample_weight: Optional[ArrayLike] = ..., - ) -> float: - ... # pragma: no cover + ) -> float: ... # pragma: no cover class MultiOutputMetric(Protocol): @@ -73,8 +73,7 @@ def __call__( y_pred: ArrayLike, sample_weight: Optional[ArrayLike] = ..., multioutput: Union[str, ArrayLike] = ..., - ) -> float: - ... # pragma: no cover + ) -> float: ... # pragma: no cover class ThresholdMetric(Protocol): @@ -83,8 +82,7 @@ def __call__( y_true: ArrayLike, y_pred: ArrayLike, threshold: float, - ) -> float: - ... # pragma: no cover + ) -> float: ... # pragma: no cover class MultiThresholdMetric(Protocol): @@ -93,8 +91,7 @@ def __call__( y_true: ArrayLike, y_pred: ArrayLike, threshold: ArrayLike, - ) -> np.ndarray: - ... # pragma: no cover + ) -> np.ndarray: ... # pragma: no cover KatsMetric = Union[ diff --git a/kats/models/ensemble/kats_ensemble.py b/kats/models/ensemble/kats_ensemble.py index e9ae7932..371b537b 100644 --- a/kats/models/ensemble/kats_ensemble.py +++ b/kats/models/ensemble/kats_ensemble.py @@ -747,7 +747,7 @@ def _fit_single( data: TimeSeriesData, # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. model_func: Callable, - model_param: Params + model_param: Params, # pyre-fixme[24]: Generic type `Model` expects 1 type parameter. 
) -> Model: """Private method to fit individual model diff --git a/kats/models/globalmodel/data_processor.py b/kats/models/globalmodel/data_processor.py index 71279758..64d8b34f 100644 --- a/kats/models/globalmodel/data_processor.py +++ b/kats/models/globalmodel/data_processor.py @@ -263,12 +263,12 @@ def __init__( if params.model_type == "rnn" and params.seasonality > 1: init_seasonality = self._get_seasonality(train_x, params.seasonality) # bound initial seasonalities - init_seasonality[ - init_seasonality < params.init_seasonality[0] - ] = params.init_seasonality[0] - init_seasonality[ - init_seasonality > params.init_seasonality[1] - ] = params.init_seasonality[1] + init_seasonality[init_seasonality < params.init_seasonality[0]] = ( + params.init_seasonality[0] + ) + init_seasonality[init_seasonality > params.init_seasonality[1]] = ( + params.init_seasonality[1] + ) # pyre-fixme[4]: Attribute must be annotated. self.init_seasonality = torch.tensor(init_seasonality, dtype=tdtype) else: @@ -451,7 +451,6 @@ def _get_array( Optional[np.ndarray], Optional[np.ndarray], ]: - """ Helper function for transforming TS to arrays, including truncating/padding values, diff --git a/kats/models/globalmodel/model.py b/kats/models/globalmodel/model.py index 4011f11f..d98e077f 100644 --- a/kats/models/globalmodel/model.py +++ b/kats/models/globalmodel/model.py @@ -545,9 +545,7 @@ def _format_fcst( for i, idx in enumerate(ids): df = pd.DataFrame( - fcst[i].transpose()[ - :steps, - ], + fcst[i].transpose()[:steps,], columns=cols, ) df["time"] = pd.date_range( @@ -651,12 +649,12 @@ def save_model(self, file_name: str) -> None: info = { "gmparam_string": self.params.to_string(), "state_dict": self.rnn.state_dict() if self.rnn is not None else None, - "encoder_state_dict": self.encoder.state_dict() - if self.encoder is not None - else None, - "decoder_state_dict": self.decoder.state_dict() - if self.decoder is not None - else None, + "encoder_state_dict": ( + self.encoder.state_dict() 
if self.encoder is not None else None + ), + "decoder_state_dict": ( + self.decoder.state_dict() if self.decoder is not None else None + ), } with open(file_name, "wb") as f: joblib.dump(info, f) @@ -1099,7 +1097,11 @@ def _single_pass_s2s( cur_step + 1 ) - (x_t, anchor_level, x_lt,) = self._process_s2s( + ( + x_t, + anchor_level, + x_lt, + ) = self._process_s2s( prev_idx, cur_idx, batch.x, x_lt, period, params.input_window ) diff --git a/kats/models/globalmodel/stdmodel.py b/kats/models/globalmodel/stdmodel.py index 48390036..858932ef 100644 --- a/kats/models/globalmodel/stdmodel.py +++ b/kats/models/globalmodel/stdmodel.py @@ -195,7 +195,6 @@ def _deseasonal( def _predict_seasonality( self, steps: int, tsd_model: Union[ProphetModel, np.ndarray] ) -> np.ndarray: - """Predict the future seasonality. Args: diff --git a/kats/models/globalmodel/utils.py b/kats/models/globalmodel/utils.py index 188471d1..cae8154e 100644 --- a/kats/models/globalmodel/utils.py +++ b/kats/models/globalmodel/utils.py @@ -73,7 +73,7 @@ def get_filters(isna_idx, seasonality) -> np.ndarray: else: i += 1 filters = np.array([True] * n) - for (i, j) in flips: + for i, j in flips: filters[i:j] = False return filters @@ -188,9 +188,11 @@ def split( split_data = [ ( {t: train_TSs[t] for t in keys[~index[i]]}, - {t: valid_TSs[t] for t in keys[~index[i]]} - if valid_TSs is not None - else None, + ( + {t: valid_TSs[t] for t in keys[~index[i]]} + if valid_TSs is not None + else None + ), ) for i in range(splits) ] @@ -198,9 +200,11 @@ def split( split_data = [ ( {t: train_TSs[t] for t in keys[index[i]]}, - {t: valid_TSs[t] for t in keys[index[i]]} - if valid_TSs is not None - else None, + ( + {t: valid_TSs[t] for t in keys[index[i]]} + if valid_TSs is not None + else None + ), ) for i in range(splits) ] diff --git a/kats/models/metalearner/get_metadata.py b/kats/models/metalearner/get_metadata.py index 3d34c762..9cc14af3 100644 --- a/kats/models/metalearner/get_metadata.py +++ 
b/kats/models/metalearner/get_metadata.py @@ -220,7 +220,7 @@ def _tune_single( # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. single_model: Callable, # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. - single_params: Callable + single_params: Callable, # pyre-fixme[24]: Generic type `dict` expects 2 type parameters, use # `typing.Dict` to avoid runtime subscripting errors. ) -> Tuple[Dict, float]: diff --git a/kats/models/metalearner/metalearner_hpt.py b/kats/models/metalearner/metalearner_hpt.py index a8dec3d2..58b24410 100644 --- a/kats/models/metalearner/metalearner_hpt.py +++ b/kats/models/metalearner/metalearner_hpt.py @@ -456,9 +456,7 @@ def build_network( print("Multi-task neural network structure:") print(self.model) - def _prepare_data( - self, val_size: float - ) -> Tuple[ + def _prepare_data(self, val_size: float) -> Tuple[ torch.FloatTensor, Optional[torch.LongTensor], Optional[torch.FloatTensor], diff --git a/kats/models/ml_ar.py b/kats/models/ml_ar.py index 268757fa..9323f196 100644 --- a/kats/models/ml_ar.py +++ b/kats/models/ml_ar.py @@ -934,9 +934,9 @@ def _merge_past_and_future_reg( num_rows_dat = norm_in_data[target_var].shape[0] num_cols_dat = norm_in_data[target_var].shape[1] - full_mat[ - tv_idx : (tv_idx + num_rows_dat), 0:num_cols_dat - ] = norm_in_data[target_var] + full_mat[tv_idx : (tv_idx + num_rows_dat), 0:num_cols_dat] = ( + norm_in_data[target_var] + ) full_mat[ tv_idx : (tv_idx + num_rows_dat), diff --git a/kats/tests/test_ensemble.py b/kats/tests/test_ensemble.py index 79533b25..7da27d53 100644 --- a/kats/tests/test_ensemble.py +++ b/kats/tests/test_ensemble.py @@ -73,35 +73,47 @@ def get_predict_model(m: Model, model_name: str, steps: int, freq: str) -> np.nd def get_ensemble_param(ts_param: Dict[str, bool]) -> EnsembleParams: """Returns EnsembleParams based on which base_models are included.""" base_model_list = [ - BaseModelParams("arima", arima.ARIMAParams(p=1, d=1, q=1)) - if 
ts_param["arima"] - else "", - BaseModelParams("holtwinters", holtwinters.HoltWintersParams()) - if ts_param["holtwinters"] - else "", - BaseModelParams( - "sarima", - sarima.SARIMAParams( - p=2, - d=1, - q=1, - trend="ct", - seasonal_order=(1, 0, 1, 12), - enforce_invertibility=False, - enforce_stationarity=False, - ), - ) - if ts_param["sarima"] - else "", - BaseModelParams("prophet", prophet.ProphetParams()) - if ts_param["prophet"] - else "", - BaseModelParams("linear", linear_model.LinearModelParams()) - if ts_param["linear"] - else "", - BaseModelParams("quadratic", quadratic_model.QuadraticModelParams()) - if ts_param["quadratic"] - else "", + ( + BaseModelParams("arima", arima.ARIMAParams(p=1, d=1, q=1)) + if ts_param["arima"] + else "" + ), + ( + BaseModelParams("holtwinters", holtwinters.HoltWintersParams()) + if ts_param["holtwinters"] + else "" + ), + ( + BaseModelParams( + "sarima", + sarima.SARIMAParams( + p=2, + d=1, + q=1, + trend="ct", + seasonal_order=(1, 0, 1, 12), + enforce_invertibility=False, + enforce_stationarity=False, + ), + ) + if ts_param["sarima"] + else "" + ), + ( + BaseModelParams("prophet", prophet.ProphetParams()) + if ts_param["prophet"] + else "" + ), + ( + BaseModelParams("linear", linear_model.LinearModelParams()) + if ts_param["linear"] + else "" + ), + ( + BaseModelParams("quadratic", quadratic_model.QuadraticModelParams()) + if ts_param["quadratic"] + else "" + ), BaseModelParams("theta", theta.ThetaParams(m=12)) if ts_param["theta"] else "", ] return EnsembleParams( diff --git a/kats/tests/tsfeatures/test_tsfeatures.py b/kats/tests/tsfeatures/test_tsfeatures.py index 12c0facd..d75d2ebf 100644 --- a/kats/tests/tsfeatures/test_tsfeatures.py +++ b/kats/tests/tsfeatures/test_tsfeatures.py @@ -180,7 +180,7 @@ def test_tsfeatures_basic(self, test_name: str, ts_name: str) -> None: "firstzero_ac": 4.0, # holt_params "holt_alpha": 0.0, - "holt_beta": 0.0 + "holt_beta": 0.0, # hw_params # cusum_detector # robust_stat_detector diff 
--git a/kats/tests/utils/test_backtest.py b/kats/tests/utils/test_backtest.py index 2ddd6673..d3b89769 100644 --- a/kats/tests/utils/test_backtest.py +++ b/kats/tests/utils/test_backtest.py @@ -727,7 +727,7 @@ def test_forbidden_initialization_parameters(self) -> None: (50, 50, 110, "Invalid window percentage"), ] - for (train_p, test_p, window_p, expected_msg) in forbidden_init_params: + for train_p, test_p, window_p, expected_msg in forbidden_init_params: # Create backtester with forbidden initialization parameters with self.assertRaises(ValueError) as e: @@ -1048,7 +1048,7 @@ def test_forbidden_initialization_parameters(self) -> None: (50, 50, -10, "Invalid number of folds"), ] - for (train_p, test_p, num_folds, expected_msg) in forbidden_init_params: + for train_p, test_p, num_folds, expected_msg in forbidden_init_params: # Create cross validation object with forbidden initialization parameters with self.assertRaises(ValueError) as e: diff --git a/kats/tests/utils/test_decomposition.py b/kats/tests/utils/test_decomposition.py index 26ba5514..4976d5d1 100644 --- a/kats/tests/utils/test_decomposition.py +++ b/kats/tests/utils/test_decomposition.py @@ -434,9 +434,11 @@ def test_10_minutes_level_sparse_data(self) -> None: # dates are sparse, there are some gaps between dates sparse_dates_df = dense_dates_ts.to_dataframe().copy() sparse_dates_df["time"] = sparse_dates_df["time"].map( - lambda x: x + pd.Timedelta(365, "D") - if (x >= pd.Timestamp(2021, 1, 2)) & (x < pd.Timestamp(2021, 1, 3)) - else x + lambda x: ( + x + pd.Timedelta(365, "D") + if (x >= pd.Timestamp(2021, 1, 2)) & (x < pd.Timestamp(2021, 1, 3)) + else x + ) ) sparse_dates_ts = TimeSeriesData(sparse_dates_df) diff --git a/kats/tests/utils/test_time_series_parameter_tuning.py b/kats/tests/utils/test_time_series_parameter_tuning.py index 87566587..5b55d66b 100644 --- a/kats/tests/utils/test_time_series_parameter_tuning.py +++ b/kats/tests/utils/test_time_series_parameter_tuning.py @@ -349,6 +349,7 @@ 
def test_compute_search_cardinality(self) -> None: def test_time_series_parameter_tuning_prophet_bayes_opt_scheduler(self) -> None: random_state: RandomState = RandomState(seed=0) + # # pyre-fixme[2]: Parameter must be annotated. def prophet_evaluation_function(params) -> Tuple[float, float]: error: float = random_state.random() @@ -423,6 +424,7 @@ def prophet_evaluation_function(params) -> Tuple[float, float]: def test_time_series_parameter_tuning_NeverGrad(self) -> None: random_state: RandomState = RandomState(seed=0) + # pyre-fixme[2]: Parameter must be annotated. def prophet_evaluation_function(**kwargs) -> float: error: float = random_state.random() @@ -549,6 +551,7 @@ def prophet_evaluation_function(**kwargs) -> float: def test_time_series_parameter_tuning_NeverGrad_multi(self) -> None: random_state: RandomState = RandomState(seed=0) + # # pyre-fixme[2]: Parameter must be annotated. def prophet_evaluation_function(**kwargs) -> float: error: float = random_state.random() diff --git a/kats/tsfeatures/tsfeatures.py b/kats/tsfeatures/tsfeatures.py index 246b71bf..66be0af8 100644 --- a/kats/tsfeatures/tsfeatures.py +++ b/kats/tsfeatures/tsfeatures.py @@ -2122,7 +2122,10 @@ def __init__( def get_features( self, data: Union[TimeSeriesData, pd.Series], raw: bool = False - ) -> Union[pd.DataFrame, np.ndarray,]: + ) -> Union[ + pd.DataFrame, + np.ndarray, + ]: if isinstance(data, TimeSeriesData): timestamps = data.time.dt diff --git a/kats/utils/backtesters.py b/kats/utils/backtesters.py index ab335e06..10c1f7a0 100644 --- a/kats/utils/backtesters.py +++ b/kats/utils/backtesters.py @@ -76,8 +76,9 @@ class BacktesterResult: class Forecaster(Protocol): - def __call__(self, train: DataPartition, test: DataPartition) -> pd.DataFrame: - ... # pragma: no cover + def __call__( + self, train: DataPartition, test: DataPartition + ) -> pd.DataFrame: ... # pragma: no cover """ Function of fitting a forecasting model with `train` and evaluate the fitted model on `test`. 
@@ -86,8 +87,9 @@ def __call__(self, train: DataPartition, test: DataPartition) -> pd.DataFrame: class Scorer(Protocol): - def __call__(self, result: pd.DataFrame) -> Dict[str, float]: - ... # pragma: no cover + def __call__( + self, result: pd.DataFrame + ) -> Dict[str, float]: ... # pragma: no cover """Function for calculating evaluation metrics based on `result`. """ @@ -122,6 +124,7 @@ def _get_scorer( except Exception as e: msg = f"Unsupported error function {error} with error message {e}." _log_error(msg) + # define scorer function # pyre-fixme Incompatible return type [7]: Expected `Optional[typing.Callable[[DataFrame], Dict[str, float]]]` but got `Union[Metric, MultiOutputMetric, WeightedMetric]`. def calc_error(result: pd.DataFrame) -> Dict[str, float]: diff --git a/kats/utils/cupik.py b/kats/utils/cupik.py index 31cce322..4453a319 100644 --- a/kats/utils/cupik.py +++ b/kats/utils/cupik.py @@ -23,19 +23,16 @@ class Step(Protocol): __type__: str data: TimeSeriesData - def remover(self, interpolate: bool) -> TimeSeriesData: - ... + def remover(self, interpolate: bool) -> TimeSeriesData: ... - def transform(self, data: TimeSeriesData) -> object: - ... + def transform(self, data: TimeSeriesData) -> object: ... def fit( self, x: Union[pd.DataFrame, np.ndarray], y: Optional[Union[pd.Series, np.ndarray]], **kwargs: Any, - ) -> List[TimeSeriesData]: - ... + ) -> List[TimeSeriesData]: ... PipelineStep = Tuple[str, Step] diff --git a/kats/utils/simulator.py b/kats/utils/simulator.py index 0579f353..3ced15a7 100644 --- a/kats/utils/simulator.py +++ b/kats/utils/simulator.py @@ -207,7 +207,6 @@ def add_noise( magnitude: float = 1.0, multiply: bool = False, ) -> Simulator: - """Add noise to the generated time series for STL-based simulator. 
Noise type is normal - noise will be generated from iid normal distribution; diff --git a/kats/utils/time_series_parameter_tuning.py b/kats/utils/time_series_parameter_tuning.py index 08262fa9..674283aa 100644 --- a/kats/utils/time_series_parameter_tuning.py +++ b/kats/utils/time_series_parameter_tuning.py @@ -481,7 +481,7 @@ def generate_evaluate_new_parameter_values( self, # pyre-fixme[24]: Generic type `Callable` expects 2 type parameters. evaluation_function: Callable, - arm_count: int = -1 # -1 means + arm_count: int = -1, # -1 means # create all arms (i.e. all combinations of parameter values) ) -> None: """A place holder method for users that are still using it. @@ -894,7 +894,9 @@ class BayesMethodOptions(SearchMethodOptions): window_global_stop_size: int = 3 experiment: Optional[Experiment] = None timeout_hours: Optional[int] = None # timeout in hours for optimization - improvement_bar: float = 0.02 # imporvement step for gloabl stop strategy, imporvement bar default value sets for f_score func + improvement_bar: float = ( + 0.02 # improvement step for global stop strategy, improvement bar default value sets for f_score func + ) max_initialization_trials: int = 5 seed: Optional[int] = None @@ -1106,9 +1108,11 @@ def generate_evaluate_new_parameter_values_with_options( search_space=self._exp.search_space, max_parallelism_cap=min( cpu_count(), - options.multiprocessing - if options.multiprocessing > 0 - else options.max_trials, + ( + options.multiprocessing + if options.multiprocessing > 0 + else options.max_trials + ), ), # use_batch_trials option somehow on parallel run limits initial number ob trials to 1 ¯\_(ツ)_/¯ # use_batch_trials=bool(self.multiprocessing),