From 4e4ea761475d6f025712a3ad9861349e94c6f489 Mon Sep 17 00:00:00 2001
From: Gal Rotem
Date: Fri, 18 Aug 2023 13:01:17 -0700
Subject: [PATCH] fix pyre targets

Summary:
# Context
`pyre` is not configured properly for `torchtnt`. The config is in `torchtnt/.pyre_configuration.local` and its coverage is only partial.

# This diff
1. Fix `torchtnt/.pyre_configuration.local`.
2. Codemod a `pyre-fixme` suppression onto every resulting pyre error via `pyre --output=json check | pyre-upgrade fixme`.
3. Fix any `lint` errors that result.
4. Fix the `pyre` issues that the `lint` fixes resurfaced.

Differential Revision: D48478829

fbshipit-source-id: df73a245409c0da20bb00371b1aa30d0cd9af491
---
 examples/auto_unit_example.py | 30 +++++++++
 examples/mingpt/char_dataset.py | 12 ++++
 examples/mingpt/main.py | 47 +++++++++++++-
 examples/mingpt/model.py | 38 ++++++++++-
 examples/mnist/main.py | 43 ++++++++++++-
 examples/torchdata_train_example.py | 1 +
 examples/torchrec/main.py | 10 +++
 .../torchrec/tests/torchrec_example_test.py | 2 +
 examples/train_unit_example.py | 1 +
 tests/framework/callbacks/test_csv_writer.py | 3 +
 tests/framework/callbacks/test_lambda.py | 12 ++++
 .../callbacks/test_learning_rate_monitor.py | 1 +
 .../callbacks/test_module_summary.py | 3 +
 .../test_system_resources_monitor.py | 1 +
 .../test_tensorboard_parameter_monitor.py | 2 +
 .../callbacks/test_torchsnapshot_saver.py | 15 +++++
 .../callbacks/test_tqdm_progress_bar.py | 5 ++
 tests/framework/test_app_state_mixin.py | 14 ++++
 tests/framework/test_auto_unit.py | 60 ++++++++++++++++-
 tests/framework/test_callback_handler.py | 3 +
 tests/framework/test_train.py | 1 +
 tests/framework/test_utils.py | 37 +++++++++++
 tests/utils/data/test_data_prefetcher.py | 1 +
 tests/utils/data/test_multi_dataloader.py | 64 ++++++++++++++++++-
 tests/utils/data/test_profile_dataloader.py | 2 +
 tests/utils/loggers/test_csv.py | 1 +
 tests/utils/loggers/test_in_memory.py | 3 +
 tests/utils/loggers/test_tensorboard.py | 2 +
 tests/utils/test_device.py | 5 ++
 tests/utils/test_distributed.py | 38 +++++++++++
 tests/utils/test_early_stop_checker.py | 5 ++
 tests/utils/test_env.py | 1 +
 tests/utils/test_memory.py | 22 +++++++
 tests/utils/test_misc.py | 14 ++++
 tests/utils/test_oom.py | 4 ++
 tests/utils/test_prepare_module.py | 12 ++++
 tests/utils/test_timer.py | 8 +++
 37 files changed, 511 insertions(+), 12 deletions(-)

diff --git a/examples/auto_unit_example.py b/examples/auto_unit_example.py
index 4d070a29d4..eef55c1102 100644
--- a/examples/auto_unit_example.py
+++ b/examples/auto_unit_example.py
@@ -61,6 +61,7 @@ def prepare_dataloader(
 class MyUnit(AutoUnit[Batch]):
+    # pyre-fixme[3]: Return type must be annotated.
     def __init__(
         self,
         *,
@@ -70,6 +71,31 @@ def __init__(
         log_every_n_steps: int,
         **kwargs: Dict[str, Any],  # kwargs to be passed to AutoUnit
     ):
+        # pyre-fixme[6]: For 1st argument expected `Optional[bool]` but got
+        #  `Dict[str, typing.Any]`.
+        # pyre-fixme[6]: For 1st argument expected `Optional[float]` but got
+        #  `Dict[str, typing.Any]`.
+        # pyre-fixme[6]: For 1st argument expected `Optional[device]` but got
+        #  `Dict[str, typing.Any]`.
+        # pyre-fixme[6]: For 1st argument expected
+        #  `Optional[ActivationCheckpointParams]` but got `Dict[str, typing.Any]`.
+        # pyre-fixme[6]: For 1st argument expected `Optional[SWAParams]` but got
+        #  `Dict[str, typing.Any]`.
+        # pyre-fixme[6]: For 1st argument expected `Optional[TorchCompileParams]`
+        #  but got `Dict[str, typing.Any]`.
+ # pyre-fixme[6]: For 1st argument expected + # `Union[typing_extensions.Literal['epoch'], + # typing_extensions.Literal['step']]` but got `Dict[str, typing.Any]`. + # pyre-fixme[6]: For 1st argument expected `Union[None, str, dtype]` but got + # `Dict[str, typing.Any]`. + # pyre-fixme[6]: For 1st argument expected `Union[None, str, Strategy]` but + # got `Dict[str, typing.Any]`. + # pyre-fixme[6]: For 1st argument expected `bool` but got `Dict[str, + # typing.Any]`. + # pyre-fixme[6]: For 1st argument expected `int` but got `Dict[str, + # typing.Any]`. + # pyre-fixme[6]: For 1st argument expected `Module` but got `Dict[str, + # typing.Any]`. super().__init__(**kwargs) self.tb_logger = tb_logger # create accuracy metrics to compute the accuracy of training and evaluation @@ -84,6 +110,7 @@ def configure_optimizers_and_lr_scheduler( lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9) return optimizer, lr_scheduler + # pyre-fixme[3]: Return annotation cannot contain `Any`. def compute_loss(self, state: State, data: Batch) -> Tuple[torch.Tensor, Any]: inputs, targets = data # convert targets to float Tensor for binary_cross_entropy_with_logits @@ -100,6 +127,7 @@ def on_train_step_end( data: Batch, step: int, loss: torch.Tensor, + # pyre-fixme[2]: Parameter annotation cannot be `Any`. outputs: Any, ) -> None: _, targets = data @@ -115,6 +143,7 @@ def on_eval_step_end( data: Batch, step: int, loss: torch.Tensor, + # pyre-fixme[2]: Parameter annotation cannot be `Any`. outputs: Any, ) -> None: _, targets = data @@ -205,6 +234,7 @@ def get_args() -> Namespace: if __name__ == "__main__": + # pyre-fixme[5]: Global expression must be annotated. args = get_args() lc = pet.LaunchConfig( min_nodes=1, diff --git a/examples/mingpt/char_dataset.py b/examples/mingpt/char_dataset.py index 3c7b6b1b86..d80d333ed8 100644 --- a/examples/mingpt/char_dataset.py +++ b/examples/mingpt/char_dataset.py @@ -17,13 +17,17 @@ @dataclass class DataConfig: + # pyre-fixme[8]: Attribute has type `str`; used as `None`. path: str = None + # pyre-fixme[8]: Attribute has type `int`; used as `None`. block_size: int = None + # pyre-fixme[8]: Attribute has type `float`; used as `None`. train_split: float = None truncate: float = 1.0 class CharDataset(Dataset): + # pyre-fixme[3]: Return type must be annotated. def __init__(self, data_cfg: DataConfig): print(data_cfg.path) data = fsspec.open(data_cfg.path).open().read().decode("utf-8") @@ -33,15 +37,23 @@ def __init__(self, data_cfg: DataConfig): data_size, vocab_size = len(data), len(chars) print("Data has %d characters, %d unique." % (data_size, vocab_size)) + # pyre-fixme[4]: Attribute must be annotated. self.stoi = {ch: i for i, ch in enumerate(chars)} + # pyre-fixme[4]: Attribute must be annotated. self.itos = {i: ch for i, ch in enumerate(chars)} + # pyre-fixme[4]: Attribute must be annotated. self.block_size = data_cfg.block_size + # pyre-fixme[4]: Attribute must be annotated. self.vocab_size = vocab_size + # pyre-fixme[4]: Attribute must be annotated. self.data = data + # pyre-fixme[3]: Return type must be annotated. def __len__(self): return len(self.data) - self.block_size + # pyre-fixme[3]: Return type must be annotated. + # pyre-fixme[2]: Parameter must be annotated. 
def __getitem__(self, idx): # grab a chunk of (block_size + 1) characters from the data chunk = self.data[idx : idx + self.block_size + 1] diff --git a/examples/mingpt/main.py b/examples/mingpt/main.py index 845c7c440f..58364894d0 100644 --- a/examples/mingpt/main.py +++ b/examples/mingpt/main.py @@ -32,11 +32,15 @@ logging.basicConfig(level=logging.INFO) Batch = Tuple[torch.Tensor, torch.Tensor] +# pyre-fixme[5]: Global expression must be annotated. PATH = parutil.get_file_path("data/input.txt", pkg=__package__) def prepare_dataloader( - dataset: Dataset, batch_size: int, device: torch.device + # pyre-fixme[24]: Generic type `Dataset` expects 1 type parameter. + dataset: Dataset, + batch_size: int, + device: torch.device, ) -> torch.utils.data.DataLoader: """Instantiate DataLoader""" # pin_memory enables faster host to GPU copies @@ -48,6 +52,7 @@ def prepare_dataloader( ) +# pyre-fixme[3]: Return type must be annotated. def get_datasets(data_cfg: DataConfig): dataset = CharDataset(data_cfg) train_len = int(len(dataset) * data_cfg.train_split) @@ -55,6 +60,7 @@ def get_datasets(data_cfg: DataConfig): return train_set, eval_set, dataset +# pyre-fixme[24]: Generic type `AutoUnit` expects 1 type parameter. class MinGPTUnit(AutoUnit): def __init__( self, @@ -63,24 +69,58 @@ def __init__( log_every_n_steps: int, **kwargs: Dict[str, Any], ) -> None: + # pyre-fixme[6]: For 1st argument expected `Optional[bool]` but got + # `Dict[str, typing.Any]`. + # pyre-fixme[6]: For 1st argument expected `Optional[float]` but got + # `Dict[str, typing.Any]`. + # pyre-fixme[6]: For 1st argument expected `Optional[device]` but got + # `Dict[str, typing.Any]`. + # pyre-fixme[6]: For 1st argument expected + # `Optional[ActivationCheckpointParams]` but got `Dict[str, typing.Any]`. + # pyre-fixme[6]: For 1st argument expected `Optional[SWAParams]` but got + # `Dict[str, typing.Any]`. + # pyre-fixme[6]: For 1st argument expected `Optional[TorchCompileParams]` + # but got `Dict[str, typing.Any]`. + # pyre-fixme[6]: For 1st argument expected + # `Union[typing_extensions.Literal['epoch'], + # typing_extensions.Literal['step']]` but got `Dict[str, typing.Any]`. + # pyre-fixme[6]: For 1st argument expected `Union[None, str, dtype]` but got + # `Dict[str, typing.Any]`. + # pyre-fixme[6]: For 1st argument expected `Union[None, str, Strategy]` but + # got `Dict[str, typing.Any]`. + # pyre-fixme[6]: For 1st argument expected `bool` but got `Dict[str, + # typing.Any]`. + # pyre-fixme[6]: For 1st argument expected `int` but got `Dict[str, + # typing.Any]`. + # pyre-fixme[6]: For 1st argument expected `Module` but got `Dict[str, + # typing.Any]`. super().__init__(**kwargs) self.tb_logger = tb_logger self.opt_cfg = opt_cfg self.log_every_n_steps = log_every_n_steps def configure_optimizers_and_lr_scheduler( - self, module + self, + # pyre-fixme[2]: Parameter must be annotated. + module, ) -> Tuple[torch.optim.Optimizer, Optional[TLRScheduler]]: optimizer = create_optimizer(module, self.opt_cfg) return optimizer, None + # pyre-fixme[3]: Return annotation cannot contain `Any`. def compute_loss(self, state: State, data: Batch) -> Tuple[torch.Tensor, Any]: input, target = data outputs, loss = self.module(input, target) return loss, outputs def on_train_step_end( - self, state: State, data: Batch, step: int, loss: torch.Tensor, outputs: Any + self, + state: State, + data: Batch, + step: int, + loss: torch.Tensor, + # pyre-fixme[2]: Parameter annotation cannot be `Any`. 
+ outputs: Any, ) -> None: if step % self.log_every_n_steps == 0: self.tb_logger.log("loss", loss, step) @@ -107,6 +147,7 @@ def main(args: Namespace) -> None: n_embd=args.n_embd, vocab_size=dataset.vocab_size, block_size=dataset.block_size, + # pyre-fixme[6]: For 6th argument expected `str` but got `device`. device=device, ) module = GPT(gpt_cfg) diff --git a/examples/mingpt/model.py b/examples/mingpt/model.py index f72c5cce7a..f9f825e79f 100644 --- a/examples/mingpt/model.py +++ b/examples/mingpt/model.py @@ -21,8 +21,11 @@ class GPTConfig: model_type: str = "gpt2" # model configurations + # pyre-fixme[8]: Attribute has type `int`; used as `None`. n_layer: int = None + # pyre-fixme[8]: Attribute has type `int`; used as `None`. n_head: int = None + # pyre-fixme[8]: Attribute has type `int`; used as `None`. n_embd: int = None # openai's values for gpt2 vocab_size: int = 50257 @@ -45,6 +48,8 @@ class MultiheadAttentionLayer(nn.Module): A multi-head masked self-attention layer with a projection at the end. """ + # pyre-fixme[3]: Return type must be annotated. + # pyre-fixme[2]: Parameter must be annotated. def __init__(self, config, dtype=torch.float32): super().__init__() assert config.n_embd % config.n_head == 0 @@ -67,6 +72,8 @@ def __init__(self, config, dtype=torch.float32): dtype=dtype, ) + # pyre-fixme[3]: Return type must be annotated. + # pyre-fixme[2]: Parameter must be annotated. def forward(self, x): _, seq_size, _ = x.size() y = self.attn(x, x, x, attn_mask=self.mask[0, 0, :seq_size, :seq_size])[0] @@ -77,6 +84,7 @@ def forward(self, x): class Block(nn.Module): """an unassuming Transformer block""" + # pyre-fixme[3]: Return type must be annotated. def __init__(self, config: GPTConfig): super().__init__() self.ln1 = nn.LayerNorm(config.n_embd) @@ -89,6 +97,8 @@ def __init__(self, config: GPTConfig): nn.Dropout(config.resid_pdrop), ) + # pyre-fixme[3]: Return type must be annotated. + # pyre-fixme[2]: Parameter must be annotated. def forward(self, x): x = x + self.attn(self.ln1(x)) x = x + self.mlp(self.ln2(x)) @@ -96,6 +106,8 @@ def forward(self, x): class EmbeddingStem(nn.Module): + # pyre-fixme[3]: Return type must be annotated. + # pyre-fixme[2]: Parameter must be annotated. def __init__(self, config: GPTConfig, dtype=torch.float32): super().__init__() self.tok_emb = nn.Embedding( @@ -107,11 +119,15 @@ def __init__(self, config: GPTConfig, dtype=torch.float32): ) ) self.drop = nn.Dropout(config.embd_pdrop) + # pyre-fixme[4]: Attribute must be annotated. self.block_size = config.block_size + # pyre-fixme[3]: Return type must be annotated. def reset_parameters(self): self.tok_emb.reset_parameters() + # pyre-fixme[3]: Return type must be annotated. + # pyre-fixme[2]: Parameter must be annotated. def forward(self, idx): b, t = idx.size() assert ( @@ -130,8 +146,10 @@ def forward(self, idx): class GPT(nn.Module): """GPT Language Model""" + # pyre-fixme[3]: Return type must be annotated. def __init__(self, config: GPTConfig): super().__init__() + # pyre-fixme[4]: Attribute must be annotated. self.block_size = config.block_size config = self._set_model_config(config) @@ -153,6 +171,8 @@ def __init__(self, config: GPTConfig): n_params = sum(p.numel() for p in self.blocks.parameters()) print("number of parameters: %.2fM" % (n_params / 1e6,)) + # pyre-fixme[3]: Return type must be annotated. + # pyre-fixme[2]: Parameter must be annotated. 
def _set_model_config(self, config): type_given = config.model_type is not None params_given = all( @@ -202,6 +222,8 @@ def _set_model_config(self, config): ) return config + # pyre-fixme[3]: Return type must be annotated. + # pyre-fixme[2]: Parameter must be annotated. def _init_weights(self, module): if isinstance(module, (nn.Linear, nn.Embedding)): module.weight.data.normal_(mean=0.0, std=0.02) @@ -211,6 +233,8 @@ def _init_weights(self, module): module.bias.data.zero_() module.weight.data.fill_(1.0) + # pyre-fixme[3]: Return type must be annotated. + # pyre-fixme[2]: Parameter must be annotated. def forward(self, idx, targets=None): x = self.emb_stem(idx) x = self.blocks(x) @@ -227,8 +251,19 @@ def forward(self, idx, targets=None): return logits, loss @torch.no_grad() + # pyre-fixme[3]: Return type must be annotated. def generate( - self, idx, max_new_tokens, temperature=1.0, do_sample=False, top_k=None + self, + # pyre-fixme[2]: Parameter must be annotated. + idx, + # pyre-fixme[2]: Parameter must be annotated. + max_new_tokens, + # pyre-fixme[2]: Parameter must be annotated. + temperature=1.0, + # pyre-fixme[2]: Parameter must be annotated. + do_sample=False, + # pyre-fixme[2]: Parameter must be annotated. + top_k=None, ): """ Take a conditioning sequence of indices idx (LongTensor of shape (b,t)) and complete @@ -261,6 +296,7 @@ def generate( return idx +# pyre-fixme[3]: Return type must be annotated. def create_optimizer(model: torch.nn.Module, opt_config: OptimizerConfig): """ This long function is unfortunately doing something very simple and is being very defensive: diff --git a/examples/mnist/main.py b/examples/mnist/main.py index 067559afe3..f301a1b996 100644 --- a/examples/mnist/main.py +++ b/examples/mnist/main.py @@ -29,6 +29,7 @@ class Net(nn.Module): + # pyre-fixme[3]: Return type must be annotated. def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(1, 32, 3, 1) @@ -66,6 +67,31 @@ def __init__( gamma: float, **kwargs: Dict[str, Any], # kwargs to be passed to AutoUnit ) -> None: + # pyre-fixme[6]: For 1st argument expected `Optional[bool]` but got + # `Dict[str, typing.Any]`. + # pyre-fixme[6]: For 1st argument expected `Optional[float]` but got + # `Dict[str, typing.Any]`. + # pyre-fixme[6]: For 1st argument expected `Optional[device]` but got + # `Dict[str, typing.Any]`. + # pyre-fixme[6]: For 1st argument expected + # `Optional[ActivationCheckpointParams]` but got `Dict[str, typing.Any]`. + # pyre-fixme[6]: For 1st argument expected `Optional[SWAParams]` but got + # `Dict[str, typing.Any]`. + # pyre-fixme[6]: For 1st argument expected `Optional[TorchCompileParams]` + # but got `Dict[str, typing.Any]`. + # pyre-fixme[6]: For 1st argument expected + # `Union[typing_extensions.Literal['epoch'], + # typing_extensions.Literal['step']]` but got `Dict[str, typing.Any]`. + # pyre-fixme[6]: For 1st argument expected `Union[None, str, dtype]` but got + # `Dict[str, typing.Any]`. + # pyre-fixme[6]: For 1st argument expected `Union[None, str, Strategy]` but + # got `Dict[str, typing.Any]`. + # pyre-fixme[6]: For 1st argument expected `bool` but got `Dict[str, + # typing.Any]`. + # pyre-fixme[6]: For 1st argument expected `int` but got `Dict[str, + # typing.Any]`. + # pyre-fixme[6]: For 1st argument expected `Module` but got `Dict[str, + # typing.Any]`. 
super().__init__(**kwargs) self.tb_logger = tb_logger self.lr = lr @@ -82,6 +108,7 @@ def configure_optimizers_and_lr_scheduler( lr_scheduler = StepLR(optimizer, step_size=1, gamma=self.gamma) return optimizer, lr_scheduler + # pyre-fixme[3]: Return annotation cannot contain `Any`. def compute_loss(self, state: State, data: Batch) -> Tuple[torch.Tensor, Any]: inputs, targets = data outputs = self.module(inputs) @@ -91,7 +118,13 @@ def compute_loss(self, state: State, data: Batch) -> Tuple[torch.Tensor, Any]: return loss, outputs def on_train_step_end( - self, state: State, data: Batch, step: int, loss: torch.Tensor, outputs: Any + self, + state: State, + data: Batch, + step: int, + loss: torch.Tensor, + # pyre-fixme[2]: Parameter annotation cannot be `Any`. + outputs: Any, ) -> None: _, targets = data self.train_accuracy.update(outputs, targets) @@ -106,7 +139,13 @@ def on_train_epoch_end(self, state: State) -> None: self.train_accuracy.reset() def on_eval_step_end( - self, state: State, data: Batch, step: int, loss: torch.Tensor, outputs: Any + self, + state: State, + data: Batch, + step: int, + loss: torch.Tensor, + # pyre-fixme[2]: Parameter annotation cannot be `Any`. + outputs: Any, ) -> None: if step % self.log_every_n_steps == 0: self.tb_logger.log("evaluation loss", loss, step) diff --git a/examples/torchdata_train_example.py b/examples/torchdata_train_example.py index 3aca3c0a0d..5df4c62b3e 100644 --- a/examples/torchdata_train_example.py +++ b/examples/torchdata_train_example.py @@ -40,6 +40,7 @@ def prepare_model(input_dim: int, device: torch.device) -> nn.Module: return nn.Linear(input_dim, 1, device=device) +# pyre-fixme[24]: Generic type `ShufflerIterDataPipe` expects 1 type parameter. def _generate_datapipe(num_samples: int, input_dim: int) -> ShufflerIterDataPipe: """ Returns a shuffled datapipe of random inputs and labels. diff --git a/examples/torchrec/main.py b/examples/torchrec/main.py index 73aa00e610..d724bdfcfa 100644 --- a/examples/torchrec/main.py +++ b/examples/torchrec/main.py @@ -73,6 +73,8 @@ def init_dataloader( keys=DEFAULT_CAT_NAMES, batch_size=batch_size, hash_size=num_embeddings, + # pyre-fixme[6]: For 4th argument expected `Optional[List[int]]` but got + # `Optional[int]`. hash_sizes=num_embeddings_per_feature, manual_seed=seed, ids_per_feature=1, @@ -193,6 +195,7 @@ def __init__( ) -> None: super().__init__() self.module = module + # pyre-fixme[4]: Attribute must be annotated. self.pipeline = TrainPipelineSparseDist( module, optimizer, device, execute_all_batches=True ) @@ -202,6 +205,8 @@ def __init__( self.tb_logger = tb_logger self.log_every_n_steps = log_every_n_steps + # pyre-fixme[14]: `train_step` overrides method defined in `TrainUnit` + # inconsistently. def train_step(self, state: State, data: Iterator[Batch]) -> None: step = self.train_progress.num_steps_completed loss, logits, labels = self.pipeline.progress(data) @@ -217,6 +222,7 @@ def on_train_epoch_end(self, state: State) -> None: # reset the metric every epoch self.train_auroc.reset() + # pyre-fixme[14]: `eval_step` overrides method defined in `EvalUnit` inconsistently. def eval_step(self, state: State, data: Iterator[Batch]) -> None: step = self.eval_progress.num_steps_completed loss, _, _ = self.pipeline.progress(data) @@ -258,7 +264,11 @@ def init_model( tables=eb_configs, device=torch.device("meta") ), dense_in_features=len(DEFAULT_INT_NAMES), + # pyre-fixme[6]: For 3rd argument expected `List[int]` but got + # `Optional[List[int]]`. 
dense_arch_layer_sizes=dense_arch_layer_sizes, + # pyre-fixme[6]: For 4th argument expected `List[int]` but got + # `Optional[List[int]]`. over_arch_layer_sizes=over_arch_layer_sizes, dense_device=device, ) diff --git a/examples/torchrec/tests/torchrec_example_test.py b/examples/torchrec/tests/torchrec_example_test.py index 205686dd2a..a1335be4c6 100644 --- a/examples/torchrec/tests/torchrec_example_test.py +++ b/examples/torchrec/tests/torchrec_example_test.py @@ -22,6 +22,8 @@ class TorchrecExampleTest(unittest.TestCase): @skip_if_asan + # pyre-fixme[56]: Pyre was not able to infer the type of argument `not + # torch.cuda.is_available()` to decorator factory `unittest.skipIf`. @unittest.skipIf( not torch.cuda.is_available(), "Skip when CUDA is not available", diff --git a/examples/train_unit_example.py b/examples/train_unit_example.py index 32089e981d..6bf79b07b8 100644 --- a/examples/train_unit_example.py +++ b/examples/train_unit_example.py @@ -146,6 +146,7 @@ def main(argv: List[str]) -> None: args.log_every_n_steps, ) + # pyre-fixme[28]: Unexpected keyword argument `dataloader`. train( my_unit, dataloader=dataloader, diff --git a/tests/framework/callbacks/test_csv_writer.py b/tests/framework/callbacks/test_csv_writer.py index d54bc912b5..c9b4aee8f0 100644 --- a/tests/framework/callbacks/test_csv_writer.py +++ b/tests/framework/callbacks/test_csv_writer.py @@ -24,6 +24,7 @@ def get_step_output_rows( self, state: State, unit: PredictUnit[TPredictData], + # pyre-fixme[2]: Parameter annotation cannot be `Any`. step_output: Any, ) -> Union[List[str], List[List[str]]]: return [["1"], ["2"]] @@ -34,6 +35,7 @@ def get_step_output_rows( self, state: State, unit: PredictUnit[TPredictData], + # pyre-fixme[2]: Parameter annotation cannot be `Any`. step_output: Any, ) -> Union[List[str], List[List[str]]]: return ["1"] @@ -98,6 +100,7 @@ def test_csv_writer_with_no_output_rows_def(self) -> None: # Throw exception because get_step_output_rows is not defined. with self.assertRaises(TypeError): + # pyre-fixme[45]: Cannot instantiate abstract class `BaseCSVWriter`. csv_callback = BaseCSVWriter( header_row=_HEADER_ROW, dir_path="", filename=_FILENAME ) diff --git a/tests/framework/callbacks/test_lambda.py b/tests/framework/callbacks/test_lambda.py index 31aa4d0056..3492c537e8 100644 --- a/tests/framework/callbacks/test_lambda.py +++ b/tests/framework/callbacks/test_lambda.py @@ -57,9 +57,12 @@ def test_lambda_callback_train(self) -> None: ) checker = set() + # pyre-fixme[53]: Captured variable `checker` is not annotated. + # pyre-fixme[2]: Parameter must be annotated. def call(hook: str, *_, **__) -> None: checker.add(hook) + # pyre-fixme[6]: For 1st argument expected `Callback` but got `Type[Callback]`. hooks = _get_members_in_different_name(Callback, "train") hooks_args = {h: partial(call, h) for h in hooks} my_train_unit = DummyTrainUnit(input_dim=input_dim) @@ -83,9 +86,12 @@ def test_lambda_callback_eval(self) -> None: ) checker = set() + # pyre-fixme[53]: Captured variable `checker` is not annotated. + # pyre-fixme[2]: Parameter must be annotated. def call(hook: str, *_, **__) -> None: checker.add(hook) + # pyre-fixme[6]: For 1st argument expected `Callback` but got `Type[Callback]`. 
hooks = _get_members_in_different_name(Callback, "eval") hooks_args = {h: partial(call, h) for h in hooks} my_eval_unit = DummyEvalUnit(input_dim=input_dim) @@ -104,9 +110,12 @@ def test_lambda_callback_predict(self) -> None: max_steps_per_epoch = 6 checker = set() + # pyre-fixme[53]: Captured variable `checker` is not annotated. + # pyre-fixme[2]: Parameter must be annotated. def call(hook: str, *_, **__) -> None: checker.add(hook) + # pyre-fixme[6]: For 1st argument expected `Callback` but got `Type[Callback]`. hooks = _get_members_in_different_name(Callback, "predict") hooks_args = {h: partial(call, h) for h in hooks} predict_dataloader = generate_random_dataloader( @@ -132,10 +141,13 @@ def test_lambda_callback_train_with_except(self) -> None: ) checker = set() + # pyre-fixme[53]: Captured variable `checker` is not annotated. + # pyre-fixme[2]: Parameter must be annotated. def call(hook: str, *_, **__) -> None: checker.add(hook) # with on_exception, training will not be ended + # pyre-fixme[6]: For 1st argument expected `Callback` but got `Type[Callback]`. hooks = _get_members_in_different_name(Callback, "train") - { "on_train_end", "on_train_epoch_end", diff --git a/tests/framework/callbacks/test_learning_rate_monitor.py b/tests/framework/callbacks/test_learning_rate_monitor.py index 25a14e6ab8..8f97d56249 100644 --- a/tests/framework/callbacks/test_learning_rate_monitor.py +++ b/tests/framework/callbacks/test_learning_rate_monitor.py @@ -54,6 +54,7 @@ def test_learning_rate_monitor_step(self) -> None: my_unit, dataloader, max_epochs=max_epochs, + # pyre-fixme[6]: For 4th argument expected `Optional[int]` but got `float`. max_steps=total_steps, callbacks=[monitor], ) diff --git a/tests/framework/callbacks/test_module_summary.py b/tests/framework/callbacks/test_module_summary.py index d665ce01f0..58f37e7212 100644 --- a/tests/framework/callbacks/test_module_summary.py +++ b/tests/framework/callbacks/test_module_summary.py @@ -122,10 +122,13 @@ def forward(self, x): self.assertTrue(ms.forward_elapsed_time_ms != "?") +# pyre-fixme[5]: Global expression must be annotated. Batch = Tuple[torch.tensor, torch.tensor] +# pyre-fixme[11]: Annotation `Batch` is not defined as a type. class DummyAutoUnit(AutoUnit[Batch]): + # pyre-fixme[3]: Return annotation cannot contain `Any`. def compute_loss(self, state: State, data: Batch) -> Tuple[torch.Tensor, Any]: inputs, targets = data outputs = self.module(inputs) diff --git a/tests/framework/callbacks/test_system_resources_monitor.py b/tests/framework/callbacks/test_system_resources_monitor.py index 6d23b02306..5b0b557994 100644 --- a/tests/framework/callbacks/test_system_resources_monitor.py +++ b/tests/framework/callbacks/test_system_resources_monitor.py @@ -60,6 +60,7 @@ def test_system_resources_monitor_step(self) -> None: my_unit, dataloader, max_epochs=max_epochs, + # pyre-fixme[6]: For 4th argument expected `Optional[int]` but got `float`. 
max_steps=total_steps, callbacks=[monitor], ) diff --git a/tests/framework/callbacks/test_tensorboard_parameter_monitor.py b/tests/framework/callbacks/test_tensorboard_parameter_monitor.py index caa28532cb..75546c4191 100644 --- a/tests/framework/callbacks/test_tensorboard_parameter_monitor.py +++ b/tests/framework/callbacks/test_tensorboard_parameter_monitor.py @@ -32,4 +32,6 @@ def test_monitor_train(self) -> None: dataloader = generate_random_dataloader(dataset_len, input_dim, batch_size) train(my_unit, dataloader, max_epochs=max_epochs, callbacks=[monitor]) + # pyre-fixme[6]: For 2nd argument expected `SupportsDunderLT[Variable[_T]]` + # but got `int`. self.assertGreater(summary_writer.add_histogram.call_count, 0) diff --git a/tests/framework/callbacks/test_torchsnapshot_saver.py b/tests/framework/callbacks/test_torchsnapshot_saver.py index 0d54fed8e2..9f30234ced 100644 --- a/tests/framework/callbacks/test_torchsnapshot_saver.py +++ b/tests/framework/callbacks/test_torchsnapshot_saver.py @@ -35,6 +35,7 @@ class TorchSnapshotSaverTest(unittest.TestCase): + # pyre-fixme[4]: Attribute must be annotated. cuda_available = torch.cuda.is_available() def test_save_every_n_train_steps(self) -> None: @@ -300,6 +301,8 @@ def test_save_on_train_end(self) -> None: ) self.assertTrue(os.path.exists(os.path.join(temp_dir, expected_path))) + # pyre-fixme[56]: Pyre was not able to infer the type of argument + # `torch.distributed.is_available()` to decorator factory `unittest.skipUnless`. @unittest.skipUnless( torch.distributed.is_available(), reason="Torch distributed is needed to run" ) @@ -325,6 +328,8 @@ def _directory_sync_collective() -> None: if get_global_rank() == 0: shutil.rmtree(temp_dir) # delete temp directory + # pyre-fixme[56]: Pyre was not able to infer the type of argument + # `torch.distributed.is_available()` to decorator factory `unittest.skipUnless`. @unittest.skipUnless( torch.distributed.is_available(), reason="Torch distributed is needed to run" ) @@ -413,6 +418,8 @@ def test_latest_checkpoint_path(self) -> None: os.mkdir(path_4) self.assertEqual(_get_latest_checkpoint_path(temp_dir), path_3) + # pyre-fixme[56]: Pyre was not able to infer the type of argument + # `torch.distributed.is_available()` to decorator factory `unittest.skipUnless`. @unittest.skipUnless( torch.distributed.is_available(), reason="Torch distributed is needed to run" ) @@ -459,13 +466,18 @@ def _latest_checkpoint_path_distributed() -> None: shutil.rmtree(temp_dir) # delete temp directory +# pyre-fixme[5]: Global expression must be annotated. Batch = Tuple[torch.tensor, torch.tensor] +# pyre-fixme[11]: Annotation `Batch` is not defined as a type. class DummyAutoUnit(AutoUnit[Batch]): + # pyre-fixme[3]: Return type must be annotated. + # pyre-fixme[2]: Parameter must be annotated. def __init__(self, input_dim: int, *args, **kwargs): super().__init__(module=torch.nn.Linear(input_dim, 2), *args, **kwargs) + # pyre-fixme[3]: Return annotation cannot contain `Any`. def compute_loss(self, state: State, data: Batch) -> Tuple[torch.Tensor, Any]: inputs, targets = data outputs = self.module(inputs) @@ -482,6 +494,8 @@ def configure_optimizers_and_lr_scheduler( class DummyStatefulDataLoader: + # pyre-fixme[3]: Return type must be annotated. + # pyre-fixme[24]: Generic type `Iterable` expects 1 type parameter. 
def __init__(self, dataloader: Iterable): self.dataloader = dataloader self.state_dict_call_count = 0 @@ -495,5 +509,6 @@ def load_state_dict(self, state_dict: Dict[str, Any]) -> None: self.load_state_dict_call_count += 1 return None + # pyre-fixme[3]: Return type must be annotated. def __iter__(self): return iter(self.dataloader) diff --git a/tests/framework/callbacks/test_tqdm_progress_bar.py b/tests/framework/callbacks/test_tqdm_progress_bar.py index 28165d9470..8daf0a31ff 100644 --- a/tests/framework/callbacks/test_tqdm_progress_bar.py +++ b/tests/framework/callbacks/test_tqdm_progress_bar.py @@ -41,6 +41,7 @@ def test_progress_bar_train(self) -> None: my_unit = DummyTrainUnit(2) progress_bar = TQDMProgressBar() progress_bar.on_train_epoch_start(state, my_unit) + # pyre-fixme[16]: Optional type has no attribute `total`. self.assertEqual(progress_bar._train_progress_bar.total, expected_total) def test_progress_bar_train_integration(self) -> None: @@ -80,6 +81,7 @@ def test_progress_bar_evaluate(self) -> None: my_unit = DummyEvalUnit(2) progress_bar = TQDMProgressBar() progress_bar.on_eval_epoch_start(state, my_unit) + # pyre-fixme[16]: Optional type has no attribute `total`. self.assertEqual(progress_bar._eval_progress_bar.total, expected_total) def test_progress_bar_predict(self) -> None: @@ -104,6 +106,7 @@ def test_progress_bar_predict(self) -> None: my_unit = DummyPredictUnit(2) progress_bar = TQDMProgressBar() progress_bar.on_predict_epoch_start(state, my_unit) + # pyre-fixme[16]: Optional type has no attribute `total`. self.assertEqual(progress_bar._predict_progress_bar.total, expected_total) def test_progress_bar_mid_progress(self) -> None: @@ -128,5 +131,7 @@ def test_progress_bar_mid_progress(self) -> None: my_unit.predict_progress._num_steps_completed = 2 progress_bar = TQDMProgressBar() progress_bar.on_predict_epoch_start(state, my_unit) + # pyre-fixme[16]: Optional type has no attribute `total`. self.assertEqual(progress_bar._predict_progress_bar.total, expected_total) + # pyre-fixme[16]: Optional type has no attribute `n`. self.assertEqual(progress_bar._predict_progress_bar.n, 2) diff --git a/tests/framework/test_app_state_mixin.py b/tests/framework/test_app_state_mixin.py index 75294a74b7..9bcd1017af 100644 --- a/tests/framework/test_app_state_mixin.py +++ b/tests/framework/test_app_state_mixin.py @@ -39,7 +39,9 @@ def test_tracked_modules(self) -> None: self.assertEqual(my_unit.tracked_modules()["loss_fn_b"], my_unit.loss_fn_b) # delete the attributes + # pyre-fixme[8]: Attribute has type `Linear`; used as `None`. my_unit.module_a = None + # pyre-fixme[8]: Attribute has type `CrossEntropyLoss`; used as `None`. my_unit.loss_fn_b = None # the attributes should be removed from tracked_modules @@ -58,6 +60,7 @@ def test_tracked_optimizers(self) -> None: ) # delete the attribute + # pyre-fixme[8]: Attribute has type `SGD`; used as `None`. my_unit.optimizer_c = None # the attribute should be removed from tracked_optimizers @@ -76,6 +79,7 @@ def test_tracked_lr_schedulers(self) -> None: ) # delete the attribute + # pyre-fixme[8]: Attribute has type `StepLR`; used as `None`. my_unit.lr_scheduler_d = None # the attribute should be removed from tracked_lr_schedulers @@ -92,6 +96,7 @@ def test_miscellaneous_stateful(self) -> None: self.assertEqual(my_unit.app_state()["grad_scaler_e"], my_unit.grad_scaler_e) # delete the attribute + # pyre-fixme[8]: Attribute has type `GradScaler`; used as `None`. 
my_unit.grad_scaler_e = None # the attribute should be removed from tracked_misc_statefuls @@ -115,10 +120,15 @@ def test_app_state(self) -> None: self.assertTrue(key in my_unit.app_state()) # delete the attributes + # pyre-fixme[8]: Attribute has type `Linear`; used as `None`. my_unit.module_a = None + # pyre-fixme[8]: Attribute has type `CrossEntropyLoss`; used as `None`. my_unit.loss_fn_b = None + # pyre-fixme[8]: Attribute has type `SGD`; used as `None`. my_unit.optimizer_c = None + # pyre-fixme[8]: Attribute has type `StepLR`; used as `None`. my_unit.lr_scheduler_d = None + # pyre-fixme[8]: Attribute has type `GradScaler`; used as `None`. my_unit.grad_scaler_e = None # the attributes should no longer be in app_state @@ -148,23 +158,27 @@ def test_reassigning_attributes(self) -> None: # reassigning module_a to be an optimizer should work self.assertTrue("module_a" in my_unit.tracked_modules()) + # pyre-fixme[8]: Attribute has type `Linear`; used as `SGD`. my_unit.module_a = optimizer_g self.assertTrue("module_a" not in my_unit.tracked_modules()) self.assertTrue("module_a" in my_unit.tracked_optimizers()) # reassigning optimizer_c to be an lr_scheduler should work self.assertTrue("optimizer_c" in my_unit.tracked_optimizers()) + # pyre-fixme[8]: Attribute has type `SGD`; used as `StepLR`. my_unit.optimizer_c = lr_scheduler_h self.assertTrue("optimizer_c" not in my_unit.tracked_optimizers()) self.assertTrue("optimizer_c" in my_unit.tracked_lr_schedulers()) # reassigning lr_scheduler_d to be a nn.module should work self.assertTrue("lr_scheduler_d" in my_unit.tracked_lr_schedulers()) + # pyre-fixme[8]: Attribute has type `StepLR`; used as `CrossEntropyLoss`. my_unit.lr_scheduler_d = loss_fn_f self.assertTrue("lr_scheduler_d" not in my_unit.tracked_lr_schedulers()) self.assertTrue("lr_scheduler_d" in my_unit.tracked_modules()) self.assertTrue("grad_scaler_e" in my_unit.tracked_misc_statefuls()) + # pyre-fixme[8]: Attribute has type `GradScaler`; used as `CrossEntropyLoss`. my_unit.grad_scaler_e = loss_fn_f self.assertTrue("grad_scaler_e" not in my_unit.tracked_misc_statefuls()) self.assertTrue("grad_scaler_e" in my_unit.tracked_modules()) diff --git a/tests/framework/test_auto_unit.py b/tests/framework/test_auto_unit.py index 4ac2e93ba4..59d33929e1 100644 --- a/tests/framework/test_auto_unit.py +++ b/tests/framework/test_auto_unit.py @@ -48,6 +48,7 @@ class TestAutoUnit(unittest.TestCase): + # pyre-fixme[4]: Attribute must be annotated. cuda_available = torch.cuda.is_available() def test_app_state_mixin(self) -> None: @@ -119,6 +120,8 @@ def test_lr_scheduler_epoch(self) -> None: condition=cuda_available, reason="This test needs a GPU host to run." ) @patch("torch.autocast") + # pyre-fixme[30]: Terminating analysis - type + # `torchtnt.tests.framework.test_auto_unit.Batch` not defined. def test_mixed_precision_fp16(self, mock_autocast) -> None: """ Test that the mixed precision autocast context is called when fp16 precision is set @@ -139,6 +142,8 @@ def test_mixed_precision_fp16(self, mock_autocast) -> None: condition=cuda_available, reason="This test needs a GPU host to run." ) @patch("torch.autocast") + # pyre-fixme[30]: Terminating analysis - type + # `torchtnt.tests.framework.test_auto_unit.Batch` not defined. def test_mixed_precision_bf16(self, mock_autocast) -> None: """ Test that the mixed precision autocast context is called when bf16 precision is set @@ -405,6 +410,7 @@ def forward(self, x): condition=cuda_available, reason="This test needs a GPU host to run." 
) @patch("torch.autocast") + # pyre-fixme[2]: Parameter must be annotated. def test_eval_mixed_precision_bf16(self, mock_autocast) -> None: """ Test that the mixed precision autocast context is called during evaluate when precision = bf16 @@ -425,6 +431,8 @@ def test_eval_mixed_precision_bf16(self, mock_autocast) -> None: device_type="cuda", dtype=torch.bfloat16, enabled=True ) + # pyre-fixme[56]: Pyre was not able to infer the type of argument + # `torch.distributed.is_available()` to decorator factory `unittest.skipUnless`. @unittest.skipUnless( torch.distributed.is_available(), reason="Torch distributed is needed to run" ) @@ -440,6 +448,8 @@ def test_no_sync(self) -> None: launcher.elastic_launch(config, entrypoint=self._test_fsdp_no_sync)() @staticmethod + # pyre-fixme[30]: Terminating analysis - type + # `torchtnt.tests.framework.test_auto_unit.Batch` not defined. def _test_ddp_no_sync() -> None: """ Test that the no_sync autocast context is correctly applied when using gradient accumulation and DDP @@ -470,6 +480,8 @@ def _test_ddp_no_sync() -> None: no_sync_mock.assert_not_called() @staticmethod + # pyre-fixme[30]: Terminating analysis - type + # `torchtnt.tests.framework.test_auto_unit.Batch` not defined. def _test_fsdp_no_sync() -> None: """ Test that the no_sync autocast context is correctly applied when using gradient accumulation and FSDP @@ -500,6 +512,8 @@ def _test_fsdp_no_sync() -> None: auto_unit.train_step(state=state, data=dummy_iterator) no_sync_mock.assert_not_called() + # pyre-fixme[30]: Terminating analysis - type + # `torchtnt.tests.framework.test_auto_unit.Batch` not defined. def test_move_data_to_device(self) -> None: """ Test that move_data_to_device is called @@ -558,6 +572,8 @@ def test_configure_optimizers_and_lr_scheduler_called_once(self) -> None: ) self.assertEqual(configure_optimizers_and_lr_scheduler_mock.call_count, 1) + # pyre-fixme[56]: Pyre was not able to infer the type of argument + # `torch.distributed.is_available()` to decorator factory `unittest.skipUnless`. @unittest.skipUnless( torch.distributed.is_available(), reason="Torch distributed is needed to run" ) @@ -675,6 +691,7 @@ def custom_noop_hook( ) -> torch.futures.Future[torch.Tensor]: nonlocal custom_noop_hook_called + # pyre-fixme[29]: `Type[torch.futures.Future]` is not a function. fut: torch.futures.Future[torch.Tensor] = torch.futures.Future() fut.set_result(bucket.buffer()) custom_noop_hook_called = True @@ -721,6 +738,8 @@ def test_strategy_invalid_str(self) -> None: strategy="foo", ) + # pyre-fixme[56]: Pyre was not able to infer the type of argument + # `torch.distributed.is_available()` to decorator factory `unittest.skipUnless`. @unittest.skipUnless( torch.distributed.is_available(), reason="Torch distributed is needed to run" ) @@ -818,6 +837,7 @@ def test_is_last_batch(self) -> None: my_unit = LastBatchAutoUnit( module=my_module, + # pyre-fixme[6]: For 2nd argument expected `int` but got `float`. expected_steps_per_epoch=expected_steps_per_epoch, ) @@ -871,6 +891,7 @@ def test_auto_unit_timing_eval(self) -> None: condition=cuda_available, reason="This test needs a GPU host to run." ) @patch("torch.autocast") + # pyre-fixme[2]: Parameter must be annotated. def test_predict_mixed_precision_fp16(self, mock_autocast) -> None: """ Test that the mixed precision autocast context is called during predict when precision = fp16 @@ -898,6 +919,7 @@ def test_predict_mixed_precision_fp16(self, mock_autocast) -> None: condition=cuda_available, reason="This test needs a GPU host to run." 
) @patch("torch.compile") + # pyre-fixme[2]: Parameter must be annotated. def test_compile_predict(self, mock_dynamo) -> None: """ e2e torch compile on predict @@ -939,6 +961,7 @@ def test_auto_predict_unit_timing_predict(self) -> None: ) @patch("torch.autograd.set_detect_anomaly") + # pyre-fixme[2]: Parameter must be annotated. def test_predict_detect_anomaly(self, mock_detect_anomaly) -> None: my_module = torch.nn.Linear(2, 2) auto_unit = AutoPredictUnit(module=my_module, detect_anomaly=True) @@ -954,14 +977,19 @@ def test_predict_detect_anomaly(self, mock_detect_anomaly) -> None: mock_detect_anomaly.assert_called() +# pyre-fixme[5]: Global expression must be annotated. Batch = Tuple[torch.tensor, torch.tensor] +# pyre-fixme[11]: Annotation `Batch` is not defined as a type. class DummyAutoUnit(AutoUnit[Batch]): + # pyre-fixme[3]: Return type must be annotated. + # pyre-fixme[2]: Parameter must be annotated. def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._compile_used = False + # pyre-fixme[3]: Return annotation cannot contain `Any`. def compute_loss(self, state: State, data: Batch) -> Tuple[torch.Tensor, Any]: if COMPILE_AVAIL: self._compile_used = torch._dynamo.is_compiling() @@ -982,9 +1010,12 @@ def configure_optimizers_and_lr_scheduler( class DummyLRSchedulerAutoUnit(AutoUnit[Batch]): + # pyre-fixme[3]: Return type must be annotated. + # pyre-fixme[2]: Parameter must be annotated. def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) + # pyre-fixme[3]: Return annotation cannot contain `Any`. def compute_loss(self, state: State, data: Batch) -> Tuple[torch.Tensor, Any]: inputs, targets = data outputs = self.module(inputs) @@ -1001,10 +1032,13 @@ def configure_optimizers_and_lr_scheduler( class DummyComplexAutoUnit(AutoUnit[Batch]): + # pyre-fixme[3]: Return type must be annotated. + # pyre-fixme[2]: Parameter must be annotated. def __init__(self, lr: float, *args, **kwargs): super().__init__(*args, **kwargs) self.lr = lr + # pyre-fixme[3]: Return annotation cannot contain `Any`. def compute_loss(self, state: State, data: Batch) -> Tuple[torch.Tensor, Any]: inputs, targets = data outputs = self.module(inputs) @@ -1071,11 +1105,18 @@ def configure_optimizers_and_lr_scheduler( return my_optimizer, my_lr_scheduler def on_train_step_end( - self, state: State, data: Batch, step: int, loss: torch.Tensor, outputs: Any + self, + state: State, + data: Batch, + step: int, + loss: torch.Tensor, + # pyre-fixme[2]: Parameter annotation cannot be `Any`. + outputs: Any, ) -> None: assert state.train_state if self.train_progress.num_steps_completed_in_epoch == 1: tc = unittest.TestCase() + # pyre-fixme[16]: Optional type has no attribute `recorded_durations`. recorded_timer_keys = state.timer.recorded_durations.keys() for k in ( "TimingAutoUnit.on_train_start", @@ -1096,10 +1137,17 @@ def on_train_step_end( tc.assertNotIn("TimingAutoUnit.train_step", recorded_timer_keys) def on_eval_step_end( - self, state: State, data: Batch, step: int, loss: torch.Tensor, outputs: Any + self, + state: State, + data: Batch, + step: int, + loss: torch.Tensor, + # pyre-fixme[2]: Parameter annotation cannot be `Any`. + outputs: Any, ) -> None: if self.eval_progress.num_steps_completed_in_epoch == 1: tc = unittest.TestCase() + # pyre-fixme[16]: Optional type has no attribute `recorded_durations`. 
recorded_timer_keys = state.timer.recorded_durations.keys() for k in ( "TimingAutoUnit.on_eval_start", @@ -1138,10 +1186,16 @@ def configure_optimizers_and_lr_scheduler( return my_optimizer, my_lr_scheduler def on_predict_step_end( - self, state: State, data: TPredictData, step: int, outputs: Any + self, + state: State, + data: TPredictData, + step: int, + # pyre-fixme[2]: Parameter annotation cannot be `Any`. + outputs: Any, ) -> None: if self.predict_progress.num_steps_completed_in_epoch == 1: tc = unittest.TestCase() + # pyre-fixme[16]: Optional type has no attribute `recorded_durations`. recorded_timer_keys = state.timer.recorded_durations.keys() for k in ( "AutoPredictUnit.on_predict_start", diff --git a/tests/framework/test_callback_handler.py b/tests/framework/test_callback_handler.py index f67e54f435..776aa2d763 100644 --- a/tests/framework/test_callback_handler.py +++ b/tests/framework/test_callback_handler.py @@ -24,6 +24,7 @@ class DummyCallback(Callback): def __init__(self) -> None: self.called_hooks: Set[str] = set() + # pyre-fixme[3]: Return type must be annotated. def on_exception( self, state: State, @@ -194,6 +195,8 @@ def test_get_implemented_callback_mapping(self) -> None: "on_predict_end", ) + # pyre-fixme[3]: Return type must be annotated. + # pyre-fixme[2]: Parameter must be annotated. def dummy_fn(x, y): print("foo") diff --git a/tests/framework/test_train.py b/tests/framework/test_train.py index 26bbc274f1..7882c172ef 100644 --- a/tests/framework/test_train.py +++ b/tests/framework/test_train.py @@ -253,6 +253,7 @@ def train_step(self, state: State, data: Batch) -> torch.Tensor: state.stop() self.steps_processed += 1 + # pyre-fixme[7]: Expected `Tensor` but got `Tuple[typing.Any, typing.Any]`. return loss, outputs diff --git a/tests/framework/test_utils.py b/tests/framework/test_utils.py index 028e2dc617..29c71c5a1b 100644 --- a/tests/framework/test_utils.py +++ b/tests/framework/test_utils.py @@ -42,6 +42,7 @@ class UtilsTest(unittest.TestCase): + # pyre-fixme[4]: Attribute must be annotated. cuda_available = torch.cuda.is_available() def test_maybe_set_distributed_sampler_epoch(self) -> None: @@ -58,6 +59,8 @@ def _test_maybe_set_distributed_sampler_epoch() -> bool: Test _maybe_set_distributed_sampler_epoch util function """ dist.init_process_group("gloo") + # pyre-fixme[6]: For 1st argument expected `Iterable[typing.Any]` but got + # `None`. _maybe_set_distributed_sampler_epoch(None, 10) random_dataset = generate_random_dataset(10, 3) @@ -68,6 +71,8 @@ def _test_maybe_set_distributed_sampler_epoch() -> bool: _maybe_set_distributed_sampler_epoch( dummy_dataloader_with_distributed_sampler, 20 ) + # pyre-fixme[16]: Item `Sampler` of `Union[Sampler[typing.Any], + # Iterable[typing.Any]]` has no attribute `epoch`. return dummy_dataloader_with_distributed_sampler.sampler.epoch == 20 def test_set_module_training_mode(self) -> None: @@ -80,6 +85,8 @@ def test_set_module_training_mode(self) -> None: tracked_modules = {"module": module, "loss_fn": loss_fn} # set module training mode to False + # pyre-fixme[6]: For 1st argument expected `Dict[str, Module]` but got + # `Dict[str, Union[Linear, CrossEntropyLoss]]`. 
prior_module_train_states = _set_module_training_mode(tracked_modules, False) self.assertFalse(module.training) @@ -89,6 +96,8 @@ def test_set_module_training_mode(self) -> None: self.assertTrue(prior_module_train_states["loss_fn"]) # set back to True + # pyre-fixme[6]: For 1st argument expected `Dict[str, Module]` but got + # `Dict[str, Union[Linear, CrossEntropyLoss]]`. prior_module_train_states = _set_module_training_mode(tracked_modules, True) self.assertTrue(module.training) @@ -107,12 +116,16 @@ def test_reset_module_training_mode(self) -> None: tracked_modules = {"module": module, "loss_fn": loss_fn} # set module training mode to False + # pyre-fixme[6]: For 1st argument expected `Dict[str, Module]` but got + # `Dict[str, Union[Linear, CrossEntropyLoss]]`. prior_module_train_states = _set_module_training_mode(tracked_modules, False) self.assertFalse(module.training) self.assertFalse(loss_fn.training) # set back to True using reset + # pyre-fixme[6]: For 1st argument expected `Dict[str, Module]` but got + # `Dict[str, Union[Linear, CrossEntropyLoss]]`. _reset_module_training_mode(tracked_modules, prior_module_train_states) self.assertTrue(module.training) @@ -131,7 +144,12 @@ def dummy(a: int, b: str, data: Iterator[str]) -> None: foo = Foo() + # pyre-fixme[6]: For 1st argument expected `(State, object) -> object` but + # got `BoundMethod[typing.Callable(Foo.bar)[[Named(self, Foo)], None], Foo]`. self.assertFalse(_step_requires_iterator(foo.bar)) + # pyre-fixme[6]: For 1st argument expected `(State, object) -> object` but + # got `BoundMethod[typing.Callable(Foo.baz)[[Named(self, Foo), Named(data, + # Iterator[int]), Named(b, int), Named(c, str)], int], Foo]`. self.assertTrue(_step_requires_iterator(foo.baz)) self.assertTrue(_step_requires_iterator(dummy)) @@ -170,6 +188,7 @@ def test_is_epoch_done(self) -> None: self.assertFalse(_is_epoch_done(p, max_steps_per_epoch=None, max_steps=None)) @patch("torchtnt.framework.utils.record_function") + # pyre-fixme[2]: Parameter must be annotated. def test_get_timing_context(self, mock_record_function) -> None: state = MagicMock() state.timer = None @@ -193,13 +212,19 @@ def test_find_optimizers_for_module(self) -> None: optim2 = torch.optim.Adagrad(module2.parameters()) opts = {"optim1": optim1, "optim2": optim2} + # pyre-fixme[6]: For 2nd argument expected `Dict[str, Optimizer]` but got + # `Dict[str, Union[Adagrad, Adam]]`. optimizers = _find_optimizers_for_module(module1, opts) optim_name, _ = optimizers[0] self.assertEqual(optim_name, "optim1") + # pyre-fixme[6]: For 2nd argument expected `Dict[str, Optimizer]` but got + # `Dict[str, Union[Adagrad, Adam]]`. optimizers = _find_optimizers_for_module(module2, opts) optim_name, _ = optimizers[0] self.assertEqual(optim_name, "optim2") + # pyre-fixme[56]: Pyre was not able to infer the type of argument + # `torch.distributed.is_available()` to decorator factory `unittest.skipUnless`. @unittest.skipUnless( torch.distributed.is_available(), reason="Torch distributed is needed to run" ) @@ -221,15 +246,21 @@ def _find_optimizers_for_FSDP_module() -> None: optim2 = torch.optim.Adagrad(module2.parameters()) opts = {"optim1": optim1, "optim2": optim2} + # pyre-fixme[6]: For 2nd argument expected `Dict[str, Optimizer]` but got + # `Dict[str, Union[Adagrad, Adam]]`. 
optim_list = _find_optimizers_for_module(module1, opts) optim_name, _ = optim_list[0] tc = unittest.TestCase() tc.assertEqual(optim_name, "optim1") + # pyre-fixme[6]: For 2nd argument expected `Dict[str, Optimizer]` but got + # `Dict[str, Union[Adagrad, Adam]]`. optim_list = _find_optimizers_for_module(module2, opts) optim_name, _ = optim_list[0] tc.assertEqual(optim_name, "optim2") + # pyre-fixme[56]: Pyre was not able to infer the type of argument + # `torch.distributed.is_available()` to decorator factory `unittest.skipUnless`. @unittest.skipUnless( torch.distributed.is_available(), reason="Torch distributed is needed to run" ) @@ -254,16 +285,22 @@ def _construct_optimizers() -> None: tc.assertTrue(isinstance(result["lr_scheduler"], TLRScheduler)) +# pyre-fixme[5]: Global expression must be annotated. Batch = Tuple[torch.tensor, torch.tensor] +# pyre-fixme[11]: Annotation `Batch` is not defined as a type. class DummyAutoUnit(AutoUnit[Batch]): + # pyre-fixme[3]: Return type must be annotated. + # pyre-fixme[2]: Parameter must be annotated. def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) + # pyre-fixme[4]: Attribute must be annotated. self.module2 = torch.nn.Linear(10, 10).to(self.device) self.optim = torch.optim.SGD(self.module.parameters(), lr=0.01) self.optim2 = torch.optim.Adam(self.module2.parameters()) + # pyre-fixme[3]: Return annotation cannot contain `Any`. def compute_loss(self, state: State, data: Batch) -> Tuple[torch.Tensor, Any]: inputs, targets = data outputs = self.module(inputs) diff --git a/tests/utils/data/test_data_prefetcher.py b/tests/utils/data/test_data_prefetcher.py index 0dfffee2bd..fb017ae960 100644 --- a/tests/utils/data/test_data_prefetcher.py +++ b/tests/utils/data/test_data_prefetcher.py @@ -17,6 +17,7 @@ class DataTest(unittest.TestCase): + # pyre-fixme[4]: Attribute must be annotated. cuda_available = torch.cuda.is_available() def _generate_dataset(self, num_samples: int, input_dim: int) -> Dataset[Batch]: diff --git a/tests/utils/data/test_multi_dataloader.py b/tests/utils/data/test_multi_dataloader.py index a3ad13da95..7ad6b57502 100644 --- a/tests/utils/data/test_multi_dataloader.py +++ b/tests/utils/data/test_multi_dataloader.py @@ -83,6 +83,9 @@ def test_round_robin_smallest_dataset_exhausted(self) -> None: ) multi_dataloader = iter( MultiDataLoader( + # pyre-fixme[6]: For 1st argument expected `Dict[str, + # Union[DataLoader[typing.Any], Iterable[typing.Any]]]` but got + # `Dict[str, DataLoader[Tensor]]`. individual_dataloaders, round_robin, ) @@ -111,6 +114,9 @@ def test_round_robin_all_datasets_exhausted(self) -> None: round_robin = RoundRobin(iteration_order=["2", "1"]) multi_dataloader = iter( MultiDataLoader( + # pyre-fixme[6]: For 1st argument expected `Dict[str, + # Union[DataLoader[typing.Any], Iterable[typing.Any]]]` but got + # `Dict[str, DataLoader[Tensor]]`. individual_dataloaders, round_robin, ) @@ -139,6 +145,9 @@ def test_all_dataset_batches_all_datasets_exhausted(self) -> None: multi_dataloader = iter( MultiDataLoader( + # pyre-fixme[6]: For 1st argument expected `Dict[str, + # Union[DataLoader[typing.Any], Iterable[typing.Any]]]` but got + # `Dict[str, DataLoader[Tensor]]`. individual_dataloaders, all_dataset_batches, ) @@ -170,6 +179,9 @@ def test_all_dataset_batches_restart(self) -> None: multi_dataloader = iter( MultiDataLoader( + # pyre-fixme[6]: For 1st argument expected `Dict[str, + # Union[DataLoader[typing.Any], Iterable[typing.Any]]]` but got + # `Dict[str, DataLoader[Tensor]]`. 
individual_dataloaders, all_dataset_batches, ) @@ -200,6 +212,9 @@ def test_all_dataset_batches_smallest_dataset_exhausted(self) -> None: ) multi_dataloader = iter( MultiDataLoader( + # pyre-fixme[6]: For 1st argument expected `Dict[str, + # Union[DataLoader[typing.Any], Iterable[typing.Any]]]` but got + # `Dict[str, DataLoader[Tensor]]`. individual_dataloaders, all_dataset_batches, ) @@ -222,7 +237,12 @@ def test_custom_iterator(self) -> None: multi_dataloader = iter( MultiDataLoader( - individual_dataloaders, DataIterationStrategy(), CustomRandomIterator + # pyre-fixme[6]: For 1st argument expected `Dict[str, + # Union[DataLoader[typing.Any], Iterable[typing.Any]]]` but got + # `Dict[str, DataLoader[Tensor]]`. + individual_dataloaders, + DataIterationStrategy(), + CustomRandomIterator, ) ) @@ -243,6 +263,9 @@ def test_random_sampling_dataloader_wrap_around(self) -> None: multi_dataloader = iter( MultiDataLoader( + # pyre-fixme[6]: For 1st argument expected `Dict[str, + # Union[DataLoader[typing.Any], Iterable[typing.Any]]]` but got + # `Dict[str, DataLoader[Tensor]]`. individual_dataloaders, RandomizedBatchSampler( weights={"1": 1, "2": 100}, @@ -265,6 +288,9 @@ def test_random_sampling_dataloader_with_empty_data(self) -> None: multi_dataloader = iter( MultiDataLoader( + # pyre-fixme[6]: For 1st argument expected `Dict[str, + # Union[DataLoader[typing.Any], Iterable[typing.Any]]]` but got + # `Dict[str, DataLoader[Tensor]]`. individual_dataloaders, RandomizedBatchSampler( weights={"1": 1, "2": 100}, @@ -285,6 +311,9 @@ def test_random_sampling_dataloader_with_empty_data(self) -> None: with self.assertRaises(ValueError): individual_dataloaders = {"1": dataloader_1, "2": dataloader_2} MultiDataLoader( + # pyre-fixme[6]: For 1st argument expected `Dict[str, + # Union[DataLoader[typing.Any], Iterable[typing.Any]]]` but got + # `Dict[str, DataLoader[Tensor]]`. individual_dataloaders, RandomizedBatchSampler(weights={"1": 1, "2": 100}), ) @@ -296,6 +325,9 @@ def test_random_sampling_dataloader_smallest_dataset_exhausted(self) -> None: individual_dataloaders = {"1": dataloader_1, "2": dataloader_2} multi_dataloader = MultiDataLoader( + # pyre-fixme[6]: For 1st argument expected `Dict[str, + # Union[DataLoader[typing.Any], Iterable[typing.Any]]]` but got `Dict[str, + # DataLoader[Tensor]]`. individual_dataloaders, RandomizedBatchSampler( weights={"1": 1, "2": 100}, @@ -322,6 +354,9 @@ def test_random_sampling_dataloader_all_datasets_exhausted(self) -> None: individual_dataloaders = {"1": dataloader_1, "2": dataloader_2} multi_dataloader = MultiDataLoader( + # pyre-fixme[6]: For 1st argument expected `Dict[str, + # Union[DataLoader[typing.Any], Iterable[typing.Any]]]` but got `Dict[str, + # DataLoader[Tensor]]`. individual_dataloaders, RandomizedBatchSampler( weights={"1": 1, "2": 100}, @@ -348,6 +383,9 @@ def test_random_sampling_dataloader_restart_until_all_datasets_exhausted( multi_dataloader = iter( MultiDataLoader( + # pyre-fixme[6]: For 1st argument expected `Dict[str, + # Union[DataLoader[typing.Any], Iterable[typing.Any]]]` but got + # `Dict[str, DataLoader[Tensor]]`. individual_dataloaders, RandomizedBatchSampler( weights={"1": 100, "2": 1}, @@ -374,6 +412,9 @@ def test_inorder(self) -> None: multi_dataloader = iter( MultiDataLoader( + # pyre-fixme[6]: For 1st argument expected `Dict[str, + # Union[DataLoader[typing.Any], Iterable[typing.Any]]]` but got + # `Dict[str, DataLoader[Tensor]]`. 
                 individual_dataloaders,
                 in_order,
             )
@@ -402,6 +443,9 @@ def test_in_order_with_repetitions(self) -> None:
 
         multi_dataloader = iter(
             MultiDataLoader(
+                # pyre-fixme[6]: For 1st argument expected `Dict[str,
+                #  Union[DataLoader[typing.Any], Iterable[typing.Any]]]` but got
+                #  `Dict[str, DataLoader[Tensor]]`.
                 individual_dataloaders,
                 in_order,
             )
@@ -470,14 +514,22 @@ def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
         for _ in multi_dataloader:
             pass
 
+        # pyre-fixme[16]: Item `DataLoader` of `Union[DataLoader[typing.Any],
+        #  Iterable[typing.Any]]` has no attribute `iter_count`.
         self.assertEqual(multi_dataloader.individual_dataloaders["foo"].iter_count, 1)
+        # pyre-fixme[16]: Item `DataLoader` of `Union[DataLoader[typing.Any],
+        #  Iterable[typing.Any]]` has no attribute `iter_count`.
         self.assertEqual(multi_dataloader.individual_dataloaders["bar"].iter_count, 1)
         new_state_dict = multi_dataloader.state_dict()
 
         # Load state dict to reset to initial state
         multi_dataloader.load_state_dict(original_state_dict)
+        # pyre-fixme[16]: Item `DataLoader` of `Union[DataLoader[typing.Any],
+        #  Iterable[typing.Any]]` has no attribute `iter_count`.
         self.assertEqual(multi_dataloader.individual_dataloaders["foo"].iter_count, 0)
+        # pyre-fixme[16]: Item `DataLoader` of `Union[DataLoader[typing.Any],
+        #  Iterable[typing.Any]]` has no attribute `iter_count`.
         self.assertEqual(multi_dataloader.individual_dataloaders["bar"].iter_count, 0)
 
         # instantiate a new multi-dataloader with a new different name
@@ -492,9 +544,15 @@ def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
         new_multi_dataloader.load_state_dict(new_state_dict)
         # foo's count should be loaded correctly
         self.assertEqual(
-            new_multi_dataloader.individual_dataloaders["foo"].iter_count, 1
+            # pyre-fixme[16]: Item `DataLoader` of `Union[DataLoader[typing.Any],
+            #  Iterable[typing.Any]]` has no attribute `iter_count`.
+            new_multi_dataloader.individual_dataloaders["foo"].iter_count,
+            1,
         )
         # qux's iter_count should still be 0 because it was not in the original state dict
         self.assertEqual(
-            new_multi_dataloader.individual_dataloaders["qux"].iter_count, 0
+            # pyre-fixme[16]: Item `DataLoader` of `Union[DataLoader[typing.Any],
+            #  Iterable[typing.Any]]` has no attribute `iter_count`.
+            new_multi_dataloader.individual_dataloaders["qux"].iter_count,
+            0,
         )
diff --git a/tests/utils/data/test_profile_dataloader.py b/tests/utils/data/test_profile_dataloader.py
index 7499ccf64e..16d0542053 100644
--- a/tests/utils/data/test_profile_dataloader.py
+++ b/tests/utils/data/test_profile_dataloader.py
@@ -46,6 +46,8 @@ def test_profile_dataloader_profiler(self) -> None:
             timer = profile_dataloader(iterable, p)
         self.assertEqual(len(timer.recorded_durations["next(iter)"]), max_length)
 
+    # pyre-fixme[56]: Pyre was not able to infer the type of argument
+    #  `torch.cuda.is_available()` to decorator factory `unittest.skipUnless`.
     @unittest.skipUnless(
         torch.cuda.is_available(), reason="This test needs a GPU host to run."
     )
diff --git a/tests/utils/loggers/test_csv.py b/tests/utils/loggers/test_csv.py
index e61174a2a6..b416a4eb3c 100644
--- a/tests/utils/loggers/test_csv.py
+++ b/tests/utils/loggers/test_csv.py
@@ -26,5 +26,6 @@ def test_csv_log(self) -> None:
 
         with open(csv_path) as f:
             output = list(csv.DictReader(f))
+            # pyre-fixme[16]: `_DictReadMapping` has no attribute `__getitem__`.
             self.assertEqual(float(output[0][log_name]), log_value)
             self.assertEqual(int(output[0]["step"]), log_step)
diff --git a/tests/utils/loggers/test_in_memory.py b/tests/utils/loggers/test_in_memory.py
index 879a29f9c0..0aa61e0b01 100644
--- a/tests/utils/loggers/test_in_memory.py
+++ b/tests/utils/loggers/test_in_memory.py
@@ -21,6 +21,7 @@ def test_in_memory_log(self) -> None:
         logger.log(name="metric1", data=456.0, step=1)
         logger.log(name="metric1", data=789.0, step=2)
         # Test flushing.
+        # pyre-fixme[16]: `None` has no attribute `__enter__`.
         with captured_output() as (out, err):
             logger.flush()
             self.assertTrue(out.getvalue().startswith("OrderedDict(["))
@@ -54,6 +55,8 @@ def captured_output() -> None:
     old_out, old_err = sys.stdout, sys.stderr
     try:
         sys.stdout, sys.stderr = new_out, new_err
+        # pyre-fixme[7]: Expected `None` but got `Generator[Tuple[TextIO, TextIO],
+        #  typing.Any, typing.Any]`.
         yield sys.stdout, sys.stderr
     finally:
         sys.stdout, sys.stderr = old_out, old_err
diff --git a/tests/utils/loggers/test_tensorboard.py b/tests/utils/loggers/test_tensorboard.py
index e6757c2b50..e79a001dc0 100644
--- a/tests/utils/loggers/test_tensorboard.py
+++ b/tests/utils/loggers/test_tensorboard.py
@@ -85,6 +85,8 @@ def _test_distributed() -> None:
             assert test_path in logger.path
             assert invalid_path not in logger.path
 
+    # pyre-fixme[56]: Pyre was not able to infer the type of argument
+    #  `torch.distributed.is_available()` to decorator factory `unittest.skipUnless`.
     @unittest.skipUnless(
         dist.is_available(), reason="Torch distributed is needed to run"
     )
diff --git a/tests/utils/test_device.py b/tests/utils/test_device.py
index 0084aaf1a1..ee5ccf71f2 100644
--- a/tests/utils/test_device.py
+++ b/tests/utils/test_device.py
@@ -25,8 +25,12 @@
 
 
 class DeviceTest(unittest.TestCase):
+    # pyre-fixme[4]: Attribute must be annotated.
     cuda_available = torch.cuda.is_available()
 
+    # pyre-fixme[56]: Pyre was not able to infer the type of argument `not
+    #  torchtnt.tests.utils.test_device.DeviceTest.cuda_available` to decorator factory
+    #  `unittest.skipUnless`.
     @unittest.skipUnless(
         condition=(not cuda_available), reason="This test shouldn't run on a GPU host."
     )
@@ -176,6 +180,7 @@ class FrozenDataClass:
         new_data_class = copy_data_to_device(original_data_class, cuda_0)
         self.assertEqual(new_data_class.val.device.type, "cuda")
         with self.assertRaises(dataclasses.FrozenInstanceError):
+            # pyre-fixme[41]: Cannot reassign final attribute `val`.
            new_data_class.val = torch.tensor([1, 2, 3], device=cuda_0)
 
         # no-init field
diff --git a/tests/utils/test_distributed.py b/tests/utils/test_distributed.py
index 3a46b7ccd4..2c0c36ace3 100644
--- a/tests/utils/test_distributed.py
+++ b/tests/utils/test_distributed.py
@@ -47,6 +47,8 @@ def test_get_process_group_backend_gpu(self) -> None:
     def test_get_world_size_single(self) -> None:
         self.assertEqual(get_world_size(), 1)
 
+    # pyre-fixme[56]: Pyre was not able to infer the type of argument
+    #  `torch.distributed.is_available()` to decorator factory `unittest.skipUnless`.
     @unittest.skipUnless(
         torch.distributed.is_available(), reason="Torch distributed is needed to run"
     )
@@ -64,6 +66,8 @@ def _test_get_world_size(world_size: int) -> None:
         dist.init_process_group("gloo")
         assert get_world_size() == dist.get_world_size()
 
+    # pyre-fixme[56]: Pyre was not able to infer the type of argument
+    #  `torch.distributed.is_available()` to decorator factory `unittest.skipUnless`.
     @unittest.skipUnless(
         torch.distributed.is_available(), reason="Torch distributed is needed to run"
     )
@@ -82,6 +86,8 @@ def test_get_global_rank_single(self) -> None:
     def test_get_local_rank_single(self) -> None:
         self.assertEqual(get_local_rank(), 0)
 
+    # pyre-fixme[56]: Pyre was not able to infer the type of argument
+    #  `torch.distributed.is_available()` to decorator factory `unittest.skipUnless`.
     @unittest.skipUnless(
         torch.distributed.is_available(), reason="Torch distributed is needed to run"
     )
@@ -100,6 +106,8 @@ def _destroy_process_group() -> None:
         destroy_process_group()
         assert not torch.distributed.is_initialized()
 
+    # pyre-fixme[56]: Pyre was not able to infer the type of argument
+    #  `torch.distributed.is_available()` to decorator factory `unittest.skipUnless`.
     @unittest.skipUnless(
         torch.distributed.is_available(), reason="Torch distributed is needed to run"
     )
@@ -109,6 +117,8 @@ def test_destroy_process_group(self) -> None:
         config = get_pet_launch_config(2)
         launcher.elastic_launch(config, entrypoint=self._destroy_process_group)()
 
+    # pyre-fixme[56]: Pyre was not able to infer the type of argument
+    #  `torch.distributed.is_available()` to decorator factory `unittest.skipUnless`.
     @unittest.skipUnless(
         torch.distributed.is_available(), reason="Torch distributed is needed to run"
     )
@@ -131,6 +141,8 @@ def _test_ddp_gather_uneven_tensors() -> None:
             assert len(result[idx]) == idx
             assert (result[idx] == torch.ones_like(result[idx])).all()
 
+    # pyre-fixme[56]: Pyre was not able to infer the type of argument
+    #  `torch.distributed.is_available()` to decorator factory `unittest.skipUnless`.
     @unittest.skipUnless(
         torch.distributed.is_available(), reason="Torch distributed is needed to run"
     )
@@ -153,6 +165,8 @@ def _test_ddp_gather_uneven_tensors_multidim() -> None:
             assert val.shape == (idx + 1, 4 - idx)
             assert (val == torch.ones_like(val)).all()
 
+    # pyre-fixme[56]: Pyre was not able to infer the type of argument
+    #  `torch.cuda.is_available()` to decorator factory `unittest.skipUnless`.
     @unittest.skipUnless(
         condition=torch.cuda.is_available(),
         reason="This test should only run on a GPU host.",
     )
@@ -185,6 +199,7 @@ def foo() -> int:
         assert x == 1
 
     @patch("torchtnt.utils.distributed.get_global_rank")
+    # pyre-fixme[2]: Parameter must be annotated.
     def test_rank_zero_fn_rank_non_zero(self, get_global_rank) -> None:
         get_global_rank.return_value = 1
 
@@ -197,6 +212,7 @@ def foo() -> int:
 
     def test_revert_sync_batchnorm(self) -> None:
         original_batchnorm = torch.nn.modules.batchnorm.BatchNorm1d(4)
+        # pyre-fixme[16]: `Optional` has no attribute `random_`.
         original_batchnorm.running_mean.random_(-1, 1)
         original_batchnorm.running_var.random_(0, 1)
         model = torch.nn.Sequential(
@@ -211,9 +227,13 @@ def test_revert_sync_batchnorm(self) -> None:
             self.assertIsInstance(batch_norm, torch.nn.modules.batchnorm._BatchNorm)
             self.assertNotIsInstance(batch_norm, torch.nn.SyncBatchNorm)
             self.assertTrue(
+                # pyre-fixme[6]: For 2nd argument expected `Tensor` but got
+                #  `Optional[Tensor]`.
                 torch.equal(batch_norm.running_mean, original_batchnorm.running_mean)
             )
             self.assertTrue(
+                # pyre-fixme[6]: For 2nd argument expected `Tensor` but got
+                #  `Optional[Tensor]`.
                 torch.equal(batch_norm.running_var, original_batchnorm.running_var)
             )
 
@@ -224,6 +244,10 @@ def _full_sync_worker(cls, coherence_mode: Optional[str]) -> bool:
             val = True
         else:
             val = False
+        # pyre-fixme[6]: For 2nd argument expected
+        #  `Union[typing_extensions.Literal['all'], typing_extensions.Literal['any'],
+        #  typing_extensions.Literal['rank_zero'], float, int]` but got
+        #  `Optional[str]`.
         return sync_bool(val, coherence_mode=coherence_mode)
 
     def test_sync_bool_single_process(self) -> None:
@@ -232,6 +256,8 @@ def test_sync_bool_single_process(self) -> None:
         # these should be the same in a single process case
         self.assertEqual(val, new_val)
 
+    # pyre-fixme[56]: Pyre was not able to infer the type of argument
+    #  `torch.distributed.is_available()` to decorator factory `unittest.skipUnless`.
     @unittest.skipUnless(
         torch.distributed.is_available(), reason="Torch distributed is needed to run"
     )
@@ -244,6 +270,8 @@ def test_sync_bool_multi_process_coherence_mode_rank_zero(self) -> None:
         self.assertTrue(result[0])
         self.assertTrue(result[1])
 
+    # pyre-fixme[56]: Pyre was not able to infer the type of argument
+    #  `torch.distributed.is_available()` to decorator factory `unittest.skipUnless`.
     @unittest.skipUnless(
         torch.distributed.is_available(), reason="Torch distributed is needed to run"
     )
@@ -256,6 +284,8 @@ def test_sync_bool_multi_process_coherence_mode_any(self) -> None:
         self.assertTrue(result[0])
         self.assertTrue(result[1])
 
+    # pyre-fixme[56]: Pyre was not able to infer the type of argument
+    #  `torch.distributed.is_available()` to decorator factory `unittest.skipUnless`.
     @unittest.skipUnless(
         torch.distributed.is_available(), reason="Torch distributed is needed to run"
     )
@@ -268,6 +298,8 @@ def test_sync_bool_multi_process_coherence_mode_all(self) -> None:
         self.assertFalse(result[0])
         self.assertFalse(result[1])
 
+    # pyre-fixme[56]: Pyre was not able to infer the type of argument
+    #  `torch.distributed.is_available()` to decorator factory `unittest.skipUnless`.
     @unittest.skipUnless(
         torch.distributed.is_available(), reason="Torch distributed is needed to run"
     )
@@ -278,6 +310,8 @@ def test_sync_bool_multi_process_coherence_mode_int_false(self) -> None:
         self.assertFalse(result[0])
         self.assertFalse(result[1])
 
+    # pyre-fixme[56]: Pyre was not able to infer the type of argument
+    #  `torch.distributed.is_available()` to decorator factory `unittest.skipUnless`.
     @unittest.skipUnless(
         torch.distributed.is_available(), reason="Torch distributed is needed to run"
     )
@@ -288,6 +322,8 @@ def test_sync_bool_multi_process_coherence_mode_int_true(self) -> None:
         self.assertTrue(result[0])
         self.assertTrue(result[1])
 
+    # pyre-fixme[56]: Pyre was not able to infer the type of argument
+    #  `torch.distributed.is_available()` to decorator factory `unittest.skipUnless`.
     @unittest.skipUnless(
         torch.distributed.is_available(), reason="Torch distributed is needed to run"
     )
@@ -298,6 +334,8 @@ def test_sync_bool_multi_process_coherence_mode_float_true(self) -> None:
         self.assertTrue(result[0])
         self.assertTrue(result[1])
 
+    # pyre-fixme[56]: Pyre was not able to infer the type of argument
+    #  `torch.distributed.is_available()` to decorator factory `unittest.skipUnless`.
     @unittest.skipUnless(
         torch.distributed.is_available(), reason="Torch distributed is needed to run"
     )
diff --git a/tests/utils/test_early_stop_checker.py b/tests/utils/test_early_stop_checker.py
index a231f39df0..3d47eb8cb6 100644
--- a/tests/utils/test_early_stop_checker.py
+++ b/tests/utils/test_early_stop_checker.py
@@ -13,6 +13,7 @@
 
 
 class EarlyStopCheckerTest(unittest.TestCase):
+    # pyre-fixme[4]: Attribute must be annotated.
     cuda_available = torch.cuda.is_available()
 
     def test_early_stop_patience(self) -> None:
@@ -366,6 +367,10 @@ def test_early_stop_invalid_mode(self) -> None:
 
         # Check for invalid mode
         with self.assertRaisesRegex(ValueError, "Got `invalid`"):
+            # pyre-fixme[6]: For 1st argument expected
+            #  `Union[typing_extensions.Literal['max'],
+            #  typing_extensions.Literal['min']]` but got
+            #  `typing_extensions.Literal['invalid']`.
             EarlyStopChecker("invalid", 3)
 
     def test_early_stop_invalid_min_delta(self) -> None:
diff --git a/tests/utils/test_env.py b/tests/utils/test_env.py
index 144a4b9403..4e66d06640 100644
--- a/tests/utils/test_env.py
+++ b/tests/utils/test_env.py
@@ -48,6 +48,7 @@ def _test_worker_fn(init_pg_explicit: bool) -> None:
             raise AssertionError(
                 f"Expected different process group backend: received {pg_backend}, expected {expected_pg_backend}"
             )
+        # pyre-fixme[7]: Expected `None` but got `device`.
         return device
 
     def _test_launch_worker(
diff --git a/tests/utils/test_memory.py b/tests/utils/test_memory.py
index e00149d3a2..ce73c0e252 100644
--- a/tests/utils/test_memory.py
+++ b/tests/utils/test_memory.py
@@ -131,17 +131,39 @@ def test_get_tensor_size_bytes_map_with_nested_input(self) -> None:
         self.assertEqual(len(tensor_map), 3)
         self.assertTrue(inputs["x"] in tensor_map)
         self.assertEqual(
+            # pyre-fixme[6]: For 1st argument expected `Tensor` but got
+            #  `Union[Dict[str, Tensor], Tensor]`.
             tensor_map[inputs["x"]],
+            # pyre-fixme[16]: Item `Dict` of `Union[Dict[str, torch._tensor.Tensor],
+            #  Tensor]` has no attribute `size`.
+            # pyre-fixme[16]: Item `Dict` of `Union[Dict[str, torch._tensor.Tensor],
+            #  Tensor]` has no attribute `element_size`.
             inputs["x"].size().numel() * inputs["x"].element_size(),
         )
+        # pyre-fixme[6]: For 1st argument expected `Union[None, List[typing.Any],
+        #  int, slice, Tensor, typing.Tuple[typing.Any, ...]]` but got `str`.
         self.assertTrue(inputs["y"]["z"] in tensor_map)
         self.assertEqual(
+            # pyre-fixme[6]: For 1st argument expected `Union[None,
+            #  List[typing.Any], int, slice, Tensor, typing.Tuple[typing.Any, ...]]`
+            #  but got `str`.
             tensor_map[inputs["y"]["z"]],
+            # pyre-fixme[6]: For 1st argument expected `Union[None,
+            #  List[typing.Any], int, slice, Tensor, typing.Tuple[typing.Any, ...]]`
+            #  but got `str`.
             inputs["y"]["z"].size().numel() * inputs["y"]["z"].element_size(),
         )
+        # pyre-fixme[6]: For 1st argument expected `Union[None, List[typing.Any],
+        #  int, slice, Tensor, typing.Tuple[typing.Any, ...]]` but got `str`.
         self.assertTrue(inputs["y"]["t"] in tensor_map)
         self.assertEqual(
+            # pyre-fixme[6]: For 1st argument expected `Union[None,
+            #  List[typing.Any], int, slice, Tensor, typing.Tuple[typing.Any, ...]]`
+            #  but got `str`.
             tensor_map[inputs["y"]["t"]],
+            # pyre-fixme[6]: For 1st argument expected `Union[None,
+            #  List[typing.Any], int, slice, Tensor, typing.Tuple[typing.Any, ...]]`
+            #  but got `str`.
inputs["y"]["t"].size().numel() * inputs["y"]["t"].element_size(), ) diff --git a/tests/utils/test_misc.py b/tests/utils/test_misc.py index 398e5cdbc2..a5f07236fd 100644 --- a/tests/utils/test_misc.py +++ b/tests/utils/test_misc.py @@ -58,12 +58,26 @@ def test_transfer_weights(self) -> None: def test_transfer_batch_norm_stats(self) -> None: module1 = torch.nn.BatchNorm2d(3) # change running mean and var + # pyre-fixme[8]: Attribute has type `Optional[Tensor]`; used as `int`. + # pyre-fixme[58]: `+` is not supported for operand types + # `Optional[torch._tensor.Tensor]` and `int`. module1.running_mean = module1.running_mean + 2 + # pyre-fixme[8]: Attribute has type `Optional[Tensor]`; used as `int`. + # pyre-fixme[58]: `+` is not supported for operand types + # `Optional[torch._tensor.Tensor]` and `int`. module1.running_var = module1.running_var + 4 module2 = torch.nn.BatchNorm2d(3) + # pyre-fixme[6]: For 1st argument expected `Tensor` but got `Optional[Tensor]`. + # pyre-fixme[6]: For 2nd argument expected `Tensor` but got `Optional[Tensor]`. self.assertFalse(torch.equal(module1.running_mean, module2.running_mean)) + # pyre-fixme[6]: For 1st argument expected `Tensor` but got `Optional[Tensor]`. + # pyre-fixme[6]: For 2nd argument expected `Tensor` but got `Optional[Tensor]`. self.assertFalse(torch.equal(module1.running_var, module2.running_var)) transfer_batch_norm_stats(module1, module2) + # pyre-fixme[6]: For 1st argument expected `Tensor` but got `Optional[Tensor]`. + # pyre-fixme[6]: For 2nd argument expected `Tensor` but got `Optional[Tensor]`. self.assertTrue(torch.equal(module1.running_mean, module2.running_mean)) + # pyre-fixme[6]: For 1st argument expected `Tensor` but got `Optional[Tensor]`. + # pyre-fixme[6]: For 2nd argument expected `Tensor` but got `Optional[Tensor]`. self.assertTrue(torch.equal(module1.running_var, module2.running_var)) diff --git a/tests/utils/test_oom.py b/tests/utils/test_oom.py index 4333e9eb4a..2c7357be42 100644 --- a/tests/utils/test_oom.py +++ b/tests/utils/test_oom.py @@ -22,6 +22,7 @@ class OomTest(unittest.TestCase): + # pyre-fixme[4]: Attribute must be annotated. cuda_available = torch.cuda.is_available() def test_is_out_of_cpu_memory(self) -> None: @@ -58,6 +59,9 @@ def test_is_out_of_memory_error(self) -> None: @unittest.skipUnless( condition=cuda_available, reason="This test needs a GPU host to run." ) + # pyre-fixme[56]: Pyre was not able to infer the type of argument + # `torchtnt.utils.version.is_torch_version_geq_2_0()` to decorator factory + # `unittest.skipUnless`. @unittest.skipUnless( condition=is_torch_version_geq_2_0(), reason="This test needs changes from PyTorch 2.0 to run.", diff --git a/tests/utils/test_prepare_module.py b/tests/utils/test_prepare_module.py index 76368223f2..2dff0f66e1 100644 --- a/tests/utils/test_prepare_module.py +++ b/tests/utils/test_prepare_module.py @@ -30,11 +30,14 @@ class PrepareModelTest(unittest.TestCase): + # pyre-fixme[4]: Attribute must be annotated. cuda_available = torch.cuda.is_available() @unittest.skipUnless( condition=(cuda_available), reason="This test should run on a GPU host." ) + # pyre-fixme[56]: Pyre was not able to infer the type of argument + # `torch.distributed.is_available()` to decorator factory `unittest.skipUnless`. @unittest.skipUnless( torch.distributed.is_available(), reason="Torch distributed is needed to run" ) @@ -57,6 +60,8 @@ def _test_prepare_ddp() -> None: @unittest.skipUnless( condition=(cuda_available), reason="This test should run on a GPU host." 
     )
+    # pyre-fixme[56]: Pyre was not able to infer the type of argument
+    #  `torch.distributed.is_available()` to decorator factory `unittest.skipUnless`.
     @unittest.skipUnless(
         torch.distributed.is_available(), reason="Torch distributed is needed to run"
     )
@@ -72,6 +77,8 @@ def _test_prepare_fsdp() -> None:
         tc = unittest.TestCase()
         tc.assertTrue(isinstance(fsdp_module, FSDP))
 
+    # pyre-fixme[56]: Pyre was not able to infer the type of argument
+    #  `torch.distributed.is_available()` to decorator factory `unittest.skipUnless`.
     @unittest.skipUnless(
         torch.distributed.is_available(), reason="Torch distributed is needed to run"
     )
@@ -115,6 +122,9 @@ def _test_is_fsdp_module() -> None:
     @unittest.skipUnless(
         torch.distributed.is_available(), reason="Torch distributed is needed to run"
     )
+    # pyre-fixme[56]: Pyre was not able to infer the type of argument
+    #  `torch.cuda.is_available() and torch.cuda.device_count() > 2` to decorator
+    #  factory `unittest.skipUnless`.
     @unittest.skipUnless(
         condition=torch.cuda.is_available() and torch.cuda.device_count() > 2,
         reason="This test needs 2 GPUs to run.",
     )
@@ -123,6 +133,8 @@ def test_is_fsdp_module(self) -> None:
         config = get_pet_launch_config(2)
         launcher.elastic_launch(config, entrypoint=self._test_is_fsdp_module)()
 
+    # pyre-fixme[56]: Pyre was not able to infer the type of argument
+    #  `torch.distributed.is_available()` to decorator factory `unittest.skipUnless`.
     @unittest.skipUnless(
         torch.distributed.is_available(), reason="Torch distributed is needed to run"
     )
diff --git a/tests/utils/test_timer.py b/tests/utils/test_timer.py
index 8505671201..ef8090b2ba 100644
--- a/tests/utils/test_timer.py
+++ b/tests/utils/test_timer.py
@@ -78,6 +78,8 @@ def test_timer_context_manager(self) -> None:
             timer.recorded_durations["action_4"][0], intervals[2]
         )
 
+    # pyre-fixme[56]: Pyre was not able to infer the type of argument
+    #  `torch.cuda.is_available()` to decorator factory `unittest.skipUnless`.
     @unittest.skipUnless(
         condition=torch.cuda.is_available(), reason="This test needs a GPU host to run."
     )
@@ -214,6 +216,8 @@ def _get_synced_durations_histogram_multi_process() -> None:
         tc = unittest.TestCase()
         tc.assertEqual(durations, expected_durations)
 
+    # pyre-fixme[56]: Pyre was not able to infer the type of argument
+    #  `torch.distributed.is_available()` to decorator factory `unittest.skipUnless`.
     @unittest.skipUnless(
         condition=dist.is_available(),
         reason="This test should only run if torch.distributed is available.",
     )
@@ -235,6 +239,8 @@ def _full_sync_worker_without_timeout(cls) -> bool:
         dist.init_process_group("gloo")
         process_group = dist.group.WORLD
         interval_threshold = timedelta(seconds=5)
+        # pyre-fixme[6]: For 2nd argument expected `ProcessGroup` but got
+        #  `Optional[ProcessGroup]`.
         fsp_timer = FullSyncPeriodicTimer(interval_threshold, process_group)
         return fsp_timer.check()
 
@@ -243,6 +249,8 @@ def _full_sync_worker_with_timeout(cls, timeout: int) -> bool:
         dist.init_process_group("gloo")
         process_group = dist.group.WORLD
         interval_threshold = timedelta(seconds=5)
+        # pyre-fixme[6]: For 2nd argument expected `ProcessGroup` but got
+        #  `Optional[ProcessGroup]`.
         fsp_timer = FullSyncPeriodicTimer(interval_threshold, process_group)
         time.sleep(timeout)
         fsp_timer.check()  # self._prev_work is assigned, next time the check is called, it will be executed