diff --git a/docs/nlp/customize_encoder.ipynb b/docs/nlp/customize_encoder.ipynb
index 7e6fd0f32af..92baee21da4 100644
--- a/docs/nlp/customize_encoder.ipynb
+++ b/docs/nlp/customize_encoder.ipynb
@@ -497,7 +497,7 @@
    "source": [
     "#### Customize Feedforward Layer\n",
     "\n",
-    "Similiarly, one could also customize the feedforward layer.\n",
+    "Similarly, one could also customize the feedforward layer.\n",
     "\n",
     "See [the source of `nlp.layers.GatedFeedforward`](https://github.com/tensorflow/models/blob/master/official/nlp/modeling/layers/gated_feedforward.py) for how to implement a customized feedforward layer.\n",
     "\n",
diff --git a/docs/vision/object_detection.ipynb b/docs/vision/object_detection.ipynb
index f27c4b0d509..619bdaa1d71 100644
--- a/docs/vision/object_detection.ipynb
+++ b/docs/vision/object_detection.ipynb
@@ -218,7 +218,7 @@
 "# Need to provide\n",
 "  # 1. image_dir: where images are present\n",
 "  # 2. object_annotations_file: where annotations are listed in json format\n",
-"  # 3. output_file_prefix: where to write output convered TFRecords files\n",
+"  # 3. output_file_prefix: where to write output converted TFRecords files\n",
 "python -m official.vision.data.create_coco_tf_record --logtostderr \\\n",
 "  --image_dir=${TRAIN_DATA_DIR} \\\n",
 "  --object_annotations_file=${TRAIN_ANNOTATION_FILE_DIR} \\\n",
diff --git a/docs/vision/semantic_segmentation.ipynb b/docs/vision/semantic_segmentation.ipynb
index 76e1230ad1e..ad32ec0edb3 100644
--- a/docs/vision/semantic_segmentation.ipynb
+++ b/docs/vision/semantic_segmentation.ipynb
@@ -420,7 +420,7 @@
 "exp_config.task.train_data.dtype = 'float32'\n",
 "exp_config.task.train_data.output_size = [HEIGHT, WIDTH]\n",
 "exp_config.task.train_data.preserve_aspect_ratio = False\n",
-"exp_config.task.train_data.seed = 21 # Reproducable Training Data\n",
+"exp_config.task.train_data.seed = 21 # Reproducible Training Data\n",
 "\n",
 "# Validation Data Config\n",
 "exp_config.task.validation_data.input_path = val_data_tfrecords\n",
@@ -429,7 +429,7 @@
 "exp_config.task.validation_data.output_size = [HEIGHT, WIDTH]\n",
 "exp_config.task.validation_data.preserve_aspect_ratio = False\n",
 "exp_config.task.validation_data.groundtruth_padded_size = [HEIGHT, WIDTH]\n",
-"exp_config.task.validation_data.seed = 21 # Reproducable Validation Data\n",
+"exp_config.task.validation_data.seed = 21 # Reproducible Validation Data\n",
 "exp_config.task.validation_data.resize_eval_groundtruth = True # To enable validation loss"
 ]
 },
diff --git a/research/adversarial_text/graphs.py b/research/adversarial_text/graphs.py
index 9610a698dd0..4d84f5260ac 100644
--- a/research/adversarial_text/graphs.py
+++ b/research/adversarial_text/graphs.py
@@ -45,7 +45,7 @@
 flags.DEFINE_integer('batch_size', 64, 'Size of the batch.')
 flags.DEFINE_integer('num_timesteps', 100, 'Number of timesteps for BPTT')
 
-# Model architechture
+# Model architecture
 flags.DEFINE_bool('bidir_lstm', False, 'Whether to build a bidirectional LSTM.')
 flags.DEFINE_bool('single_label', True, 'Whether the sequence has a single '
                   'label, for optimization.')
diff --git a/research/audioset/vggish/README.md b/research/audioset/vggish/README.md
index 46994e69ab1..188ed136606 100644
--- a/research/audioset/vggish/README.md
+++ b/research/audioset/vggish/README.md
@@ -23,7 +23,7 @@ VGGish depends on the following Python packages:
 
 These are all easily installable via, e.g., `pip install numpy` (as in the
 sample installation session below). Any reasonably recent version of these
-packages shold work.
+packages should work.
 
 VGGish also requires downloading two data files:
 
diff --git a/research/audioset/vggish/vggish_smoke_test.py b/research/audioset/vggish/vggish_smoke_test.py
index 3c52b03da29..dae9ec06871 100644
--- a/research/audioset/vggish/vggish_smoke_test.py
+++ b/research/audioset/vggish/vggish_smoke_test.py
@@ -17,7 +17,7 @@
 
 This is a simple smoke test of a local install of VGGish and its associated
 downloaded files. We create a synthetic sound, extract log mel spectrogram
-features, run them through VGGish, post-process the embedding ouputs, and
+features, run them through VGGish, post-process the embedding outputs, and
 check some simple statistics of the results, allowing for variations that
 might occur due to platform/version differences in the libraries we use.
 
diff --git a/research/autoaugment/train_cifar.py b/research/autoaugment/train_cifar.py
index 9e3942ee26b..0b0e9aa2c76 100644
--- a/research/autoaugment/train_cifar.py
+++ b/research/autoaugment/train_cifar.py
@@ -353,7 +353,7 @@ def _compute_final_accuracies(self, meval):
     return valid_accuracy, test_accuracy
 
   def run_model(self):
-    """Trains and evalutes the image model."""
+    """Trains and evaluates the image model."""
     hparams = self.hparams
 
     # Build the child graph
diff --git a/research/cvt_text/base/embeddings.py b/research/cvt_text/base/embeddings.py
index 8863f547efb..0f638ee6031 100644
--- a/research/cvt_text/base/embeddings.py
+++ b/research/cvt_text/base/embeddings.py
@@ -52,7 +52,7 @@
     '00-00', '00-00-00', '0-00-00', '00-00-0000', '0-00-0000', '0000-00-00',
     '00-0-00-0', '00000000', '0:00.000', '00:00.000', '0%', '00%', '00.'
     '0000.', '0.0bn', '0.0m', '0-', '00-',
-    # ontonotes uses **f to represent formulas and -amp- instead of amperstands
+    # ontonotes uses **f to represent formulas and -amp- instead of ampersands
     '**f', '-amp-'
 ]
 SPECIAL_TOKENS = ['', '', '', '', '']
diff --git a/research/efficient-hrl/agents/ddpg_agent.py b/research/efficient-hrl/agents/ddpg_agent.py
index 904eb650271..303711c3c1f 100644
--- a/research/efficient-hrl/agents/ddpg_agent.py
+++ b/research/efficient-hrl/agents/ddpg_agent.py
@@ -189,7 +189,7 @@ def actor_net(self, states, stop_gradients=False):
     Args:
       states: A [batch_size, num_state_dims] tensor representing a batch
         of states.
-      stop_gradients: (boolean) if true, gradients cannot be propogated through
+      stop_gradients: (boolean) if true, gradients cannot be propagated through
         this operation.
     Returns:
       A [batch_size, num_action_dims] tensor of actions.
diff --git a/research/efficient-hrl/eval.py b/research/efficient-hrl/eval.py
index 4f5a4b20a53..690d0ef77db 100644
--- a/research/efficient-hrl/eval.py
+++ b/research/efficient-hrl/eval.py
@@ -210,7 +210,7 @@ def get_eval_step(uvf_agent,
     mode: a string representing the mode=[train, explore, eval].
 
   Returns:
-    A collect_experience_op that excute an action and store into the
+    A collect_experience_op that executes an action and stores it into the
     replay_buffer
   """
 
diff --git a/research/efficient-hrl/train.py b/research/efficient-hrl/train.py
index a40e81dbec6..b0d393fa674 100644
--- a/research/efficient-hrl/train.py
+++ b/research/efficient-hrl/train.py
@@ -70,7 +70,7 @@ def collect_experience(tf_env, agent, meta_agent, state_preprocess,
     disable_agent_reset: A boolean that disables agent from resetting.
 
   Returns:
-    A collect_experience_op that excute an action and store into the
+    A collect_experience_op that executes an action and stores it into the
     replay_buffers
   """
   tf_env.start_collect()
diff --git a/research/lfads/distributions.py b/research/lfads/distributions.py
index 351d019af2b..6cead11fd9e 100644
--- a/research/lfads/distributions.py
+++ b/research/lfads/distributions.py
@@ -434,7 +434,7 @@ def __init__(self, zs, prior_zs):
     """
     # L = -KL + log p(x|z), to maximize bound on likelihood
     # -L = KL - log p(x|z), to minimize bound on NLL
-    # so 'KL cost' is postive KL divergence
+    # so 'KL cost' is positive KL divergence
     kl_b = 0.0
     for z, prior_z in zip(zs, prior_zs):
       assert isinstance(z, Gaussian)
@@ -475,7 +475,7 @@ def __init__(self, post_zs, prior_z_process):
 
     # L = -KL + log p(x|z), to maximize bound on likelihood
     # -L = KL - log p(x|z), to minimize bound on NLL
-    # so 'KL cost' is postive KL divergence
+    # so 'KL cost' is positive KL divergence
     z0_bxu = post_zs[0].sample
     logq_bxu = post_zs[0].logp(z0_bxu)
     logp_bxu = prior_z_process.logp_t(z0_bxu)
diff --git a/research/lfads/lfads.py b/research/lfads/lfads.py
index 925484c62eb..013ac0b1a10 100644
--- a/research/lfads/lfads.py
+++ b/research/lfads/lfads.py
@@ -1127,7 +1127,7 @@ def randomize_example_idxs_mod_batch_size(nexamples, batch_size):
       batch_size: Number of elements in batch.
 
     Returns:
-      The randomized, properly shaped indicies.
+      The randomized, properly shaped indices.
     """
     assert nexamples > batch_size, "Problems"
     bmrem = batch_size - nexamples % batch_size
@@ -1164,7 +1164,7 @@ def shuffle_spikes_in_time(self, data_bxtxd):
     max_counts = np.max(data_bxtxd)
     S_bxtxd = np.zeros([B,T,N])
 
-    # Intuitively, shuffle spike occurances, 0 or 1, but since we have counts,
+    # Intuitively, shuffle spike occurrences, 0 or 1, but since we have counts,
     # Do it over and over again up to the max count.
     for mc in range(1,max_counts+1):
       idxs = np.nonzero(data_bxtxd >= mc)
diff --git a/research/lfads/run_lfads.py b/research/lfads/run_lfads.py
index bd1c0d5e4de..24ca06be3e7 100755
--- a/research/lfads/run_lfads.py
+++ b/research/lfads/run_lfads.py
@@ -136,7 +136,7 @@
 flags.DEFINE_integer("ext_input_dim", EXT_INPUT_DIM, "Dimension of external \
 inputs")
 flags.DEFINE_integer("num_steps_for_gen_ic", NUM_STEPS_FOR_GEN_IC,
-                     "Number of steps to train the generator initial conditon.")
+                     "Number of steps to train the generator initial condition.")
 
 
 # If there are observed inputs, there are two ways to add that observed
@@ -193,7 +193,7 @@
                    "Input scaling for rec weights in generator.")
 
 # KL DISTRIBUTIONS
-# If you don't know what you are donig here, please leave alone, the
-# defaults should be fine for most cases, irregardless of other parameters.
+# If you don't know what you are doing here, please leave alone, the
+# defaults should be fine for most cases, regardless of other parameters.
 #
 # If you don't want the prior variance to be learned, set the
@@ -379,7 +379,7 @@
 # what those LL lower bounds mean anymore, and they cannot be compared
 # (AFAIK).
 flags.DEFINE_float("kl_ic_weight", KL_IC_WEIGHT,
-                   "Strength of KL weight on initial conditions KL penatly.")
+                   "Strength of KL weight on initial conditions KL penalty.")
 flags.DEFINE_float("kl_co_weight", KL_CO_WEIGHT,
                    "Strength of KL weight on controller output KL penalty.")
 
diff --git a/research/lfads/utils.py b/research/lfads/utils.py
index e64825ffc1d..8f2c93a8007 100644
--- a/research/lfads/utils.py
+++ b/research/lfads/utils.py
@@ -49,7 +49,7 @@ def linear(x, out_size, do_bias=True, alpha=1.0, identity_if_possible=False,
   configurations.
 
   Args:
-    x: input The tensor to tranformation.
+    x: The input tensor to transform.
     out_size: The integer size of non-batch output dimension.
     do_bias (optional): Add a learnable bias vector to the operation.
     alpha (optional): A multiplicative scaling for the weight initialization
@@ -171,7 +171,7 @@ def write_data(data_fname, data_dict, use_json=False, compression=None):
-  """Write data in HD5F format.
+  """Write data in HDF5 format.
 
   Args:
-    data_fname: The filename of teh file in which to write the data.
+    data_fname: The filename of the file in which to write the data.
     data_dict: The dictionary of data to write.  The keys are strings
       and the values are numpy arrays.
     use_json (optional): human readable format for simple items
diff --git a/research/object_detection/meta_architectures/center_net_meta_arch.py b/research/object_detection/meta_architectures/center_net_meta_arch.py
index 66a87431d04..a9b6dffa3ee 100644
--- a/research/object_detection/meta_architectures/center_net_meta_arch.py
+++ b/research/object_detection/meta_architectures/center_net_meta_arch.py
@@ -2269,7 +2269,7 @@ def __new__(cls,
       estimation, the class id should correspond to the "human" class. Note
       that the ID is 0-based, meaning that class 0 corresponds to the first
       non-background object class.
-    keypoint_indices: A list of integers representing the indicies of the
+    keypoint_indices: A list of integers representing the indices of the
       keypoints to be considered in this task. This is used to retrieve the
       subset of the keypoints from gt_keypoints that should be considered in
       this task.
@@ -4247,7 +4247,7 @@ def postprocess(self, prediction_dict, true_image_shapes, **params):
         tf.one_hot(
             postprocess_dict[fields.DetectionResultFields.detection_classes],
             depth=num_classes), tf.bool)
-    # Surpress the scores of those unselected classes to be zeros. Otherwise,
+    # Suppress the scores of the unselected classes to zero. Otherwise,
     # the downstream NMS ops might be confused and introduce issues.
     multiclass_scores = tf.where(
         class_mask, multiclass_scores, tf.zeros_like(multiclass_scores))
diff --git a/research/object_detection/meta_architectures/faster_rcnn_meta_arch.py b/research/object_detection/meta_architectures/faster_rcnn_meta_arch.py
index a7dd5b6efa4..71910215751 100644
--- a/research/object_detection/meta_architectures/faster_rcnn_meta_arch.py
+++ b/research/object_detection/meta_architectures/faster_rcnn_meta_arch.py
@@ -88,7 +88,7 @@
 `_postprocess_rpn` is run as part of the `_predict_second_stage` using
 `resized_image_shapes` to clip proposals, perform non-max suppression and
 normalize them. Subsequently, `_postprocess_box_classifier` is run as part of
-`_predict_third_stage` using `true_image_shapes` to clip detections, peform
+`_predict_third_stage` using `true_image_shapes` to clip detections, perform
 non-max suppression and normalize them. In this case, the `postprocess` method
 skips both `_postprocess_rpn` and `_postprocess_box_classifier`.
""" diff --git a/research/rebar/rebar.py b/research/rebar/rebar.py index 1c61c3ab22b..1f947784702 100644 --- a/research/rebar/rebar.py +++ b/research/rebar/rebar.py @@ -864,7 +864,7 @@ def get_rebar_gradient(self): return total_grads, debug, variance_objective ### -# Create varaints +# Create variants ### class SBNSimpleMuProp(SBN): def _create_loss(self): diff --git a/research/rebar/utils.py b/research/rebar/utils.py index ab2cb79bccf..01bdd5426fb 100644 --- a/research/rebar/utils.py +++ b/research/rebar/utils.py @@ -27,7 +27,7 @@ import tensorflow as tf # -# Python utlities +# Python utilities # def exp_moving_average(x, alpha=0.9): res = [] diff --git a/research/vid2depth/model.py b/research/vid2depth/model.py index 7b73b5edf8b..c23e73fd288 100644 --- a/research/vid2depth/model.py +++ b/research/vid2depth/model.py @@ -183,7 +183,7 @@ def build_loss(self): if i > j: # Need to inverse egomotion when going back in sequence. egomotion_mult *= -1 - # For compatiblity with SfMLearner, interpret all egomotion vectors + # For compatibility with SfMLearner, interpret all egomotion vectors # as pointing toward the middle frame. Note that unlike SfMLearner, # each vector captures the motion to/from its next frame, and not # the center frame. Although with seq_length == 3, there is no diff --git a/research/vid2depth/repo.bzl b/research/vid2depth/repo.bzl index 25830cd20a3..21fb4cd906a 100644 --- a/research/vid2depth/repo.bzl +++ b/research/vid2depth/repo.bzl @@ -1,6 +1,6 @@ """ TensorFlow Http Archive -Modified http_arhive that allows us to override the TensorFlow commit that is +Modified http_archive that allows us to override the TensorFlow commit that is downloaded by setting an environment variable. This override is to be used for testing purposes. diff --git a/research/vid2depth/util.py b/research/vid2depth/util.py index 2cd488ae87b..85913bbe899 100644 --- a/research/vid2depth/util.py +++ b/research/vid2depth/util.py @@ -59,7 +59,7 @@ def is_a_numpy_array(obj): def count_parameters(also_print=True): - """Cound the number of parameters in the model. + """count the number of parameters in the model. Args: also_print: Boolean. If True also print the numbers.