From b3f64c8bf49e45d0d7fb2beacd96b95cc8e7990d Mon Sep 17 00:00:00 2001
From: Samuel Marks <807580+SamuelMarks@users.noreply.github.com>
Date: Sat, 5 Dec 2020 21:49:14 +1100
Subject: [PATCH] [*.py] Rename "Arguments:" to "Args:"

---
 models/official/detection/modeling/architecture/nn_ops.py | 2 +-
 models/official/efficientnet/condconv/condconv_layers.py  | 2 +-
 models/official/efficientnet/imagenet_input.py            | 2 +-
 models/official/mask_rcnn/distributed_executer.py         | 2 +-
 models/official/mask_rcnn/tpu_normalization.py            | 2 +-
 5 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/models/official/detection/modeling/architecture/nn_ops.py b/models/official/detection/modeling/architecture/nn_ops.py
index cb8dd8ecd..2bbbf8693 100644
--- a/models/official/detection/modeling/architecture/nn_ops.py
+++ b/models/official/detection/modeling/architecture/nn_ops.py
@@ -41,7 +41,7 @@ class BatchNormalization(tf.layers.BatchNormalization):
   def __init__(self, fused=False, max_shards_for_local=8, **kwargs):
     """Builds the batch normalization layer.
 
-    Arguments:
+    Args:
       fused: If `False`, use the system recommended implementation. Only
         support `False` in the current implementation.
       max_shards_for_local: The maximum number of TPU shards that should use
diff --git a/models/official/efficientnet/condconv/condconv_layers.py b/models/official/efficientnet/condconv/condconv_layers.py
index 5468e72b6..c14c5fb8f 100644
--- a/models/official/efficientnet/condconv/condconv_layers.py
+++ b/models/official/efficientnet/condconv/condconv_layers.py
@@ -36,7 +36,7 @@ def get_condconv_initializer(initializer, num_experts, expert_shape):
   is correctly initialized with the given initializer before being flattened
   into the correctly shaped CondConv variable.
 
-  Arguments:
+  Args:
     initializer: The initializer to apply for each individual expert.
     num_experts: The number of experts to be initialized.
     expert_shape: The original shape of each individual expert.
diff --git a/models/official/efficientnet/imagenet_input.py b/models/official/efficientnet/imagenet_input.py
index a5ea0b424..d648929b9 100644
--- a/models/official/efficientnet/imagenet_input.py
+++ b/models/official/efficientnet/imagenet_input.py
@@ -137,7 +137,7 @@ def mixup(self, batch_size, alpha, images, labels):
       Mixup: Beyond Empirical Risk Minimization.
       ICLR'18, https://arxiv.org/abs/1710.09412
 
-    Arguments:
+    Args:
       batch_size: The input batch size for images and labels.
       alpha: Float that controls the strength of Mixup regularization.
       images: A batch of images of shape [batch_size, ...]
diff --git a/models/official/mask_rcnn/distributed_executer.py b/models/official/mask_rcnn/distributed_executer.py
index 1e24e3433..1c3457643 100644
--- a/models/official/mask_rcnn/distributed_executer.py
+++ b/models/official/mask_rcnn/distributed_executer.py
@@ -71,7 +71,7 @@ def build_model_parameters(self, unused_mode, unused_run_config):
   def build_mask_rcnn_estimator(self, params, run_config, mode):
     """Creates TPUEstimator/Estimator instance.
 
-    Arguments:
+    Args:
      params: A dictionary to pass to Estimator `model_fn`.
      run_config: RunConfig instance specifying distribution strategy
        configurations.
diff --git a/models/official/mask_rcnn/tpu_normalization.py b/models/official/mask_rcnn/tpu_normalization.py
index 4fcb9636a..b961d1677 100644
--- a/models/official/mask_rcnn/tpu_normalization.py
+++ b/models/official/mask_rcnn/tpu_normalization.py
@@ -94,7 +94,7 @@ def cross_replica_batch_normalization(inputs,
   For detailed information of arguments and implementation, refer to:
   https://www.tensorflow.org/api_docs/python/tf/keras/layers/BatchNormalization
 
-  Arguments:
+  Args:
     inputs: Tensor input.
     training: Either a Python boolean, or a TensorFlow boolean scalar tensor
      (e.g. a placeholder). Whether to return the output in training mode
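
The patch is a mechanical rename of the docstring section header from "Arguments:" to the Google-style "Args:". As a minimal sketch of how such a repository-wide rename could be reproduced (the script below is illustrative only; it is not part of the patch, and the patch does not say how it was generated), a small Python helper can rewrite every standalone `Arguments:` line under a source tree:

```python
# Hypothetical helper (not part of the patch): rewrite "Arguments:" docstring
# section headers to the Google-style "Args:" across a tree of .py files.
import pathlib
import re

# Matches a line containing only indentation followed by "Arguments:".
_SECTION_RE = re.compile(r"^(\s*)Arguments:\s*$", flags=re.MULTILINE)


def rename_arguments_sections(root="."):
    for path in pathlib.Path(root).rglob("*.py"):
        text = path.read_text(encoding="utf-8")
        new_text = _SECTION_RE.sub(r"\1Args:", text)
        if new_text != text:
            path.write_text(new_text, encoding="utf-8")
            print(f"updated {path}")


if __name__ == "__main__":
    # The root directory here is assumed; adjust to the tree being rewritten.
    rename_arguments_sections("models/official")
```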