Fix typos discovered by codespell (#424)

cclauss authored Jan 28, 2025
1 parent 3a5b206 commit 95a4050
Showing 17 changed files with 28 additions and 28 deletions.
2 changes: 1 addition & 1 deletion compiler_opt/benchmark/filter_tests.py
@@ -19,7 +19,7 @@
in their entirety can sometimes be problematic as some subsets of the tests
available in the executable might require certain hardware configurations
such as an X configuration with working graphics drivers, and we would prefer
-to avoid those tests. This exectuable goes through an entire test suite
+to avoid those tests. This executable goes through an entire test suite
description and returns another test suite description containing only tests
that pass.
4 changes: 2 additions & 2 deletions compiler_opt/es/blackbox_optimizers.py
@@ -1080,7 +1080,7 @@ def hessv_func(x: FloatArray) -> FloatArray:
"""
hessv = np.matmul(self.saved_hessian, x)
# Reminder:
-# If not using sensing-subspace Hessian, also subract diagonal gs(x)*I
+# If not using sensing-subspace Hessian, also subtract diagonal gs(x)*I
hessv /= np.power(self.precision_parameter, 2)
hessv *= -1
return hessv
@@ -1107,7 +1107,7 @@ def hessv_func(x: FloatArray) -> FloatArray:
np.power(self.precision_parameter, 2))
hessv /= float(len(self.saved_perturbations))
# Reminder:
-# If not using sensing-subspace Hessian, also subract diagonal gs(x)*I
+# If not using sensing-subspace Hessian, also subtract diagonal gs(x)*I
hessv /= np.power(self.precision_parameter, 2)
hessv *= -1
return hessv
2 changes: 1 addition & 1 deletion compiler_opt/rl/compilation_runner.py
@@ -334,7 +334,7 @@ def enable(self) -> WorkerFuture:
class CompilationResultObserver(metaclass=abc.ABCMeta):
"""Abstract base class used to observe compilation results.
-This is indended for users who need to observe compilations while they are in
+This is intended for users who need to observe compilations while they are in
the distributed worker pool, rather than after they have been coalesced in
the collection script.
"""
2 changes: 1 addition & 1 deletion compiler_opt/rl/distributed/agent.py
@@ -229,7 +229,7 @@ def compute_return_and_advantage(
advantages = self.compute_advantages(rewards, returns, discounts,
value_preds)

-# TODO(b/171573175): remove the condition once historgrams are
+# TODO(b/171573175): remove the condition once histograms are
# supported on TPUs.
if self._debug_summaries and not self._use_tpu:
tf.compat.v2.summary.histogram(
2 changes: 1 addition & 1 deletion compiler_opt/rl/distributed/ppo_train_lib.py
@@ -69,7 +69,7 @@ def train(
agent.initialize()

# Create the policy saver which saves the initial model now, then it
-# periodically checkpoints the policy weigths.
+# periodically checkpoints the policy weights.
saved_model_dir = os.path.join(root_dir, actor_learner.POLICY_SAVED_MODEL_DIR)
save_model_trigger = triggers.PolicySavedModelTrigger(
saved_model_dir, agent, train_step, interval=1000)
4 changes: 2 additions & 2 deletions compiler_opt/rl/env.py
@@ -311,7 +311,7 @@ def _get_clang_generator(
Returns:
A generator of tuples. Each element of the tuple is created with
clang_session. First argument of the tuple is always an interactive
-clang session. The second argumnet is a default clang session if
+clang session. The second argument is a default clang session if
interactive_only is False and otherwise the exact same interactive
clang session object as the first argument.
"""
@@ -343,7 +343,7 @@ class MLGOEnvironmentBase:
"""Base implementation for all MLGO environments.
Depending on the RL framework, one may want different implementations of an
-enviroment (tf_agents: PyEnvironment, jax: dm-env, etc). This class
+environment (tf_agents: PyEnvironment, jax: dm-env, etc). This class
implements the core methods that are needed to then implement any of these
other environments as well.
"""
@@ -154,7 +154,7 @@ def add_feature_list(seq_example: tf.train.SequenceExample,
np.dtype(np.float32),
str,
]):
-raise AssertionError((f'Unsupported type for feautre {feature_name}'
+raise AssertionError((f'Unsupported type for feature {feature_name}'
f' of type {type(feature_list[0])}. '
'Supported types are np.int64, np.float32, str'))
if isinstance(feature_list[0], np.float32):
@@ -382,7 +382,7 @@ def compile_module(
Returns:
sequence_example: a tf.train.SequenceExample containing the trajectory
from compilation. In addition to the features returned from the env
-tbe sequence_example adds the following extra features: action,
+the sequence_example adds the following extra features: action,
reward and module_name. action is the action taken at any given step,
reward is the reward specified by reward_key, not necessarily the
reward returned by the environment and module_name is the name of
@@ -933,7 +933,7 @@ def gen_trajectories(
ModuleWorker.select_best-exploration
worker_wait_sec: max number of seconds to wait for a worker to terminate
worker_class_type: the class that will process each module
-worker_class_type: allows for overrriding ModuleWorker
+worker_class_type: allows for overriding ModuleWorker
worker_manager_class: A pool of workers hosted on the local machines, each
in its own process.
"""
4 changes: 2 additions & 2 deletions compiler_opt/rl/imitation_learning/weighted_bc_trainer_lib.py
@@ -55,7 +55,7 @@
class TrainingWeights:
"""Class for computing weights for training.
-To use, create an instance by specifying the paritions used in
+To use, create an instance by specifying the partitions used in
collecting the data with generate_bc_trajectories. Next, run multiple steps
of update_weights with collected profiles from generate_bc_trajectories,
where each step corresponds to one pair of a pollicy profile and a
@@ -120,7 +120,7 @@ def create_new_profile(self,
The regret is measured as the difference between the loss of the data_eval
profiles and of the data_comparator profiles. The reward is the negative
-regret normalized by the loss of hte data_comparator profiles.
+regret normalized by the loss of the data_comparator profiles.
Args:
data_comparator: baseline profiles to measure improvement against
2 changes: 1 addition & 1 deletion compiler_opt/rl/inlining/inlining_runner.py
@@ -53,7 +53,7 @@ def compile_fn(
Args:
command_line: the fully qualified command line.
-tf_policy_path: path to TF policy direcoty on local disk.
+tf_policy_path: path to TF policy directory on local disk.
reward_only: whether only return native size.
Returns:
2 changes: 1 addition & 1 deletion compiler_opt/rl/policy_saver.py
@@ -48,7 +48,7 @@ def _split_tensor_name(name: str) -> Tuple[str, int]:
def _get_non_identity_op(tensor):
"""Get the true output op aliased by Identity `tensor`.
-Output signature tensors are in a Function that refrences the true call
+Output signature tensors are in a Function that references the true call
in the base SavedModel metagraph. Traverse the function upwards until
we find this true output op and tensor and return that.
6 changes: 3 additions & 3 deletions compiler_opt/rl/random_net_distillation.py
@@ -31,7 +31,7 @@ def __init__(self,
fc_layer_params=(32,),
initial_intrinsic_reward_scale=1.0,
half_decay_steps=10000):
"""Initilization for RandomNetworkDistillation class.
"""Initialization for RandomNetworkDistillation class.
Args:
time_step_spec: the time step spec for raw observation
@@ -81,7 +81,7 @@ def __init__(self,
self._external_reward_mean = tf.keras.metrics.Mean()

def _get_intrinsic_reward(self, observation):
"""Compute the intrisic reward.
"""Compute the intrinsic reward.
Args:
observation: raw observation in observation_spec format
@@ -91,7 +91,7 @@ def _get_intrinsic_reward(self, observation):
"""
with tf.GradientTape() as tape:
# make the predict network parameters trainable
-# Compute the feature embedding loss (for next obseravtion trajectory)
+# Compute the feature embedding loss (for next observation trajectory)
feature_target, _ = self._target_net(observation)
feature_predict, _ = self._predict_net(observation)
feature_target = tf.stop_gradient(feature_target)
2 changes: 1 addition & 1 deletion compiler_opt/rl/regalloc/regalloc_runner.py
@@ -48,7 +48,7 @@ def compile_fn(
Args:
command_line: the fully qualified command line.
-tf_policy_path: path to TF policy direcoty on local disk.
+tf_policy_path: path to TF policy directory on local disk.
reward_only: whether only return reward.
Returns:
2 changes: 1 addition & 1 deletion compiler_opt/tools/feature_importance_graphs.py
@@ -46,7 +46,7 @@ def load_shap_values(file_name: str) -> DataType:


def init_shap_for_notebook():
"""Initalizes some JS code for interactive feature importance plots."""
"""Initializes some JS code for interactive feature importance plots."""
shap.initjs()


4 changes: 2 additions & 2 deletions compiler_opt/tools/feature_importance_utils.py
@@ -89,7 +89,7 @@ def flatten_input(to_flatten: types.NestedTensorSpec,
"""Flattens problem instance data into a flat array for shap
Args:
-to_flatten: A nested tensor spec of data that needs to be flattend into
+to_flatten: A nested tensor spec of data that needs to be flattened into
an array
array_size: An integer representing the size of the output array. Used for
allocating the flat array to place all the data in.
@@ -167,7 +167,7 @@ def get_max_part_size(input_signature: SignatureType) -> int:

def create_run_model_function(action_fn: Callable,
input_sig: SignatureType) -> Callable:
"""Returns a function that takes in a flattend input array and returns the
"""Returns a function that takes in a flattened input array and returns the
model output as a scalar.
Args:
2 changes: 1 addition & 1 deletion docs/benchmarking.md
@@ -78,7 +78,7 @@ subdirectory within that repository.
* `--llvm_test_suite_build_path` - The path to place the build for the
llvm-test-suite. Similar behavior to the LLVM build path.
* `llvm_use_incremental` - Whether or not to do an incremental build of LLVM.
-If you alread have all the correct compilation flags setup for running MLGO
+If you already have all the correct compilation flags setup for running MLGO
with LLVM, you can set this flag and you should get an extremely fast LLVM
build as the only thing changing is the release mode regalloc model.
* `model_path` - The path to the regalloc model. If this is set to "download",
4 changes: 2 additions & 2 deletions docs/inlining-demo/demo.md
@@ -186,7 +186,7 @@ yours). The `--args=clang_embed_bitcode=true` option above adds the compilation
flag `-Xclang=-fembed-bitcode=all`. This can be seen in the compilation database.
The effect of this is that the object files have the llvm bytecode produced by
clang, before the optimization passes, and the clang command line, captured in
-the .llvmbc and .llvmcmd sections, respectivelly. This is the mechanism by which
+the .llvmbc and .llvmcmd sections, respectively. This is the mechanism by which
we extract our corpus.

Naturally, the effect of this is that the object files, and the linked binaries,
@@ -346,7 +346,7 @@ compiler_opt/tools/generate_default_trace.py \
## Deploying and using the new policy

We need to build the 'release' mode of the compiler. Currently, that means
-overwritting the model in `llvm/lib/Analysis/models/inliner`.
+overwriting the model in `llvm/lib/Analysis/models/inliner`.

```shell
cd $LLVM_SRCDIR
6 changes: 3 additions & 3 deletions docs/regalloc-demo/demo.md
@@ -89,7 +89,7 @@ gclient runhooks
## Install Dependencies

If you're working in a Debian based docker container, it will most likely
-not come by default with `sudo`. It isn't stricly necessary to install,
+not come by default with `sudo`. It isn't strictly necessary to install,
but it makes it easier to copypaste the installation commands below and it
also enables the use of the Chromium dependency auto-installation script:

@@ -110,7 +110,7 @@ $WORKING_DIR/chromium/src/build/install-build-deps.sh
```

**Note:** These installation commands are all designed to be run on Debian
-based distros. However, adapating to other distros with alternative package
+based distros. However, adapting to other distros with alternative package
management systems should not be too difficult. The packages for the first
command should be very similarly named and the
[official Chromium documentation](https://chromium.googlesource.com/chromium/src/+/main/docs/linux/build_instructions.md)
@@ -176,7 +176,7 @@ symbol_level=0
enable_nacl=false
```

-Immedaitely after closing the editor, `gn` will generate all of the files
+Immediately after closing the editor, `gn` will generate all of the files
necessary so that `ninja` can execute all the necessary compilation steps.
However, to extract a corpus for ML training, we also need a database of
compilation commands. This can be obtained by running the following command:
