Fix ouput typos (pytorch#120870)
Pull Request resolved: pytorch#120870
Approved by: https://github.com/clee2000
kit1980 authored and pytorchmergebot committed Feb 29, 2024
1 parent 14c5ebc commit 09aefe1
Showing 17 changed files with 31 additions and 31 deletions.
2 changes: 1 addition & 1 deletion aten/src/ATen/native/Normalization.cpp
@@ -409,7 +409,7 @@ std::tuple<Tensor, Tensor, Tensor> batch_norm_backward_cpu_template(
invstd = 1 / std::sqrt(running_var_a[f] + eps);
}

-// dot product of the Q(X) and gradOuput
+// dot product of the Q(X) and gradOutput
accscalar_t dotp = 0;
reduce_iter_local.unsafe_replace_operand(
0, in_data + f * in_channel_stride);
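
For context on the comment being fixed: in batch-norm backward, dotp accumulates the dot product of the centered input Q(X) = X - mean with gradOutput for each channel. A minimal Python sketch of that quantity (an illustration with invented tensor names, not the kernel's actual code):

import torch

# Illustration only: the per-channel quantity the comment refers to,
# conceptually sum((x - mean) * grad_output) over the batch for channel f.
x = torch.randn(8)            # activations of one channel across the batch
grad_output = torch.randn(8)  # incoming gradient for the same channel
dotp = ((x - x.mean()) * grad_output).sum()
print(dotp)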
2 changes: 1 addition & 1 deletion aten/src/ATen/native/cuda/Reduce.cuh
@@ -1054,7 +1054,7 @@ ReduceConfig setReduceConfig(const TensorIterator& iter){
// Case 1: "vectorize along input"
// This case happens when we are reducing along fastest moving dimesion. In such case, threads
// with the same threadIdx.y works on the same reduction cooperatively and will produce results
-// for the same output. In such case, values in each loaded vector always correspond to the same ouput.
+// for the same output. In such case, values in each loaded vector always correspond to the same output.
//
// Case 2: "vectorize along output"
// This case happens when the fastest moving dimesion is not the dimension of reduction. In such case,
8 changes: 4 additions & 4 deletions aten/src/ATen/native/nested/NestedTensorBackward.cpp
@@ -44,16 +44,16 @@ std::tuple<Tensor, Tensor, Tensor> nested_linear_backward(
return std::tuple<Tensor, Tensor, Tensor>{Tensor(), Tensor(), Tensor()};
}
Tensor grad_input, grad_weight, grad_bias;
-auto grad_ouput_contiguous = grad_output.contiguous();
-auto* nt_grad_output = get_nested_tensor_impl(grad_ouput_contiguous);
+auto grad_output_contiguous = grad_output.contiguous();
+auto* nt_grad_output = get_nested_tensor_impl(grad_output_contiguous);
auto* nt_input = get_nested_tensor_impl(input);
TORCH_INTERNAL_ASSERT(nt_grad_output != nullptr);
TORCH_INTERNAL_ASSERT(nt_input != nullptr);
TORCH_INTERNAL_ASSERT(nested_tensor_impl_is_contiguous(nt_grad_output));
-auto grad_ouput_buffer = nt_grad_output->get_buffer();
+auto grad_output_buffer = nt_grad_output->get_buffer();
auto input_buffer = nt_input->get_buffer();

-auto reshaped_grad = grad_ouput_buffer.reshape({-1, weight.size(0)});
+auto reshaped_grad = grad_output_buffer.reshape({-1, weight.size(0)});

if (output_mask[0]) {
auto grad_input_buffer = at::mm(reshaped_grad, weight).view({-1});
2 changes: 1 addition & 1 deletion modules/detectron/select_smooth_l1_loss_op.cu
@@ -149,7 +149,7 @@ bool SelectSmoothL1LossGradientOp<float, CUDAContext>::RunOnDevice() {
auto& Y = Input(1);
auto& L = Input(2);
auto& S = Input(3);
-// Below is gradient of net w.r.t. avg_loss ("gradOuput"), should be all 1's
+// Below is gradient of net w.r.t. avg_loss ("gradOutput"), should be all 1's
auto& d_avg_loss = Input(4);

auto* d_Y_hat = Output(0, Y_hat.sizes(), at::dtype<float>()); // gradient of net w.r.t. Y_hat ("gradInput")
2 changes: 1 addition & 1 deletion modules/detectron/smooth_l1_loss_op.cu
@@ -128,7 +128,7 @@ bool SmoothL1LossGradientOp<float, CUDAContext>::RunOnDevice() {
auto& Y = Input(1);
auto& alpha_in = Input(2);
auto& alpha_out = Input(3);
-auto& d_avg_loss = Input(4); // gradient of net w.r.t. avg_loss ("gradOuput")
+auto& d_avg_loss = Input(4); // gradient of net w.r.t. avg_loss ("gradOutput")
// We intentially don't compute gradients for Y, alpha_{in,out} since they
// are not needed (can change in the future if desired)

2 changes: 1 addition & 1 deletion test/test_ops.py
@@ -1790,7 +1790,7 @@ def check_inplace_view(func, input, rs, input_size, input_strides):

# A mode that when enabled runs correctness checks to ensure
# that operators have expected tags based on their input and
-# ouput tensor properties
+# output tensor properties
class TestTagsMode(TorchDispatchMode):
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
if isinstance(args[0], torch.Tensor):
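
The comment above describes the general TorchDispatchMode mechanism. A hedged sketch of how such a mode intercepts operator calls (the class name and print body are invented for illustration; the real TestTagsMode checks expected tags rather than printing):

import torch
from torch.utils._python_dispatch import TorchDispatchMode

class PrintTagsMode(TorchDispatchMode):
    # Every ATen op routed through the mode arrives here; func is an
    # OpOverload whose .tags can be compared against tensor properties.
    def __torch_dispatch__(self, func, types, args=(), kwargs=None):
        print(func, func.tags)
        return func(*args, **(kwargs or {}))

with PrintTagsMode():
    torch.ones(2).sin()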
2 changes: 1 addition & 1 deletion test/test_schema_check.py
@@ -295,7 +295,7 @@ def test_schema_check_mode_functionality_with_multiple_outputs(self):
self.assertEqual(m_expected, m_actual)
self.assertEqual(e_expected, e_actual)

-# Tests that SchemaCheckMode wraps Torch.tensor with aliasing ouputs due to aliasing inputs
+# Tests that SchemaCheckMode wraps Torch.tensor with aliasing outputs due to aliasing inputs
def test_schema_check_mode_functionality_with_multiple_outputs_aliasing(self):
x = torch.rand((3, 3))
actual = torch.zeros(3)
4 changes: 2 additions & 2 deletions torch/csrc/distributed/autograd/engine/dist_engine.cpp
@@ -62,9 +62,9 @@ class DistAccumulateGradCaptureHook
autogradContext_->accumulateGrad(
accumulateGrad_->variable, inputGrads[0], 3 /* num_expected_refs */);
}
-const variable_list kEmptyOuput;
+const variable_list kEmptyOutput;
for (const auto& hook : accumulateGrad_->post_hooks()) {
-(*hook)(kEmptyOuput, inputGrads);
+(*hook)(kEmptyOutput, inputGrads);
}
return inputGrads[0];
}
16 changes: 8 additions & 8 deletions torch/csrc/jit/mobile/compatibility/backport_manager.cpp
@@ -187,10 +187,10 @@ std::stringstream update_bytecode_version(
"bytecode",
};

-std::stringstream ouput_model_stream;
+std::stringstream output_model_stream;
auto writer_func = [&](const void* buf, size_t nbytes) -> size_t {
-ouput_model_stream.write(static_cast<const char*>(buf), nbytes);
-return !ouput_model_stream ? 0 : nbytes;
+output_model_stream.write(static_cast<const char*>(buf), nbytes);
+return !output_model_stream ? 0 : nbytes;
};

PyTorchStreamWriter writer_bytecode(writer_func);
@@ -218,7 +218,7 @@ std::stringstream update_bytecode_version(
/*use_storage_context=*/true,
storage_context);

-return ouput_model_stream;
+return output_model_stream;
}
} // namespace

@@ -307,10 +307,10 @@ std::stringstream backport_v5_to_v4(std::stringstream& input_model_stream) {
"bytecode",
};

-std::stringstream ouput_model_stream;
+std::stringstream output_model_stream;
auto writer_func = [&](const void* buf, size_t nbytes) -> size_t {
-ouput_model_stream.write(static_cast<const char*>(buf), nbytes);
-return !ouput_model_stream ? 0 : nbytes;
+output_model_stream.write(static_cast<const char*>(buf), nbytes);
+return !output_model_stream ? 0 : nbytes;
};

PyTorchStreamWriter writer(writer_func);
@@ -361,7 +361,7 @@ std::stringstream backport_v5_to_v4(std::stringstream& input_model_stream) {
auto constants_tuple =
c10::ivalue::Tuple::create(std::move(constants_values));
writeArchiveV4(writer, kArchiveNameConstants, constants_tuple);
-return ouput_model_stream;
+return output_model_stream;
}

/*
6 changes: 3 additions & 3 deletions torch/csrc/jit/runtime/jit_trace.cpp
@@ -248,9 -248,9 @@ void insertTracingNodes(Block* block, ProfilingRecord* pr, TracingData& td) {

GRAPH_DEBUG("Tracing ", getHeader(n));
auto tracer = traceNode(n, td, stack);
-auto ouputs_size = n->outputs().size();
-auto iivs = pop(stack, ouputs_size);
-for (size_t j = 0; j < ouputs_size; j++) {
+auto outputs_size = n->outputs().size();
+auto iivs = pop(stack, outputs_size);
+for (size_t j = 0; j < outputs_size; j++) {
auto& iiv = iivs[j];
if (iiv.isTensor()) {
auto t = iiv.toTensor();
2 changes: 1 addition & 1 deletion torch/distributed/_spmd/iter_graph_module.py
@@ -295,7 +295,7 @@ def move_to_next_iter_before(
raise ValueError(
"The target nodes for ``move_to_next_iter_before`` must "
"satisfy one of the following conditions: 1) the user of the "
"node is in the target nodes, 2) the user is the ouput of the "
"node is in the target nodes, 2) the user is the output of the "
"graph, 3) there are no users -- the node is a side-effect node. "
)

2 changes: 1 addition & 1 deletion torch/distributed/_tensor/ops/matrix_ops.py
@@ -201,7 +201,7 @@ def scaled_dot_product_attention_strategy(
assert len(spec_list) == 6
input_expected_specs = spec_list[3:]
output_specs: List[Optional[DTensorSpec]] = list(spec_list[:3])
-# fix up ouput_specs and fill in None for the int and empty tensor return values
+# fix up output_specs and fill in None for the int and empty tensor return values
for i in range(2, 8):
output_specs.insert(i, None)
if all(is_tensor_shardable(qkv_shape, spec) for spec in input_expected_specs):
2 changes: 1 addition & 1 deletion torch/distributed/_tensor/ops/tensor_ops.py
@@ -329,7 +329,7 @@ def gen_slice_scatter_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> Strateg

@register_op_strategy(aten._local_scalar_dense.default)
def replica_only_strategy(mesh: DeviceMesh, op_schema: OpSchema) -> StrategyType:
"""Only allow replication on the input/ouput."""
"""Only allow replication on the input/output."""
replicate_spec = DTensorSpec(mesh, tuple([Replicate()] * mesh.ndim))
return OpStrategy([PlacementStrategy(replicate_spec)])

2 changes: 1 addition & 1 deletion torch/distributed/_tensor/tp_conv.py
@@ -141,7 +141,7 @@ def tp_convolution(
local_tensor_args = cast(Tuple[object, ...], local_tensor_args_list)
local_results = op_call(*local_tensor_args, **local_tensor_kwargs)

-# step3 remove extra ouputs from the results
+# step3 remove extra outputs from the results
padding_w = padding[1]
w = local_results.size(3)
if rank == 0:
2 changes: 1 addition & 1 deletion torch/functional.py
@@ -1707,7 +1707,7 @@ def unravel_index(indices: Tensor, shape: Union[int, Sequence[int], torch.Size])
tensor. All elements must be non-negative.
Returns:
-tuple of Tensors: Each ``i``-th tensor in the ouput corresponds with
+tuple of Tensors: Each ``i``-th tensor in the output corresponds with
dimension ``i`` of :attr:`shape`. Each tensor has the same shape as
``indices`` and contains one index into dimension ``i`` for each of the
flat indices given by ``indices``.
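
A quick usage sketch of the documented behavior (illustration only, not part of the diff):

import torch

# Each returned tensor indexes one dimension of the given shape:
# flat index 4 in a 3x3 grid is (row 1, col 1); flat index 5 is (row 1, col 2).
coords = torch.unravel_index(torch.tensor([4, 5]), (3, 3))
print(coords)  # (tensor([1, 1]), tensor([1, 2]))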
4 changes: 2 additions & 2 deletions torch/onnx/_internal/jit_utils.py
@@ -293,8 +293,8 @@ def _create_node(
for _ in range(1, n_outputs):
node.addOutput()

-node_ouputs = tuple(node.outputs()) # type: ignore[possibly-undefined]
-assert len(node_ouputs) == n_outputs
+node_outputs = tuple(node.outputs()) # type: ignore[possibly-undefined]
+assert len(node_outputs) == n_outputs

aten = domain_op.startswith("aten::")

2 changes: 1 addition & 1 deletion torch/testing/_internal/distributed/distributed_test.py
@@ -3622,7 +3622,7 @@ def _test_all_gather_coalesced_helper(
]
assert self._run_all_gather_coalesced_and_verify(
output_tensor_lists, input_tensors, expected_tensors, group_id
), "output tensors do not match expected ouputs"
), "output tensors do not match expected outputs"

self._barrier()
