[2/N] Enable Wunused-result, Wunused-variable and Wmissing-braces in torch targets (pytorch#110836)

This PR enables -Wunused-result, -Wunused-variable and -Wmissing-braces, since our code base is now clean of these warnings.
Pull Request resolved: pytorch#110836
Approved by: https://github.com/Skylion007
cyyever authored and pytorchmergebot committed Oct 11, 2023
1 parent 6d7744c commit a6b452d
Showing 5 changed files with 10 additions and 19 deletions.
cmake/public/cuda.cmake (2 changes: 1 addition & 1 deletion)

@@ -348,7 +348,7 @@ message(STATUS "Added CUDA NVCC flags for: ${NVCC_FLAGS_EXTRA}")

 # disable some nvcc diagnostic that appears in boost, glog, glags, opencv, etc.
 foreach(diag cc_clobber_ignored
-             set_but_not_used field_without_dll_interface
+             field_without_dll_interface
              base_class_has_different_dll_interface
              dll_interface_conflict_none_assumed
              dll_interface_conflict_dllexport_assumed
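Removing set_but_not_used from this suppression list means nvcc's front end once again reports variables that are assigned but never read, matching the host-side cleanup in the rest of the PR. A hypothetical CUDA snippet (not from this PR) that the diagnostic would now catch:

// Hypothetical kernel: "stride" is set but never read, so nvcc's
// set_but_not_used diagnostic (no longer suppressed) fires on it.
__global__ void scale(float* out, const float* in, int n) {
  int stride = blockDim.x * gridDim.x;  // set but never used
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    out[i] = 2.0f * in[i];
  }
}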
cmake/public/utils.cmake (4 changes: 0 additions & 4 deletions)

@@ -438,17 +438,13 @@ function(torch_compile_options libname)
         -Wdeprecated
         -Wno-unused-parameter
         -Wno-unused-function
-        -Wno-unused-result
         -Wno-missing-field-initializers
         -Wno-unknown-pragmas
         -Wno-type-limits
         -Wno-array-bounds
         -Wno-unknown-pragmas
         -Wno-strict-overflow
         -Wno-strict-aliasing
-        # Clang has an unfixed bug leading to spurious missing braces
-        # warnings, see https://bugs.llvm.org/show_bug.cgi?id=21629
-        -Wno-missing-braces
         )
     if(NOT "${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
       list(APPEND private_compile_options
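With the -Wno-unused-result and -Wno-missing-braces opt-outs gone, both warnings apply to every target built through torch_compile_options. A hypothetical snippet (not from this PR) illustrating what each one flags:

#include <array>
#include <cstddef>
#include <vector>

[[nodiscard]] bool try_grow(std::vector<int>& v, std::size_t n) {
  v.reserve(n);
  return true;
}

void demo() {
  std::vector<int> v;
  // -Wunused-result: the [[nodiscard]] return value is silently dropped.
  try_grow(v, 10);

  // -Wmissing-braces (notably on Clang, per the LLVM bug cited in the
  // removed comment): std::array wraps a C array, so the fully braced
  // form is {{1, 2, 3}} rather than {1, 2, 3}.
  std::array<int, 3> a = {1, 2, 3};
  (void)a;
}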
torch/CMakeLists.txt (4 changes: 0 additions & 4 deletions)

@@ -311,10 +311,6 @@ if(USE_PRECOMPILED_HEADERS)
       "$<$<COMPILE_LANGUAGE:CXX>:ATen/ATen.h>")
 endif()

-if(NOT MSVC)
-  target_compile_options(torch_python PRIVATE -Wno-unused-variable)
-endif()
-
 # Required workaround for generated sources
 # See https://samthursfield.wordpress.com/2015/11/21/cmake-dependencies-between-targets-and-files-and-custom-commands/#custom-commands-in-different-directories
 add_dependencies(torch_python generate-torch-sources)
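Dropping this per-target opt-out means torch_python now builds with -Wunused-variable like the rest of the code base, and it is exactly this class of warning that surfaces real bugs such as the Module.cpp fix below. A hypothetical trigger (not from this PR):

static void register_bindings() {
  int num_registered = 0;  // -Wunused-variable: assigned here, never read
  // ... registration calls that never consult num_registered ...
}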
torch/csrc/cuda/Module.cpp (2 changes: 1 addition & 1 deletion)

@@ -1088,7 +1088,7 @@ static void registerCudaPluggableAllocator(PyObject* module) {
         auto data_ptr = storage_impl->data_ptr().get();
         bool succeeded = storage_impl->mutable_data_ptr().compare_exchange_deleter(
             alloc->raw_deleter(), c10::detail::deleteNothing);
-        TORCH_CHECK("Expected standard deleter");
+        TORCH_CHECK(succeeded, "Expected standard deleter");
         c10::cuda::CUDACachingAllocator::raw_delete(data_ptr);
       });
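The old call compiled but could never fail: TORCH_CHECK treats its first argument as the condition, and a non-null string literal is always truthy. Meanwhile the succeeded flag it was meant to test went unused, which is precisely what the newly enabled warnings report. A minimal sketch of the pitfall, using a simplified stand-in macro rather than PyTorch's real TORCH_CHECK:

#include <stdexcept>

// Simplified stand-in for TORCH_CHECK(cond, msg); not the real macro.
// Being variadic, it also accepts the buggy one-argument call.
#define MY_CHECK(cond, ...) \
  do { if (!(cond)) throw std::runtime_error("check failed"); } while (0)

void demo(bool succeeded) {
  MY_CHECK("Expected standard deleter");            // bug: literal is truthy, never throws
  MY_CHECK(succeeded, "Expected standard deleter"); // fixed: actually tests the flag
}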
torch/csrc/profiler/collection.cpp (17 changes: 8 additions & 9 deletions)

@@ -307,15 +307,14 @@ uint64_t ThreadLocalSubqueue::TorchOpStorage::EventBlock<T, ChunkSize>::
 // ---------------------------------
 std::unique_ptr<KinetoObserverContext> ThreadLocalSubqueue::begin_op(
     const at::RecordFunction& fn) {
-  KinetoObserverContext::Event* event = nullptr;
-  uint64_t corr_id = 0;
-  std::tie(event, corr_id) = torch_ops_.op_events_.emplace_back(
-      fn.seqNr(),
-      fn.forwardThreadId(),
-      fn.scope(),
-      fn.isAsync(),
-      fn.debugHandle(),
-      fn.name());
+  auto [event, corr_id] = torch_ops_.op_events_.emplace_back(
+      torch::profiler::impl::TorchOpBasicFields{
+          fn.seqNr(),
+          fn.forwardThreadId(),
+          fn.scope(),
+          fn.isAsync(),
+          fn.debugHandle(),
+          fn.name()});
   if (config_.report_input_shapes) {
     torch_ops_.inputs_outputs_.push(fn.inputs());
   }
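The rewrite swaps the declare-then-std::tie pattern for C++17 structured bindings, so event and corr_id no longer need placeholder initial values, and the fields travel as one explicitly braced TorchOpBasicFields aggregate. A schematic before/after comparison with simplified stand-in types (not the real profiler code):

#include <cstdint>
#include <tuple>
#include <utility>

struct Event {};  // stand-in for KinetoObserverContext::Event

// Stand-in for op_events_.emplace_back(...): returns the new element
// and its correlation id.
std::pair<Event*, uint64_t> emplace_event() {
  static Event e;
  return {&e, 42};
}

void demo() {
  // Before: pre-declare with dummy values, then assign through std::tie.
  Event* event = nullptr;
  uint64_t corr_id = 0;
  std::tie(event, corr_id) = emplace_event();

  // After: structured bindings declare and bind in a single step.
  auto [event2, corr_id2] = emplace_event();
  (void)event; (void)corr_id; (void)event2; (void)corr_id2;
}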
