From c0316f680459c32bede621b99ad6beaef4c6970c Mon Sep 17 00:00:00 2001
From: Donghyeon Jeong <dhyeon.jeong@samsung.com>
Date: Fri, 26 Jan 2024 10:36:43 +0900
Subject: [PATCH] [TensorV2] Refactoring TensorBase pointer to shared_ptr

This PR proposes refactoring the TensorV2 class to use a shared_ptr instead
of a raw pointer for managing its TensorBase object. By adopting this change,
we can improve the safety and reliability of our code and reduce the
likelihood of memory leaks and other issues related to manual memory
management.

**Changes proposed in this PR:**
- Replace the TensorBase pointer in the Tensor class with a shared_ptr.
- Update any relevant code to use the shared_ptr instead of the raw pointer.

**Self-evaluation:**
1. Build test: [X]Passed [ ]Failed [ ]Skipped
2. Run test: [X]Passed [ ]Failed [ ]Skipped

Signed-off-by: Donghyeon Jeong <dhyeon.jeong@samsung.com>
---
 nntrainer/tensor/tensor_v2.cpp | 40 ++++++++++++++++++++++------------
 nntrainer/tensor/tensor_v2.h   |  2 +-
 2 files changed, 27 insertions(+), 15 deletions(-)

diff --git a/nntrainer/tensor/tensor_v2.cpp b/nntrainer/tensor/tensor_v2.cpp
index 6e0f99d465..22b21df651 100644
--- a/nntrainer/tensor/tensor_v2.cpp
+++ b/nntrainer/tensor/tensor_v2.cpp
@@ -22,10 +22,12 @@ TensorV2::TensorV2(std::string name_, Tformat fm, Tdatatype d_type) {
   itensor = nullptr;
 
   if (d_type == Tdatatype::FP32) {
-    itensor = new FloatTensor(name_, fm);
+    itensor = std::shared_ptr<FloatTensor>(new FloatTensor(name_, fm),
+                                           std::default_delete<FloatTensor>());
   } else if (d_type == Tdatatype::FP16) {
 #ifdef ENABLE_FP16
-    itensor = new HalfTensor(name_, fm);
+    itensor = std::shared_ptr<HalfTensor>(new HalfTensor(name_, fm),
+                                          std::default_delete<HalfTensor>());
 #else
     throw std::invalid_argument("Error: enable-fp16 is not enabled");
 #endif
@@ -42,10 +44,14 @@ TensorV2::TensorV2(const TensorDim &d, bool alloc_now, Initializer init,
   itensor = nullptr;
 
   if (d.getDataType() == Tdatatype::FP32) {
-    itensor = new FloatTensor(d, alloc_now, init, name);
+    itensor =
+      std::shared_ptr<FloatTensor>(new FloatTensor(d, alloc_now, init, name),
+                                   std::default_delete<FloatTensor>());
   } else if (d.getDataType() == Tdatatype::FP16) {
 #ifdef ENABLE_FP16
-    itensor = new HalfTensor(d, alloc_now, init, name);
+    itensor =
+      std::shared_ptr<HalfTensor>(new HalfTensor(d, alloc_now, init, name),
+                                  std::default_delete<HalfTensor>());
 #else
     throw std::invalid_argument("Error: enable-fp16 is not enabled");
 #endif
@@ -61,10 +67,12 @@ TensorV2::TensorV2(const TensorDim &d, const void *buf) {
   itensor = nullptr;
 
   if (d.getDataType() == Tdatatype::FP32) {
-    itensor = new FloatTensor(d, buf);
+    itensor = std::shared_ptr<FloatTensor>(new FloatTensor(d, buf),
+                                           std::default_delete<FloatTensor>());
   } else if (d.getDataType() == Tdatatype::FP16) {
 #ifdef ENABLE_FP16
-    itensor = new HalfTensor(d, buf);
+    itensor = std::shared_ptr<HalfTensor>(new HalfTensor(d, buf),
+                                          std::default_delete<HalfTensor>());
 #else
     throw std::invalid_argument("Error: enable-fp16 is not enabled");
 #endif
@@ -79,14 +87,16 @@ TensorV2::TensorV2(const TensorDim &d, const void *buf) {
 TensorV2::TensorV2(
   std::vector<std::vector<std::vector<std::vector<float>>>> const &d,
   ml::train::TensorDim::TensorType t_type) {
-  itensor = new FloatTensor(d, t_type.format);
+  itensor = std::shared_ptr<FloatTensor>(new FloatTensor(d, t_type.format),
+                                         std::default_delete<FloatTensor>());
 }
 
 #ifdef ENABLE_FP16
 TensorV2::TensorV2(
   std::vector<std::vector<std::vector<std::vector<_FP16>>>> const &d,
   ml::train::TensorDim::TensorType t_type) {
-  itensor = new HalfTensor(d, t_type.format);
+  itensor = std::shared_ptr<HalfTensor>(new HalfTensor(d, t_type.format),
+                                        std::default_delete<HalfTensor>());
 }
 #endif
 
@@ -95,12 +105,12 @@ bool TensorV2::operator==(const TensorV2 &rhs) const {
   if (*itensor == *rhs.itensor) {
     /// compares tensor data
     if (getDataType() == Tdatatype::FP32) {
-      return *dynamic_cast<FloatTensor *>(itensor) ==
-             *dynamic_cast<FloatTensor *>(rhs.itensor);
+      return *std::dynamic_pointer_cast<FloatTensor>(itensor) ==
+             *std::dynamic_pointer_cast<FloatTensor>(rhs.itensor);
     } else if (getDataType() == Tdatatype::FP16) {
 #ifdef ENABLE_FP16
-      return *dynamic_cast<HalfTensor *>(itensor) ==
-             *dynamic_cast<HalfTensor *>(rhs.itensor);
+      return *std::dynamic_pointer_cast<HalfTensor>(itensor) ==
+             *std::dynamic_pointer_cast<HalfTensor>(rhs.itensor);
 #else
       throw std::invalid_argument(
         "Error: HalfTensor cannot be created or used when FP16 is not enabled. "
@@ -305,14 +315,16 @@ size_t TensorV2::width() const { return itensor->width(); }
 
 void TensorV2::createSharedDataTensor(const TensorV2 &src, TensorV2 &dest,
                                       size_t offset) const {
-  itensor->createSharedDataTensor(src.itensor, dest.itensor, offset);
+  itensor->createSharedDataTensor(src.itensor.get(), dest.itensor.get(),
+                                  offset);
 }
 
 TensorV2 TensorV2::getSharedDataTensor(const TensorDim dim_, size_t offset,
                                        bool reset_stride,
                                        const std::string &name_) const {
   TensorV2 ret = *this;
-  ret.itensor = itensor->getSharedDataTensor(dim_, offset, reset_stride, name_);
+  ret.itensor = std::shared_ptr<TensorBase>(
+    itensor->getSharedDataTensor(dim_, offset, reset_stride, name_));
 
   return ret;
 }
diff --git a/nntrainer/tensor/tensor_v2.h b/nntrainer/tensor/tensor_v2.h
index 5cf322fcd7..eb2df37db3 100644
--- a/nntrainer/tensor/tensor_v2.h
+++ b/nntrainer/tensor/tensor_v2.h
@@ -708,7 +708,7 @@ class TensorV2 {
   }
 
 private:
-  TensorBase *itensor;
+  std::shared_ptr<TensorBase> itensor;
 };
 
 } // namespace nntrainer