diff --git a/nntrainer/app_context.cpp b/nntrainer/app_context.cpp
index a66bc667af..7c14fd8ae2 100644
--- a/nntrainer/app_context.cpp
+++ b/nntrainer/app_context.cpp
@@ -154,7 +154,7 @@ std::vector<std::string> getPluginPaths() {
  * where you would like to look for the layers, while NNTRAINER_CONF_PATH is a
  * (buildtime hardcoded @a file path) to locate configuration file *.ini file
  */
- /*** @note for now, NNTRAINER_PATH is a SINGLE PATH rather than serise of path
+ /*** @note for now, NNTRAINER_PATH is a SINGLE PATH rather than series of path
  * like PATH environment variable. this could be improved but for now, it is
  * enough
  */
@@ -211,7 +211,7 @@ const std::string getFullPath(const std::string &path,
 std::mutex factory_mutex;
 
 /**
- * @brief finialize global context
+ * @brief finalize global context
  *
  */
 static void fini_global_context_nntrainer(void) __attribute__((destructor));
@@ -221,7 +221,7 @@ static void fini_global_context_nntrainer(void) {}
 std::once_flag global_app_context_init_flag;
 
 static void add_default_object(AppContext &ac) {
-  /// @note all layers should be added to the app_context to gaurantee that
+  /// @note all layers should be added to the app_context to guarantee that
   /// createLayer/createOptimizer class is created
   using OptType = ml::train::OptimizerType;
   ac.registerFactory(nntrainer::createOptimizer, SGD::type, OptType::SGD);
@@ -319,7 +319,7 @@ static void add_default_object(AppContext &ac) {
   ac.registerFactory(nntrainer::createLayer, CentroidKNN::type,
                      LayerType::LAYER_CENTROID_KNN);
 
-  /** proprocess layers */
+  /** preprocess layers */
   ac.registerFactory(nntrainer::createLayer, PreprocessFlipLayer::type,
                      LayerType::LAYER_PREPROCESS_FLIP);
diff --git a/nntrainer/dataset/data_iteration.h b/nntrainer/dataset/data_iteration.h
index efda996e94..93978b1312 100644
--- a/nntrainer/dataset/data_iteration.h
+++ b/nntrainer/dataset/data_iteration.h
@@ -141,7 +141,7 @@ class Sample {
    * @brief Construct a new Sample object
    * @note the batch dimension will be ignored to make a single sample
    *
-   * @param iter iteration obejcts
+   * @param iter iteration objects
    * @param batch nth batch to create the sample
    */
   Sample(const Iteration &iter, unsigned int batch);
diff --git a/nntrainer/dataset/data_producer.h b/nntrainer/dataset/data_producer.h
index 736654a841..f29e6406a8 100644
--- a/nntrainer/dataset/data_producer.h
+++ b/nntrainer/dataset/data_producer.h
@@ -109,10 +109,10 @@ class DataProducer {
   }
 
   /**
-   * @brief this function helps exporting the dataproducer in a predefined
+   * @brief this function helps exporting the data producer in a predefined
    * format, while workarounding issue caused by templated function type eraser
    *
-   * @param exporter exporter that conatins exporting logic
+   * @param exporter exporter that contains exporting logic
    * @param method enum value to identify how it should be exported to
    */
   virtual void exportTo(Exporter &exporter,
@@ -120,7 +120,7 @@ class DataProducer {
 
   /**
    * @brief denote if given producer is thread safe and can be parallelized.
-   * @note if size() == SIZE_UNDEFIEND, thread safe shall be false
+   * @note if size() == SIZE_UNDEFINED, thread safe shall be false
    *
    * @return bool true if thread safe.
    */
diff --git a/nntrainer/dataset/databuffer.h b/nntrainer/dataset/databuffer.h
index 0432d5f7e2..fc10da796d 100644
--- a/nntrainer/dataset/databuffer.h
+++ b/nntrainer/dataset/databuffer.h
@@ -139,7 +139,7 @@ class DataBuffer : public ml::train::Dataset {
    * @brief this function helps exporting the dataset in a predefined format,
    * while workarounding issue caused by templated function type eraser
    *
-   * @param exporter exporter that conatins exporting logic
+   * @param exporter exporter that contains exporting logic
    * @param method enum value to identify how it should be exported to
    */
   void exportTo(Exporter &exporter,
diff --git a/nntrainer/dataset/dir_data_producers.cpp b/nntrainer/dataset/dir_data_producers.cpp
index 8298e44837..3c62923ea2 100644
--- a/nntrainer/dataset/dir_data_producers.cpp
+++ b/nntrainer/dataset/dir_data_producers.cpp
@@ -72,9 +72,7 @@ static void readImage(const std::string path, float *input, uint width,
 namespace nntrainer {
 
 DirDataProducer::DirDataProducer() :
-  dir_data_props(new Props()),
-  num_class(0),
-  num_data_total(0) {}
+  dir_data_props(new Props()), num_class(0), num_data_total(0) {}
 
 DirDataProducer::DirDataProducer(const std::string &dir_path) :
   dir_data_props(new Props(props::DirPath(dir_path))),
@@ -140,7 +138,7 @@ DirDataProducer::finalize(const std::vector<TensorDim> &input_dims,
 
   auto sz = size(input_dims, label_dims);
   NNTR_THROW_IF(sz == 0, std::invalid_argument)
-    << "size is zero, dataproducer does not provide anything";
+    << "size is zero, data producer does not provide anything";
 
   return [sz, input_dims, this](unsigned int idx, std::vector<Tensor> &inputs,
                                 std::vector<Tensor> &labels) {
diff --git a/nntrainer/dataset/random_data_producers.cpp b/nntrainer/dataset/random_data_producers.cpp
index 73d0462c20..a97d5d97be 100644
--- a/nntrainer/dataset/random_data_producers.cpp
+++ b/nntrainer/dataset/random_data_producers.cpp
@@ -113,7 +113,7 @@ RandomDataOneHotProducer::finalize(const std::vector<TensorDim> &input_dims,
 
   /// @todo move this to higher order component
   NNTR_THROW_IF(size(input_dims, label_dims) == 0, std::invalid_argument)
-    << "size is zero, dataproducer does not provide anything";
+    << "size is zero, data producer does not provide anything";
 
   /** prepare states for the generator */
   std::vector> label_chooser_;
diff --git a/nntrainer/layers/preprocess_l2norm_layer.h b/nntrainer/layers/preprocess_l2norm_layer.h
index 4319a0cfc7..9f28debb70 100644
--- a/nntrainer/layers/preprocess_l2norm_layer.h
+++ b/nntrainer/layers/preprocess_l2norm_layer.h
@@ -29,7 +29,7 @@ class PreprocessL2NormLayer : public Layer {
 public:
   /**
    * @brief Construct a new L2norm Layer object
-   * that normlizes given feature with l2norm
+   * that normalizes given feature with l2norm
   */
   PreprocessL2NormLayer() : Layer() {}
diff --git a/nntrainer/layers/split_layer.cpp b/nntrainer/layers/split_layer.cpp
index 2b15f07f42..4e81e67813 100644
--- a/nntrainer/layers/split_layer.cpp
+++ b/nntrainer/layers/split_layer.cpp
@@ -44,7 +44,7 @@ void SplitLayer::finalize(InitLayerContext &context) {
 
   /**
    * The split is only done along the split_dimension dimension.
-   * (Assumes input data is continous)
+   * (Assumes input data is continuous)
    * For example, consider input dimension [b,c,h,w], split_number = n
    * 1. axis = 1, output_dim = [b,c//n,h,w], num_outputs = n
    * 2. axis = 2, output_dim = [b,c,h//n,w], num_outputs = n
@@ -75,7 +75,7 @@ void SplitLayer::finalize(InitLayerContext &context) {
    * to facilitate easier processing.
    *
    * The helper shape consolidates all the dimensions before the split_dimension
-   * together and all the dimensions after the split_dimension to faciliate
+   * together and all the dimensions after the split_dimension to facilitate
    * easier splitting of the data.
    */
   leading_helper_dim = 1;
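For reference, the helper-shape consolidation described in the split_layer.cpp comments above can be sketched as follows: every dimension before the split axis is folded into one leading dimension and every dimension after it into one trailing dimension, so the split reduces to contiguous block copies. This is a minimal sketch using plain std::array shapes rather than nntrainer's TensorDim; only the leading_helper_dim and trailing_helper_dim names come from the patch, the rest is illustrative.

#include <array>
#include <cstddef>
#include <iostream>

int main() {
  // Input shape [b,c,h,w] and split parameters; the values are arbitrary
  // examples, not taken from the patch.
  std::array<std::size_t, 4> input_dim = {2, 6, 4, 4};
  unsigned int axis = 1;         // split along the channel dimension
  unsigned int split_number = 3; // n in the comment's example

  // Consolidate everything before the split axis into one leading dimension.
  std::size_t leading_helper_dim = 1;
  for (unsigned int i = 0; i < axis; ++i)
    leading_helper_dim *= input_dim[i];

  // Consolidate everything after the split axis into one trailing dimension.
  std::size_t trailing_helper_dim = 1;
  for (unsigned int i = axis + 1; i < input_dim.size(); ++i)
    trailing_helper_dim *= input_dim[i];

  // In the consolidated [leading, split, trailing] view each of the n outputs
  // is a contiguous block per leading index; [2,6,4,4] split at axis 1 with
  // n = 3 prints [2, 2, 16] here.
  std::cout << "[" << leading_helper_dim << ", "
            << input_dim[axis] / split_number << ", " << trailing_helper_dim
            << "]\n";
}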
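Similarly, the finalize() hunks in dir_data_producers.cpp and random_data_producers.cpp show the producer contract: validate size() up front, then return a generator that fills caller-owned input and label buffers for a given sample index. A minimal sketch of that shape, with Tensor stubbed as std::vector<float> and a hypothetical makeGenerator helper; the real nntrainer Generator signature may differ.

#include <algorithm>
#include <cstddef>
#include <functional>
#include <stdexcept>
#include <vector>

using Tensor = std::vector<float>; // stand-in for nntrainer::Tensor
using Generator = std::function<void(
  unsigned int idx, std::vector<Tensor> &inputs, std::vector<Tensor> &labels)>;

// Validate first, as the NNTR_THROW_IF lines in the diff do, then hand back
// a callable that produces the idx-th sample into pre-allocated buffers.
Generator makeGenerator(std::size_t sz) {
  if (sz == 0)
    throw std::invalid_argument(
      "size is zero, data producer does not provide anything");
  return [sz](unsigned int idx, std::vector<Tensor> &inputs,
              std::vector<Tensor> &labels) {
    if (idx >= sz)
      throw std::out_of_range("sample index out of range");
    // A real producer would decode an image or draw random data here;
    // zero-filling keeps the sketch self-contained.
    for (auto &in : inputs)
      std::fill(in.begin(), in.end(), 0.0f);
    for (auto &label : labels)
      std::fill(label.begin(), label.end(), 0.0f);
  };
}

int main() {
  auto gen = makeGenerator(10);
  std::vector<Tensor> inputs{Tensor(8)}, labels{Tensor(2)};
  gen(3, inputs, labels); // fills the buffers for the 4th sample
}

Calling makeGenerator(0) reproduces the "size is zero" error path that both producers guard against in the patch.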