[Trivial] Fix Typo
Fix typos in:
    modified:   nntrainer/dataset/data_iteration.h
    modified:   nntrainer/dataset/data_producer.h
    modified:   nntrainer/dataset/databuffer.h
    modified:   nntrainer/dataset/dir_data_producers.cpp
    modified:   nntrainer/dataset/random_data_producers.cpp
    modified:   nntrainer/layers/preprocess_l2norm_layer.h
    modified:   nntrainer/layers/split_layer.cpp

**Self evaluation:**
1. Build test:	 [X]Passed [ ]Failed [ ]Skipped
2. Run test:	 [X]Passed [ ]Failed [ ]Skipped

Signed-off-by: Donghak PARK <[email protected]>
DonghakPark committed Jul 1, 2024
1 parent 812fcf0 commit daefcee
Showing 8 changed files with 15 additions and 17 deletions.
8 changes: 4 additions & 4 deletions nntrainer/app_context.cpp
@@ -154,7 +154,7 @@ std::vector<std::string> getPluginPaths() {
* where you would like to look for the layers, while NNTRAINER_CONF_PATH is a
* (buildtime hardcoded @a file path) to locate configuration file *.ini file
*/
-/*** @note for now, NNTRAINER_PATH is a SINGLE PATH rather than serise of path
+/*** @note for now, NNTRAINER_PATH is a SINGLE PATH rather than series of path
* like PATH environment variable. this could be improved but for now, it is
* enough
*/
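The note above describes a single-directory environment lookup. As a rough illustration (not the actual nntrainer code), reading NNTRAINER_PATH as one verbatim path might look like:

```cpp
#include <cstdlib>
#include <string>

// A hedged sketch of the lookup the note describes: NNTRAINER_PATH is read
// as one directory, not split on ':' the way PATH would be. Illustrative
// only; not the actual nntrainer implementation.
std::string getLayerPluginDir() {
  const char *env = std::getenv("NNTRAINER_PATH");
  return env ? std::string(env) // a single path, used verbatim
             : std::string();   // unset: caller falls back to the .ini conf
}
```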
@@ -211,7 +211,7 @@ const std::string getFullPath(const std::string &path,
std::mutex factory_mutex;

/**
- * @brief finialize global context
+ * @brief finalize global context
*
*/
static void fini_global_context_nntrainer(void) __attribute__((destructor));
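For readers unfamiliar with the pattern, `__attribute__((destructor))` is a GCC/Clang extension that runs a function automatically at program teardown. A minimal standalone sketch:

```cpp
#include <cstdio>

// GCC/Clang extension: a destructor-attributed function runs automatically
// at program teardown (after main() returns, or on dlclose for a plugin).
static void on_teardown(void) __attribute__((destructor));
static void on_teardown(void) { std::puts("global context finalized"); }

int main() { std::puts("doing work"); }
// Prints "doing work", then "global context finalized".
```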
@@ -221,7 +221,7 @@ static void fini_global_context_nntrainer(void) {}
std::once_flag global_app_context_init_flag;

static void add_default_object(AppContext &ac) {
- /// @note all layers should be added to the app_context to gaurantee that
+ /// @note all layers should be added to the app_context to guarantee that
/// createLayer/createOptimizer class is created
using OptType = ml::train::OptimizerType;
ac.registerFactory(nntrainer::createOptimizer<SGD>, SGD::type, OptType::SGD);
@@ -319,7 +319,7 @@ static void add_default_object(AppContext &ac) {
ac.registerFactory(nntrainer::createLayer<CentroidKNN>, CentroidKNN::type,
LayerType::LAYER_CENTROID_KNN);

- /** proprocess layers */
+ /** preprocess layers */
ac.registerFactory(nntrainer::createLayer<PreprocessFlipLayer>,
PreprocessFlipLayer::type,
LayerType::LAYER_PREPROCESS_FLIP);
2 changes: 1 addition & 1 deletion nntrainer/dataset/data_iteration.h
@@ -141,7 +141,7 @@ class Sample {
* @brief Construct a new Sample object
* @note the batch dimension will be ignored to make a single sample
*
- * @param iter iteration obejcts
+ * @param iter iteration objects
* @param batch nth batch to create the sample
*/
Sample(const Iteration &iter, unsigned int batch);
6 changes: 3 additions & 3 deletions nntrainer/dataset/data_producer.h
@@ -109,18 +109,18 @@ class DataProducer {
}

/**
- * @brief this function helps exporting the dataproducer in a predefined
+ * @brief this function helps exporting the data producer in a predefined
* format, while workarounding issue caused by templated function type eraser
*
- * @param exporter exporter that conatins exporting logic
+ * @param exporter exporter that contains exporting logic
* @param method enum value to identify how it should be exported to
*/
virtual void exportTo(Exporter &exporter,
const ml::train::ExportMethods &method) const {}

/**
* @brief denote if given producer is thread safe and can be parallelized.
- * @note if size() == SIZE_UNDEFIEND, thread safe shall be false
+ * @note if size() == SIZE_UNDEFINED, thread safe shall be false
*
* @return bool true if thread safe.
*/
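The thread-safety note ties into `size()`: a producer of unknown length cannot be safely parallelized. A hedged sketch of that contract, with illustrative names rather than the real nntrainer interface:

```cpp
#include <cstddef>

// Illustrative names only, not the real nntrainer interface: a producer
// whose size() is SIZE_UNDEFINED must not claim to be thread safe.
class StreamingProducerSketch {
public:
  static constexpr std::size_t SIZE_UNDEFINED = ~std::size_t{0};
  std::size_t size() const { return SIZE_UNDEFINED; } // length unknown
  bool isMultiThreadSafe() const { return size() != SIZE_UNDEFINED; }
};
```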
2 changes: 1 addition & 1 deletion nntrainer/dataset/databuffer.h
@@ -139,7 +139,7 @@ class DataBuffer : public ml::train::Dataset {
* @brief this function helps exporting the dataset in a predefined format,
* while workarounding issue caused by templated function type eraser
*
- * @param exporter exporter that conatins exporting logic
+ * @param exporter exporter that contains exporting logic
* @param method enum value to identify how it should be exported to
*/
void exportTo(Exporter &exporter,
6 changes: 2 additions & 4 deletions nntrainer/dataset/dir_data_producers.cpp
@@ -72,9 +72,7 @@ static void readImage(const std::string path, float *input, uint width,
namespace nntrainer {

DirDataProducer::DirDataProducer() :
-  dir_data_props(new Props()),
-  num_class(0),
-  num_data_total(0) {}
+  dir_data_props(new Props()), num_class(0), num_data_total(0) {}

DirDataProducer::DirDataProducer(const std::string &dir_path) :
dir_data_props(new Props(props::DirPath(dir_path))),
@@ -140,7 +138,7 @@ DirDataProducer::finalize(const std::vector<TensorDim> &input_dims,
auto sz = size(input_dims, label_dims);

NNTR_THROW_IF(sz == 0, std::invalid_argument)
<< "size is zero, dataproducer does not provide anything";
<< "size is zero, data producer does not provide anything";

return [sz, input_dims, this](unsigned int idx, std::vector<Tensor> &inputs,
std::vector<Tensor> &labels) {
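The hunk above comes from a `finalize()` that validates the dataset size and then returns a per-sample generator lambda. A self-contained sketch of that shape, with `Tensor` stood in by a float vector for illustration:

```cpp
#include <cstddef>
#include <functional>
#include <stdexcept>
#include <vector>

using Tensor = std::vector<float>; // stand-in for nntrainer::Tensor
using Generator = std::function<void(unsigned int, std::vector<Tensor> &,
                                     std::vector<Tensor> &)>;

// Validate once, then return a callable that fills one sample per call.
Generator finalizeSketch(std::size_t sz) {
  if (sz == 0)
    throw std::invalid_argument(
      "size is zero, data producer does not provide anything");
  return [sz](unsigned int idx, std::vector<Tensor> &inputs,
              std::vector<Tensor> &labels) {
    (void)sz; (void)idx; (void)inputs; (void)labels; // fill sample idx % sz
  };
}
```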
2 changes: 1 addition & 1 deletion nntrainer/dataset/random_data_producers.cpp
@@ -113,7 +113,7 @@ RandomDataOneHotProducer::finalize(const std::vector<TensorDim> &input_dims,

/// @todo move this to higher order component
NNTR_THROW_IF(size(input_dims, label_dims) == 0, std::invalid_argument)
<< "size is zero, dataproducer does not provide anything";
<< "size is zero, data producer does not provide anything";

/** prepare states for the generator */
std::vector<std::uniform_int_distribution<unsigned int>> label_chooser_;
2 changes: 1 addition & 1 deletion nntrainer/layers/preprocess_l2norm_layer.h
@@ -29,7 +29,7 @@ class PreprocessL2NormLayer : public Layer {
public:
/**
* @brief Construct a new L2norm Layer object
- * that normlizes given feature with l2norm
+ * that normalizes given feature with l2norm
*/
PreprocessL2NormLayer() : Layer() {}

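For context, the l2norm preprocessing this layer performs divides a feature vector by its Euclidean norm. A minimal sketch:

```cpp
#include <cmath>
#include <vector>

// Divide v by its Euclidean norm so that ||v||_2 == 1; skip near-zero input.
void l2NormalizeSketch(std::vector<float> &v, float eps = 1e-12f) {
  float sq = 0.0f;
  for (float x : v)
    sq += x * x;
  const float norm = std::sqrt(sq);
  if (norm > eps)
    for (float &x : v)
      x /= norm;
}
```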
4 changes: 2 additions & 2 deletions nntrainer/layers/split_layer.cpp
@@ -44,7 +44,7 @@ void SplitLayer::finalize(InitLayerContext &context) {

/**
* The split is only done along the split_dimension dimension.
- * (Assumes input data is continous)
+ * (Assumes input data is continuous)
* For example, consider input dimension [b,c,h,w], split_number = n
* 1. axis = 1, output_dim = [b,c//n,h,w], num_outputs = n
* 2. axis = 2, output_dim = [b,c,h//n,w], num_outputs = n
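To make the rule above concrete, a worked example (assuming the split axis length is divisible by n):

```cpp
#include <array>
#include <cstdio>

int main() {
  std::array<unsigned, 4> dim = {2, 6, 4, 4}; // [b,c,h,w]
  const unsigned n = 3, axis = 1;             // split channels into n parts
  dim[axis] /= n;                             // shape of each of the n outputs
  std::printf("output_dim = [%u,%u,%u,%u], num_outputs = %u\n",
              dim[0], dim[1], dim[2], dim[3], n);
  // prints: output_dim = [2,2,4,4], num_outputs = 3
}
```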
@@ -75,7 +75,7 @@ void SplitLayer::finalize(InitLayerContext &context) {
* to facilitate easier processing.
*
* The helper shape consolidates all the dimensions before the split_dimension
- * together and all the dimensions after the split_dimension to faciliate
+ * together and all the dimensions after the split_dimension to facilitate
* easier splitting of the data.
*/
leading_helper_dim = 1;
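The consolidation described above amounts to taking products of the dimensions on either side of the split axis. A hedged sketch with illustrative names, not the actual nntrainer code:

```cpp
#include <cstddef>
#include <functional>
#include <numeric>
#include <vector>

// Collapse every dim before the split axis into `leading` and every dim
// after it into `trailing`, so the split reduces to strided 3-D copies.
void helperShapeSketch(const std::vector<std::size_t> &dim, std::size_t axis,
                       std::size_t &leading, std::size_t &trailing) {
  leading = std::accumulate(dim.begin(), dim.begin() + axis,
                            std::size_t{1}, std::multiplies<>());
  trailing = std::accumulate(dim.begin() + axis + 1, dim.end(),
                             std::size_t{1}, std::multiplies<>());
}
```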
