From a616d714c5e702bdace6fbaadc0d968037bfeb29 Mon Sep 17 00:00:00 2001 From: Ahmed Essam Date: Wed, 2 Sep 2020 05:13:47 +0200 Subject: [PATCH 1/6] Auto-dispatch create methods in Python --- examples/meta/generator/targets/python.json | 32 ++- .../undocumented/python/classifier_larank.py | 2 +- .../python/classifier_multiclassocas.py | 4 +- .../classifier_multilabeloutputliblinear.py | 2 +- .../python/converter_hasheddoc.py | 9 +- .../converter_locallylinearembedding.py | 2 +- .../python/distance_canberraword.py | 4 +- .../python/distance_director_euclidean.py | 2 +- .../python/distance_hammingword.py | 4 +- .../python/distance_manhattanword.py | 4 +- .../python/distance_normsquared.py | 2 +- .../python/evaluation_clustering_simple.py | 8 +- .../evaluation_contingencytableevaluation.py | 20 +- ...ion_cross_validation_mkl_weight_storage.py | 22 +-- ...ion_cross_validation_multiclass_storage.py | 8 +- .../python/evaluation_meansquarederror.py | 2 +- .../python/evaluation_meansquaredlogerror.py | 2 +- .../python/evaluation_prcevaluation.py | 2 +- .../python/evaluation_rocevaluation.py | 2 +- .../python/evaluation_thresholds.py | 2 +- .../python/features_hasheddocdot.py | 6 +- ..._gaussian_process_binary_classification.py | 2 +- .../classifier_perceptron_graphical.py | 2 +- .../python/graphical/cluster_kmeans.py | 4 +- .../python/graphical/cluster_kpp.py | 6 +- .../python/graphical/converter_algorithms.py | 2 +- .../python/graphical/converter_spe_helix.py | 4 +- .../python/graphical/interactive_kmm_demo.py | 2 +- .../python/graphical/interactive_svm_demo.py | 2 +- .../python/graphical/interactive_svr_demo.py | 2 +- .../graphical/kernel_ridge_regression.py | 4 +- .../graphical/kernel_ridge_regression_sinc.py | 4 +- examples/undocumented/python/graphical/lda.py | 2 +- .../undocumented/python/graphical/mclda.py | 2 +- .../graphical/statistics_linear_time_mmd.py | 4 +- .../statistics_quadratic_time_mmd.py | 4 +- .../undocumented/python/kernel_combined.py | 12 +- .../python/kernel_combined_custom_poly.py | 18 +- .../python/kernel_comm_ulong_string.py | 4 +- .../python/kernel_comm_word_string.py | 4 +- .../python/kernel_director_linear.py | 6 +- examples/undocumented/python/kernel_fisher.py | 2 +- .../python/kernel_fixed_degree_string.py | 2 +- .../python/kernel_histogram_word_string.py | 4 +- examples/undocumented/python/kernel_io.py | 2 +- examples/undocumented/python/kernel_linear.py | 4 +- .../undocumented/python/kernel_linear_word.py | 4 +- .../python/kernel_local_alignment_string.py | 2 +- .../python/kernel_locality_improved_string.py | 2 +- .../python/kernel_match_word_string.py | 4 +- .../python/kernel_poly_match_string.py | 2 +- .../python/kernel_poly_match_word_string.py | 2 +- .../python/kernel_rationalquadratic.py | 4 +- .../python/kernel_salzberg_word_string.py | 4 +- .../kernel_simple_locality_improved_string.py | 2 +- .../python/kernel_sparse_gaussian.py | 2 +- .../python/kernel_sparse_linear.py | 4 +- .../undocumented/python/kernel_sparse_poly.py | 2 +- examples/undocumented/python/kernel_spline.py | 2 +- .../undocumented/python/kernel_ssk_string.py | 2 +- examples/undocumented/python/kernel_top.py | 2 +- .../undocumented/python/kernel_tstudent.py | 4 +- examples/undocumented/python/kernel_wave.py | 4 +- .../undocumented/python/kernel_wavelet.py | 2 +- .../kernel_weighted_comm_word_string.py | 4 +- .../undocumented/python/mkl_multiclass.py | 16 +- .../python/preprocessor_sortulongstring.py | 4 +- .../python/preprocessor_sortwordstring.py | 4 +- .../python/stochasticgbmachine.py | 6 +-
.../python/structure_discrete_hmsvm_bmrm.py | 8 +- .../python/structure_discrete_hmsvm_mosek.py | 10 +- .../python/structure_factor_graph_model.py | 18 +- .../python/structure_graphcuts.py | 8 +- ..._hierarchical_multilabel_classification.py | 6 +- .../python/structure_plif_hmsvm_bmrm.py | 2 +- .../python/structure_plif_hmsvm_mosek.py | 2 +- .../tests_check_commwordkernel_memleak.py | 6 +- ...multitask_clustered_logistic_regression.py | 2 +- .../python/variational_classifier.py | 2 +- src/interfaces/python/factory_python.i | 183 ++++++++++++++++++ src/interfaces/python/swig_typemaps.i | 23 +-- src/interfaces/swig/factory.i | 15 +- src/shogun/base/base_types.h | 77 +++++--- src/shogun/base/class_list.cpp.py | 32 ++- src/shogun/base/class_list.cpp.templ | 11 +- src/shogun/base/class_list.h | 12 +- src/shogun/lib/sg_types.h | 2 +- .../util/visitors/InterfaceTypeVisitor.h | 74 +++++++ 88 files changed, 568 insertions(+), 254 deletions(-) create mode 100644 src/interfaces/python/factory_python.i create mode 100644 src/shogun/util/visitors/InterfaceTypeVisitor.h diff --git a/examples/meta/generator/targets/python.json b/examples/meta/generator/targets/python.json index dae92554339..d3008925e83 100644 --- a/examples/meta/generator/targets/python.json +++ b/examples/meta/generator/targets/python.json @@ -2,7 +2,7 @@ "Program": "import numpy as np\n$dependencies\n\n$program", "Dependencies": { "IncludeAllClasses": false, - "IncludeInterfacedClasses": true, + "IncludeInterfacedClasses": true, "IncludeEnums": true, "IncludeGlobalFunctions": true, "DependencyListElement": "import shogun as sg", @@ -81,7 +81,35 @@ "get_string": "$object.get($arguments)" }, "StaticCall": "sg.$typeName.$method($arguments)", - "GlobalCall": {"Default": "sg.$method($arguments$kwargs)"}, + "GlobalCall": { + "Default": "sg.$method($arguments$kwargs)", + "create_svm": "sg.create($arguments$kwargs)", + "create_evaluation": "sg.create($arguments$kwargs)", + "create_multiclass_strategy": "sg.create($arguments$kwargs)", + "create_ecoc_encoder": "sg.create($arguments$kwargs)", + "create_ecoc_decoder": "sg.create($arguments$kwargs)", + "create_transformer": "sg.create($arguments$kwargs)", + "create_layer": "sg.create($arguments$kwargs)", + "create_splitting_strategy": "sg.create($arguments$kwargs)", + "create_machine_evaluation": "sg.create($arguments$kwargs)", + "create_gp_likelihood": "sg.create($arguments$kwargs)", + "create_gp_mean": "sg.create($arguments$kwargs)", + "create_gp_inference": "sg.create($arguments$kwargs)", + "create_differentiable": "sg.create($arguments$kwargs)", + "create_loss": "sg.create($arguments$kwargs)", + "create_parameter_observer": "sg.create($arguments$kwargs)", + "create_evaluation_result": "sg.create($arguments$kwargs)", + "create_distribution": "sg.create($arguments$kwargs)", + "create_combination_rule": "sg.create($arguments$kwargs)", + "create_distance": "sg.create($arguments$kwargs)", + "create_machine": "sg.create($arguments$kwargs)", + "create_structured_model": "sg.create($arguments$kwargs)", + "create_factor_type": "sg.create($arguments$kwargs)", + "create_gaussian_process": "sg.create($arguments$kwargs)", + "create_minimizer": "sg.create($arguments$kwargs)", + "create_lbfgs_minimizer": "sg.create($arguments$kwargs)", + "create_kernel_normalizer": "sg.create($arguments$kwargs)" + }, "Identifier": "$identifier", "Enum":"sg.$value" }, diff --git a/examples/undocumented/python/classifier_larank.py b/examples/undocumented/python/classifier_larank.py index d99b5d1f7cd..6c2689754d7 100644 --- 
a/examples/undocumented/python/classifier_larank.py +++ b/examples/undocumented/python/classifier_larank.py @@ -24,7 +24,7 @@ def classifier_larank (num_vec,num_class,distance,C=0.9,num_threads=1,num_iter=5 feats_train=sg.create_features(fm_train) feats_test=sg.create_features(fm_test) - kernel=sg.create_kernel("GaussianKernel", width=1) + kernel=sg.create("GaussianKernel", width=1) epsilon=1e-5 labels=MulticlassLabels(label_train) diff --git a/examples/undocumented/python/classifier_multiclassocas.py b/examples/undocumented/python/classifier_multiclassocas.py index 8febc57ec6a..f658a2a6799 100644 --- a/examples/undocumented/python/classifier_multiclassocas.py +++ b/examples/undocumented/python/classifier_multiclassocas.py @@ -6,7 +6,7 @@ def classifier_multiclassocas (num_vec=10,num_class=3,distance=15,width=2.1,C=1, from shogun import MulticlassLabels import shogun as sg try: - sg.create_machine("MulticlassOCAS") + sg.create("MulticlassOCAS") except ImportError: print("MulticlassOCAS not available") return @@ -28,7 +28,7 @@ def classifier_multiclassocas (num_vec=10,num_class=3,distance=15,width=2.1,C=1, labels=sg.create_labels(label_train) - classifier = sg.create_machine("MulticlassOCAS", labels=labels, C=C) + classifier = sg.create("MulticlassOCAS", labels=labels, C=C) classifier.train(feats_train) out = classifier.apply(feats_test).get("labels") diff --git a/examples/undocumented/python/classifier_multilabeloutputliblinear.py b/examples/undocumented/python/classifier_multilabeloutputliblinear.py index 1bd417d8c19..37c9ec6be83 100644 --- a/examples/undocumented/python/classifier_multilabeloutputliblinear.py +++ b/examples/undocumented/python/classifier_multilabeloutputliblinear.py @@ -14,7 +14,7 @@ def classifier_multilabeloutputliblinear (fm_train_real=traindat,fm_test_real=te labels=MulticlassLabels(label_train_multiclass) - classifier = sg.create_machine("MulticlassLibLinear", C=C, labels=labels) + classifier = sg.create("MulticlassLibLinear", C=C, labels=labels) classifier.train(feats_train) # TODO: figure out the new style API for the below call, disabling for now diff --git a/examples/undocumented/python/converter_hasheddoc.py b/examples/undocumented/python/converter_hasheddoc.py index 053698630f4..986f08981d6 100644 --- a/examples/undocumented/python/converter_hasheddoc.py +++ b/examples/undocumented/python/converter_hasheddoc.py @@ -20,7 +20,7 @@ def converter_hasheddoc(strings): normalize=True #create converter - converter = sg.create_transformer('HashedDocConverter', tokenizer=tokenizer, num_bits=num_bits, should_normalize=normalize) + converter = sg.create('HashedDocConverter', tokenizer=tokenizer, num_bits=num_bits, should_normalize=normalize) converted_feats=converter.transform(f) @@ -29,9 +29,9 @@ def converter_hasheddoc(strings): #print('Self dot product of string 0 with converted feats:', converted_feats.dot(0, converted_feats, 0)) - hashed_feats=sg.create_features("HashedDocDotFeatures", num_bits=num_bits, - doc_collection=f, tokenizer=tokenizer, - should_normalize=normalize) + hashed_feats = sg.create("HashedDocDotFeatures", num_bits=num_bits, + doc_collection=f, tokenizer=tokenizer, + should_normalize=normalize) #print('Hashed features\' space dimensionality is', hashed_feats.get_dim_feature_space()) @@ -43,4 +43,3 @@ def converter_hasheddoc(strings): print('HashedDocConverter') converter_hasheddoc(*parameter_list[0]) - diff --git a/examples/undocumented/python/converter_locallylinearembedding.py b/examples/undocumented/python/converter_locallylinearembedding.py 
index 2a2b8d4e918..0ffeb0d2874 100644 --- a/examples/undocumented/python/converter_locallylinearembedding.py +++ b/examples/undocumented/python/converter_locallylinearembedding.py @@ -12,7 +12,7 @@ def converter_locallylinearembeeding (data, k): features = sg.create_features(data) - converter = sg.create_transformer('LocallyLinearEmbedding', k=k) + converter = sg.create('LocallyLinearEmbedding', k=k) converter.fit(features) features = converter.transform(features) diff --git a/examples/undocumented/python/distance_canberraword.py b/examples/undocumented/python/distance_canberraword.py index 4f46e584e38..a99ca6ac1aa 100644 --- a/examples/undocumented/python/distance_canberraword.py +++ b/examples/undocumented/python/distance_canberraword.py @@ -12,7 +12,7 @@ def distance_canberraword (fm_train_dna=traindna,fm_test_dna=testdna,order=3,gap=0,reverse=False): charfeat=sg.create_string_features(fm_train_dna, sg.DNA) feats_train=sg.create_string_features(charfeat, order-1, order, gap, reverse) - preproc = sg.create_transformer("SortWordString") + preproc = sg.create("SortWordString") preproc.fit(feats_train) feats_train = preproc.transform(feats_train) @@ -20,7 +20,7 @@ def distance_canberraword (fm_train_dna=traindna,fm_test_dna=testdna,order=3,gap feats_test=sg.create_string_features(charfeat, order-1, order, gap, reverse) feats_test = preproc.transform(feats_test) - distance = sg.create_distance("CanberraWordDistance") + distance = sg.create("CanberraWordDistance") distance.init(feats_train, feats_train) dm_train=distance.get_distance_matrix() diff --git a/examples/undocumented/python/distance_director_euclidean.py b/examples/undocumented/python/distance_director_euclidean.py index 55226ced5d0..786ce61db3e 100644 --- a/examples/undocumented/python/distance_director_euclidean.py +++ b/examples/undocumented/python/distance_director_euclidean.py @@ -29,7 +29,7 @@ def distance_function(self, idx_a, idx_b): feats_train.get_global_parallel().set_num_threads(1) feats_test=sg.create_features(fm_test_real) - distance=sg.create_distance("EuclideanDistance") + distance=sg.create("EuclideanDistance") distance.init(feats_train, feats_test) ddistance=DirectorEuclideanDistance() diff --git a/examples/undocumented/python/distance_hammingword.py b/examples/undocumented/python/distance_hammingword.py index 152f114e748..48d44b32ccc 100644 --- a/examples/undocumented/python/distance_hammingword.py +++ b/examples/undocumented/python/distance_hammingword.py @@ -15,7 +15,7 @@ def distance_hammingword (fm_train_dna=traindna,fm_test_dna=testdna, charfeat=sg.create_string_features(fm_train_dna, sg.DNA) feats_train=sg.create_string_features(charfeat, order-1, order, gap, reverse) - preproc = sg.create_transformer("SortWordString") + preproc = sg.create("SortWordString") preproc.fit(feats_train) feats_train = preproc.transform(feats_train) @@ -23,7 +23,7 @@ def distance_hammingword (fm_train_dna=traindna,fm_test_dna=testdna, feats_test=sg.create_string_features(charfeat, order-1, order, gap, reverse) feats_test = preproc.transform(feats_test) - distance = sg.create_distance("HammingWordDistance", use_sign=use_sign) + distance = sg.create("HammingWordDistance", use_sign=use_sign) distance.init(feats_train, feats_train) dm_train=distance.get_distance_matrix() diff --git a/examples/undocumented/python/distance_manhattanword.py b/examples/undocumented/python/distance_manhattanword.py index aee68c6c16e..d622eb4b3bc 100644 --- a/examples/undocumented/python/distance_manhattanword.py +++ 
b/examples/undocumented/python/distance_manhattanword.py @@ -9,7 +9,7 @@ def distance_manhattenword (train_fname=traindna,test_fname=testdna,order=3,gap= charfeat=sg.create_string_features(sg.read_csv(train_fname), sg.DNA) feats_train=sg.create_string_features(charfeat, order-1, order, gap, reverse) - preproc = sg.create_transformer("SortWordString") + preproc = sg.create("SortWordString") preproc.fit(feats_train) feats_train = preproc.transform(feats_train) @@ -17,7 +17,7 @@ def distance_manhattenword (train_fname=traindna,test_fname=testdna,order=3,gap= feats_test=sg.create_string_features(charfeat, order-1, order, gap, reverse) feats_test = preproc.transform(feats_test) - distance = sg.create_distance('ManhattanWordDistance') + distance = sg.create('ManhattanWordDistance') distance.init(feats_train, feats_train) dm_train=distance.get_distance_matrix() diff --git a/examples/undocumented/python/distance_normsquared.py b/examples/undocumented/python/distance_normsquared.py index 2a4682b45c2..4ecd54009f0 100644 --- a/examples/undocumented/python/distance_normsquared.py +++ b/examples/undocumented/python/distance_normsquared.py @@ -10,7 +10,7 @@ def distance_normsquared (train_fname=traindat,test_fname=testdat): feats_train=sg.create_features(sg.read_csv(train_fname)) feats_test=sg.create_features(sg.read_csv(test_fname)) - distance = sg.create_distance('EuclideanDistance', disable_sqrt=True) + distance = sg.create('EuclideanDistance', disable_sqrt=True) distance.init(feats_train, feats_train) dm_train=distance.get_distance_matrix() diff --git a/examples/undocumented/python/evaluation_clustering_simple.py b/examples/undocumented/python/evaluation_clustering_simple.py index b0ddfcb03a2..342948a5d51 100644 --- a/examples/undocumented/python/evaluation_clustering_simple.py +++ b/examples/undocumented/python/evaluation_clustering_simple.py @@ -7,9 +7,9 @@ def run_clustering(data, k): - distance = sg.create_distance('EuclideanDistance') + distance = sg.create('EuclideanDistance') distance.init(data, data) - kmeans=sg.create_machine("KMeans", k=k, distance=distance, seed=1) + kmeans=sg.create("KMeans", k=k, distance=distance, seed=1) #print("Running clustering...") kmeans.train() @@ -23,9 +23,9 @@ def assign_labels(data, centroids, ncenters): labels = MulticlassLabels(arange(0.,ncenters)) fea_centroids = sg.create_features(centroids) - distance = sg.create_distance('EuclideanDistance') + distance = sg.create('EuclideanDistance') distance.init(fea_centroids, fea_centroids) - knn = sg.create_machine("KNN", k=1, distance=distance, labels=labels) + knn = sg.create("KNN", k=1, distance=distance, labels=labels) knn.train() return knn.apply(data) diff --git a/examples/undocumented/python/evaluation_contingencytableevaluation.py b/examples/undocumented/python/evaluation_contingencytableevaluation.py index 5bdefc75346..036c27fda07 100644 --- a/examples/undocumented/python/evaluation_contingencytableevaluation.py +++ b/examples/undocumented/python/evaluation_contingencytableevaluation.py @@ -16,34 +16,34 @@ def evaluation_contingencytableevaluation (ground_truth, predicted): ground_truth_labels = BinaryLabels(ground_truth) predicted_labels = BinaryLabels(predicted) - base_evaluator = sg.create_evaluation("ContingencyTableEvaluation") + base_evaluator = sg.create("ContingencyTableEvaluation") base_evaluator.evaluate(predicted_labels,ground_truth_labels) - evaluator = sg.create_evaluation("AccuracyMeasure") + evaluator = sg.create("AccuracyMeasure") accuracy = 
evaluator.evaluate(predicted_labels,ground_truth_labels) - evaluator = sg.create_evaluation("ErrorRateMeasure") + evaluator = sg.create("ErrorRateMeasure") errorrate = evaluator.evaluate(predicted_labels,ground_truth_labels) - evaluator = sg.create_evaluation("BALMeasure") + evaluator = sg.create("BALMeasure") bal = evaluator.evaluate(predicted_labels,ground_truth_labels) - evaluator = sg.create_evaluation("WRACCMeasure") + evaluator = sg.create("WRACCMeasure") wracc = evaluator.evaluate(predicted_labels,ground_truth_labels) - evaluator = sg.create_evaluation("F1Measure") + evaluator = sg.create("F1Measure") f1 = evaluator.evaluate(predicted_labels,ground_truth_labels) - evaluator = sg.create_evaluation("CrossCorrelationMeasure") + evaluator = sg.create("CrossCorrelationMeasure") crosscorrelation = evaluator.evaluate(predicted_labels,ground_truth_labels) - evaluator = sg.create_evaluation("RecallMeasure") + evaluator = sg.create("RecallMeasure") recall = evaluator.evaluate(predicted_labels,ground_truth_labels) - evaluator = sg.create_evaluation("PrecisionMeasure") + evaluator = sg.create("PrecisionMeasure") precision = evaluator.evaluate(predicted_labels,ground_truth_labels) - evaluator = sg.create_evaluation("SpecificityMeasure") + evaluator = sg.create("SpecificityMeasure") specificity = evaluator.evaluate(predicted_labels,ground_truth_labels) return accuracy, errorrate, bal, wracc, f1, crosscorrelation, recall, precision, specificity diff --git a/examples/undocumented/python/evaluation_cross_validation_mkl_weight_storage.py b/examples/undocumented/python/evaluation_cross_validation_mkl_weight_storage.py index 4686fafc984..42ebec6623c 100644 --- a/examples/undocumented/python/evaluation_cross_validation_mkl_weight_storage.py +++ b/examples/undocumented/python/evaluation_cross_validation_mkl_weight_storage.py @@ -23,40 +23,40 @@ def evaluation_cross_validation_mkl_weight_storage(traindat=traindat, label_trai # training data, combined features all on same data features=sg.create_features(traindat) - comb_features=sg.create_features("CombinedFeatures") + comb_features=sg.create("CombinedFeatures") comb_features.add("feature_array", features) comb_features.add("feature_array", features) comb_features.add("feature_array", features) labels=BinaryLabels(label_traindat) # kernel, different Gaussians combined - kernel=sg.create_kernel("CombinedKernel") - kernel.add("kernel_array", sg.create_kernel("GaussianKernel", width=0.1)) - kernel.add("kernel_array", sg.create_kernel("GaussianKernel", width=1)) - kernel.add("kernel_array", sg.create_kernel("GaussianKernel", width=2)) + kernel=sg.create("CombinedKernel") + kernel.add("kernel_array", sg.create("GaussianKernel", width=0.1)) + kernel.add("kernel_array", sg.create("GaussianKernel", width=1)) + kernel.add("kernel_array", sg.create("GaussianKernel", width=2)) # create MKL using LibSVM; due to a memory bug, interleaved optimization is not possible - libsvm = sg.create_machine("LibSVM") - svm = sg.create_machine("MKLClassification", svm=sg.as_svm(libsvm), + libsvm = sg.create("LibSVM") + svm = sg.create("MKLClassification", svm=sg.as_svm(libsvm), interleaved_optimization=False, kernel=kernel) # splitting strategy for 5-fold cross-validation (for classification it's better # to use "StratifiedCrossValidation", but the standard # "StratifiedCrossValidationSplitting" is also available) - splitting_strategy = sg.create_splitting_strategy( + splitting_strategy = sg.create( "StratifiedCrossValidationSplitting", labels=labels, num_subsets=5) # evaluation method -
evaluation_criterium=sg.create_evaluation("ContingencyTableEvaluation", type="ACCURACY") + evaluation_criterium=sg.create("ContingencyTableEvaluation", type="ACCURACY") # cross-validation instance - cross_validation = sg.create_machine_evaluation( + cross_validation = sg.create( "CrossValidation", machine=svm, features=comb_features, labels=labels, splitting_strategy=splitting_strategy, evaluation_criterion=evaluation_criterium, num_runs=3) # append cross-validation output classes - mkl_storage=sg.create_parameter_observer("ParameterObserverCV") + mkl_storage=sg.create("ParameterObserverCV") cross_validation.subscribe(mkl_storage) # perform cross-validation diff --git a/examples/undocumented/python/evaluation_cross_validation_multiclass_storage.py b/examples/undocumented/python/evaluation_cross_validation_multiclass_storage.py index d3bf8606c2a..45f9b7e5765 100644 --- a/examples/undocumented/python/evaluation_cross_validation_multiclass_storage.py +++ b/examples/undocumented/python/evaluation_cross_validation_multiclass_storage.py @@ -40,10 +40,10 @@ def evaluation_cross_validation_multiclass_storage (traindat=traindat, label_tra labels=MulticlassLabels(label_traindat) # kernel, different Gaussians combined - kernel=sg.create_kernel("CombinedKernel") - kernel.add("kernel_array", sg.create_kernel("GaussianKernel", width=0.1)) - kernel.add("kernel_array", sg.create_kernel("GaussianKernel", width=1)) - kernel.add("kernel_array", sg.create_kernel("GaussianKernel", width=2)) + kernel=sg.create("CombinedKernel") + kernel.add("kernel_array", sg.create("GaussianKernel", width=0.1)) + kernel.add("kernel_array", sg.create("GaussianKernel", width=1)) + kernel.add("kernel_array", sg.create("GaussianKernel", width=2)) # create MKL using LibSVM; due to a memory bug, interleaved optimization is not possible svm=MKLMulticlass(1.0,kernel,labels); diff --git a/examples/undocumented/python/evaluation_meansquarederror.py b/examples/undocumented/python/evaluation_meansquarederror.py index b922e33ec13..46352748560 100644 --- a/examples/undocumented/python/evaluation_meansquarederror.py +++ b/examples/undocumented/python/evaluation_meansquarederror.py @@ -18,7 +18,7 @@ def evaluation_meansquarederror (ground_truth, predicted): ground_truth_labels = RegressionLabels(ground_truth) predicted_labels = RegressionLabels(predicted) - evaluator = sg.create_evaluation("MeanSquaredError") + evaluator = sg.create("MeanSquaredError") mse = evaluator.evaluate(predicted_labels,ground_truth_labels) return mse diff --git a/examples/undocumented/python/evaluation_meansquaredlogerror.py b/examples/undocumented/python/evaluation_meansquaredlogerror.py index f11180cec7b..bbb2576ce45 100644 --- a/examples/undocumented/python/evaluation_meansquaredlogerror.py +++ b/examples/undocumented/python/evaluation_meansquaredlogerror.py @@ -18,7 +18,7 @@ def evaluation_meansquaredlogerror (ground_truth, predicted): ground_truth_labels = RegressionLabels(ground_truth) predicted_labels = RegressionLabels(predicted) - evaluator = sg.create_evaluation("MeanSquaredLogError") + evaluator = sg.create("MeanSquaredLogError") mse = evaluator.evaluate(predicted_labels,ground_truth_labels) return mse diff --git a/examples/undocumented/python/evaluation_prcevaluation.py b/examples/undocumented/python/evaluation_prcevaluation.py index 3636a0a00c1..6914a03ebe6 100644 --- a/examples/undocumented/python/evaluation_prcevaluation.py +++ b/examples/undocumented/python/evaluation_prcevaluation.py @@ -16,7 +16,7 @@ def evaluation_prcevaluation (ground_truth, predicted):
ground_truth_labels = BinaryLabels(ground_truth) predicted_labels = BinaryLabels(predicted) - evaluator = sg.create_evaluation("PRCEvaluation") + evaluator = sg.create("PRCEvaluation") evaluator.evaluate(predicted_labels,ground_truth_labels) return evaluator.get("PRC"), evaluator.get("auPRC") diff --git a/examples/undocumented/python/evaluation_rocevaluation.py b/examples/undocumented/python/evaluation_rocevaluation.py index d5f91abf174..2fd9fcc9836 100644 --- a/examples/undocumented/python/evaluation_rocevaluation.py +++ b/examples/undocumented/python/evaluation_rocevaluation.py @@ -16,7 +16,7 @@ def evaluation_rocevaluation (ground_truth, predicted): ground_truth_labels = BinaryLabels(ground_truth) predicted_labels = BinaryLabels(predicted) - evaluator = sg.create_evaluation("ROCEvaluation") + evaluator = sg.create("ROCEvaluation") evaluator.evaluate(predicted_labels,ground_truth_labels) return evaluator.get("ROC"), evaluator.get("auROC") diff --git a/examples/undocumented/python/evaluation_thresholds.py b/examples/undocumented/python/evaluation_thresholds.py index 40ea0963206..444e3dc8089 100644 --- a/examples/undocumented/python/evaluation_thresholds.py +++ b/examples/undocumented/python/evaluation_thresholds.py @@ -16,7 +16,7 @@ def evaluation_thresholds (index): pred=BinaryLabels(output) truth=BinaryLabels(label) - evaluator=sg.create_evaluation("ROCEvaluation") + evaluator=sg.create("ROCEvaluation") evaluator.evaluate(pred, truth) [fp,tp]=evaluator.get("ROC") diff --git a/examples/undocumented/python/features_hasheddocdot.py b/examples/undocumented/python/features_hasheddocdot.py index cae881e7abe..bae80cf7b5a 100644 --- a/examples/undocumented/python/features_hasheddocdot.py +++ b/examples/undocumented/python/features_hasheddocdot.py @@ -19,9 +19,9 @@ def features_hasheddocdot(strings): normalize=True #create HashedDocDot features - hddf = sg.create_features("HashedDocDotFeatures", num_bits=num_bits, - doc_collection=f, tokenizer=tokenizer, - should_normalize=normalize) + hddf = sg.create("HashedDocDotFeatures", num_bits=num_bits, + doc_collection=f, tokenizer=tokenizer, + should_normalize=normalize) #should expect 32 #print('Feature space dimensionality is', hddf.get_dim_feature_space()) diff --git a/examples/undocumented/python/graphical/classifier_gaussian_process_binary_classification.py b/examples/undocumented/python/graphical/classifier_gaussian_process_binary_classification.py index 74f7bc26842..afb8dd70aed 100644 --- a/examples/undocumented/python/graphical/classifier_gaussian_process_binary_classification.py +++ b/examples/undocumented/python/graphical/classifier_gaussian_process_binary_classification.py @@ -43,7 +43,7 @@ def gaussian_process_binary_classification_laplace(X_train, y_train, n_test=50): test_features = sg.create_features(X_test) # create Gaussian kernel with width = 2.0 - kernel = sg.create_kernel('GaussianKernel', width=2.0) + kernel = sg.create('GaussianKernel', width=2.0) # create zero mean function mean = sg.gp_mean("ZeroMean") diff --git a/examples/undocumented/python/graphical/classifier_perceptron_graphical.py b/examples/undocumented/python/graphical/classifier_perceptron_graphical.py index 1a93a7a3bb8..dda532d3aa0 100644 --- a/examples/undocumented/python/graphical/classifier_perceptron_graphical.py +++ b/examples/undocumented/python/graphical/classifier_perceptron_graphical.py @@ -23,7 +23,7 @@ def classifier_perceptron_graphical(n=100, distance=5, learn_rate=1, max_iter=10 feats_train = sg.create_features(fm_train_real) labels = 
sg.create_labels(label_train_twoclass) - perceptron = sg.create_machine('Perceptron', labels=labels, learn_rate=learn_rate, max_iterations=max_iter, + perceptron = sg.create('Perceptron', labels=labels, learn_rate=learn_rate, max_iterations=max_iter, initialize_hyperplane=False) # Find limits for visualization diff --git a/examples/undocumented/python/graphical/cluster_kmeans.py b/examples/undocumented/python/graphical/cluster_kmeans.py index f2b83fb81f5..8bdd5986887 100644 --- a/examples/undocumented/python/graphical/cluster_kmeans.py +++ b/examples/undocumented/python/graphical/cluster_kmeans.py @@ -15,9 +15,9 @@ trainlab = np.concatenate((np.ones(num), 2 * np.ones(num), 3 * np.ones(num), 4 * np.ones(num))) feats_train = sg.create_features(traindat) -distance = sg.create_distance('EuclideanDistance') +distance = sg.create('EuclideanDistance') distance.init(feats_train, feats_train) -kmeans = sg.create_machine('KMeans', k=k, distance=distance) +kmeans = sg.create('KMeans', k=k, distance=distance) kmeans.train() centers = kmeans.get('cluster_centers') diff --git a/examples/undocumented/python/graphical/cluster_kpp.py b/examples/undocumented/python/graphical/cluster_kpp.py index bde07634429..91c505f6f5a 100644 --- a/examples/undocumented/python/graphical/cluster_kpp.py +++ b/examples/undocumented/python/graphical/cluster_kpp.py @@ -20,15 +20,15 @@ traindata = np.concatenate((d1, d2, d3, d4), 1) feat_train = sg.create_features(traindata) -distance = sg.create_distance('EuclideanDistance') +distance = sg.create('EuclideanDistance') distance.init(feat_train, feat_train) -kmeans = sg.create_machine('KMeans', k=k, distance=distance, kmeanspp=True) +kmeans = sg.create('KMeans', k=k, distance=distance, kmeanspp=True) kmeans.train() centerspp = kmeans.get('cluster_centers') radipp = kmeans.get('radiuses') -kmeans = sg.create_machine('KMeans', k=k, distance=distance) +kmeans = sg.create('KMeans', k=k, distance=distance) kmeans.train() centers = kmeans.get('cluster_centers') radi = kmeans.get('radiuses') diff --git a/examples/undocumented/python/graphical/converter_algorithms.py b/examples/undocumented/python/graphical/converter_algorithms.py index 635f76e8f70..aab34196564 100644 --- a/examples/undocumented/python/graphical/converter_algorithms.py +++ b/examples/undocumented/python/graphical/converter_algorithms.py @@ -48,7 +48,7 @@ def get_signals_matrix(time_start, time_stop, signal_fun1, signal_fun2): mixed_signals = sg.create_features(X) # Separating - transformer = sg.create_transformer(converter) + transformer = sg.create(converter) transformer.fit(mixed_signals) signals = transformer.transform(mixed_signals) S_ = signals.get('feature_matrix') diff --git a/examples/undocumented/python/graphical/converter_spe_helix.py b/examples/undocumented/python/graphical/converter_spe_helix.py index 9d6ed1cd2db..5e4890b43de 100644 --- a/examples/undocumented/python/graphical/converter_spe_helix.py +++ b/examples/undocumented/python/graphical/converter_spe_helix.py @@ -50,7 +50,7 @@ features = sg.create_features(X) # Create Stochastic Proximity Embedding converter instance -converter = sg.create_transformer('StochasticProximityEmbedding', target_dim=2, m_strategy='SPE_GLOBAL') +converter = sg.create('StochasticProximityEmbedding', target_dim=2, m_strategy='SPE_GLOBAL') # Compute SPE embedding embedding = converter.transform(features) @@ -79,7 +79,7 @@ plt.title('SPE with local strategy') # Compute Isomap embedding (for comparison) -converter = sg.create_transformer('Isomap') +converter = sg.create('Isomap') 
converter.put('target_dim', 2) converter.put('k', 6) diff --git a/examples/undocumented/python/graphical/interactive_kmm_demo.py b/examples/undocumented/python/graphical/interactive_kmm_demo.py index 6fa6f59e97e..0d4b9028423 100644 --- a/examples/undocumented/python/graphical/interactive_kmm_demo.py +++ b/examples/undocumented/python/graphical/interactive_kmm_demo.py @@ -127,7 +127,7 @@ def train_kmm(self): gk = LinearKernel(train, train) gk.set_normalizer(IdentityKernelNormalizer()) elif kernel_name == "PolynomialKernel": - gk = sg.create_kernel("PolyKernel", degree=degree, c=1.0) + gk = sg.create("PolyKernel", degree=degree, c=1.0) gk.init(train, train) gk.set_normalizer(IdentityKernelNormalizer()) elif kernel_name == "GaussianKernel": diff --git a/examples/undocumented/python/graphical/interactive_svm_demo.py b/examples/undocumented/python/graphical/interactive_svm_demo.py index 20231180635..5713a4a4efc 100644 --- a/examples/undocumented/python/graphical/interactive_svm_demo.py +++ b/examples/undocumented/python/graphical/interactive_svm_demo.py @@ -117,7 +117,7 @@ def train_svm(self): gk = LinearKernel(train, train) gk.set_normalizer(IdentityKernelNormalizer()) elif kernel_name == "PolynomialKernel": - gk = sg.create_kernel("PolyKernel", degree=degree, c=1.0) + gk = sg.create("PolyKernel", degree=degree, c=1.0) gk.init(train, train) gk.set_normalizer(IdentityKernelNormalizer()) elif kernel_name == "GaussianKernel": diff --git a/examples/undocumented/python/graphical/interactive_svr_demo.py b/examples/undocumented/python/graphical/interactive_svr_demo.py index aebcce0dd1a..1dde2df22ee 100644 --- a/examples/undocumented/python/graphical/interactive_svr_demo.py +++ b/examples/undocumented/python/graphical/interactive_svr_demo.py @@ -110,7 +110,7 @@ def train_svm(self): gk = LinearKernel(train, train) gk.set_normalizer(IdentityKernelNormalizer()) elif kernel_name == "PolynomialKernel": - gk = sg.create_kernel("PolyKernel", degree=degree, c=1.0) + gk = sg.create("PolyKernel", degree=degree, c=1.0) gk.init(train, train) gk.set_normalizer(IdentityKernelNormalizer()) elif kernel_name == "GaussianKernel": diff --git a/examples/undocumented/python/graphical/kernel_ridge_regression.py b/examples/undocumented/python/graphical/kernel_ridge_regression.py index 321a5d7503f..af8fdd04515 100644 --- a/examples/undocumented/python/graphical/kernel_ridge_regression.py +++ b/examples/undocumented/python/graphical/kernel_ridge_regression.py @@ -15,9 +15,9 @@ # train krr labels = util.get_labels() train = util.get_realfeatures(pos, neg) -gk = sg.create_kernel('GaussianKernel', width=2.0) +gk = sg.create('GaussianKernel', width=2.0) gk.init(train, train) -krr = sg.create_machine('KernelRidgeRegression', labels=labels, kernel=gk, tau=1e-3) +krr = sg.create('KernelRidgeRegression', labels=labels, kernel=gk, tau=1e-3) krr.train() # compute output plot iso-lines diff --git a/examples/undocumented/python/graphical/kernel_ridge_regression_sinc.py b/examples/undocumented/python/graphical/kernel_ridge_regression_sinc.py index d566ef074be..3e9abc700f8 100644 --- a/examples/undocumented/python/graphical/kernel_ridge_regression_sinc.py +++ b/examples/undocumented/python/graphical/kernel_ridge_regression_sinc.py @@ -8,9 +8,9 @@ feat = sg.create_features(X) lab = sg.create_labels(Y.flatten()) -gk = sg.create_kernel('GaussianKernel', width=1.0) +gk = sg.create('GaussianKernel', width=1.0) gk.init(feat, feat) -krr = sg.create_machine('KernelRidgeRegression', labels=lab, kernel=gk, tau=1e-3) +krr = 
sg.create('KernelRidgeRegression', labels=lab, kernel=gk, tau=1e-3) krr.train() plt.scatter(X, Y, label='train data', color='tab:red') diff --git a/examples/undocumented/python/graphical/lda.py b/examples/undocumented/python/graphical/lda.py index 89acb832dcc..f5960db7996 100644 --- a/examples/undocumented/python/graphical/lda.py +++ b/examples/undocumented/python/graphical/lda.py @@ -18,7 +18,7 @@ # train lda labels = util.get_labels() features = util.get_realfeatures(pos, neg) -lda = sg.create_machine('LDA', gamma=gamma, labels=labels) +lda = sg.create('LDA', gamma=gamma, labels=labels) lda.train(features) # compute output plot iso-lines diff --git a/examples/undocumented/python/graphical/mclda.py b/examples/undocumented/python/graphical/mclda.py index c2b1e92c1a7..c936949fb58 100644 --- a/examples/undocumented/python/graphical/mclda.py +++ b/examples/undocumented/python/graphical/mclda.py @@ -26,7 +26,7 @@ features = sg.create_features(np.array(np.concatenate([pos, neg], 1))) -lda = sg.create_machine('MCLDA', labels=labels) +lda = sg.create('MCLDA', labels=labels) lda.train(features) # compute output plot iso-lines diff --git a/examples/undocumented/python/graphical/statistics_linear_time_mmd.py b/examples/undocumented/python/graphical/statistics_linear_time_mmd.py index 10fb467ba22..2ae76adfb75 100644 --- a/examples/undocumented/python/graphical/statistics_linear_time_mmd.py +++ b/examples/undocumented/python/graphical/statistics_linear_time_mmd.py @@ -44,9 +44,9 @@ def linear_time_mmd_graphical(): sigmas=[2**x for x in range(-3,10)] widths=[x*x*2 for x in sigmas] print "kernel widths:", widths - combined=sg.create_kernel("CombinedKernel") + combined=sg.create("CombinedKernel") for i in range(len(sigmas)): - combined.append_kernel(sg.create_kernel("GaussianKernel", width=widths[i])) + combined.append_kernel(sg.create("GaussianKernel", width=widths[i])) # mmd instance using streaming features, blocksize of 10000 block_size=1000 diff --git a/examples/undocumented/python/graphical/statistics_quadratic_time_mmd.py b/examples/undocumented/python/graphical/statistics_quadratic_time_mmd.py index a7bb6e061da..98fd847ac19 100644 --- a/examples/undocumented/python/graphical/statistics_quadratic_time_mmd.py +++ b/examples/undocumented/python/graphical/statistics_quadratic_time_mmd.py @@ -45,9 +45,9 @@ def quadratic_time_mmd_graphical(): sigmas=[2**x for x in range(-3,10)] widths=[x*x*2 for x in sigmas] print "kernel widths:", widths - combined=sg.create_kernel("CombinedKernel") + combined=sg.create("CombinedKernel") for i in range(len(sigmas)): - combined.add("kernel_array", sg.create_kernel("GaussianKernel", width=widths[i])) + combined.add("kernel_array", sg.create("GaussianKernel", width=widths[i])) # create MMD instance, use biased statistic mmd=QuadraticTimeMMD(combined,features, m) diff --git a/examples/undocumented/python/kernel_combined.py b/examples/undocumented/python/kernel_combined.py index f3b0b5fcb73..9d722cf8463 100644 --- a/examples/undocumented/python/kernel_combined.py +++ b/examples/undocumented/python/kernel_combined.py @@ -12,13 +12,13 @@ parameter_list = [[traindat,testdat,traindna,testdna],[traindat,testdat,traindna,testdna]] def kernel_combined (fm_train_real=traindat,fm_test_real=testdat,fm_train_dna=traindna,fm_test_dna=testdna ): - kernel=sg.create_kernel("CombinedKernel") - feats_train=sg.create_features("CombinedFeatures") - feats_test=sg.create_features("CombinedFeatures") + kernel=sg.create("CombinedKernel") + feats_train=sg.create("CombinedFeatures") + 
feats_test=sg.create("CombinedFeatures") subkfeats_train=sg.create_features(fm_train_real) subkfeats_test=sg.create_features(fm_test_real) - subkernel=sg.create_kernel("GaussianKernel", width=1.0) + subkernel=sg.create("GaussianKernel", width=1.0) feats_train.add("feature_array", subkfeats_train) feats_test.add("feature_array", subkfeats_test) kernel.add("kernel_array", subkernel) @@ -26,14 +26,14 @@ def kernel_combined (fm_train_real=traindat,fm_test_real=testdat,fm_train_dna=tr subkfeats_train=sg.create_string_features(fm_train_dna, sg.DNA) subkfeats_test=sg.create_string_features(fm_test_dna, sg.DNA) degree=3 - subkernel=sg.create_kernel("FixedDegreeStringKernel", degree=degree) + subkernel=sg.create("FixedDegreeStringKernel", degree=degree) feats_train.add("feature_array", subkfeats_train) feats_test.add("feature_array", subkfeats_test) kernel.add("kernel_array", subkernel) subkfeats_train=sg.create_string_features(fm_train_dna, sg.DNA) subkfeats_test=sg.create_string_features(fm_test_dna, sg.DNA) - subkernel=sg.create_kernel("LocalAlignmentStringKernel") + subkernel=sg.create("LocalAlignmentStringKernel") feats_train.add("feature_array", subkfeats_train) feats_test.add("feature_array", subkfeats_test) kernel.add("kernel_array", subkernel) diff --git a/examples/undocumented/python/kernel_combined_custom_poly.py b/examples/undocumented/python/kernel_combined_custom_poly.py index fe86d93e77a..5889b88b055 100644 --- a/examples/undocumented/python/kernel_combined_custom_poly.py +++ b/examples/undocumented/python/kernel_combined_custom_poly.py @@ -12,38 +12,38 @@ def kernel_combined_custom_poly (train_fname = traindat,test_fname = testdat,tra from shogun import CustomKernel import shogun as sg - kernel = sg.create_kernel("CombinedKernel") - feats_train = sg.create_features("CombinedFeatures") + kernel = sg.create("CombinedKernel") + feats_train = sg.create("CombinedFeatures") tfeats = sg.create_features(sg.read_csv(train_fname)) - tkernel = sg.create_kernel("PolyKernel", cache_size=10, degree=3) + tkernel = sg.create("PolyKernel", cache_size=10, degree=3) tkernel.init(tfeats, tfeats) K = tkernel.get_kernel_matrix() kernel.add("kernel_array", CustomKernel(K)) subkfeats_train = sg.create_features(sg.read_csv(train_fname)) feats_train.add("feature_array", subkfeats_train) - subkernel = sg.create_kernel("PolyKernel", cache_size=10, degree=2) + subkernel = sg.create("PolyKernel", cache_size=10, degree=2) kernel.add("kernel_array", subkernel) kernel.init(feats_train, feats_train) labels = BinaryLabels(sg.read_csv(train_label_fname)) - svm = sg.create_machine("LibSVM", C1=1.0, C2=1.0, kernel=kernel, labels=labels) + svm = sg.create("LibSVM", C1=1.0, C2=1.0, kernel=kernel, labels=labels) svm.train() - kernel = sg.create_kernel("CombinedKernel") - feats_pred = sg.create_features("CombinedFeatures") + kernel = sg.create("CombinedKernel") + feats_pred = sg.create("CombinedFeatures") pfeats = sg.create_features(sg.read_csv(test_fname)) - tkernel = sg.create_kernel("PolyKernel", cache_size=10, degree=3) + tkernel = sg.create("PolyKernel", cache_size=10, degree=3) tkernel.init(tfeats, pfeats) K = tkernel.get_kernel_matrix() kernel.add("kernel_array", CustomKernel(K)) subkfeats_test = sg.create_features(sg.read_csv(test_fname)) feats_pred.add("feature_array", subkfeats_test) - subkernel = sg.create_kernel("PolyKernel", cache_size=10, degree=2) + subkernel = sg.create("PolyKernel", cache_size=10, degree=2) kernel.add("kernel_array", subkernel) kernel.init(feats_train, feats_pred) diff --git 
a/examples/undocumented/python/kernel_comm_ulong_string.py b/examples/undocumented/python/kernel_comm_ulong_string.py index ab3541eed99..a292c57b52d 100644 --- a/examples/undocumented/python/kernel_comm_ulong_string.py +++ b/examples/undocumented/python/kernel_comm_ulong_string.py @@ -11,7 +11,7 @@ def kernel_comm_ulong_string (fm_train_dna=traindat,fm_test_dna=testdat, order=3 charfeat=sg.create_string_features(fm_train_dna, sg.DNA) feats_train=sg.create_string_features(charfeat, order-1, order, gap, reverse, sg.PT_UINT64) - preproc = sg.create_transformer("SortUlongString") + preproc = sg.create("SortUlongString") preproc.fit(feats_train) feats_train = preproc.transform(feats_train) @@ -21,7 +21,7 @@ def kernel_comm_ulong_string (fm_train_dna=traindat,fm_test_dna=testdat, order=3 use_sign=False - kernel=sg.create_kernel("CommUlongStringKernel", use_sign=use_sign) + kernel=sg.create("CommUlongStringKernel", use_sign=use_sign) kernel.init(feats_train, feats_train) km_train=kernel.get_kernel_matrix() diff --git a/examples/undocumented/python/kernel_comm_word_string.py b/examples/undocumented/python/kernel_comm_word_string.py index 08dcdce929e..be9c9b8eb57 100644 --- a/examples/undocumented/python/kernel_comm_word_string.py +++ b/examples/undocumented/python/kernel_comm_word_string.py @@ -11,7 +11,7 @@ def kernel_comm_word_string (fm_train_dna=traindat, fm_test_dna=testdat, order=3 charfeat=sg.create_string_features(fm_train_dna, sg.DNA) feats_train=sg.create_string_features(charfeat, order-1, order, gap, reverse) - preproc = sg.create_transformer("SortWordString") + preproc = sg.create("SortWordString") preproc.fit(feats_train) feats_train = preproc.transform(feats_train) @@ -19,7 +19,7 @@ def kernel_comm_word_string (fm_train_dna=traindat, fm_test_dna=testdat, order=3 feats_test=sg.create_string_features(charfeat, order-1, order, gap, reverse) feats_test = preproc.transform(feats_test) - kernel=sg.create_kernel("CommWordStringKernel", use_sign=use_sign) + kernel=sg.create("CommWordStringKernel", use_sign=use_sign) kernel.init(feats_train, feats_train) km_train=kernel.get_kernel_matrix() diff --git a/examples/undocumented/python/kernel_director_linear.py b/examples/undocumented/python/kernel_director_linear.py index cb5bd62f1f3..583abc03c65 100644 --- a/examples/undocumented/python/kernel_director_linear.py +++ b/examples/undocumented/python/kernel_director_linear.py @@ -27,12 +27,12 @@ def kernel_function(self, idx_a, idx_b): feats_train.get_global_parallel().set_num_threads(1) feats_test=sg.create_features(fm_test_real) - kernel=sg.create_kernel("LinearKernel") - kernel.set_normalizer(sg.create_kernel_normalizer("AvgDiagKernelNormalizer", scale=scale)) + kernel=sg.create("LinearKernel") + kernel.set_normalizer(sg.create("AvgDiagKernelNormalizer", scale=scale)) kernel.init(feats_train, feats_train) dkernel=DirectorLinearKernel() - dkernel.set_normalizer(sg.create_kernel_normalizer("AvgDiagKernelNormalizer", scale=scale)) + dkernel.set_normalizer(sg.create("AvgDiagKernelNormalizer", scale=scale)) dkernel.init(feats_train, feats_train) #print "km_train" diff --git a/examples/undocumented/python/kernel_fisher.py b/examples/undocumented/python/kernel_fisher.py index 739de674bd2..a3ea092322c 100644 --- a/examples/undocumented/python/kernel_fisher.py +++ b/examples/undocumented/python/kernel_fisher.py @@ -46,7 +46,7 @@ def kernel_fisher (fm_train_dna=traindat, fm_test_dna=testdat, neg.set_observations(wordfeats_train) feats_train=FKFeatures(10, pos, neg) feats_train.set_opt_a(-1) #estimate prior 
- kernel=sg.create_kernel("PolyKernel", c=c) + kernel=sg.create("PolyKernel", c=c) kernel.init(feats_train, feats_train) km_train=kernel.get_kernel_matrix() diff --git a/examples/undocumented/python/kernel_fixed_degree_string.py b/examples/undocumented/python/kernel_fixed_degree_string.py index d0ad465340b..a9f678ba6d3 100644 --- a/examples/undocumented/python/kernel_fixed_degree_string.py +++ b/examples/undocumented/python/kernel_fixed_degree_string.py @@ -12,7 +12,7 @@ def kernel_fixed_degree_string (fm_train_dna=traindat, fm_test_dna=testdat,degre feats_train=sg.create_string_features(fm_train_dna, sg.DNA) feats_test=sg.create_string_features(fm_test_dna, sg.DNA) - kernel=sg.create_kernel("FixedDegreeStringKernel", degree=degree) + kernel=sg.create("FixedDegreeStringKernel", degree=degree) kernel.init(feats_train, feats_train) km_train=kernel.get_kernel_matrix() diff --git a/examples/undocumented/python/kernel_histogram_word_string.py b/examples/undocumented/python/kernel_histogram_word_string.py index bb66029f25d..818370ae94b 100644 --- a/examples/undocumented/python/kernel_histogram_word_string.py +++ b/examples/undocumented/python/kernel_histogram_word_string.py @@ -17,10 +17,10 @@ def kernel_histogram_word_string (fm_train_dna=traindat,fm_test_dna=testdat,labe feats_test=sg.create_string_features(charfeat, order-1, order, 0, False) labels=sg.create_labels(label_train_dna) - pie=sg.create_machine("PluginEstimate", pos_pseudo=ppseudo_count, neg_pseudo=npseudo_count, labels=labels) + pie=sg.create("PluginEstimate", pos_pseudo=ppseudo_count, neg_pseudo=npseudo_count, labels=labels) pie.train(feats_train) - kernel=sg.create_kernel("HistogramWordStringKernel", estimate=pie) + kernel=sg.create("HistogramWordStringKernel", estimate=pie) kernel.init(feats_train, feats_train) km_train=kernel.get_kernel_matrix() kernel.init(feats_train, feats_test) diff --git a/examples/undocumented/python/kernel_io.py b/examples/undocumented/python/kernel_io.py index 64500ba675e..cd2590a3da0 100644 --- a/examples/undocumented/python/kernel_io.py +++ b/examples/undocumented/python/kernel_io.py @@ -11,7 +11,7 @@ def kernel_io (train_fname=traindat,test_fname=testdat,width=1.9): feats_train=sg.create_features(sg.read_csv(train_fname)) feats_test=sg.create_features(sg.read_csv(test_fname)) - kernel=sg.create_kernel("GaussianKernel", width=width) + kernel=sg.create("GaussianKernel", width=width) kernel.init(feats_train, feats_train) km_train=kernel.get_kernel_matrix() tmp_train_csv = NamedTemporaryFile(suffix='train.csv') diff --git a/examples/undocumented/python/kernel_linear.py b/examples/undocumented/python/kernel_linear.py index fd9c6d8ce7f..855905c1ed7 100644 --- a/examples/undocumented/python/kernel_linear.py +++ b/examples/undocumented/python/kernel_linear.py @@ -10,8 +10,8 @@ def kernel_linear (train_fname=traindat,test_fname=testdat,scale=1.2): feats_train=sg.create_features(sg.read_csv(train_fname)) feats_test=sg.create_features(sg.read_csv(test_fname)) - kernel=sg.create_kernel("LinearKernel") - kernel.set_normalizer(sg.create_kernel_normalizer("AvgDiagKernelNormalizer", scale=scale)) + kernel=sg.create("LinearKernel") + kernel.set_normalizer(sg.create("AvgDiagKernelNormalizer", scale=scale)) kernel.init(feats_train, feats_train) km_train=kernel.get_kernel_matrix() diff --git a/examples/undocumented/python/kernel_linear_word.py b/examples/undocumented/python/kernel_linear_word.py index 1a49f81e96b..52eb5b72c6c 100644 --- a/examples/undocumented/python/kernel_linear_word.py +++ 
b/examples/undocumented/python/kernel_linear_word.py @@ -15,9 +15,9 @@ def kernel_linear_word (fm_train_word=traindat,fm_test_word=testdat,scale=1.2): feats_train=sg.create_features(fm_train_word) feats_test=sg.create_features(fm_test_word) - kernel=sg.create_kernel("LinearKernel") + kernel=sg.create("LinearKernel") kernel.init(feats_train, feats_train) - kernel.set_normalizer(sg.create_kernel_normalizer("AvgDiagKernelNormalizer", scale=scale)) + kernel.set_normalizer(sg.create("AvgDiagKernelNormalizer", scale=scale)) kernel.init(feats_train, feats_train) km_train=kernel.get_kernel_matrix() diff --git a/examples/undocumented/python/kernel_local_alignment_string.py b/examples/undocumented/python/kernel_local_alignment_string.py index 3e2829e372c..941b3896c61 100644 --- a/examples/undocumented/python/kernel_local_alignment_string.py +++ b/examples/undocumented/python/kernel_local_alignment_string.py @@ -12,7 +12,7 @@ def kernel_local_alignment_string (fm_train_dna=traindat,fm_test_dna=testdat): feats_train=sg.create_string_features(fm_train_dna, sg.DNA) feats_test=sg.create_string_features(fm_test_dna, sg.DNA) - kernel=sg.create_kernel("LocalAlignmentStringKernel") + kernel=sg.create("LocalAlignmentStringKernel") kernel.init(feats_train, feats_train) km_train=kernel.get_kernel_matrix() diff --git a/examples/undocumented/python/kernel_locality_improved_string.py b/examples/undocumented/python/kernel_locality_improved_string.py index 5fc151eec62..87441a17812 100644 --- a/examples/undocumented/python/kernel_locality_improved_string.py +++ b/examples/undocumented/python/kernel_locality_improved_string.py @@ -12,7 +12,7 @@ def kernel_locality_improved_string (fm_train_dna=traindat,fm_test_dna=testdat,l feats_train=sg.create_string_features(fm_train_dna, sg.DNA) feats_test=sg.create_string_features(fm_test_dna, sg.DNA) - kernel=sg.create_kernel("LocalityImprovedStringKernel", length=length, inner_degree=inner_degree, outer_degree=outer_degree) + kernel=sg.create("LocalityImprovedStringKernel", length=length, inner_degree=inner_degree, outer_degree=outer_degree) kernel.init(feats_train, feats_train) km_train=kernel.get_kernel_matrix() kernel.init(feats_train, feats_test) diff --git a/examples/undocumented/python/kernel_match_word_string.py b/examples/undocumented/python/kernel_match_word_string.py index 2422cb29013..69f7fe366e9 100644 --- a/examples/undocumented/python/kernel_match_word_string.py +++ b/examples/undocumented/python/kernel_match_word_string.py @@ -18,8 +18,8 @@ def kernel_match_word_string (fm_train_dna=traindat,fm_test_dna=testdat, charfeat=sg.create_string_features(fm_test_dna, sg.DNA) feats_test=sg.create_string_features(charfeat, order-1, order, gap, reverse) - kernel=sg.create_kernel("MatchWordStringKernel", cache_size=size_cache, degree=degree) - kernel.set_normalizer(sg.create_kernel_normalizer("AvgDiagKernelNormalizer", scale=scale)) + kernel=sg.create("MatchWordStringKernel", cache_size=size_cache, degree=degree) + kernel.set_normalizer(sg.create("AvgDiagKernelNormalizer", scale=scale)) kernel.init(feats_train, feats_train) km_train=kernel.get_kernel_matrix() diff --git a/examples/undocumented/python/kernel_poly_match_string.py b/examples/undocumented/python/kernel_poly_match_string.py index a57888f8a1a..06b078e95b6 100644 --- a/examples/undocumented/python/kernel_poly_match_string.py +++ b/examples/undocumented/python/kernel_poly_match_string.py @@ -11,7 +11,7 @@ def kernel_poly_match_string (fm_train_dna=traindat,fm_test_dna=testdat,degree=3 
feats_train=sg.create_string_features(fm_train_dna, sg.DNA) feats_test=sg.create_string_features(fm_train_dna, sg.DNA) - kernel=sg.create_kernel("PolyMatchStringKernel", degree=degree, inhomogene=inhomogene) + kernel=sg.create("PolyMatchStringKernel", degree=degree, inhomogene=inhomogene) kernel.init(feats_train, feats_train) km_train=kernel.get_kernel_matrix() diff --git a/examples/undocumented/python/kernel_poly_match_word_string.py b/examples/undocumented/python/kernel_poly_match_word_string.py index cd4431e639e..d9a199dcf7e 100644 --- a/examples/undocumented/python/kernel_poly_match_word_string.py +++ b/examples/undocumented/python/kernel_poly_match_word_string.py @@ -16,7 +16,7 @@ def kernel_poly_match_word_string (fm_train_dna=traindat,fm_test_dna=testdat, charfeat=sg.create_string_features(fm_test_dna, sg.DNA) feats_test=sg.create_string_features(charfeat, order-1, order, gap, reverse) - kernel=sg.create_kernel("PolyMatchWordStringKernel", degree=degree, inhomogene=inhomogene) + kernel=sg.create("PolyMatchWordStringKernel", degree=degree, inhomogene=inhomogene) kernel.init(feats_train, feats_train) km_train=kernel.get_kernel_matrix() diff --git a/examples/undocumented/python/kernel_rationalquadratic.py b/examples/undocumented/python/kernel_rationalquadratic.py index 7cccd8a42c7..458334a680e 100644 --- a/examples/undocumented/python/kernel_rationalquadratic.py +++ b/examples/undocumented/python/kernel_rationalquadratic.py @@ -10,9 +10,9 @@ def kernel_rationalquadratic (train_fname=traindat,test_fname=testdat, shift_coe feats_train=sg.create_features(sg.read_csv(train_fname)) feats_test=sg.create_features(sg.read_csv(test_fname)) - distance = sg.create_distance('EuclideanDistance') + distance = sg.create('EuclideanDistance') - kernel = sg.create_kernel('RationalQuadraticKernel', coef=shift_coef, + kernel = sg.create('RationalQuadraticKernel', coef=shift_coef, distance=distance) kernel.init(feats_train, feats_train) km_train=kernel.get_kernel_matrix() diff --git a/examples/undocumented/python/kernel_salzberg_word_string.py b/examples/undocumented/python/kernel_salzberg_word_string.py index ff5c16037b0..847fda0c5f0 100644 --- a/examples/undocumented/python/kernel_salzberg_word_string.py +++ b/examples/undocumented/python/kernel_salzberg_word_string.py @@ -17,10 +17,10 @@ def kernel_salzberg_word_string (fm_train_dna=traindat,fm_test_dna=testdat,label feats_test=sg.create_string_features(charfeat, order-1, order, gap, reverse) labels=sg.create_labels(label_train_dna) - pie=sg.create_machine("PluginEstimate", labels=labels) + pie=sg.create("PluginEstimate", labels=labels) pie.train(feats_train) - kernel=sg.create_kernel("SalzbergWordStringKernel", plugin_estimate=pie, labels=labels) + kernel=sg.create("SalzbergWordStringKernel", plugin_estimate=pie, labels=labels) kernel.init(feats_train, feats_train) km_train=kernel.get_kernel_matrix() diff --git a/examples/undocumented/python/kernel_simple_locality_improved_string.py b/examples/undocumented/python/kernel_simple_locality_improved_string.py index 5817485a40f..22b81991baf 100644 --- a/examples/undocumented/python/kernel_simple_locality_improved_string.py +++ b/examples/undocumented/python/kernel_simple_locality_improved_string.py @@ -13,7 +13,7 @@ def kernel_simple_locality_improved_string (fm_train_dna=traindat,fm_test_dna=te feats_train=sg.create_string_features(fm_train_dna, sg.DNA) feats_test=sg.create_string_features(fm_test_dna, sg.DNA) - kernel=sg.create_kernel("SimpleLocalityImprovedStringKernel", length=length, 
inner_degree=inner_degree, outer_degree=outer_degree) + kernel=sg.create("SimpleLocalityImprovedStringKernel", length=length, inner_degree=inner_degree, outer_degree=outer_degree) kernel.init(feats_train, feats_train) km_train=kernel.get_kernel_matrix() diff --git a/examples/undocumented/python/kernel_sparse_gaussian.py b/examples/undocumented/python/kernel_sparse_gaussian.py index 4f8caf7fb82..5203746e917 100644 --- a/examples/undocumented/python/kernel_sparse_gaussian.py +++ b/examples/undocumented/python/kernel_sparse_gaussian.py @@ -13,7 +13,7 @@ def kernel_sparse_gaussian (fm_train_real=traindat,fm_test_real=testdat,width=1. feats_train=SparseRealFeatures(fm_train_real) feats_test=SparseRealFeatures(fm_test_real) - kernel=sg.create_kernel("GaussianKernel", width=width) + kernel=sg.create("GaussianKernel", width=width) kernel.init(feats_train, feats_train,) km_train=kernel.get_kernel_matrix() diff --git a/examples/undocumented/python/kernel_sparse_linear.py b/examples/undocumented/python/kernel_sparse_linear.py index 59a7a261b19..4896b98c1e7 100644 --- a/examples/undocumented/python/kernel_sparse_linear.py +++ b/examples/undocumented/python/kernel_sparse_linear.py @@ -14,8 +14,8 @@ def kernel_sparse_linear (fm_train_real=traindat,fm_test_real=testdat,scale=1.1) feats_train=SparseRealFeatures(fm_train_real) feats_test=SparseRealFeatures(fm_test_real) - kernel=sg.create_kernel("LinearKernel") - kernel.set_normalizer(sg.create_kernel_normalizer("AvgDiagKernelNormalizer", scale=scale)) + kernel=sg.create("LinearKernel") + kernel.set_normalizer(sg.create("AvgDiagKernelNormalizer", scale=scale)) kernel.init(feats_train, feats_train) km_train=kernel.get_kernel_matrix() diff --git a/examples/undocumented/python/kernel_sparse_poly.py b/examples/undocumented/python/kernel_sparse_poly.py index ad1cead5e8b..f6d92fc6099 100644 --- a/examples/undocumented/python/kernel_sparse_poly.py +++ b/examples/undocumented/python/kernel_sparse_poly.py @@ -17,7 +17,7 @@ def kernel_sparse_poly (fm_train_real=traindat,fm_test_real=testdat, - kernel=sg.create_kernel("PolyKernel", cache_size=cache_size, degree=degree, + kernel=sg.create("PolyKernel", cache_size=cache_size, degree=degree, c=c) kernel.init(feats_train, feats_train) km_train=kernel.get_kernel_matrix() diff --git a/examples/undocumented/python/kernel_spline.py b/examples/undocumented/python/kernel_spline.py index 6243e9221d5..d092f4ff6c3 100644 --- a/examples/undocumented/python/kernel_spline.py +++ b/examples/undocumented/python/kernel_spline.py @@ -13,7 +13,7 @@ def kernel_spline (fm_train_real=traindat,fm_test_real=testdat): feats_train=sg.create_features(fm_train_real) feats_test=sg.create_features(fm_test_real) - kernel=sg.create_kernel("SplineKernel") + kernel=sg.create("SplineKernel") kernel.init(feats_train, feats_train) km_train=kernel.get_kernel_matrix() diff --git a/examples/undocumented/python/kernel_ssk_string.py b/examples/undocumented/python/kernel_ssk_string.py index cddba974503..aeb4097920a 100644 --- a/examples/undocumented/python/kernel_ssk_string.py +++ b/examples/undocumented/python/kernel_ssk_string.py @@ -17,7 +17,7 @@ def kernel_ssk_string (fm_train_dna=traindat, fm_test_dna=testdat, maxlen=1, dec feats_train=sg.create_string_features(fm_train_dna, sg.DNA) feats_test=sg.create_string_features(fm_test_dna, sg.DNA) - kernel=sg.create_kernel("SubsequenceStringKernel", maxlen=maxlen, decay=decay) + kernel=sg.create("SubsequenceStringKernel", maxlen=maxlen, decay=decay) kernel.init(feats_train, feats_train) 
km_train=kernel.get_kernel_matrix() diff --git a/examples/undocumented/python/kernel_top.py b/examples/undocumented/python/kernel_top.py index 1ccb79f2fc5..f6d821a4003 100644 --- a/examples/undocumented/python/kernel_top.py +++ b/examples/undocumented/python/kernel_top.py @@ -46,7 +46,7 @@ def kernel_top (fm_train_dna=traindat,fm_test_dna=testdat,label_train_dna=label_ pos.set_observations(wordfeats_train) neg.set_observations(wordfeats_train) feats_train=TOPFeatures(10, pos, neg, False, False) - kernel=sg.create_kernel("PolyKernel", c=c) + kernel=sg.create("PolyKernel", c=c) kernel.init(feats_train, feats_train) km_train=kernel.get_kernel_matrix() diff --git a/examples/undocumented/python/kernel_tstudent.py b/examples/undocumented/python/kernel_tstudent.py index 713b23220d0..2e946ae86a0 100644 --- a/examples/undocumented/python/kernel_tstudent.py +++ b/examples/undocumented/python/kernel_tstudent.py @@ -15,9 +15,9 @@ def kernel_tstudent (fm_train_real=traindat,fm_test_real=testdat, degree=2.0): feats_train=sg.create_features(fm_train_real) feats_test=sg.create_features(fm_test_real) - distance = sg.create_distance('EuclideanDistance') + distance = sg.create('EuclideanDistance') - kernel = sg.create_kernel('TStudentKernel', degree=degree, distance=distance) + kernel = sg.create('TStudentKernel', degree=degree, distance=distance) kernel.init(feats_train, feats_train) km_train=kernel.get_kernel_matrix() diff --git a/examples/undocumented/python/kernel_wave.py b/examples/undocumented/python/kernel_wave.py index 54ab8968b6b..db100f24151 100644 --- a/examples/undocumented/python/kernel_wave.py +++ b/examples/undocumented/python/kernel_wave.py @@ -13,9 +13,9 @@ def kernel_wave (fm_train_real=traindat,fm_test_real=testdat, theta=1.0): feats_train=sg.create_features(fm_train_real) feats_test=sg.create_features(fm_test_real) - distance = sg.create_distance('EuclideanDistance') + distance = sg.create('EuclideanDistance') - kernel = sg.create_kernel('WaveKernel', theta=theta, distance=distance) + kernel = sg.create('WaveKernel', theta=theta, distance=distance) kernel.init(feats_train, feats_train) km_train=kernel.get_kernel_matrix() diff --git a/examples/undocumented/python/kernel_wavelet.py b/examples/undocumented/python/kernel_wavelet.py index 8ff9bed92a3..730a97f2160 100644 --- a/examples/undocumented/python/kernel_wavelet.py +++ b/examples/undocumented/python/kernel_wavelet.py @@ -13,7 +13,7 @@ def kernel_wavelet (fm_train_real=traindat,fm_test_real=testdat, dilation=1.5, t feats_train=sg.create_features(fm_train_real) feats_test=sg.create_features(fm_test_real) - kernel=sg.create_kernel("WaveletKernel", dilation=dilation, translation=translation) + kernel=sg.create("WaveletKernel", dilation=dilation, translation=translation) kernel.init(feats_train, feats_train) km_train=kernel.get_kernel_matrix() diff --git a/examples/undocumented/python/kernel_weighted_comm_word_string.py b/examples/undocumented/python/kernel_weighted_comm_word_string.py index 2b499b192af..44c4bcd0298 100644 --- a/examples/undocumented/python/kernel_weighted_comm_word_string.py +++ b/examples/undocumented/python/kernel_weighted_comm_word_string.py @@ -11,7 +11,7 @@ def kernel_weighted_comm_word_string (fm_train_dna=traindat,fm_test_dna=testdat, charfeat=sg.create_string_features(fm_train_dna, sg.DNA) feats_train=sg.create_string_features(charfeat, order-1, order, gap, reverse) - preproc = sg.create_transformer("SortWordString") + preproc = sg.create("SortWordString") preproc.fit(feats_train) feats_train = 
preproc.transform(feats_train) @@ -20,7 +20,7 @@ def kernel_weighted_comm_word_string (fm_train_dna=traindat,fm_test_dna=testdat, feats_test = preproc.transform(feats_test) use_sign=False - kernel = sg.create_kernel("WeightedCommWordStringKernel", use_sign=use_sign) + kernel = sg.create("WeightedCommWordStringKernel", use_sign=use_sign) kernel.init(feats_train, feats_train) km_train=kernel.get_kernel_matrix() diff --git a/examples/undocumented/python/mkl_multiclass.py b/examples/undocumented/python/mkl_multiclass.py index a3d583fd41e..6952d6dae45 100644 --- a/examples/undocumented/python/mkl_multiclass.py +++ b/examples/undocumented/python/mkl_multiclass.py @@ -15,27 +15,27 @@ def mkl_multiclass (fm_train_real, fm_test_real, label_train_multiclass, from shogun import MulticlassLabels import shogun as sg - kernel = sg.create_kernel("CombinedKernel") - feats_train = sg.create_features("CombinedFeatures") - feats_test = sg.create_features("CombinedFeatures") + kernel = sg.create("CombinedKernel") + feats_train = sg.create("CombinedFeatures") + feats_test = sg.create("CombinedFeatures") subkfeats_train = sg.create_features(fm_train_real) subkfeats_test = sg.create_features(fm_test_real) - subkernel = sg.create_kernel("GaussianKernel", width=width) + subkernel = sg.create("GaussianKernel", width=width) feats_train.add("feature_array", subkfeats_train) feats_test.add("feature_array", subkfeats_test) kernel.add("kernel_array", subkernel) subkfeats_train = sg.create_features(fm_train_real) subkfeats_test = sg.create_features(fm_test_real) - subkernel = sg.create_kernel("LinearKernel") + subkernel = sg.create("LinearKernel") feats_train.add("feature_array", subkfeats_train) feats_test.add("feature_array", subkfeats_test) kernel.add("kernel_array", subkernel) subkfeats_train = sg.create_features(fm_train_real) subkfeats_test = sg.create_features(fm_test_real) - subkernel = sg.create_kernel("PolyKernel", cache_size=10, degree=2) + subkernel = sg.create("PolyKernel", cache_size=10, degree=2) feats_train.add("feature_array", subkfeats_train) feats_test.add("feature_array", subkfeats_test) kernel.add("kernel_array", subkernel) @@ -44,8 +44,8 @@ def mkl_multiclass (fm_train_real, fm_test_real, label_train_multiclass, labels = MulticlassLabels(label_train_multiclass) - mkl = sg.create_machine("MKLMulticlass", C=C, kernel=kernel, labels=labels, - mkl_eps=mkl_epsilon, mkl_norm=mkl_norm) + mkl = sg.create("MKLMulticlass", C=C, kernel=kernel, labels=labels, + mkl_eps=mkl_epsilon, mkl_norm=mkl_norm) mkl.get("machine").put("epsilon", epsilon) diff --git a/examples/undocumented/python/preprocessor_sortulongstring.py b/examples/undocumented/python/preprocessor_sortulongstring.py index f8eb1c9c5de..5012f8c3092 100644 --- a/examples/undocumented/python/preprocessor_sortulongstring.py +++ b/examples/undocumented/python/preprocessor_sortulongstring.py @@ -16,12 +16,12 @@ def preprocessor_sortulongstring (fm_train_dna=traindna,fm_test_dna=testdna,orde charfeat=sg.create_string_features(fm_test_dna, sg.DNA) feats_test=sg.create_string_features(charfeat, order-1, order, gap, reverse, sg.PT_UINT64) - preproc = sg.create_transformer("SortUlongString") + preproc = sg.create("SortUlongString") preproc.fit(feats_train) feats_train = preproc.transform(feats_train) feats_test = preproc.transform(feats_test) - kernel=sg.create_kernel("CommUlongStringKernel", use_sign=use_sign) + kernel=sg.create("CommUlongStringKernel", use_sign=use_sign) kernel.init(feats_train, feats_train) km_train=kernel.get_kernel_matrix() diff --git 
a/examples/undocumented/python/preprocessor_sortwordstring.py b/examples/undocumented/python/preprocessor_sortwordstring.py index b928179475f..13b22ebb317 100644 --- a/examples/undocumented/python/preprocessor_sortwordstring.py +++ b/examples/undocumented/python/preprocessor_sortwordstring.py @@ -12,7 +12,7 @@ def preprocessor_sortwordstring (fm_train_dna=traindna,fm_test_dna=testdna,order charfeat=sg.create_string_features(fm_train_dna, sg.DNA) feats_train=sg.create_string_features(charfeat, order-1, order, gap, reverse) - preproc = sg.create_transformer("SortWordString") + preproc = sg.create("SortWordString") preproc.fit(feats_train) feats_train = preproc.transform(feats_train) @@ -20,7 +20,7 @@ def preprocessor_sortwordstring (fm_train_dna=traindna,fm_test_dna=testdna,order feats_test=sg.create_string_features(charfeat, order-1, order, gap, reverse) feats_test = preproc.transform(feats_test) - kernel=sg.create_kernel("CommWordStringKernel", use_sign=use_sign) + kernel=sg.create("CommWordStringKernel", use_sign=use_sign) kernel.init(feats_train, feats_train) km_train=kernel.get_kernel_matrix() diff --git a/examples/undocumented/python/stochasticgbmachine.py b/examples/undocumented/python/stochasticgbmachine.py index 04b2609b1fd..71ee8f025a1 100644 --- a/examples/undocumented/python/stochasticgbmachine.py +++ b/examples/undocumented/python/stochasticgbmachine.py @@ -20,9 +20,9 @@ def stochasticgbmachine(train=traindat,train_labels=label_traindat,ft=feat_types p=np.random.permutation(labels.get_num_labels()) num=labels.get_num_labels()*0.9 - cart=sg.create_machine("CARTree", nominal=ft, max_depth=1) - loss = sg.create_loss('SquaredLoss') - s=sg.create_machine("StochasticGBMachine", machine=cart, loss=loss, + cart=sg.create("CARTree", nominal=ft, max_depth=1) + loss = sg.create('SquaredLoss') + s=sg.create("StochasticGBMachine", machine=cart, loss=loss, num_iterations=500, learning_rate=0.01) # train diff --git a/examples/undocumented/python/structure_discrete_hmsvm_bmrm.py b/examples/undocumented/python/structure_discrete_hmsvm_bmrm.py index 7907fb9f3bf..6be370b5cf7 100644 --- a/examples/undocumented/python/structure_discrete_hmsvm_bmrm.py +++ b/examples/undocumented/python/structure_discrete_hmsvm_bmrm.py @@ -12,7 +12,7 @@ def structure_discrete_hmsvm_bmrm (m_data_dict=data_dict): import shogun as sg try: - _ = sg.create_machine("DualLibQPBMSOSVM") + _ = sg.create("DualLibQPBMSOSVM") except: print("DualLibQPBMSOSVM not available") return @@ -26,15 +26,15 @@ def structure_discrete_hmsvm_bmrm (m_data_dict=data_dict): features = sg.RealMatrixFeatures(m_data_dict['signal'].astype(float), 250, 500) num_obs = 4 # given by the data file used - model = sg.create_structured_model("HMSVMModel", features=features, labels=labels, + model = sg.create("HMSVMModel", features=features, labels=labels, state_model_type="SMT_TWO_STATE", num_obs=num_obs) - sosvm = sg.create_machine("DualLibQPBMSOSVM", model=model, labels=labels, m_lambda=5000.0) + sosvm = sg.create("DualLibQPBMSOSVM", model=model, labels=labels, m_lambda=5000.0) sosvm.train() #print sosvm.get_w() predicted = sosvm.apply(features) - evaluator = sg.create_evaluation("StructuredAccuracy") + evaluator = sg.create("StructuredAccuracy") acc = evaluator.evaluate(predicted, labels) #print('Accuracy = %.4f' % acc) diff --git a/examples/undocumented/python/structure_discrete_hmsvm_mosek.py b/examples/undocumented/python/structure_discrete_hmsvm_mosek.py index 7a475cc3c92..2397562d00b 100644 --- 
a/examples/undocumented/python/structure_discrete_hmsvm_mosek.py +++ b/examples/undocumented/python/structure_discrete_hmsvm_mosek.py @@ -12,7 +12,7 @@ def structure_discrete_hmsvm_mosek (m_data_dict=data_dict): import shogun as sg try: - _ = sg.create_machine("PrimalMosekSOSVM") + _ = sg.create("PrimalMosekSOSVM") except: print("Mosek not available") return @@ -26,14 +26,14 @@ def structure_discrete_hmsvm_mosek (m_data_dict=data_dict): features = sg.RealMatrixFeatures(m_data_dict['signal'].astype(float), 250, 500) num_obs = 4 # given by the data file used - model = sg.structured_model("HMSVMModel", features=features, labels=labels, - state_model_type=SMT_TWO_STATE, num_obs=num_obs) + model = sg.create("HMSVMModel", features=features, labels=labels, + state_model_type=SMT_TWO_STATE, num_obs=num_obs) - sosvm = sg.create_machine("PrimalMosekSOSVM", model=model, labels=labels) + sosvm = sg.create("PrimalMosekSOSVM", model=model, labels=labels) sosvm.train() predicted = sosvm.apply() - evaluator = sg.create_evaluation("StructuredAccuracy") + evaluator = sg.create("StructuredAccuracy") acc = evaluator.evaluate(predicted, labels) if __name__ == '__main__': diff --git a/examples/undocumented/python/structure_factor_graph_model.py b/examples/undocumented/python/structure_factor_graph_model.py index e666ffdb85a..7a5a6cb3080 100644 --- a/examples/undocumented/python/structure_factor_graph_model.py +++ b/examples/undocumented/python/structure_factor_graph_model.py @@ -7,17 +7,17 @@ tid = 0 cards = np.array([2,2], np.int32) w_gt = np.array([0.3,0.5,1.0,0.2,0.05,0.6,-0.2,0.75]) -fac_type = sg.create_factor_type("TableFactorType",type_id=tid, cards=cards, w=w_gt) +fac_type = sg.create("TableFactorType",type_id=tid, cards=cards, w=w_gt) tid_u = 1 cards_u = np.array([2], np.int32) w_gt_u = np.array([0.5,0.8,1.0,-0.3]) -fac_type_u = sg.create_factor_type("TableFactorType",type_id=tid_u, cards=cards_u, w=w_gt_u) +fac_type_u = sg.create("TableFactorType",type_id=tid_u, cards=cards_u, w=w_gt_u) tid_b = 2 cards_b = np.array([2], np.int32) w_gt_b = np.array([0.8, -0.8]) -fac_type_b = sg.create_factor_type("TableFactorType",type_id=tid_b, cards=cards_b, w=w_gt_b) +fac_type_b = sg.create("TableFactorType",type_id=tid_b, cards=cards_b, w=w_gt_b) def gen_data(ftype, num_samples, show_data = False): @@ -92,14 +92,14 @@ def gen_data(ftype, num_samples, show_data = False): def structure_factor_graph_model(tr_samples = samples, tr_labels = labels, w = w_all, ftype = ftype_all): try: - _ = sg.create_machine("DualLibQPBMSOSVM") + _ = sg.create("DualLibQPBMSOSVM") except: print("DualLibQPBMSOSVM not available") return # create model - model = sg.create_structured_model("FactorGraphModel", features=tr_samples, labels=tr_labels, - inf_type="TREE_MAX_PROD") + model = sg.create("FactorGraphModel", features=tr_samples, labels=tr_labels, + inf_type="TREE_MAX_PROD") w_truth = [w[0].copy(), w[1].copy(), w[2].copy()] w[0] = np.zeros(8) w[1] = np.zeros(4) @@ -112,7 +112,7 @@ def structure_factor_graph_model(tr_samples = samples, tr_labels = labels, w = w model.add("factor_types", ftype[2]) # --- training with BMRM --- - bmrm = sg.create_machine("DualLibQPBMSOSVM", model=model, labels=tr_labels, m_lambda=0.01) + bmrm = sg.create("DualLibQPBMSOSVM", model=model, labels=tr_labels, m_lambda=0.01) #bmrm.set_verbose(True) bmrm.train() #print 'learned weights:' @@ -142,7 +142,7 @@ def structure_factor_graph_model(tr_samples = samples, tr_labels = labels, w = w #print hbm.get_train_errors() # --- training with SGD --- - sgd = 
sg.create_machine("StochasticSOSVM", model=model, labels=tr_labels, m_lambda=0.01) + sgd = sg.create("StochasticSOSVM", model=model, labels=tr_labels, m_lambda=0.01) #sgd.set_verbose(True) sgd.train() @@ -154,7 +154,7 @@ def structure_factor_graph_model(tr_samples = samples, tr_labels = labels, w = w #print hp.get_train_errors() # --- training with FW --- - fw = sg.create_machine("FWSOSVM", model=model, labels=tr_labels, m_lambda=0.01, + fw = sg.create("FWSOSVM", model=model, labels=tr_labels, m_lambda=0.01, gap_threshold=0.01) fw.train() diff --git a/examples/undocumented/python/structure_graphcuts.py b/examples/undocumented/python/structure_graphcuts.py index 2da38de1a1b..f453a871cac 100644 --- a/examples/undocumented/python/structure_graphcuts.py +++ b/examples/undocumented/python/structure_graphcuts.py @@ -59,13 +59,13 @@ def define_factor_types(num_vars, len_feat, edge_table): cards_u = np.array([n_stats], np.int32) w_u = np.zeros(n_stats*len_feat) for i in range(num_vars): - v_factor_types[i] = sg.create_factor_type("TableFactorType",type_id=i, cards=cards_u, w=w_u) + v_factor_types[i] = sg.create("TableFactorType",type_id=i, cards=cards_u, w=w_u) # pair-wise factors cards_pw = np.array([n_stats, n_stats], np.int32) w_pw = np.zeros(n_stats*n_stats) for j in range(n_edges): - v_factor_types[j + num_vars] = sg.create_factor_type("TableFactorType", type_id=j + num_vars, + v_factor_types[j + num_vars] = sg.create("TableFactorType", type_id=j + num_vars, cards=cards_pw, w=w_pw) return v_factor_types @@ -170,7 +170,7 @@ def graphcuts_sosvm(num_train_samples = 10, len_label = 5, len_feat = 20, num_te (labels_fg, feats_fg) = build_factor_graph_model(labels_train, feats_train, factor_types, full, sg.GRAPH_CUT) # create model and register factor types - model = sg.create_structured_model("FactorGraphModel", features=feats_fg, labels=labels_fg, + model = sg.create("FactorGraphModel", features=feats_fg, labels=labels_fg, inf_type="GRAPH_CUT") for i in range(len(factor_types)): @@ -180,7 +180,7 @@ def graphcuts_sosvm(num_train_samples = 10, len_label = 5, len_feat = 20, num_te # the 3rd parameter is do_weighted_averaging, by turning this on, # a possibly faster convergence rate may be achieved. 
# the 4th parameter controls outputs of verbose training information - sgd = sg.create_machine("StochasticSOSVM", model=model, labels=labels_fg, do_weighted_averaging=True, + sgd = sg.create("StochasticSOSVM", model=model, labels=labels_fg, do_weighted_averaging=True, num_iter=150, m_lambda=0.0001) # train diff --git a/examples/undocumented/python/structure_hierarchical_multilabel_classification.py b/examples/undocumented/python/structure_hierarchical_multilabel_classification.py index a675a6b6ded..97c3b09f70d 100644 --- a/examples/undocumented/python/structure_hierarchical_multilabel_classification.py +++ b/examples/undocumented/python/structure_hierarchical_multilabel_classification.py @@ -106,11 +106,11 @@ def structure_hierarchical_multilabel_classification(train_file_name, train_file) # TODO: fix HierarchicalMultilabelModel initialisation - model = sg.create_structured_model("HierarchicalMultilabelModel", + model = sg.create("HierarchicalMultilabelModel", features=train_features, labels=train_labels, taxonomy=train_taxonomy) - sgd = sg.create_machine("StochasticSOSVM", model=model, labels=train_labels) + sgd = sg.create("StochasticSOSVM", model=model, labels=train_labels) # t1 = time.time() # sgd.train() # print('>>> Took %f time for training' % (time.time() - t1)) @@ -118,7 +118,7 @@ def structure_hierarchical_multilabel_classification(train_file_name, # test_features, test_labels, test_taxonomy = get_features_labels(test_file) # assert(test_taxonomy.all() == train_taxonomy.all()) - # evaluator = sg.create_evaluation("StructuredAccuracy") + # evaluator = sg.create("StructuredAccuracy") # outlabel = sgd.apply(test_features) # print('>>> Accuracy of classification = %f' % evaluator.evaluate( diff --git a/examples/undocumented/python/structure_plif_hmsvm_bmrm.py b/examples/undocumented/python/structure_plif_hmsvm_bmrm.py index 690b7b4bb53..849ff80c89d 100644 --- a/examples/undocumented/python/structure_plif_hmsvm_bmrm.py +++ b/examples/undocumented/python/structure_plif_hmsvm_bmrm.py @@ -19,7 +19,7 @@ def structure_plif_hmsvm_bmrm (num_examples, example_length, num_features, num_n #print sosvm.get_w() predicted = sosvm.apply(model.get_features()) - evaluator = sg.create_evaluation("StructuredAccuracy") + evaluator = sg.create("StructuredAccuracy") acc = evaluator.evaluate(predicted, model.get_labels()) #print('Accuracy = %.4f' % acc) diff --git a/examples/undocumented/python/structure_plif_hmsvm_mosek.py b/examples/undocumented/python/structure_plif_hmsvm_mosek.py index e806e5fd21d..932b0347248 100644 --- a/examples/undocumented/python/structure_plif_hmsvm_mosek.py +++ b/examples/undocumented/python/structure_plif_hmsvm_mosek.py @@ -19,7 +19,7 @@ def structure_plif_hmsvm_mosek (num_examples, example_length, num_features, num_ #print(sosvm.get_w()) predicted = sosvm.apply(model.get_features()) - evaluator = sg.create_evaluation("StructuredAccuracy") + evaluator = sg.create("StructuredAccuracy") acc = evaluator.evaluate(predicted, model.get_labels()) #print('Accuracy = %.4f' % acc) diff --git a/examples/undocumented/python/tests_check_commwordkernel_memleak.py b/examples/undocumented/python/tests_check_commwordkernel_memleak.py index 0b45b159bd3..66567189884 100644 --- a/examples/undocumented/python/tests_check_commwordkernel_memleak.py +++ b/examples/undocumented/python/tests_check_commwordkernel_memleak.py @@ -62,12 +62,12 @@ def tests_check_commwordkernel_memleak (num, order, gap, reverse): trainudat=StringWordFeatures(traindat.get_alphabet()); trainudat.obtain_from_char(traindat, 
order-1, order, gap, reverse)
     #trainudat.io.set_loglevel(MSG_DEBUG)
-    pre = sg.create_transformer("SortWordString")
+    pre = sg.create("SortWordString")
     #pre.io.set_loglevel(MSG_DEBUG)
     pre.fit(trainudat)
     trainudat = pre.transform(trainudat)
-    spec = sg.create_kernel("CommWordStringKernel", cache_size=10, use_sign=False)
-    spec.set_normalizer(sg.create_kernel_normalizer("IdentityKernelNormalizer"))
+    spec = sg.create("CommWordStringKernel", cache_size=10, use_sign=False)
+    spec.set_normalizer(sg.create("IdentityKernelNormalizer"))
     spec.init(trainudat, trainudat)
     K=spec.get_kernel_matrix()
diff --git a/examples/undocumented/python/transfer_multitask_clustered_logistic_regression.py b/examples/undocumented/python/transfer_multitask_clustered_logistic_regression.py
index b202cd0782b..f2850da24a8 100644
--- a/examples/undocumented/python/transfer_multitask_clustered_logistic_regression.py
+++ b/examples/undocumented/python/transfer_multitask_clustered_logistic_regression.py
@@ -13,7 +13,7 @@ def transfer_multitask_clustered_logistic_regression (fm_train=traindat,fm_test=testdat,label_train=label_traindat):
     import shogun as sg
     try:
-        sg.create_machine("MultitaskClusteredLogisticRegression")
+        sg.create("MultitaskClusteredLogisticRegression")
     except ImportError:
         print("MultitaskClusteredLogisticRegression not available")
         exit()
diff --git a/examples/undocumented/python/variational_classifier.py b/examples/undocumented/python/variational_classifier.py
index 77dc1f2edf9..946e3e01be9 100644
--- a/examples/undocumented/python/variational_classifier.py
+++ b/examples/undocumented/python/variational_classifier.py
@@ -63,7 +63,7 @@ def variational_classifier(kl_inference,train_fname=traindat,test_fname=testdat,
     error_eval=ErrorRateMeasure()
     mean_func=ConstMean()
     kernel_sigma=2*exp(2*kernel_log_sigma);
-    kernel_func=sg.create_kernel("GaussianKernel", width=kernel_sigma)
+    kernel_func=sg.create("GaussianKernel", width=kernel_sigma)
     inf=kl_inference(kernel_func, features_train, mean_func, labels_train, likelihood)
     try:
diff --git a/src/interfaces/python/factory_python.i b/src/interfaces/python/factory_python.i
new file mode 100644
index 00000000000..3941cb8e9ee
--- /dev/null
+++ b/src/interfaces/python/factory_python.i
@@ -0,0 +1,183 @@
+%{
+#include
+
+// we can use:
+// SWIG_TypeQueryModule or SWIG_MangledTypeQueryModule or SWIG_Python_TypeQuery
+// to query the swig_type and be less implementation dependent
+// but this is faster and should not cause problems
+#define SHOGUN_GET_SWIG_TYPE(name) \
+    SWIGTYPE_p_std__shared_ptrT_shogun__##name##_t
+
+namespace shogun
+{
+    class ShogunInterfaceToPyObject : public InterfaceTypeVisitor
+    {
+    public:
+        virtual void on(std::shared_ptr<SGObject>* v)
+        {
+            return on_impl(v, SHOGUN_GET_SWIG_TYPE(SGObject));
+        }
+        virtual void on(std::shared_ptr<Machine>* v)
+        {
+            return on_impl(v, SHOGUN_GET_SWIG_TYPE(Machine));
+        }
+        virtual void on(std::shared_ptr<Kernel>* v)
+        {
+            return on_impl(v, SHOGUN_GET_SWIG_TYPE(Kernel));
+        }
+        virtual void on(std::shared_ptr<Distance>* v)
+        {
+            return on_impl(v, SHOGUN_GET_SWIG_TYPE(Distance));
+        }
+        virtual void on(std::shared_ptr<Features>* v)
+        {
+            return on_impl(v, SHOGUN_GET_SWIG_TYPE(Features));
+        }
+        virtual void on(std::shared_ptr<Labels>* v)
+        {
+            return on_impl(v, SHOGUN_GET_SWIG_TYPE(Labels));
+        }
+        virtual void on(std::shared_ptr<ECOCEncoder>* v)
+        {
+            return on_impl(v, SHOGUN_GET_SWIG_TYPE(ECOCEncoder));
+        }
+        virtual void on(std::shared_ptr<ECOCDecoder>* v)
+        {
+            return on_impl(v, SHOGUN_GET_SWIG_TYPE(ECOCDecoder));
+        }
+        virtual void on(std::shared_ptr<Evaluation>* v)
+        {
+            return on_impl(v, SHOGUN_GET_SWIG_TYPE(Evaluation));
+        }
+        virtual void on(std::shared_ptr<EvaluationResult>* v)
+        {
+            return on_impl(v, SHOGUN_GET_SWIG_TYPE(EvaluationResult));
+        }
+        virtual void on(std::shared_ptr<MulticlassStrategy>* v)
+        {
+            return on_impl(v, SHOGUN_GET_SWIG_TYPE(MulticlassStrategy));
+        }
+        virtual void on(std::shared_ptr<NeuralLayer>* v)
+        {
+            return on_impl(v, SHOGUN_GET_SWIG_TYPE(NeuralLayer));
+        }
+        virtual void on(std::shared_ptr<SplittingStrategy>* v)
+        {
+            return on_impl(v, SHOGUN_GET_SWIG_TYPE(SplittingStrategy));
+        }
+        virtual void on(std::shared_ptr<Pipeline>* v)
+        {
+            return on_impl(v, SHOGUN_GET_SWIG_TYPE(Pipeline));
+        }
+        virtual void on(std::shared_ptr<SVM>* v)
+        {
+            return on_impl(v, SHOGUN_GET_SWIG_TYPE(SVM));
+        }
+        virtual void on(std::shared_ptr<LikelihoodModel>* v)
+        {
+            return on_impl(v, SHOGUN_GET_SWIG_TYPE(LikelihoodModel));
+        }
+        virtual void on(std::shared_ptr<MeanFunction>* v)
+        {
+            return on_impl(v, SHOGUN_GET_SWIG_TYPE(MeanFunction));
+        }
+        virtual void on(std::shared_ptr<DifferentiableFunction>* v)
+        {
+            return on_impl(v, SHOGUN_GET_SWIG_TYPE(DifferentiableFunction));
+        }
+        virtual void on(std::shared_ptr<Inference>* v)
+        {
+            return on_impl(v, SHOGUN_GET_SWIG_TYPE(Inference));
+        }
+        virtual void on(std::shared_ptr<LossFunction>* v)
+        {
+            return on_impl(v, SHOGUN_GET_SWIG_TYPE(LossFunction));
+        }
+        virtual void on(std::shared_ptr<Tokenizer>* v)
+        {
+            return on_impl(v, SHOGUN_GET_SWIG_TYPE(Tokenizer));
+        }
+        virtual void on(std::shared_ptr<CombinationRule>* v)
+        {
+            return on_impl(v, SHOGUN_GET_SWIG_TYPE(CombinationRule));
+        }
+        virtual void on(std::shared_ptr<KernelNormalizer>* v)
+        {
+            return on_impl(v, SHOGUN_GET_SWIG_TYPE(KernelNormalizer));
+        }
+        virtual void on(std::shared_ptr<Transformer>* v)
+        {
+            return on_impl(v, SHOGUN_GET_SWIG_TYPE(Transformer));
+        }
+        virtual void on(std::shared_ptr<MachineEvaluation>* v)
+        {
+            return on_impl(v, SHOGUN_GET_SWIG_TYPE(MachineEvaluation));
+        }
+        virtual void on(std::shared_ptr<StructuredModel>* v)
+        {
+            return on_impl(v, SHOGUN_GET_SWIG_TYPE(StructuredModel));
+        }
+        virtual void on(std::shared_ptr<FactorType>* v)
+        {
+            return on_impl(v, SHOGUN_GET_SWIG_TYPE(FactorType));
+        }
+        virtual void on(std::shared_ptr<ParameterObserver>* v)
+        {
+            return on_impl(v, SHOGUN_GET_SWIG_TYPE(ParameterObserver));
+        }
+        virtual void on(std::shared_ptr<Distribution>* v)
+        {
+            return on_impl(v, SHOGUN_GET_SWIG_TYPE(Distribution));
+        }
+        virtual void on(std::shared_ptr<GaussianProcess>* v)
+        {
+            return on_impl(v, SHOGUN_GET_SWIG_TYPE(GaussianProcess));
+        }
+        virtual void on(std::shared_ptr<Alphabet>* v)
+        {
+            return on_impl(v, SHOGUN_GET_SWIG_TYPE(Alphabet));
+        }
+
+        template <class T>
+        void on_impl(std::shared_ptr<T>* v, swig_type_info* type)
+        {
+            if (!v)
+                return;
+
+            auto smartresult = new std::shared_ptr<T>(*v);
+            m_pyobj = SWIG_Python_NewPointerObj(
+                nullptr, SWIG_as_voidptr(smartresult), type, SWIG_POINTER_OWN);
+        }
+
+        PyObject* pyobj()
+        {
+            if (!m_pyobj)
+            {
+                auto smartresult = new std::shared_ptr<SGObject>(nullptr);
+                m_pyobj = SWIG_Python_NewPointerObj(
+                    nullptr, SWIG_as_voidptr(smartresult),
+                    SHOGUN_GET_SWIG_TYPE(SGObject), SWIG_POINTER_OWN);
+            }
+            return m_pyobj;
+        }
+
+    private:
+        PyObject* m_pyobj = nullptr;
+    };
+} // namespace shogun
+%}
+
+%inline %{
+#include
+
+namespace shogun
+{
+    PyObject* create(const char* name)
+    {
+        static auto visitor = std::make_shared<ShogunInterfaceToPyObject>();
+        create(name, PT_NOT_GENERIC, visitor);
+        return visitor->pyobj();
+    }
+}
+
+%}
diff --git a/src/interfaces/python/swig_typemaps.i b/src/interfaces/python/swig_typemaps.i
index dca294c974c..5ec2ac4504a 100644
--- a/src/interfaces/python/swig_typemaps.i
+++ b/src/interfaces/python/swig_typemaps.i
@@ -1028,28 +1028,7 @@ TYPEMAP_SPARSEFEATURES_OUT(PyObject, NPY_OBJECT)
 %pythoncode %{
 import sys
 
-_FACTORIES = ["create_distance",
-              "create_evaluation",
-              "create_kernel",
-              "create_machine",
-
"create_multiclass_strategy", - "create_ecoc_encoder", - "create_ecoc_decoder", - "create_transformer", - "create_layer", - "create_splitting_strategy", - "create_machine_evaluation", - "create_features", - "create_differentiable", - "create_gp_inference", - "create_gp_mean", - "create_gp_likelihood", - "create_loss", - "create_structured_model", - "create_factor_type", - "create_gaussian_process", - "create_kernel_normalizer", - ] +_FACTORIES = ["create", "create_features"] def _internal_factory_wrapper(object_name, new_name, docstring=None): """ diff --git a/src/interfaces/swig/factory.i b/src/interfaces/swig/factory.i index 3421a9ffcb0..854d16ba83d 100644 --- a/src/interfaces/swig/factory.i +++ b/src/interfaces/swig/factory.i @@ -95,6 +95,15 @@ namespace shogun{ #endif //SWIGJAVA +// FIXME: There is an ambiguity with create_kernel for custom kernels which takes a matrix +// the meta example parser can't differentiate between the two! +%template(create_kernel) shogun::create_; +// same problem with the ambiguity +%template(create_features) shogun::create_; + +%template(create_labels) shogun::create_labels; + +#ifndef SWIGPYTHON %template(create_svm) shogun::create_; %template(create_evaluation) shogun::create_; %template(create_multiclass_strategy) shogun::create_; @@ -114,13 +123,13 @@ namespace shogun{ %template(create_distribution) shogun::create_; %template(create_combination_rule) shogun::create_; %template(create_distance) shogun::create_; -%template(create_kernel) shogun::create_; -%template(create_features) shogun::create_; %template(create_machine) shogun::create_; %template(create_structured_model) shogun::create_; %template(create_factor_type) shogun::create_; %template(create_gaussian_process) shogun::create_; -%template(create_labels) shogun::create_labels; %template(create_minimizer) shogun::create_; %template(create_lbfgs_minimizer) shogun::create_; %template(create_kernel_normalizer) shogun::create_; +#else +%include "factory_python.i" +#endif diff --git a/src/shogun/base/base_types.h b/src/shogun/base/base_types.h index 01d16a629cc..a13f012d09f 100644 --- a/src/shogun/base/base_types.h +++ b/src/shogun/base/base_types.h @@ -12,7 +12,7 @@ namespace shogun { - // all shogun base classes for put/add templates + // all shogun base classes for put/add templates and factories class Machine; class Kernel; class Distance; @@ -34,34 +34,15 @@ namespace shogun class LossFunction; class Tokenizer; class CombinationRule; - - // type trait to enable certain methods only for shogun base types - // FIXME: use sg_interface to populate this trait - template - struct is_sg_base - : std::integral_constant< - bool, std::is_same::value || - std::is_same::value || - std::is_same::value || - std::is_same::value || - std::is_same::value || - std::is_same::value || - std::is_same::value || - std::is_same::value || - std::is_same::value || - std::is_same::value || - std::is_same::value || - std::is_same::value || - std::is_same::value || - std::is_same::value || - std::is_same::value || - std::is_same::value || - std::is_same::value || - std::is_same::value || - std::is_same::value || - std::is_same::value> - { - }; + class KernelNormalizer; + class Transformer; + class MachineEvaluation; + class StructuredModel; + class FactorType; + class ParameterObserver; + class Distribution; + class GaussianProcess; + class Alphabet; template struct is_string @@ -114,7 +95,43 @@ namespace shogun EvaluationResult, MulticlassStrategy, NeuralLayer, SplittingStrategy, LikelihoodModel, MeanFunction, 
DifferentiableFunction, Inference, LossFunction, - Tokenizer>; + Tokenizer, CombinationRule, KernelNormalizer, Transformer, + MachineEvaluation, StructuredModel, FactorType, ParameterObserver, + Distribution, GaussianProcess, Alphabet>; + + namespace types_detail + { + template + struct typeInList_impl : public std::false_type + { + }; + + template + struct typeInList_impl + : public std::conditional_t< + std::is_same_v, std::true_type, + typeInList_impl> + { + }; + } // namespace types_detail + + template + struct typeInList : public std::false_type + { + using X = typename TypesT : WTF; + }; + + template class TypesT, typename... Args> + struct typeInList> + : public types_detail::typeInList_impl + { + }; + + + template + struct is_sg_base : public typeInList + { + }; template constexpr auto find_base(type_list<>) diff --git a/src/shogun/base/class_list.cpp.py b/src/shogun/base/class_list.cpp.py index 854d5dd1ece..eafcde94b92 100644 --- a/src/shogun/base/class_list.cpp.py +++ b/src/shogun/base/class_list.cpp.py @@ -41,7 +41,7 @@ "HAVE_NLOPT", "HAVE_PROTOBUF", "HAVE_VIENNACL", "USE_GPL_SHOGUN", "USE_META_INTEGRATION_TESTS", "HAVE_TFLOGGER"] # TODO: remove once plugins are working -class_blacklist = ["SGVector", "SGMatrix", "SGSparseVector", "SGSparseMatrix", +class_blacklist = ["SGVector", "SGMatrix", "SGSparseVector", "SGSparseMatrix", "SGStringList", "SGMatrixList", "SGCachedVector", "SGNDArray", "ObservedValue", "ObservedValueTemplated", "ParameterObserverHistogram", "ParameterObserverScalar", "ParameterObserverTensorBoard", @@ -163,7 +163,18 @@ def get_definitions(classes): definitions.append("#define %s" % SHOGUN_TEMPLATE_CLASS) definitions.append("#define %s" % SHOGUN_BASIC_CLASS) for c, t in classes: - d = "static %s SGObject* __new_%s(EPrimitiveType g) { return g == PT_NOT_GENERIC? 
new %s(): NULL; }" % (SHOGUN_BASIC_CLASS,c,c) + d = \ +"""static %s std::shared_ptr\n__new_%s(EPrimitiveType g, const std::shared_ptr& visitor) +{ +\tif(g == PT_NOT_GENERIC) +\t{ +\t\tauto obj = std::make_shared<%s>(); +\t\tif(visitor) visitor->on(&obj); +\t\treturn obj; +\t} +\treturn {}; +}""" \ + % (SHOGUN_BASIC_CLASS, c, c) definitions.append(d) return definitions @@ -172,20 +183,27 @@ def get_template_definitions(classes, supports_complex): definitions = [] for c, t in classes: d = [] - d.append("static %s SGObject* __new_%s(EPrimitiveType g)\n{\n\tswitch (g)\n\t{\n" + d.append("static %s std::shared_ptr\n" % (SHOGUN_BASIC_CLASS)) + d.append("\n__new_%s(EPrimitiveType g, const std::shared_ptr& visitor)\n" % (SHOGUN_TEMPLATE_CLASS, c)) + d.append("{\n\tswitch (g)\n\t{\n") for t in types: if t in ('BOOL', 'CHAR'): suffix = '' else: suffix = '_t' if t == 'COMPLEX128' and not supports_complex: - d.append("\t\tcase PT_COMPLEX128: return NULL;\n") + d.append("\t\tcase PT_COMPLEX128: return {};\n") else: - d.append("\t\tcase PT_%s: return new %s<%s%s>();\n" - % (t, c, t.lower(), suffix)) + d.append( + "\t\tcase PT_%s: {\n\t\t\tauto obj = std::make_shared<%s<%s%s>>();" + % (t, c, t.lower(), suffix)) + d.append( + "\n\t\t\tif(visitor) visitor->on(&obj);\n\t\t\treturn obj;\n\t\t}\n") + d.append("\t\tcase PT_SGOBJECT:\n") - d.append("\t\tcase PT_UNDEFINED: return NULL;\n\t}\n\treturn NULL;\n}") + d.append("\t\tcase PT_UNDEFINED: return {};\n\t}") + d.append("\n\treturn {};\n}\n") definitions.append(''.join(d)) return definitions diff --git a/src/shogun/base/class_list.cpp.templ b/src/shogun/base/class_list.cpp.templ index f18c5eca65f..66ed80259eb 100644 --- a/src/shogun/base/class_list.cpp.templ +++ b/src/shogun/base/class_list.cpp.templ @@ -32,6 +32,7 @@ #include #include +#include #include REPLACE includes THIS @@ -45,18 +46,20 @@ REPLACE template_definitions THIS REPLACE complex_template_definitions THIS -typedef SGObject* (*CreateFunction)(EPrimitiveType generic); +typedef std::shared_ptr (*CreateFunction)(EPrimitiveType generic, const std::shared_ptr&); -static const std::map classes = { +static const std::unordered_map classes = { REPLACE struct THIS }; -std::shared_ptr shogun::create(const char* classname, EPrimitiveType generic) +std::shared_ptr shogun::create( + const char* classname, EPrimitiveType generic, + const std::shared_ptr& visitor) { auto entry = classes.find(classname); if (entry != classes.end()) { - return std::shared_ptr(entry->second(generic)); + return entry->second(generic, visitor); } return nullptr; } diff --git a/src/shogun/base/class_list.h b/src/shogun/base/class_list.h index 99471c25fc0..453aaba56eb 100644 --- a/src/shogun/base/class_list.h +++ b/src/shogun/base/class_list.h @@ -20,12 +20,15 @@ namespace shogun { class SGObject; + class InterfaceTypeVisitor; /** new shogun instance * @param sgserializable_name * @param generic */ - std::shared_ptr create(const char* sgserializable_name, EPrimitiveType generic); + std::shared_ptr create( + const char* sgserializable_name, EPrimitiveType generic, + const std::shared_ptr& visitor = {}); /** Creates new shogun instance, typed. 
* @@ -35,10 +38,11 @@ namespace shogun { */ template std::shared_ptr create_object( - const char* name, - EPrimitiveType pt = PT_NOT_GENERIC) noexcept(false) + const char* name, EPrimitiveType pt = PT_NOT_GENERIC, + const std::shared_ptr& visitor = + {}) noexcept(false) { - auto object = create(name, pt); + auto object = create(name, pt, visitor); if (!object) { error( diff --git a/src/shogun/lib/sg_types.h b/src/shogun/lib/sg_types.h index 0b5e8f28751..374e5286d9e 100644 --- a/src/shogun/lib/sg_types.h +++ b/src/shogun/lib/sg_types.h @@ -245,4 +245,4 @@ namespace shogun #endif // DOXYGEN_SHOULD_SKIP_THIS } // namespace shogun -#endif // SHOGUN_TYPE_H \ No newline at end of file +#endif // SHOGUN_TYPE_H diff --git a/src/shogun/util/visitors/InterfaceTypeVisitor.h b/src/shogun/util/visitors/InterfaceTypeVisitor.h new file mode 100644 index 00000000000..08076b05ed6 --- /dev/null +++ b/src/shogun/util/visitors/InterfaceTypeVisitor.h @@ -0,0 +1,74 @@ +#ifndef _BASE_TYPE_VISITOR_H_ +#define _BASE_TYPE_VISITOR_H_ + +#include + +#include + +namespace shogun +{ + class SGObject; + + class InterfaceTypeVisitor + { + public: + virtual ~InterfaceTypeVisitor() = default; + + virtual void on(std::shared_ptr*) = 0; + virtual void on(std::shared_ptr*) = 0; + virtual void on(std::shared_ptr*) = 0; + virtual void on(std::shared_ptr*) = 0; + virtual void on(std::shared_ptr*) = 0; + virtual void on(std::shared_ptr*) = 0; + virtual void on(std::shared_ptr*) = 0; + virtual void on(std::shared_ptr*) = 0; + virtual void on(std::shared_ptr*) = 0; + virtual void on(std::shared_ptr*) = 0; + virtual void on(std::shared_ptr*) = 0; + virtual void on(std::shared_ptr*) = 0; + virtual void on(std::shared_ptr*) = 0; + virtual void on(std::shared_ptr*) = 0; + virtual void on(std::shared_ptr*) = 0; + virtual void on(std::shared_ptr*) = 0; + virtual void on(std::shared_ptr*) = 0; + virtual void on(std::shared_ptr*) = 0; + virtual void on(std::shared_ptr*) = 0; + virtual void on(std::shared_ptr*) = 0; + virtual void on(std::shared_ptr*) = 0; + virtual void on(std::shared_ptr*) = 0; + virtual void on(std::shared_ptr*) = 0; + virtual void on(std::shared_ptr*) = 0; + virtual void on(std::shared_ptr*) = 0; + virtual void on(std::shared_ptr*) = 0; + virtual void on(std::shared_ptr*) = 0; + virtual void on(std::shared_ptr*) = 0; + virtual void on(std::shared_ptr*) = 0; + virtual void on(std::shared_ptr*) = 0; + virtual void on(std::shared_ptr*) = 0; + + template < + class T, + std::enable_if_t, T>* = nullptr> + void on(std::shared_ptr* v) + { + if (!v) + return; + + using Base = std::conditional_t< + std::is_same_v, std::nullptr_t>, SGObject, + base_type>; + + auto v_upcasted = std::static_pointer_cast(*v); + on(&v_upcasted); + } + + template < + class T, + std::enable_if_t, T>* = nullptr> + void on(std::shared_ptr* v) + { + } + }; +} // namespace shogun + +#endif From 87cba0b4f21ee87ce0ece06bb4a61b453ba2cc8e Mon Sep 17 00:00:00 2001 From: Ahmed Essam Date: Wed, 2 Sep 2020 05:53:10 +0200 Subject: [PATCH 2/6] bug fixes --- src/interfaces/python/factory_python.i | 2 +- src/interfaces/python/swig_typemaps.i | 2 +- src/shogun/base/base_types.h | 2 +- src/shogun/base/class_list.cpp.py | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/interfaces/python/factory_python.i b/src/interfaces/python/factory_python.i index 3941cb8e9ee..97907ba9403 100644 --- a/src/interfaces/python/factory_python.i +++ b/src/interfaces/python/factory_python.i @@ -3,7 +3,7 @@ // we can use: // SWIG_TypeQueryModule or 
SWIG_MangledTypeQueryModule or SWIG_Python_TypeQuery -// to query the swig_type and be less implementation dependent +// to query the swig_type in run time and be less implementation dependent maybe? // but this is faster and should not cause problems #define SHOGUN_GET_SWIG_TYPE(name) \ SWIGTYPE_p_std__shared_ptrT_shogun__##name##_t diff --git a/src/interfaces/python/swig_typemaps.i b/src/interfaces/python/swig_typemaps.i index 5ec2ac4504a..e362d345ed3 100644 --- a/src/interfaces/python/swig_typemaps.i +++ b/src/interfaces/python/swig_typemaps.i @@ -1028,7 +1028,7 @@ TYPEMAP_SPARSEFEATURES_OUT(PyObject, NPY_OBJECT) %pythoncode %{ import sys -_FACTORIES = ["create", "create_features"] +_FACTORIES = ["create", "create_features", "create_kernel"] def _internal_factory_wrapper(object_name, new_name, docstring=None): """ diff --git a/src/shogun/base/base_types.h b/src/shogun/base/base_types.h index a13f012d09f..607a9730957 100644 --- a/src/shogun/base/base_types.h +++ b/src/shogun/base/base_types.h @@ -118,7 +118,7 @@ namespace shogun template struct typeInList : public std::false_type { - using X = typename TypesT : WTF; + static_assert(!std::is_same_v); }; template class TypesT, typename... Args> diff --git a/src/shogun/base/class_list.cpp.py b/src/shogun/base/class_list.cpp.py index eafcde94b92..57239697f3a 100644 --- a/src/shogun/base/class_list.cpp.py +++ b/src/shogun/base/class_list.cpp.py @@ -185,7 +185,7 @@ def get_template_definitions(classes, supports_complex): d = [] d.append("static %s std::shared_ptr\n" % (SHOGUN_BASIC_CLASS)) d.append("\n__new_%s(EPrimitiveType g, const std::shared_ptr& visitor)\n" - % (SHOGUN_TEMPLATE_CLASS, c)) + % (c)) d.append("{\n\tswitch (g)\n\t{\n") for t in types: if t in ('BOOL', 'CHAR'): From f54957b0d9f71f07b6be8e462602ad50233b2bc3 Mon Sep 17 00:00:00 2001 From: Ahmed Essam Date: Wed, 2 Sep 2020 07:35:44 +0200 Subject: [PATCH 3/6] Rearrange types from most derived to least --- src/shogun/base/base_types.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/shogun/base/base_types.h b/src/shogun/base/base_types.h index 607a9730957..e50924ff1d2 100644 --- a/src/shogun/base/base_types.h +++ b/src/shogun/base/base_types.h @@ -94,7 +94,7 @@ namespace shogun Features, Labels, ECOCEncoder, ECOCDecoder, Evaluation, EvaluationResult, MulticlassStrategy, NeuralLayer, SplittingStrategy, LikelihoodModel, MeanFunction, - DifferentiableFunction, Inference, LossFunction, + Inference, DifferentiableFunction, LossFunction, Tokenizer, CombinationRule, KernelNormalizer, Transformer, MachineEvaluation, StructuredModel, FactorType, ParameterObserver, Distribution, GaussianProcess, Alphabet>; From 3139635beb44b6f6db82d45f36fb2e141628a466 Mon Sep 17 00:00:00 2001 From: Ahmed Essam Date: Wed, 2 Sep 2020 09:10:26 +0200 Subject: [PATCH 4/6] Throw if class does not exist --- .../python/structure_discrete_hmsvm_mosek.py | 2 +- src/interfaces/python/factory_python.i | 9 +++++---- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/examples/undocumented/python/structure_discrete_hmsvm_mosek.py b/examples/undocumented/python/structure_discrete_hmsvm_mosek.py index 2397562d00b..f0679bddd9c 100644 --- a/examples/undocumented/python/structure_discrete_hmsvm_mosek.py +++ b/examples/undocumented/python/structure_discrete_hmsvm_mosek.py @@ -27,7 +27,7 @@ def structure_discrete_hmsvm_mosek (m_data_dict=data_dict): num_obs = 4 # given by the data file used model = sg.create("HMSVMModel", features=features, labels=labels, - state_model_type=SMT_TWO_STATE, 
num_obs=num_obs) + state_model_type=sg.SMT_TWO_STATE, num_obs=num_obs) sosvm = sg.create("PrimalMosekSOSVM", model=model, labels=labels) sosvm.train() diff --git a/src/interfaces/python/factory_python.i b/src/interfaces/python/factory_python.i index 97907ba9403..cd6facbf8d4 100644 --- a/src/interfaces/python/factory_python.i +++ b/src/interfaces/python/factory_python.i @@ -151,12 +151,12 @@ namespace shogun PyObject* pyobj() { + // this should never be true since "create" throws on error if (!m_pyobj) { - auto smartresult = new std::shared_ptr(nullptr); m_pyobj = SWIG_Python_NewPointerObj( - nullptr, SWIG_as_voidptr(smartresult), - SHOGUN_GET_SWIG_TYPE(SGObject), SWIG_POINTER_OWN); + nullptr, nullptr, SHOGUN_GET_SWIG_TYPE(SGObject), + SWIG_POINTER_OWN); } return m_pyobj; } @@ -175,7 +175,8 @@ namespace shogun PyObject* create(const char* name) { static auto visitor = std::make_shared(); - create(name, PT_NOT_GENERIC, visitor); + auto sgobj = create(name, PT_NOT_GENERIC, visitor); + require(sgobj, "Class {} does not exist.", name); return visitor->pyobj(); } } From a3fefc48880277eabcbe052c3be4b0b55df8e486 Mon Sep 17 00:00:00 2001 From: Ahmed Essam Date: Wed, 2 Sep 2020 18:19:58 +0200 Subject: [PATCH 5/6] Address comments and fix bugs --- .../{factory_python.i => factory_visitors.i} | 76 +++++++++---------- src/interfaces/swig/factory.i | 2 +- src/shogun/base/base_types.h | 22 +++--- 3 files changed, 47 insertions(+), 53 deletions(-) rename src/interfaces/python/{factory_python.i => factory_visitors.i} (61%) diff --git a/src/interfaces/python/factory_python.i b/src/interfaces/python/factory_visitors.i similarity index 61% rename from src/interfaces/python/factory_python.i rename to src/interfaces/python/factory_visitors.i index cd6facbf8d4..14fa5cc4b9a 100644 --- a/src/interfaces/python/factory_python.i +++ b/src/interfaces/python/factory_visitors.i @@ -4,7 +4,7 @@ // we can use: // SWIG_TypeQueryModule or SWIG_MangledTypeQueryModule or SWIG_Python_TypeQuery // to query the swig_type in run time and be less implementation dependent maybe? 
-// but this is faster and should not cause problems +// but this is faster (compile-time) and should not cause problems #define SHOGUN_GET_SWIG_TYPE(name) \ SWIGTYPE_p_std__shared_ptrT_shogun__##name##_t @@ -13,127 +13,127 @@ namespace shogun class ShogunInterfaceToPyObject : public InterfaceTypeVisitor { public: - virtual void on(std::shared_ptr* v) + void on(std::shared_ptr* v) override { return on_impl(v, SHOGUN_GET_SWIG_TYPE(SGObject)); } - virtual void on(std::shared_ptr* v) + void on(std::shared_ptr* v) override { return on_impl(v, SHOGUN_GET_SWIG_TYPE(Machine)); } - virtual void on(std::shared_ptr* v) + void on(std::shared_ptr* v) override { return on_impl(v, SHOGUN_GET_SWIG_TYPE(Kernel)); } - virtual void on(std::shared_ptr* v) + void on(std::shared_ptr* v) override { return on_impl(v, SHOGUN_GET_SWIG_TYPE(Distance)); } - virtual void on(std::shared_ptr* v) + void on(std::shared_ptr* v) override { return on_impl(v, SHOGUN_GET_SWIG_TYPE(Features)); } - virtual void on(std::shared_ptr* v) + void on(std::shared_ptr* v) override { return on_impl(v, SHOGUN_GET_SWIG_TYPE(Labels)); } - virtual void on(std::shared_ptr* v) + void on(std::shared_ptr* v) override { return on_impl(v, SHOGUN_GET_SWIG_TYPE(ECOCEncoder)); } - virtual void on(std::shared_ptr* v) + void on(std::shared_ptr* v) override { return on_impl(v, SHOGUN_GET_SWIG_TYPE(ECOCDecoder)); } - virtual void on(std::shared_ptr* v) + void on(std::shared_ptr* v) override { return on_impl(v, SHOGUN_GET_SWIG_TYPE(Evaluation)); } - virtual void on(std::shared_ptr* v) + void on(std::shared_ptr* v) override { return on_impl(v, SHOGUN_GET_SWIG_TYPE(EvaluationResult)); } - virtual void on(std::shared_ptr* v) + void on(std::shared_ptr* v) override { return on_impl(v, SHOGUN_GET_SWIG_TYPE(MulticlassStrategy)); } - virtual void on(std::shared_ptr* v) + void on(std::shared_ptr* v) override { return on_impl(v, SHOGUN_GET_SWIG_TYPE(NeuralLayer)); } - virtual void on(std::shared_ptr* v) + void on(std::shared_ptr* v) override { return on_impl(v, SHOGUN_GET_SWIG_TYPE(SplittingStrategy)); } - virtual void on(std::shared_ptr* v) + void on(std::shared_ptr* v) override { return on_impl(v, SHOGUN_GET_SWIG_TYPE(Pipeline)); } - virtual void on(std::shared_ptr* v) + void on(std::shared_ptr* v) override { return on_impl(v, SHOGUN_GET_SWIG_TYPE(SVM)); } - virtual void on(std::shared_ptr* v) + void on(std::shared_ptr* v) override { return on_impl(v, SHOGUN_GET_SWIG_TYPE(LikelihoodModel)); } - virtual void on(std::shared_ptr* v) + void on(std::shared_ptr* v) override { return on_impl(v, SHOGUN_GET_SWIG_TYPE(MeanFunction)); } - virtual void on(std::shared_ptr* v) + void on(std::shared_ptr* v) override { return on_impl(v, SHOGUN_GET_SWIG_TYPE(DifferentiableFunction)); } - virtual void on(std::shared_ptr* v) + void on(std::shared_ptr* v) override { return on_impl(v, SHOGUN_GET_SWIG_TYPE(Inference)); } - virtual void on(std::shared_ptr* v) + void on(std::shared_ptr* v) override { return on_impl(v, SHOGUN_GET_SWIG_TYPE(LossFunction)); } - virtual void on(std::shared_ptr* v) + void on(std::shared_ptr* v) override { return on_impl(v, SHOGUN_GET_SWIG_TYPE(Tokenizer)); } - virtual void on(std::shared_ptr* v) + void on(std::shared_ptr* v) override { return on_impl(v, SHOGUN_GET_SWIG_TYPE(CombinationRule)); } - virtual void on(std::shared_ptr* v) + void on(std::shared_ptr* v) override { return on_impl(v, SHOGUN_GET_SWIG_TYPE(KernelNormalizer)); } - virtual void on(std::shared_ptr* v) + void on(std::shared_ptr* v) override { return on_impl(v, 
SHOGUN_GET_SWIG_TYPE(Transformer)); } - virtual void on(std::shared_ptr* v) + void on(std::shared_ptr* v) override { return on_impl(v, SHOGUN_GET_SWIG_TYPE(MachineEvaluation)); } - virtual void on(std::shared_ptr* v) + void on(std::shared_ptr* v) override { return on_impl(v, SHOGUN_GET_SWIG_TYPE(StructuredModel)); } - virtual void on(std::shared_ptr* v) + void on(std::shared_ptr* v) override { return on_impl(v, SHOGUN_GET_SWIG_TYPE(FactorType)); } - virtual void on(std::shared_ptr* v) + void on(std::shared_ptr* v) override { return on_impl(v, SHOGUN_GET_SWIG_TYPE(ParameterObserver)); } - virtual void on(std::shared_ptr* v) + void on(std::shared_ptr* v) override { return on_impl(v, SHOGUN_GET_SWIG_TYPE(Distribution)); } - virtual void on(std::shared_ptr* v) + void on(std::shared_ptr* v) override { return on_impl(v, SHOGUN_GET_SWIG_TYPE(GaussianProcess)); } - virtual void on(std::shared_ptr* v) + void on(std::shared_ptr* v) override { return on_impl(v, SHOGUN_GET_SWIG_TYPE(Alphabet)); } @@ -151,13 +151,9 @@ namespace shogun PyObject* pyobj() { - // this should never be true since "create" throws on error if (!m_pyobj) - { - m_pyobj = SWIG_Python_NewPointerObj( - nullptr, nullptr, SHOGUN_GET_SWIG_TYPE(SGObject), - SWIG_POINTER_OWN); - } + error("Unexpected error while creating the object"); + return m_pyobj; } @@ -175,10 +171,8 @@ namespace shogun PyObject* create(const char* name) { static auto visitor = std::make_shared(); - auto sgobj = create(name, PT_NOT_GENERIC, visitor); - require(sgobj, "Class {} does not exist.", name); + create_object(name, PT_NOT_GENERIC, visitor); return visitor->pyobj(); } } - %} diff --git a/src/interfaces/swig/factory.i b/src/interfaces/swig/factory.i index 854d16ba83d..941dafe1c99 100644 --- a/src/interfaces/swig/factory.i +++ b/src/interfaces/swig/factory.i @@ -131,5 +131,5 @@ namespace shogun{ %template(create_lbfgs_minimizer) shogun::create_; %template(create_kernel_normalizer) shogun::create_; #else -%include "factory_python.i" +%include "factory_visitors.i" #endif diff --git a/src/shogun/base/base_types.h b/src/shogun/base/base_types.h index e50924ff1d2..2097d8dee67 100644 --- a/src/shogun/base/base_types.h +++ b/src/shogun/base/base_types.h @@ -90,14 +90,14 @@ namespace shogun typedef T type; }; - using sg_inferface = type_list; + using sg_inferface = type_list< + GaussianProcess, Kernel, Distance, Features, Labels, ECOCEncoder, + ECOCDecoder, Evaluation, EvaluationResult, MulticlassStrategy, + NeuralLayer, SplittingStrategy, LikelihoodModel, MeanFunction, + Inference, DifferentiableFunction, LossFunction, Tokenizer, + CombinationRule, KernelNormalizer, Transformer, MachineEvaluation, + StructuredModel, FactorType, ParameterObserver, Distribution, Alphabet, + Machine>; namespace types_detail { @@ -116,9 +116,8 @@ namespace shogun } // namespace types_detail template - struct typeInList : public std::false_type + struct typeInList { - static_assert(!std::is_same_v); }; template class TypesT, typename... 
Args> @@ -127,12 +126,13 @@ namespace shogun { }; - template struct is_sg_base : public typeInList { }; + // all classes in the same class hierarchy chain should be ordered from + // most derived to least derived template constexpr auto find_base(type_list<>) { From e73f55fe89e98d1ce05297a0f7e75a7cf1f7b4e5 Mon Sep 17 00:00:00 2001 From: Ahmed Essam Date: Wed, 2 Sep 2020 20:39:44 +0200 Subject: [PATCH 6/6] Update notebooks --- .../classification/Classification.ipynb | 58 ++++++++--------- .../classification/HashedDocDotFeatures.ipynb | 2 +- .../classification/MKL.ipynb | 26 ++++---- .../SupportVectorMachines.ipynb | 16 ++--- doc/ipython-notebooks/clustering/KMeans.ipynb | 22 +++---- .../Scene_classification.ipynb | 4 +- doc/ipython-notebooks/converter/Tapkee.ipynb | 6 +- .../variational_classifier.ipynb | 62 +++++++++---------- doc/ipython-notebooks/ica/bss_audio.ipynb | 2 +- doc/ipython-notebooks/ica/bss_image.ipynb | 2 +- doc/ipython-notebooks/ica/ecg_sep.ipynb | 2 +- .../intro/Introduction.ipynb | 16 ++--- doc/ipython-notebooks/metric/LMNN.ipynb | 30 ++++----- doc/ipython-notebooks/multiclass/KNN.ipynb | 20 +++--- .../multiclass/Tree/DecisionTrees.ipynb | 48 +++++++------- .../multiclass/Tree/TreeEnsemble.ipynb | 16 ++--- .../multiclass/multiclass_reduction.ipynb | 34 +++++----- .../multiclass/naive_bayes.ipynb | 2 +- .../neuralnets/autoencoders.ipynb | 32 +++++----- .../neuralnets/neuralnets_digits.ipynb | 52 ++++++++-------- .../neuralnets/rbms_dbns.ipynb | 4 +- doc/ipython-notebooks/pca/pca_notebook.ipynb | 8 +-- .../regression/Regression.ipynb | 30 ++++----- .../structure/Binary_Denoising.ipynb | 6 +- .../multilabel_structured_prediction.ipynb | 6 +- 25 files changed, 253 insertions(+), 253 deletions(-) diff --git a/doc/ipython-notebooks/classification/Classification.ipynb b/doc/ipython-notebooks/classification/Classification.ipynb index 06c36aae98c..10d1a585cc0 100644 --- a/doc/ipython-notebooks/classification/Classification.ipynb +++ b/doc/ipython-notebooks/classification/Classification.ipynb @@ -211,7 +211,7 @@ "c = 0.5\n", "epsilon = 1e-3\n", "\n", - "svm_linear = sg.create_machine(\"LibLinear\", C1=c, C2=c, \n", + "svm_linear = sg.create(\"LibLinear\", C1=c, C2=c, \n", " labels=shogun_labels_linear, \n", " epsilon=epsilon,\n", " liblinear_solver_type=\"L2R_L2LOSS_SVC\")\n", @@ -224,7 +224,7 @@ "plt.title(\"Linear SVM - Linear Features\")\n", "plot_model(plt,svm_linear,feats_linear,labels_linear)\n", "\n", - "svm_non_linear = sg.create_machine(\"LibLinear\", C1=c, C2=c, \n", + "svm_non_linear = sg.create(\"LibLinear\", C1=c, C2=c, \n", " labels=shogun_labels_non_linear,\n", " epsilon=epsilon,\n", " liblinear_solver_type=\"L2R_L2LOSS_SVC\")\n", @@ -266,14 +266,14 @@ "gaussian_c = 0.7\n", "\n", "gaussian_kernel_linear = sg.create_kernel(\"GaussianKernel\", width=20)\n", - "gaussian_svm_linear = sg.create_machine('LibSVM', C1=gaussian_c, C2=gaussian_c, \n", + "gaussian_svm_linear = sg.create('LibSVM', C1=gaussian_c, C2=gaussian_c, \n", " kernel=gaussian_kernel_linear, labels=shogun_labels_linear)\n", "gaussian_svm_linear.train(shogun_feats_linear)\n", "classifiers_linear.append(gaussian_svm_linear)\n", "fadings.append(True)\n", "\n", "gaussian_kernel_non_linear = sg.create_kernel(\"GaussianKernel\", width=10)\n", - "gaussian_svm_non_linear=sg.create_machine('LibSVM', C1=gaussian_c, C2=gaussian_c, \n", + "gaussian_svm_non_linear=sg.create('LibSVM', C1=gaussian_c, C2=gaussian_c, \n", " kernel=gaussian_kernel_non_linear, labels=shogun_labels_non_linear)\n", 
"gaussian_svm_non_linear.train(shogun_feats_non_linear)\n", "classifiers_non_linear.append(gaussian_svm_non_linear)\n", @@ -309,7 +309,7 @@ "\n", "sigmoid_kernel_linear = sg.create_kernel(\"SigmoidKernel\", cache_size=200, gamma=1, coef0=0.5)\n", "sigmoid_kernel_linear.init(shogun_feats_linear, shogun_feats_linear)\n", - "sigmoid_svm_linear = sg.create_machine('LibSVM', C1=sigmoid_c, C2=sigmoid_c, \n", + "sigmoid_svm_linear = sg.create('LibSVM', C1=sigmoid_c, C2=sigmoid_c, \n", " kernel=sigmoid_kernel_linear, labels=shogun_labels_linear)\n", "sigmoid_svm_linear.train()\n", "classifiers_linear.append(sigmoid_svm_linear)\n", @@ -323,7 +323,7 @@ "\n", "sigmoid_kernel_non_linear = sg.create_kernel(\"SigmoidKernel\", cache_size=400, gamma=2.5, coef0=2)\n", "sigmoid_kernel_non_linear.init(shogun_feats_non_linear, shogun_feats_non_linear)\n", - "sigmoid_svm_non_linear = sg.create_machine('LibSVM', C1=sigmoid_c, C2=sigmoid_c, \n", + "sigmoid_svm_non_linear = sg.create('LibSVM', C1=sigmoid_c, C2=sigmoid_c, \n", " kernel=sigmoid_kernel_non_linear, labels=shogun_labels_non_linear)\n", "sigmoid_svm_non_linear.train()\n", "classifiers_non_linear.append(sigmoid_svm_non_linear)\n", @@ -353,7 +353,7 @@ "\n", "poly_kernel_linear = sg.create_kernel('PolyKernel', degree=degree, c=1.0)\n", "poly_kernel_linear.init(shogun_feats_linear, shogun_feats_linear)\n", - "poly_svm_linear = sg.create_machine('LibSVM', C1=poly_c, C2=poly_c, \n", + "poly_svm_linear = sg.create('LibSVM', C1=poly_c, C2=poly_c, \n", " kernel=poly_kernel_linear, labels=shogun_labels_linear)\n", "poly_svm_linear.train()\n", "classifiers_linear.append(poly_svm_linear)\n", @@ -367,7 +367,7 @@ "\n", "poly_kernel_non_linear = sg.create_kernel('PolyKernel', degree=degree, c=1.0)\n", "poly_kernel_non_linear.init(shogun_feats_non_linear, shogun_feats_non_linear)\n", - "poly_svm_non_linear = sg.create_machine('LibSVM', C1=poly_c, C2=poly_c, \n", + "poly_svm_non_linear = sg.create('LibSVM', C1=poly_c, C2=poly_c, \n", " kernel=poly_kernel_non_linear, labels=shogun_labels_non_linear)\n", "poly_svm_non_linear.train()\n", "classifiers_non_linear.append(poly_svm_non_linear)\n", @@ -404,7 +404,7 @@ "shogun_multiclass_labels_linear = sg.MulticlassLabels(multiclass_labels_linear)\n", "shogun_multiclass_labels_non_linear = sg.MulticlassLabels(multiclass_labels_non_linear)\n", "\n", - "naive_bayes_linear = sg.create_machine(\"GaussianNaiveBayes\")\n", + "naive_bayes_linear = sg.create(\"GaussianNaiveBayes\")\n", "naive_bayes_linear.put('features', shogun_feats_linear)\n", "naive_bayes_linear.put('labels', shogun_multiclass_labels_linear)\n", "naive_bayes_linear.train()\n", @@ -417,7 +417,7 @@ "plt.title(\"Naive Bayes - Linear Features\")\n", "plot_model(plt,naive_bayes_linear,feats_linear,labels_linear,fading=False)\n", "\n", - "naive_bayes_non_linear = sg.create_machine(\"GaussianNaiveBayes\")\n", + "naive_bayes_non_linear = sg.create(\"GaussianNaiveBayes\")\n", "naive_bayes_non_linear.put('features', shogun_feats_non_linear)\n", "naive_bayes_non_linear.put('labels', shogun_multiclass_labels_non_linear)\n", "naive_bayes_non_linear.train()\n", @@ -443,9 +443,9 @@ "source": [ "number_of_neighbors = 10\n", "\n", - "distances_linear = sg.create_distance('EuclideanDistance')\n", + "distances_linear = sg.create('EuclideanDistance')\n", "distances_linear.init(shogun_feats_linear, shogun_feats_linear)\n", - "knn_linear = sg.create_machine(\"KNN\", k=number_of_neighbors, distance=distances_linear, \n", + "knn_linear = sg.create(\"KNN\", k=number_of_neighbors, 
distance=distances_linear, \n", " labels=shogun_labels_linear)\n", "knn_linear.train()\n", "classifiers_linear.append(knn_linear)\n", @@ -457,9 +457,9 @@ "plt.title(\"Nearest Neighbors - Linear Features\")\n", "plot_model(plt,knn_linear,feats_linear,labels_linear,fading=False)\n", "\n", - "distances_non_linear = sg.create_distance('EuclideanDistance')\n", + "distances_non_linear = sg.create('EuclideanDistance')\n", "distances_non_linear.init(shogun_feats_non_linear, shogun_feats_non_linear)\n", - "knn_non_linear = sg.create_machine(\"KNN\", k=number_of_neighbors, distance=distances_non_linear, \n", + "knn_non_linear = sg.create(\"KNN\", k=number_of_neighbors, distance=distances_non_linear, \n", " labels=shogun_labels_non_linear)\n", "knn_non_linear.train()\n", "classifiers_non_linear.append(knn_non_linear)\n", @@ -484,7 +484,7 @@ "source": [ "gamma = 0.1\n", "\n", - "lda_linear = sg.create_machine('LDA', gamma=gamma, labels=shogun_labels_linear)\n", + "lda_linear = sg.create('LDA', gamma=gamma, labels=shogun_labels_linear)\n", "lda_linear.train(shogun_feats_linear)\n", "classifiers_linear.append(lda_linear)\n", "classifiers_names.append(\"LDA\")\n", @@ -495,7 +495,7 @@ "plt.title(\"LDA - Linear Features\")\n", "plot_model(plt,lda_linear,feats_linear,labels_linear)\n", "\n", - "lda_non_linear = sg.create_machine('LDA', gamma=gamma, labels=shogun_labels_non_linear)\n", + "lda_non_linear = sg.create('LDA', gamma=gamma, labels=shogun_labels_non_linear)\n", "lda_non_linear.train(shogun_feats_non_linear)\n", "classifiers_non_linear.append(lda_non_linear)\n", "\n", @@ -517,7 +517,7 @@ "metadata": {}, "outputs": [], "source": [ - "qda_linear = sg.create_machine(\"QDA\", labels=shogun_multiclass_labels_linear)\n", + "qda_linear = sg.create(\"QDA\", labels=shogun_multiclass_labels_linear)\n", "qda_linear.train(shogun_feats_linear)\n", "classifiers_linear.append(qda_linear)\n", "classifiers_names.append(\"QDA\")\n", @@ -528,7 +528,7 @@ "plt.title(\"QDA - Linear Features\")\n", "plot_model(plt,qda_linear,feats_linear,labels_linear,fading=False)\n", "\n", - "qda_non_linear = sg.create_machine(\"QDA\", labels=shogun_multiclass_labels_non_linear)\n", + "qda_non_linear = sg.create(\"QDA\", labels=shogun_multiclass_labels_non_linear)\n", "qda_non_linear.train(shogun_feats_non_linear)\n", "classifiers_non_linear.append(qda_non_linear)\n", "\n", @@ -567,17 +567,17 @@ "# create Gaussian kernel with width = 5.0\n", "kernel = sg.create_kernel(\"GaussianKernel\", width=5.0)\n", "# create zero mean function\n", - "zero_mean = sg.create_gp_mean(\"ZeroMean\")\n", + "zero_mean = sg.create(\"ZeroMean\")\n", "# create logit likelihood model\n", - "likelihood = sg.create_gp_likelihood(\"LogitLikelihood\")\n", + "likelihood = sg.create(\"LogitLikelihood\")\n", "# specify EP approximation inference method\n", - "inference_model_linear = sg.create_gp_inference(\"EPInferenceMethod\",kernel=kernel, \n", + "inference_model_linear = sg.create(\"EPInferenceMethod\",kernel=kernel, \n", " features=shogun_feats_linear, \n", " mean_function=zero_mean, \n", " labels=shogun_labels_linear, \n", " likelihood_model=likelihood)\n", "# create and train GP classifier, which uses Laplace approximation\n", - "gaussian_logit_linear = sg.create_gaussian_process(\"GaussianProcessClassification\", inference_method=inference_model_linear)\n", + "gaussian_logit_linear = sg.create(\"GaussianProcessClassification\", inference_method=inference_model_linear)\n", "gaussian_logit_linear.train()\n", "classifiers_linear.append(gaussian_logit_linear)\n", 
"classifiers_names.append(\"Gaussian Process Logit\")\n", @@ -588,12 +588,12 @@ "plt.title(\"Gaussian Process - Logit - Linear Features\")\n", "plot_model(plt,gaussian_logit_linear,feats_linear,labels_linear)\n", "\n", - "inference_model_non_linear = sg.create_gp_inference(\"EPInferenceMethod\", kernel=kernel, \n", + "inference_model_non_linear = sg.create(\"EPInferenceMethod\", kernel=kernel, \n", " features=shogun_feats_non_linear, \n", " mean_function=zero_mean, \n", " labels=shogun_labels_non_linear, \n", " likelihood_model=likelihood)\n", - "gaussian_logit_non_linear = sg.create_gaussian_process(\"GaussianProcessClassification\", \n", + "gaussian_logit_non_linear = sg.create(\"GaussianProcessClassification\", \n", " inference_method=inference_model_non_linear)\n", "gaussian_logit_non_linear.train()\n", "classifiers_non_linear.append(gaussian_logit_non_linear)\n", @@ -623,14 +623,14 @@ "metadata": {}, "outputs": [], "source": [ - "likelihood = sg.create_gp_likelihood(\"ProbitLikelihood\")\n", + "likelihood = sg.create(\"ProbitLikelihood\")\n", "\n", - "inference_model_linear = sg.create_gp_inference(\"EPInferenceMethod\", kernel=kernel, \n", + "inference_model_linear = sg.create(\"EPInferenceMethod\", kernel=kernel, \n", " features=shogun_feats_linear, \n", " mean_function=zero_mean, \n", " labels=shogun_labels_linear, \n", " likelihood_model=likelihood)\n", - "gaussian_probit_linear = sg.create_gaussian_process(\"GaussianProcessClassification\", \n", + "gaussian_probit_linear = sg.create(\"GaussianProcessClassification\", \n", " inference_method=inference_model_linear)\n", "gaussian_probit_linear.train()\n", "classifiers_linear.append(gaussian_probit_linear)\n", @@ -642,12 +642,12 @@ "plt.title(\"Gaussian Process - Probit - Linear Features\")\n", "plot_model(plt,gaussian_probit_linear,feats_linear,labels_linear)\n", "\n", - "inference_model_non_linear = sg.create_gp_inference(\"EPInferenceMethod\", kernel=kernel, \n", + "inference_model_non_linear = sg.create(\"EPInferenceMethod\", kernel=kernel, \n", " features=shogun_feats_non_linear, \n", " mean_function=zero_mean, \n", " labels=shogun_labels_non_linear, \n", " likelihood_model=likelihood)\n", - "gaussian_probit_non_linear = sg.create_gaussian_process(\"GaussianProcessClassification\", \n", + "gaussian_probit_non_linear = sg.create(\"GaussianProcessClassification\", \n", " inference_method=inference_model_non_linear)\n", "gaussian_probit_non_linear.train()\n", "classifiers_non_linear.append(gaussian_probit_non_linear)\n", diff --git a/doc/ipython-notebooks/classification/HashedDocDotFeatures.ipynb b/doc/ipython-notebooks/classification/HashedDocDotFeatures.ipynb index bc001cf9be1..1ba1f09ac49 100644 --- a/doc/ipython-notebooks/classification/HashedDocDotFeatures.ipynb +++ b/doc/ipython-notebooks/classification/HashedDocDotFeatures.ipynb @@ -190,7 +190,7 @@ "source": [ "C = 0.1\n", "epsilon = 0.01\n", - "svm = sg.create_machine(\"SVMOcas\", C1=C, C2=C, labels=labels, epsilon=epsilon)" + "svm = sg.create(\"SVMOcas\", C1=C, C2=C, labels=labels, epsilon=epsilon)" ] }, { diff --git a/doc/ipython-notebooks/classification/MKL.ipynb b/doc/ipython-notebooks/classification/MKL.ipynb index cda6f40a2e3..20636966e52 100644 --- a/doc/ipython-notebooks/classification/MKL.ipynb +++ b/doc/ipython-notebooks/classification/MKL.ipynb @@ -164,7 +164,7 @@ "\n", "covs=np.array([[1.0,0.0],[0.0,1.0]])\n", "\n", - "# gmm=sg.create_distribution(\"GMM\")\n", + "# gmm=sg.create(\"GMM\")\n", "# gmm.set_pseudo_count(num_components)\n", 
"gmm=sg.GMM(num_components)\n", "[gmm.set_nth_mean(means[i], i) for i in range(num_components)]\n", @@ -253,7 +253,7 @@ "kernel.add(\"kernel_array\", kernel1)\n", "kernel.init(feats_train, feats_train)\n", "\n", - "mkl = sg.create_machine(\"MKLClassification\", mkl_norm=1, C1=1, C2=1, kernel=kernel, labels=labels)\n", + "mkl = sg.create(\"MKLClassification\", mkl_norm=1, C1=1, C2=1, kernel=kernel, labels=labels)\n", "\n", "#train to get weights\n", "mkl.train() \n", @@ -387,7 +387,7 @@ "mkl.put(\"kernel\", kernelt)\n", "out = mkl.apply()\n", "\n", - "evaluator = sg.create_evaluation(\"ErrorRateMeasure\")\n", + "evaluator = sg.create(\"ErrorRateMeasure\")\n", "print(\"Test error is %2.2f%% :MKL\" % (100*evaluator.evaluate(out,sg.BinaryLabels(testlab))))\n", "\n", "\n", @@ -395,14 +395,14 @@ "mkl.put(\"kernel\", comb_ker0t)\n", "out = mkl.apply()\n", "\n", - "evaluator = sg.create_evaluation(\"ErrorRateMeasure\")\n", + "evaluator = sg.create(\"ErrorRateMeasure\")\n", "print(\"Test error is %2.2f%% :Subkernel1\"% (100*evaluator.evaluate(out,sg.BinaryLabels(testlab))))\n", "\n", "comb_ker1t.init(feats_train, sg.create_features(testdata))\n", "mkl.put(\"kernel\", comb_ker1t)\n", "out = mkl.apply()\n", "\n", - "evaluator = sg.create_evaluation(\"ErrorRateMeasure\")\n", + "evaluator = sg.create(\"ErrorRateMeasure\")\n", "print(\"Test error is %2.2f%% :subkernel2\" % (100*evaluator.evaluate(out,sg.BinaryLabels(testlab))))\n" ] }, @@ -490,7 +490,7 @@ " kernel.add(\"kernel_array\", kernel3)\n", " \n", " kernel.init(feats_tr, feats_tr)\n", - " mkl = sg.create_machine(\"MKLClassification\", mkl_norm=1, C1=1, C2=2, kernel=kernel, labels=lab)\n", + " mkl = sg.create(\"MKLClassification\", mkl_norm=1, C1=1, C2=2, kernel=kernel, labels=lab)\n", " \n", " mkl.train()\n", " \n", @@ -703,7 +703,7 @@ "\n", "kernel.init(feats_train, feats_train)\n", "\n", - "mkl = sg.create_machine(\"MKLMulticlass\", C=1.2, kernel=kernel, \n", + "mkl = sg.create(\"MKLMulticlass\", C=1.2, kernel=kernel, \n", " labels=labels, mkl_eps=0.001, mkl_norm=1)\n", "\n", "# set epsilon of SVM\n", @@ -715,7 +715,7 @@ "kernel.init(feats_train, feats_test) \n", "\n", "out = mkl.apply()\n", - "evaluator = sg.create_evaluation(\"MulticlassAccuracy\")\n", + "evaluator = sg.create(\"MulticlassAccuracy\")\n", "accuracy = evaluator.evaluate(out, labels_rem)\n", "print(\"Accuracy = %2.2f%%\" % (100*accuracy))\n", "\n", @@ -748,10 +748,10 @@ "\n", "pk = sg.create_kernel('PolyKernel', degree=10, c=2) \n", "\n", - "svm = sg.create_machine(\"GMNPSVM\", C=C, kernel=pk, labels=labels)\n", + "svm = sg.create(\"GMNPSVM\", C=C, kernel=pk, labels=labels)\n", "_=svm.train(feats)\n", "out=svm.apply(feats_rem)\n", - "evaluator = sg.create_evaluation(\"MulticlassAccuracy\")\n", + "evaluator = sg.create(\"MulticlassAccuracy\")\n", "accuracy = evaluator.evaluate(out, labels_rem)\n", "\n", "print(\"Accuracy = %2.2f%%\" % (100*accuracy))\n", @@ -776,10 +776,10 @@ "\n", "gk=sg.create_kernel(\"GaussianKernel\", width=width)\n", "\n", - "svm=sg.create_machine(\"GMNPSVM\", C=C, kernel=gk, labels=labels)\n", + "svm=sg.create(\"GMNPSVM\", C=C, kernel=gk, labels=labels)\n", "_=svm.train(feats)\n", "out=svm.apply(feats_rem)\n", - "evaluator = sg.create_evaluation(\"MulticlassAccuracy\")\n", + "evaluator = sg.create(\"MulticlassAccuracy\")\n", "accuracy = evaluator.evaluate(out, labels_rem)\n", "\n", "print(\"Accuracy = %2.2f%%\" % (100*accuracy))\n", @@ -869,7 +869,7 @@ "\n", "kernel.init(feats_train, feats_train)\n", "\n", - "mkl = sg.create_machine(\"MKLOneClass\", 
kernel=kernel, labels=labels, interleaved_optimization=False,\n", + "mkl = sg.create(\"MKLOneClass\", kernel=kernel, labels=labels, interleaved_optimization=False,\n", " mkl_norm=1)\n", "\n", "mkl.put(\"epsilon\", 1e-2)\n", diff --git a/doc/ipython-notebooks/classification/SupportVectorMachines.ipynb b/doc/ipython-notebooks/classification/SupportVectorMachines.ipynb index b58eabeab28..c842786e45c 100644 --- a/doc/ipython-notebooks/classification/SupportVectorMachines.ipynb +++ b/doc/ipython-notebooks/classification/SupportVectorMachines.ipynb @@ -161,7 +161,7 @@ "C=1\n", "epsilon=1e-3\n", "\n", - "svm=sg.create_machine('LibLinear', C1=C, C2=C, liblinear_solver_type='L2R_L2LOSS_SVC', epsilon=epsilon)\n", + "svm=sg.create('LibLinear', C1=C, C2=C, liblinear_solver_type='L2R_L2LOSS_SVC', epsilon=epsilon)\n", "\n", "#train\n", "svm.put('labels', labels)\n", @@ -362,7 +362,7 @@ "source": [ "C=1\n", "epsilon=1e-3\n", - "svm=sg.create_machine('LibSVM', C1=C, C2=C, kernel=gaussian_kernel, labels=labels)\n", + "svm=sg.create('LibSVM', C1=C, C2=C, kernel=gaussian_kernel, labels=labels)\n", "_=svm.train()" ] }, @@ -543,7 +543,7 @@ " plt.subplot(1, len(C_values), i+1)\n", " linear_kernel=sg.create_kernel(\"LinearKernel\")\n", " linear_kernel.init(feats_train, feats_train)\n", - " svm1 = sg.create_machine('LibSVM', C1=C_values[i], C2=C_values[i], kernel=linear_kernel, labels=labels)\n", + " svm1 = sg.create('LibSVM', C1=C_values[i], C2=C_values[i], kernel=linear_kernel, labels=labels)\n", " svm1 = sg.as_svm(svm1)\n", " svm1.train()\n", " vec1=svm1.get_support_vectors()\n", @@ -652,7 +652,7 @@ "source": [ "#train machine\n", "C=1\n", - "svm=sg.create_machine('LibSVM', C1=C, C2=C, kernel=gaussian_kernel, labels=labels)\n", + "svm=sg.create('LibSVM', C1=C, C2=C, kernel=gaussian_kernel, labels=labels)\n", "_=svm.train(feats_train)" ] }, @@ -810,11 +810,11 @@ "gaussian_kernel.init(feats_train, feats_train)\n", "\n", "C=1\n", - "svm=sg.create_machine('LibSVM', C1=C, C2=C, kernel=gaussian_kernel, labels=lab_train)\n", + "svm=sg.create('LibSVM', C1=C, C2=C, kernel=gaussian_kernel, labels=lab_train)\n", "_=svm.train()\n", "output=svm.apply(feats_test)\n", "\n", - "Err=sg.create_evaluation(\"ErrorRateMeasure\")\n", + "Err=sg.create(\"ErrorRateMeasure\")\n", "error=Err.evaluate(output, lab_test)\n", "print('Error:', error)\n", "\n", @@ -829,7 +829,7 @@ "svm.train()\n", "output=svm.apply(feats_test)\n", "\n", - "Err=sg.create_evaluation(\"ErrorRateMeasure\")\n", + "Err=sg.create(\"ErrorRateMeasure\")\n", "error=Err.evaluate(output, lab_test)\n", "print('Error with normalization:', error)" ] @@ -933,7 +933,7 @@ "metadata": {}, "outputs": [], "source": [ - "svm=sg.create_machine(\"GMNPSVM\", C=1, kernel=gaussian_kernel, labels=labels)\n", + "svm=sg.create(\"GMNPSVM\", C=1, kernel=gaussian_kernel, labels=labels)\n", "_=svm.train(feats_train)\n", "\n", "size=100\n", diff --git a/doc/ipython-notebooks/clustering/KMeans.ipynb b/doc/ipython-notebooks/clustering/KMeans.ipynb index 3fca5e875e4..e3b1473b548 100644 --- a/doc/ipython-notebooks/clustering/KMeans.ipynb +++ b/doc/ipython-notebooks/clustering/KMeans.ipynb @@ -121,7 +121,7 @@ "k = 2\n", "\n", "# distance metric over feature matrix - Euclidean distance\n", - "distance = sg.create_distance('EuclideanDistance')\n", + "distance = sg.create('EuclideanDistance')\n", "distance.init(train_features, train_features)" ] }, @@ -139,7 +139,7 @@ "outputs": [], "source": [ "# KMeans object created\n", - "kmeans = sg.create_machine(\"KMeans\", k=k, distance=distance)\n", + "kmeans 
= sg.create(\"KMeans\", k=k, distance=distance)\n", "\n", "# KMeans training \n", "kmeans.train()" @@ -240,7 +240,7 @@ "initial_centers = np.array([[0.,10.],[50.,50.]])\n", "\n", "# initial centers passed\n", - "kmeans = sg.create_machine(\"KMeans\", k=k, distance=distance, initial_centers=initial_centers)" + "kmeans = sg.create(\"KMeans\", k=k, distance=distance, initial_centers=initial_centers)" ] }, { @@ -342,7 +342,7 @@ "outputs": [], "source": [ "# set flag for using KMeans++\n", - "kmeans = sg.create_machine(\"KMeans\", k=k, distance=distance, kmeanspp=True)" + "kmeans = sg.create(\"KMeans\", k=k, distance=distance, kmeanspp=True)" ] }, { @@ -406,7 +406,7 @@ "outputs": [], "source": [ "# set training method to mini-batch\n", - "kmeans = sg.create_machine(\"KMeansMiniBatch\", k=k, distance=distance)" + "kmeans = sg.create(\"KMeansMiniBatch\", k=k, distance=distance)" ] }, { @@ -504,14 +504,14 @@ " k = 3\n", "\n", " # distance function features - euclidean\n", - " distance = sg.create_distance('EuclideanDistance')\n", + " distance = sg.create('EuclideanDistance')\n", " distance.init(train_features, train_features)\n", "\n", " # initialize KMeans object, use kmeans++ to initialize centers [play around: change it to False and compare results]\n", - " kmeans = sg.create_machine(\"KMeans\", k=k, distance=distance, kmeanspp=True)\n", + " kmeans = sg.create(\"KMeans\", k=k, distance=distance, kmeanspp=True)\n", "\n", " # training method is Lloyd by default [play around: change it to mini-batch by uncommenting the following lines]\n", - " #kmeans = sg.create_machine(\"KMeansMiniBatch\", k=k, distance=distance)\n", + " #kmeans = sg.create(\"KMeansMiniBatch\", k=k, distance=distance)\n", "\n", " # training kmeans\n", " kmeans.train(train_features)\n", @@ -587,7 +587,7 @@ "source": [ "def analyzeResult(result): \n", " # shogun object for clustering accuracy\n", - " AccuracyEval = sg.create_evaluation(\"ClusteringAccuracy\")\n", + " AccuracyEval = sg.create(\"ClusteringAccuracy\")\n", "\n", " # evaluates clustering accuracy\n", " accuracy = AccuracyEval.evaluate(result, ground_truth)\n", @@ -653,10 +653,10 @@ "source": [ "def apply_pca_to_data(target_dims):\n", " train_features = sg.create_features(obsmatrix)\n", - " submean = sg.create_transformer(\"PruneVarSubMean\", divide_by_std=False)\n", + " submean = sg.create(\"PruneVarSubMean\", divide_by_std=False)\n", " submean.fit(train_features)\n", " submean.transform(train_features)\n", - " preprocessor = sg.create_transformer(\"PCA\", target_dim=target_dims)\n", + " preprocessor = sg.create(\"PCA\", target_dim=target_dims)\n", " preprocessor.fit(train_features)\n", " pca_transform = preprocessor.get(\"transformation_matrix\")\n", " new_features = np.dot(pca_transform.T, train_features.get(\"feature_matrix\"))\n", diff --git a/doc/ipython-notebooks/computer_vision/Scene_classification.ipynb b/doc/ipython-notebooks/computer_vision/Scene_classification.ipynb index 6d59ee9cbd9..0ffe83bb9d6 100644 --- a/doc/ipython-notebooks/computer_vision/Scene_classification.ipynb +++ b/doc/ipython-notebooks/computer_vision/Scene_classification.ipynb @@ -186,7 +186,7 @@ " sg_descriptor_mat_features=features(descriptor_mat)\n", "\n", " #EuclideanDistance is used for the distance measurement.\n", - " distance = sg.create_distance('EuclideanDistance')\n", + " distance = sg.create('EuclideanDistance')\n", " distance.init(sg_descriptor_mat_features, sg_descriptor_mat_features)\n", "\n", " #group the descriptors into k clusters.\n", @@ -342,7 +342,7 @@ " 
cluster_labels=[]\n", "\n", " #initialize a KNN in Shogun\n", - " dist = sg.create_distance('EuclideanDistance')\n", + " dist = sg.create('EuclideanDistance')\n", " labels=MulticlassLabels(np.double(range(k)))\n", " knn=KNN(1, dist, labels)\n", "\n", diff --git a/doc/ipython-notebooks/converter/Tapkee.ipynb b/doc/ipython-notebooks/converter/Tapkee.ipynb index 4ada0cbab15..268080cd0d2 100644 --- a/doc/ipython-notebooks/converter/Tapkee.ipynb +++ b/doc/ipython-notebooks/converter/Tapkee.ipynb @@ -127,10 +127,10 @@ "feats = sg.create_features(data)\n", "\n", "# create instance of Isomap converter and set number of neighbours used in kNN search to 20\n", - "isomap = sg.create_transformer('Isomap', target_dim=2, k=20)\n", + "isomap = sg.create('Isomap', target_dim=2, k=20)\n", "\n", "# create instance of Multidimensional Scaling converter and configure it\n", - "mds = sg.create_transformer('MultidimensionalScaling', target_dim=2)\n", + "mds = sg.create('MultidimensionalScaling', target_dim=2)\n", "\n", "# embed Swiss roll data\n", "embedded_data_mds = mds.transform(feats).get('feature_matrix')\n", @@ -165,7 +165,7 @@ "features = sg.create_features(data)\n", "\n", "# create MDS instance\n", - "converter = sg.create_transformer('StochasticProximityEmbedding', target_dim=2)\n", + "converter = sg.create('StochasticProximityEmbedding', target_dim=2)\n", "\n", "# embed helix data\n", "embedded_features = converter.transform(features)\n", diff --git a/doc/ipython-notebooks/gaussian_process/variational_classifier.ipynb b/doc/ipython-notebooks/gaussian_process/variational_classifier.ipynb index 43020eba62c..4c7c5442bd4 100644 --- a/doc/ipython-notebooks/gaussian_process/variational_classifier.ipynb +++ b/doc/ipython-notebooks/gaussian_process/variational_classifier.ipynb @@ -648,7 +648,7 @@ " for c in range(cols):\n", " inference = methods[r][c]\n", " likelihood = likelihoods[r][c]\n", - " inf = sg.create_gp_inference(inference, kernel=kernel_func, features=features, mean_function=mean_func, labels=labels, likelihood_model=sg.create_gp_likelihood(likelihood))\n", + " inf = sg.create(inference, kernel=kernel_func, features=features, mean_function=mean_func, labels=labels, likelihood_model=sg.create(likelihood))\n", " inf.set_scale(exp(kernel_log_scale))\n", " #get the approximated Gaussian distribution\n", " mu = inf.get_posterior_mean()\n", @@ -702,7 +702,7 @@ "kernel_log_scale = 1.5\n", "\n", "#a mean function and a covariance function for GP\n", - "mean_func = sg.create_gp_mean(\"ConstMean\")\n", + "mean_func = sg.create(\"ConstMean\")\n", "#using log_sigma as a hyper-parameter of GP instead of sigma\n", "kernel_sigma = 2*exp(2*kernel_log_sigma)\n", "kernel_func = sg.create_kernel(\"GaussianKernel\", width=kernel_sigma)\n", @@ -741,7 +741,7 @@ " \"LogitDVGLikelihood\"\n", "]\n", "#likelihood\n", - "Z2 = likelihood_points(X,Y,labels,sg.create_gp_likelihood(\"LogitLikelihood\"))\n", + "Z2 = likelihood_points(X,Y,labels,sg.create(\"LogitLikelihood\"))\n", "CS2 = ax2.contour(X, Y, np.exp(Z2))\n", "ax2.set_title('Likelihood')\n", "ax2.axis('equal')\n", @@ -825,7 +825,7 @@ " trained GPC model, name of inference method\n", " \"\"\"\n", "\n", - " mean_func = sg.create_gp_mean(\"ZeroMean\")\n", + " mean_func = sg.create(\"ZeroMean\")\n", " kernel_sigma = 2*exp(2*kernel_log_sigma);\n", " kernel_func = sg.create_kernel(\"GaussianKernel\", width=kernel_sigma)\n", "\n", @@ -834,10 +834,10 @@ " #X is a feature-by-sample matrix\n", " features_train=sg.create_features(x_train)\n", "\n", - " inf = 
sg.create_gp_inference(inference, kernel=kernel_func, features=features_train, mean_function=mean_func, labels=labels_train, likelihood_model=likelihood)\n", + " inf = sg.create(inference, kernel=kernel_func, features=features_train, mean_function=mean_func, labels=labels_train, likelihood_model=likelihood)\n", " inf.set_scale(exp(kernel_log_scale))\n", "\n", - " gp = sg.create_gaussian_process(\"GaussianProcessClassification\", inference_method=inf)\n", + " gp = sg.create(\"GaussianProcessClassification\", inference_method=inf)\n", " gp.train()\n", "\n", " return gp, inf.get_name()" @@ -919,7 +919,7 @@ " \n", " #binary classification problem (positive labels and negative labels)\n", " inference = \"SingleLaplaceInferenceMethod\"\n", - " likelihood = sg.create_gp_likelihood(\"LogitLikelihood\")\n", + " likelihood = sg.create(\"LogitLikelihood\")\n", " linesearch = 3\n", " \n", " #we show how parameters of GPC affect the decision boundary\n", @@ -1096,7 +1096,7 @@ " trained GPC model, name of inference method\n", " \"\"\"\n", "\n", - " mean_func = sg.create_gp_mean(\"ZeroMean\")\n", + " mean_func = sg.create(\"ZeroMean\")\n", " kernel_sigma = exp(kernel_log_sigma);\n", " kernel_func = sg.create_kernel(\"GaussianARDSparseKernel\")\n", " kernel_func.put(\"log_weights\", np.full((1), 1.0/kernel_sigma))\n", @@ -1107,7 +1107,7 @@ " #X is a feature-by-sample matrix\n", " features_train=sg.create_features(x_train)\n", " features_inducing=sg.create_features(x_inducing)\n", - " inf = sg.create_gp_inference(inference, kernel=kernel_func, features=features_train, mean_function=mean_func, labels=labels_train, likelihood_model=likelihood, inducing_features=features_inducing)\n", + " inf = sg.create(inference, kernel=kernel_func, features=features_train, mean_function=mean_func, labels=labels_train, likelihood_model=likelihood, inducing_features=features_inducing)\n", " inf.set_scale(exp(kernel_log_scale))\n", " #if optimizing_inducing_points:\n", " ##inf.enable_optimizing_inducing_features(True, sg.LBFGSMinimizer())\n", @@ -1118,7 +1118,7 @@ " # inf.set_inducing_noise(1e-6);\n", " #except:\n", " #pass\n", - " gp = sg.create_gaussian_process(\"GaussianProcessClassification\", inference_method=inf)\n", + " gp = sg.create(\"GaussianProcessClassification\", inference_method=inf)\n", " gp.train()\n", " return gp, inf" ] @@ -1183,7 +1183,7 @@ " \"SingleFITCLaplaceInferenceMethod\", #inference method for sparse Gaussian processes\n", " \"SingleLaplaceInferenceMethod\", #inference method for full Gaussian processes\n", " ]\n", - " likelihood = sg.create_gp_likelihood(\"LogitLikelihood\")\n", + " likelihood = sg.create(\"LogitLikelihood\")\n", " linesearch = 3\n", " kernel_log_sigma=0\n", " kernel_log_scale=0\n", @@ -1312,7 +1312,7 @@ " inferences =[\n", " \"SingleFITCLaplaceInferenceMethod\", #inference method for sparse Gaussian processes\n", " ]\n", - " likelihood = sg.create_gp_likelihood(\"LogitLikelihood\")\n", + " likelihood = sg.create(\"LogitLikelihood\")\n", " linesearch = 3\n", " kernel_log_sigma=0\n", " kernel_log_scale=0\n", @@ -1455,7 +1455,7 @@ " predictive result of the testing data set, name of inference method\n", " \"\"\"\n", "\n", - " mean_func = sg.create_gp_mean(\"ZeroMean\")\n", + " mean_func = sg.create(\"ZeroMean\")\n", " kernel_sigma = 2*exp(2*kernel_log_sigma);\n", " kernel_func = sg.create_kernel(\"GaussianKernel\", width=kernel_sigma)\n", "\n", @@ -1466,7 +1466,7 @@ " features_train=sg.create_features(x_train)\n", " features_test=sg.create_features(x_test)\n", "\n", - " inf = 
sg.create_gp_inference(inference, kernel=kernel_func, features=features_train, mean_function=mean_func, labels=labels_train, likelihood_model=likelihood)\n", + " inf = sg.create(inference, kernel=kernel_func, features=features_train, mean_function=mean_func, labels=labels_train, likelihood_model=likelihood)\n", " inf.set_scale(exp(kernel_log_scale))\n", " try:\n", " #used to make sure the kernel matrix is positive definite\n", @@ -1476,11 +1476,11 @@ " except:\n", " pass\n", " if minimizer !=None:\n", - " opt=sg.create_lbfgs_minimizer(minimizer)\n", + " opt=sg.create(minimizer)\n", " opt.set_lbfgs_parameters(100,2000,linesearch,2000)\n", " inf.register_minimizer(opt);\n", "\n", - " gp = sg.create_gaussian_process(\"GaussianProcessClassification\", inference_method=inf)\n", + " gp = sg.create(\"GaussianProcessClassification\", inference_method=inf)\n", " gp.train()\n", " prob=gp.get_probabilities(features_test)\n", "\n", @@ -1587,7 +1587,7 @@ " minimizer = minimizers[r][c]\n", " likelihood_name = likelihoods[r][c]\n", " linesearch = linesearches[r][c]\n", - " likelihood = sg.create_gp_likelihood(likelihood_name)\n", + " likelihood = sg.create(likelihood_name)\n", " try:\n", " likelihood.set_noise_factor(1e-15)\n", " likelihood.set_strict_scale(0.01)\n", @@ -1787,8 +1787,8 @@ " \"\"\"\n", " \n", " #train a GP classifer\n", - " error_eval = sg.create_evaluation(\"ErrorRateMeasure\")\n", - " mean_func = sg.create_gp_mean(\"ConstMean\")\n", + " error_eval = sg.create(\"ErrorRateMeasure\")\n", + " mean_func = sg.create(\"ConstMean\")\n", " #set hyper-parameters of covariance function\n", " kernel_log_sigma = 1.0\n", " kernel_sigma = 2*exp(2*kernel_log_sigma);\n", @@ -1803,7 +1803,7 @@ "\n", " kernel_log_scale = 1.0\n", "\n", - " inf = sg.create_gp_inference(inference, kernel=kernel_func, features=features_train, mean_function=mean_func, labels=labels_train, likelihood_model=likelihood)\n", + " inf = sg.create(inference, kernel=kernel_func, features=features_train, mean_function=mean_func, labels=labels_train, likelihood_model=likelihood)\n", " print(\"\\nusing %s\"%inf.get_name())\n", " \n", " inf.set_scale(exp(kernel_log_scale))\n", @@ -1815,7 +1815,7 @@ " \n", "\n", " start = time.time()\n", - " gp = sg.create_gaussian_process(\"GaussianProcessClassification\", inference_method=inf)\n", + " gp = sg.create(\"GaussianProcessClassification\", inference_method=inf)\n", " gp.train()\n", " end = time.time()\n", " print(\"cost %.2f seconds at training\"%(end-start))\n", @@ -1922,7 +1922,7 @@ ], "source": [ "inference=\"SingleLaplaceInferenceMethod\"\n", - "likelihood = sg.create_gp_likelihood(\"LogitLikelihood\")\n", + "likelihood = sg.create(\"LogitLikelihood\")\n", "learning_example2(inference, likelihood, x_train, x_test, y_train, y_test, False) #using Newton method\n", "\n", "learning_example2(inference, likelihood, x_train, x_test, y_train, y_test, True) #using lbfgs method" @@ -1971,7 +1971,7 @@ } ], "source": [ - "likelihood = sg.create_gp_likelihood(\"LogitVGLikelihood\")\n", + "likelihood = sg.create(\"LogitVGLikelihood\")\n", "learning_example2(\"KLCovarianceInferenceMethod\", likelihood, x_train, x_test, y_train, y_test)" ] }, @@ -2019,7 +2019,7 @@ } ], "source": [ - "likelihood = sg.create_gp_likelihood(\"LogitVGLikelihood\")\n", + "likelihood = sg.create(\"LogitVGLikelihood\")\n", "learning_example2(\"KLDiagonalInferenceMethod\", likelihood, x_train, x_test, y_train, y_test)" ] }, @@ -2066,7 +2066,7 @@ } ], "source": [ - "likelihood = sg.create_gp_likelihood(\"LogitVGLikelihood\")\n", + 
"likelihood = sg.create(\"LogitVGLikelihood\")\n", "learning_example2(\"KLCholeskyInferenceMethod\", likelihood, x_train, x_test, y_train, y_test)" ] }, @@ -2117,7 +2117,7 @@ } ], "source": [ - "likelihood = sg.create_gp_likelihood(\"LogitDVGLikelihood\")\n", + "likelihood = sg.create(\"LogitDVGLikelihood\")\n", "#likelihood.set_strict_scale(0.1)\n", "learning_example2(\"KLDualInferenceMethod\", likelihood, x_train, x_test, y_train, y_test)" ] @@ -2161,7 +2161,7 @@ "metadata": {}, "outputs": [], "source": [ - "#likelihood = sg.create_gp_likelihood(\"LogitVGPiecewiseBoundLikelihood\")\n", + "#likelihood = sg.create(\"LogitVGPiecewiseBoundLikelihood\")\n", "#likelihood.set_default_variational_bound()\n", "#likelihood.set_noise_factor(1e-15)\n", "#inference_methods=[\n", @@ -2207,7 +2207,7 @@ " Returns:\n", " Nothing\n", " \"\"\"\n", - " error_eval = sg.create_evaluation(\"ErrorRateMeasure\")\n", + " error_eval = sg.create(\"ErrorRateMeasure\")\n", " mean_func = sg.ZeroMean()\n", " kernel_log_sigma = 1.0\n", " kernel_sigma = 2*exp(2*kernel_log_sigma);\n", @@ -2222,7 +2222,7 @@ "\n", " kernel_log_scale = 1.0\n", "\n", - " inf = sg.create_gp_inference(inference, kernel_func, features_train, mean_func, labels_train, likelihood)\n", + " inf = sg.create(inference, kernel_func, features_train, mean_func, labels_train, likelihood)\n", " print(\"\\nusing %s\"%inf.get_name())\n", " \n", " inf.set_scale(exp(kernel_log_scale))\n", @@ -2231,10 +2231,10 @@ " minimizer1.set_lbfgs_parameters(100,80,sg.BACKTRACKING_STRONG_WOLFE,80)\n", " inf.register_minimizer(minimizer1);\n", "\n", - " gp = sg.create_machine(\"GaussianProcessClassification\", inference_method=inf)\n", + " gp = sg.create(\"GaussianProcessClassification\", inference_method=inf)\n", "\n", " # evaluate our inference method for its derivatives\n", - " grad = sg.create_evaluation(\"GradientEvaluation\", gp, features_train, labels_train, sg.GradientCriterion(), False)\n", + " grad = sg.create(\"GradientEvaluation\", gp, features_train, labels_train, sg.GradientCriterion(), False)\n", " grad.set_function(inf)\n", "\n", " # handles all of the above structures in memory\n", diff --git a/doc/ipython-notebooks/ica/bss_audio.ipynb b/doc/ipython-notebooks/ica/bss_audio.ipynb index f28ddaee365..66b7b70fc48 100644 --- a/doc/ipython-notebooks/ica/bss_audio.ipynb +++ b/doc/ipython-notebooks/ica/bss_audio.ipynb @@ -270,7 +270,7 @@ "outputs": [], "source": [ "# Separating with JADE\n", - "jade = sg.create_transformer('Jade')\n", + "jade = sg.create('Jade')\n", "jade.fit(mixed_signals)\n", "signals = jade.transform(mixed_signals)\n", "\n", diff --git a/doc/ipython-notebooks/ica/bss_image.ipynb b/doc/ipython-notebooks/ica/bss_image.ipynb index 4d0796ee0c7..ace45400653 100644 --- a/doc/ipython-notebooks/ica/bss_image.ipynb +++ b/doc/ipython-notebooks/ica/bss_image.ipynb @@ -153,7 +153,7 @@ "mixed_signals = sg.create_features(X)\n", "\n", "# Separating\n", - "jade = sg.create_transformer('Jade')\n", + "jade = sg.create('Jade')\n", "jade.fit(mixed_signals)\n", "signals = jade.transform(mixed_signals)\n", "S_ = signals.get('feature_matrix')\n", diff --git a/doc/ipython-notebooks/ica/ecg_sep.ipynb b/doc/ipython-notebooks/ica/ecg_sep.ipynb index c1bd04ae7d5..8caa8291f65 100644 --- a/doc/ipython-notebooks/ica/ecg_sep.ipynb +++ b/doc/ipython-notebooks/ica/ecg_sep.ipynb @@ -170,7 +170,7 @@ "outputs": [], "source": [ "# Separating with SOBI\n", - "sep = sg.create_transformer('SOBI')\n", + "sep = sg.create('SOBI')\n", "sep.put('tau', 1.0*np.arange(0,120))\n", " \n", 
"sep.fit(mixed_signals)\n", diff --git a/doc/ipython-notebooks/intro/Introduction.ipynb b/doc/ipython-notebooks/intro/Introduction.ipynb index 94029f81a2f..879922ab0f9 100644 --- a/doc/ipython-notebooks/intro/Introduction.ipynb +++ b/doc/ipython-notebooks/intro/Introduction.ipynb @@ -259,7 +259,7 @@ "metadata": {}, "outputs": [], "source": [ - "preproc=sg.create_transformer(\"PruneVarSubMean\", divide_by_std=True)\n", + "preproc=sg.create(\"PruneVarSubMean\", divide_by_std=True)\n", "preproc.fit(feats_train)\n", "feats_train = preproc.transform(feats_train)\n", "# Store preprocessed feature matrix.\n", @@ -338,7 +338,7 @@ "#prameters to svm\n", "C=0.9\n", "\n", - "svm=sg.create_machine(\"LibLinear\", C1=C, C2=C, labels=labels, \n", + "svm=sg.create(\"LibLinear\", C1=C, C2=C, labels=labels, \n", " liblinear_solver_type=\"L2R_L2LOSS_SVC\")\n", "#train\n", "svm.train(feats_train)\n", @@ -495,7 +495,7 @@ "label_e=trainlab[num_train:]\n", "labels_true=sg.create_labels(label_e)\n", "\n", - "svm=sg.create_machine(\"LibLinear\", C1=C, C2=C, labels=labels, \n", + "svm=sg.create(\"LibLinear\", C1=C, C2=C, labels=labels, \n", " liblinear_solver_type=\"L2R_L2LOSS_SVC\")\n", "\n", "#train and evaluate\n", @@ -503,7 +503,7 @@ "output=svm.apply(feats_evaluate)\n", "\n", "#use AccuracyMeasure to get accuracy\n", - "acc=sg.create_evaluation(\"AccuracyMeasure\")\n", + "acc=sg.create(\"AccuracyMeasure\")\n", "accuracy=acc.evaluate(output,labels_true)*100\n", "print('Accuracy(%):', accuracy)" ] @@ -546,7 +546,7 @@ "labels=sg.create_labels(sg.read_csv(os.path.join(SHOGUN_DATA_DIR, 'uci/housing/housing_label.dat')))\n", "\n", "#rescale to 0...1\n", - "preproc=sg.create_transformer(\"RescaleFeatures\")\n", + "preproc=sg.create(\"RescaleFeatures\")\n", "preproc.fit(temp_feats)\n", "temp_feats = preproc.transform(temp_feats)\n", "mat = temp_feats.get(\"feature_matrix\")\n", @@ -586,7 +586,7 @@ "width=1.0\n", "tau=0.5\n", "kernel=sg.create_kernel(\"GaussianKernel\", width=width)\n", - "krr=sg.create_machine(\"KernelRidgeRegression\",tau=tau, kernel=kernel, labels=labels)\n", + "krr=sg.create(\"KernelRidgeRegression\",tau=tau, kernel=kernel, labels=labels)\n", "krr.train(feats_train)\n", "kernel.init(feats_train, grid)\n", "out = krr.apply().get(\"labels\")\n" @@ -614,14 +614,14 @@ "\n", "#Regression with first attribute\n", "kernel=sg.create_kernel(\"GaussianKernel\", width=width)\n", - "krr=sg.create_machine(\"KernelRidgeRegression\",tau=tau, kernel=kernel, labels=labels)\n", + "krr=sg.create(\"KernelRidgeRegression\",tau=tau, kernel=kernel, labels=labels)\n", "krr.train(feats_train0)\n", "kernel.init(feats_train0, feats_test)\n", "out0 = krr.apply().get(\"labels\")\n", "\n", "#Regression with second attribute \n", "kernel=sg.create_kernel(\"GaussianKernel\", width=width)\n", - "krr=sg.create_machine(\"KernelRidgeRegression\",tau=tau, kernel=kernel, labels=labels)\n", + "krr=sg.create(\"KernelRidgeRegression\",tau=tau, kernel=kernel, labels=labels)\n", "krr.train(feats_train1)\n", "kernel.init(feats_train1, feats_test)\n", "out1 = krr.apply().get(\"labels\")" diff --git a/doc/ipython-notebooks/metric/LMNN.ipynb b/doc/ipython-notebooks/metric/LMNN.ipynb index f55e311e898..bc5045de449 100644 --- a/doc/ipython-notebooks/metric/LMNN.ipynb +++ b/doc/ipython-notebooks/metric/LMNN.ipynb @@ -371,9 +371,9 @@ "# points as their own 1-nearest neighbours\n", "k = 2\n", "\n", - "distance = sg.create_distance('EuclideanDistance')\n", + "distance = sg.create('EuclideanDistance')\n", "distance.init(feats, feats)\n", - "knn = 
sg.create_machine(\"KNN\",k=k, distance=distance, labels=labels)\n", + "knn = sg.create(\"KNN\",k=k, distance=distance, labels=labels)\n", "knn.train(feats)\n", "\n", "plot_sandwich_data(x, y, axes[0])\n", @@ -395,7 +395,7 @@ "L = lmnn.get('linear_transform')\n", "xl = np.dot(x, L.T)\n", "feats = sg.create_features(xl.T)\n", - "dist = sg.create_distance('EuclideanDistance')\n", + "dist = sg.create('EuclideanDistance')\n", "dist.init(feats, feats)\n", "knn.put('distance', dist)\n", "\n", @@ -486,7 +486,7 @@ "outputs": [], "source": [ "def visualize_tdsne(features, labels):\n", - " converter = sg.create_transformer(\"TDistributedStochasticNeighborEmbedding\",\n", + " converter = sg.create(\"TDistributedStochasticNeighborEmbedding\",\n", " target_dim=2, perplexity=25)\n", " \n", " embedding = converter.transform(features)\n", @@ -523,14 +523,14 @@ "outputs": [], "source": [ "# set up the classifier\n", - "knn = sg.create_machine(\"KNN\", k=3, distance=sg.create_distance('EuclideanDistance'))\n", + "knn = sg.create(\"KNN\", k=3, distance=sg.create('EuclideanDistance'))\n", "\n", "# set up 5-fold cross-validation\n", - "splitting = sg.create_splitting_strategy(\"StratifiedCrossValidationSplitting\", \n", + "splitting = sg.create(\"StratifiedCrossValidationSplitting\", \n", " labels=ape_labels, num_subsets=5)\n", "# evaluation method\n", - "evaluator = sg.create_evaluation(\"MulticlassAccuracy\")\n", - "cross_validation = sg.create_machine_evaluation(\"CrossValidation\",\n", + "evaluator = sg.create(\"MulticlassAccuracy\")\n", + "cross_validation = sg.create(\"CrossValidation\",\n", " machine=knn, \n", " features=ape_features, \n", " labels=ape_labels, \n", @@ -683,12 +683,12 @@ "source": [ "# kNN classifier\n", "k = 5\n", - "knn = sg.create_machine(\"KNN\", k=k, distance=sg.create_distance(\"EuclideanDistance\"))\n", + "knn = sg.create(\"KNN\", k=k, distance=sg.create(\"EuclideanDistance\"))\n", "\n", - "splitting = sg.create_splitting_strategy(\"StratifiedCrossValidationSplitting\", \n", + "splitting = sg.create(\"StratifiedCrossValidationSplitting\", \n", " labels=wine_labels, num_subsets=5)\n", - "evaluator = sg.create_evaluation(\"MulticlassAccuracy\")\n", - "cross_validation = sg.create_machine_evaluation(\"CrossValidation\",\n", + "evaluator = sg.create(\"MulticlassAccuracy\")\n", + "cross_validation = sg.create(\"CrossValidation\",\n", " machine=knn, \n", " features=wine_features, \n", " labels=wine_labels, \n", @@ -776,7 +776,7 @@ "outputs": [], "source": [ "# preprocess features so that all of them vary within [0,1]\n", - "preprocessor = sg.create_transformer(\"RescaleFeatures\")\n", + "preprocessor = sg.create(\"RescaleFeatures\")\n", "preprocessor.fit(wine_features)\n", "wine_features = preprocessor.transform(wine_features)\n", "\n", @@ -784,7 +784,7 @@ "assert(np.min(wine_features.get(\"feature_matrix\")) >= 0.0 and np.max(wine_features.get(\"feature_matrix\")) <= 1.0)\n", "\n", "# perform kNN classification after the feature rescaling\n", - "knn.put('distance', sg.create_distance(\"EuclideanDistance\"))\n", + "knn.put('distance', sg.create(\"EuclideanDistance\"))\n", "result = cross_validation.evaluate()\n", "euclidean_means[1] = result.get('mean')\n", "\n", @@ -863,7 +863,7 @@ "wine_features = wine_white_features\n", "\n", "# perform kNN classification after whitening\n", - "knn.put(\"distance\", sg.create_distance(\"EuclideanDistance\"))\n", + "knn.put(\"distance\", sg.create(\"EuclideanDistance\"))\n", "result = cross_validation.evaluate()\n", "euclidean_means[2] = 
result.get('mean')\n", "\n", diff --git a/doc/ipython-notebooks/multiclass/KNN.ipynb b/doc/ipython-notebooks/multiclass/KNN.ipynb index 063a2569883..9d18c05b344 100644 --- a/doc/ipython-notebooks/multiclass/KNN.ipynb +++ b/doc/ipython-notebooks/multiclass/KNN.ipynb @@ -137,8 +137,8 @@ "labels = sg.create_labels(Ytrain)\n", "feats = sg.create_features(Xtrain)\n", "k=3\n", - "dist = sg.create_distance('EuclideanDistance')\n", - "knn = sg.create_machine(\"KNN\", k=k, distance=dist, labels=labels)\n", + "dist = sg.create('EuclideanDistance')\n", + "knn = sg.create(\"KNN\", k=k, distance=dist, labels=labels)\n", "labels_test = sg.create_labels(Ytest)\n", "feats_test = sg.create_features(Xtest)\n", "knn.train(feats)\n", @@ -146,7 +146,7 @@ "print(\"Predictions\", pred.get(\"labels\")[:5])\n", "print(\"Ground Truth\", Ytest[:5])\n", "\n", - "evaluator = sg.create_evaluation(\"MulticlassAccuracy\")\n", + "evaluator = sg.create(\"MulticlassAccuracy\")\n", "accuracy = evaluator.evaluate(pred, labels_test)\n", "\n", "print(\"Accuracy = %2.2f%%\" % (100*accuracy))" @@ -269,7 +269,7 @@ "source": [ "def evaluate(labels, feats, use_cover_tree=False):\n", " import time\n", - " split = sg.create_splitting_strategy(\"CrossValidationSplitting\", labels=labels, num_subsets=Nsplit)\n", + " split = sg.create(\"CrossValidationSplitting\", labels=labels, num_subsets=Nsplit)\n", " split.build_subsets()\n", " \n", " accuracy = np.zeros((Nsplit, len(all_ks)))\n", @@ -285,9 +285,9 @@ " feats.add_subset(idx_train)\n", " labels.add_subset(idx_train)\n", "\n", - " dist = sg.create_distance('EuclideanDistance')\n", + " dist = sg.create('EuclideanDistance')\n", " dist.init(feats, feats)\n", - " knn = sg.create_machine(\"KNN\", k=k, distance=dist, labels=labels)\n", + " knn = sg.create(\"KNN\", k=k, distance=dist, labels=labels)\n", " #knn.set_store_model_features(True)\n", " #FIXME: causes SEGFAULT\n", " if use_cover_tree:\n", @@ -297,7 +297,7 @@ " knn.put('knn_solver', \"KNN_BRUTE\")\n", " knn.train()\n", "\n", - " evaluator = sg.create_evaluation(\"MulticlassAccuracy\")\n", + " evaluator = sg.create(\"MulticlassAccuracy\")\n", " pred = knn.apply()\n", " acc_train[i, j] = evaluator.evaluate(pred, labels)\n", "\n", @@ -409,7 +409,7 @@ "\n", "gk=sg.create_kernel(\"GaussianKernel\", width=width)\n", "\n", - "svm=sg.create_machine(\"GMNPSVM\", C=C, kernel=gk, labels=labels)\n", + "svm=sg.create(\"GMNPSVM\", C=C, kernel=gk, labels=labels)\n", "_=svm.train(feats)" ] }, @@ -427,7 +427,7 @@ "outputs": [], "source": [ "out=svm.apply(feats_test)\n", - "evaluator = sg.create_evaluation(\"MulticlassAccuracy\")\n", + "evaluator = sg.create(\"MulticlassAccuracy\")\n", "accuracy = evaluator.evaluate(out, labels_test)\n", "\n", "print(\"Accuracy = %2.2f%%\" % (100*accuracy))" @@ -453,7 +453,7 @@ "labels_rem=sg.create_labels(Yrem)\n", "out=svm.apply(feats_rem)\n", "\n", - "evaluator = sg.create_evaluation(\"MulticlassAccuracy\")\n", + "evaluator = sg.create(\"MulticlassAccuracy\")\n", "accuracy = evaluator.evaluate(out, labels_rem)\n", "\n", "print(\"Accuracy = %2.2f%%\" % (100*accuracy))\n", diff --git a/doc/ipython-notebooks/multiclass/Tree/DecisionTrees.ipynb b/doc/ipython-notebooks/multiclass/Tree/DecisionTrees.ipynb index 29de81445cf..0a2af809db1 100644 --- a/doc/ipython-notebooks/multiclass/Tree/DecisionTrees.ipynb +++ b/doc/ipython-notebooks/multiclass/Tree/DecisionTrees.ipynb @@ -197,7 +197,7 @@ "outputs": [], "source": [ "# create ID3ClassifierTree object\n", - "id3 = sg.create_machine(\"ID3ClassifierTree\", labels=labels)\n", + 
"id3 = sg.create(\"ID3ClassifierTree\", labels=labels)\n", "\n", "# learn the tree from training features\n", "is_successful = id3.train(train_feats)" @@ -412,7 +412,7 @@ " train_lab = sg.create_labels(labels)\n", "\n", " # create ID3ClassifierTree object\n", - " id3 = sg.create_machine(\"ID3ClassifierTree\", labels=train_lab)\n", + " id3 = sg.create(\"ID3ClassifierTree\", labels=train_lab)\n", "\n", " # learn the tree from training features\n", " id3.train(train_feats)\n", @@ -439,7 +439,7 @@ "outputs": [], "source": [ "# Shogun object for calculating multiclass accuracy\n", - "accuracy = sg.create_evaluation(\"MulticlassAccuracy\")\n", + "accuracy = sg.create(\"MulticlassAccuracy\")\n", "print('Accuracy : ' + str(accuracy.evaluate(output, test_labels)))" ] }, @@ -610,7 +610,7 @@ "# steps in C4.5 Tree training bundled together in a python method\n", "def train_tree(feats,types,labels):\n", " # C4.5 Tree object\n", - " tree = sg.create_machine(\"C45ClassifierTree\", labels=labels, m_nominal=types)\n", + " tree = sg.create(\"C45ClassifierTree\", labels=labels, m_nominal=types)\n", " # supply training matrix and train\n", " tree.train(feats)\n", " \n", @@ -882,7 +882,7 @@ "outputs": [], "source": [ "# Shogun object for calculating multiclass accuracy\n", - "accuracy = sg.create_evaluation(\"MulticlassAccuracy\")\n", + "accuracy = sg.create(\"MulticlassAccuracy\")\n", "print('Accuracy : ' + str(accuracy.evaluate(output, test_labels)))" ] }, @@ -971,7 +971,7 @@ "source": [ "def train_carttree(feat_types,problem_type,num_folds,use_cv_pruning,labels,feats):\n", " # create CART tree object\n", - " c = sg.create_machine(\"CARTree\", nominal=feat_types,\n", + " c = sg.create(\"CARTree\", nominal=feat_types,\n", " mode=problem_type,\n", " folds=num_folds,\n", " apply_cv_pruning=use_cv_pruning,\n", @@ -1187,7 +1187,7 @@ "feature_types = np.array([False, False, False, False])\n", "\n", "# setup CART-tree with cross validation pruning switched off\n", - "cart = sg.create_machine(\"CARTree\", nominal=feature_types,\n", + "cart = sg.create(\"CARTree\", nominal=feature_types,\n", " mode=\"PT_MULTICLASS\",\n", " folds=5,\n", " apply_cv_pruning=False)" @@ -1212,13 +1212,13 @@ "labels_train = sg.create_labels(lab)\n", "\n", "# set evaluation criteria - multiclass accuracy\n", - "accuracy = sg.create_evaluation(\"MulticlassAccuracy\")\n", + "accuracy = sg.create(\"MulticlassAccuracy\")\n", "\n", "# set splitting criteria - 10 fold cross-validation\n", - "split = sg.create_splitting_strategy(\"CrossValidationSplitting\", labels=labels_train, num_subsets=10)\n", + "split = sg.create(\"CrossValidationSplitting\", labels=labels_train, num_subsets=10)\n", "\n", "# set cross-validation parameters \n", - "cross_val = sg.create_machine_evaluation(\"CrossValidation\",\n", + "cross_val = sg.create(\"CrossValidation\",\n", " machine=cart,\n", " features=feats_train,\n", " labels=labels_train,\n", @@ -1298,18 +1298,18 @@ " # set attribute types - 2 nominal and 2 ordinal\n", " feature_types = np.array([True, True, False, False])\n", " # setup CART-tree with cross validation pruning switched off\n", - " cart = sg.create_machine(\"CARTree\", nominal=feature_types,\n", + " cart = sg.create(\"CARTree\", nominal=feature_types,\n", " mode=\"PT_REGRESSION\",\n", " folds=5,\n", " apply_cv_pruning=False,\n", " max_depth=max_depth)\n", "\n", " # set evaluation criteria - mean squared error\n", - " accuracy = sg.create_evaluation(\"MeanSquaredError\")\n", + " accuracy = sg.create(\"MeanSquaredError\")\n", " # set splitting criteria 
- 10 fold cross-validation\n", - " split = sg.create_splitting_strategy(\"CrossValidationSplitting\", labels=labels_train, num_subsets=10)\n", + " split = sg.create(\"CrossValidationSplitting\", labels=labels_train, num_subsets=10)\n", " # set cross-validation parameters \n", - " cross_val = sg.create_machine_evaluation(\"CrossValidation\",\n", + " cross_val = sg.create(\"CrossValidation\",\n", " machine=cart,\n", " features=feats_train,\n", " labels=labels_train,\n", @@ -1405,7 +1405,7 @@ "source": [ "def train_chaidtree(dependent_var_type,feature_types,num_bins,feats,labels):\n", " # create CHAID tree object\n", - " c = sg.create_machine(\"CHAIDTree\", dependent_vartype=dependent_var_type,\n", + " c = sg.create(\"CHAIDTree\", dependent_vartype=dependent_var_type,\n", " feature_types=feature_types,\n", " num_breakpoints=num_bins,\n", " labels=labels)\n", @@ -1560,7 +1560,7 @@ "feature_types = np.array([2 for i in range(13)],dtype=np.int32) \n", "\n", "# setup CHAID tree - dependent variable is nominal(0), feature types set, number of bins(20)\n", - "chaid = sg.create_machine(\"CHAIDTree\", dependent_vartype=0,\n", + "chaid = sg.create(\"CHAIDTree\", dependent_vartype=0,\n", " feature_types=feature_types,\n", " num_breakpoints=20)" ] @@ -1579,13 +1579,13 @@ "outputs": [], "source": [ "# set evaluation criteria - multiclass accuracy\n", - "accuracy = sg.create_evaluation(\"MulticlassAccuracy\")\n", + "accuracy = sg.create(\"MulticlassAccuracy\")\n", " \n", "# set splitting criteria - 10 fold cross-validation\n", - "split = sg.create_splitting_strategy(\"CrossValidationSplitting\", labels=train_labels, num_subsets=10)\n", + "split = sg.create(\"CrossValidationSplitting\", labels=train_labels, num_subsets=10)\n", "\n", "# set cross-validation parameters \n", - "cross_val = sg.create_machine_evaluation(\"CrossValidation\",\n", + "cross_val = sg.create(\"CrossValidation\",\n", " machine=chaid,\n", " features=train_feats,\n", " labels=train_labels,\n", @@ -1644,20 +1644,20 @@ " feature_types[9]=1 \n", "\n", " # setup CHAID-tree\n", - " chaid = sg.create_machine(\"CHAIDTree\", dependent_vartype=2,\n", + " chaid = sg.create(\"CHAIDTree\", dependent_vartype=2,\n", " feature_types=feature_types,\n", " num_breakpoints=10,\n", " max_tree_depth=10)\n", "\n", " # set evaluation criteria - mean squared error\n", - " accuracy = sg.create_evaluation(\"MeanSquaredError\")\n", + " accuracy = sg.create(\"MeanSquaredError\")\n", " # set splitting criteria - 5 fold cross-validation\n", - " split = sg.create_splitting_strategy(\"CrossValidationSplitting\", \n", + " split = sg.create(\"CrossValidationSplitting\", \n", " labels=train_labels, \n", " num_subsets=5)\n", "\n", " # set cross-validation parameters \n", - " cross_val = sg.create_machine_evaluation(\"CrossValidation\",\n", + " cross_val = sg.create(\"CrossValidation\",\n", " machine=chaid,\n", " features=train_feats,\n", " labels=train_labels,\n", @@ -1727,4 +1727,4 @@ }, "nbformat": 4, "nbformat_minor": 1 -} \ No newline at end of file +} diff --git a/doc/ipython-notebooks/multiclass/Tree/TreeEnsemble.ipynb b/doc/ipython-notebooks/multiclass/Tree/TreeEnsemble.ipynb index d2991dacf25..a78844cf93e 100644 --- a/doc/ipython-notebooks/multiclass/Tree/TreeEnsemble.ipynb +++ b/doc/ipython-notebooks/multiclass/Tree/TreeEnsemble.ipynb @@ -86,14 +86,14 @@ "outputs": [], "source": [ "def setup_random_forest(num_trees,rand_subset_size,combination_rule,feature_types):\n", - " rf=sg.create_machine(\"RandomForest\", num_bags=num_trees,\n", + " 
rf=sg.create(\"RandomForest\", num_bags=num_trees,\n", " combination_rule=combination_rule)\n", " rf.get(\"machine\").put(\"m_randsubset_size\", rand_subset_size)\n", " rf.get(\"machine\").put(\"nominal\", feature_types)\n", " \n", " return rf\n", "\n", - "comb_rule=sg.create_combination_rule(\"MajorityVote\")\n", + "comb_rule=sg.create(\"MajorityVote\")\n", "feat_types=np.array([False]*16)\n", "rand_forest=setup_random_forest(10,4,comb_rule,feat_types)" ] @@ -139,7 +139,7 @@ "outputs": [], "source": [ "def train_cart(train_feats,train_labels,feature_types,problem_type):\n", - " c=sg.create_machine(\"CARTree\", nominal=feature_types,\n", + " c=sg.create(\"CARTree\", nominal=feature_types,\n", " mode=problem_type,\n", " folds=2,\n", " apply_cv_pruning=False,\n", @@ -169,7 +169,7 @@ "metadata": {}, "outputs": [], "source": [ - "accuracy=sg.create_evaluation(\"MulticlassAccuracy\")\n", + "accuracy=sg.create(\"MulticlassAccuracy\")\n", "\n", "rf_train_accuracy=accuracy.evaluate(output_rand_forest_train,train_labels)*100\n", "rf_test_accuracy=accuracy.evaluate(output_rand_forest_test,test_labels)*100\n", @@ -216,7 +216,7 @@ " rf.put('labels', train_labels)\n", " rf.train(train_feats)\n", " out_test=rf.apply_multiclass(test_feats)\n", - " acc=sg.create_evaluation(\"MulticlassAccuracy\")\n", + " acc=sg.create(\"MulticlassAccuracy\")\n", " return acc.evaluate(out_test,test_labels)" ] }, @@ -369,7 +369,7 @@ "rf.train(train_feats)\n", " \n", "# set evaluation strategy\n", - "rf.put(\"oob_evaluation_metric\", sg.create_evaluation(\"MulticlassAccuracy\"))\n", + "rf.put(\"oob_evaluation_metric\", sg.create(\"MulticlassAccuracy\"))\n", "oobe=rf.get(\"oob_error\")\n", "\n", "print('OOB accuracy : '+str(round(oobe*100,3))+'%')" @@ -410,10 +410,10 @@ "source": [ "def get_oob_errors_wine(num_trees,rand_subset_size):\n", " feat_types=np.array([False]*13)\n", - " rf=setup_random_forest(num_trees,rand_subset_size,sg.create_combination_rule(\"MajorityVote\"),feat_types)\n", + " rf=setup_random_forest(num_trees,rand_subset_size,sg.create(\"MajorityVote\"),feat_types)\n", " rf.put('labels', train_labels)\n", " rf.train(train_feats)\n", - " rf.put(\"oob_evaluation_metric\", sg.create_evaluation(\"MulticlassAccuracy\"))\n", + " rf.put(\"oob_evaluation_metric\", sg.create(\"MulticlassAccuracy\"))\n", " return rf.get(\"oob_error\") \n", "\n", "size=[1,2,4,6,8,10,13]\n", diff --git a/doc/ipython-notebooks/multiclass/multiclass_reduction.ipynb b/doc/ipython-notebooks/multiclass/multiclass_reduction.ipynb index 8d86f6a5bbd..9a8c7ec8aac 100644 --- a/doc/ipython-notebooks/multiclass/multiclass_reduction.ipynb +++ b/doc/ipython-notebooks/multiclass/multiclass_reduction.ipynb @@ -200,10 +200,10 @@ "outputs": [], "source": [ "def evaluate(strategy, C):\n", - " bin_machine = sg.create_machine(\"LibLinear\", liblinear_solver_type=\"L2R_L2LOSS_SVC\",\n", + " bin_machine = sg.create(\"LibLinear\", liblinear_solver_type=\"L2R_L2LOSS_SVC\",\n", " use_bias=True, C1=C, C2=C)\n", "\n", - " mc_machine = sg.create_machine(\"LinearMulticlassMachine\",\n", + " mc_machine = sg.create(\"LinearMulticlassMachine\",\n", " multiclass_strategy=strategy, \n", " machine=bin_machine, \n", " labels=lab_train)\n", @@ -216,7 +216,7 @@ " pred_test = mc_machine.apply(feats_test)\n", " t_test = time.process_time() - t_begin\n", "\n", - " evaluator = sg.create_evaluation(\"MulticlassAccuracy\")\n", + " evaluator = sg.create(\"MulticlassAccuracy\")\n", " acc = evaluator.evaluate(pred_test, lab_test)\n", "\n", " print(\"training time: %.4f\" % t_train)\n", 
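Every hunk in this patch follows the same mechanical rewrite: the specialized factories (create_machine, create_evaluation, create_multiclass_strategy, and so on) collapse into the single dispatching sg.create, while create_features and create_labels keep their names. A minimal end-to-end sketch of the new calling convention, mirroring the evaluate() helper above — the random data and constant values here are illustrative only, and it assumes the dispatching create accepts the same keyword arguments these notebooks pass:

import numpy as np
import shogun as sg

X = np.random.randn(2, 100)                      # 2 features x 100 samples
y = np.random.randint(0, 3, 100).astype(float)   # 3 classes

feats = sg.create_features(X)    # unchanged by this patch
labels = sg.create_labels(y)     # unchanged by this patch

# one generic factory now resolves machines, strategies and evaluators alike
bin_machine = sg.create("LibLinear", liblinear_solver_type="L2R_L2LOSS_SVC",
                        use_bias=True, C1=1.0, C2=1.0)
mc_machine = sg.create("LinearMulticlassMachine",
                       multiclass_strategy=sg.create("MulticlassOneVsRestStrategy"),
                       machine=bin_machine, labels=labels)
mc_machine.train(feats)

evaluator = sg.create("MulticlassAccuracy")
print("accuracy:", evaluator.evaluate(mc_machine.apply(feats), labels))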
@@ -239,11 +239,11 @@ "source": [ "print(\"\\nOne-vs-Rest\")\n", "print(\"=\"*60)\n", - "evaluate(sg.create_multiclass_strategy(\"MulticlassOneVsRestStrategy\"), 5.0)\n", + "evaluate(sg.create(\"MulticlassOneVsRestStrategy\"), 5.0)\n", "\n", "print(\"\\nOne-vs-One\")\n", "print(\"=\"*60)\n", - "evaluate(sg.create_multiclass_strategy(\"MulticlassOneVsOneStrategy\"), 2.0)" + "evaluate(sg.create(\"MulticlassOneVsOneStrategy\"), 2.0)" ] }, { @@ -259,7 +259,7 @@ "metadata": {}, "outputs": [], "source": [ - "mcsvm = sg.create_machine(\"MulticlassLibLinear\", C=5.0, \n", + "mcsvm = sg.create(\"MulticlassLibLinear\", C=5.0, \n", " labels=lab_train, use_bias=True)\n", "\n", "t_begin = time.process_time()\n", @@ -270,7 +270,7 @@ "pred_test = mcsvm.apply(feats_test)\n", "t_test = time.process_time() - t_begin\n", "\n", - "evaluator = sg.create_evaluation(\"MulticlassAccuracy\")\n", + "evaluator = sg.create(\"MulticlassAccuracy\")\n", "acc = evaluator.evaluate(pred_test, lab_test)\n", "\n", "print(\"training time: %.4f\" % t_train)\n", @@ -467,9 +467,9 @@ " kernel=sg.create_kernel(\"GaussianKernel\", width=width)\n", " kernel.init(feats_train, feats_train)\n", " \n", - " classifier = sg.create_machine(\"LibSVM\", epsilon=epsilon)\n", + " classifier = sg.create(\"LibSVM\", epsilon=epsilon)\n", "\n", - " mc_machine = sg.create_machine(\"KernelMulticlassMachine\",\n", + " mc_machine = sg.create(\"KernelMulticlassMachine\",\n", " multiclass_strategy=strategy, \n", " kernel=kernel, \n", " machine=classifier,\n", @@ -483,7 +483,7 @@ " pred_test = mc_machine.apply_multiclass(feats_test)\n", " t_test = time.process_time() - t_begin\n", "\n", - " evaluator = sg.create_evaluation(\"MulticlassAccuracy\")\n", + " evaluator = sg.create(\"MulticlassAccuracy\")\n", " acc = evaluator.evaluate(pred_test, lab_test)\n", "\n", " print(\"training time: %.4f\" % t_train)\n", @@ -492,7 +492,7 @@ "\n", "print(\"\\nOne-vs-Rest\")\n", "print(\"=\"*60)\n", - "evaluate_multiclass_kernel(sg.create_multiclass_strategy(\"MulticlassOneVsRestStrategy\"))" + "evaluate_multiclass_kernel(sg.create(\"MulticlassOneVsRestStrategy\"))" ] }, { @@ -604,10 +604,10 @@ "kernel=sg.create_kernel(\"GaussianKernel\", width=width)\n", "kernel.init(feats_tr, feats_tr)\n", " \n", - "classifier=sg.create_machine(\"LibSVM\", epsilon=epsilon)\n", + "classifier=sg.create(\"LibSVM\", epsilon=epsilon)\n", "\n", - "mc_machine=sg.create_machine(\"KernelMulticlassMachine\",\n", - " multiclass_strategy=sg.create_multiclass_strategy(\"MulticlassOneVsRestStrategy\"),\n", + "mc_machine=sg.create(\"KernelMulticlassMachine\",\n", + " multiclass_strategy=sg.create(\"MulticlassOneVsRestStrategy\"),\n", " kernel=kernel, \n", " machine=classifier, \n", " labels=labels)\n", @@ -663,11 +663,11 @@ "source": [ "C=2.0\n", " \n", - "bin_machine = sg.create_machine(\"LibLinear\", liblinear_solver_type=\"L2R_L2LOSS_SVC\",\n", + "bin_machine = sg.create(\"LibLinear\", liblinear_solver_type=\"L2R_L2LOSS_SVC\",\n", " use_bias=True, C1=C, C2=C)\n", "\n", - "mc_machine1 = sg.create_machine(\"LinearMulticlassMachine\",\n", - " multiclass_strategy=sg.create_multiclass_strategy(\"MulticlassOneVsOneStrategy\"),\n", + "mc_machine1 = sg.create(\"LinearMulticlassMachine\",\n", + " multiclass_strategy=sg.create(\"MulticlassOneVsOneStrategy\"),\n", " machine=bin_machine, \n", " labels=labels)\n", "mc_machine1.train(feats_tr)\n", diff --git a/doc/ipython-notebooks/multiclass/naive_bayes.ipynb b/doc/ipython-notebooks/multiclass/naive_bayes.ipynb index e9d5a4abeb1..a0067862b11 100644 --- 
a/doc/ipython-notebooks/multiclass/naive_bayes.ipynb +++ b/doc/ipython-notebooks/multiclass/naive_bayes.ipynb @@ -135,7 +135,7 @@ "source": [ "X_train, Y_train = gen_samples(n_train)\n", "\n", - "machine = sg.create_machine(\"GaussianNaiveBayes\", labels=sg.create_labels(Y_train))\n", + "machine = sg.create(\"GaussianNaiveBayes\", labels=sg.create_labels(Y_train))\n", "\n", "machine.train(sg.create_features(X_train))" ] diff --git a/doc/ipython-notebooks/neuralnets/autoencoders.ipynb b/doc/ipython-notebooks/neuralnets/autoencoders.ipynb index 32327fee204..bde4acab976 100644 --- a/doc/ipython-notebooks/neuralnets/autoencoders.ipynb +++ b/doc/ipython-notebooks/neuralnets/autoencoders.ipynb @@ -97,12 +97,12 @@ "metadata": {}, "outputs": [], "source": [ - "ae = sg.create_machine(\"DeepAutoencoder\", seed=10)\n", - "ae.add(\"layers\", sg.create_layer(\"NeuralInputLayer\", num_neurons=256))\n", - "ae.add(\"layers\", sg.create_layer(\"NeuralRectifiedLinearLayer\", num_neurons=512))\n", - "ae.add(\"layers\", sg.create_layer(\"NeuralRectifiedLinearLayer\", num_neurons=128))\n", - "ae.add(\"layers\", sg.create_layer(\"NeuralRectifiedLinearLayer\", num_neurons=512))\n", - "ae.add(\"layers\", sg.create_layer(\"NeuralLinearLayer\", num_neurons=256))" + "ae = sg.create(\"DeepAutoencoder\", seed=10)\n", + "ae.add(\"layers\", sg.create(\"NeuralInputLayer\", num_neurons=256))\n", + "ae.add(\"layers\", sg.create(\"NeuralRectifiedLinearLayer\", num_neurons=512))\n", + "ae.add(\"layers\", sg.create(\"NeuralRectifiedLinearLayer\", num_neurons=128))\n", + "ae.add(\"layers\", sg.create(\"NeuralRectifiedLinearLayer\", num_neurons=512))\n", + "ae.add(\"layers\", sg.create(\"NeuralLinearLayer\", num_neurons=256))" ] }, { @@ -272,7 +272,7 @@ "metadata": {}, "outputs": [], "source": [ - "nn = sg.convert_to_neural_network(ae, sg.create_layer(\"NeuralSoftmaxLayer\", num_neurons=10), 0.01)\n", + "nn = sg.convert_to_neural_network(ae, sg.create(\"NeuralSoftmaxLayer\", num_neurons=10), 0.01)\n", "\n", "nn.put('max_num_epochs', 50)\n", "\n", @@ -294,7 +294,7 @@ "outputs": [], "source": [ "predictions = nn.apply(Xtest)\n", - "accuracy = sg.create_evaluation(\"MulticlassAccuracy\").evaluate(predictions, Ytest) * 100\n", + "accuracy = sg.create(\"MulticlassAccuracy\").evaluate(predictions, Ytest) * 100\n", "\n", "print(\"Classification accuracy on the test set =\", accuracy, \"%\")" ] @@ -321,13 +321,13 @@ "metadata": {}, "outputs": [], "source": [ - "conv_ae = sg.create_machine(\"DeepAutoencoder\", seed=10)\n", + "conv_ae = sg.create(\"DeepAutoencoder\", seed=10)\n", "# 16x16 single channel images\n", - "conv_ae.add(\"layers\", sg.create_layer(\"NeuralInputLayer\", width=16, height=16, num_neurons=256)) \n", + "conv_ae.add(\"layers\", sg.create(\"NeuralInputLayer\", width=16, height=16, num_neurons=256)) \n", "\n", "# the first encoding layer: 5 feature maps, filters with radius 2 (5x5 filters)\n", "# and max-pooling in a 2x2 region: its output will be 10 8x8 feature maps\n", - "conv_ae.add(\"layers\", sg.create_layer(\"NeuralConvolutionalLayer\",\n", + "conv_ae.add(\"layers\", sg.create(\"NeuralConvolutionalLayer\",\n", " activation_function=\"CMAF_RECTIFIED_LINEAR\", \n", " num_maps=5, \n", " radius_x=2, \n", @@ -337,7 +337,7 @@ "\n", "# the second encoding layer: 15 feature maps, filters with radius 2 (5x5 filters)\n", "# and max-pooling in a 2x2 region: its output will be 20 4x4 feature maps\n", - "conv_ae.add(\"layers\", sg.create_layer(\"NeuralConvolutionalLayer\",\n", + "conv_ae.add(\"layers\", 
sg.create(\"NeuralConvolutionalLayer\",\n", " activation_function=\"CMAF_RECTIFIED_LINEAR\", \n", " num_maps=15, \n", " radius_x=2, \n", @@ -346,14 +346,14 @@ " pooling_height=2)) \n", "\n", "# the first decoding layer: same structure as the first encoding layer\n", - "conv_ae.add(\"layers\", sg.create_layer(\"NeuralConvolutionalLayer\",\n", + "conv_ae.add(\"layers\", sg.create(\"NeuralConvolutionalLayer\",\n", " activation_function=\"CMAF_RECTIFIED_LINEAR\", \n", " num_maps=15, \n", " radius_x=2, \n", " radius_y=2))\n", "\n", "# the second decoding layer: same structure as the input layer\n", - "conv_ae.add(\"layers\", sg.create_layer(\"NeuralConvolutionalLayer\",\n", + "conv_ae.add(\"layers\", sg.create(\"NeuralConvolutionalLayer\",\n", " activation_function=\"CMAF_RECTIFIED_LINEAR\", \n", " num_maps=1, \n", " radius_x=2, \n", @@ -399,7 +399,7 @@ "metadata": {}, "outputs": [], "source": [ - "conv_nn = sg.convert_to_neural_network(ae, sg.create_layer(\"NeuralSoftmaxLayer\", num_neurons=10), 0.01)\n", + "conv_nn = sg.convert_to_neural_network(ae, sg.create(\"NeuralSoftmaxLayer\", num_neurons=10), 0.01)\n", "\n", "# train the network\n", "conv_nn.put('epsilon', 0.0)\n", @@ -424,7 +424,7 @@ "outputs": [], "source": [ "predictions = conv_nn.apply_multiclass(Xtest)\n", - "accuracy = sg.create_evaluation(\"MulticlassAccuracy\").evaluate(predictions, Ytest) * 100\n", + "accuracy = sg.create(\"MulticlassAccuracy\").evaluate(predictions, Ytest) * 100\n", "\n", "print(\"Classification accuracy on the test set =\", accuracy, \"%\")" ] diff --git a/doc/ipython-notebooks/neuralnets/neuralnets_digits.ipynb b/doc/ipython-notebooks/neuralnets/neuralnets_digits.ipynb index 4dca02f606a..98cd9f11377 100644 --- a/doc/ipython-notebooks/neuralnets/neuralnets_digits.ipynb +++ b/doc/ipython-notebooks/neuralnets/neuralnets_digits.ipynb @@ -106,29 +106,29 @@ "outputs": [], "source": [ "# create the networks\n", - "net_no_reg = sg.create_machine(\"NeuralNetwork\")\n", - "net_no_reg.add(\"layers\", sg.create_layer(\"NeuralInputLayer\", num_neurons=256))\n", - "net_no_reg.add(\"layers\", sg.create_layer(\"NeuralLogisticLayer\", num_neurons=256))\n", - "net_no_reg.add(\"layers\", sg.create_layer(\"NeuralLogisticLayer\", num_neurons=128))\n", - "net_no_reg.add(\"layers\", sg.create_layer(\"NeuralSoftmaxLayer\", num_neurons=10))\n", + "net_no_reg = sg.create(\"NeuralNetwork\")\n", + "net_no_reg.add(\"layers\", sg.create(\"NeuralInputLayer\", num_neurons=256))\n", + "net_no_reg.add(\"layers\", sg.create(\"NeuralLogisticLayer\", num_neurons=256))\n", + "net_no_reg.add(\"layers\", sg.create(\"NeuralLogisticLayer\", num_neurons=128))\n", + "net_no_reg.add(\"layers\", sg.create(\"NeuralSoftmaxLayer\", num_neurons=10))\n", "\n", - "net_l2 = sg.create_machine(\"NeuralNetwork\")\n", - "net_l2.add(\"layers\", sg.create_layer(\"NeuralInputLayer\", num_neurons=256))\n", - "net_l2.add(\"layers\", sg.create_layer(\"NeuralLogisticLayer\", num_neurons=256))\n", - "net_l2.add(\"layers\", sg.create_layer(\"NeuralLogisticLayer\", num_neurons=128))\n", - "net_l2.add(\"layers\", sg.create_layer(\"NeuralSoftmaxLayer\", num_neurons=10))\n", + "net_l2 = sg.create(\"NeuralNetwork\")\n", + "net_l2.add(\"layers\", sg.create(\"NeuralInputLayer\", num_neurons=256))\n", + "net_l2.add(\"layers\", sg.create(\"NeuralLogisticLayer\", num_neurons=256))\n", + "net_l2.add(\"layers\", sg.create(\"NeuralLogisticLayer\", num_neurons=128))\n", + "net_l2.add(\"layers\", sg.create(\"NeuralSoftmaxLayer\", num_neurons=10))\n", "\n", - "net_l1 = 
sg.create_machine(\"NeuralNetwork\")\n", - "net_l1.add(\"layers\", sg.create_layer(\"NeuralInputLayer\", num_neurons=256))\n", - "net_l1.add(\"layers\", sg.create_layer(\"NeuralLogisticLayer\", num_neurons=256))\n", - "net_l1.add(\"layers\", sg.create_layer(\"NeuralLogisticLayer\", num_neurons=128))\n", - "net_l1.add(\"layers\", sg.create_layer(\"NeuralSoftmaxLayer\", num_neurons=10))\n", + "net_l1 = sg.create(\"NeuralNetwork\")\n", + "net_l1.add(\"layers\", sg.create(\"NeuralInputLayer\", num_neurons=256))\n", + "net_l1.add(\"layers\", sg.create(\"NeuralLogisticLayer\", num_neurons=256))\n", + "net_l1.add(\"layers\", sg.create(\"NeuralLogisticLayer\", num_neurons=128))\n", + "net_l1.add(\"layers\", sg.create(\"NeuralSoftmaxLayer\", num_neurons=10))\n", "\n", - "net_dropout = sg.create_machine(\"NeuralNetwork\")\n", - "net_dropout.add(\"layers\", sg.create_layer(\"NeuralInputLayer\", num_neurons=256))\n", - "net_dropout.add(\"layers\", sg.create_layer(\"NeuralLogisticLayer\", num_neurons=256))\n", - "net_dropout.add(\"layers\", sg.create_layer(\"NeuralLogisticLayer\", num_neurons=128))\n", - "net_dropout.add(\"layers\", sg.create_layer(\"NeuralSoftmaxLayer\", num_neurons=10))" + "net_dropout = sg.create(\"NeuralNetwork\")\n", + "net_dropout.add(\"layers\", sg.create(\"NeuralInputLayer\", num_neurons=256))\n", + "net_dropout.add(\"layers\", sg.create(\"NeuralLogisticLayer\", num_neurons=256))\n", + "net_dropout.add(\"layers\", sg.create(\"NeuralLogisticLayer\", num_neurons=128))\n", + "net_dropout.add(\"layers\", sg.create(\"NeuralSoftmaxLayer\", num_neurons=10))" ] }, { @@ -208,7 +208,7 @@ "def compute_accuracy(net, X, Y):\n", " predictions = net.apply_multiclass(X)\n", "\n", - " evaluator = sg.create_evaluation(\"MulticlassAccuracy\")\n", + " evaluator = sg.create(\"MulticlassAccuracy\")\n", " accuracy = evaluator.evaluate(predictions, Y)\n", " return accuracy*100" ] @@ -373,14 +373,14 @@ "outputs": [], "source": [ "# prepere the layers\n", - "net_conv = sg.create_machine(\"NeuralNetwork\")\n", + "net_conv = sg.create(\"NeuralNetwork\")\n", "\n", "# input layer, a 16x16 image single channel image\n", - "net_conv.add(\"layers\", sg.create_layer(\"NeuralInputLayer\", width=16, height=16, num_neurons=256)) \n", + "net_conv.add(\"layers\", sg.create(\"NeuralInputLayer\", width=16, height=16, num_neurons=256)) \n", "\n", "# the first convolutional layer: 10 feature maps, filters with radius 2 (5x5 filters)\n", "# and max-pooling in a 2x2 region: its output will be 10 8x8 feature maps\n", - "net_conv.add(\"layers\", sg.create_layer(\"NeuralConvolutionalLayer\",\n", + "net_conv.add(\"layers\", sg.create(\"NeuralConvolutionalLayer\",\n", " activation_function=\"CMAF_RECTIFIED_LINEAR\", \n", " num_maps=10, \n", " radius_x=2, \n", @@ -390,7 +390,7 @@ "\n", "# the first convolutional layer: 15 feature maps, filters with radius 2 (5x5 filters)\n", "# and max-pooling in a 2x2 region: its output will be 15 4x4 feature maps\n", - "net_conv.add(\"layers\", sg.create_layer(\"NeuralConvolutionalLayer\",\n", + "net_conv.add(\"layers\", sg.create(\"NeuralConvolutionalLayer\",\n", " activation_function=\"CMAF_RECTIFIED_LINEAR\", \n", " num_maps=15, \n", " radius_x=2, \n", @@ -399,7 +399,7 @@ " pooling_height=2)) \n", "\n", "# output layer\n", - "net_conv.add(\"layers\", sg.create_layer(\"NeuralSoftmaxLayer\", num_neurons=10))" + "net_conv.add(\"layers\", sg.create(\"NeuralSoftmaxLayer\", num_neurons=10))" ] }, { diff --git a/doc/ipython-notebooks/neuralnets/rbms_dbns.ipynb 
diff --git a/doc/ipython-notebooks/neuralnets/rbms_dbns.ipynb b/doc/ipython-notebooks/neuralnets/rbms_dbns.ipynb
index 6adb5c1b07a..160f3898d70 100644
--- a/doc/ipython-notebooks/neuralnets/rbms_dbns.ipynb
+++ b/doc/ipython-notebooks/neuralnets/rbms_dbns.ipynb
@@ -364,7 +364,7 @@
    "outputs": [],
    "source": [
     "# get the neural network\n",
-    "nn = dbn.convert_to_neural_network(sg.create_layer(\"NeuralSoftmaxLayer\", num_neurons=10))\n",
+    "nn = dbn.convert_to_neural_network(sg.create(\"NeuralSoftmaxLayer\", num_neurons=10))\n",
     "\n",
     "# add some L2 regularization\n",
     "nn.put(\"l2_coefficient\", 0.0001)\n",
@@ -388,7 +388,7 @@
    "outputs": [],
    "source": [
     "predictions = nn.apply(sg.create_features(Xtest))\n",
-    "accuracy = sg.create_evaluation(\"MulticlassAccuracy\").evaluate(predictions, sg.create_labels(Ytest)) * 100\n",
+    "accuracy = sg.create(\"MulticlassAccuracy\").evaluate(predictions, sg.create_labels(Ytest)) * 100\n",
     "\n",
     "print(\"Classification accuracy on the test set =\", accuracy, \"%\")"
    ]
diff --git a/doc/ipython-notebooks/pca/pca_notebook.ipynb b/doc/ipython-notebooks/pca/pca_notebook.ipynb
index 58f04111105..6eb281f6b2c 100644
--- a/doc/ipython-notebooks/pca/pca_notebook.ipynb
+++ b/doc/ipython-notebooks/pca/pca_notebook.ipynb
@@ -324,7 +324,7 @@
     "\n",
     "#PCA(EVD) is chosen since N=100 and D=2 (N>D).\n",
     "#However we can also use PCA(AUTO) as it will automagically choose the appropriate method. \n",
-    "preprocessor = sg.create_transformer('PCA', method='EVD')\n",
+    "preprocessor = sg.create('PCA', method='EVD')\n",
     "\n",
     "#since we are projecting down the 2d data, the target dim is 1. But here the exhaustive method is detailed by\n",
     "#setting the target dimension to 2 to visualize both the eigenvectors.\n",
@@ -694,7 +694,7 @@
     "\n",
     "#PCA(EVD) is chosen since N=100 and D=3 (N>D).\n",
     "#However we can also use PCA(AUTO) as it will automagically choose the appropriate method. \n",
\n", - "preprocessor = sg.create_transformer('PCA', method='EVD')\n", + "preprocessor = sg.create('PCA', method='EVD')\n", "\n", "#If we set the target dimension to 2, Shogun would automagically preserve the required 2 eigenvectors(out of 3) according to their\n", "#eigenvalues.\n", @@ -985,7 +985,7 @@ "outputs": [], "source": [ "train_features = sg.create_features(obs_matrix)\n", - "preprocessor= sg.create_transformer('PCA', method='AUTO')\n", + "preprocessor= sg.create('PCA', method='AUTO')\n", "\n", "preprocessor.put('target_dim', 100)\n", "preprocessor.fit(train_features)\n", @@ -1223,7 +1223,7 @@ "#To get Eucledian Distance as the distance measure use EuclideanDistance.\n", "workfeat = sg.create_features(np.mat(train_proj))\n", "testfeat = sg.create_features(np.mat(test_proj).T)\n", - "RaRb = sg.create_distance('EuclideanDistance')\n", + "RaRb = sg.create('EuclideanDistance')\n", "RaRb.init(testfeat, workfeat)\n", "\n", "#The distance between one test image w.r.t all the training is stacked in matrix d.\n", diff --git a/doc/ipython-notebooks/regression/Regression.ipynb b/doc/ipython-notebooks/regression/Regression.ipynb index 7f8b7b48de4..e4b00d7e15b 100644 --- a/doc/ipython-notebooks/regression/Regression.ipynb +++ b/doc/ipython-notebooks/regression/Regression.ipynb @@ -142,7 +142,7 @@ "metadata": {}, "outputs": [], "source": [ - "ls = sg.create_machine(\"LeastSquaresRegression\", labels=labels_train, features=feats_train)\n", + "ls = sg.create(\"LeastSquaresRegression\", labels=labels_train, features=feats_train)\n", "ls.train(feats_train)\n", "w = ls.get('w')\n", "print('Weights:')\n", @@ -244,7 +244,7 @@ "outputs": [], "source": [ "tau = 0.8\n", - "rr = sg.create_machine(\"LinearRidgeRegression\", tau=tau, features=feats_train, labels=labels_train)\n", + "rr = sg.create(\"LinearRidgeRegression\", tau=tau, features=feats_train, labels=labels_train)\n", "rr.train(feats_train)\n", "w = rr.get('w')\n", "print(w)\n", @@ -307,11 +307,11 @@ " return X, y.T\n", "\n", "def generate_weights(taus, feats_train, labels_train, use_bias=True):\n", - " preproc = sg.create_transformer(\"PruneVarSubMean\", divide_by_std=True)\n", + " preproc = sg.create(\"PruneVarSubMean\", divide_by_std=True)\n", " preproc.fit(feats_train)\n", " processed_feats = preproc.transform(feats_train) \n", " weights = []\n", - " rr = sg.create_machine(\"LinearRidgeRegression\", tau=tau, labels=labels_train, use_bias=use_bias)\n", + " rr = sg.create(\"LinearRidgeRegression\", tau=tau, labels=labels_train, use_bias=use_bias)\n", " \n", " #vary regularization\n", " for t in taus:\n", @@ -353,13 +353,13 @@ " for t in taus:\n", " rr.put(\"use_bias\", use_bias)\n", " rr.put('tau', t)\n", - " splitting_strategy = sg.create_splitting_strategy(\"CrossValidationSplitting\",\n", + " splitting_strategy = sg.create(\"CrossValidationSplitting\",\n", " labels=labels_train, \n", " num_subsets=5)\n", " # evaluation method\n", - " evaluation_criterium = sg.create_evaluation(\"MeanSquaredError\")\n", + " evaluation_criterium = sg.create(\"MeanSquaredError\")\n", " # cross-validation instance\n", - " cross_validation = sg.create_machine_evaluation(\"CrossValidation\", \n", + " cross_validation = sg.create(\"CrossValidation\", \n", " machine=rr, \n", " features=feats_train, \n", " labels=labels_train, \n", @@ -526,11 +526,11 @@ "outputs": [], "source": [ "#Preprocess data\n", - "preproc=sg.create_transformer(\"PruneVarSubMean\")\n", + "preproc=sg.create(\"PruneVarSubMean\")\n", "preproc.fit(feats_train)\n", "feats_train = 
     "feats_train = preproc.transform(feats_train) \n",
     "\n",
-    "preprocessor=sg.create_transformer(\"NormOne\")\n",
+    "preprocessor=sg.create(\"NormOne\")\n",
     "preprocessor.fit(feats_train)\n",
     "feats_train = preprocessor.transform(feats_train)\n",
     "\n",
@@ -552,7 +552,7 @@
    "outputs": [],
    "source": [
     "#Train and generate weights\n",
-    "la=sg.create_machine(\"LeastAngleRegression\")\n",
+    "la=sg.create(\"LeastAngleRegression\")\n",
     "la.put('labels', labels_train)\n",
     "la.train(feats_train)\n",
     "\n",
@@ -654,7 +654,7 @@
     "crime_rate = mat[0]\n",
     "feats_train = sg.create_features(crime_rate.reshape(1, len(mat[0])))\n",
     "\n",
-    "preproc=sg.create_transformer(\"RescaleFeatures\")\n",
+    "preproc=sg.create(\"RescaleFeatures\")\n",
     "preproc.fit(feats_train)\n",
     "feats_train = preproc.transform(feats_train)\n",
     "\n",
@@ -674,7 +674,7 @@
     "width=0.5\n",
     "tau=0.5\n",
     "kernel=sg.create_kernel(\"GaussianKernel\", width=width)\n",
-    "krr=sg.create_machine(\"KernelRidgeRegression\", tau=tau, kernel=kernel, labels=train_labels)\n",
+    "krr=sg.create(\"KernelRidgeRegression\", tau=tau, kernel=kernel, labels=train_labels)\n",
     "krr.train(feats_train)\n",
     "\n",
     "feats_test=sg.create_features(x1.reshape(1,len(x1)))\n",
@@ -771,7 +771,7 @@
    "source": [
     "svr_param=1\n",
     "svr_C=10\n",
-    "svr=sg.create_machine(\"LibSVR\", C1=svr_C, C2=svr_C, epsilon=svr_param, \n",
+    "svr=sg.create(\"LibSVR\", C1=svr_C, C2=svr_C, epsilon=svr_param, \n",
     "       kernel=gaussian_kernel, labels=train_labels, \n",
     "       libsvr_solver_type=\"LIBSVR_EPSILON_SVR\")\n",
     "\n",
@@ -826,7 +826,7 @@
     "    time_nus=[]\n",
     "    for i in range(len(epsilons)):\n",
     "        svr_param=1\n",
-    "        svr=sg.create_machine(\"LibSVR\", C1=svr_C, C2=svr_C, epsilon=epsilons[i], \n",
+    "        svr=sg.create(\"LibSVR\", C1=svr_C, C2=svr_C, epsilon=epsilons[i], \n",
     "               kernel=gaussian_kernel, labels=train_labels, \n",
     "               libsvr_solver_type=\"LIBSVR_EPSILON_SVR\")\n",
     "        t_start=time.process_time()\n",
@@ -836,7 +836,7 @@
     "    \n",
     "    for i in range(len(nus)):\n",
     "        svr_param=1\n",
-    "        svr=sg.create_machine(\"LibSVR\", C1=svr_C, C2=svr_C, nu=epsilons[i], \n",
+    "        svr=sg.create(\"LibSVR\", C1=svr_C, C2=svr_C, nu=nus[i], \n",
     "               kernel=gaussian_kernel, labels=train_labels, \n",
     "               libsvr_solver_type=\"LIBSVR_NU_SVR\")\n",
     "        t_start=time.process_time()\n",
diff --git a/doc/ipython-notebooks/structure/Binary_Denoising.ipynb b/doc/ipython-notebooks/structure/Binary_Denoising.ipynb
index 7d6f119731b..36a709ceefd 100644
--- a/doc/ipython-notebooks/structure/Binary_Denoising.ipynb
+++ b/doc/ipython-notebooks/structure/Binary_Denoising.ipynb
@@ -331,12 +331,12 @@
     "    # unary, type id = 0\n",
     "    cards_u = np.array([num_status], np.int32) # cardinalities\n",
     "    w_u = np.zeros(num_status*dim_feat, np.float64)\n",
-    "    ftype_unary = sg.create_factor_type(\"TableFactorType\", type_id=0, cards=cards_u, w=w_u)\n",
+    "    ftype_unary = sg.create(\"TableFactorType\", type_id=0, cards=cards_u, w=w_u)\n",
     "\n",
     "    # pairwise, type id = 1\n",
     "    cards_p = np.array([num_status, num_status], np.int32)\n",
     "    w_p = np.zeros(num_status*num_status, np.float64)\n",
-    "    ftype_pair = sg.create_factor_type(\"TableFactorType\", type_id=1, cards=cards_p, w=w_p)\n",
+    "    ftype_pair = sg.create(\"TableFactorType\", type_id=1, cards=cards_p, w=w_p)\n",
     "\n",
     "    return ftype_unary, ftype_pair"
    ]
@@ -484,7 +484,7 @@
     "import time\n",
     "\n",
     "# Training with Stochastic Gradient Descent\n",
-    "sgd = sg.create_machine(\"StochasticSOSVM\", model=model, labels=labels_train, \n",
+    "sgd = sg.create(\"StochasticSOSVM\", model=model, labels=labels_train, \n",
     "                      do_weighted_averaging=True)\n",
     "sgd.put('num_iter', 300)\n",
     "sgd.put('lambda', 0.0001)\n",
diff --git a/doc/ipython-notebooks/structure/multilabel_structured_prediction.ipynb b/doc/ipython-notebooks/structure/multilabel_structured_prediction.ipynb
index 2d7c06bd21e..c552375d14b 100644
--- a/doc/ipython-notebooks/structure/multilabel_structured_prediction.ipynb
+++ b/doc/ipython-notebooks/structure/multilabel_structured_prediction.ipynb
@@ -216,8 +216,8 @@
    "source": [
     "import time\n",
     "\n",
-    "sgd = sg.create_machine(\"StochasticSOSVM\", model=model, labels=labels)\n",
-    "sgd_with_bias = sg.create_machine(\"StochasticSOSVM\", model=model_with_bias, labels=labels)\n",
+    "sgd = sg.create(\"StochasticSOSVM\", model=model, labels=labels)\n",
+    "sgd_with_bias = sg.create(\"StochasticSOSVM\", model=model_with_bias, labels=labels)\n",
     "\n",
     "start = time.process_time()\n",
     "sgd.train()\n",
@@ -262,7 +262,7 @@
     "    test_labels = create_labels(Y_test, n_classes)\n",
     "    \n",
     "    out_labels = machine.apply(feats_test)\n",
-    "    evaluator = sg.create_evaluation(\"StructuredAccuracy\")\n",
+    "    evaluator = sg.create(\"StructuredAccuracy\")\n",
     "    jaccard_similarity_score = evaluator.evaluate(out_labels, test_labels)\n",
     "    \n",
     "    return jaccard_similarity_score "
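
For review purposes, every hunk above makes the same mechanical substitution: the type-specific factories (sg.create_machine, sg.create_layer, sg.create_evaluation, sg.create_transformer, sg.create_distance, sg.create_multiclass_strategy, sg.create_factor_type, sg.create_machine_evaluation, sg.create_splitting_strategy) collapse into the single auto-dispatching sg.create. A minimal end-to-end sketch of the resulting API, assuming the `import shogun as sg` alias used throughout the notebooks and toy NumPy data (the class and parameter names are taken directly from the hunks above; this is an illustration, not part of the patch):

    import numpy as np
    import shogun as sg

    # toy 3-class problem: 2 features, 30 samples (columns are samples)
    X = np.random.randn(2, 30)
    y = np.repeat([0.0, 1.0, 2.0], 10)

    feats = sg.create_features(X)
    labels = sg.create_labels(y)

    # one factory for everything -- machines, strategies, evaluations, ...
    # are all dispatched by class name through sg.create
    svm = sg.create("MulticlassLibLinear", C=1.0, labels=labels, use_bias=True)
    svm.train(feats)

    pred = svm.apply(feats)
    acc = sg.create("MulticlassAccuracy").evaluate(pred, labels)
    print("training accuracy: %.4f" % acc)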