diff --git a/paper/paper.bib b/paper/paper.bib index 752a130b..9e490f64 100644 --- a/paper/paper.bib +++ b/paper/paper.bib @@ -194,7 +194,17 @@ @article{matthews_gpflow_2017 file = {Available Version (via Google Scholar):/Users/mstoffel/Zotero/storage/748FHRT5/Matthews et al. - 2017 - GPflow A Gaussian process library using TensorFlow.pdf:application/pdf}, } -@misc{noauthor_notitle_nodate, + +@article{paszke_pytorch_2019, + title = {{PyTorch}: {An} imperative style, high-performance deep learning library}, + volume = {32}, + shorttitle = {{PyTorch}}, + url = {https://proceedings.neurips.cc/paper/2019/hash/bdbca288fee7f92f2bfa9f7012727740-Abstract.html}, + urldate = {2024-11-29}, + journal = {Advances in Neural Information Processing Systems}, + author = {Paszke, Adam and Gross, Sam and Massa, Francisco and Lerer, Adam and Bradbury, James and Chanan, Gregory and Killeen, Trevor and Lin, Zeming and Gimelshein, Natalia and Antiga, Luca and others}, + year = {2019}, + file = {Available Version (via Google Scholar):files/4267/Paszke et al. - 2019 - Pytorch An imperative style, high-performance deep learning library.pdf:application/pdf}, } @book{tietz_skorch_2017, diff --git a/paper/paper.md b/paper/paper.md index e85c563d..aaa7dbf8 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -84,7 +84,7 @@ ae.summarise_cv() # metrics for each model | LightGBM | lgbm | 0.6044 | 0.4930 | | Second Order Polynomial | sop | 0.8378 | 0.0297 | -After choosing an emulator based on cross-validation metrics and visualisations, it can be evaluated on the test set, with a default size of 20% of the original dataset. +After choosing an emulator based on its cross-validation performance, it can be evaluated on the test set, which by default is 20% of the original dataset. AutoEmulate provides various visualisations in addition to the metrics. 
```python emulator = ae.get_model("GaussianProcess") @@ -94,7 +94,7 @@ ae.plot_eval(emulator) # visualise test set predictions ![Test set predictions](eval_2.png) -If the test-set performance is acceptable, the emulator can be refitted on the combined training and test data before applying it. It's now ready to be used as an efficient replacement for the original simulation, allowing to generate tens of thousands of new data points in seconds using predict(). We implemented global sensitivity analysis as a common use-case, which decomposes the variance in the outputs into the contributions of the various simulation parameters and their interactions. +If the test-set performance is acceptable, the emulator can be refitted on the combined training and test data before applying it. It's now ready to be used as an efficient replacement for the original simulation, and is able to generate tens of thousands of new data points in seconds using `predict()`. We implemented global sensitivity analysis as a common use case, which decomposes the variance in the outputs into the contributions of the various simulation parameters and their interactions. ```python emulator = ae.refit(emulator) # refit using full data