diff --git a/docs/_posts/algorithm-design/2023-08-01choose-an-energy-efficient-algorithm.md b/docs/_posts/algorithm-design/2023-08-01choose-an-energy-efficient-algorithm.md
deleted file mode 100644
index 501c4f1..0000000
--- a/docs/_posts/algorithm-design/2023-08-01choose-an-energy-efficient-algorithm.md
+++ /dev/null
@@ -1,22 +0,0 @@
----
-layout: tactic
-
-title: "Choose an energy-efficient algorithm"
-tags: machine-learning algorithms design-tactic
-t-sort: "Awesome Tactic"
-t-type: "Architectural Tactic"
-categories: algorithm-design
-t-description: "Different ML algorithms have different levels of energy consumption. For example, the K-nearest-neighbor algorithm has considerably higher energy consumption than Random Forest (Kaack et al., 2022). High energy consumption does not necessarily mean that those algorithms perform better or achieve higher accuracy than low-energy algorithms. Thus, choosing a suitable, energy-efficient algorithm that achieves the desired outcomes can reduce the energy consumption of ML models (Kaack et al., 2022)."
-t-participant: "Data Scientist"
-t-artifact: "Algorithm"
-t-context: "Machine Learning"
-t-feature: "Inference"
-t-intent: "Choose an energy-efficient algorithm that can achieve the desired model outcomes"
-t-targetQA: "Energy efficiency"
-t-relatedQA: "Appropriateness"
-t-measuredimpact:
-t-source: "Master Thesis 'Green tactics for ML-important QAs' by Heli Järvenpää (2023);
-Kaack, L. H., Donti, P. L., Strubell, E., Kamiya, G., Creutzig, F., & Rolnick, D. (2022). Aligning artificial intelligence with climate change mitigation. Nature Climate Change, 12(6), 518-527."
-t-source-doi: "DOI:10.1038/s41558-022-01377-7"
-t-diagram: "choose-an-energy-efficient-algorithm.png"
----
\ No newline at end of file
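As an illustration of the tactic above, candidate algorithms can be compared empirically before committing to one. A minimal sketch, assuming the CodeCarbon package and scikit-learn are available (neither is mandated by the tactic itself):

```python
# Compare the estimated energy/emissions of two candidate algorithms.
from codecarbon import EmissionsTracker
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier

X, y = make_classification(n_samples=20_000, n_features=40, random_state=0)

for name, model in [("knn", KNeighborsClassifier()),
                    ("random_forest", RandomForestClassifier())]:
    tracker = EmissionsTracker(project_name=name, log_level="error")
    tracker.start()
    model.fit(X, y)
    model.predict(X)        # include inference, the feature this tactic targets
    kg = tracker.stop()     # estimated kg CO2eq for this fit/predict cycle
    print(f"{name}: ~{kg:.6f} kg CO2eq")
```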
-" -t-source-doi: "DOI:10.1109/MICRO50266.2020.00090 ; -doi: 10.3390/app10207120 " -t-diagram: "consider-reinforcement-learning-for-energy-efficiency.png" ---- \ No newline at end of file diff --git a/docs/_posts/algorithm-design/2023-08-01decrease-model-complexity.md b/docs/_posts/algorithm-design/2023-08-01decrease-model-complexity.md deleted file mode 100644 index f9ba7f2..0000000 --- a/docs/_posts/algorithm-design/2023-08-01decrease-model-complexity.md +++ /dev/null @@ -1,28 +0,0 @@ ---- -layout: tactic - -title: "Decrease model complexity" -tags: machine-learning algorithms design-tactic -t-sort: "Awesome Tactic" -t-type: "Architectural Tactic" -categories: algorithm-design -t-description: "Complex AI models have shown to have high energy consumption and therefore scaling down the model complexity can contribute to environmental sustainability. For example, using simple three-layered Convolutional Neural Network architecture to learn post-processing tasks of CT-scans (Morotti et al), using shallower Decision trees (Abreu et al 2020). " -t-participant: "Data Scientist" -t-artifact: "Algorithm" -t-context: "Machine Learning" -t-feature: "Inference" -t-intent: "Decreasing the model complexity makes ML algorithms simpler without sacrificing too much accuracy. These simplified models require less computing power which makes them more energy-efficient." -t-targetQA: "Energy efficiency" -t-relatedQA: -t-measuredimpact: -t-source: "Master Thesis 'Green tactics for ML-important QAs' by Heli Järvenpää (2023); -Morotti, E., Evangelista, D., & Loli Piccolomini, E. (2021). A green prospective for learned post-processing in sparse-view tomographic reconstruction. Journal of Imaging, 7(8), 139. - -Abreu, B. A., Grellert, M., & Bampi, S. (2020, October). Vlsi design of tree-based inference for low-power learning applications. In 2020 IEEE International Symposium on Circuits and Systems (ISCAS) (pp. 1-5). IEEE." - -t-source-doi: "DOI:10.3390/jimaging7080139; - -DOI:10.1109/ISCAS45731.2020.9180704 -" -t-diagram: "decrease-model-complexity.png" ---- \ No newline at end of file diff --git a/docs/_posts/algorithm-design/2023-08-01design-dynamic-parameter-adaptation.md b/docs/_posts/algorithm-design/2023-08-01design-dynamic-parameter-adaptation.md deleted file mode 100644 index af3945a..0000000 --- a/docs/_posts/algorithm-design/2023-08-01design-dynamic-parameter-adaptation.md +++ /dev/null @@ -1,25 +0,0 @@ ---- -layout: tactic - -title: "Design dynamic parameter adaptation" -tags: machine-learning algorithms design-tactic energy-footprint measured -t-sort: "Awesome Tactic" -t-type: "Architectural Tactic" -categories: algorithm-design -t-description: "Dynamic parameter adaptation means that the hyperparameters of a ML model are dynamically adapted based on the input data, instead of determining the exact parameters values in the algorithm. For example, García-Martín et al used an nmin adaptation method for very fast decision trees. The nmin method allows the algorithm to grow faster in those branches where there is more confidence in creating a split and delaying the split on the less confident branches. This method resulted in decreased energy consumtpion." 
-t-participant: "Data Scientist" -t-artifact: "Algorithm" -t-context: "Machine Learning" -t-feature: "Inference" -t-intent: "Design parameters that are dynamically adapted based on the input data" -t-targetQA: "Energy efficiency" -t-relatedQA: "Accuracy" -t-measuredimpact: "Using nmin method in very fast decision trees resulted in lower energy consumption in 22 out of 29 of the tested datasets, with an average of 7% decrease in energy footprint. Additionally, nmin showed higher accuracy for 55% of the datasets, with an average difference of less than 1%." -t-source: "Master Thesis 'Green tactics for ML-important QAs' by Heli Järvenpää (2023); -"Kim, Y. G., & Wu, C. J. (2020, October). Autoscale: Energy efficiency optimization for stochastic edge inference using reinforcement learning. In 2020 53rd Annual IEEE/ACM International Symposium on Microarchitecture (MICRO) (pp. 1082-1096). IEEE. - -Mohammed, T., Albeshri, A., Katib, I., & Mehmood, R. (2020). UbiPriSEQ—Deep reinforcement learning to manage privacy, security, energy, and QoS in 5G IoT hetnets. Applied Sciences, 10(20), 7120." -" -t-source-doi: "DOI:10.1007/s41060-021-00246-4 " -t-diagram: "design-dynamic-parameter-adaptation.png" ---- \ No newline at end of file diff --git a/docs/_posts/algorithm-design/2023-08-01select-a-lightweight-algorithm-alternative.md b/docs/_posts/algorithm-design/2023-08-01select-a-lightweight-algorithm-alternative.md deleted file mode 100644 index 11c3750..0000000 --- a/docs/_posts/algorithm-design/2023-08-01select-a-lightweight-algorithm-alternative.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -layout: tactic - -title: "Select a lightweight algorithm alternative" -tags: machine-learning algorithms design-tactic -t-sort: "Awesome Tactic" -t-type: "Architectural Tactic" -categories: algorithm-design -t-description: "Some algorithms may have light-weight alternatives. Using these lighter models can have a lower impact on the environment without a loss of important quality attributes. For example Sorbaro et a (2020), noted that Spiking neural networks is an altretnative for convolutional neural networks. CNN can be converded to SNN without a significant loss of accuracy or performance" -t-participant: "Data Scientist" -t-artifact: "Algorithm" -t-context: "Machine Learning" -t-feature: "Inference" -t-intent: "If possible, choose lighter alternatives of existing algorithms" -t-targetQA: "Energy efficiency" -t-relatedQA: "Accuracy, Performance" -t-measuredimpact: -t-source: "Master Thesis 'Green tactics for ML-important QAs' by Heli Järvenpää (2023); -Sorbaro, M., Liu, Q., Bortone, M., & Sheik, S. (2020). Optimizing the energy consumption of spiking neural networks for neuromorphic applications. Frontiers in neuroscience, 14, 662." 
-t-source-doi: "doi: 10.1016/S0925-2312(01)00658-0" -t-diagram: "select-a-lightweight-algorithm-alternative.png" ---- \ No newline at end of file diff --git a/docs/_posts/algorithm-design/2023-08-01use-built-in-library-functions.md b/docs/_posts/algorithm-design/2023-08-01use-built-in-library-functions.md deleted file mode 100644 index 0d167de..0000000 --- a/docs/_posts/algorithm-design/2023-08-01use-built-in-library-functions.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -layout: tactic - -title: "Use built-in library functions" -tags: machine-learning algorithm-design design-tactic libraries -t-sort: "Awesome Tactic" -t-type: "Architectural Tactic" -categories: "algorithm-design" -t-description: "Apply built-in library functions in the machine learning model instead of writing custom implementations. The existing built-in library functions are usually optimized and well-tested, which is why they may have improved performance and energy efficiency compared to custom-made functions. These built-in libraries can be used for instance for tensor operations " -t-participant: "Data Scientist" -t-artifact: "Algorithm" -t-context: "Machine Learning" -t-feature: -t-intent: "Use built-in libraried for ML models if possible." -t-targetQA: "Performance" -t-relatedQA: "Energy efficiency" -t-measuredimpact: -t-source: "Master Thesis 'Green tactics for ML-important QAs' by Heli Järvenpää (2023); ." - -t-source-doi: "DOI:10.1145/3530019.3530035" -t-diagram: "use-built-in-library-functions.png" ---- \ No newline at end of file diff --git a/docs/_posts/data-centric/2023-08-01apply_sampling_techniques.md b/docs/_posts/data-centric/2023-08-01apply_sampling_techniques.md deleted file mode 100644 index 8a30c9f..0000000 --- a/docs/_posts/data-centric/2023-08-01apply_sampling_techniques.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -layout: tactic - -title: "Apply sampling techniques" -tags: data-processing machine-learning design-tactic measured energy-footprint -t-sort: "Awesome Tactic" -t-type: "Architectural Tactic" -categories: data-centric -t-description: "The size of input data seems to have a positive correlation with the energy consumption of computing. Therefore reducing the size of input data can have a positive impact on energy-efficiency of ML. Reducing input data can be done by using only a subset of the original input data. This is called sampling. There are some different ways of conducting sampling (e.g. Simple random sampling or Systematic sampling), but Verdecchia et al. (2022) used stratified sampling which means randomly selecting datapoints from homogeneous subgroups of the original dataset (2022)." -t-participant: "Data Scientist" -t-artifact: "Data" -t-context: "Machine Learning" -t-feature: -t-intent: "Using a subset of the original input data for training and inference" -t-targetQA: "Energy Efficiency" -t-relatedQA: "Accuracy, data representativeness" -t-measuredimpact: "Sampling can lead to savings in energy consumption. Verdecchia et al (2022) achieved decrease in energy consumption of up to 92% " -t-source: "Master Thesis 'Green tactics for ML-important QAs ' by Heli Järvenpää (2023), - -Verdecchia, R., Cruz, L., Sallou, J., Lin, M., Wickenden, J., & Hotellier, E. (2022, June). Data-centric green ai an exploratory empirical study. In 2022 International Conference on ICT for Sustainability (ICT4S) (pp. 35-45). IEEE." 
-t-source-doi: "DOI: 10.1109/ICT4S55073.2022.00015" -t-diagram: "apply-sampling-techniques.png" ---- \ No newline at end of file diff --git a/docs/_posts/data-centric/2023-08-01project_data_into_a_lower-dimensional_embedding.md b/docs/_posts/data-centric/2023-08-01project_data_into_a_lower-dimensional_embedding.md deleted file mode 100644 index 556d185..0000000 --- a/docs/_posts/data-centric/2023-08-01project_data_into_a_lower-dimensional_embedding.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -layout: tactic - -title: "Project data into a lower-dimensional embedding" -tags: data-processing machine-learning design-tactic measured energy-footprint -t-sort: "Awesome Tactic" -t-type: "Architectural Tactic" -categories: data-centric -t-description: "Data projection means transforming data into a lower-dimensional embedding and using data to optimize the projection parameters. Reducing the dimensionality of input data shrinks the dimensionality of the overall DNN, which leads to improved performance" -t-participant: "Data Scientist" -t-artifact: "Data" -t-context: "Machine Learning" -t-feature: -t-intent: "Project data into lower-dimensional embedding" -t-targetQA: "Performance" -t-relatedQA: "Accuracy" -t-measuredimpact: -t-diagram: "data-projection.png" -t-source: "Master Thesis 'Green tactics for ML-important QAs' by Heli Järvenpää (2023)" -t-source-doi: ---- \ No newline at end of file diff --git a/docs/_posts/data-centric/2023-08-01reduce_number_of_data_features.md b/docs/_posts/data-centric/2023-08-01reduce_number_of_data_features.md deleted file mode 100644 index 6030241..0000000 --- a/docs/_posts/data-centric/2023-08-01reduce_number_of_data_features.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -layout: tactic - -title: "Reduce the number of data features" -tags: data-processing machine-learning design-tactic -t-sort: "Awesome Tactic" -t-type: "Architectural Tactic" -categories: data-centric -t-description: "A huge number of data features can lead to a high computing power in training and inference. Reducing these data features can lead to improved performance while still maintaining accuracy. Reducing the number of input features can be done with selecting only a subset of all the available data features." -t-participant: "Data Scientist" -t-artifact: "Data" -t-context: "Machine Learning" -t-feature: -t-intent: "Reducing the number of data features by choosing only a subset of all the available features" -t-targetQA: "Energy Efficiency" -t-relatedQA: "Accuracy, Data representativeness" -t-measuredimpact: "Number of input features can result in a reduction of energy consumption while still maintaining accuracy." -t-source: -"Master Thesis 'Green tactics for ML-important QAs ' by Heli Järvenpää (2023)" -t-source-doi: -T-diagram: "reduce-number-of-data-features.png" ---- diff --git a/docs/_posts/data-centric/2023-08-01remove_redundant_data.md b/docs/_posts/data-centric/2023-08-01remove_redundant_data.md deleted file mode 100644 index a255204..0000000 --- a/docs/_posts/data-centric/2023-08-01remove_redundant_data.md +++ /dev/null @@ -1,24 +0,0 @@ ---- -layout: tactic - -title: "Remove redundant data" -tags: data-processing machine-learning design-tactic -t-sort: "Awesome Tactic" -t-type: "Architectural Tactic" -categories: data-centric -t-description: "Identifying and removing redundant data for ML models reduces computing time, number of computing, energy consumption and memory space. Redundant data refers to those datapoints that don’t improve the accuracy of the model. 
diff --git a/docs/_posts/data-centric/2023-08-01remove_redundant_data.md b/docs/_posts/data-centric/2023-08-01remove_redundant_data.md
deleted file mode 100644
index a255204..0000000
--- a/docs/_posts/data-centric/2023-08-01remove_redundant_data.md
+++ /dev/null
@@ -1,24 +0,0 @@
----
-layout: tactic
-
-title: "Remove redundant data"
-tags: data-processing machine-learning design-tactic
-t-sort: "Awesome Tactic"
-t-type: "Architectural Tactic"
-categories: data-centric
-t-description: "Identifying and removing redundant data for ML models reduces computing time, the number of computations, energy consumption, and memory usage. Redundant data refers to datapoints that do not improve the accuracy of the model. Thus, removing these unimportant datapoints does not sacrifice much accuracy (Dhabe et al., 2021)."
-t-participant: "Data Scientist"
-t-artifact: "Data"
-t-context: "Machine Learning"
-t-feature:
-t-intent: "Detecting and removing redundant data reduces the size of the input data, which can result in less computation"
-t-targetQA: "Energy efficiency"
-t-relatedQA: "Accuracy, data representativeness"
-t-measuredimpact: "Removing redundant data from the dataset leads to smaller input data, which further decreases computations, computation time, electricity use, and memory usage"
-t-source: "Master Thesis 'Green tactics for ML-important QAs' by Heli Järvenpää (2023);
-
-Dhabe, P., Mirani, P., Chugwani, R., & Gandewar, S. (2021). Data Set Reduction to Improve Computing Efficiency and Energy Consumption in Healthcare Domain. In Digital Literacy and Socio-Cultural Acceptance of ICT in Developing Countries (pp. 53-64). Cham: Springer International Publishing."
-
-t-diagram: "remove-redundant-data.png"
-t-source-doi: "DOI:10.1007/978-3-030-61089-0_4"
----
diff --git a/docs/_posts/data-centric/2023-08-01use_input_quantization.md b/docs/_posts/data-centric/2023-08-01use_input_quantization.md
deleted file mode 100644
index 2cad685..0000000
--- a/docs/_posts/data-centric/2023-08-01use_input_quantization.md
+++ /dev/null
@@ -1,22 +0,0 @@
----
-layout: tactic
-
-title: "Use input quantization"
-tags: data-processing machine-learning design-tactic
-t-sort: "Awesome Tactic"
-t-type: "Architectural Tactic"
-categories: data-centric
-t-description: "In machine learning, input quantization refers to converting data to a smaller precision (e.g., reducing the number of bits per value). For example, Abreu et al. (2022) investigated different input widths (bits) and found that 10 bits are enough for accuracy; increasing the number of bits further does not improve accuracy and only wastes resources. Quantization may even have a positive impact on accuracy, since excessive data precision can lead to overfitting of a machine learning model."
-t-participant: "Data Scientist"
-t-artifact: "Data"
-t-context: "Machine Learning"
-t-feature:
-t-intent: "Reduce the data precision with input quantization"
-t-targetQA: "Accuracy"
-t-relatedQA: "Energy efficiency"
-t-measuredimpact:
-t-source: "Master Thesis 'Green tactics for ML-important QAs' by Heli Järvenpää (2023);
-Abreu, B., Grellert, M., & Bampi, S. (2022). A framework for designing power-efficient inference accelerators in tree-based learning applications. Engineering Applications of Artificial Intelligence, 109, 104638."
-t-source-doi: "DOI:10.1016/j.engappai.2021.104638"
-t-diagram: "use-input-quantization.png"
----
\ No newline at end of file
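An input-quantization sketch in NumPy: map float features onto an n-bit grid (10 bits here, the width Abreu et al. found sufficient in their setting).

```python
import numpy as np

def quantize(x, bits=10):
    levels = 2 ** bits - 1
    lo, hi = x.min(), x.max()
    codes = np.round((x - lo) / (hi - lo) * levels)  # integer codes 0..levels
    return codes / levels * (hi - lo) + lo           # dequantized values

x = np.random.randn(1_000).astype(np.float32)
xq = quantize(x)
print(np.abs(x - xq).max())  # error is bounded by half a quantization step
```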
diff --git a/docs/_posts/deployment/2023-08-01-apply-cloud-fog-network.md b/docs/_posts/deployment/2023-08-01-apply-cloud-fog-network.md
deleted file mode 100644
index acb2805..0000000
--- a/docs/_posts/deployment/2023-08-01-apply-cloud-fog-network.md
+++ /dev/null
@@ -1,23 +0,0 @@
----
-layout: tactic
-
-title: "Apply Cloud Fog Network"
-tags: machine-learning deployment architecture measured energy-footprint
-t-sort: "Awesome Tactic"
-t-type: "Architectural Tactic"
-categories: deployment
-t-description: "Instead of using distant cloud data centers, there are ways to bring the cloud closer to the edge devices. A cloud fog network (CFN) can be used for more energy-efficient processing. A CFN supports an architecture where deep neural network models are processed in servers between end devices and clouds. Yosuf et al. (2021) present an architecture that consists of four layers: IoT end devices, Access Fog (AF), Metro Fog (MF), and Cloud Datacenter (CDC)."
-t-participant: "Software Designer"
-t-artifact: "Algorithm - deep neural network"
-t-context: "Network"
-t-feature:
-t-intent: "Apply a cloud fog network"
-t-targetQA: "Performance"
-t-relatedQA: "Energy efficiency"
-t-measuredimpact: "On average, the use of a cloud fog network (CFN) architecture led to a 68% reduction in power consumption compared to a traditional cloud data center architecture."
-t-source: "Master Thesis 'Green tactics for ML-important QAs' by Heli Järvenpää (2023);
-Yosuf, B. A., Mohamed, S. H., Alenazi, M. M., El-Gorashi, T. E., & Elmirghani, J. M. (2021, June). Energy-Efficient AI over a Virtualized Cloud Fog Network. In Proceedings of the Twelfth ACM International Conference on Future Energy Systems (pp. 328-334)."
-t-source-doi: "DOI:10.1145/3447555.3465378"
-t-diagram: "apply-cloud-fog-network.png"
-
----
\ No newline at end of file
diff --git a/docs/_posts/deployment/2023-08-01-avoid-unnecessary-referencing-to-data.md b/docs/_posts/deployment/2023-08-01-avoid-unnecessary-referencing-to-data.md
deleted file mode 100644
index 0ac7ffd..0000000
--- a/docs/_posts/deployment/2023-08-01-avoid-unnecessary-referencing-to-data.md
+++ /dev/null
@@ -1,22 +0,0 @@
----
-layout: tactic
-
-title: "Avoid unnecessary referencing to data"
-tags: machine-learning deployment design-tactic
-t-sort: "Awesome Tactic"
-t-type: "Architectural Tactic"
-categories: deployment
-t-description: "Machine learning models require reading and writing enormous amounts of data in the ML workflow. Reading data means retrieving information from storage, while writing data means storing or updating it. These operations may cause unnecessary data movement and memory usage, which influences the energy consumption of computing. To avoid non-essential referencing of data, reading and writing operations must be designed carefully."
-t-participant: "Software Designer"
-t-artifact:
-t-context:
-t-feature:
-t-intent: "Avoid unnecessary reading and writing operations on data"
-t-targetQA: "Energy efficiency"
-t-relatedQA: "Resource utilization"
-t-measuredimpact:
-t-source: "Master Thesis 'Green tactics for ML-important QAs' by Heli Järvenpää (2023);
-Shanbhag, S., Chimalakonda, S., Sharma, V. S., & Kaulgud, V. (2022, June). Towards a Catalog of Energy Patterns in Deep Learning Development. In Proceedings of the International Conference on Evaluation and Assessment in Software Engineering 2022 (pp. 150-159)."
-t-source-doi: "DOI:10.1145/3530019.3530035"
-t-diagram: "avoid-unnecessary-referencing-to-data.png"
----
\ No newline at end of file
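A sketch of the data-referencing tactic, with an illustrative file name: hoist the read out of the training loop so storage is referenced once, not once per epoch; memory-mapping additionally avoids copying the whole array into RAM.

```python
import numpy as np

def train_epoch(data):
    pass  # placeholder for one pass over the data

# Wasteful alternative: calling np.load inside the loop would hit storage
# (and re-parse the file) on every epoch.
data = np.load("train.npy", mmap_mode="r")  # single read, memory-mapped
for epoch in range(10):
    train_epoch(data)  # reuses the same reference; no repeated np.load here
```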
diff --git a/docs/_posts/deployment/2023-08-01-consider-federated-learning.md b/docs/_posts/deployment/2023-08-01-consider-federated-learning.md
deleted file mode 100644
index 389bf3a..0000000
--- a/docs/_posts/deployment/2023-08-01-consider-federated-learning.md
+++ /dev/null
@@ -1,21 +0,0 @@
----
-layout: tactic
-
-title: "Consider federated learning"
-tags: machine-learning deployment
-t-sort: "Awesome Tactic"
-t-type: "Architectural Tactic"
-categories: deployment
-t-description: "Federated learning (FL) is a machine learning approach that aims to train a shared ML model on decentralized devices. Instead of sending raw data to a central server, FL trains the model directly on the devices where the data is generated, such as mobile phones or edge devices. Only the updated model parameters are then sent to a central server. Federated learning decreases the resources needed for transferring large amounts of data to a central server, which results in improved energy efficiency."
-t-participant: "Software Designer"
-t-artifact: "Decentralized device"
-t-context: "Machine Learning"
-t-feature: "Model Training"
-t-intent: "Apply federated learning if applicable"
-t-targetQA: "Energy efficiency"
-t-relatedQA:
-t-measuredimpact:
-t-source: "Master Thesis 'Green tactics for ML-important QAs' by Heli Järvenpää (2023)"
-t-source-doi:
-t-diagram: "consider-federated-learning.png"
----
\ No newline at end of file
diff --git a/docs/_posts/deployment/2023-08-01-design-energy-aware-scheduling.md b/docs/_posts/deployment/2023-08-01-design-energy-aware-scheduling.md
deleted file mode 100644
index 69d5e90..0000000
--- a/docs/_posts/deployment/2023-08-01-design-energy-aware-scheduling.md
+++ /dev/null
@@ -1,22 +0,0 @@
----
-layout: tactic
-
-title: "Design energy-aware scheduling"
-tags: machine-learning deployment design-tactic measured
-t-sort: "Awesome Tactic"
-t-type: "Architectural Tactic"
-categories: deployment
-t-description: "An energy-aware dynamic scheduling policy is a strategy to optimize the energy consumption of machine learning tasks. It dynamically schedules tasks or processes based on their energy requirements and system conditions. The objective of an energy-aware dynamic scheduling policy is to make efficient use of available computational resources while minimizing energy consumption."
-t-participant: "Software Designer"
-t-artifact:
-t-context: "Distributed systems"
-t-feature:
-t-intent: "Dynamically manage workers to maximize the overall utilization in distributed systems"
-t-targetQA: "Resource utilization"
-t-relatedQA: "Performance, Energy efficiency"
-t-measuredimpact: "Scheduled 6% more workers compared to other methods."
-t-source: "Master Thesis 'Green tactics for ML-important QAs' by Heli Järvenpää (2023);
-Sun, Y., Zhou, S., & Gündüz, D. (2020, June). Energy-aware analog aggregation for federated learning with redundant data. In ICC 2020-2020 IEEE International Conference on Communications (ICC) (pp. 1-7). IEEE."
-t-source-doi: "DOI:10.1109/ICC40277.2020.9148853"
-t-diagram: "design-energy-aware-scheduling.png"
----
\ No newline at end of file
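A minimal FedAvg-style aggregation sketch of the federated tactic above: clients contribute parameter vectors only (never raw data), weighted by their local dataset sizes.

```python
import numpy as np

def federated_average(client_weights, client_sizes):
    total = sum(client_sizes)
    return sum(w * (n / total) for w, n in zip(client_weights, client_sizes))

# Three clients with locally trained parameters (stand-ins for model updates).
weights = [np.array([0.9, 1.1]), np.array([1.0, 1.0]), np.array([1.2, 0.8])]
sizes = [1_000, 4_000, 5_000]             # local dataset sizes per client
print(federated_average(weights, sizes))  # the new global model parameters
```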
diff --git a/docs/_posts/deployment/2023-08-01-use-computation-partitioning.md b/docs/_posts/deployment/2023-08-01-use-computation-partitioning.md
deleted file mode 100644
index bb3aab0..0000000
--- a/docs/_posts/deployment/2023-08-01-use-computation-partitioning.md
+++ /dev/null
@@ -1,22 +0,0 @@
----
-layout: tactic
-
-title: "Use computation partitioning"
-tags: machine-learning deployment architecture energy-footprint measured
-t-sort: "Awesome Tactic"
-t-type: "Architectural Tactic"
-categories: deployment
-t-description: "Computation partitioning is the process of dividing the computations of a convolutional neural network (CNN) between a mobile client and a cloud server, with the goal of optimizing energy consumption and efficiency. The NeuPart framework is an example of such a partitioning approach. NeuPart divides computational tasks between the mobile device (client) and the remote server or data center (cloud) at run time based on energy consumption. By offloading computationally intensive tasks to the cloud and executing lighter tasks locally, NeuPart achieved energy savings of up to 52% in cloud-based computations."
-t-participant: "Software Designer"
-t-artifact: "Convolutional neural networks"
-t-context: "Cloud"
-t-feature:
-t-intent: "Divide computational tasks between the mobile device (client) and the remote server or data center (cloud) at run time based on specific conditions or requirements."
-t-targetQA: "Energy efficiency"
-t-relatedQA: "Accuracy"
-t-measuredimpact: "The researchers demonstrated that at a certain effective bit rate and transmission power, the optimal partition for specific CNN models resulted in energy savings of up to 52.4% over a fully cloud-based computation and 27.3% over a fully in situ computation."
-t-source: "Master Thesis 'Green tactics for ML-important QAs' by Heli Järvenpää (2023);
-Manasi, S. D., Snigdha, F. S., & Sapatnekar, S. S. (2020). NeuPart: Using analytical models to drive energy-efficient partitioning of CNN computations on cloud-connected mobile clients. IEEE Transactions on Very Large Scale Integration (VLSI) Systems, 28(8), 1844-1857."
-t-source-doi:
-t-diagram: "use-computation-partitioning.png"
----
\ No newline at end of file
diff --git a/docs/_posts/deployment/2023-08-01-use-energy-efficient-hardware.md b/docs/_posts/deployment/2023-08-01-use-energy-efficient-hardware.md
deleted file mode 100644
index 225798a..0000000
--- a/docs/_posts/deployment/2023-08-01-use-energy-efficient-hardware.md
+++ /dev/null
@@ -1,21 +0,0 @@
----
-layout: tactic
-
-title: "Use energy-efficient hardware"
-tags: machine-learning deployment hardware
-t-sort: "Awesome Tactic"
-t-type: "Architectural Tactic"
-categories: deployment
-t-description: "The emissions of machine learning are related to the hardware used, so running machine learning models on energy-efficient hardware can reduce their power consumption. Energy-efficient hardware could include low-energy components. For example, the Tensor Processing Units (TPUs) developed by Google have been seen as an energy-efficient alternative to CPUs and GPUs."
-t-participant: "Software Designer"
-t-artifact: "Hardware"
-t-context: "Machine Learning"
-t-feature:
-t-intent: "Use energy-efficient hardware"
-t-targetQA: "Energy efficiency"
-t-relatedQA:
-t-measuredimpact:
-t-source: "Master Thesis 'Green tactics for ML-important QAs' by Heli Järvenpää (2023)"
-t-source-doi:
-t-diagram: "use-energy-efficient-hardware.png"
----
\ No newline at end of file
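A NeuPart-like partition decision, with purely illustrative numbers rather than measured values from the paper: choose the split point whose on-device energy (local layers plus activation transfer) is lowest.

```python
client_energy = [5.0, 8.0, 12.0, 3.0]        # J per layer if run on the client
transfer_energy = [6.0, 4.0, 1.0, 0.5, 0.0]  # J to ship activations after layer k
                                             # (k=0: raw input; k=4: no offload)

def on_device_energy(k):
    # layers [0, k) run locally, then activations are sent to the cloud
    return sum(client_energy[:k]) + transfer_energy[k]

best = min(range(len(client_energy) + 1), key=on_device_energy)
print(f"offload after layer {best}: {on_device_energy(best):.1f} J on-device")
```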
diff --git a/docs/_posts/deployment/2023-08-01-use-power-capping.md b/docs/_posts/deployment/2023-08-01-use-power-capping.md
deleted file mode 100644
index 9d01535..0000000
--- a/docs/_posts/deployment/2023-08-01-use-power-capping.md
+++ /dev/null
@@ -1,22 +0,0 @@
----
-layout: tactic
-
-title: "Use power capping"
-tags: machine-learning deployment hardware design-tactic energy-footprint measured
-t-sort: "Awesome Tactic"
-t-type: "Architectural Tactic"
-categories: deployment
-t-description: "Power capping is a technique used to limit the amount of power consumed by a device or system, such as a CPU, GPU, or server. It involves setting a maximum power consumption threshold for the device and dynamically adjusting the power usage to ensure that it stays below that threshold. This is typically done to manage the power consumption and heat dissipation of a device, and to prevent it from exceeding the power budget of a data center or other power-limited environment."
-t-participant: "Software Designer"
-t-artifact: "Hardware"
-t-context: "Machine Learning"
-t-feature:
-t-intent: "Use power capping to limit the energy usage of an ML model"
-t-targetQA: "Energy efficiency"
-t-relatedQA: "Performance"
-t-measuredimpact: "Restricting the use of GPU resources can lead to reduced performance and longer execution times, but in certain configurations it can also result in a significant reduction in energy consumption (up to 33%) with a moderate impact on performance."
-t-source: "Master Thesis 'Green tactics for ML-important QAs' by Heli Järvenpää (2023);
-Krzywaniak, A., Czarnul, P., & Proficz, J. (2022, June). GPU Power Capping for Energy-Performance Trade-Offs in Training of Deep Convolutional Neural Networks for Image Recognition. In Computational Science–ICCS 2022: 22nd International Conference, London, UK, June 21–23, 2022, Proceedings, Part I (pp. 667-681). Cham: Springer International Publishing."
-t-source-doi: "DOI:10.1007/978-3-031-08751-6_48"
-t-diagram: "use-power-capping.png"
----
\ No newline at end of file
diff --git a/docs/_posts/management/2023-08-01_monitor_computing_power.md b/docs/_posts/management/2023-08-01_monitor_computing_power.md
deleted file mode 100644
index affcadb..0000000
--- a/docs/_posts/management/2023-08-01_monitor_computing_power.md
+++ /dev/null
@@ -1,21 +0,0 @@
----
-layout: tactic
-
-title: "Monitor computing power"
-tags: machine-learning management design-tactic
-t-sort: "Awesome Tactic"
-t-type: "Architectural Tactic"
-categories: management
-t-description: "Estimating and calculating the energy footprint of a machine learning model can help reduce the computational power ML models use. Monitoring the energy consumption of an ML model over the long term helps identify the components where energy is used inefficiently, which can serve as a starting point for improvements that reduce energy consumption. Easy-to-use tools for this have been scarce, but recently researchers have provided frameworks for estimating or calculating the energy footprint of ML."
-t-participant: "Data Scientist, Software Designer"
-t-artifact:
-t-context: "Machine Learning"
-t-feature:
-t-intent: "Monitor the computing power of a machine learning model over the long term"
-t-targetQA: "Energy efficiency"
-t-relatedQA:
-t-measuredimpact:
-t-source: "Master Thesis 'Green tactics for ML-important QAs' by Heli Järvenpää (2023)"
-t-source-doi:
-t-diagram: "monitor-computing-power.png"
----
\ No newline at end of file
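A power-capping sketch, assuming an NVIDIA GPU and the nvidia-smi CLI; the 150 W cap is an arbitrary example value, and the call needs administrator privileges.

```python
import subprocess

def set_gpu_power_cap(watts, gpu=0):
    # nvidia-smi -pl sets the board power limit for the given GPU index
    subprocess.run(["nvidia-smi", "-i", str(gpu), "-pl", str(watts)], check=True)

set_gpu_power_cap(150)  # then launch training as usual
```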
diff --git a/docs/_posts/management/2023-08-01_retrain_the_model_if_needed.md b/docs/_posts/management/2023-08-01_retrain_the_model_if_needed.md
deleted file mode 100644
index 64a5e1b..0000000
--- a/docs/_posts/management/2023-08-01_retrain_the_model_if_needed.md
+++ /dev/null
@@ -1,22 +0,0 @@
----
-layout: tactic
-
-title: "Retrain the model if needed"
-tags: machine-learning management design-tactic
-t-sort: "Awesome Tactic"
-t-type: "Architectural Tactic"
-categories: management
-t-description: "Retraining a model refers to the process of updating or modifying an existing machine learning model. In the long term, concept drift may affect the accuracy of existing machine learning models. Retraining the model, e.g., by training it again with new data, is better in terms of sustainability than building a new model from scratch."
-t-participant: "Data Scientist"
-t-artifact: "Machine learning model"
-t-context: "Machine Learning"
-t-feature:
-t-intent: "In case of concept drift, retrain the existing machine learning model instead of building a new one"
-t-targetQA: "Maintainability"
-t-relatedQA:
-t-measuredimpact:
-t-source: "Master Thesis 'Green tactics for ML-important QAs' by Heli Järvenpää (2023);
-Poenaru-Olaru, L., Sallou, J., Cruz, L., Rellermeyer, J. S., & van Deursen, A. Retrain AI Systems Responsibly! Use Sustainable Concept Drift Adaptation Techniques."
-t-source-doi: "DOI:10.1109/GREENS59328.2023.00009"
-t-diagram: "retrain-model-if-needed.png"
----
\ No newline at end of file
diff --git a/docs/_posts/management/2023-08-01_use_informed_adaptation.md b/docs/_posts/management/2023-08-01_use_informed_adaptation.md
deleted file mode 100644
index ea3cb47..0000000
--- a/docs/_posts/management/2023-08-01_use_informed_adaptation.md
+++ /dev/null
@@ -1,22 +0,0 @@
----
-layout: tactic
-
-title: "Use informed adaptation"
-tags: machine-learning management design-tactic
-t-sort: "Awesome Tactic"
-t-type: "Architectural Tactic"
-categories: management
-t-description: "Machine learning models may experience drift that affects their functionality. In those cases, the models must be adapted to the drift. Informed adaptation refers to adapting the ML model only when a drift is detected, so the frequency of adaptation is lower than with blind, periodic adaptation. Informed adaptation avoids unnecessary adaptations, which saves energy."
-t-participant: "Data Scientist"
-t-artifact: "Machine learning model"
-t-context: "Machine Learning"
-t-feature:
-t-intent: "Adapt the ML model based on detected concept drift"
-t-targetQA: "Energy efficiency"
-t-relatedQA:
-t-measuredimpact:
-t-source: "Master Thesis 'Green tactics for ML-important QAs' by Heli Järvenpää (2023);
-Poenaru-Olaru, L., Sallou, J., Cruz, L., Rellermeyer, J. S., & van Deursen, A. Retrain AI Systems Responsibly! Use Sustainable Concept Drift Adaptation Techniques."
-t-source-doi: "DOI:10.1109/GREENS59328.2023.00009"
-t-diagram: "use-informed-adaptation.png"
----
\ No newline at end of file
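An informed-adaptation sketch: the (energy-intensive) retrain runs only when a drift signal fires, not on a fixed schedule. `evaluate` and `retrain` are hypothetical callables supplied by the surrounding system.

```python
def maybe_adapt(model, window, evaluate, retrain,
                baseline_acc, tolerance=0.05):
    acc = evaluate(model, window)       # accuracy on the most recent data window
    if acc < baseline_acc - tolerance:  # informed trigger: drift detected
        return retrain(model, window)   # adapt the existing model
    return model                        # no drift: skip the retrain, save energy
```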
-t-participant: "Data Scientist" -t-artifact: "ML algorithm" -t-context: "Machine Learning" -t-feature: "Neural Networks" -t-intent: "Replacing energy-intensive nodes with less energy-consuming ones" -t-targetQA: "Energy efficiency" -t-relatedQA: "Performance" -t-measuredimpact: "Decreased energy consumption of 24 % without a significant performance loss" -t-source: "Master Thesis 'Green tactics for ML-important QAs ' by Heli Järvenpää (2023); - -Wang, Y., Ge, R., & Qiu, S. (2020). Energy-Aware DNN Graph Optimization. arXiv preprint arXiv:2005.05837. - -" -t-source-doi:"doi:10.48550/arXiv.2005.05837" -t-diagram: "consider-graph-substitution.png" ---- \ No newline at end of file diff --git a/docs/_posts/model-optimization/2023-08-01consider-knowledge-distillation.md b/docs/_posts/model-optimization/2023-08-01consider-knowledge-distillation.md deleted file mode 100644 index dec71c0..0000000 --- a/docs/_posts/model-optimization/2023-08-01consider-knowledge-distillation.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -layout: tactic - -title: "Consider Knowledge Distillation" -tags: machine-learning model-optimization design-tactic -t-sort: "Awesome Tactic" -t-type: "Architectural Tactic" -categories: model-optimization -t-description: "Knowledge distillation is a technique where a large, complex model is used to train a smaller, simpler model. The goal is to transfer the learned information from the teacher model to the student model, allowing the student model to achieve comparable performance while requiring fewer computational resources." -t-participant: "Data Scientist" -t-artifact: "ML algorithm" -t-context: "Machine Learning" -t-feature: "Knowledge distillation" -t-intent: "If pre-trained models are too big for a given task, apply knowledge distillation of pre-trained models" -t-targetQA: "Performance" -t-relatedQA: "Accuracy, Energy efficiency" -t-measuredimpact: "Knowledge distallation improve performance when evaluating top 5 accuracy and energy-consumption" -t-source: "Master Thesis 'Green tactics for ML-important QAs ' by Heli Järvenpää (2023); - -Shanbhag, S., Chimalakonda, S., Sharma, V. S., & Kaulgud, V. (2022, June). Towards a Catalog of Energy Patterns in Deep Learning Development. In Proceedings of the International Conference on Evaluation and Assessment in Software Engineering 2022 (pp. 150-159)." -t-source-doi: "DOI:10.1145/3530019.3530035" -t-diagram: "consider-knowledge-distillation.png" ---- \ No newline at end of file diff --git a/docs/_posts/model-optimization/2023-08-01consider-transfer-learning.md b/docs/_posts/model-optimization/2023-08-01consider-transfer-learning.md deleted file mode 100644 index 4f38195..0000000 --- a/docs/_posts/model-optimization/2023-08-01consider-transfer-learning.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -layout: tactic - -title: "Consider Transfer learning" -tags: machine-learning model-optimization design-tactic -t-sort: "Awesome Tactic" -t-type: "Architectural Tactic" -categories: model-optimization -t-description: "Transfer learning means using knowledge gained from one task (pre-trained model) which is transferred and applied to another related task (adaptive inference)." -t-participant: "Data Scientist" -t-artifact: "ML algorithm" -t-context: "Machine Learning" -t-feature: "Transfer Learning" -t-intent: "Use pre-trained models for a given task whenever feasible" -t-targetQA: "Energy-efficiency" -t-relatedQA: -t-measuredimpact: -t-source: "Master Thesis 'Green tactics for ML-important QAs ' by Heli Järvenpää (2023); - -Jayakodi, N. 
diff --git a/docs/_posts/model-optimization/2023-08-01consider-transfer-learning.md b/docs/_posts/model-optimization/2023-08-01consider-transfer-learning.md
deleted file mode 100644
index 4f38195..0000000
--- a/docs/_posts/model-optimization/2023-08-01consider-transfer-learning.md
+++ /dev/null
@@ -1,23 +0,0 @@
----
-layout: tactic
-
-title: "Consider Transfer Learning"
-tags: machine-learning model-optimization design-tactic
-t-sort: "Awesome Tactic"
-t-type: "Architectural Tactic"
-categories: model-optimization
-t-description: "Transfer learning means that knowledge gained from one task (a pre-trained model) is transferred and applied to another, related task (e.g., through adaptive inference)."
-t-participant: "Data Scientist"
-t-artifact: "ML algorithm"
-t-context: "Machine Learning"
-t-feature: "Transfer Learning"
-t-intent: "Use pre-trained models for a given task whenever feasible"
-t-targetQA: "Energy efficiency"
-t-relatedQA:
-t-measuredimpact:
-t-source: "Master Thesis 'Green tactics for ML-important QAs' by Heli Järvenpää (2023);
-
-Jayakodi, N. K., Belakaria, S., Deshwal, A., & Doppa, J. R. (2020). Design and optimization of energy-accuracy tradeoff networks for mobile platforms via pretrained deep models. ACM Transactions on Embedded Computing Systems (TECS), 19(1), 1-24."
-t-source-doi: "DOI:10.1145/3366636"
-t-diagram: "consider-transfer-learning.png"
----
\ No newline at end of file
diff --git a/docs/_posts/model-optimization/2023-08-01enhance-model-sparsity.md b/docs/_posts/model-optimization/2023-08-01enhance-model-sparsity.md
deleted file mode 100644
index da79bfc..0000000
--- a/docs/_posts/model-optimization/2023-08-01enhance-model-sparsity.md
+++ /dev/null
@@ -1,23 +0,0 @@
----
-layout: tactic
-
-title: "Enhance model sparsity"
-tags: machine-learning model-optimization design-tactic energy-footprint
-t-sort: "Awesome Tactic"
-t-type: "Architectural Tactic"
-categories: model-optimization
-t-description: "Enhancing the sparsity of an ML model means reducing the number of model parameters or setting their values to zero. For example, weight sparsification involves identifying and removing unnecessary or less important weights in a neural network. Enhancing model sparsity decreases the complexity of the model and consequently reduces storage and memory requirements, which also results in lower power consumption."
-t-participant: "Data Scientist"
-t-artifact: "ML algorithm"
-t-context: "Machine Learning"
-t-feature:
-t-intent: "Remove unnecessary or less important weights in neural networks"
-t-targetQA: "Energy efficiency"
-t-relatedQA: "Accuracy"
-t-measuredimpact: "Lower energy consumption"
-t-source: "Master Thesis 'Green tactics for ML-important QAs' by Heli Järvenpää (2023);
-
-Yang, X., Hua, S., Shi, Y., Wang, H., Zhang, J., & Letaief, K. B. (2020). Sparse optimization for green edge AI inference. Journal of Communications and Information Networks, 5(1), 1-15."
-t-source-doi:
-t-diagram: "enhance-model-sparsity.png"
----
\ No newline at end of file
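A weight-sparsification sketch in NumPy: zero out the smallest-magnitude weights, keeping only the largest 10% (the sparsity level here is an arbitrary example, not a recommendation from the source).

```python
import numpy as np

def sparsify(weights, keep_ratio=0.10):
    threshold = np.quantile(np.abs(weights), 1.0 - keep_ratio)
    return np.where(np.abs(weights) >= threshold, weights, 0.0)

w = np.random.randn(1_000)
w_sparse = sparsify(w)
print(f"nonzero: {np.count_nonzero(w_sparse)} of {w.size}")
```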
-t-targetQA: "Performance" -t-relatedQA: "Accuracy, Energy efficiency" -t-measuredimpact: -t-source: "Master Thesis 'Green tactics for ML-important QAs ' by Heli Järvenpää (2023)" -t-source-doi: -t-diagram: "set-energy-consumption-as-a-constraint.png" ---- \ No newline at end of file diff --git a/docs/_posts/model-optimization/2023-08-01use-energy-aware-pruning.md b/docs/_posts/model-optimization/2023-08-01use-energy-aware-pruning.md deleted file mode 100644 index 293acac..0000000 --- a/docs/_posts/model-optimization/2023-08-01use-energy-aware-pruning.md +++ /dev/null @@ -1,23 +0,0 @@ ---- -layout: tactic - -title: "Use energy-aware pruning" -tags: machine-learning model-optimization design-tactic energy-footprint -t-sort: "Awesome Tactic" -t-type: "Architectural Tactic" -categories: model-optimization -t-description: "In machine learning, pruning refers to reducing the complexity and size of a trained model by removing unnecessary or less important components, such as weight. Energy-aware pruning uses energy consumption of a CNN to guide the pruning process in order to optimize for the best energy-efficiency. With the estimated energy for each layer in a CNN model, the algorithm can perform layer-by-layer pruning, starting from the layers with the highest energy consumption to the layers with the lowest energy consumption. For pruning each layer, it removes the weights that have the smallest joint impact on the output" -t-participant: "Data Scientist" -t-artifact: "ML algorithm" -t-context: "Machine Learning" -t-feature: -t-intent: "Pruning nodes with smallest joint impact on the output" -t-targetQA: "Energy-efficiency" -t-relatedQA: "Accuracy" -t-measuredimpact: "Energy-aware pruning method reduces the energy consumption " -t-source: "Master Thesis 'Green tactics for ML-important QAs ' by Heli Järvenpää (2023); - -Yang, T. J., Chen, Y. H., & Sze, V. (2017). Designing energy-efficient convolutional neural networks using energy-aware pruning. In Proceedings of the IEEE conference on computer vision and pattern recognition (pp. 5687-5695)." -t-source-doi: -t-diagram: "use-energy-aware-pruning.png" ---- \ No newline at end of file diff --git a/docs/_posts/model-training/2023-08-01_avoid-memory-leaks.md b/docs/_posts/model-training/2023-08-01_avoid-memory-leaks.md deleted file mode 100644 index f989fad..0000000 --- a/docs/_posts/model-training/2023-08-01_avoid-memory-leaks.md +++ /dev/null @@ -1,22 +0,0 @@ ---- -layout: tactic - -title: "Avoid memory leaks" -tags: machine-learning model-training design-tactic -t-sort: "Awesome Tactic" -t-type: "Architectural Tactic" -categories: model-training -t-description: "Model training requires memory, and sometimes memory leaks and OOM (out of memory) errors may occur during that process. If that happens, the knowledge gained during the prior training process is lost. By considering memory availability constraints and addressing possible OOM exceptions, the system can be designed to operate within the available memory limits. It reduced the likelihood of errors and prevents unnecessary energy consumption. " -t-participant: "Data Scientist" -t-artifact: "Memory" -t-context: "Machine Learning" -t-feature: -t-intent: "Consider possible memory constraints during training" -t-targetQA: "Recoverability" -t-relatedQA: -t-measuredimpact: -t-source: "Master Thesis 'Green tactics for ML-important QAs' by Heli Järvenpää (2023); -Shanbhag, S., Chimalakonda, S., Sharma, V. S., & Kaulgud, V. (2022, June). 
diff --git a/docs/_posts/model-training/2023-08-01_avoid-memory-leaks.md b/docs/_posts/model-training/2023-08-01_avoid-memory-leaks.md
deleted file mode 100644
index f989fad..0000000
--- a/docs/_posts/model-training/2023-08-01_avoid-memory-leaks.md
+++ /dev/null
@@ -1,22 +0,0 @@
----
-layout: tactic
-
-title: "Avoid memory leaks"
-tags: machine-learning model-training design-tactic
-t-sort: "Awesome Tactic"
-t-type: "Architectural Tactic"
-categories: model-training
-t-description: "Model training requires memory, and memory leaks and OOM (out of memory) errors may occur during the process. If that happens, the knowledge gained during the prior training is lost. By considering memory availability constraints and handling possible OOM exceptions, the system can be designed to operate within the available memory limits. This reduces the likelihood of errors and prevents unnecessary energy consumption."
-t-participant: "Data Scientist"
-t-artifact: "Memory"
-t-context: "Machine Learning"
-t-feature:
-t-intent: "Consider possible memory constraints during training"
-t-targetQA: "Recoverability"
-t-relatedQA:
-t-measuredimpact:
-t-source: "Master Thesis 'Green tactics for ML-important QAs' by Heli Järvenpää (2023);
-Shanbhag, S., Chimalakonda, S., Sharma, V. S., & Kaulgud, V. (2022, June). Towards a Catalog of Energy Patterns in Deep Learning Development. In Proceedings of the International Conference on Evaluation and Assessment in Software Engineering 2022 (pp. 150-159)."
-t-source-doi: "DOI:10.1145/3530019.3530035"
-t-diagram: "avoid-memory-leaks.png"
----
\ No newline at end of file
diff --git a/docs/_posts/model-training/2023-08-01_use-checkpoints-during-training.md b/docs/_posts/model-training/2023-08-01_use-checkpoints-during-training.md
deleted file mode 100644
index bcf0ac7..0000000
--- a/docs/_posts/model-training/2023-08-01_use-checkpoints-during-training.md
+++ /dev/null
@@ -1,22 +0,0 @@
----
-layout: tactic
-
-title: "Use checkpoints during training"
-tags: machine-learning model-training design-tactic
-t-sort: "Awesome Tactic"
-t-type: "Architectural Tactic"
-categories: model-training
-t-description: "Training is an energy-intensive stage of the machine learning lifecycle (Shanbhag, Chimalakonda, Sharma, & Kaulgud, 2022). Sometimes a hardware failure or error can terminate the training process before it completes. In those cases, training has to be restarted from the beginning and all the training progress is lost. Checkpoints, however, save the training state at regular intervals, and in case of a premature termination, training can continue from the last checkpoint (Shanbhag et al., 2022)."
-t-participant: "Data Scientist"
-t-artifact: "Memory"
-t-context: "Machine Learning"
-t-feature:
-t-intent: "Use checkpoints during training to prevent knowledge loss from a premature termination"
-t-targetQA: "Recoverability"
-t-relatedQA:
-t-measuredimpact:
-t-source: "Master Thesis 'Green tactics for ML-important QAs' by Heli Järvenpää (2023);
-Shanbhag, S., Chimalakonda, S., Sharma, V. S., & Kaulgud, V. (2022, June). Towards a Catalog of Energy Patterns in Deep Learning Development. In Proceedings of the International Conference on Evaluation and Assessment in Software Engineering 2022 (pp. 150-159)."
-t-source-doi: "DOI:10.1145/3530019.3530035"
-t-diagram: "use-checkpoints-during-training.png"
----
\ No newline at end of file
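A checkpointing sketch in PyTorch (assumed available): persist model and optimizer state periodically so a crash loses at most one interval of work. The path is illustrative.

```python
import torch

def save_checkpoint(model, optimizer, epoch, path="checkpoint.pt"):
    torch.save({"epoch": epoch,
                "model": model.state_dict(),
                "optimizer": optimizer.state_dict()}, path)

def resume(model, optimizer, path="checkpoint.pt"):
    state = torch.load(path)
    model.load_state_dict(state["model"])
    optimizer.load_state_dict(state["optimizer"])
    return state["epoch"] + 1  # epoch to continue from after a failure
```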
diff --git a/docs/_posts/model-training/2023-08-01_use-quantization-aware-training.md b/docs/_posts/model-training/2023-08-01_use-quantization-aware-training.md
deleted file mode 100644
index 8543e66..0000000
--- a/docs/_posts/model-training/2023-08-01_use-quantization-aware-training.md
+++ /dev/null
@@ -1,22 +0,0 @@
----
-layout: tactic
-
-title: "Use quantization-aware training"
-tags: machine-learning model-training design-tactic
-t-sort: "Awesome Tactic"
-t-type: "Architectural Tactic"
-categories: model-training
-t-description: "Quantization-aware training is a technique used to train neural networks with the objective of achieving better performance and efficiency when using reduced-precision data types, such as fixed-point or integer representations, instead of the more commonly used higher-precision floating-point representations."
-t-participant: "Data Scientist"
-t-artifact: "Algorithm"
-t-context: "Machine Learning"
-t-feature: "Model Training"
-t-intent: "Use quantization-aware training to convert high-precision data types to lower precision"
-t-targetQA: "Accuracy"
-t-relatedQA: "Energy efficiency"
-t-measuredimpact:
-t-source: "Master Thesis 'Green tactics for ML-important QAs' by Heli Järvenpää (2023);
-Kim, M., Saad, W., Mozaffari, M., & Debbah, M. (2022, May). On the tradeoff between energy, precision, and accuracy in federated quantized neural networks. In ICC 2022-IEEE International Conference on Communications (pp. 2194-2199). IEEE."
-t-source-doi: "DOI:10.1109/ICC45855.2022.9838362"
-t-diagram: "use-quantization-aware-training.png"
----
\ No newline at end of file
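An illustrative fake-quantization step of the kind used inside quantization-aware training, sketched in NumPy rather than a specific framework API: the forward pass sees k-bit weights while a full-precision copy is kept for the update (the straight-through estimator).

```python
import numpy as np

def fake_quantize(w, bits=8):
    scale = np.abs(w).max() / (2 ** (bits - 1) - 1)  # symmetric k-bit grid
    return np.round(w / scale) * scale               # forward uses this value

w = np.random.randn(5).astype(np.float32)
print(w)
print(fake_quantize(w))
```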
diff --git a/docs/assets/diagrams/apply-cloud-fog-network.png b/docs/assets/diagrams/apply-cloud-fog-network.png
deleted file mode 100644
index 5d3f269..0000000
Binary files a/docs/assets/diagrams/apply-cloud-fog-network.png and /dev/null differ
diff --git a/docs/assets/diagrams/apply-sampling-techniques.png b/docs/assets/diagrams/apply-sampling-techniques.png
deleted file mode 100644
index 9da5a1f..0000000
Binary files a/docs/assets/diagrams/apply-sampling-techniques.png and /dev/null differ
diff --git a/docs/assets/diagrams/avoid-memory-leaks.png b/docs/assets/diagrams/avoid-memory-leaks.png
deleted file mode 100644
index e072043..0000000
Binary files a/docs/assets/diagrams/avoid-memory-leaks.png and /dev/null differ
diff --git a/docs/assets/diagrams/avoid-unnecessary-referencing-to-data.png b/docs/assets/diagrams/avoid-unnecessary-referencing-to-data.png
deleted file mode 100644
index 93446a1..0000000
Binary files a/docs/assets/diagrams/avoid-unnecessary-referencing-to-data.png and /dev/null differ
diff --git a/docs/assets/diagrams/choose-an-energy-efficient-algorithm.png b/docs/assets/diagrams/choose-an-energy-efficient-algorithm.png
deleted file mode 100644
index ca6de9a..0000000
Binary files a/docs/assets/diagrams/choose-an-energy-efficient-algorithm.png and /dev/null differ
diff --git a/docs/assets/diagrams/consider-federated-learning.png b/docs/assets/diagrams/consider-federated-learning.png
deleted file mode 100644
index bcb8eb1..0000000
Binary files a/docs/assets/diagrams/consider-federated-learning.png and /dev/null differ
diff --git a/docs/assets/diagrams/consider-graph-substitution.png b/docs/assets/diagrams/consider-graph-substitution.png
deleted file mode 100644
index 53147d7..0000000
Binary files a/docs/assets/diagrams/consider-graph-substitution.png and /dev/null differ
diff --git a/docs/assets/diagrams/consider-knowledge-distillation.png b/docs/assets/diagrams/consider-knowledge-distillation.png
deleted file mode 100644
index 7b69d21..0000000
Binary files a/docs/assets/diagrams/consider-knowledge-distillation.png and /dev/null differ
diff --git a/docs/assets/diagrams/consider-reinforcement-learning-for-energy-efficiency.png b/docs/assets/diagrams/consider-reinforcement-learning-for-energy-efficiency.png
deleted file mode 100644
index 772ff3e..0000000
Binary files a/docs/assets/diagrams/consider-reinforcement-learning-for-energy-efficiency.png and /dev/null differ
diff --git a/docs/assets/diagrams/consider-transfer-learning.png b/docs/assets/diagrams/consider-transfer-learning.png
deleted file mode 100644
index 0c85373..0000000
Binary files a/docs/assets/diagrams/consider-transfer-learning.png and /dev/null differ
diff --git a/docs/assets/diagrams/decrease-model-complexity.png b/docs/assets/diagrams/decrease-model-complexity.png
deleted file mode 100644
index 21057fd..0000000
Binary files a/docs/assets/diagrams/decrease-model-complexity.png and /dev/null differ
diff --git a/docs/assets/diagrams/design-dynamic-parameter-adaptation.png b/docs/assets/diagrams/design-dynamic-parameter-adaptation.png
deleted file mode 100644
index 056bd55..0000000
Binary files a/docs/assets/diagrams/design-dynamic-parameter-adaptation.png and /dev/null differ
diff --git a/docs/assets/diagrams/design-energy-aware-scheduling.png b/docs/assets/diagrams/design-energy-aware-scheduling.png
deleted file mode 100644
index 1f7a395..0000000
Binary files a/docs/assets/diagrams/design-energy-aware-scheduling.png and /dev/null differ
diff --git a/docs/assets/diagrams/enhance-model-sparsity.png b/docs/assets/diagrams/enhance-model-sparsity.png
deleted file mode 100644
index e5bc1ef..0000000
Binary files a/docs/assets/diagrams/enhance-model-sparsity.png and /dev/null differ
diff --git a/docs/assets/diagrams/monitor-computing-power.png.png b/docs/assets/diagrams/monitor-computing-power.png.png
deleted file mode 100644
index 097f600..0000000
Binary files a/docs/assets/diagrams/monitor-computing-power.png.png and /dev/null differ
diff --git a/docs/assets/diagrams/project-data-into-a-lower-dimensional-embedding.png b/docs/assets/diagrams/project-data-into-a-lower-dimensional-embedding.png
deleted file mode 100644
index 0956e5e..0000000
Binary files a/docs/assets/diagrams/project-data-into-a-lower-dimensional-embedding.png and /dev/null differ
diff --git a/docs/assets/diagrams/reduce-number-of-data-features.png b/docs/assets/diagrams/reduce-number-of-data-features.png
deleted file mode 100644
index a6a1c57..0000000
Binary files a/docs/assets/diagrams/reduce-number-of-data-features.png and /dev/null differ
diff --git a/docs/assets/diagrams/remove-redundant-data.png b/docs/assets/diagrams/remove-redundant-data.png
deleted file mode 100644
index cc2a69b..0000000
Binary files a/docs/assets/diagrams/remove-redundant-data.png and /dev/null differ
diff --git a/docs/assets/diagrams/retrain-model-if-needed.png.png b/docs/assets/diagrams/retrain-model-if-needed.png.png
deleted file mode 100644
index 877efce..0000000
Binary files a/docs/assets/diagrams/retrain-model-if-needed.png.png and /dev/null differ
diff --git a/docs/assets/diagrams/select-a-lightweight-algorithm-alternative.png b/docs/assets/diagrams/select-a-lightweight-algorithm-alternative.png
deleted file mode 100644
index a0f2a45..0000000
Binary files a/docs/assets/diagrams/select-a-lightweight-algorithm-alternative.png and /dev/null differ
diff --git a/docs/assets/diagrams/set-energy-consumption-as-a-constraint.png b/docs/assets/diagrams/set-energy-consumption-as-a-constraint.png
deleted file mode 100644
index 0709234..0000000
Binary files a/docs/assets/diagrams/set-energy-consumption-as-a-constraint.png and /dev/null differ
diff --git a/docs/assets/diagrams/use-built-in-library-functions.png b/docs/assets/diagrams/use-built-in-library-functions.png
deleted file mode 100644
index 34dedf8..0000000
Binary files a/docs/assets/diagrams/use-built-in-library-functions.png and /dev/null differ
diff --git a/docs/assets/diagrams/use-checkpoints-during-training.png b/docs/assets/diagrams/use-checkpoints-during-training.png
deleted file mode 100644
index 80edccb..0000000
Binary files a/docs/assets/diagrams/use-checkpoints-during-training.png and /dev/null differ
diff --git a/docs/assets/diagrams/use-computation-partitioning.png b/docs/assets/diagrams/use-computation-partitioning.png
deleted file mode 100644
index 681a614..0000000
Binary files a/docs/assets/diagrams/use-computation-partitioning.png and /dev/null differ
diff --git a/docs/assets/diagrams/use-energy-aware-pruning.png b/docs/assets/diagrams/use-energy-aware-pruning.png
deleted file mode 100644
index e5bff19..0000000
Binary files a/docs/assets/diagrams/use-energy-aware-pruning.png and /dev/null differ
diff --git a/docs/assets/diagrams/use-energy-efficient-hardware.png b/docs/assets/diagrams/use-energy-efficient-hardware.png
deleted file mode 100644
index 866c506..0000000
Binary files a/docs/assets/diagrams/use-energy-efficient-hardware.png and /dev/null differ
diff --git a/docs/assets/diagrams/use-informed-adaptation.png.png b/docs/assets/diagrams/use-informed-adaptation.png.png
deleted file mode 100644
index 0e37095..0000000
Binary files a/docs/assets/diagrams/use-informed-adaptation.png.png and /dev/null differ
diff --git a/docs/assets/diagrams/use-input-quantization.png b/docs/assets/diagrams/use-input-quantization.png
deleted file mode 100644
index a338dde..0000000
Binary files a/docs/assets/diagrams/use-input-quantization.png and /dev/null differ
diff --git a/docs/assets/diagrams/use-power-capping.png b/docs/assets/diagrams/use-power-capping.png
deleted file mode 100644
index bfd588f..0000000
Binary files a/docs/assets/diagrams/use-power-capping.png and /dev/null differ
diff --git a/docs/assets/diagrams/use-quantization-aware-training.png b/docs/assets/diagrams/use-quantization-aware-training.png
deleted file mode 100644
index e22d3aa..0000000
Binary files a/docs/assets/diagrams/use-quantization-aware-training.png and /dev/null differ
diff --git a/docs/categories/algorithm-design.md b/docs/categories/algorithm-design.md
deleted file mode 100644
index 915cd1c..0000000
--- a/docs/categories/algorithm-design.md
+++ /dev/null
@@ -1,6 +0,0 @@
----
-layout: categories
-category-name: algorithm-design
-category-description: "Algorithm design tactics aim to design (machine learning) algorithms in an environmentally sustainable way."
-category-type: "Awesome Tactics"
----
diff --git a/docs/categories/data-centric.md b/docs/categories/data-centric.md
deleted file mode 100644
index 2fd8f81..0000000
--- a/docs/categories/data-centric.md
+++ /dev/null
@@ -1,6 +0,0 @@
----
-layout: categories
-category-name: data-centric
-category-description: "The data-centric category involves tactics that aim to modify data to make software more environmentally sustainable. These tactics apply mostly to data-driven systems."
-category-type: "Awesome Tactics"
----
diff --git a/docs/categories/deployment.md b/docs/categories/deployment.md
deleted file mode 100644
index f31095d..0000000
--- a/docs/categories/deployment.md
+++ /dev/null
@@ -1,6 +0,0 @@
----
-layout: categories
-category-name: deployment
-category-description: "Deployment refers to the process of implementing software or a software component in a real environment. These tactics include decisions that make the whole software more sustainable when it is being used."
-category-type: "Awesome Tactics"
----
diff --git a/docs/categories/management.md b/docs/categories/management.md
deleted file mode 100644
index 484e840..0000000
--- a/docs/categories/management.md
+++ /dev/null
@@ -1,6 +0,0 @@
----
-layout: categories
-category-name: management
-category-description: "The management category includes tactics related to managing the software in the long term."
-category-type: "Awesome Tactics"
----
diff --git a/docs/categories/model-optimization.md b/docs/categories/model-optimization.md
deleted file mode 100644
index 3178b68..0000000
--- a/docs/categories/model-optimization.md
+++ /dev/null
@@ -1,6 +0,0 @@
----
-layout: categories
-category-name: model-optimization
-category-description: "The model-optimization category involves tactics related to optimizing machine learning models before their deployment."
-category-type: "Awesome Tactics"
----
-category-type: "Awesome Tactics" ---- diff --git a/docs/categories/model-training.md b/docs/categories/model-training.md deleted file mode 100644 index a7fedab..0000000 --- a/docs/categories/model-training.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -layout: categories -category-name: model-training -category-description: "Model training refers to the machine learning model training. The tactics in this category aim to make the training process more environmentally sustainable." -category-type: "Awesome Tactics" ---- diff --git a/docs/tags/design-tactic.md b/docs/tags/design-tactic.md deleted file mode 100644 index 251c15f..0000000 --- a/docs/tags/design-tactic.md +++ /dev/null @@ -1,4 +0,0 @@ ---- -layout: tags -tag-name: design-tactic ---- diff --git a/docs/tags/libraries.md b/docs/tags/libraries.md deleted file mode 100644 index 0b22519..0000000 --- a/docs/tags/libraries.md +++ /dev/null @@ -1,4 +0,0 @@ ---- -layout: tags -tag-name: libraries ---- diff --git a/docs/tags/machine-learning.md b/docs/tags/machine-learning.md deleted file mode 100644 index 595b035..0000000 --- a/docs/tags/machine-learning.md +++ /dev/null @@ -1,4 +0,0 @@ ---- -layout: tags -tag-name: machine-learning ---- diff --git a/docs/tags/management.md b/docs/tags/management.md deleted file mode 100644 index 116fe9c..0000000 --- a/docs/tags/management.md +++ /dev/null @@ -1,4 +0,0 @@ ---- -layout: tags -tag-name: management ---- diff --git a/docs/tags/model-optimization.md b/docs/tags/model-optimization.md deleted file mode 100644 index e673fd2..0000000 --- a/docs/tags/model-optimization.md +++ /dev/null @@ -1,4 +0,0 @@ ---- -layout: tags -tag-name: model-optimization ---- diff --git a/docs/tags/model-training.md b/docs/tags/model-training.md deleted file mode 100644 index 0f758fd..0000000 --- a/docs/tags/model-training.md +++ /dev/null @@ -1,4 +0,0 @@ ---- -layout: tags -tag-name: model-training ----